// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>
#include <linux/poison.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/trace_events.h>
#include <linux/kallsyms.h>
static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};
struct bpf_mem_alloc bpf_global_percpu_ma;
static bool bpf_global_percpu_ma_set;
/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less then 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
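/* Illustrative sketch (not part of this file): the acquire/release pairing
 * described above, as it would appear in a BPF-side C program:
 *
 *    struct bpf_sock *sk;
 *
 *    sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), -1, 0);
 *    if (sk)                      // NULL check: PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET
 *            bpf_sk_release(sk);  // reference must be released before exit
 */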
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
	/* length of verifier log at the time this state was pushed on stack */
	u32 log_pos;
};
#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_GLOBAL_PERCPU_MA_MAX_SIZE	512

#define BPF_PRIV_STACK_MIN_SIZE		64
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
static int ref_set_non_owning(struct bpf_verifier_env *env,
			      struct bpf_reg_state *reg);
static void specialize_kfunc(struct bpf_verifier_env *env,
			     u32 func_id, u16 offset, unsigned long *addr);
static bool is_trusted_reg(const struct bpf_reg_state *reg);
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state.poison;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state.unpriv;
}
static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      struct bpf_map *map,
			      bool unpriv, bool poison)
{
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state.unpriv = unpriv;
	aux->map_ptr_state.poison = poison;
	aux->map_ptr_state.map_ptr = map;
}
static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}
static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
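/* Illustrative example of the encoding above: storing an immediate key value
 * of 5 yields map_key_state == (5 | BPF_MAP_KEY_SEEN), so bpf_map_key_unseen()
 * becomes false and bpf_map_key_immediate() still recovers 5; a previously set
 * poison bit, if any, is carried over unchanged.
 */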
static bool bpf_helper_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == 0;
}

static bool bpf_pseudo_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_CALL;
}

static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
}
struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	struct btf_field *kptr_field;
};

struct bpf_kfunc_call_arg_meta {
	const struct btf_type *func_proto;
	const char *func_name;
	/* arg_{btf,btf_id,owning_ref} are used by kfunc-specific handling,
	 * generally to pass info about user-defined local kptr types to later
	 * verification logic:
	 *   bpf_obj_drop/bpf_percpu_obj_drop
	 *     Record the local kptr type to be drop'd
	 *   bpf_refcount_acquire (via KF_ARG_PTR_TO_REFCOUNTED_KPTR arg type)
	 *     Record the local kptr type to be refcount_incr'd and use
	 *     arg_owning_ref to determine whether refcount_acquire should be
	 *     fallible
	 */
	struct {
		struct btf_field *field;
	} arg_list_head;
	struct {
		struct btf_field *field;
	} arg_rbtree_root;
	struct {
		enum bpf_dynptr_type type;
	} initialized_dynptr;
};
struct btf *btf_vmlinux;

static const char *btf_type_name(const struct btf *btf, u32 id)
{
	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}

static DEFINE_MUTEX(bpf_verifier_lock);
static DEFINE_MUTEX(bpf_percpu_ma_lock);
__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
static void verbose_invalid_scalar(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg,
				   struct bpf_retval_range range, const char *ctx,
				   const char *reg_name)
{
	bool unknown = true;

	verbose(env, "%s the register %s has", ctx, reg_name);
	if (reg->smin_value > S64_MIN) {
		verbose(env, " smin=%lld", reg->smin_value);
		unknown = false;
	}
	if (reg->smax_value < S64_MAX) {
		verbose(env, " smax=%lld", reg->smax_value);
		unknown = false;
	}
	if (unknown)
		verbose(env, " unknown scalar value");
	verbose(env, " should have been in [%d, %d]\n", range.minval, range.maxval);
}
static bool reg_not_null(const struct bpf_reg_state *reg)
{
	enum bpf_reg_type type;

	type = reg->type;
	if (type_may_be_null(type))
		return false;

	type = base_type(type);
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_MAP_VALUE ||
		type == PTR_TO_MAP_KEY ||
		type == PTR_TO_SOCK_COMMON ||
		(type == PTR_TO_BTF_ID && is_trusted_reg(reg)) ||
		type == PTR_TO_MEM;
}
static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
{
	struct btf_record *rec = NULL;
	struct btf_struct_meta *meta;

	if (reg->type == PTR_TO_MAP_VALUE) {
		rec = reg->map_ptr->record;
	} else if (type_is_ptr_alloc_obj(reg->type)) {
		meta = btf_find_struct_meta(reg->btf, reg->btf_id);
		if (meta)
			rec = meta->record;
	}
	return rec;
}
static bool mask_raw_tp_reg_cond(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) {
	return reg->type == (PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL) &&
	       bpf_prog_is_raw_tp(env->prog) && !reg->ref_obj_id;
}

static bool mask_raw_tp_reg(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	if (!mask_raw_tp_reg_cond(env, reg))
		return false;
	reg->type &= ~PTR_MAYBE_NULL;
	return true;
}

static void unmask_raw_tp_reg(struct bpf_reg_state *reg, bool result)
{
	if (result)
		reg->type |= PTR_MAYBE_NULL;
}
static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog)
{
	struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux;

	return aux && aux[subprog].linkage == BTF_FUNC_GLOBAL;
}

static const char *subprog_name(const struct bpf_verifier_env *env, int subprog)
{
	struct bpf_func_info *info;

	if (!env->prog->aux->func_info)
		return "";

	info = &env->prog->aux->func_info[subprog];
	return btf_type_name(env->prog->aux->btf, info->type_id);
}
static void mark_subprog_exc_cb(struct bpf_verifier_env *env, int subprog)
{
	struct bpf_subprog_info *info = subprog_info(env, subprog);

	info->is_async_cb = true;
	info->is_exception_cb = true;
}

static bool subprog_is_exc_cb(struct bpf_verifier_env *env, int subprog)
{
	return subprog_info(env, subprog)->is_exception_cb;
}
static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK);
}

static bool type_is_rdonly_mem(u32 type)
{
	return type & MEM_RDONLY;
}
static bool is_acquire_function(enum bpf_func_id func_id,
				const struct bpf_map *map)
{
	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;

	if (func_id == BPF_FUNC_sk_lookup_tcp ||
	    func_id == BPF_FUNC_sk_lookup_udp ||
	    func_id == BPF_FUNC_skc_lookup_tcp ||
	    func_id == BPF_FUNC_ringbuf_reserve ||
	    func_id == BPF_FUNC_kptr_xchg)
		return true;

	if (func_id == BPF_FUNC_map_lookup_elem &&
	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
	     map_type == BPF_MAP_TYPE_SOCKHASH))
		return true;

	return false;
}
static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_tcp_sock ||
		func_id == BPF_FUNC_sk_fullsock ||
		func_id == BPF_FUNC_skc_to_tcp_sock ||
		func_id == BPF_FUNC_skc_to_tcp6_sock ||
		func_id == BPF_FUNC_skc_to_udp6_sock ||
		func_id == BPF_FUNC_skc_to_mptcp_sock ||
		func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
		func_id == BPF_FUNC_skc_to_tcp_request_sock;
}

static bool is_dynptr_ref_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_dynptr_data;
}
static bool is_sync_callback_calling_kfunc(u32 btf_id);
static bool is_async_callback_calling_kfunc(u32 btf_id);
static bool is_callback_calling_kfunc(u32 btf_id);
static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id);
static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_for_each_map_elem ||
	       func_id == BPF_FUNC_find_vma ||
	       func_id == BPF_FUNC_loop ||
	       func_id == BPF_FUNC_user_ringbuf_drain;
}

static bool is_async_callback_calling_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_timer_set_callback;
}

static bool is_callback_calling_function(enum bpf_func_id func_id)
{
	return is_sync_callback_calling_function(func_id) ||
	       is_async_callback_calling_function(func_id);
}

static bool is_sync_callback_calling_insn(struct bpf_insn *insn)
{
	return (bpf_helper_call(insn) && is_sync_callback_calling_function(insn->imm)) ||
	       (bpf_pseudo_kfunc_call(insn) && is_sync_callback_calling_kfunc(insn->imm));
}

static bool is_async_callback_calling_insn(struct bpf_insn *insn)
{
	return (bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm)) ||
	       (bpf_pseudo_kfunc_call(insn) && is_async_callback_calling_kfunc(insn->imm));
}
static bool is_may_goto_insn(struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_JCOND) && insn->src_reg == BPF_MAY_GOTO;
}

static bool is_may_goto_insn_at(struct bpf_verifier_env *env, int insn_idx)
{
	return is_may_goto_insn(&env->prog->insnsi[insn_idx]);
}

static bool is_storage_get_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_storage_get ||
	       func_id == BPF_FUNC_inode_storage_get ||
	       func_id == BPF_FUNC_task_storage_get ||
	       func_id == BPF_FUNC_cgrp_storage_get;
}
static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
					const struct bpf_map *map)
{
	int ref_obj_uses = 0;

	if (is_ptr_cast_function(func_id))
		ref_obj_uses++;
	if (is_acquire_function(func_id, map))
		ref_obj_uses++;
	if (is_dynptr_ref_function(func_id))
		ref_obj_uses++;

	return ref_obj_uses > 1;
}
static bool is_cmpxchg_insn(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_STX &&
	       BPF_MODE(insn->code) == BPF_ATOMIC &&
	       insn->imm == BPF_CMPXCHG;
}
static int __get_spi(s32 off)
{
	return (-off - 1) / BPF_REG_SIZE;
}
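/* Illustrative mapping (assuming BPF_REG_SIZE == 8): a stack offset of -8
 * maps to slot index 0, -16 to slot 1, -24 to slot 2, i.e. __get_spi(-8) == 0
 * and __get_spi(-16) == 1; the slot index grows as the offset goes deeper
 * below the frame pointer.
 */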
static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}
static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
{
	int allocated_slots = state->allocated_stack / BPF_REG_SIZE;

	/* We need to check that slots between [spi - nr_slots + 1, spi] are
	 * within [0, allocated_stack).
	 *
	 * Please note that the spi grows downwards. For example, a dynptr
	 * takes the size of two stack slots; the first slot will be at
	 * spi and the second slot will be at spi - 1.
	 */
	return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
}
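/* Illustrative example: with allocated_stack == 32 there are 4 slots
 * (indices 0..3); a two-slot object at spi == 3 occupies slots 3 and 2 and is
 * accepted (3 - 2 + 1 == 2 >= 0 and 3 < 4), whereas spi == 0 with
 * nr_slots == 2 would underflow the stack and is rejected.
 */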
static int stack_slot_obj_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				  const char *obj_kind, int nr_slots)
{
	int off, spi;

	if (!tnum_is_const(reg->var_off)) {
		verbose(env, "%s has to be at a constant offset\n", obj_kind);
		return -EINVAL;
	}

	off = reg->off + reg->var_off.value;
	if (off % BPF_REG_SIZE) {
		verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off);
		return -EINVAL;
	}

	spi = __get_spi(off);
	if (spi + 1 < nr_slots) {
		verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off);
		return -EINVAL;
	}

	if (!is_spi_bounds_valid(func(env, reg), spi, nr_slots))
		return -ERANGE;
	return spi;
}
static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	return stack_slot_obj_get_spi(env, reg, "dynptr", BPF_DYNPTR_NR_SLOTS);
}

static int iter_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int nr_slots)
{
	return stack_slot_obj_get_spi(env, reg, "iter", nr_slots);
}
static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
{
	switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
	case DYNPTR_TYPE_LOCAL:
		return BPF_DYNPTR_TYPE_LOCAL;
	case DYNPTR_TYPE_RINGBUF:
		return BPF_DYNPTR_TYPE_RINGBUF;
	case DYNPTR_TYPE_SKB:
		return BPF_DYNPTR_TYPE_SKB;
	case DYNPTR_TYPE_XDP:
		return BPF_DYNPTR_TYPE_XDP;
	default:
		return BPF_DYNPTR_TYPE_INVALID;
	}
}
static enum bpf_type_flag get_dynptr_type_flag(enum bpf_dynptr_type type)
{
	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
		return DYNPTR_TYPE_LOCAL;
	case BPF_DYNPTR_TYPE_RINGBUF:
		return DYNPTR_TYPE_RINGBUF;
	case BPF_DYNPTR_TYPE_SKB:
		return DYNPTR_TYPE_SKB;
	case BPF_DYNPTR_TYPE_XDP:
		return DYNPTR_TYPE_XDP;
	}
	return 0;
}

static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
{
	return type == BPF_DYNPTR_TYPE_RINGBUF;
}
static void __mark_dynptr_reg(struct bpf_reg_state *reg,
			      enum bpf_dynptr_type type,
			      bool first_slot, int dynptr_id);

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg);

static void mark_dynptr_stack_regs(struct bpf_verifier_env *env,
				   struct bpf_reg_state *sreg1,
				   struct bpf_reg_state *sreg2,
				   enum bpf_dynptr_type type)
{
	int id = ++env->id_gen;

	__mark_dynptr_reg(sreg1, type, true, id);
	__mark_dynptr_reg(sreg2, type, false, id);
}

static void mark_dynptr_cb_reg(struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg,
			       enum bpf_dynptr_type type)
{
	__mark_dynptr_reg(reg, type, true, ++env->id_gen);
}

static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
					 struct bpf_func_state *state, int spi);
static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				   enum bpf_arg_type arg_type, int insn_idx, int clone_ref_obj_id)
{
	struct bpf_func_state *state = func(env, reg);
	enum bpf_dynptr_type type;
	int spi, i, err;

	spi = dynptr_get_spi(env, reg);
	if (spi < 0)
		return spi;

	/* We cannot assume both spi and spi - 1 belong to the same dynptr,
	 * hence we need to call destroy_if_dynptr_stack_slot twice for both,
	 * to ensure that for the following example:
	 * So marking spi = 2 should lead to destruction of both d1 and d2. In
	 * case they do belong to same dynptr, second call won't see slot_type
	 * as STACK_DYNPTR and will simply skip destruction.
	 */
	err = destroy_if_dynptr_stack_slot(env, state, spi);
	if (err)
		return err;
	err = destroy_if_dynptr_stack_slot(env, state, spi - 1);
	if (err)
		return err;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		state->stack[spi].slot_type[i] = STACK_DYNPTR;
		state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;
	}

	type = arg_to_dynptr_type(arg_type);
	if (type == BPF_DYNPTR_TYPE_INVALID)
		return -EINVAL;

	mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr,
			       &state->stack[spi - 1].spilled_ptr, type);

	if (dynptr_type_refcounted(type)) {
		/* The id is used to track proper releasing */
		int id;

		if (clone_ref_obj_id)
			id = clone_ref_obj_id;
		else
			id = acquire_reference_state(env, insn_idx);
		if (id < 0)
			return id;

		state->stack[spi].spilled_ptr.ref_obj_id = id;
		state->stack[spi - 1].spilled_ptr.ref_obj_id = id;
	}

	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;

	return 0;
}
static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_state *state, int spi)
{
	int i;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		state->stack[spi].slot_type[i] = STACK_INVALID;
		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
	}

	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);

	/* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot?
	 *
	 * While we don't allow reading STACK_INVALID, it is still possible to
	 * do <8 byte writes marking some but not all slots as STACK_MISC. Then,
	 * helpers or insns can do partial read of that part without failing,
	 * but check_stack_range_initialized, check_stack_read_var_off, and
	 * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of
	 * the slot conservatively. Hence we need to prevent those liveness
	 * marking walks.
	 *
	 * This was not a problem before because STACK_INVALID is only set by
	 * default (where the default reg state has its reg->parent as NULL), or
	 * in clean_live_states after REG_LIVE_DONE (at which point
	 * mark_reg_read won't walk reg->parent chain), but not randomly during
	 * verifier state exploration (like we did above). Hence, for our case
	 * parentage chain will still be live (i.e. reg->parent may be
	 * non-NULL), while earlier reg->parent was NULL, so we need
	 * REG_LIVE_WRITTEN to screen off read marker propagation when it is
	 * done later on reads or by mark_dynptr_read, to avoid unnecessarily
	 * marking registers in verifier state.
	 */
	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
}
static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, ref_obj_id, i;

	spi = dynptr_get_spi(env, reg);
	if (spi < 0)
		return spi;

	if (!dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
		invalidate_dynptr(env, state, spi);
		return 0;
	}

	ref_obj_id = state->stack[spi].spilled_ptr.ref_obj_id;

	/* If the dynptr has a ref_obj_id, then we need to invalidate
	 * two things:
	 *
	 * 1) Any dynptrs with a matching ref_obj_id (clones)
	 * 2) Any slices derived from this dynptr.
	 */

	/* Invalidate any slices associated with this dynptr */
	WARN_ON_ONCE(release_reference(env, ref_obj_id));

	/* Invalidate any dynptr clones */
	for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		if (state->stack[i].spilled_ptr.ref_obj_id != ref_obj_id)
			continue;

		/* it should always be the case that if the ref obj id
		 * matches then the stack slot also belongs to a
		 * dynptr
		 */
		if (state->stack[i].slot_type[0] != STACK_DYNPTR) {
			verbose(env, "verifier internal error: misconfigured ref_obj_id\n");
			return -EFAULT;
		}
		if (state->stack[i].spilled_ptr.dynptr.first_slot)
			invalidate_dynptr(env, state, i);
	}

	return 0;
}
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg);

static void mark_reg_invalid(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	if (!env->allow_ptr_leaks)
		__mark_reg_not_init(env, reg);
	else
		__mark_reg_unknown(env, reg);
}
static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
					 struct bpf_func_state *state, int spi)
{
	struct bpf_func_state *fstate;
	struct bpf_reg_state *dreg;
	int i, dynptr_id;

	/* We always ensure that STACK_DYNPTR is never set partially,
	 * hence just checking for slot_type[0] is enough. This is
	 * different for STACK_SPILL, where it may be only set for
	 * 1 byte, so code has to use is_spilled_reg.
	 */
	if (state->stack[spi].slot_type[0] != STACK_DYNPTR)
		return 0;

	/* Reposition spi to first slot */
	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
		spi = spi + 1;

	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
		verbose(env, "cannot overwrite referenced dynptr\n");
		return -EINVAL;
	}

	mark_stack_slot_scratched(env, spi);
	mark_stack_slot_scratched(env, spi - 1);

	/* Writing partially to one dynptr stack slot destroys both. */
	for (i = 0; i < BPF_REG_SIZE; i++) {
		state->stack[spi].slot_type[i] = STACK_INVALID;
		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
	}

	dynptr_id = state->stack[spi].spilled_ptr.id;
	/* Invalidate any slices associated with this dynptr */
	bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({
		/* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */
		if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM)
			continue;
		if (dreg->dynptr_id == dynptr_id)
			mark_reg_invalid(env, dreg);
	}));

	/* Do not release reference state, we are destroying dynptr on stack,
	 * not using some helper to release it. Just reset register.
	 */
	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);

	/* Same reason as unmark_stack_slots_dynptr above */
	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;

	return 0;
}
static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	int spi;

	if (reg->type == CONST_PTR_TO_DYNPTR)
		return false;

	spi = dynptr_get_spi(env, reg);

	/* -ERANGE (i.e. spi not falling into allocated stack slots) isn't an
	 * error because this just means the stack state hasn't been updated yet.
	 * We will do check_mem_access to check and update stack bounds later.
	 */
	if (spi < 0 && spi != -ERANGE)
		return false;

	/* We don't need to check if the stack slots are marked by previous
	 * dynptr initializations because we allow overwriting existing unreferenced
	 * STACK_DYNPTR slots, see mark_stack_slots_dynptr which calls
	 * destroy_if_dynptr_stack_slot to ensure dynptr objects at the slots we are
	 * touching are completely destructed before we reinitialize them for a new
	 * one. For referenced ones, destroy_if_dynptr_stack_slot returns an error early
	 * instead of delaying it until the end where the user will get "Unreleased
	 * reference" error.
	 */
	return true;
}
static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	struct bpf_func_state *state = func(env, reg);
	int i, spi;

	/* This already represents first slot of initialized bpf_dynptr.
	 *
	 * CONST_PTR_TO_DYNPTR already has fixed and var_off as 0 due to
	 * check_func_arg_reg_off's logic, so we don't need to check its
	 * offset and alignment.
	 */
	if (reg->type == CONST_PTR_TO_DYNPTR)
		return true;

	spi = dynptr_get_spi(env, reg);
	if (spi < 0)
		return false;
	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
		return false;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		if (state->stack[spi].slot_type[i] != STACK_DYNPTR ||
		    state->stack[spi - 1].slot_type[i] != STACK_DYNPTR)
			return false;
	}

	return true;
}
static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				    enum bpf_arg_type arg_type)
{
	struct bpf_func_state *state = func(env, reg);
	enum bpf_dynptr_type dynptr_type;
	int spi;

	/* ARG_PTR_TO_DYNPTR takes any type of dynptr */
	if (arg_type == ARG_PTR_TO_DYNPTR)
		return true;

	dynptr_type = arg_to_dynptr_type(arg_type);
	if (reg->type == CONST_PTR_TO_DYNPTR) {
		return reg->dynptr.type == dynptr_type;
	} else {
		spi = dynptr_get_spi(env, reg);
		if (spi < 0)
			return false;
		return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type;
	}
}

static void __mark_reg_known_zero(struct bpf_reg_state *reg);
static bool in_rcu_cs(struct bpf_verifier_env *env);

static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta);
static int mark_stack_slots_iter(struct bpf_verifier_env *env,
				 struct bpf_kfunc_call_arg_meta *meta,
				 struct bpf_reg_state *reg, int insn_idx,
				 struct btf *btf, u32 btf_id, int nr_slots)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, i, j, id;

	spi = iter_get_spi(env, reg, nr_slots);
	if (spi < 0)
		return spi;

	id = acquire_reference_state(env, insn_idx);
	if (id < 0)
		return id;

	for (i = 0; i < nr_slots; i++) {
		struct bpf_stack_state *slot = &state->stack[spi - i];
		struct bpf_reg_state *st = &slot->spilled_ptr;

		__mark_reg_known_zero(st);
		st->type = PTR_TO_STACK; /* we don't have dedicated reg type */
		if (is_kfunc_rcu_protected(meta)) {
			if (in_rcu_cs(env))
				st->type |= MEM_RCU;
			else
				st->type |= PTR_UNTRUSTED;
		}
		st->live |= REG_LIVE_WRITTEN;
		st->ref_obj_id = i == 0 ? id : 0;
		st->iter.btf = btf;
		st->iter.btf_id = btf_id;
		st->iter.state = BPF_ITER_STATE_ACTIVE;

		for (j = 0; j < BPF_REG_SIZE; j++)
			slot->slot_type[j] = STACK_ITER;

		mark_stack_slot_scratched(env, spi - i);
	}

	return 0;
}
static int unmark_stack_slots_iter(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg, int nr_slots)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, i, j;

	spi = iter_get_spi(env, reg, nr_slots);
	if (spi < 0)
		return spi;

	for (i = 0; i < nr_slots; i++) {
		struct bpf_stack_state *slot = &state->stack[spi - i];
		struct bpf_reg_state *st = &slot->spilled_ptr;

		if (i == 0)
			WARN_ON_ONCE(release_reference(env, st->ref_obj_id));

		__mark_reg_not_init(env, st);

		/* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */
		st->live |= REG_LIVE_WRITTEN;

		for (j = 0; j < BPF_REG_SIZE; j++)
			slot->slot_type[j] = STACK_INVALID;

		mark_stack_slot_scratched(env, spi - i);
	}

	return 0;
}
static bool is_iter_reg_valid_uninit(struct bpf_verifier_env *env,
				     struct bpf_reg_state *reg, int nr_slots)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, i, j;

	/* For -ERANGE (i.e. spi not falling into allocated stack slots), we
	 * will do check_mem_access to check and update stack bounds later, so
	 * return true for that case.
	 */
	spi = iter_get_spi(env, reg, nr_slots);
	if (spi == -ERANGE)
		return true;
	if (spi < 0)
		return false;

	for (i = 0; i < nr_slots; i++) {
		struct bpf_stack_state *slot = &state->stack[spi - i];

		for (j = 0; j < BPF_REG_SIZE; j++)
			if (slot->slot_type[j] == STACK_ITER)
				return false;
	}

	return true;
}
static int is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				  struct btf *btf, u32 btf_id, int nr_slots)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, i, j;

	spi = iter_get_spi(env, reg, nr_slots);
	if (spi < 0)
		return -EINVAL;

	for (i = 0; i < nr_slots; i++) {
		struct bpf_stack_state *slot = &state->stack[spi - i];
		struct bpf_reg_state *st = &slot->spilled_ptr;

		if (st->type & PTR_UNTRUSTED)
			return -EPROTO;
		/* only main (first) slot has ref_obj_id set */
		if (i == 0 && !st->ref_obj_id)
			return -EINVAL;
		if (i != 0 && st->ref_obj_id)
			return -EINVAL;
		if (st->iter.btf != btf || st->iter.btf_id != btf_id)
			return -EINVAL;

		for (j = 0; j < BPF_REG_SIZE; j++)
			if (slot->slot_type[j] != STACK_ITER)
				return -EINVAL;
	}

	return 0;
}
/* Check if given stack slot is "special":
 *   - spilled register state (STACK_SPILL);
 *   - dynptr state (STACK_DYNPTR);
 *   - iter state (STACK_ITER).
 */
static bool is_stack_slot_special(const struct bpf_stack_state *stack)
{
	enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1];

	switch (type) {
	case STACK_SPILL:
	case STACK_DYNPTR:
	case STACK_ITER:
		return true;
	case STACK_INVALID:
	case STACK_MISC:
	case STACK_ZERO:
		return false;
	default:
		WARN_ONCE(1, "unknown stack slot type %d\n", type);
		return true;
	}
}
/* The reg state of a pointer or a bounded scalar was saved when
 * it was spilled to the stack.
 */
static bool is_spilled_reg(const struct bpf_stack_state *stack)
{
	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
}

static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack)
{
	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL &&
	       stack->spilled_ptr.type == SCALAR_VALUE;
}

static bool is_spilled_scalar_reg64(const struct bpf_stack_state *stack)
{
	return stack->slot_type[0] == STACK_SPILL &&
	       stack->spilled_ptr.type == SCALAR_VALUE;
}
/* Mark stack slot as STACK_MISC, unless it is already STACK_INVALID, in which
 * case they are equivalent, or it's STACK_ZERO, in which case we preserve
 * more precise STACK_ZERO.
 * Note, in unprivileged mode leaving STACK_INVALID is wrong, so we take
 * env->allow_ptr_leaks into account and force STACK_MISC, if necessary.
 */
static void mark_stack_slot_misc(struct bpf_verifier_env *env, u8 *stype)
{
	if (*stype == STACK_ZERO)
		return;
	if (env->allow_ptr_leaks && *stype == STACK_INVALID)
		return;
	*stype = STACK_MISC;
}

static void scrub_spilled_slot(u8 *stype)
{
	if (*stype != STACK_INVALID)
		*stype = STACK_MISC;
}
/* copy array src of length n * size bytes to dst. dst is reallocated if it's too
 * small to hold src. This is different from krealloc since we don't want to preserve
 * the contents of dst.
 *
 * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
 * not be allocated.
 */
static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
{
	size_t alloc_bytes;
	void *orig = dst;
	size_t bytes;

	if (ZERO_OR_NULL_PTR(src))
		goto out;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	alloc_bytes = max(ksize(orig), kmalloc_size_roundup(bytes));
	dst = krealloc(orig, alloc_bytes, flags);
	if (!dst) {
		kfree(orig);
		return NULL;
	}

	memcpy(dst, src, bytes);
out:
	return dst ? dst : ZERO_SIZE_PTR;
}
/* resize an array from old_n items to new_n items. the array is reallocated if it's too
 * small to hold new_n items. new items are zeroed out if the array grows.
 *
 * Contrary to krealloc_array, does not free arr if new_n is zero.
 */
static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
{
	size_t alloc_size;
	void *new_arr;

	if (!new_n || old_n == new_n)
		goto out;

	alloc_size = kmalloc_size_roundup(size_mul(new_n, size));
	new_arr = krealloc(arr, alloc_size, GFP_KERNEL);
	if (!new_arr) {
		kfree(arr);
		return NULL;
	}
	arr = new_arr;

	if (new_n > old_n)
		memset(arr + old_n * size, 0, (new_n - old_n) * size);

out:
	return arr ? arr : ZERO_SIZE_PTR;
}
static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
			       sizeof(struct bpf_reference_state), GFP_KERNEL);
	if (!dst->refs)
		return -ENOMEM;

	dst->active_locks = src->active_locks;
	dst->acquired_refs = src->acquired_refs;
	return 0;
}
static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
	size_t n = src->allocated_stack / BPF_REG_SIZE;

	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
				GFP_KERNEL);
	if (!dst->stack)
		return -ENOMEM;

	dst->allocated_stack = src->allocated_stack;
	return 0;
}
static int resize_reference_state(struct bpf_func_state *state, size_t n)
{
	state->refs = realloc_array(state->refs, state->acquired_refs, n,
				    sizeof(struct bpf_reference_state));
	if (!state->refs)
		return -ENOMEM;

	state->acquired_refs = n;
	return 0;
}
/* Possibly update state->allocated_stack to be at least size bytes. Also
 * possibly update the function's high-water mark in its bpf_subprog_info.
 */
static int grow_stack_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int size)
{
	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n;

	/* The stack size is always a multiple of BPF_REG_SIZE. */
	size = round_up(size, BPF_REG_SIZE);
	n = size / BPF_REG_SIZE;

	if (old_n >= n)
		return 0;

	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
	if (!state->stack)
		return -ENOMEM;

	state->allocated_stack = size;

	/* update known max for given subprogram */
	if (env->subprog_info[state->subprogno].stack_depth < size)
		env->subprog_info[state->subprogno].stack_depth = size;

	return 0;
}
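/* Illustrative example: a request for 20 bytes is rounded up to 24 (three
 * 8-byte slots); if allocated_stack is already 32 nothing changes, while a
 * request for 40 bytes grows the stack array to five slots and bumps the
 * subprogram's recorded stack_depth to 40.
 */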
/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = resize_reference_state(state, state->acquired_refs + 1);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].type = REF_TYPE_PTR;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;

	return id;
}
static int acquire_lock_state(struct bpf_verifier_env *env, int insn_idx, enum ref_state_type type,
			      int id, void *ptr)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int err;

	err = resize_reference_state(state, state->acquired_refs + 1);
	if (err)
		return err;
	state->refs[new_ofs].type = type;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;
	state->refs[new_ofs].ptr = ptr;

	state->active_locks++;
	return 0;
}
/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
	int i, last_idx;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].type != REF_TYPE_PTR)
			continue;
		if (state->refs[i].id == ptr_id) {
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			return 0;
		}
	}
	return -EINVAL;
}
static int release_lock_state(struct bpf_func_state *state, int type, int id, void *ptr)
{
	int i, last_idx;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].type != type)
			continue;
		if (state->refs[i].id == id && state->refs[i].ptr == ptr) {
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			state->active_locks--;
			return 0;
		}
	}
	return -EINVAL;
}
static struct bpf_reference_state *find_lock_state(struct bpf_verifier_env *env, enum ref_state_type type,
						   int id, void *ptr)
{
	struct bpf_func_state *state = cur_func(env);
	int i;

	for (i = 0; i < state->acquired_refs; i++) {
		struct bpf_reference_state *s = &state->refs[i];

		if (s->type == REF_TYPE_PTR || s->type != type)
			continue;

		if (s->id == id && s->ptr == ptr)
			return s;
	}
	return NULL;
}
static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->stack);
	kfree(state);
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	if (free_self)
		kfree(state);
}
/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return copy_stack_state(dst, src);
}
static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	int i, err;

	/* if dst has more stack frames than src, free them; this is also
	 * necessary in case of exceptional exits using bpf_throw.
	 */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->speculative = src->speculative;
	dst_state->active_rcu_lock = src->active_rcu_lock;
	dst_state->active_preempt_lock = src->active_preempt_lock;
	dst_state->in_sleepable = src->in_sleepable;
	dst_state->curframe = src->curframe;
	dst_state->branches = src->branches;
	dst_state->parent = src->parent;
	dst_state->first_insn_idx = src->first_insn_idx;
	dst_state->last_insn_idx = src->last_insn_idx;
	dst_state->insn_hist_start = src->insn_hist_start;
	dst_state->insn_hist_end = src->insn_hist_end;
	dst_state->dfs_depth = src->dfs_depth;
	dst_state->callback_unroll_depth = src->callback_unroll_depth;
	dst_state->used_as_loop_entry = src->used_as_loop_entry;
	dst_state->may_goto_depth = src->may_goto_depth;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}
static u32 state_htab_size(struct bpf_verifier_env *env)
{
	return env->prog->len;
}
static struct bpf_verifier_state_list **explored_state(struct bpf_verifier_env *env, int idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_func_state *state = cur->frame[cur->curframe];

	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
}
static bool same_callsites(struct bpf_verifier_state *a, struct bpf_verifier_state *b)
{
	int fr;

	if (a->curframe != b->curframe)
		return false;

	for (fr = a->curframe; fr >= 0; fr--)
		if (a->frame[fr]->callsite != b->frame[fr]->callsite)
			return false;

	return true;
}
/* Open coded iterators allow back-edges in the state graph in order to
 * check unbounded loops that use iterators.
 *
 * In is_state_visited() it is necessary to know if explored states are
 * part of some loops in order to decide whether non-exact states
 * comparison could be used:
 * - non-exact states comparison establishes sub-state relation and uses
 *   read and precision marks to do so, these marks are propagated from
 *   children states and thus are not guaranteed to be final in a loop;
 * - exact states comparison just checks if current and explored states
 *   are identical (and thus form a back-edge).
 *
 * Paper "A New Algorithm for Identifying Loops in Decompilation"
 * by Tao Wei, Jian Mao, Wei Zou and Yu Chen [1] presents a convenient
 * algorithm for loop structure detection and gives an overview of
 * relevant terminology. It also has helpful illustrations.
 *
 * [1] https://api.semanticscholar.org/CorpusID:15784067
 *
 * We use a similar algorithm but because loop nested structure is
 * irrelevant for verifier ours is significantly simpler and resembles
 * strongly connected components algorithm from Sedgewick's textbook.
 *
 * Define topmost loop entry as a first node of the loop traversed in a
 * depth first search starting from initial state. The goal of the loop
 * tracking algorithm is to associate topmost loop entries with states
 * derived from these entries.
 *
 * For each step in the DFS states traversal algorithm needs to identify
 * the following situations:
 *
 * (A) successor state of cur is not yet traversed;
 * (B) successor state of cur or its loop entry is in the current DFS
 *     path, thus cur and succ are members of the same outermost loop;
 * (C) successor state of cur is a part of some loop but this loop
 *     does not include cur, or successor state is not in a loop at all.
 *
 * Algorithm could be described as the following python code:
 *
 *     traversed = set()   # Set of traversed nodes
 *     entries = {}        # Mapping from node to loop entry
 *     depths = {}         # Depth level assigned to graph node
 *     path = set()        # Current DFS path
 *
 *     # Find outermost loop entry known for n
 *     def get_loop_entry(n):
 *         h = entries.get(n, None)
 *         while h in entries and entries[h] != h:
 *             h = entries[h]
 *         return h
 *
 *     # Update n's loop entry if h's outermost entry comes
 *     # before n's outermost entry in current DFS path.
 *     def update_loop_entry(n, h):
 *         n1 = get_loop_entry(n) or n
 *         h1 = get_loop_entry(h) or h
 *         if h1 in path and depths[h1] <= depths[n1]:
 *             entries[n] = h1
 *
 *     def dfs(n, depth):
 *         traversed.add(n)
 *         path.add(n)
 *         depths[n] = depth
 *         for succ in G.successors(n):
 *             if succ not in traversed:
 *                 # Case A: explore succ and update cur's loop entry
 *                 #         only if succ's entry is in current DFS path.
 *                 dfs(succ, depth + 1)
 *                 h = get_loop_entry(succ)
 *                 update_loop_entry(n, h)
 *             else:
 *                 # Case B or C depending on `h1 in path` check in update_loop_entry().
 *                 update_loop_entry(n, succ)
 *         path.remove(n)
 *
 * To adapt this algorithm for use with verifier:
 * - use st->branch == 0 as a signal that DFS of succ had been finished
 *   and cur's loop entry has to be updated (case A), handle this in
 *   update_branch_counts();
 * - use st->branch > 0 as a signal that st is in the current DFS path;
 * - handle cases B and C in is_state_visited();
 * - update topmost loop entry for intermediate states in get_loop_entry().
 */
static struct bpf_verifier_state *get_loop_entry(struct bpf_verifier_state *st)
{
	struct bpf_verifier_state *topmost = st->loop_entry, *old;

	while (topmost && topmost->loop_entry && topmost != topmost->loop_entry)
		topmost = topmost->loop_entry;
	/* Update loop entries for intermediate states to avoid this
	 * traversal in future get_loop_entry() calls.
	 */
	while (st && st->loop_entry != topmost) {
		old = st->loop_entry;
		st->loop_entry = topmost;
		st = old;
	}
	return topmost;
}
static void update_loop_entry(struct bpf_verifier_state *cur, struct bpf_verifier_state *hdr)
{
	struct bpf_verifier_state *cur1, *hdr1;

	cur1 = get_loop_entry(cur) ?: cur;
	hdr1 = get_loop_entry(hdr) ?: hdr;
	/* The hdr1->branches check decides between cases B and C in
	 * comment for get_loop_entry(). If hdr1->branches == 0 then
	 * hdr's topmost loop entry is not in current DFS path,
	 * hence 'cur' and 'hdr' are not in the same loop and there is
	 * no need to update cur->loop_entry.
	 */
	if (hdr1->branches && hdr1->dfs_depth <= cur1->dfs_depth) {
		cur->loop_entry = hdr;
		hdr->used_as_loop_entry = true;
	}
}
static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	while (st) {
		u32 br = --st->branches;

		/* br == 0 signals that DFS exploration for 'st' is finished,
		 * thus it is necessary to update parent's loop entry if it
		 * turned out that st is a part of some loop.
		 * This is a part of 'case A' in get_loop_entry() comment.
		 */
		if (br == 0 && st->parent && st->loop_entry)
			update_loop_entry(st->parent, st->loop_entry);

		/* WARN_ON(br > 1) technically makes sense here,
		 * but see comment in push_stack(), hence:
		 */
		WARN_ONCE((int)br < 0,
			  "BUG update_branch_counts:branches_to_explore=%d\n",
			  br);
		if (br)
			break;
		st = st->parent;
	}
}
static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx, bool pop_log)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (pop_log)
		bpf_vlog_reset(&env->log, head->log_pos);
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}
*push_stack(struct bpf_verifier_env
*env
,
1765 int insn_idx
, int prev_insn_idx
,
1768 struct bpf_verifier_state
*cur
= env
->cur_state
;
1769 struct bpf_verifier_stack_elem
*elem
;
1772 elem
= kzalloc(sizeof(struct bpf_verifier_stack_elem
), GFP_KERNEL
);
1776 elem
->insn_idx
= insn_idx
;
1777 elem
->prev_insn_idx
= prev_insn_idx
;
1778 elem
->next
= env
->head
;
1779 elem
->log_pos
= env
->log
.end_pos
;
1782 err
= copy_verifier_state(&elem
->st
, cur
);
1785 elem
->st
.speculative
|= speculative
;
1786 if (env
->stack_size
> BPF_COMPLEXITY_LIMIT_JMP_SEQ
) {
1787 verbose(env
, "The sequence of %d jumps is too complex.\n",
1791 if (elem
->st
.parent
) {
1792 ++elem
->st
.parent
->branches
;
1793 /* WARN_ON(branches > 2) technically makes sense here,
1795 * 1. speculative states will bump 'branches' for non-branch
1797 * 2. is_state_visited() heuristics may decide not to create
1798 * a new state for a sequence of branches and all such current
1799 * and cloned states will be pointing to a single parent state
1800 * which might have large 'branches' count.
1805 free_verifier_state(env
->cur_state
, true);
1806 env
->cur_state
= NULL
;
1807 /* pop all elements and return */
1808 while (!pop_stack(env
, NULL
, NULL
, false));
#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
1818 static void ___mark_reg_known(struct bpf_reg_state
*reg
, u64 imm
)
1820 reg
->var_off
= tnum_const(imm
);
1821 reg
->smin_value
= (s64
)imm
;
1822 reg
->smax_value
= (s64
)imm
;
1823 reg
->umin_value
= imm
;
1824 reg
->umax_value
= imm
;
1826 reg
->s32_min_value
= (s32
)imm
;
1827 reg
->s32_max_value
= (s32
)imm
;
1828 reg
->u32_min_value
= (u32
)imm
;
1829 reg
->u32_max_value
= (u32
)imm
;
/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	/* Clear off and union(map_ptr, range) */
	memset(((u8 *)reg) + sizeof(reg->type), 0,
	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
	reg->id = 0;
	reg->ref_obj_id = 0;
	___mark_reg_known(reg, imm);
}
static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->var_off = tnum_const_subreg(reg->var_off, imm);
	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}
/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->type = SCALAR_VALUE;
	/* all scalars are assumed imprecise initially (unless unprivileged,
	 * in which case everything is forced to be precise)
	 */
	reg->precise = !env->bpf_capable;
}
static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}
static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type,
			      bool first_slot, int dynptr_id)
{
	/* reg->type has no meaning for STACK_DYNPTR, but when we set reg for
	 * callback arguments, it does need to be CONST_PTR_TO_DYNPTR, so simply
	 * set it unconditionally as it is ignored for STACK_DYNPTR anyway.
	 */
	__mark_reg_known_zero(reg);
	reg->type = CONST_PTR_TO_DYNPTR;
	/* Give each dynptr a unique id to uniquely associate slices to it. */
	reg->id = dynptr_id;
	reg->dynptr.type = type;
	reg->dynptr.first_slot = first_slot;
}
static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
{
	if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
		const struct bpf_map *map = reg->map_ptr;

		if (map->inner_map_meta) {
			reg->type = CONST_PTR_TO_MAP;
			reg->map_ptr = map->inner_map_meta;
			/* transfer reg's id which is unique for every map_lookup_elem
			 * as UID of the inner map.
			 */
			if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER))
				reg->map_uid = reg->id;
			if (btf_record_has_field(map->inner_map_meta->record, BPF_WORKQUEUE))
				reg->map_uid = reg->id;
		} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
			reg->type = PTR_TO_XDP_SOCK;
		} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
			   map->map_type == BPF_MAP_TYPE_SOCKHASH) {
			reg->type = PTR_TO_SOCKET;
		} else {
			reg->type = PTR_TO_MAP_VALUE;
		}
		return;
	}

	reg->type &= ~PTR_MAYBE_NULL;
}
static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno,
				struct btf_field_graph_root *ds_head)
{
	__mark_reg_known_zero(&regs[regno]);
	regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC;
	regs[regno].btf = ds_head->btf;
	regs[regno].btf_id = ds_head->value_btf_id;
	regs[regno].off = ds_head->node_offset;
}
static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

static bool reg_is_dynptr_slice_pkt(const struct bpf_reg_state *reg)
{
	return base_type(reg->type) == PTR_TO_MEM &&
	       (reg->type & DYNPTR_TYPE_SKB || reg->type & DYNPTR_TYPE_XDP);
}
/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
	 */
	return reg->type == which &&
	       reg->id == 0 &&
	       reg->off == 0 &&
	       tnum_equals_const(reg->var_off, 0);
}
/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;

	reg->s32_min_value = S32_MIN;
	reg->s32_max_value = S32_MAX;
	reg->u32_min_value = 0;
	reg->u32_max_value = U32_MAX;
}

static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;
}

static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
{
	reg->s32_min_value = S32_MIN;
	reg->s32_max_value = S32_MAX;
	reg->u32_min_value = 0;
	reg->u32_max_value = U32_MAX;
}
static void __update_reg32_bounds(struct bpf_reg_state *reg)
{
	struct tnum var32_off = tnum_subreg(reg->var_off);

	/* min signed is max(sign bit) | min(other bits) */
	reg->s32_min_value = max_t(s32, reg->s32_min_value,
				   var32_off.value | (var32_off.mask & S32_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->s32_max_value = min_t(s32, reg->s32_max_value,
				   var32_off.value | (var32_off.mask & S32_MAX));
	reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
	reg->u32_max_value = min(reg->u32_max_value,
				 (u32)(var32_off.value | var32_off.mask));
}
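/* Worked example (illustrative only, not part of the original source):
 * with var32_off = {.value = 0x0, .mask = 0xff}, i.e. only the low 8 bits
 * are unknown, and otherwise unbounded u32/s32 bounds, the function above
 * computes:
 *   s32_min >= 0x0 | (0xff & S32_MIN) = 0      (sign bit is known to be 0)
 *   s32_max <= 0x0 | (0xff & S32_MAX) = 0xff
 *   u32_min >= 0x0,  u32_max <= 0x0 | 0xff = 0xff
 * so the 32-bit bounds tighten to [0, 255] in both signed and unsigned view.
 */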
static void __update_reg64_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}

static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	__update_reg32_bounds(reg);
	__update_reg64_bounds(reg);
}
/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
{
	/* If upper 32 bits of u64/s64 range don't change, we can use lower 32
	 * bits to improve our u32/s32 boundaries.
	 *
	 * E.g., the case where we have upper 32 bits as zero ([10, 20] in
	 * u64) is pretty trivial, it's obvious that in u32 we'll also have
	 * [10, 20] range. But this property holds for any 64-bit range as
	 * long as upper 32 bits in that entire range of values stay the same.
	 *
	 * E.g., u64 range [0x10000000A, 0x10000000F] ([4294967306, 4294967311]
	 * in decimal) has the same upper 32 bits throughout all the values in
	 * that range. As such, lower 32 bits form a valid [0xA, 0xF] ([10, 15])
	 * range.
	 *
	 * Note also, that [0xA, 0xF] is a valid range both in u32 and in s32,
	 * following the rules outlined below about u64/s64 correspondence
	 * (which equally applies to u32 vs s32 correspondence). In general it
	 * depends on actual hexadecimal values of 32-bit range. They can form
	 * only valid u32, or only valid s32 ranges in some cases.
	 *
	 * So we use all these insights to derive bounds for subregisters here.
	 */
	if ((reg->umin_value >> 32) == (reg->umax_value >> 32)) {
		/* u64 to u32 casting preserves validity of low 32 bits as
		 * a range, if upper 32 bits are the same
		 */
		reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->umin_value);
		reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->umax_value);

		if ((s32)reg->umin_value <= (s32)reg->umax_value) {
			reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value);
			reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value);
		}
	}
	if ((reg->smin_value >> 32) == (reg->smax_value >> 32)) {
		/* low 32 bits should form a proper u32 range */
		if ((u32)reg->smin_value <= (u32)reg->smax_value) {
			reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->smin_value);
			reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->smax_value);
		}
		/* low 32 bits should form a proper s32 range */
		if ((s32)reg->smin_value <= (s32)reg->smax_value) {
			reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value);
			reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value);
		}
	}
	/* Special case where upper bits form a small sequence of two
	 * sequential numbers (in 32-bit unsigned space, so 0xffffffff to
	 * 0x00000000 is also valid), while lower bits form a proper s32 range
	 * going from negative numbers to positive numbers. E.g., let's say we
	 * have s64 range [-1, 1] ([0xffffffffffffffff, 0x0000000000000001]).
	 * Possible s64 values are {-1, 0, 1} ({0xffffffffffffffff,
	 * 0x0000000000000000, 0x00000000000001}). Ignoring upper 32 bits,
	 * we still get a valid s32 range [-1, 1] ([0xffffffff, 0x00000001]).
	 * Note that it doesn't have to be 0xffffffff going to 0x00000000 in
	 * upper 32 bits. As a random example, s64 range
	 * [0xfffffff0fffffff0; 0xfffffff100000010], forms a valid s32 range
	 * [-16, 16] ([0xfffffff0; 0x00000010]) in its 32 bit subregister.
	 */
	if ((u32)(reg->umin_value >> 32) + 1 == (u32)(reg->umax_value >> 32) &&
	    (s32)reg->umin_value < 0 && (s32)reg->umax_value >= 0) {
		reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value);
		reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value);
	}
	if ((u32)(reg->smin_value >> 32) + 1 == (u32)(reg->smax_value >> 32) &&
	    (s32)reg->smin_value < 0 && (s32)reg->smax_value >= 0) {
		reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value);
		reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value);
	}
	/* if u32 range forms a valid s32 range (due to matching sign bit),
	 * try to learn from that
	 */
	if ((s32)reg->u32_min_value <= (s32)reg->u32_max_value) {
		reg->s32_min_value = max_t(s32, reg->s32_min_value, reg->u32_min_value);
		reg->s32_max_value = min_t(s32, reg->s32_max_value, reg->u32_max_value);
	}
	/* If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) {
		reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value);
		reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value);
	}
}
static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
{
	/* If u64 range forms a valid s64 range (due to matching sign bit),
	 * try to learn from that. Let's do a bit of ASCII art to see when
	 * this is happening. Let's take u64 range first:
	 *
	 * 0                   0x7fffffffffffffff 0x8000000000000000        U64_MAX
	 * |-------------------------------|--------------------------------|
	 *
	 * Valid u64 range is formed when umin and umax are anywhere in the
	 * range [0, U64_MAX], and umin <= umax. u64 case is simple and
	 * straightforward. Let's see how s64 range maps onto the same range
	 * of values, annotated below the line for comparison:
	 *
	 * 0                   0x7fffffffffffffff 0x8000000000000000        U64_MAX
	 * |-------------------------------|--------------------------------|
	 * 0                        S64_MAX S64_MIN                        -1
	 *
	 * So s64 values basically start in the middle and they are logically
	 * contiguous to the right of it, wrapping around from -1 to 0, and
	 * then finishing as S64_MAX (0x7fffffffffffffff) right before
	 * S64_MIN. We can try drawing the continuity of u64 vs s64 values
	 * more visually as mapped to sign-agnostic range of hex values.
	 *
	 *  _______________________________________________________________
	 * 0                   0x7fffffffffffffff 0x8000000000000000        U64_MAX
	 * |-------------------------------|--------------------------------|
	 * 0                        S64_MAX S64_MIN                        -1
	 *
	 * >------------------------------ ------------------------------->
	 * s64 continues...        s64 end   s64 start          s64 "midpoint"
	 *
	 * What this means is that, in general, we can't always derive
	 * something new about u64 from any random s64 range, and vice versa.
	 *
	 * But we can do that in two particular cases. One is when entire
	 * u64/s64 range is *entirely* contained within left half of the above
	 * diagram or when it is *entirely* contained in the right half. I.e.:
	 *
	 * |-------------------------------|--------------------------------|
	 *     A                  B            C                 D
	 *
	 * [A, B] and [C, D] are contained entirely in their respective halves
	 * and form valid contiguous ranges as both u64 and s64 values. [A, B]
	 * will be non-negative both as u64 and s64 (and in fact it will be
	 * identical ranges no matter the signedness). [C, D] treated as s64
	 * will be a range of negative values, while in u64 it will be
	 * non-negative range of values larger than 0x8000000000000000.
	 *
	 * Now, any other range here can't be represented in both u64 and s64
	 * simultaneously. E.g., [A, C], [A, D], [B, C], [B, D] are valid
	 * contiguous u64 ranges, but they are discontinuous in s64. [B, C]
	 * in s64 would be properly presented as [S64_MIN, C] and [B, S64_MAX],
	 * for example. Similarly, valid s64 range [D, A] (going from negative
	 * to positive values), would be two separate [D, U64_MAX] and [0, A]
	 * ranges as u64. Currently reg_state can't represent two segments per
	 * numeric domain, so in such situations we can only derive maximal
	 * possible range ([0, U64_MAX] for u64, and [S64_MIN, S64_MAX] for s64).
	 *
	 * So we use these facts to derive umin/umax from smin/smax and vice
	 * versa only if they stay within the same "half". This is equivalent
	 * to checking sign bit: lower half will have sign bit as zero, upper
	 * half have sign bit 1. Below in code we simplify this by just
	 * casting umin/umax as smin/smax and checking if they form valid
	 * range, and vice versa. Those are equivalent checks.
	 */
	if ((s64)reg->umin_value <= (s64)reg->umax_value) {
		reg->smin_value = max_t(s64, reg->smin_value, reg->umin_value);
		reg->smax_value = min_t(s64, reg->smax_value, reg->umax_value);
	}
	/* If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if ((u64)reg->smin_value <= (u64)reg->smax_value) {
		reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value);
		reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value);
	}
}
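/* Worked example (illustrative only, not part of the original source):
 * suppose the only known bounds are smin = -3, smax = -1. Both values have
 * the sign bit set, so (u64)smin = 0xfffffffffffffffd <= (u64)smax =
 * 0xffffffffffffffff, and the second branch above tightens umin/umax to
 * that same [0xff...fd, 0xff...ff] range, exactly as the
 * "-3 s<= x s<= -1" comment describes.
 */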
static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg)
{
	/* Try to tighten 64-bit bounds from 32-bit knowledge, using 32-bit
	 * values on both sides of 64-bit range in hope to have tighter range.
	 * E.g., if r1 is [0x1'00000000, 0x3'80000000], and we learn from
	 * 32-bit signed > 0 operation that s32 bounds are now [1; 0x7fffffff].
	 * With this, we can substitute 1 as low 32-bits of _low_ 64-bit bound
	 * (0x100000000 -> 0x100000001) and 0x7fffffff as low 32-bits of
	 * _high_ 64-bit bound (0x380000000 -> 0x37fffffff) and arrive at a
	 * better overall bounds for r1 as [0x1'000000001; 0x3'7fffffff].
	 * We just need to make sure that derived bounds we are intersecting
	 * with are well-formed ranges in respective s64 or u64 domain, just
	 * like we do with similar kinds of 32-to-64 or 64-to-32 adjustments.
	 */
	__u64 new_umin, new_umax;
	__s64 new_smin, new_smax;

	/* u32 -> u64 tightening, it's always well-formed */
	new_umin = (reg->umin_value & ~0xffffffffULL) | reg->u32_min_value;
	new_umax = (reg->umax_value & ~0xffffffffULL) | reg->u32_max_value;
	reg->umin_value = max_t(u64, reg->umin_value, new_umin);
	reg->umax_value = min_t(u64, reg->umax_value, new_umax);
	/* u32 -> s64 tightening, u32 range embedded into s64 preserves range validity */
	new_smin = (reg->smin_value & ~0xffffffffULL) | reg->u32_min_value;
	new_smax = (reg->smax_value & ~0xffffffffULL) | reg->u32_max_value;
	reg->smin_value = max_t(s64, reg->smin_value, new_smin);
	reg->smax_value = min_t(s64, reg->smax_value, new_smax);

	/* if s32 can be treated as valid u32 range, we can use it as well */
	if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) {
		/* s32 -> u64 tightening */
		new_umin = (reg->umin_value & ~0xffffffffULL) | (u32)reg->s32_min_value;
		new_umax = (reg->umax_value & ~0xffffffffULL) | (u32)reg->s32_max_value;
		reg->umin_value = max_t(u64, reg->umin_value, new_umin);
		reg->umax_value = min_t(u64, reg->umax_value, new_umax);
		/* s32 -> s64 tightening */
		new_smin = (reg->smin_value & ~0xffffffffULL) | (u32)reg->s32_min_value;
		new_smax = (reg->smax_value & ~0xffffffffULL) | (u32)reg->s32_max_value;
		reg->smin_value = max_t(s64, reg->smin_value, new_smin);
		reg->smax_value = min_t(s64, reg->smax_value, new_smax);
	}

	/* Here we would like to handle a special case after sign extending load,
	 * when upper bits for a 64-bit range are all 1s or all 0s.
	 *
	 * Upper bits are all 1s when register is in a range:
	 *   [0xffff_ffff_0000_0000, 0xffff_ffff_ffff_ffff]
	 * Upper bits are all 0s when register is in a range:
	 *   [0x0000_0000_0000_0000, 0x0000_0000_ffff_ffff]
	 * Together this forms a continuous range:
	 *   [0xffff_ffff_0000_0000, 0x0000_0000_ffff_ffff]
	 *
	 * Now, suppose that register range is in fact tighter:
	 *   [0xffff_ffff_8000_0000, 0x0000_0000_ffff_ffff] (R)
	 * Also suppose that its 32-bit range is positive,
	 * meaning that lower 32-bits of the full 64-bit register
	 * are in the range:
	 *   [0x0000_0000, 0x7fff_ffff] (W)
	 *
	 * If this happens, then any value in a range:
	 *   [0xffff_ffff_0000_0000, 0xffff_ffff_7fff_ffff]
	 * is smaller than a lowest bound of the range (R):
	 *   0xffff_ffff_8000_0000
	 * which means that upper bits of the full 64-bit register
	 * can't be all 1s, when lower bits are in range (W).
	 *
	 * Note that:
	 *  - 0xffff_ffff_8000_0000 == (s64)S32_MIN
	 *  - 0x0000_0000_7fff_ffff == (s64)S32_MAX
	 * These relations are used in the conditions below.
	 */
	if (reg->s32_min_value >= 0 && reg->smin_value >= S32_MIN && reg->smax_value <= S32_MAX) {
		reg->smin_value = reg->s32_min_value;
		reg->smax_value = reg->s32_max_value;
		reg->umin_value = reg->s32_min_value;
		reg->umax_value = reg->s32_max_value;
		reg->var_off = tnum_intersect(reg->var_off,
					      tnum_range(reg->smin_value, reg->smax_value));
	}
}
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	__reg32_deduce_bounds(reg);
	__reg64_deduce_bounds(reg);
	__reg_deduce_mixed_bounds(reg);
}
/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	struct tnum var64_off = tnum_intersect(reg->var_off,
					       tnum_range(reg->umin_value,
							  reg->umax_value));
	struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off),
					       tnum_range(reg->u32_min_value,
							  reg->u32_max_value));

	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
}
static void reg_bounds_sync(struct bpf_reg_state *reg)
{
	/* We might have learned new bounds from the var_off. */
	__update_reg_bounds(reg);
	/* We might have learned something about the sign bit. */
	__reg_deduce_bounds(reg);
	__reg_deduce_bounds(reg);
	/* We might have learned some bits from the bounds. */
	__reg_bound_offset(reg);
	/* Intersecting with the old var_off might have improved our bounds
	 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
	 */
	__update_reg_bounds(reg);
}
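/* Worked example (illustrative only, not part of the original source) of
 * the last comment above: with umin = 0, umax = 0x7fffffffffffffff and
 * var_off = (0; 0xfffffffffffffffc), tnum_range(umin, umax) is
 * (0; 0x7fffffffffffffff), so the intersection in __reg_bound_offset()
 * clears the top unknown bit and yields (0; 0x7ffffffffffffffc); the final
 * __update_reg_bounds() then lowers umax to 0x7ffffffffffffffc.
 */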
static int reg_bounds_sanity_check(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg, const char *ctx)
{
	const char *msg;

	if (reg->umin_value > reg->umax_value ||
	    reg->smin_value > reg->smax_value ||
	    reg->u32_min_value > reg->u32_max_value ||
	    reg->s32_min_value > reg->s32_max_value) {
		msg = "range bounds violation";
		goto out;
	}

	if (tnum_is_const(reg->var_off)) {
		u64 uval = reg->var_off.value;
		s64 sval = (s64)uval;

		if (reg->umin_value != uval || reg->umax_value != uval ||
		    reg->smin_value != sval || reg->smax_value != sval) {
			msg = "const tnum out of sync with range bounds";
			goto out;
		}
	}

	if (tnum_subreg_is_const(reg->var_off)) {
		u32 uval32 = tnum_subreg(reg->var_off).value;
		s32 sval32 = (s32)uval32;

		if (reg->u32_min_value != uval32 || reg->u32_max_value != uval32 ||
		    reg->s32_min_value != sval32 || reg->s32_max_value != sval32) {
			msg = "const subreg tnum out of sync with range bounds";
			goto out;
		}
	}

	return 0;
out:
	verbose(env, "REG INVARIANTS VIOLATION (%s): %s u64=[%#llx, %#llx] "
		"s64=[%#llx, %#llx] u32=[%#x, %#x] s32=[%#x, %#x] var_off=(%#llx, %#llx)\n",
		ctx, msg, reg->umin_value, reg->umax_value,
		reg->smin_value, reg->smax_value,
		reg->u32_min_value, reg->u32_max_value,
		reg->s32_min_value, reg->s32_max_value,
		reg->var_off.value, reg->var_off.mask);
	if (env->test_reg_invariants)
		return -EFAULT;
	__mark_reg_unbounded(reg);
	return 0;
}
static bool __reg32_bound_s64(s32 a)
{
	return a >= 0 && a <= S32_MAX;
}

static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
{
	reg->umin_value = reg->u32_min_value;
	reg->umax_value = reg->u32_max_value;

	/* Attempt to pull 32-bit signed bounds into 64-bit bounds but must
	 * be positive otherwise set to worse case bounds and refine later
	 * from tnum.
	 */
	if (__reg32_bound_s64(reg->s32_min_value) &&
	    __reg32_bound_s64(reg->s32_max_value)) {
		reg->smin_value = reg->s32_min_value;
		reg->smax_value = reg->s32_max_value;
	} else {
		reg->smin_value = 0;
		reg->smax_value = U32_MAX;
	}
}
/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown_imprecise(struct bpf_reg_state *reg)
{
	/*
	 * Clear type, off, and union(map_ptr, range) and
	 * padding between 'type' and union
	 */
	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
	reg->type = SCALAR_VALUE;
	reg->id = 0;
	reg->ref_obj_id = 0;
	reg->var_off = tnum_unknown;
	reg->frameno = 0;
	reg->precise = false;
	__mark_reg_unbounded(reg);
}

/* Mark a register as having a completely unknown (scalar) value,
 * initialize .precise as true when not bpf capable.
 */
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg)
{
	__mark_reg_unknown_imprecise(reg);
	reg->precise = !env->bpf_capable;
}

static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_unknown(env, regs + regno);
}
static int __mark_reg_s32_range(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs,
				u32 regno,
				s32 s32_min,
				s32 s32_max)
{
	struct bpf_reg_state *reg = regs + regno;

	reg->s32_min_value = max_t(s32, reg->s32_min_value, s32_min);
	reg->s32_max_value = min_t(s32, reg->s32_max_value, s32_max);

	reg->smin_value = max_t(s64, reg->smin_value, s32_min);
	reg->smax_value = min_t(s64, reg->smax_value, s32_max);

	reg_bounds_sync(reg);

	return reg_bounds_sanity_check(env, reg, "s32_range");
}
static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg)
{
	__mark_reg_unknown(env, reg);
	reg->type = NOT_INIT;
}

static void mark_reg_not_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_not_init(env, regs + regno);
}
static void mark_btf_ld_reg(struct bpf_verifier_env *env,
			    struct bpf_reg_state *regs, u32 regno,
			    enum bpf_reg_type reg_type,
			    struct btf *btf, u32 btf_id,
			    enum bpf_type_flag flag)
{
	if (reg_type == SCALAR_VALUE) {
		mark_reg_unknown(env, regs, regno);
		return;
	}
	mark_reg_known_zero(env, regs, regno);
	regs[regno].type = PTR_TO_BTF_ID | flag;
	regs[regno].btf = btf;
	regs[regno].btf_id = btf_id;
	if (type_may_be_null(flag))
		regs[regno].id = ++env->id_gen;
}
#define DEF_NOT_SUBREG	(0)
static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(env, regs, i);
		regs[i].live = REG_LIVE_NONE;
		regs[i].parent = NULL;
		regs[i].subreg_def = DEF_NOT_SUBREG;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(env, regs, BPF_REG_FP);
	regs[BPF_REG_FP].frameno = state->frameno;
}
static struct bpf_retval_range retval_range(s32 minval, s32 maxval)
{
	return (struct bpf_retval_range){ minval, maxval };
}
#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *state,
			    int callsite, int frameno, int subprogno)
{
	state->callsite = callsite;
	state->frameno = frameno;
	state->subprogno = subprogno;
	state->callback_ret_range = retval_range(0, 0);
	init_reg_state(env, state);
	mark_verifier_state_scratched(env);
}
/* Similar to push_stack(), but for async callbacks */
static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
						int insn_idx, int prev_insn_idx,
						int subprog, bool is_sleepable)
{
	struct bpf_verifier_stack_elem *elem;
	struct bpf_func_state *frame;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	elem->log_pos = env->log.end_pos;
	env->head = elem;
	env->stack_size++;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
		verbose(env,
			"The sequence of %d jumps is too complex for async cb.\n",
			env->stack_size);
		goto err;
	}
	/* Unlike push_stack() do not copy_verifier_state().
	 * The caller state doesn't matter.
	 * This is async callback. It starts in a fresh stack.
	 * Initialize it similar to do_check_common().
	 * But we do need to make sure to not clobber insn_hist, so we keep
	 * chaining insn_hist_start/insn_hist_end indices as for a normal
	 * verification state.
	 */
	elem->st.branches = 1;
	elem->st.in_sleepable = is_sleepable;
	elem->st.insn_hist_start = env->cur_state->insn_hist_end;
	elem->st.insn_hist_end = elem->st.insn_hist_start;
	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
	if (!frame)
		goto err;
	init_func_state(env, frame,
			BPF_MAIN_FUNC /* callsite */,
			0 /* frameno within this callchain */,
			subprog /* subprog number within this prog */);
	elem->st.frame[0] = frame;
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL, false));
	return NULL;
}
enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};
static int cmp_subprogs(const void *a, const void *b)
{
	return ((struct bpf_subprog_info *)a)->start -
	       ((struct bpf_subprog_info *)b)->start;
}

static int find_subprog(struct bpf_verifier_env *env, int off)
{
	struct bpf_subprog_info *p;

	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
		    sizeof(env->subprog_info[0]), cmp_subprogs);
	if (!p)
		return -ENOENT;
	return p - env->subprog_info;
}
static int add_subprog(struct bpf_verifier_env *env, int off)
{
	int insn_cnt = env->prog->len;
	int ret;

	if (off >= insn_cnt || off < 0) {
		verbose(env, "call to invalid destination\n");
		return -EINVAL;
	}
	ret = find_subprog(env, off);
	if (ret >= 0)
		return ret;
	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
		verbose(env, "too many subprograms\n");
		return -E2BIG;
	}
	/* determine subprog starts. The end is one before the next starts */
	env->subprog_info[env->subprog_cnt++].start = off;
	sort(env->subprog_info, env->subprog_cnt,
	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
	return env->subprog_cnt - 1;
}
2632 static int bpf_find_exception_callback_insn_off(struct bpf_verifier_env
*env
)
2634 struct bpf_prog_aux
*aux
= env
->prog
->aux
;
2635 struct btf
*btf
= aux
->btf
;
2636 const struct btf_type
*t
;
2637 u32 main_btf_id
, id
;
2641 /* Non-zero func_info_cnt implies valid btf */
2642 if (!aux
->func_info_cnt
)
2644 main_btf_id
= aux
->func_info
[0].type_id
;
2646 t
= btf_type_by_id(btf
, main_btf_id
);
2648 verbose(env
, "invalid btf id for main subprog in func_info\n");
2652 name
= btf_find_decl_tag_value(btf
, t
, -1, "exception_callback:");
2654 ret
= PTR_ERR(name
);
2655 /* If there is no tag present, there is no exception callback */
2658 else if (ret
== -EEXIST
)
2659 verbose(env
, "multiple exception callback tags for main subprog\n");
2663 ret
= btf_find_by_name_kind(btf
, name
, BTF_KIND_FUNC
);
2665 verbose(env
, "exception callback '%s' could not be found in BTF\n", name
);
2669 t
= btf_type_by_id(btf
, id
);
2670 if (btf_func_linkage(t
) != BTF_FUNC_GLOBAL
) {
2671 verbose(env
, "exception callback '%s' must have global linkage\n", name
);
2675 for (i
= 0; i
< aux
->func_info_cnt
; i
++) {
2676 if (aux
->func_info
[i
].type_id
!= id
)
2678 ret
= aux
->func_info
[i
].insn_off
;
2679 /* Further func_info and subprog checks will also happen
2680 * later, so assume this is the right insn_off for now.
2683 verbose(env
, "invalid exception callback insn_off in func_info: 0\n");
2688 verbose(env
, "exception callback type id not found in func_info\n");
2694 #define MAX_KFUNC_DESCS 256
2695 #define MAX_KFUNC_BTFS 256
2697 struct bpf_kfunc_desc
{
2698 struct btf_func_model func_model
;
2705 struct bpf_kfunc_btf
{
2707 struct module
*module
;
2711 struct bpf_kfunc_desc_tab
{
2712 /* Sorted by func_id (BTF ID) and offset (fd_array offset) during
2713 * verification. JITs do lookups by bpf_insn, where func_id may not be
2714 * available, therefore at the end of verification do_misc_fixups()
2715 * sorts this by imm and offset.
2717 struct bpf_kfunc_desc descs
[MAX_KFUNC_DESCS
];
2721 struct bpf_kfunc_btf_tab
{
2722 struct bpf_kfunc_btf descs
[MAX_KFUNC_BTFS
];
2726 static int kfunc_desc_cmp_by_id_off(const void *a
, const void *b
)
2728 const struct bpf_kfunc_desc
*d0
= a
;
2729 const struct bpf_kfunc_desc
*d1
= b
;
2731 /* func_id is not greater than BTF_MAX_TYPE */
2732 return d0
->func_id
- d1
->func_id
?: d0
->offset
- d1
->offset
;
2735 static int kfunc_btf_cmp_by_off(const void *a
, const void *b
)
2737 const struct bpf_kfunc_btf
*d0
= a
;
2738 const struct bpf_kfunc_btf
*d1
= b
;
2740 return d0
->offset
- d1
->offset
;
2743 static const struct bpf_kfunc_desc
*
2744 find_kfunc_desc(const struct bpf_prog
*prog
, u32 func_id
, u16 offset
)
2746 struct bpf_kfunc_desc desc
= {
2750 struct bpf_kfunc_desc_tab
*tab
;
2752 tab
= prog
->aux
->kfunc_tab
;
2753 return bsearch(&desc
, tab
->descs
, tab
->nr_descs
,
2754 sizeof(tab
->descs
[0]), kfunc_desc_cmp_by_id_off
);
2757 int bpf_get_kfunc_addr(const struct bpf_prog
*prog
, u32 func_id
,
2758 u16 btf_fd_idx
, u8
**func_addr
)
2760 const struct bpf_kfunc_desc
*desc
;
2762 desc
= find_kfunc_desc(prog
, func_id
, btf_fd_idx
);
2766 *func_addr
= (u8
*)desc
->addr
;
2770 static struct btf
*__find_kfunc_desc_btf(struct bpf_verifier_env
*env
,
2773 struct bpf_kfunc_btf kf_btf
= { .offset
= offset
};
2774 struct bpf_kfunc_btf_tab
*tab
;
2775 struct bpf_kfunc_btf
*b
;
2780 tab
= env
->prog
->aux
->kfunc_btf_tab
;
2781 b
= bsearch(&kf_btf
, tab
->descs
, tab
->nr_descs
,
2782 sizeof(tab
->descs
[0]), kfunc_btf_cmp_by_off
);
2784 if (tab
->nr_descs
== MAX_KFUNC_BTFS
) {
2785 verbose(env
, "too many different module BTFs\n");
2786 return ERR_PTR(-E2BIG
);
2789 if (bpfptr_is_null(env
->fd_array
)) {
2790 verbose(env
, "kfunc offset > 0 without fd_array is invalid\n");
2791 return ERR_PTR(-EPROTO
);
2794 if (copy_from_bpfptr_offset(&btf_fd
, env
->fd_array
,
2795 offset
* sizeof(btf_fd
),
2797 return ERR_PTR(-EFAULT
);
2799 btf
= btf_get_by_fd(btf_fd
);
2801 verbose(env
, "invalid module BTF fd specified\n");
2805 if (!btf_is_module(btf
)) {
2806 verbose(env
, "BTF fd for kfunc is not a module BTF\n");
2808 return ERR_PTR(-EINVAL
);
2811 mod
= btf_try_get_module(btf
);
2814 return ERR_PTR(-ENXIO
);
2817 b
= &tab
->descs
[tab
->nr_descs
++];
2822 /* sort() reorders entries by value, so b may no longer point
2823 * to the right entry after this
2825 sort(tab
->descs
, tab
->nr_descs
, sizeof(tab
->descs
[0]),
2826 kfunc_btf_cmp_by_off
, NULL
);
2834 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab
*tab
)
2839 while (tab
->nr_descs
--) {
2840 module_put(tab
->descs
[tab
->nr_descs
].module
);
2841 btf_put(tab
->descs
[tab
->nr_descs
].btf
);
2846 static struct btf
*find_kfunc_desc_btf(struct bpf_verifier_env
*env
, s16 offset
)
2850 /* In the future, this can be allowed to increase limit
2851 * of fd index into fd_array, interpreted as u16.
2853 verbose(env
, "negative offset disallowed for kernel module function call\n");
2854 return ERR_PTR(-EINVAL
);
2857 return __find_kfunc_desc_btf(env
, offset
);
2859 return btf_vmlinux
?: ERR_PTR(-ENOENT
);
2862 static int add_kfunc_call(struct bpf_verifier_env
*env
, u32 func_id
, s16 offset
)
2864 const struct btf_type
*func
, *func_proto
;
2865 struct bpf_kfunc_btf_tab
*btf_tab
;
2866 struct bpf_kfunc_desc_tab
*tab
;
2867 struct bpf_prog_aux
*prog_aux
;
2868 struct bpf_kfunc_desc
*desc
;
2869 const char *func_name
;
2870 struct btf
*desc_btf
;
2871 unsigned long call_imm
;
2875 prog_aux
= env
->prog
->aux
;
2876 tab
= prog_aux
->kfunc_tab
;
2877 btf_tab
= prog_aux
->kfunc_btf_tab
;
2880 verbose(env
, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
2884 if (!env
->prog
->jit_requested
) {
2885 verbose(env
, "JIT is required for calling kernel function\n");
2889 if (!bpf_jit_supports_kfunc_call()) {
2890 verbose(env
, "JIT does not support calling kernel function\n");
2894 if (!env
->prog
->gpl_compatible
) {
2895 verbose(env
, "cannot call kernel function from non-GPL compatible program\n");
2899 tab
= kzalloc(sizeof(*tab
), GFP_KERNEL
);
2902 prog_aux
->kfunc_tab
= tab
;
2905 /* func_id == 0 is always invalid, but instead of returning an error, be
2906 * conservative and wait until the code elimination pass before returning
2907 * error, so that invalid calls that get pruned out can be in BPF programs
2908 * loaded from userspace. It is also required that offset be untouched
2911 if (!func_id
&& !offset
)
2914 if (!btf_tab
&& offset
) {
2915 btf_tab
= kzalloc(sizeof(*btf_tab
), GFP_KERNEL
);
2918 prog_aux
->kfunc_btf_tab
= btf_tab
;
2921 desc_btf
= find_kfunc_desc_btf(env
, offset
);
2922 if (IS_ERR(desc_btf
)) {
2923 verbose(env
, "failed to find BTF for kernel function\n");
2924 return PTR_ERR(desc_btf
);
2927 if (find_kfunc_desc(env
->prog
, func_id
, offset
))
2930 if (tab
->nr_descs
== MAX_KFUNC_DESCS
) {
2931 verbose(env
, "too many different kernel function calls\n");
2935 func
= btf_type_by_id(desc_btf
, func_id
);
2936 if (!func
|| !btf_type_is_func(func
)) {
2937 verbose(env
, "kernel btf_id %u is not a function\n",
2941 func_proto
= btf_type_by_id(desc_btf
, func
->type
);
2942 if (!func_proto
|| !btf_type_is_func_proto(func_proto
)) {
2943 verbose(env
, "kernel function btf_id %u does not have a valid func_proto\n",
2948 func_name
= btf_name_by_offset(desc_btf
, func
->name_off
);
2949 addr
= kallsyms_lookup_name(func_name
);
2951 verbose(env
, "cannot find address for kernel function %s\n",
2955 specialize_kfunc(env
, func_id
, offset
, &addr
);
2957 if (bpf_jit_supports_far_kfunc_call()) {
2960 call_imm
= BPF_CALL_IMM(addr
);
2961 /* Check whether the relative offset overflows desc->imm */
2962 if ((unsigned long)(s32
)call_imm
!= call_imm
) {
2963 verbose(env
, "address of kernel function %s is out of range\n",
2969 if (bpf_dev_bound_kfunc_id(func_id
)) {
2970 err
= bpf_dev_bound_kfunc_check(&env
->log
, prog_aux
);
2975 desc
= &tab
->descs
[tab
->nr_descs
++];
2976 desc
->func_id
= func_id
;
2977 desc
->imm
= call_imm
;
2978 desc
->offset
= offset
;
2980 err
= btf_distill_func_proto(&env
->log
, desc_btf
,
2981 func_proto
, func_name
,
2984 sort(tab
->descs
, tab
->nr_descs
, sizeof(tab
->descs
[0]),
2985 kfunc_desc_cmp_by_id_off
, NULL
);
2989 static int kfunc_desc_cmp_by_imm_off(const void *a
, const void *b
)
2991 const struct bpf_kfunc_desc
*d0
= a
;
2992 const struct bpf_kfunc_desc
*d1
= b
;
2994 if (d0
->imm
!= d1
->imm
)
2995 return d0
->imm
< d1
->imm
? -1 : 1;
2996 if (d0
->offset
!= d1
->offset
)
2997 return d0
->offset
< d1
->offset
? -1 : 1;
3001 static void sort_kfunc_descs_by_imm_off(struct bpf_prog
*prog
)
3003 struct bpf_kfunc_desc_tab
*tab
;
3005 tab
= prog
->aux
->kfunc_tab
;
3009 sort(tab
->descs
, tab
->nr_descs
, sizeof(tab
->descs
[0]),
3010 kfunc_desc_cmp_by_imm_off
, NULL
);
3013 bool bpf_prog_has_kfunc_call(const struct bpf_prog
*prog
)
3015 return !!prog
->aux
->kfunc_tab
;
3018 const struct btf_func_model
*
3019 bpf_jit_find_kfunc_model(const struct bpf_prog
*prog
,
3020 const struct bpf_insn
*insn
)
3022 const struct bpf_kfunc_desc desc
= {
3024 .offset
= insn
->off
,
3026 const struct bpf_kfunc_desc
*res
;
3027 struct bpf_kfunc_desc_tab
*tab
;
3029 tab
= prog
->aux
->kfunc_tab
;
3030 res
= bsearch(&desc
, tab
->descs
, tab
->nr_descs
,
3031 sizeof(tab
->descs
[0]), kfunc_desc_cmp_by_imm_off
);
3033 return res
? &res
->func_model
: NULL
;
3036 static int add_subprog_and_kfunc(struct bpf_verifier_env
*env
)
3038 struct bpf_subprog_info
*subprog
= env
->subprog_info
;
3039 int i
, ret
, insn_cnt
= env
->prog
->len
, ex_cb_insn
;
3040 struct bpf_insn
*insn
= env
->prog
->insnsi
;
3042 /* Add entry function. */
3043 ret
= add_subprog(env
, 0);
3047 for (i
= 0; i
< insn_cnt
; i
++, insn
++) {
3048 if (!bpf_pseudo_func(insn
) && !bpf_pseudo_call(insn
) &&
3049 !bpf_pseudo_kfunc_call(insn
))
3052 if (!env
->bpf_capable
) {
3053 verbose(env
, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
3057 if (bpf_pseudo_func(insn
) || bpf_pseudo_call(insn
))
3058 ret
= add_subprog(env
, i
+ insn
->imm
+ 1);
3060 ret
= add_kfunc_call(env
, insn
->imm
, insn
->off
);
3066 ret
= bpf_find_exception_callback_insn_off(env
);
3071 /* If ex_cb_insn > 0, this means that the main program has a subprog
3072 * marked using BTF decl tag to serve as the exception callback.
3075 ret
= add_subprog(env
, ex_cb_insn
);
3078 for (i
= 1; i
< env
->subprog_cnt
; i
++) {
3079 if (env
->subprog_info
[i
].start
!= ex_cb_insn
)
3081 env
->exception_callback_subprog
= i
;
3082 mark_subprog_exc_cb(env
, i
);
3087 /* Add a fake 'exit' subprog which could simplify subprog iteration
3088 * logic. 'subprog_cnt' should not be increased.
3090 subprog
[env
->subprog_cnt
].start
= insn_cnt
;
3092 if (env
->log
.level
& BPF_LOG_LEVEL2
)
3093 for (i
= 0; i
< env
->subprog_cnt
; i
++)
3094 verbose(env
, "func#%d @%d\n", i
, subprog
[i
].start
);
3099 static int check_subprogs(struct bpf_verifier_env
*env
)
3101 int i
, subprog_start
, subprog_end
, off
, cur_subprog
= 0;
3102 struct bpf_subprog_info
*subprog
= env
->subprog_info
;
3103 struct bpf_insn
*insn
= env
->prog
->insnsi
;
3104 int insn_cnt
= env
->prog
->len
;
3106 /* now check that all jumps are within the same subprog */
3107 subprog_start
= subprog
[cur_subprog
].start
;
3108 subprog_end
= subprog
[cur_subprog
+ 1].start
;
3109 for (i
= 0; i
< insn_cnt
; i
++) {
3110 u8 code
= insn
[i
].code
;
3112 if (code
== (BPF_JMP
| BPF_CALL
) &&
3113 insn
[i
].src_reg
== 0 &&
3114 insn
[i
].imm
== BPF_FUNC_tail_call
) {
3115 subprog
[cur_subprog
].has_tail_call
= true;
3116 subprog
[cur_subprog
].tail_call_reachable
= true;
3118 if (BPF_CLASS(code
) == BPF_LD
&&
3119 (BPF_MODE(code
) == BPF_ABS
|| BPF_MODE(code
) == BPF_IND
))
3120 subprog
[cur_subprog
].has_ld_abs
= true;
3121 if (BPF_CLASS(code
) != BPF_JMP
&& BPF_CLASS(code
) != BPF_JMP32
)
3123 if (BPF_OP(code
) == BPF_EXIT
|| BPF_OP(code
) == BPF_CALL
)
3125 if (code
== (BPF_JMP32
| BPF_JA
))
3126 off
= i
+ insn
[i
].imm
+ 1;
3128 off
= i
+ insn
[i
].off
+ 1;
3129 if (off
< subprog_start
|| off
>= subprog_end
) {
3130 verbose(env
, "jump out of range from insn %d to %d\n", i
, off
);
3134 if (i
== subprog_end
- 1) {
3135 /* to avoid fall-through from one subprog into another
3136 * the last insn of the subprog should be either exit
3137 * or unconditional jump back or bpf_throw call
3139 if (code
!= (BPF_JMP
| BPF_EXIT
) &&
3140 code
!= (BPF_JMP32
| BPF_JA
) &&
3141 code
!= (BPF_JMP
| BPF_JA
)) {
3142 verbose(env
, "last insn is not an exit or jmp\n");
3145 subprog_start
= subprog_end
;
3147 if (cur_subprog
< env
->subprog_cnt
)
3148 subprog_end
= subprog
[cur_subprog
+ 1].start
;
3154 /* Parentage chain of this register (or stack slot) should take care of all
3155 * issues like callee-saved registers, stack slot allocation time, etc.
3157 static int mark_reg_read(struct bpf_verifier_env
*env
,
3158 const struct bpf_reg_state
*state
,
3159 struct bpf_reg_state
*parent
, u8 flag
)
3161 bool writes
= parent
== state
->parent
; /* Observe write marks */
3165 /* if read wasn't screened by an earlier write ... */
3166 if (writes
&& state
->live
& REG_LIVE_WRITTEN
)
3168 if (parent
->live
& REG_LIVE_DONE
) {
3169 verbose(env
, "verifier BUG type %s var_off %lld off %d\n",
3170 reg_type_str(env
, parent
->type
),
3171 parent
->var_off
.value
, parent
->off
);
3174 /* The first condition is more likely to be true than the
3175 * second, checked it first.
3177 if ((parent
->live
& REG_LIVE_READ
) == flag
||
3178 parent
->live
& REG_LIVE_READ64
)
3179 /* The parentage chain never changes and
3180 * this parent was already marked as LIVE_READ.
3181 * There is no need to keep walking the chain again and
3182 * keep re-marking all parents as LIVE_READ.
3183 * This case happens when the same register is read
3184 * multiple times without writes into it in-between.
3185 * Also, if parent has the stronger REG_LIVE_READ64 set,
3186 * then no need to set the weak REG_LIVE_READ32.
3189 /* ... then we depend on parent's value */
3190 parent
->live
|= flag
;
3191 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
3192 if (flag
== REG_LIVE_READ64
)
3193 parent
->live
&= ~REG_LIVE_READ32
;
3195 parent
= state
->parent
;
3200 if (env
->longest_mark_read_walk
< cnt
)
3201 env
->longest_mark_read_walk
= cnt
;
3205 static int mark_dynptr_read(struct bpf_verifier_env
*env
, struct bpf_reg_state
*reg
)
3207 struct bpf_func_state
*state
= func(env
, reg
);
3210 /* For CONST_PTR_TO_DYNPTR, it must have already been done by
3211 * check_reg_arg in check_helper_call and mark_btf_func_reg_size in
3214 if (reg
->type
== CONST_PTR_TO_DYNPTR
)
3216 spi
= dynptr_get_spi(env
, reg
);
3219 /* Caller ensures dynptr is valid and initialized, which means spi is in
3220 * bounds and spi is the first dynptr slot. Simply mark stack slot as
3223 ret
= mark_reg_read(env
, &state
->stack
[spi
].spilled_ptr
,
3224 state
->stack
[spi
].spilled_ptr
.parent
, REG_LIVE_READ64
);
3227 return mark_reg_read(env
, &state
->stack
[spi
- 1].spilled_ptr
,
3228 state
->stack
[spi
- 1].spilled_ptr
.parent
, REG_LIVE_READ64
);
3231 static int mark_iter_read(struct bpf_verifier_env
*env
, struct bpf_reg_state
*reg
,
3232 int spi
, int nr_slots
)
3234 struct bpf_func_state
*state
= func(env
, reg
);
3237 for (i
= 0; i
< nr_slots
; i
++) {
3238 struct bpf_reg_state
*st
= &state
->stack
[spi
- i
].spilled_ptr
;
3240 err
= mark_reg_read(env
, st
, st
->parent
, REG_LIVE_READ64
);
3244 mark_stack_slot_scratched(env
, spi
- i
);
3250 /* This function is supposed to be used by the following 32-bit optimization
3251 * code only. It returns TRUE if the source or destination register operates
3252 * on 64-bit, otherwise return FALSE.
3254 static bool is_reg64(struct bpf_verifier_env
*env
, struct bpf_insn
*insn
,
3255 u32 regno
, struct bpf_reg_state
*reg
, enum reg_arg_type t
)
3260 class = BPF_CLASS(code
);
3262 if (class == BPF_JMP
) {
3263 /* BPF_EXIT for "main" will reach here. Return TRUE
3268 if (op
== BPF_CALL
) {
3269 /* BPF to BPF call will reach here because of marking
3270 * caller saved clobber with DST_OP_NO_MARK for which we
3271 * don't care the register def because they are anyway
3272 * marked as NOT_INIT already.
3274 if (insn
->src_reg
== BPF_PSEUDO_CALL
)
3276 /* Helper call will reach here because of arg type
3277 * check, conservatively return TRUE.
3286 if (class == BPF_ALU64
&& op
== BPF_END
&& (insn
->imm
== 16 || insn
->imm
== 32))
3289 if (class == BPF_ALU64
|| class == BPF_JMP
||
3290 (class == BPF_ALU
&& op
== BPF_END
&& insn
->imm
== 64))
3293 if (class == BPF_ALU
|| class == BPF_JMP32
)
3296 if (class == BPF_LDX
) {
3298 return BPF_SIZE(code
) == BPF_DW
|| BPF_MODE(code
) == BPF_MEMSX
;
3299 /* LDX source must be ptr. */
3303 if (class == BPF_STX
) {
3304 /* BPF_STX (including atomic variants) has multiple source
3305 * operands, one of which is a ptr. Check whether the caller is
3308 if (t
== SRC_OP
&& reg
->type
!= SCALAR_VALUE
)
3310 return BPF_SIZE(code
) == BPF_DW
;
3313 if (class == BPF_LD
) {
3314 u8 mode
= BPF_MODE(code
);
3317 if (mode
== BPF_IMM
)
3320 /* Both LD_IND and LD_ABS return 32-bit data. */
3324 /* Implicit ctx ptr. */
3325 if (regno
== BPF_REG_6
)
3328 /* Explicit source could be any width. */
3332 if (class == BPF_ST
)
3333 /* The only source register for BPF_ST is a ptr. */
3336 /* Conservatively return true at default. */
3340 /* Return the regno defined by the insn, or -1. */
3341 static int insn_def_regno(const struct bpf_insn
*insn
)
3343 switch (BPF_CLASS(insn
->code
)) {
3349 if ((BPF_MODE(insn
->code
) == BPF_ATOMIC
||
3350 BPF_MODE(insn
->code
) == BPF_PROBE_ATOMIC
) &&
3351 (insn
->imm
& BPF_FETCH
)) {
3352 if (insn
->imm
== BPF_CMPXCHG
)
3355 return insn
->src_reg
;
3360 return insn
->dst_reg
;
3364 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
3365 static bool insn_has_def32(struct bpf_verifier_env
*env
, struct bpf_insn
*insn
)
3367 int dst_reg
= insn_def_regno(insn
);
3372 return !is_reg64(env
, insn
, dst_reg
, NULL
, DST_OP
);
3375 static void mark_insn_zext(struct bpf_verifier_env
*env
,
3376 struct bpf_reg_state
*reg
)
3378 s32 def_idx
= reg
->subreg_def
;
3380 if (def_idx
== DEF_NOT_SUBREG
)
3383 env
->insn_aux_data
[def_idx
- 1].zext_dst
= true;
3384 /* The dst will be zero extended, so won't be sub-register anymore. */
3385 reg
->subreg_def
= DEF_NOT_SUBREG
;
3388 static int __check_reg_arg(struct bpf_verifier_env
*env
, struct bpf_reg_state
*regs
, u32 regno
,
3389 enum reg_arg_type t
)
3391 struct bpf_insn
*insn
= env
->prog
->insnsi
+ env
->insn_idx
;
3392 struct bpf_reg_state
*reg
;
3395 if (regno
>= MAX_BPF_REG
) {
3396 verbose(env
, "R%d is invalid\n", regno
);
3400 mark_reg_scratched(env
, regno
);
3403 rw64
= is_reg64(env
, insn
, regno
, reg
, t
);
3405 /* check whether register used as source operand can be read */
3406 if (reg
->type
== NOT_INIT
) {
3407 verbose(env
, "R%d !read_ok\n", regno
);
3410 /* We don't need to worry about FP liveness because it's read-only */
3411 if (regno
== BPF_REG_FP
)
3415 mark_insn_zext(env
, reg
);
3417 return mark_reg_read(env
, reg
, reg
->parent
,
3418 rw64
? REG_LIVE_READ64
: REG_LIVE_READ32
);
3420 /* check whether register used as dest operand can be written to */
3421 if (regno
== BPF_REG_FP
) {
3422 verbose(env
, "frame pointer is read only\n");
3425 reg
->live
|= REG_LIVE_WRITTEN
;
3426 reg
->subreg_def
= rw64
? DEF_NOT_SUBREG
: env
->insn_idx
+ 1;
3428 mark_reg_unknown(env
, regs
, regno
);
3433 static int check_reg_arg(struct bpf_verifier_env
*env
, u32 regno
,
3434 enum reg_arg_type t
)
3436 struct bpf_verifier_state
*vstate
= env
->cur_state
;
3437 struct bpf_func_state
*state
= vstate
->frame
[vstate
->curframe
];
3439 return __check_reg_arg(env
, state
->regs
, regno
, t
);
static int insn_stack_access_flags(int frameno, int spi)
{
	return INSN_F_STACK_ACCESS | (spi << INSN_F_SPI_SHIFT) | frameno;
}

static int insn_stack_access_spi(int insn_flags)
{
	return (insn_flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;
}

static int insn_stack_access_frameno(int insn_flags)
{
	return insn_flags & INSN_F_FRAMENO_MASK;
}
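/* Illustrative example (not part of the original source): without assuming
 * the exact bit positions defined elsewhere, insn_stack_access_flags(1, 5)
 * packs frameno 1 into the low frameno bits, spi 5 shifted by
 * INSN_F_SPI_SHIFT, plus the INSN_F_STACK_ACCESS marker bit; the two
 * accessors above then recover spi 5 and frameno 1 from those flags.
 */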
static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
{
	env->insn_aux_data[idx].jmp_point = true;
}

static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
{
	return env->insn_aux_data[insn_idx].jmp_point;
}
3467 #define LR_FRAMENO_BITS 3
3468 #define LR_SPI_BITS 6
3469 #define LR_ENTRY_BITS (LR_SPI_BITS + LR_FRAMENO_BITS + 1)
3470 #define LR_SIZE_BITS 4
3471 #define LR_FRAMENO_MASK ((1ull << LR_FRAMENO_BITS) - 1)
3472 #define LR_SPI_MASK ((1ull << LR_SPI_BITS) - 1)
3473 #define LR_SIZE_MASK ((1ull << LR_SIZE_BITS) - 1)
3474 #define LR_SPI_OFF LR_FRAMENO_BITS
3475 #define LR_IS_REG_OFF (LR_SPI_BITS + LR_FRAMENO_BITS)
3476 #define LINKED_REGS_MAX 6
3487 struct linked_regs
{
3489 struct linked_reg entries
[LINKED_REGS_MAX
];
3492 static struct linked_reg
*linked_regs_push(struct linked_regs
*s
)
3494 if (s
->cnt
< LINKED_REGS_MAX
)
3495 return &s
->entries
[s
->cnt
++];
3500 /* Use u64 as a vector of 6 10-bit values, use first 4-bits to track
3501 * number of elements currently in stack.
3502 * Pack one history entry for linked registers as 10 bits in the following format:
3504 * - 6-bits spi_or_reg
3507 static u64
linked_regs_pack(struct linked_regs
*s
)
3512 for (i
= 0; i
< s
->cnt
; ++i
) {
3513 struct linked_reg
*e
= &s
->entries
[i
];
3517 tmp
|= e
->spi
<< LR_SPI_OFF
;
3518 tmp
|= (e
->is_reg
? 1 : 0) << LR_IS_REG_OFF
;
3520 val
<<= LR_ENTRY_BITS
;
3523 val
<<= LR_SIZE_BITS
;
3528 static void linked_regs_unpack(u64 val
, struct linked_regs
*s
)
3532 s
->cnt
= val
& LR_SIZE_MASK
;
3533 val
>>= LR_SIZE_BITS
;
3535 for (i
= 0; i
< s
->cnt
; ++i
) {
3536 struct linked_reg
*e
= &s
->entries
[i
];
3538 e
->frameno
= val
& LR_FRAMENO_MASK
;
3539 e
->spi
= (val
>> LR_SPI_OFF
) & LR_SPI_MASK
;
3540 e
->is_reg
= (val
>> LR_IS_REG_OFF
) & 0x1;
3541 val
>>= LR_ENTRY_BITS
;
3545 /* for any branch, call, exit record the history of jmps in the given state */
3546 static int push_insn_history(struct bpf_verifier_env
*env
, struct bpf_verifier_state
*cur
,
3547 int insn_flags
, u64 linked_regs
)
3549 struct bpf_insn_hist_entry
*p
;
3552 /* combine instruction flags if we already recorded this instruction */
3553 if (env
->cur_hist_ent
) {
3554 /* atomic instructions push insn_flags twice, for READ and
3555 * WRITE sides, but they should agree on stack slot
3557 WARN_ONCE((env
->cur_hist_ent
->flags
& insn_flags
) &&
3558 (env
->cur_hist_ent
->flags
& insn_flags
) != insn_flags
,
3559 "verifier insn history bug: insn_idx %d cur flags %x new flags %x\n",
3560 env
->insn_idx
, env
->cur_hist_ent
->flags
, insn_flags
);
3561 env
->cur_hist_ent
->flags
|= insn_flags
;
3562 WARN_ONCE(env
->cur_hist_ent
->linked_regs
!= 0,
3563 "verifier insn history bug: insn_idx %d linked_regs != 0: %#llx\n",
3564 env
->insn_idx
, env
->cur_hist_ent
->linked_regs
);
3565 env
->cur_hist_ent
->linked_regs
= linked_regs
;
3569 if (cur
->insn_hist_end
+ 1 > env
->insn_hist_cap
) {
3570 alloc_size
= size_mul(cur
->insn_hist_end
+ 1, sizeof(*p
));
3571 p
= kvrealloc(env
->insn_hist
, alloc_size
, GFP_USER
);
3575 env
->insn_hist_cap
= alloc_size
/ sizeof(*p
);
3578 p
= &env
->insn_hist
[cur
->insn_hist_end
];
3579 p
->idx
= env
->insn_idx
;
3580 p
->prev_idx
= env
->prev_insn_idx
;
3581 p
->flags
= insn_flags
;
3582 p
->linked_regs
= linked_regs
;
3584 cur
->insn_hist_end
++;
3585 env
->cur_hist_ent
= p
;
3590 static struct bpf_insn_hist_entry
*get_insn_hist_entry(struct bpf_verifier_env
*env
,
3591 u32 hist_start
, u32 hist_end
, int insn_idx
)
3593 if (hist_end
> hist_start
&& env
->insn_hist
[hist_end
- 1].idx
== insn_idx
)
3594 return &env
->insn_hist
[hist_end
- 1];
3598 /* Backtrack one insn at a time. If idx is not at the top of recorded
3599 * history then previous instruction came from straight line execution.
3600 * Return -ENOENT if we exhausted all instructions within given state.
3602 * It's legal to have a bit of a looping with the same starting and ending
3603 * insn index within the same state, e.g.: 3->4->5->3, so just because current
3604 * instruction index is the same as state's first_idx doesn't mean we are
3605 * done. If there is still some jump history left, we should keep going. We
3606 * need to take into account that we might have a jump history between given
3607 * state's parent and itself, due to checkpointing. In this case, we'll have
3608 * history entry recording a jump from last instruction of parent state and
3609 * first instruction of given state.
3611 static int get_prev_insn_idx(const struct bpf_verifier_env
*env
,
3612 struct bpf_verifier_state
*st
,
3613 int insn_idx
, u32 hist_start
, u32
*hist_endp
)
3615 u32 hist_end
= *hist_endp
;
3616 u32 cnt
= hist_end
- hist_start
;
3618 if (insn_idx
== st
->first_insn_idx
) {
3621 if (cnt
== 1 && env
->insn_hist
[hist_start
].idx
== insn_idx
)
3625 if (cnt
&& env
->insn_hist
[hist_end
- 1].idx
== insn_idx
) {
3627 return env
->insn_hist
[hist_end
- 1].prev_idx
;
3629 return insn_idx
- 1;
3633 static const char *disasm_kfunc_name(void *data
, const struct bpf_insn
*insn
)
3635 const struct btf_type
*func
;
3636 struct btf
*desc_btf
;
3638 if (insn
->src_reg
!= BPF_PSEUDO_KFUNC_CALL
)
3641 desc_btf
= find_kfunc_desc_btf(data
, insn
->off
);
3642 if (IS_ERR(desc_btf
))
3645 func
= btf_type_by_id(desc_btf
, insn
->imm
);
3646 return btf_name_by_offset(desc_btf
, func
->name_off
);
3649 static inline void bt_init(struct backtrack_state
*bt
, u32 frame
)
3654 static inline void bt_reset(struct backtrack_state
*bt
)
3656 struct bpf_verifier_env
*env
= bt
->env
;
3658 memset(bt
, 0, sizeof(*bt
));
3662 static inline u32
bt_empty(struct backtrack_state
*bt
)
3667 for (i
= 0; i
<= bt
->frame
; i
++)
3668 mask
|= bt
->reg_masks
[i
] | bt
->stack_masks
[i
];
3673 static inline int bt_subprog_enter(struct backtrack_state
*bt
)
3675 if (bt
->frame
== MAX_CALL_FRAMES
- 1) {
3676 verbose(bt
->env
, "BUG subprog enter from frame %d\n", bt
->frame
);
3677 WARN_ONCE(1, "verifier backtracking bug");
3684 static inline int bt_subprog_exit(struct backtrack_state
*bt
)
3686 if (bt
->frame
== 0) {
3687 verbose(bt
->env
, "BUG subprog exit from frame 0\n");
3688 WARN_ONCE(1, "verifier backtracking bug");
3695 static inline void bt_set_frame_reg(struct backtrack_state
*bt
, u32 frame
, u32 reg
)
3697 bt
->reg_masks
[frame
] |= 1 << reg
;
3700 static inline void bt_clear_frame_reg(struct backtrack_state
*bt
, u32 frame
, u32 reg
)
3702 bt
->reg_masks
[frame
] &= ~(1 << reg
);
3705 static inline void bt_set_reg(struct backtrack_state
*bt
, u32 reg
)
3707 bt_set_frame_reg(bt
, bt
->frame
, reg
);
3710 static inline void bt_clear_reg(struct backtrack_state
*bt
, u32 reg
)
3712 bt_clear_frame_reg(bt
, bt
->frame
, reg
);
3715 static inline void bt_set_frame_slot(struct backtrack_state
*bt
, u32 frame
, u32 slot
)
3717 bt
->stack_masks
[frame
] |= 1ull << slot
;
3720 static inline void bt_clear_frame_slot(struct backtrack_state
*bt
, u32 frame
, u32 slot
)
3722 bt
->stack_masks
[frame
] &= ~(1ull << slot
);
3725 static inline u32
bt_frame_reg_mask(struct backtrack_state
*bt
, u32 frame
)
3727 return bt
->reg_masks
[frame
];
3730 static inline u32
bt_reg_mask(struct backtrack_state
*bt
)
3732 return bt
->reg_masks
[bt
->frame
];
3735 static inline u64
bt_frame_stack_mask(struct backtrack_state
*bt
, u32 frame
)
3737 return bt
->stack_masks
[frame
];
3740 static inline u64
bt_stack_mask(struct backtrack_state
*bt
)
3742 return bt
->stack_masks
[bt
->frame
];
3745 static inline bool bt_is_reg_set(struct backtrack_state
*bt
, u32 reg
)
3747 return bt
->reg_masks
[bt
->frame
] & (1 << reg
);
3750 static inline bool bt_is_frame_reg_set(struct backtrack_state
*bt
, u32 frame
, u32 reg
)
3752 return bt
->reg_masks
[frame
] & (1 << reg
);
3755 static inline bool bt_is_frame_slot_set(struct backtrack_state
*bt
, u32 frame
, u32 slot
)
3757 return bt
->stack_masks
[frame
] & (1ull << slot
);
3760 /* format registers bitmask, e.g., "r0,r2,r4" for 0x15 mask */
3761 static void fmt_reg_mask(char *buf
, ssize_t buf_sz
, u32 reg_mask
)
3763 DECLARE_BITMAP(mask
, 64);
3769 bitmap_from_u64(mask
, reg_mask
);
3770 for_each_set_bit(i
, mask
, 32) {
3771 n
= snprintf(buf
, buf_sz
, "%sr%d", first
? "" : ",", i
);
3779 /* format stack slots bitmask, e.g., "-8,-24,-40" for 0x15 mask */
3780 static void fmt_stack_mask(char *buf
, ssize_t buf_sz
, u64 stack_mask
)
3782 DECLARE_BITMAP(mask
, 64);
3788 bitmap_from_u64(mask
, stack_mask
);
3789 for_each_set_bit(i
, mask
, 64) {
3790 n
= snprintf(buf
, buf_sz
, "%s%d", first
? "" : ",", -(i
+ 1) * 8);
3799 /* If any register R in hist->linked_regs is marked as precise in bt,
3800 * do bt_set_frame_{reg,slot}(bt, R) for all registers in hist->linked_regs.
3802 static void bt_sync_linked_regs(struct backtrack_state
*bt
, struct bpf_insn_hist_entry
*hist
)
3804 struct linked_regs linked_regs
;
3805 bool some_precise
= false;
3808 if (!hist
|| hist
->linked_regs
== 0)
3811 linked_regs_unpack(hist
->linked_regs
, &linked_regs
);
3812 for (i
= 0; i
< linked_regs
.cnt
; ++i
) {
3813 struct linked_reg
*e
= &linked_regs
.entries
[i
];
3815 if ((e
->is_reg
&& bt_is_frame_reg_set(bt
, e
->frameno
, e
->regno
)) ||
3816 (!e
->is_reg
&& bt_is_frame_slot_set(bt
, e
->frameno
, e
->spi
))) {
3817 some_precise
= true;
3825 for (i
= 0; i
< linked_regs
.cnt
; ++i
) {
3826 struct linked_reg
*e
= &linked_regs
.entries
[i
];
3829 bt_set_frame_reg(bt
, e
->frameno
, e
->regno
);
3831 bt_set_frame_slot(bt
, e
->frameno
, e
->spi
);
3835 static bool calls_callback(struct bpf_verifier_env
*env
, int insn_idx
);
3837 /* For given verifier state backtrack_insn() is called from the last insn to
3838 * the first insn. Its purpose is to compute a bitmask of registers and
3839 * stack slots that needs precision in the parent verifier state.
3841 * @idx is an index of the instruction we are currently processing;
3842 * @subseq_idx is an index of the subsequent instruction that:
3843 * - *would be* executed next, if jump history is viewed in forward order;
3844 * - *was* processed previously during backtracking.
3846 static int backtrack_insn(struct bpf_verifier_env
*env
, int idx
, int subseq_idx
,
3847 struct bpf_insn_hist_entry
*hist
, struct backtrack_state
*bt
)
	const struct bpf_insn_cbs cbs = {
		.cb_call	= disasm_kfunc_name,
		.cb_print	= verbose,
		.private_data	= env,
	};
	struct bpf_insn *insn = env->prog->insnsi + idx;
	u8 class = BPF_CLASS(insn->code);
	u8 opcode = BPF_OP(insn->code);
	u8 mode = BPF_MODE(insn->code);
	u32 dreg = insn->dst_reg;
	u32 sreg = insn->src_reg;
	u32 spi, i, fr;

	if (insn->code == 0)
		return 0;
	if (env->log.level & BPF_LOG_LEVEL2) {
		fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_reg_mask(bt));
		verbose(env, "mark_precise: frame%d: regs=%s ",
			bt->frame, env->tmp_str_buf);
		fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt));
		verbose(env, "stack=%s before ", env->tmp_str_buf);
		verbose(env, "%d: ", idx);
		print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
	}

	/* If there is a history record that some registers gained range at this insn,
	 * propagate precision marks to those registers, so that bt_is_reg_set()
	 * accounts for these registers.
	 */
	bt_sync_linked_regs(bt, hist);

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (!bt_is_reg_set(bt, dreg))
			return 0;
		if (opcode == BPF_END || opcode == BPF_NEG) {
			/* sreg is reserved and unused
			 * dreg still needs precision before this insn
			 */
			return 0;
		} else if (opcode == BPF_MOV) {
			if (BPF_SRC(insn->code) == BPF_X) {
				/* dreg = sreg or dreg = (s8, s16, s32)sreg
				 * dreg needs precision after this insn
				 * sreg needs precision before this insn
				 */
				bt_clear_reg(bt, dreg);
				if (sreg != BPF_REG_FP)
					bt_set_reg(bt, sreg);
			} else {
				/* dreg = K
				 * dreg needs precision after this insn.
				 * Corresponding register is already marked
				 * as precise=true in this verifier state.
				 * No further markings in parent are necessary
				 */
				bt_clear_reg(bt, dreg);
			}
		} else {
			if (BPF_SRC(insn->code) == BPF_X) {
				/* dreg += sreg
				 * both dreg and sreg need precision
				 * before this insn
				 */
				if (sreg != BPF_REG_FP)
					bt_set_reg(bt, sreg);
			} /* else dreg += K
			   * dreg still needs precision before this insn
			   */
		}
	} else if (class == BPF_LDX) {
		if (!bt_is_reg_set(bt, dreg))
			return 0;
		bt_clear_reg(bt, dreg);

		/* scalars can only be spilled into stack w/o losing precision.
		 * Load from any other memory can be zero extended.
		 * The desire to keep that precision is already indicated
		 * by 'precise' mark in corresponding register of this state.
		 * No further tracking necessary.
		 */
		if (!hist || !(hist->flags & INSN_F_STACK_ACCESS))
			return 0;
		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
		 * that [fp - off] slot contains scalar that needs to be
		 * tracked with precision
		 */
		spi = insn_stack_access_spi(hist->flags);
		fr = insn_stack_access_frameno(hist->flags);
		bt_set_frame_slot(bt, fr, spi);
	} else if (class == BPF_STX || class == BPF_ST) {
		if (bt_is_reg_set(bt, dreg))
			/* stx & st shouldn't be using _scalar_ dst_reg
			 * to access memory. It means backtracking
			 * encountered a case of pointer subtraction.
			 */
			return -ENOTSUPP;
		/* scalars can only be spilled into stack */
		if (!hist || !(hist->flags & INSN_F_STACK_ACCESS))
			return 0;
		spi = insn_stack_access_spi(hist->flags);
		fr = insn_stack_access_frameno(hist->flags);
		if (!bt_is_frame_slot_set(bt, fr, spi))
			return 0;
		bt_clear_frame_slot(bt, fr, spi);
		if (class == BPF_STX)
			bt_set_reg(bt, sreg);
	} else if (class == BPF_JMP || class == BPF_JMP32) {
		if (bpf_pseudo_call(insn)) {
			int subprog_insn_idx, subprog;

			subprog_insn_idx = idx + insn->imm + 1;
			subprog = find_subprog(env, subprog_insn_idx);
			if (subprog < 0)
				return -EFAULT;

			if (subprog_is_global(env, subprog)) {
				/* check that jump history doesn't have any
				 * extra instructions from subprog; the next
				 * instruction after call to global subprog
				 * should be literally next instruction in
				 * caller program
				 */
				WARN_ONCE(idx + 1 != subseq_idx, "verifier backtracking bug");
				/* r1-r5 are invalidated after subprog call,
				 * so for global func call it shouldn't be set
				 * anymore
				 */
				if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
					verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
					WARN_ONCE(1, "verifier backtracking bug");
					return -EFAULT;
				}
				/* global subprog always sets R0 */
				bt_clear_reg(bt, BPF_REG_0);
				return 0;
			} else {
				/* static subprog call instruction, which
				 * means that we are exiting current subprog,
				 * so only r1-r5 could be still requested as
				 * precise, r0 and r6-r10 or any stack slot in
				 * the current frame should be zero by now
				 */
				if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
					verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
					WARN_ONCE(1, "verifier backtracking bug");
					return -EFAULT;
				}
				/* we are now tracking register spills correctly,
				 * so any instance of leftover slots is a bug
				 */
				if (bt_stack_mask(bt) != 0) {
					verbose(env, "BUG stack slots %llx\n", bt_stack_mask(bt));
					WARN_ONCE(1, "verifier backtracking bug (subprog leftover stack slots)");
					return -EFAULT;
				}
				/* propagate r1-r5 to the caller */
				for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
					if (bt_is_reg_set(bt, i)) {
						bt_clear_reg(bt, i);
						bt_set_frame_reg(bt, bt->frame - 1, i);
					}
				}
				if (bt_subprog_exit(bt))
					return -EFAULT;
				return 0;
			}
		} else if (is_sync_callback_calling_insn(insn) && idx != subseq_idx - 1) {
			/* exit from callback subprog to callback-calling helper or
			 * kfunc call. Use idx/subseq_idx check to discern it from
			 * straight line code backtracking.
			 * Unlike the subprog call handling above, we shouldn't
			 * propagate precision of r1-r5 (if any requested), as they are
			 * not actually arguments passed directly to callback subprogs
			 */
			if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
				verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
				WARN_ONCE(1, "verifier backtracking bug");
				return -EFAULT;
			}
			if (bt_stack_mask(bt) != 0) {
				verbose(env, "BUG stack slots %llx\n", bt_stack_mask(bt));
				WARN_ONCE(1, "verifier backtracking bug (callback leftover stack slots)");
				return -EFAULT;
			}
			/* clear r1-r5 in callback subprog's mask */
			for (i = BPF_REG_1; i <= BPF_REG_5; i++)
				bt_clear_reg(bt, i);
			if (bt_subprog_exit(bt))
				return -EFAULT;
			return 0;
		} else if (opcode == BPF_CALL) {
			/* kfunc with imm==0 is invalid and fixup_kfunc_call will
			 * catch this error later. Make backtracking conservative
			 * with ENOTSUPP.
			 */
			if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
				return -ENOTSUPP;
			/* regular helper call sets R0 */
			bt_clear_reg(bt, BPF_REG_0);
			if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
				/* if backtracking was looking for registers R1-R5
				 * they should have been found already.
				 */
				verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
				WARN_ONCE(1, "verifier backtracking bug");
				return -EFAULT;
			}
		} else if (opcode == BPF_EXIT) {
			bool r0_precise;

			/* Backtracking to a nested function call, 'idx' is a part of
			 * the inner frame, 'subseq_idx' is a part of the outer frame.
			 * In case of a regular function call, instructions giving
			 * precision to registers R1-R5 should have been found already.
			 * In case of a callback, it is ok to have R1-R5 marked for
			 * backtracking, as these registers are set by the function
			 * invoking callback.
			 */
			if (subseq_idx >= 0 && calls_callback(env, subseq_idx))
				for (i = BPF_REG_1; i <= BPF_REG_5; i++)
					bt_clear_reg(bt, i);
			if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
				verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
				WARN_ONCE(1, "verifier backtracking bug");
				return -EFAULT;
			}

			/* BPF_EXIT in subprog or callback always returns
			 * right after the call instruction, so by checking
			 * whether the instruction at subseq_idx-1 is subprog
			 * call or not we can distinguish actual exit from
			 * *subprog* from exit from *callback*. In the former
			 * case, we need to propagate r0 precision, if
			 * necessary. In the latter case, we never do that.
			 */
			r0_precise = subseq_idx - 1 >= 0 &&
				     bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) &&
				     bt_is_reg_set(bt, BPF_REG_0);

			bt_clear_reg(bt, BPF_REG_0);
			if (bt_subprog_enter(bt))
				return -EFAULT;

			if (r0_precise)
				bt_set_reg(bt, BPF_REG_0);
			/* r6-r9 and stack slots will stay set in caller frame
			 * bitmasks until we return back from callee(s)
			 */
			return 0;
		} else if (BPF_SRC(insn->code) == BPF_X) {
			if (!bt_is_reg_set(bt, dreg) && !bt_is_reg_set(bt, sreg))
				return 0;
			/* dreg <cond> sreg
			 * Both dreg and sreg need precision before
			 * this insn. If only sreg was marked precise
			 * before it would be equally necessary to
			 * propagate it to dreg.
			 */
			bt_set_reg(bt, dreg);
			bt_set_reg(bt, sreg);
		} else if (BPF_SRC(insn->code) == BPF_K) {
			/* dreg <cond> K
			 * Only dreg still needs precision before
			 * this insn, so for the K-based conditional
			 * there is nothing new to be marked.
			 */
		}
	} else if (class == BPF_LD) {
		if (!bt_is_reg_set(bt, dreg))
			return 0;
		bt_clear_reg(bt, dreg);
		/* It's ld_imm64 or ld_abs or ld_ind.
		 * For ld_imm64 no further tracking of precision
		 * into parent is necessary
		 */
		if (mode == BPF_IND || mode == BPF_ABS)
			/* to be analyzed */
			return -ENOTSUPP;
	}
	/* Propagate precision marks to linked registers, to account for
	 * registers marked as precise in this function.
	 */
	bt_sync_linked_regs(bt, hist);
	return 0;
}

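/* Worked example (hypothetical instruction sequence, editorial illustration
 * only):
 *
 *	10: r6 = 8	; constant
 *	11: r7 = r6	; copy
 *	12: r3 = r10
 *	13: r3 += r7	; scalar r7 used in pointer arithmetic, so the
 *			; verifier requests precision for r7
 *
 * Backtracking then walks insns 12, 11, 10 with the regs mask starting as
 * {r7} (insn 13 itself is skipped, its effect is in the current state):
 *	12: r3 = r10	-> dreg r3 is not in the mask, nothing changes
 *	11: r7 = r6	-> BPF_MOV by register: r7 cleared, r6 set ({r6})
 *	10: r6 = 8	-> BPF_MOV by constant: r6 cleared, mask is empty and
 *			   backtracking stops; r6 and r7 end up marked precise
 *			   in the checkpointed parent states along the way.
 */
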
/* the scalar precision tracking algorithm:
 * . at the start all registers have precise=false.
 * . scalar ranges are tracked as normal through alu and jmp insns.
 * . once precise value of the scalar register is used in:
 *   . ptr + scalar alu
 *   . if (scalar cond K|scalar)
 *   . helper_call(.., scalar, ...) where ARG_CONST is expected
 *   backtrack through the verifier states and mark all registers and
 *   stack slots with spilled constants that these scalar registers
 *   should be precise.
 * . during state pruning two registers (or spilled stack slots)
 *   are equivalent if both are not precise.
 *
 * Note the verifier cannot simply walk register parentage chain,
 * since many different registers and stack slots could have been
 * used to compute single precise scalar.
 *
 * The approach of starting with precise=true for all registers and then
 * backtrack to mark a register as not precise when the verifier detects
 * that program doesn't care about specific value (e.g., when helper
 * takes register as ARG_ANYTHING parameter) is not safe.
 *
 * It's ok to walk single parentage chain of the verifier states.
 * It's possible that this backtracking will go all the way till 1st insn.
 * All other branches will be explored for needing precision later.
 *
 * The backtracking needs to deal with cases like:
 *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
 *   ...
 *   if r5 > 0x79f goto pc+7
 *     R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
 *   ...
 *   call bpf_perf_event_output#25
 *     where .arg5_type = ARG_CONST_SIZE_OR_ZERO
 *
 * and this case:
 *   ...
 *   call foo // uses callee's r6 inside to compute r0
 *   ...
 *
 * to track above reg_mask/stack_mask needs to be independent for each frame.
 *
 * Also if parent's curframe > frame where backtracking started,
 * the verifier needs to mark registers in both frames, otherwise callees
 * may incorrectly prune callers. This is similar to
 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
 *
 * For now backtracking falls back into conservative marking.
 */

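/* A short illustration of the pruning rule above (editorial, simplified):
 * if a checkpointed state has r6=5 (not precise) and the current state has
 * r6=7, regsafe() treats the two as equivalent, because no instruction on any
 * path from the checkpoint used the exact value of r6. Had r6 later been used
 * as, e.g., an ARG_CONST_SIZE helper argument, backtracking would have marked
 * it precise in the checkpointed state and 5 != 7 would prevent pruning.
 */
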
static void mark_all_scalars_precise(struct bpf_verifier_env *env,
				     struct bpf_verifier_state *st)
{
	struct bpf_func_state *func;
	struct bpf_reg_state *reg;
	int i, j;

	if (env->log.level & BPF_LOG_LEVEL2) {
		verbose(env, "mark_precise: frame%d: falling back to forcing all scalars precise\n",
			st->curframe);
	}

	/* big hammer: mark all scalars precise in this path.
	 * pop_stack may still get !precise scalars.
	 * We also skip current state and go straight to first parent state,
	 * because precision markings in current non-checkpointed state are
	 * not needed. See why in the comment in __mark_chain_precision below.
	 */
	for (st = st->parent; st; st = st->parent) {
		for (i = 0; i <= st->curframe; i++) {
			func = st->frame[i];
			for (j = 0; j < BPF_REG_FP; j++) {
				reg = &func->regs[j];
				if (reg->type != SCALAR_VALUE || reg->precise)
					continue;
				reg->precise = true;
				if (env->log.level & BPF_LOG_LEVEL2) {
					verbose(env, "force_precise: frame%d: forcing r%d to be precise\n",
						i, j);
				}
			}
			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
				if (!is_spilled_reg(&func->stack[j]))
					continue;
				reg = &func->stack[j].spilled_ptr;
				if (reg->type != SCALAR_VALUE || reg->precise)
					continue;
				reg->precise = true;
				if (env->log.level & BPF_LOG_LEVEL2) {
					verbose(env, "force_precise: frame%d: forcing fp%d to be precise\n",
						i, -(j + 1) * 8);
				}
			}
		}
	}
}

static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	struct bpf_func_state *func;
	struct bpf_reg_state *reg;
	int i, j;

	for (i = 0; i <= st->curframe; i++) {
		func = st->frame[i];
		for (j = 0; j < BPF_REG_FP; j++) {
			reg = &func->regs[j];
			if (reg->type != SCALAR_VALUE)
				continue;
			reg->precise = false;
		}
		for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
			if (!is_spilled_reg(&func->stack[j]))
				continue;
			reg = &func->stack[j].spilled_ptr;
			if (reg->type != SCALAR_VALUE)
				continue;
			reg->precise = false;
		}
	}
}

/*
 * __mark_chain_precision() backtracks BPF program instruction sequence and
 * chain of verifier states making sure that register *regno* (if regno >= 0)
 * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked
 * SCALARS, as well as any other registers and slots that contribute to
 * a tracked state of given registers/stack slots, depending on specific BPF
 * assembly instructions (see backtrack_insns() for exact instruction handling
 * logic). This backtracking relies on recorded insn_hist and is able to
 * traverse entire chain of parent states. This process ends only when all the
 * necessary registers/slots and their transitive dependencies are marked as
 * precise.
 *
 * One important and subtle aspect is that precise marks *do not matter* in
 * the currently verified state (current state). It is important to understand
 * why this is the case.
 *
 * First, note that current state is the state that is not yet "checkpointed",
 * i.e., it is not yet put into env->explored_states, and it has no children
 * states as well. It's ephemeral, and can end up either a) being discarded if
 * compatible explored state is found at some point or BPF_EXIT instruction is
 * reached or b) checkpointed and put into env->explored_states, branching out
 * into one or more children states.
 *
 * In the former case, precise markings in current state are completely
 * ignored by state comparison code (see regsafe() for details). Only
 * checkpointed ("old") state precise markings are important, and if old
 * state's register/slot is precise, regsafe() assumes current state's
 * register/slot as precise and checks value ranges exactly and precisely. If
 * states turn out to be compatible, current state's necessary precise
 * markings and any required parent states' precise markings are enforced
 * after the fact with propagate_precision() logic. But it's
 * important to realize that in this case, even after marking current state
 * registers/slots as precise, we immediately discard current state. So what
 * actually matters is any of the precise markings propagated into current
 * state's parent states, which are always checkpointed (due to b) case above).
 * As such, for scenario a) it doesn't matter if current state has precise
 * markings set or not.
 *
 * Now, for the scenario b), checkpointing and forking into child(ren)
 * state(s). Note that before current state gets to checkpointing step, any
 * processed instruction always assumes precise SCALAR register/slot
 * knowledge: if precise value or range is useful to prune jump branch, BPF
 * verifier takes this opportunity enthusiastically. Similarly, when
 * register's value is used to calculate offset or memory address, exact
 * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to
 * what we mentioned above about state comparison ignoring precise markings,
 * the BPF verifier ignores and also assumes precise markings *at will* during
 * instruction verification process. But as verifier
 * assumes precision, it also propagates any precision dependencies across
 * parent states, which are not yet finalized, so can be further restricted
 * based on new knowledge gained from restrictions enforced by their children
 * states. This is so that once those parent states are finalized, i.e., when
 * they have no more active children state, state comparison logic in
 * is_state_visited() would enforce strict and precise SCALAR ranges, if
 * required for correctness.
 *
 * To build a bit more intuition, note also that once a state is checkpointed,
 * the path we took to get to that state is not important. This is crucial
 * property for state pruning. When state is checkpointed and finalized at
 * some instruction index, it can be correctly and safely used to "short
 * circuit" any *compatible* state that reaches exactly the same instruction
 * index. I.e., if we jumped to that instruction from a completely different
 * code path than original finalized state was derived from, it doesn't
 * matter, current state can be discarded because from that instruction
 * forward having a compatible state will ensure we will safely reach the
 * exit. States describe preconditions for further exploration, but completely
 * forget the history of how we got here.
 *
 * This also means that even if we needed precise SCALAR range to get to
 * finalized state, but from that point forward *that same* SCALAR register is
 * never used in a precise context (i.e., its precise value is not needed for
 * correctness), it's correct and safe to mark such register as "imprecise"
 * (i.e., precise marking set to false). This is what we rely on when we do
 * not set precise marking in current state. If no child state requires
 * precision for any given SCALAR register, it's safe to dictate that it can
 * be imprecise. If any child state does require this register to be precise,
 * we'll mark it precise later retroactively during precise markings
 * propagation from child state to parent states.
 *
 * Skipping precise marking setting in current state is a mild version of
 * relying on the above observation. But we can utilize this property even
 * more aggressively by proactively forgetting any precise marking in the
 * current state (which we inherited from the parent state), right before we
 * checkpoint it and branch off into new child state. This is done by
 * mark_all_scalars_imprecise() to hopefully get more permissive and generic
 * finalized states which help in short circuiting more future states.
 */
static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
{
	struct backtrack_state *bt = &env->bt;
	struct bpf_verifier_state *st = env->cur_state;
	int first_idx = st->first_insn_idx;
	int last_idx = env->insn_idx;
	int subseq_idx = -1;
	struct bpf_func_state *func;
	struct bpf_reg_state *reg;
	bool skip_first = true;
	int i, fr, err;

	if (!env->bpf_capable)
		return 0;

	/* set frame number from which we are starting to backtrack */
	bt_init(bt, env->cur_state->curframe);

	/* Do sanity checks against current state of register and/or stack
	 * slot, but don't set precise flag in current state, as precision
	 * tracking in the current state is unnecessary.
	 */
	func = st->frame[bt->frame];
	if (regno >= 0) {
		reg = &func->regs[regno];
		if (reg->type != SCALAR_VALUE) {
			WARN_ONCE(1, "backtracing misuse");
			return -EFAULT;
		}
		bt_set_reg(bt, regno);
	}

	if (bt_empty(bt))
		return 0;

	for (;;) {
		DECLARE_BITMAP(mask, 64);
		u32 hist_start = st->insn_hist_start;
		u32 hist_end = st->insn_hist_end;
		struct bpf_insn_hist_entry *hist;

		if (env->log.level & BPF_LOG_LEVEL2) {
			verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d \n",
				bt->frame, last_idx, first_idx, subseq_idx);
		}

		if (last_idx < 0) {
			/* we are at the entry into subprog, which
			 * is expected for global funcs, but only if
			 * requested precise registers are R1-R5
			 * (which are global func's input arguments)
			 */
			if (st->curframe == 0 &&
			    st->frame[0]->subprogno > 0 &&
			    st->frame[0]->callsite == BPF_MAIN_FUNC &&
			    bt_stack_mask(bt) == 0 &&
			    (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) == 0) {
				bitmap_from_u64(mask, bt_reg_mask(bt));
				for_each_set_bit(i, mask, 32) {
					reg = &st->frame[0]->regs[i];
					bt_clear_reg(bt, i);
					if (reg->type == SCALAR_VALUE)
						reg->precise = true;
				}
				return 0;
			}

			verbose(env, "BUG backtracking func entry subprog %d reg_mask %x stack_mask %llx\n",
				st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt));
			WARN_ONCE(1, "verifier backtracking bug");
			return -EFAULT;
		}

		for (i = last_idx;;) {
			if (skip_first) {
				err = 0;
				skip_first = false;
			} else {
				hist = get_insn_hist_entry(env, hist_start, hist_end, i);
				err = backtrack_insn(env, i, subseq_idx, hist, bt);
			}
			if (err == -ENOTSUPP) {
				mark_all_scalars_precise(env, env->cur_state);
				bt_reset(bt);
				return 0;
			} else if (err) {
				return err;
			}
			if (bt_empty(bt))
				/* Found assignment(s) into tracked register in this state.
				 * Since this state is already marked, just return.
				 * Nothing to be tracked further in the parent state.
				 */
				return 0;
			subseq_idx = i;
			i = get_prev_insn_idx(env, st, i, hist_start, &hist_end);
			if (i == -ENOENT)
				break;
			if (i >= env->prog->len) {
				/* This can happen if backtracking reached insn 0
				 * and there are still reg_mask or stack_mask
				 * to backtrack.
				 * It means the backtracking missed the spot where
				 * particular register was initialized with a constant.
				 */
				verbose(env, "BUG backtracking idx %d\n", i);
				WARN_ONCE(1, "verifier backtracking bug");
				return -EFAULT;
			}
		}
		st = st->parent;
		if (!st)
			break;

		for (fr = bt->frame; fr >= 0; fr--) {
			func = st->frame[fr];
			bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr));
			for_each_set_bit(i, mask, 32) {
				reg = &func->regs[i];
				if (reg->type != SCALAR_VALUE) {
					bt_clear_frame_reg(bt, fr, i);
					continue;
				}
				if (reg->precise)
					bt_clear_frame_reg(bt, fr, i);
				else
					reg->precise = true;
			}

			bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr));
			for_each_set_bit(i, mask, 64) {
				if (i >= func->allocated_stack / BPF_REG_SIZE) {
					verbose(env, "BUG backtracking (stack slot %d, total slots %d)\n",
						i, func->allocated_stack / BPF_REG_SIZE);
					WARN_ONCE(1, "verifier backtracking bug (stack slot out of bounds)");
					return -EFAULT;
				}

				if (!is_spilled_scalar_reg(&func->stack[i])) {
					bt_clear_frame_slot(bt, fr, i);
					continue;
				}
				reg = &func->stack[i].spilled_ptr;
				if (reg->precise)
					bt_clear_frame_slot(bt, fr, i);
				else
					reg->precise = true;
			}
			if (env->log.level & BPF_LOG_LEVEL2) {
				fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN,
					     bt_frame_reg_mask(bt, fr));
				verbose(env, "mark_precise: frame%d: parent state regs=%s ",
					fr, env->tmp_str_buf);
				fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN,
					       bt_frame_stack_mask(bt, fr));
				verbose(env, "stack=%s: ", env->tmp_str_buf);
				print_verifier_state(env, func, true);
			}
		}

		if (bt_empty(bt))
			return 0;

		subseq_idx = first_idx;
		last_idx = st->last_insn_idx;
		first_idx = st->first_insn_idx;
	}

	/* if we still have requested precise regs or slots, we missed
	 * something (e.g., stack access through non-r10 register), so
	 * fallback to marking all precise
	 */
	if (!bt_empty(bt)) {
		mark_all_scalars_precise(env, env->cur_state);
		bt_reset(bt);
	}

	return 0;
}

int mark_chain_precision(struct bpf_verifier_env *env, int regno)
{
	return __mark_chain_precision(env, regno);
}

/* mark_chain_precision_batch() assumes that env->bt is set in the caller to
 * desired reg and stack masks across all relevant frames
 */
static int mark_chain_precision_batch(struct bpf_verifier_env *env)
{
	return __mark_chain_precision(env, -1);
}

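/* Typical usage (illustrative sketch, not a new call site): when a scalar
 * register is about to be used where an exact value matters, e.g. as a
 * constant-size helper argument, the caller does
 *
 *	err = mark_chain_precision(env, regno);
 *	if (err)
 *		return err;
 *
 * For the batch variant the caller first populates env->bt with the desired
 * masks, e.g. via bt_set_frame_reg(&env->bt, frame, regno) for each register
 * of interest, and then calls mark_chain_precision_batch(env).
 */
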
static bool is_spillable_regtype(enum bpf_reg_type type)
{
	switch (base_type(type)) {
	case PTR_TO_MAP_VALUE:
	case PTR_TO_STACK:
	case PTR_TO_CTX:
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET_END:
	case PTR_TO_FLOW_KEYS:
	case CONST_PTR_TO_MAP:
	case PTR_TO_SOCKET:
	case PTR_TO_SOCK_COMMON:
	case PTR_TO_TCP_SOCK:
	case PTR_TO_XDP_SOCK:
	case PTR_TO_BTF_ID:
	case PTR_TO_BUF:
	case PTR_TO_MEM:
	case PTR_TO_FUNC:
	case PTR_TO_MAP_KEY:
		return true;
	default:
		return false;
	}
}

/* Does this register contain a constant zero? */
static bool register_is_null(struct bpf_reg_state *reg)
{
	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
}

/* check if register is a constant scalar value */
static bool is_reg_const(struct bpf_reg_state *reg, bool subreg32)
{
	return reg->type == SCALAR_VALUE &&
	       tnum_is_const(subreg32 ? tnum_subreg(reg->var_off) : reg->var_off);
}

/* assuming is_reg_const() is true, return constant value of a register */
static u64 reg_const_value(struct bpf_reg_state *reg, bool subreg32)
{
	return subreg32 ? tnum_subreg(reg->var_off).value : reg->var_off.value;
}

static bool __is_pointer_value(bool allow_ptr_leaks,
			       const struct bpf_reg_state *reg)
{
	if (allow_ptr_leaks)
		return false;

	return reg->type != SCALAR_VALUE;
}

static void assign_scalar_id_before_mov(struct bpf_verifier_env *env,
					struct bpf_reg_state *src_reg)
{
	if (src_reg->type != SCALAR_VALUE)
		return;

	if (src_reg->id & BPF_ADD_CONST) {
		/*
		 * The verifier is processing rX = rY insn and
		 * rY->id has special linked register already.
		 * Clear it, since multiple rX += const are not supported.
		 */
		src_reg->id = 0;
		src_reg->off = 0;
	}

	if (!src_reg->id && !tnum_is_const(src_reg->var_off))
		/* Ensure that src_reg has a valid ID that will be copied to
		 * dst_reg and then will be used by sync_linked_regs() to
		 * propagate min/max range.
		 */
		src_reg->id = ++env->id_gen;
}

/* Copy src state preserving dst->parent and dst->live fields */
static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
{
	struct bpf_reg_state *parent = dst->parent;
	enum bpf_reg_liveness live = dst->live;

	*dst = *src;
	dst->parent = parent;
	dst->live = live;
}

static void save_register_state(struct bpf_verifier_env *env,
				struct bpf_func_state *state,
				int spi, struct bpf_reg_state *reg,
				int size)
{
	int i;

	copy_register_state(&state->stack[spi].spilled_ptr, reg);
	if (size == BPF_REG_SIZE)
		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;

	for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
		state->stack[spi].slot_type[i - 1] = STACK_SPILL;

	/* size < 8 bytes spill */
	for (; i; i--)
		mark_stack_slot_misc(env, &state->stack[spi].slot_type[i - 1]);
}

static bool is_bpf_st_mem(struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM;
}

static int get_reg_width(struct bpf_reg_state *reg)
{
	return fls64(reg->umax_value);
}

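/* Example (editorial): for a register known to be in [0, 255], umax_value is
 * 255 and fls64(255) == 8, so get_reg_width() reports that 8 bits (one byte)
 * are enough to hold every possible value; a 1-byte spill of such a register
 * loses no information, while umax_value == 300 needs 9 bits and would not
 * fit.
 */
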
/* See comment for mark_fastcall_pattern_for_call() */
static void check_fastcall_stack_contract(struct bpf_verifier_env *env,
					  struct bpf_func_state *state, int insn_idx, int off)
{
	struct bpf_subprog_info *subprog = &env->subprog_info[state->subprogno];
	struct bpf_insn_aux_data *aux = env->insn_aux_data;
	int i;

	if (subprog->fastcall_stack_off <= off || aux[insn_idx].fastcall_pattern)
		return;
	/* access to the region [max_stack_depth .. fastcall_stack_off)
	 * from something that is not a part of the fastcall pattern,
	 * disable fastcall rewrites for current subprogram by setting
	 * fastcall_stack_off to a value smaller than any possible offset.
	 */
	subprog->fastcall_stack_off = S16_MIN;
	/* reset fastcall aux flags within subprogram,
	 * happens at most once per subprogram
	 */
	for (i = subprog->start; i < (subprog + 1)->start; ++i) {
		aux[i].fastcall_spills_num = 0;
		aux[i].fastcall_pattern = 0;
	}
}

/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
 * stack boundary and alignment are checked in check_mem_access()
 */
static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
				       /* stack frame we're writing to */
				       struct bpf_func_state *state,
				       int off, int size, int value_regno,
				       int insn_idx)
{
	struct bpf_func_state *cur; /* state of the current function */
	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
	struct bpf_reg_state *reg = NULL;
	int insn_flags = insn_stack_access_flags(state->frameno, spi);

	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
	 * so it's aligned access and [off, off + size) are within stack limits
	 */
	if (!env->allow_ptr_leaks &&
	    is_spilled_reg(&state->stack[spi]) &&
	    size != BPF_REG_SIZE) {
		verbose(env, "attempt to corrupt spilled pointer on stack\n");
		return -EACCES;
	}

	cur = env->cur_state->frame[env->cur_state->curframe];
	if (value_regno >= 0)
		reg = &cur->regs[value_regno];
	if (!env->bypass_spec_v4) {
		bool sanitize = reg && is_spillable_regtype(reg->type);

		for (i = 0; i < size; i++) {
			u8 type = state->stack[spi].slot_type[i];

			if (type != STACK_MISC && type != STACK_ZERO) {
				sanitize = true;
				break;
			}
		}

		if (sanitize)
			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
	}

	err = destroy_if_dynptr_stack_slot(env, state, spi);
	if (err)
		return err;

	check_fastcall_stack_contract(env, state, insn_idx, off);
	mark_stack_slot_scratched(env, spi);
	if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) {
		bool reg_value_fits;

		reg_value_fits = get_reg_width(reg) <= BITS_PER_BYTE * size;
		/* Make sure that reg had an ID to build a relation on spill. */
		if (reg_value_fits)
			assign_scalar_id_before_mov(env, reg);
		save_register_state(env, state, spi, reg, size);
		/* Break the relation on a narrowing spill. */
		if (!reg_value_fits)
			state->stack[spi].spilled_ptr.id = 0;
	} else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
		   env->bpf_capable) {
		struct bpf_reg_state *tmp_reg = &env->fake_reg[0];

		memset(tmp_reg, 0, sizeof(*tmp_reg));
		__mark_reg_known(tmp_reg, insn->imm);
		tmp_reg->type = SCALAR_VALUE;
		save_register_state(env, state, spi, tmp_reg, size);
	} else if (reg && is_spillable_regtype(reg->type)) {
		/* register containing pointer is being spilled into stack */
		if (size != BPF_REG_SIZE) {
			verbose_linfo(env, insn_idx, "; ");
			verbose(env, "invalid size of register spill\n");
			return -EACCES;
		}
		if (state != cur && reg->type == PTR_TO_STACK) {
			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
			return -EINVAL;
		}
		save_register_state(env, state, spi, reg, size);
	} else {
		u8 type = STACK_MISC;

		/* regular write of data into stack destroys any spilled ptr */
		state->stack[spi].spilled_ptr.type = NOT_INIT;
		/* Mark slots as STACK_MISC if they belonged to spilled ptr/dynptr/iter. */
		if (is_stack_slot_special(&state->stack[spi]))
			for (i = 0; i < BPF_REG_SIZE; i++)
				scrub_spilled_slot(&state->stack[spi].slot_type[i]);

		/* only mark the slot as written if all 8 bytes were written
		 * otherwise read propagation may incorrectly stop too soon
		 * when stack slots are partially written.
		 * This heuristic means that read propagation will be
		 * conservative, since it will add reg_live_read marks
		 * to stack slots all the way to first state when programs
		 * writes+reads less than 8 bytes
		 */
		if (size == BPF_REG_SIZE)
			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;

		/* when we zero initialize stack slots mark them as such */
		if ((reg && register_is_null(reg)) ||
		    (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) {
			/* STACK_ZERO case happened because register spill
			 * wasn't properly aligned at the stack slot boundary,
			 * so it's not a register spill anymore; force
			 * originating register to be precise to make
			 * STACK_ZERO correct for subsequent states
			 */
			err = mark_chain_precision(env, value_regno);
			if (err)
				return err;
			type = STACK_ZERO;
		}

		/* Mark slots affected by this stack write. */
		for (i = 0; i < size; i++)
			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type;
		insn_flags = 0; /* not a register spill */
	}

	if (insn_flags)
		return push_insn_history(env, env->cur_state, insn_flags, 0);
	return 0;
}

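/* Example of the scalar spill path above (hypothetical BPF fragment,
 * editorial illustration):
 *
 *	r6 = 42
 *	*(u64 *)(r10 - 8) = r6
 *
 * The store is 8-byte aligned and r6 is a SCALAR_VALUE, so for a bpf_capable
 * program save_register_state() copies r6's full state into
 * state->stack[spi].spilled_ptr, marks all eight slot_type bytes STACK_SPILL
 * and sets REG_LIVE_WRITTEN. If instead a single byte were stored while r6
 * could exceed 255, reg_value_fits would be false and spilled_ptr.id would be
 * cleared, breaking the spill/fill relation on that narrowing store.
 */
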
/* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
 * known to contain a variable offset.
 * This function checks whether the write is permitted and conservatively
 * tracks the effects of the write, considering that each stack slot in the
 * dynamic range is potentially written to.
 *
 * 'off' includes 'regno->off'.
 * 'value_regno' can be -1, meaning that an unknown value is being written to
 * the stack.
 *
 * Spilled pointers in range are not marked as written because we don't know
 * what's going to be actually written. This means that read propagation for
 * future reads cannot be terminated by this write.
 *
 * For privileged programs, uninitialized stack slots are considered
 * initialized by this write (even though we don't know exactly what offsets
 * are going to be written to). The idea is that we don't want the verifier to
 * reject future reads that access slots written to through variable offsets.
 */
static int check_stack_write_var_off(struct bpf_verifier_env *env,
				     /* func where register points to */
				     struct bpf_func_state *state,
				     int ptr_regno, int off, int size,
				     int value_regno, int insn_idx)
{
	struct bpf_func_state *cur; /* state of the current function */
	int min_off, max_off;
	int i, err;
	struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
	bool writing_zero = false;
	/* set if the fact that we're writing a zero is used to let any
	 * stack slots remain STACK_ZERO
	 */
	bool zero_used = false;

	cur = env->cur_state->frame[env->cur_state->curframe];
	ptr_reg = &cur->regs[ptr_regno];
	min_off = ptr_reg->smin_value + off;
	max_off = ptr_reg->smax_value + off + size;
	if (value_regno >= 0)
		value_reg = &cur->regs[value_regno];
	if ((value_reg && register_is_null(value_reg)) ||
	    (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0))
		writing_zero = true;

	for (i = min_off; i < max_off; i++) {
		int spi;

		spi = __get_spi(i);
		err = destroy_if_dynptr_stack_slot(env, state, spi);
		if (err)
			return err;
	}

	check_fastcall_stack_contract(env, state, insn_idx, min_off);
	/* Variable offset writes destroy any spilled pointers in range. */
	for (i = min_off; i < max_off; i++) {
		u8 new_type, *stype;
		int slot, spi;

		slot = -i - 1;
		spi = slot / BPF_REG_SIZE;
		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
		mark_stack_slot_scratched(env, spi);

		if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) {
			/* Reject the write if range we may write to has not
			 * been initialized beforehand. If we didn't reject
			 * here, the ptr status would be erased below (even
			 * though not all slots are actually overwritten),
			 * possibly opening the door to leaks.
			 *
			 * We do however catch STACK_INVALID case below, and
			 * only allow reading possibly uninitialized memory
			 * later for CAP_PERFMON, as the write may not happen to
			 * that slot.
			 */
			verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
				insn_idx, i);
			return -EINVAL;
		}

		/* If writing_zero and the spi slot contains a spill of value 0,
		 * maintain the spill type.
		 */
		if (writing_zero && *stype == STACK_SPILL &&
		    is_spilled_scalar_reg(&state->stack[spi])) {
			struct bpf_reg_state *spill_reg = &state->stack[spi].spilled_ptr;

			if (tnum_is_const(spill_reg->var_off) && spill_reg->var_off.value == 0) {
				zero_used = true;
				continue;
			}
		}

		/* Erase all other spilled pointers. */
		state->stack[spi].spilled_ptr.type = NOT_INIT;

		/* Update the slot type. */
		new_type = STACK_MISC;
		if (writing_zero && *stype == STACK_ZERO) {
			new_type = STACK_ZERO;
			zero_used = true;
		}
		/* If the slot is STACK_INVALID, we check whether it's OK to
		 * pretend that it will be initialized by this write. The slot
		 * might not actually be written to, and so if we mark it as
		 * initialized future reads might leak uninitialized memory.
		 * For privileged programs, we will accept such reads to slots
		 * that may or may not be written because, if we rejected
		 * them, the error would be too confusing.
		 */
		if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
			verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
				insn_idx, i);
			return -EINVAL;
		}
		*stype = new_type;
	}
	if (zero_used) {
		/* backtracking doesn't work for STACK_ZERO yet. */
		err = mark_chain_precision(env, value_regno);
		if (err)
			return err;
	}
	return 0;
}

/* When register 'dst_regno' is assigned some values from stack[min_off,
 * max_off), we set the register's type according to the types of the
 * respective stack slots. If all the stack values are known to be zeros, then
 * so is the destination reg. Otherwise, the register is considered to be
 * SCALAR. This function does not deal with register filling; the caller must
 * ensure that all spilled registers in the stack range have been marked as
 * read.
 */
static void mark_reg_stack_read(struct bpf_verifier_env *env,
				/* func where src register points to */
				struct bpf_func_state *ptr_state,
				int min_off, int max_off, int dst_regno)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	int i, slot, spi;
	u8 *stype;
	int zeros = 0;

	for (i = min_off; i < max_off; i++) {
		slot = -i - 1;
		spi = slot / BPF_REG_SIZE;
		mark_stack_slot_scratched(env, spi);
		stype = ptr_state->stack[spi].slot_type;
		if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
			break;
		zeros++;
	}
	if (zeros == max_off - min_off) {
		/* Any access_size read into register is zero extended,
		 * so the whole register == const_zero.
		 */
		__mark_reg_const_zero(env, &state->regs[dst_regno]);
	} else {
		/* have read misc data from the stack */
		mark_reg_unknown(env, state->regs, dst_regno);
	}
	state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
}

/* Read the stack at 'off' and put the results into the register indicated by
 * 'dst_regno'. It handles reg filling if the addressed stack slot is a
 * spilled register.
 *
 * 'dst_regno' can be -1, meaning that the read value is not going to a
 * register.
 *
 * The access is assumed to be within the current stack bounds.
 */
static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
				      /* func where src register points to */
				      struct bpf_func_state *reg_state,
				      int off, int size, int dst_regno)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
	struct bpf_reg_state *reg;
	u8 *stype, type;
	int insn_flags = insn_stack_access_flags(reg_state->frameno, spi);

	stype = reg_state->stack[spi].slot_type;
	reg = &reg_state->stack[spi].spilled_ptr;

	mark_stack_slot_scratched(env, spi);
	check_fastcall_stack_contract(env, state, env->insn_idx, off);

	if (is_spilled_reg(&reg_state->stack[spi])) {
		u8 spill_size = 1;

		for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
			spill_size++;

		if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
			if (reg->type != SCALAR_VALUE) {
				verbose_linfo(env, env->insn_idx, "; ");
				verbose(env, "invalid size of register fill\n");
				return -EACCES;
			}

			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
			if (dst_regno < 0)
				return 0;

			if (size <= spill_size &&
			    bpf_stack_narrow_access_ok(off, size, spill_size)) {
				/* The earlier check_reg_arg() has decided the
				 * subreg_def for this insn. Save it first.
				 */
				s32 subreg_def = state->regs[dst_regno].subreg_def;

				copy_register_state(&state->regs[dst_regno], reg);
				state->regs[dst_regno].subreg_def = subreg_def;

				/* Break the relation on a narrowing fill.
				 * coerce_reg_to_size will adjust the boundaries.
				 */
				if (get_reg_width(reg) > size * BITS_PER_BYTE)
					state->regs[dst_regno].id = 0;
			} else {
				int spill_cnt = 0, zero_cnt = 0;

				for (i = 0; i < size; i++) {
					type = stype[(slot - i) % BPF_REG_SIZE];
					if (type == STACK_SPILL) {
						spill_cnt++;
						continue;
					}
					if (type == STACK_MISC)
						continue;
					if (type == STACK_ZERO) {
						zero_cnt++;
						continue;
					}
					if (type == STACK_INVALID && env->allow_uninit_stack)
						continue;
					verbose(env, "invalid read from stack off %d+%d size %d\n",
						off, i, size);
					return -EACCES;
				}

				if (spill_cnt == size &&
				    tnum_is_const(reg->var_off) && reg->var_off.value == 0) {
					__mark_reg_const_zero(env, &state->regs[dst_regno]);
					/* this IS register fill, so keep insn_flags */
				} else if (zero_cnt == size) {
					/* similarly to mark_reg_stack_read(), preserve zeroes */
					__mark_reg_const_zero(env, &state->regs[dst_regno]);
					insn_flags = 0; /* not restoring original register state */
				} else {
					mark_reg_unknown(env, state->regs, dst_regno);
					insn_flags = 0; /* not restoring original register state */
				}
			}
			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
		} else if (dst_regno >= 0) {
			/* restore register state from stack */
			copy_register_state(&state->regs[dst_regno], reg);
			/* mark reg as written since spilled pointer state likely
			 * has its liveness marks cleared by is_state_visited()
			 * which resets stack/reg liveness for state transitions
			 */
			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
		} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
			/* If dst_regno==-1, the caller is asking us whether
			 * it is acceptable to use this value as a SCALAR_VALUE
			 * (e.g. for XADD).
			 * We must not allow unprivileged callers to do that
			 * with spilled pointers.
			 */
			verbose(env, "leaking pointer from stack off %d\n",
				off);
			return -EACCES;
		} else {
			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
		}
	} else {
		for (i = 0; i < size; i++) {
			type = stype[(slot - i) % BPF_REG_SIZE];
			if (type == STACK_MISC)
				continue;
			if (type == STACK_ZERO)
				continue;
			if (type == STACK_INVALID && env->allow_uninit_stack)
				continue;
			verbose(env, "invalid read from stack off %d+%d size %d\n",
				off, i, size);
			return -EACCES;
		}
		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
		if (dst_regno >= 0)
			mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
		insn_flags = 0; /* we are not restoring spilled register */
	}
	if (insn_flags)
		return push_insn_history(env, env->cur_state, insn_flags, 0);
	return 0;
}

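/* Example of a register fill handled above (hypothetical fragment, editorial
 * illustration):
 *
 *	*(u64 *)(r10 - 8) = r6	; spill
 *	...
 *	r1 = *(u64 *)(r10 - 8)	; fill
 *
 * The slot is a full 8-byte spill, so copy_register_state() restores the
 * spilled register's state (type, range, id) into r1 and marks it
 * REG_LIVE_WRITTEN. A narrower fill such as r1 = *(u32 *)(r10 - 8) is only
 * allowed for spilled scalars; when the spilled value may not fit the smaller
 * size, r1's id is cleared so the spill/fill relation is not kept.
 */
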
enum bpf_access_src {
	ACCESS_DIRECT = 1,  /* the access is performed by an instruction */
	ACCESS_HELPER = 2,  /* the access is performed by a helper */
};

static int check_stack_range_initialized(struct bpf_verifier_env *env,
					 int regno, int off, int access_size,
					 bool zero_size_allowed,
					 enum bpf_access_src type,
					 struct bpf_call_arg_meta *meta);

static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
{
	return cur_regs(env) + regno;
}

/* Read the stack at 'ptr_regno + off' and put the result into the register
 * 'dst_regno'.
 * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
 * but not its variable offset.
 * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
 *
 * As opposed to check_stack_read_fixed_off, this function doesn't deal with
 * filling registers (i.e. reads of spilled register cannot be detected when
 * the offset is not fixed). We conservatively mark 'dst_regno' as containing
 * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
 * offset; for a fixed offset check_stack_read_fixed_off should be used
 * instead.
 */
static int check_stack_read_var_off(struct bpf_verifier_env *env,
				    int ptr_regno, int off, int size, int dst_regno)
{
	/* The state of the source register. */
	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
	struct bpf_func_state *ptr_state = func(env, reg);
	int err;
	int min_off, max_off;

	/* Note that we pass a NULL meta, so raw access will not be permitted.
	 */
	err = check_stack_range_initialized(env, ptr_regno, off, size,
					    false, ACCESS_DIRECT, NULL);
	if (err)
		return err;

	min_off = reg->smin_value + off;
	max_off = reg->smax_value + off;
	mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
	check_fastcall_stack_contract(env, ptr_state, env->insn_idx, min_off);
	return 0;
}

/* check_stack_read dispatches to check_stack_read_fixed_off or
 * check_stack_read_var_off.
 *
 * The caller must ensure that the offset falls within the allocated stack
 * bounds.
 *
 * 'dst_regno' is a register which will receive the value from the stack. It
 * can be -1, meaning that the read value is not going to a register.
 */
static int check_stack_read(struct bpf_verifier_env *env,
			    int ptr_regno, int off, int size,
			    int dst_regno)
{
	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
	struct bpf_func_state *state = func(env, reg);
	int err;
	/* Some accesses are only permitted with a static offset. */
	bool var_off = !tnum_is_const(reg->var_off);

	/* The offset is required to be static when reads don't go to a
	 * register, in order to not leak pointers (see
	 * check_stack_read_fixed_off).
	 */
	if (dst_regno < 0 && var_off) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
			tn_buf, off, size);
		return -EACCES;
	}
	/* Variable offset is prohibited for unprivileged mode for simplicity
	 * since it requires corresponding support in Spectre masking for stack
	 * ALU. See also retrieve_ptr_limit(). The check in
	 * check_stack_access_for_ptr_arithmetic() called by
	 * adjust_ptr_min_max_vals() prevents users from creating stack pointers
	 * with variable offsets, therefore no check is required here. Further,
	 * just checking it here would be insufficient as speculative stack
	 * writes could still lead to unsafe speculative behaviour.
	 */
	if (!var_off) {
		off += reg->var_off.value;
		err = check_stack_read_fixed_off(env, state, off, size,
						 dst_regno);
	} else {
		/* Variable offset stack reads need more conservative handling
		 * than fixed offset ones. Note that dst_regno >= 0 on this
		 * branch.
		 */
		err = check_stack_read_var_off(env, ptr_regno, off, size,
					       dst_regno);
	}
	return err;
}

/* check_stack_write dispatches to check_stack_write_fixed_off or
 * check_stack_write_var_off.
 *
 * 'ptr_regno' is the register used as a pointer into the stack.
 * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
 * 'value_regno' is the register whose value we're writing to the stack. It can
 * be -1, meaning that we're not writing from a register.
 *
 * The caller must ensure that the offset falls within the maximum stack size.
 */
static int check_stack_write(struct bpf_verifier_env *env,
			     int ptr_regno, int off, int size,
			     int value_regno, int insn_idx)
{
	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
	struct bpf_func_state *state = func(env, reg);
	int err;

	if (tnum_is_const(reg->var_off)) {
		off += reg->var_off.value;
		err = check_stack_write_fixed_off(env, state, off, size,
						  value_regno, insn_idx);
	} else {
		/* Variable offset stack writes need more conservative handling
		 * than fixed offset ones.
		 */
		err = check_stack_write_var_off(env, state,
						ptr_regno, off, size,
						value_regno, insn_idx);
	}
	return err;
}

static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
				 int off, int size, enum bpf_access_type type)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_map *map = regs[regno].map_ptr;
	u32 cap = bpf_map_flags_to_cap(map);

	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
		verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}

	if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
		verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}

	return 0;
}

/* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
static int __check_mem_access(struct bpf_verifier_env *env, int regno,
			      int off, int size, u32 mem_size,
			      bool zero_size_allowed)
{
	bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
	struct bpf_reg_state *reg;

	if (off >= 0 && size_ok && (u64)off + size <= mem_size)
		return 0;

	reg = &cur_regs(env)[regno];
	switch (reg->type) {
	case PTR_TO_MAP_KEY:
		verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n",
			mem_size, off, size);
		break;
	case PTR_TO_MAP_VALUE:
		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
			mem_size, off, size);
		break;
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET_END:
		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
			off, size, regno, reg->id, off, mem_size);
		break;
	default:
		verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
			mem_size, off, size);
	}

	return -EACCES;
}

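/* Example (editorial): for a map value with value_size == 64, an access with
 * off == 60 and size == 8 fails the (u64)off + size <= mem_size test
 * (68 > 64) and is reported as "invalid access to map value", while off == 56
 * with size == 8 is accepted. The u64 cast keeps a huge positive off from
 * wrapping around during the addition.
 */
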
/* check read/write into a memory region with possible variable offset */
static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
				   int off, int size, u32 mem_size,
				   bool zero_size_allowed)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *reg = &state->regs[regno];
	int err;

	/* We may have adjusted the register pointing to memory region, so we
	 * need to try adding each of min_value and max_value to off
	 * to make sure our theoretical access will be safe.
	 *
	 * The minimum value is only important with signed
	 * comparisons where we can't assume the floor of a
	 * value is 0. If we are using signed variables for our
	 * indexes we need to make sure that whatever we use
	 * will have a set floor within our range.
	 */
	if (reg->smin_value < 0 &&
	    (reg->smin_value == S64_MIN ||
	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
	     reg->smin_value + off < 0)) {
		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
			regno);
		return -EACCES;
	}
	err = __check_mem_access(env, regno, reg->smin_value + off, size,
				 mem_size, zero_size_allowed);
	if (err) {
		verbose(env, "R%d min value is outside of the allowed memory range\n",
			regno);
		return err;
	}

	/* If we haven't set a max value then we need to bail since we can't be
	 * sure we won't do bad things.
	 * If reg->umax_value + off could overflow, treat that as unbounded too.
	 */
	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
		verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
			regno);
		return -EACCES;
	}
	err = __check_mem_access(env, regno, reg->umax_value + off, size,
				 mem_size, zero_size_allowed);
	if (err) {
		verbose(env, "R%d max value is outside of the allowed memory range\n",
			regno);
		return err;
	}

	return 0;
}

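/* Example (editorial): if the index register has smin_value == -4 and
 * off == 0, then reg->smin_value + off < 0 and the access is rejected with
 * the "min value is negative" message, prompting an explicit bounds check.
 * If instead the register is known to be in [0, 100], both probes run:
 * __check_mem_access(off + 0, ...) and __check_mem_access(off + 100, ...),
 * so the access is allowed only when the whole possible range fits within
 * mem_size.
 */
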
static int __check_ptr_off_reg(struct bpf_verifier_env *env,
			       const struct bpf_reg_state *reg, int regno,
			       bool fixed_off_ok)
{
	/* Access to this pointer-typed register or passing it to a helper
	 * is only allowed in its original, unmodified form.
	 */

	if (reg->off < 0) {
		verbose(env, "negative offset %s ptr R%d off=%d disallowed\n",
			reg_type_str(env, reg->type), regno, reg->off);
		return -EACCES;
	}

	if (!fixed_off_ok && reg->off) {
		verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
			reg_type_str(env, reg->type), regno, reg->off);
		return -EACCES;
	}

	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "variable %s access var_off=%s disallowed\n",
			reg_type_str(env, reg->type), tn_buf);
		return -EACCES;
	}

	return 0;
}

static int check_ptr_off_reg(struct bpf_verifier_env *env,
			     const struct bpf_reg_state *reg, int regno)
{
	return __check_ptr_off_reg(env, reg, regno, false);
}

static int map_kptr_match_type(struct bpf_verifier_env *env,
			       struct btf_field *kptr_field,
			       struct bpf_reg_state *reg, u32 regno)
{
	const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id);
	int perm_flags;
	const char *reg_name = "";

	if (btf_is_kernel(reg->btf)) {
		perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU;

		/* Only unreferenced case accepts untrusted pointers */
		if (kptr_field->type == BPF_KPTR_UNREF)
			perm_flags |= PTR_UNTRUSTED;
	} else {
		perm_flags = PTR_MAYBE_NULL | MEM_ALLOC;
		if (kptr_field->type == BPF_KPTR_PERCPU)
			perm_flags |= MEM_PERCPU;
	}

	if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags))
		goto bad_type;

	/* We need to verify reg->type and reg->btf, before accessing reg->btf */
	reg_name = btf_type_name(reg->btf, reg->btf_id);

	/* For ref_ptr case, release function check should ensure we get one
	 * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the
	 * normal store of unreferenced kptr, we must ensure var_off is zero.
	 * Since ref_ptr cannot be accessed directly by BPF insns, checks for
	 * reg->off and reg->ref_obj_id are not needed here.
	 */
	if (__check_ptr_off_reg(env, reg, regno, true))
		return -EACCES;

	/* A full type match is needed, as BTF can be vmlinux, module or prog BTF, and
	 * we also need to take into account the reg->off.
	 *
	 * We want to support cases like:
	 *
	 * struct foo {
	 *         struct bar br;
	 *         struct baz bz;
	 * };
	 *
	 * struct foo *v;
	 * v = func();	      // PTR_TO_BTF_ID
	 * val->foo = v;      // reg->off is zero, btf and btf_id match type
	 * val->bar = &v->br; // reg->off is still zero, but we need to retry with
	 *                    // first member type of struct after comparison fails
	 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked
	 *                    // to match type
	 *
	 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off
	 * is zero. We must also ensure that btf_struct_ids_match does not walk
	 * the struct to match type against first member of struct, i.e. reject
	 * second case from above. Hence, when type is BPF_KPTR_REF, we set
	 * strict mode to true for type match.
	 */
	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
				  kptr_field->kptr.btf, kptr_field->kptr.btf_id,
				  kptr_field->type != BPF_KPTR_UNREF))
		goto bad_type;
	return 0;
bad_type:
	verbose(env, "invalid kptr access, R%d type=%s%s ", regno,
		reg_type_str(env, reg->type), reg_name);
	verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name);
	if (kptr_field->type == BPF_KPTR_UNREF)
		verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED),
			targ_name);
	else
		verbose(env, "\n");
	return -EINVAL;
}

static bool in_sleepable(struct bpf_verifier_env *env)
{
	return env->prog->sleepable ||
	       (env->cur_state && env->cur_state->in_sleepable);
}

/* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
 * can dereference RCU protected pointers and result is PTR_TRUSTED.
 */
static bool in_rcu_cs(struct bpf_verifier_env *env)
{
	return env->cur_state->active_rcu_lock ||
	       cur_func(env)->active_locks ||
	       !in_sleepable(env);
}

/* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */
BTF_SET_START(rcu_protected_types)
BTF_ID(struct, prog_test_ref_kfunc)
#ifdef CONFIG_CGROUPS
BTF_ID(struct, cgroup)
#endif
#ifdef CONFIG_BPF_JIT
BTF_ID(struct, bpf_cpumask)
#endif
BTF_ID(struct, task_struct)
BTF_ID(struct, bpf_crypto_ctx)
BTF_SET_END(rcu_protected_types)

static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
{
	if (!btf_is_kernel(btf))
		return false;
	return btf_id_set_contains(&rcu_protected_types, btf_id);
}

static struct btf_record *kptr_pointee_btf_record(struct btf_field *kptr_field)
{
	struct btf_struct_meta *meta;

	if (btf_is_kernel(kptr_field->kptr.btf))
		return NULL;

	meta = btf_find_struct_meta(kptr_field->kptr.btf,
				    kptr_field->kptr.btf_id);

	return meta ? meta->record : NULL;
}

static bool rcu_safe_kptr(const struct btf_field *field)
{
	const struct btf_field_kptr *kptr = &field->kptr;

	return field->type == BPF_KPTR_PERCPU ||
	       (field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id));
}

static u32 btf_ld_kptr_type(struct bpf_verifier_env *env, struct btf_field *kptr_field)
{
	struct btf_record *rec;
	u32 ret;

	ret = PTR_MAYBE_NULL;
	if (rcu_safe_kptr(kptr_field) && in_rcu_cs(env)) {
		ret |= MEM_RCU;
		if (kptr_field->type == BPF_KPTR_PERCPU)
			ret |= MEM_PERCPU;
		else if (!btf_is_kernel(kptr_field->kptr.btf))
			ret |= MEM_ALLOC;

		rec = kptr_pointee_btf_record(kptr_field);
		if (rec && btf_record_has_field(rec, BPF_GRAPH_NODE))
			ret |= NON_OWN_REF;
	} else {
		ret |= PTR_UNTRUSTED;
	}

	return ret;
}

static int mark_uptr_ld_reg(struct bpf_verifier_env *env, u32 regno,
			    struct btf_field *field)
{
	struct bpf_reg_state *reg;
	const struct btf_type *t;

	t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id);
	mark_reg_known_zero(env, cur_regs(env), regno);
	reg = reg_state(env, regno);
	reg->type = PTR_TO_MEM | PTR_MAYBE_NULL;
	reg->mem_size = t->size;
	reg->id = ++env->id_gen;

	return 0;
}

static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
				 int value_regno, int insn_idx,
				 struct btf_field *kptr_field)
{
	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
	int class = BPF_CLASS(insn->code);
	struct bpf_reg_state *val_reg;

	/* Things we already checked for in check_map_access and caller:
	 *  - Reject cases where variable offset may touch kptr
	 *  - size of access (must be BPF_DW)
	 *  - tnum_is_const(reg->var_off)
	 *  - kptr_field->offset == off + reg->var_off.value
	 */
	/* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */
	if (BPF_MODE(insn->code) != BPF_MEM) {
		verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n");
		return -EACCES;
	}

	/* We only allow loading referenced kptr, since it will be marked as
	 * untrusted, similar to unreferenced kptr.
	 */
	if (class != BPF_LDX &&
	    (kptr_field->type == BPF_KPTR_REF || kptr_field->type == BPF_KPTR_PERCPU)) {
		verbose(env, "store to referenced kptr disallowed\n");
		return -EACCES;
	}
	if (class != BPF_LDX && kptr_field->type == BPF_UPTR) {
		verbose(env, "store to uptr disallowed\n");
		return -EACCES;
	}

	if (class == BPF_LDX) {
		if (kptr_field->type == BPF_UPTR)
			return mark_uptr_ld_reg(env, value_regno, kptr_field);

		/* We can simply mark the value_regno receiving the pointer
		 * value from map as PTR_TO_BTF_ID, with the correct type.
		 */
		mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf,
				kptr_field->kptr.btf_id, btf_ld_kptr_type(env, kptr_field));
	} else if (class == BPF_STX) {
		val_reg = reg_state(env, value_regno);
		if (!register_is_null(val_reg) &&
		    map_kptr_match_type(env, kptr_field, val_reg, value_regno))
			return -EACCES;
	} else if (class == BPF_ST) {
		if (insn->imm) {
			verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n",
				kptr_field->offset);
			return -EACCES;
		}
	} else {
		verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n");
		return -EACCES;
	}

	return 0;
}
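/* Illustrative sketch (hypothetical struct and field names, not taken from
 * this file): a map value governed by the checks above could be declared in a
 * BPF program as
 *
 *	struct map_value {
 *		struct task_struct __kptr *task;
 *	};
 *
 * A BPF_DW BPF_LDX of 'task' is marked PTR_TO_BTF_ID with the flags from
 * btf_ld_kptr_type(), while a BPF_STX of a non-NULL pointer must pass
 * map_kptr_match_type().
 */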
/* check read/write into a map element with possible variable offset */
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
			    int off, int size, bool zero_size_allowed,
			    enum bpf_access_src src)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *reg = &state->regs[regno];
	struct bpf_map *map = reg->map_ptr;
	struct btf_record *rec;
	int err, i;

	err = check_mem_region_access(env, regno, off, size, map->value_size,
				      zero_size_allowed);
	if (err)
		return err;

	if (IS_ERR_OR_NULL(map->record))
		return 0;
	rec = map->record;
	for (i = 0; i < rec->cnt; i++) {
		struct btf_field *field = &rec->fields[i];
		u32 p = field->offset;

		/* If any part of a field can be touched by load/store, reject
		 * this program. To check that [x1, x2) overlaps with [y1, y2),
		 * it is sufficient to check x1 < y2 && y1 < x2.
		 */
		if (reg->smin_value + off < p + field->size &&
		    p < reg->umax_value + off + size) {
			switch (field->type) {
			case BPF_KPTR_UNREF:
			case BPF_KPTR_REF:
			case BPF_KPTR_PERCPU:
			case BPF_UPTR:
				if (src != ACCESS_DIRECT) {
					verbose(env, "%s cannot be accessed indirectly by helper\n",
						btf_field_type_name(field->type));
					return -EACCES;
				}
				if (!tnum_is_const(reg->var_off)) {
					verbose(env, "%s access cannot have variable offset\n",
						btf_field_type_name(field->type));
					return -EACCES;
				}
				if (p != off + reg->var_off.value) {
					verbose(env, "%s access misaligned expected=%u off=%llu\n",
						btf_field_type_name(field->type),
						p, off + reg->var_off.value);
					return -EACCES;
				}
				if (size != bpf_size_to_bytes(BPF_DW)) {
					verbose(env, "%s access size must be BPF_DW\n",
						btf_field_type_name(field->type));
					return -EACCES;
				}
				break;
			default:
				verbose(env, "%s cannot be accessed directly by load/store\n",
					btf_field_type_name(field->type));
				return -EACCES;
			}
		}
	}
	return 0;
}
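/* Worked example of the overlap test above: with reg->var_off known zero and
 * a special field at p = 16 of size 8, an access with off = 20, size = 4
 * satisfies "20 < 16 + 8 && 16 < 20 + 4", so it is routed into the per-field
 * checks (and rejected there as misaligned); off = 24, size = 4 fails the
 * first comparison and falls through to the ordinary map value rules.
 */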
5710 #define MAX_PACKET_OFF 0xffff
5712 static bool may_access_direct_pkt_data(struct bpf_verifier_env
*env
,
5713 const struct bpf_call_arg_meta
*meta
,
5714 enum bpf_access_type t
)
5716 enum bpf_prog_type prog_type
= resolve_prog_type(env
->prog
);
5718 switch (prog_type
) {
5719 /* Program types only with direct read access go here! */
5720 case BPF_PROG_TYPE_LWT_IN
:
5721 case BPF_PROG_TYPE_LWT_OUT
:
5722 case BPF_PROG_TYPE_LWT_SEG6LOCAL
:
5723 case BPF_PROG_TYPE_SK_REUSEPORT
:
5724 case BPF_PROG_TYPE_FLOW_DISSECTOR
:
5725 case BPF_PROG_TYPE_CGROUP_SKB
:
5730 /* Program types with direct read + write access go here! */
5731 case BPF_PROG_TYPE_SCHED_CLS
:
5732 case BPF_PROG_TYPE_SCHED_ACT
:
5733 case BPF_PROG_TYPE_XDP
:
5734 case BPF_PROG_TYPE_LWT_XMIT
:
5735 case BPF_PROG_TYPE_SK_SKB
:
5736 case BPF_PROG_TYPE_SK_MSG
:
5738 return meta
->pkt_access
;
5740 env
->seen_direct_write
= true;
5743 case BPF_PROG_TYPE_CGROUP_SOCKOPT
:
5745 env
->seen_direct_write
= true;
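/* Illustrative only: in a program type allowed above, direct packet access in
 * BPF C conventionally looks like
 *
 *	void *data = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *	struct ethhdr *eth = data;
 *
 *	if (data + sizeof(*eth) > data_end)
 *		return TC_ACT_SHOT;
 *
 * where the explicit bounds check is what lets find_good_pkt_pointers() set
 * reg->range before check_packet_access() verifies the dereference.
 */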
5754 static int check_packet_access(struct bpf_verifier_env
*env
, u32 regno
, int off
,
5755 int size
, bool zero_size_allowed
)
5757 struct bpf_reg_state
*regs
= cur_regs(env
);
5758 struct bpf_reg_state
*reg
= ®s
[regno
];
5761 /* We may have added a variable offset to the packet pointer; but any
5762 * reg->range we have comes after that. We are only checking the fixed
5766 /* We don't allow negative numbers, because we aren't tracking enough
5767 * detail to prove they're safe.
5769 if (reg
->smin_value
< 0) {
5770 verbose(env
, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
5775 err
= reg
->range
< 0 ? -EINVAL
:
5776 __check_mem_access(env
, regno
, off
, size
, reg
->range
,
5779 verbose(env
, "R%d offset is outside of the packet\n", regno
);
5783 /* __check_mem_access has made sure "off + size - 1" is within u16.
5784 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
5785 * otherwise find_good_pkt_pointers would have refused to set range info
5786 * that __check_mem_access would have rejected this pkt access.
5787 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
5789 env
->prog
->aux
->max_pkt_offset
=
5790 max_t(u32
, env
->prog
->aux
->max_pkt_offset
,
5791 off
+ reg
->umax_value
+ size
- 1);
5796 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
5797 static int check_ctx_access(struct bpf_verifier_env
*env
, int insn_idx
, int off
, int size
,
5798 enum bpf_access_type t
, enum bpf_reg_type
*reg_type
,
5799 struct btf
**btf
, u32
*btf_id
, bool *is_retval
, bool is_ldsx
)
5801 struct bpf_insn_access_aux info
= {
5802 .reg_type
= *reg_type
,
5808 if (env
->ops
->is_valid_access
&&
5809 env
->ops
->is_valid_access(off
, size
, t
, env
->prog
, &info
)) {
5810 /* A non zero info.ctx_field_size indicates that this field is a
5811 * candidate for later verifier transformation to load the whole
5812 * field and then apply a mask when accessed with a narrower
5813 * access than actual ctx access size. A zero info.ctx_field_size
5814 * will only allow for whole field access and rejects any other
5815 * type of narrower access.
5817 *reg_type
= info
.reg_type
;
5818 *is_retval
= info
.is_retval
;
5820 if (base_type(*reg_type
) == PTR_TO_BTF_ID
) {
5822 *btf_id
= info
.btf_id
;
5824 env
->insn_aux_data
[insn_idx
].ctx_field_size
= info
.ctx_field_size
;
5826 /* remember the offset of last byte accessed in ctx */
5827 if (env
->prog
->aux
->max_ctx_offset
< off
+ size
)
5828 env
->prog
->aux
->max_ctx_offset
= off
+ size
;
5832 verbose(env
, "invalid bpf_context access off=%d size=%d\n", off
, size
);
5836 static int check_flow_keys_access(struct bpf_verifier_env
*env
, int off
,
5839 if (size
< 0 || off
< 0 ||
5840 (u64
)off
+ size
> sizeof(struct bpf_flow_keys
)) {
5841 verbose(env
, "invalid access to flow keys off=%d size=%d\n",
5848 static int check_sock_access(struct bpf_verifier_env
*env
, int insn_idx
,
5849 u32 regno
, int off
, int size
,
5850 enum bpf_access_type t
)
5852 struct bpf_reg_state
*regs
= cur_regs(env
);
5853 struct bpf_reg_state
*reg
= ®s
[regno
];
5854 struct bpf_insn_access_aux info
= {};
5857 if (reg
->smin_value
< 0) {
5858 verbose(env
, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
5863 switch (reg
->type
) {
5864 case PTR_TO_SOCK_COMMON
:
5865 valid
= bpf_sock_common_is_valid_access(off
, size
, t
, &info
);
5868 valid
= bpf_sock_is_valid_access(off
, size
, t
, &info
);
5870 case PTR_TO_TCP_SOCK
:
5871 valid
= bpf_tcp_sock_is_valid_access(off
, size
, t
, &info
);
5873 case PTR_TO_XDP_SOCK
:
5874 valid
= bpf_xdp_sock_is_valid_access(off
, size
, t
, &info
);
5882 env
->insn_aux_data
[insn_idx
].ctx_field_size
=
5883 info
.ctx_field_size
;
5887 verbose(env
, "R%d invalid %s access off=%d size=%d\n",
5888 regno
, reg_type_str(env
, reg
->type
), off
, size
);
static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
}

static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return reg->type == PTR_TO_CTX;
}

static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return type_is_sk_pointer(reg->type);
}

static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return type_is_pkt_pointer(reg->type);
}

static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	/* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
	return reg->type == PTR_TO_FLOW_KEYS;
}

static bool is_arena_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return reg->type == PTR_TO_ARENA;
}
static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
#ifdef CONFIG_NET
	[PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
	[PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	[PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
#endif
	[CONST_PTR_TO_MAP] = btf_bpf_map_id,
};

static bool is_trusted_reg(const struct bpf_reg_state *reg)
{
	/* A referenced register is always trusted. */
	if (reg->ref_obj_id)
		return true;

	/* Types listed in the reg2btf_ids are always trusted */
	if (reg2btf_ids[base_type(reg->type)] &&
	    !bpf_type_has_unsafe_modifiers(reg->type))
		return true;

	/* If a register is not referenced, it is trusted if it has the
	 * MEM_ALLOC or PTR_TRUSTED type modifiers, and no others. Some of the
	 * other type modifiers may be safe, but we elect to take an opt-in
	 * approach here as some (e.g. PTR_UNTRUSTED and PTR_MAYBE_NULL) are
	 * not.
	 *
	 * Eventually, we should make PTR_TRUSTED the single source of truth
	 * for whether a register is trusted.
	 */
	return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS &&
	       !bpf_type_has_unsafe_modifiers(reg->type);
}

static bool is_rcu_reg(const struct bpf_reg_state *reg)
{
	return reg->type & MEM_RCU;
}

static void clear_trusted_flags(enum bpf_type_flag *flag)
{
	*flag &= ~(BPF_REG_TRUSTED_MODIFIERS | MEM_RCU);
}
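/* Illustrative note: under the rules above, a register with a non-zero
 * ref_obj_id (e.g. the return value of an acquire kfunc) is trusted, as is a
 * PTR_TO_BTF_ID carrying only PTR_TRUSTED or MEM_ALLOC; the same base type
 * tagged PTR_UNTRUSTED or PTR_MAYBE_NULL is not.
 */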
5977 static int check_pkt_ptr_alignment(struct bpf_verifier_env
*env
,
5978 const struct bpf_reg_state
*reg
,
5979 int off
, int size
, bool strict
)
5981 struct tnum reg_off
;
5984 /* Byte size accesses are always allowed. */
5985 if (!strict
|| size
== 1)
5988 /* For platforms that do not have a Kconfig enabling
5989 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
5990 * NET_IP_ALIGN is universally set to '2'. And on platforms
5991 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
5992 * to this code only in strict mode where we want to emulate
5993 * the NET_IP_ALIGN==2 checking. Therefore use an
5994 * unconditional IP align value of '2'.
5998 reg_off
= tnum_add(reg
->var_off
, tnum_const(ip_align
+ reg
->off
+ off
));
5999 if (!tnum_is_aligned(reg_off
, size
)) {
6002 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
6004 "misaligned packet access off %d+%s+%d+%d size %d\n",
6005 ip_align
, tn_buf
, reg
->off
, off
, size
);
6012 static int check_generic_ptr_alignment(struct bpf_verifier_env
*env
,
6013 const struct bpf_reg_state
*reg
,
6014 const char *pointer_desc
,
6015 int off
, int size
, bool strict
)
6017 struct tnum reg_off
;
6019 /* Byte size accesses are always allowed. */
6020 if (!strict
|| size
== 1)
6023 reg_off
= tnum_add(reg
->var_off
, tnum_const(reg
->off
+ off
));
6024 if (!tnum_is_aligned(reg_off
, size
)) {
6027 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
6028 verbose(env
, "misaligned %saccess off %s+%d+%d size %d\n",
6029 pointer_desc
, tn_buf
, reg
->off
, off
, size
);
6036 static int check_ptr_alignment(struct bpf_verifier_env
*env
,
6037 const struct bpf_reg_state
*reg
, int off
,
6038 int size
, bool strict_alignment_once
)
6040 bool strict
= env
->strict_alignment
|| strict_alignment_once
;
6041 const char *pointer_desc
= "";
6043 switch (reg
->type
) {
6045 case PTR_TO_PACKET_META
:
6046 /* Special case, because of NET_IP_ALIGN. Given metadata sits
6047 * right in front, treat it the very same way.
6049 return check_pkt_ptr_alignment(env
, reg
, off
, size
, strict
);
6050 case PTR_TO_FLOW_KEYS
:
6051 pointer_desc
= "flow keys ";
6053 case PTR_TO_MAP_KEY
:
6054 pointer_desc
= "key ";
6056 case PTR_TO_MAP_VALUE
:
6057 pointer_desc
= "value ";
6060 pointer_desc
= "context ";
6063 pointer_desc
= "stack ";
6064 /* The stack spill tracking logic in check_stack_write_fixed_off()
6065 * and check_stack_read_fixed_off() relies on stack accesses being
6071 pointer_desc
= "sock ";
6073 case PTR_TO_SOCK_COMMON
:
6074 pointer_desc
= "sock_common ";
6076 case PTR_TO_TCP_SOCK
:
6077 pointer_desc
= "tcp_sock ";
6079 case PTR_TO_XDP_SOCK
:
6080 pointer_desc
= "xdp_sock ";
6087 return check_generic_ptr_alignment(env
, reg
, pointer_desc
, off
, size
,
static enum priv_stack_mode bpf_enable_priv_stack(struct bpf_prog *prog)
{
	if (!bpf_jit_supports_private_stack())
		return NO_PRIV_STACK;

	/* bpf_prog_check_recur() checks all prog types that use bpf trampoline
	 * while kprobe/tp/perf_event/raw_tp don't use trampoline hence checked
	 * explicitly.
	 */
	switch (prog->type) {
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
		return PRIV_STACK_ADAPTIVE;
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_STRUCT_OPS:
		if (prog->aux->priv_stack_requested || bpf_prog_check_recur(prog))
			return PRIV_STACK_ADAPTIVE;
		fallthrough;
	default:
		break;
	}

	return NO_PRIV_STACK;
}

static int round_up_stack_depth(struct bpf_verifier_env *env, int stack_depth)
{
	if (env->prog->jit_requested)
		return round_up(stack_depth, 16);

	/* round up to 32-bytes, since this is granularity
	 * of interpreter stack size
	 */
	return round_up(max_t(u32, stack_depth, 1), 32);
}
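/* Worked example: with JIT enabled a tracked depth of 40 bytes rounds up to
 * 48 (a multiple of 16); under the interpreter the same 40 bytes rounds up to
 * 64, and a depth of 0 is first bumped to 1 so an otherwise empty frame still
 * occupies one 32-byte unit.
 */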
6130 /* starting from main bpf function walk all instructions of the function
6131 * and recursively walk all callees that given function can call.
6132 * Ignore jump and exit insns.
6133 * Since recursion is prevented by check_cfg() this algorithm
6134 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
6136 static int check_max_stack_depth_subprog(struct bpf_verifier_env
*env
, int idx
,
6137 bool priv_stack_supported
)
6139 struct bpf_subprog_info
*subprog
= env
->subprog_info
;
6140 struct bpf_insn
*insn
= env
->prog
->insnsi
;
6141 int depth
= 0, frame
= 0, i
, subprog_end
, subprog_depth
;
6142 bool tail_call_reachable
= false;
6143 int ret_insn
[MAX_CALL_FRAMES
];
6144 int ret_prog
[MAX_CALL_FRAMES
];
6147 i
= subprog
[idx
].start
;
6148 if (!priv_stack_supported
)
6149 subprog
[idx
].priv_stack_mode
= NO_PRIV_STACK
;
6151 /* protect against potential stack overflow that might happen when
6152 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
6153 * depth for such case down to 256 so that the worst case scenario
6154 * would result in 8k stack size (32 which is tailcall limit * 256 =
6157 * To get the idea what might happen, see an example:
6158 * func1 -> sub rsp, 128
6159 * subfunc1 -> sub rsp, 256
6160 * tailcall1 -> add rsp, 256
6161 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
6162 * subfunc2 -> sub rsp, 64
6163 * subfunc22 -> sub rsp, 128
6164 * tailcall2 -> add rsp, 128
6165 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
6167 * tailcall will unwind the current stack frame but it will not get rid
6168 * of caller's stack as shown on the example above.
6170 if (idx
&& subprog
[idx
].has_tail_call
&& depth
>= 256) {
6172 "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
6177 subprog_depth
= round_up_stack_depth(env
, subprog
[idx
].stack_depth
);
6178 if (priv_stack_supported
) {
6179 /* Request private stack support only if the subprog stack
6180 * depth is no less than BPF_PRIV_STACK_MIN_SIZE. This is to
6181 * avoid jit penalty if the stack usage is small.
6183 if (subprog
[idx
].priv_stack_mode
== PRIV_STACK_UNKNOWN
&&
6184 subprog_depth
>= BPF_PRIV_STACK_MIN_SIZE
)
6185 subprog
[idx
].priv_stack_mode
= PRIV_STACK_ADAPTIVE
;
6188 if (subprog
[idx
].priv_stack_mode
== PRIV_STACK_ADAPTIVE
) {
6189 if (subprog_depth
> MAX_BPF_STACK
) {
6190 verbose(env
, "stack size of subprog %d is %d. Too large\n",
6191 idx
, subprog_depth
);
6195 depth
+= subprog_depth
;
6196 if (depth
> MAX_BPF_STACK
) {
6197 verbose(env
, "combined stack size of %d calls is %d. Too large\n",
6203 subprog_end
= subprog
[idx
+ 1].start
;
6204 for (; i
< subprog_end
; i
++) {
6205 int next_insn
, sidx
;
6207 if (bpf_pseudo_kfunc_call(insn
+ i
) && !insn
[i
].off
) {
6210 if (!is_bpf_throw_kfunc(insn
+ i
))
6212 if (subprog
[idx
].is_cb
)
6214 for (int c
= 0; c
< frame
&& !err
; c
++) {
6215 if (subprog
[ret_prog
[c
]].is_cb
) {
6223 "bpf_throw kfunc (insn %d) cannot be called from callback subprog %d\n",
6228 if (!bpf_pseudo_call(insn
+ i
) && !bpf_pseudo_func(insn
+ i
))
6230 /* remember insn and function to return to */
6231 ret_insn
[frame
] = i
+ 1;
6232 ret_prog
[frame
] = idx
;
6234 /* find the callee */
6235 next_insn
= i
+ insn
[i
].imm
+ 1;
6236 sidx
= find_subprog(env
, next_insn
);
6238 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
6242 if (subprog
[sidx
].is_async_cb
) {
6243 if (subprog
[sidx
].has_tail_call
) {
6244 verbose(env
, "verifier bug. subprog has tail_call and async cb\n");
6247 /* async callbacks don't increase bpf prog stack size unless called directly */
6248 if (!bpf_pseudo_call(insn
+ i
))
6250 if (subprog
[sidx
].is_exception_cb
) {
6251 verbose(env
, "insn %d cannot call exception cb directly\n", i
);
6257 if (!priv_stack_supported
)
6258 subprog
[idx
].priv_stack_mode
= NO_PRIV_STACK
;
6260 if (subprog
[idx
].has_tail_call
)
6261 tail_call_reachable
= true;
6264 if (frame
>= MAX_CALL_FRAMES
) {
6265 verbose(env
, "the call stack of %d frames is too deep !\n",
6271 /* if tail call got detected across bpf2bpf calls then mark each of the
6272 * currently present subprog frames as tail call reachable subprogs;
6273 * this info will be utilized by JIT so that we will be preserving the
6274 * tail call counter throughout bpf2bpf calls combined with tailcalls
6276 if (tail_call_reachable
)
6277 for (j
= 0; j
< frame
; j
++) {
6278 if (subprog
[ret_prog
[j
]].is_exception_cb
) {
6279 verbose(env
, "cannot tail call within exception cb\n");
6282 subprog
[ret_prog
[j
]].tail_call_reachable
= true;
6284 if (subprog
[0].tail_call_reachable
)
6285 env
->prog
->aux
->tail_call_reachable
= true;
6287 /* end of for() loop means the last insn of the 'subprog'
6288 * was reached. Doesn't matter whether it was JA or EXIT
6292 if (subprog
[idx
].priv_stack_mode
!= PRIV_STACK_ADAPTIVE
)
6293 depth
-= round_up_stack_depth(env
, subprog
[idx
].stack_depth
);
6295 i
= ret_insn
[frame
];
6296 idx
= ret_prog
[frame
];
6300 static int check_max_stack_depth(struct bpf_verifier_env
*env
)
6302 enum priv_stack_mode priv_stack_mode
= PRIV_STACK_UNKNOWN
;
6303 struct bpf_subprog_info
*si
= env
->subprog_info
;
6304 bool priv_stack_supported
;
6307 for (int i
= 0; i
< env
->subprog_cnt
; i
++) {
6308 if (si
[i
].has_tail_call
) {
6309 priv_stack_mode
= NO_PRIV_STACK
;
6314 if (priv_stack_mode
== PRIV_STACK_UNKNOWN
)
6315 priv_stack_mode
= bpf_enable_priv_stack(env
->prog
);
6317 /* All async_cb subprogs use normal kernel stack. If a particular
6318 * subprog appears in both main prog and async_cb subtree, that
6319 * subprog will use normal kernel stack to avoid potential nesting.
6320 * The reverse subprog traversal ensures when main prog subtree is
6321 * checked, the subprogs appearing in async_cb subtrees are already
6322 * marked as using normal kernel stack, so stack size checking can
6325 for (int i
= env
->subprog_cnt
- 1; i
>= 0; i
--) {
6326 if (!i
|| si
[i
].is_async_cb
) {
6327 priv_stack_supported
= !i
&& priv_stack_mode
== PRIV_STACK_ADAPTIVE
;
6328 ret
= check_max_stack_depth_subprog(env
, i
, priv_stack_supported
);
6334 for (int i
= 0; i
< env
->subprog_cnt
; i
++) {
6335 if (si
[i
].priv_stack_mode
== PRIV_STACK_ADAPTIVE
) {
6336 env
->prog
->aux
->jits_use_priv_stack
= true;
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
static int get_callee_stack_depth(struct bpf_verifier_env *env,
				  const struct bpf_insn *insn, int idx)
{
	int start = idx + insn->imm + 1, subprog;

	subprog = find_subprog(env, start);
	if (subprog < 0) {
		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
			  start);
		return -EFAULT;
	}
	return env->subprog_info[subprog].stack_depth;
}
#endif
6360 static int __check_buffer_access(struct bpf_verifier_env
*env
,
6361 const char *buf_info
,
6362 const struct bpf_reg_state
*reg
,
6363 int regno
, int off
, int size
)
6367 "R%d invalid %s buffer access: off=%d, size=%d\n",
6368 regno
, buf_info
, off
, size
);
6371 if (!tnum_is_const(reg
->var_off
) || reg
->var_off
.value
) {
6374 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
6376 "R%d invalid variable buffer offset: off=%d, var_off=%s\n",
6377 regno
, off
, tn_buf
);
6384 static int check_tp_buffer_access(struct bpf_verifier_env
*env
,
6385 const struct bpf_reg_state
*reg
,
6386 int regno
, int off
, int size
)
6390 err
= __check_buffer_access(env
, "tracepoint", reg
, regno
, off
, size
);
6394 if (off
+ size
> env
->prog
->aux
->max_tp_access
)
6395 env
->prog
->aux
->max_tp_access
= off
+ size
;
6400 static int check_buffer_access(struct bpf_verifier_env
*env
,
6401 const struct bpf_reg_state
*reg
,
6402 int regno
, int off
, int size
,
6403 bool zero_size_allowed
,
6406 const char *buf_info
= type_is_rdonly_mem(reg
->type
) ? "rdonly" : "rdwr";
6409 err
= __check_buffer_access(env
, buf_info
, reg
, regno
, off
, size
);
6413 if (off
+ size
> *max_access
)
6414 *max_access
= off
+ size
;
/* BPF architecture zero extends alu32 ops into 64-bit registers */
static void zext_32_to_64(struct bpf_reg_state *reg)
{
	reg->var_off = tnum_subreg(reg->var_off);
	__reg_assign_32_into_64(reg);
}

/* truncate register to smaller size (in bytes)
 * must be called with size < BPF_REG_SIZE
 */
static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
{
	u64 mask;

	/* clear high bits in bit representation */
	reg->var_off = tnum_cast(reg->var_off, size);

	/* fix arithmetic bounds */
	mask = ((u64)1 << (size * 8)) - 1;
	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
		reg->umin_value &= mask;
		reg->umax_value &= mask;
	} else {
		reg->umin_value = 0;
		reg->umax_value = mask;
	}
	reg->smin_value = reg->umin_value;
	reg->smax_value = reg->umax_value;

	/* If size is smaller than 32bit register the 32bit register
	 * values are also truncated so we push 64-bit bounds into
	 * 32-bit bounds. Above were truncated < 32-bits already.
	 */
	if (size < 4)
		__mark_reg32_unbounded(reg);

	reg_bounds_sync(reg);
}
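/* Worked example: truncating a register with umin_value = 0x1f0 and
 * umax_value = 0x1ff to size = 1 keeps the bounds' high bits equal (0x100 for
 * both once the low byte is masked off), so the range tightens to
 * [0xf0, 0xff]; for [0xf0, 0x1ff] the high bits differ and the result falls
 * back to the conservative [0, 0xff].
 */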
6458 static void set_sext64_default_val(struct bpf_reg_state
*reg
, int size
)
6461 reg
->smin_value
= reg
->s32_min_value
= S8_MIN
;
6462 reg
->smax_value
= reg
->s32_max_value
= S8_MAX
;
6463 } else if (size
== 2) {
6464 reg
->smin_value
= reg
->s32_min_value
= S16_MIN
;
6465 reg
->smax_value
= reg
->s32_max_value
= S16_MAX
;
6468 reg
->smin_value
= reg
->s32_min_value
= S32_MIN
;
6469 reg
->smax_value
= reg
->s32_max_value
= S32_MAX
;
6471 reg
->umin_value
= reg
->u32_min_value
= 0;
6472 reg
->umax_value
= U64_MAX
;
6473 reg
->u32_max_value
= U32_MAX
;
6474 reg
->var_off
= tnum_unknown
;
6477 static void coerce_reg_to_size_sx(struct bpf_reg_state
*reg
, int size
)
6479 s64 init_s64_max
, init_s64_min
, s64_max
, s64_min
, u64_cval
;
6480 u64 top_smax_value
, top_smin_value
;
6481 u64 num_bits
= size
* 8;
6483 if (tnum_is_const(reg
->var_off
)) {
6484 u64_cval
= reg
->var_off
.value
;
6486 reg
->var_off
= tnum_const((s8
)u64_cval
);
6488 reg
->var_off
= tnum_const((s16
)u64_cval
);
6491 reg
->var_off
= tnum_const((s32
)u64_cval
);
6493 u64_cval
= reg
->var_off
.value
;
6494 reg
->smax_value
= reg
->smin_value
= u64_cval
;
6495 reg
->umax_value
= reg
->umin_value
= u64_cval
;
6496 reg
->s32_max_value
= reg
->s32_min_value
= u64_cval
;
6497 reg
->u32_max_value
= reg
->u32_min_value
= u64_cval
;
6501 top_smax_value
= ((u64
)reg
->smax_value
>> num_bits
) << num_bits
;
6502 top_smin_value
= ((u64
)reg
->smin_value
>> num_bits
) << num_bits
;
6504 if (top_smax_value
!= top_smin_value
)
6507 /* find the s64_min and s64_min after sign extension */
6509 init_s64_max
= (s8
)reg
->smax_value
;
6510 init_s64_min
= (s8
)reg
->smin_value
;
6511 } else if (size
== 2) {
6512 init_s64_max
= (s16
)reg
->smax_value
;
6513 init_s64_min
= (s16
)reg
->smin_value
;
6515 init_s64_max
= (s32
)reg
->smax_value
;
6516 init_s64_min
= (s32
)reg
->smin_value
;
6519 s64_max
= max(init_s64_max
, init_s64_min
);
6520 s64_min
= min(init_s64_max
, init_s64_min
);
6522 /* both of s64_max/s64_min positive or negative */
6523 if ((s64_max
>= 0) == (s64_min
>= 0)) {
6524 reg
->s32_min_value
= reg
->smin_value
= s64_min
;
6525 reg
->s32_max_value
= reg
->smax_value
= s64_max
;
6526 reg
->u32_min_value
= reg
->umin_value
= s64_min
;
6527 reg
->u32_max_value
= reg
->umax_value
= s64_max
;
6528 reg
->var_off
= tnum_range(s64_min
, s64_max
);
6533 set_sext64_default_val(reg
, size
);
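/* Worked example: a BPF_LDSX byte load of the constant 0x80 sign-extends to
 * -128, so all bounds collapse to that single value; for a tracked range of
 * [0x7e, 0x81] the sign-extended candidates are 126 and -127, which straddle
 * zero, so the conservative S8_MIN/S8_MAX default from
 * set_sext64_default_val() is used instead.
 */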
6536 static void set_sext32_default_val(struct bpf_reg_state
*reg
, int size
)
6539 reg
->s32_min_value
= S8_MIN
;
6540 reg
->s32_max_value
= S8_MAX
;
6543 reg
->s32_min_value
= S16_MIN
;
6544 reg
->s32_max_value
= S16_MAX
;
6546 reg
->u32_min_value
= 0;
6547 reg
->u32_max_value
= U32_MAX
;
6548 reg
->var_off
= tnum_subreg(tnum_unknown
);
6551 static void coerce_subreg_to_size_sx(struct bpf_reg_state
*reg
, int size
)
6553 s32 init_s32_max
, init_s32_min
, s32_max
, s32_min
, u32_val
;
6554 u32 top_smax_value
, top_smin_value
;
6555 u32 num_bits
= size
* 8;
6557 if (tnum_is_const(reg
->var_off
)) {
6558 u32_val
= reg
->var_off
.value
;
6560 reg
->var_off
= tnum_const((s8
)u32_val
);
6562 reg
->var_off
= tnum_const((s16
)u32_val
);
6564 u32_val
= reg
->var_off
.value
;
6565 reg
->s32_min_value
= reg
->s32_max_value
= u32_val
;
6566 reg
->u32_min_value
= reg
->u32_max_value
= u32_val
;
6570 top_smax_value
= ((u32
)reg
->s32_max_value
>> num_bits
) << num_bits
;
6571 top_smin_value
= ((u32
)reg
->s32_min_value
>> num_bits
) << num_bits
;
6573 if (top_smax_value
!= top_smin_value
)
6576 /* find the s32_min and s32_min after sign extension */
6578 init_s32_max
= (s8
)reg
->s32_max_value
;
6579 init_s32_min
= (s8
)reg
->s32_min_value
;
6582 init_s32_max
= (s16
)reg
->s32_max_value
;
6583 init_s32_min
= (s16
)reg
->s32_min_value
;
6585 s32_max
= max(init_s32_max
, init_s32_min
);
6586 s32_min
= min(init_s32_max
, init_s32_min
);
6588 if ((s32_min
>= 0) == (s32_max
>= 0)) {
6589 reg
->s32_min_value
= s32_min
;
6590 reg
->s32_max_value
= s32_max
;
6591 reg
->u32_min_value
= (u32
)s32_min
;
6592 reg
->u32_max_value
= (u32
)s32_max
;
6593 reg
->var_off
= tnum_subreg(tnum_range(s32_min
, s32_max
));
6598 set_sext32_default_val(reg
, size
);
static bool bpf_map_is_rdonly(const struct bpf_map *map)
{
	/* A map is considered read-only if the following conditions are true:
	 *
	 * 1) BPF program side cannot change any of the map content. The
	 *    BPF_F_RDONLY_PROG flag is throughout the lifetime of a map
	 *    and was set at map creation time.
	 * 2) The map value(s) have been initialized from user space by a
	 *    loader and then "frozen", such that no new map update/delete
	 *    operations from syscall side are possible for the rest of
	 *    the map's lifetime from that point onwards.
	 * 3) Any parallel/pending map update/delete operations from syscall
	 *    side have been completed. Only after that point, it's safe to
	 *    assume that map value(s) are immutable.
	 */
	return (map->map_flags & BPF_F_RDONLY_PROG) &&
	       READ_ONCE(map->frozen) &&
	       !bpf_map_write_active(map);
}
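/* Illustrative only: the usual way all three conditions end up satisfied is
 * libbpf's handling of .rodata, which creates the backing array map with
 * BPF_F_RDONLY_PROG, populates it from the ELF section and issues
 * BPF_MAP_FREEZE before program load; the same effect can be had manually
 * with "bpftool map freeze".
 */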
6621 static int bpf_map_direct_read(struct bpf_map
*map
, int off
, int size
, u64
*val
,
6628 err
= map
->ops
->map_direct_value_addr(map
, &addr
, off
);
6631 ptr
= (void *)(long)addr
+ off
;
6635 *val
= is_ldsx
? (s64
)*(s8
*)ptr
: (u64
)*(u8
*)ptr
;
6638 *val
= is_ldsx
? (s64
)*(s16
*)ptr
: (u64
)*(u16
*)ptr
;
6641 *val
= is_ldsx
? (s64
)*(s32
*)ptr
: (u64
)*(u32
*)ptr
;
6652 #define BTF_TYPE_SAFE_RCU(__type) __PASTE(__type, __safe_rcu)
6653 #define BTF_TYPE_SAFE_RCU_OR_NULL(__type) __PASTE(__type, __safe_rcu_or_null)
6654 #define BTF_TYPE_SAFE_TRUSTED(__type) __PASTE(__type, __safe_trusted)
6655 #define BTF_TYPE_SAFE_TRUSTED_OR_NULL(__type) __PASTE(__type, __safe_trusted_or_null)
6658 * Allow list few fields as RCU trusted or full trusted.
6659 * This logic doesn't allow mix tagging and will be removed once GCC supports
6663 /* RCU trusted: these fields are trusted in RCU CS and never NULL */
6664 BTF_TYPE_SAFE_RCU(struct task_struct
) {
6665 const cpumask_t
*cpus_ptr
;
6666 struct css_set __rcu
*cgroups
;
6667 struct task_struct __rcu
*real_parent
;
6668 struct task_struct
*group_leader
;
6671 BTF_TYPE_SAFE_RCU(struct cgroup
) {
6672 /* cgrp->kn is always accessible as documented in kernel/cgroup/cgroup.c */
6673 struct kernfs_node
*kn
;
6676 BTF_TYPE_SAFE_RCU(struct css_set
) {
6677 struct cgroup
*dfl_cgrp
;
6680 /* RCU trusted: these fields are trusted in RCU CS and can be NULL */
6681 BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct
) {
6682 struct file __rcu
*exe_file
;
6685 /* skb->sk, req->sk are not RCU protected, but we mark them as such
6686 * because bpf prog accessible sockets are SOCK_RCU_FREE.
6688 BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff
) {
6692 BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock
) {
6696 /* full trusted: these fields are trusted even outside of RCU CS and never NULL */
6697 BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta
) {
6698 struct seq_file
*seq
;
6701 BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task
) {
6702 struct bpf_iter_meta
*meta
;
6703 struct task_struct
*task
;
6706 BTF_TYPE_SAFE_TRUSTED(struct linux_binprm
) {
6710 BTF_TYPE_SAFE_TRUSTED(struct file
) {
6711 struct inode
*f_inode
;
6714 BTF_TYPE_SAFE_TRUSTED(struct dentry
) {
6715 /* no negative dentry-s in places where bpf can see it */
6716 struct inode
*d_inode
;
6719 BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct socket
) {
6723 static bool type_is_rcu(struct bpf_verifier_env
*env
,
6724 struct bpf_reg_state
*reg
,
6725 const char *field_name
, u32 btf_id
)
6727 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct task_struct
));
6728 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct cgroup
));
6729 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct css_set
));
6731 return btf_nested_type_is_trusted(&env
->log
, reg
, field_name
, btf_id
, "__safe_rcu");
6734 static bool type_is_rcu_or_null(struct bpf_verifier_env
*env
,
6735 struct bpf_reg_state
*reg
,
6736 const char *field_name
, u32 btf_id
)
6738 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct
));
6739 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff
));
6740 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock
));
6742 return btf_nested_type_is_trusted(&env
->log
, reg
, field_name
, btf_id
, "__safe_rcu_or_null");
6745 static bool type_is_trusted(struct bpf_verifier_env
*env
,
6746 struct bpf_reg_state
*reg
,
6747 const char *field_name
, u32 btf_id
)
6749 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta
));
6750 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task
));
6751 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct linux_binprm
));
6752 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct file
));
6753 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct dentry
));
6755 return btf_nested_type_is_trusted(&env
->log
, reg
, field_name
, btf_id
, "__safe_trusted");
6758 static bool type_is_trusted_or_null(struct bpf_verifier_env
*env
,
6759 struct bpf_reg_state
*reg
,
6760 const char *field_name
, u32 btf_id
)
6762 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct socket
));
6764 return btf_nested_type_is_trusted(&env
->log
, reg
, field_name
, btf_id
,
6765 "__safe_trusted_or_null");
6768 static int check_ptr_to_btf_access(struct bpf_verifier_env
*env
,
6769 struct bpf_reg_state
*regs
,
6770 int regno
, int off
, int size
,
6771 enum bpf_access_type atype
,
6774 struct bpf_reg_state
*reg
= regs
+ regno
;
6775 const struct btf_type
*t
= btf_type_by_id(reg
->btf
, reg
->btf_id
);
6776 const char *tname
= btf_name_by_offset(reg
->btf
, t
->name_off
);
6777 const char *field_name
= NULL
;
6778 enum bpf_type_flag flag
= 0;
6783 if (!env
->allow_ptr_leaks
) {
6785 "'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
6789 if (!env
->prog
->gpl_compatible
&& btf_is_kernel(reg
->btf
)) {
6791 "Cannot access kernel 'struct %s' from non-GPL compatible program\n",
6797 "R%d is ptr_%s invalid negative access: off=%d\n",
6801 if (!tnum_is_const(reg
->var_off
) || reg
->var_off
.value
) {
6804 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
6806 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
6807 regno
, tname
, off
, tn_buf
);
6811 if (reg
->type
& MEM_USER
) {
6813 "R%d is ptr_%s access user memory: off=%d\n",
6818 if (reg
->type
& MEM_PERCPU
) {
6820 "R%d is ptr_%s access percpu memory: off=%d\n",
6825 if (env
->ops
->btf_struct_access
&& !type_is_alloc(reg
->type
) && atype
== BPF_WRITE
) {
6826 if (!btf_is_kernel(reg
->btf
)) {
6827 verbose(env
, "verifier internal error: reg->btf must be kernel btf\n");
6830 ret
= env
->ops
->btf_struct_access(&env
->log
, reg
, off
, size
);
6832 /* Writes are permitted with default btf_struct_access for
6833 * program allocated objects (which always have ref_obj_id > 0),
6834 * but not for untrusted PTR_TO_BTF_ID | MEM_ALLOC.
6836 if (atype
!= BPF_READ
&& !type_is_ptr_alloc_obj(reg
->type
)) {
6837 verbose(env
, "only read is supported\n");
6841 if (type_is_alloc(reg
->type
) && !type_is_non_owning_ref(reg
->type
) &&
6842 !(reg
->type
& MEM_RCU
) && !reg
->ref_obj_id
) {
6843 verbose(env
, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
6847 ret
= btf_struct_access(&env
->log
, reg
, off
, size
, atype
, &btf_id
, &flag
, &field_name
);
6852 /* For raw_tp progs, we allow dereference of PTR_MAYBE_NULL
6853 * trusted PTR_TO_BTF_ID, these are the ones that are possibly
6854 * arguments to the raw_tp. Since internal checks in for trusted
6855 * reg in check_ptr_to_btf_access would consider PTR_MAYBE_NULL
6856 * modifier as problematic, mask it out temporarily for the
6857 * check. Don't apply this to pointers with ref_obj_id > 0, as
6858 * those won't be raw_tp args.
6860 * We may end up applying this relaxation to other trusted
6861 * PTR_TO_BTF_ID with maybe null flag, since we cannot
6862 * distinguish PTR_MAYBE_NULL tagged for arguments vs normal
6863 * tagging, but that should expand allowed behavior, and not
6864 * cause regression for existing behavior.
6866 mask
= mask_raw_tp_reg(env
, reg
);
6867 if (ret
!= PTR_TO_BTF_ID
) {
6870 } else if (type_flag(reg
->type
) & PTR_UNTRUSTED
) {
6871 /* If this is an untrusted pointer, all pointers formed by walking it
6872 * also inherit the untrusted flag.
6874 flag
= PTR_UNTRUSTED
;
6876 } else if (is_trusted_reg(reg
) || is_rcu_reg(reg
)) {
6877 /* By default any pointer obtained from walking a trusted pointer is no
6878 * longer trusted, unless the field being accessed has explicitly been
6879 * marked as inheriting its parent's state of trust (either full or RCU).
6881 * 'cgroups' pointer is untrusted if task->cgroups dereference
6882 * happened in a sleepable program outside of bpf_rcu_read_lock()
6883 * section. In a non-sleepable program it's trusted while in RCU CS (aka MEM_RCU).
6884 * Note bpf_rcu_read_unlock() converts MEM_RCU pointers to PTR_UNTRUSTED.
6886 * A regular RCU-protected pointer with __rcu tag can also be deemed
6887 * trusted if we are in an RCU CS. Such pointer can be NULL.
6889 if (type_is_trusted(env
, reg
, field_name
, btf_id
)) {
6890 flag
|= PTR_TRUSTED
;
6891 } else if (type_is_trusted_or_null(env
, reg
, field_name
, btf_id
)) {
6892 flag
|= PTR_TRUSTED
| PTR_MAYBE_NULL
;
6893 } else if (in_rcu_cs(env
) && !type_may_be_null(reg
->type
)) {
6894 if (type_is_rcu(env
, reg
, field_name
, btf_id
)) {
6895 /* ignore __rcu tag and mark it MEM_RCU */
6897 } else if (flag
& MEM_RCU
||
6898 type_is_rcu_or_null(env
, reg
, field_name
, btf_id
)) {
6899 /* __rcu tagged pointers can be NULL */
6900 flag
|= MEM_RCU
| PTR_MAYBE_NULL
;
6902 /* We always trust them */
6903 if (type_is_rcu_or_null(env
, reg
, field_name
, btf_id
) &&
6904 flag
& PTR_UNTRUSTED
)
6905 flag
&= ~PTR_UNTRUSTED
;
6906 } else if (flag
& (MEM_PERCPU
| MEM_USER
)) {
6909 /* walking unknown pointers yields old deprecated PTR_TO_BTF_ID */
6910 clear_trusted_flags(&flag
);
6914 * If not in RCU CS or MEM_RCU pointer can be NULL then
6915 * aggressively mark as untrusted otherwise such
6916 * pointers will be plain PTR_TO_BTF_ID without flags
6917 * and will be allowed to be passed into helpers for
6920 flag
= PTR_UNTRUSTED
;
6923 /* Old compat. Deprecated */
6924 clear_trusted_flags(&flag
);
6927 if (atype
== BPF_READ
&& value_regno
>= 0) {
6928 mark_btf_ld_reg(env
, regs
, value_regno
, ret
, reg
->btf
, btf_id
, flag
);
6929 /* We've assigned a new type to regno, so don't undo masking. */
6930 if (regno
== value_regno
)
6933 unmask_raw_tp_reg(reg
, mask
);
6938 static int check_ptr_to_map_access(struct bpf_verifier_env
*env
,
6939 struct bpf_reg_state
*regs
,
6940 int regno
, int off
, int size
,
6941 enum bpf_access_type atype
,
6944 struct bpf_reg_state
*reg
= regs
+ regno
;
6945 struct bpf_map
*map
= reg
->map_ptr
;
6946 struct bpf_reg_state map_reg
;
6947 enum bpf_type_flag flag
= 0;
6948 const struct btf_type
*t
;
6954 verbose(env
, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
6958 if (!map
->ops
->map_btf_id
|| !*map
->ops
->map_btf_id
) {
6959 verbose(env
, "map_ptr access not supported for map type %d\n",
6964 t
= btf_type_by_id(btf_vmlinux
, *map
->ops
->map_btf_id
);
6965 tname
= btf_name_by_offset(btf_vmlinux
, t
->name_off
);
6967 if (!env
->allow_ptr_leaks
) {
6969 "'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
6975 verbose(env
, "R%d is %s invalid negative access: off=%d\n",
6980 if (atype
!= BPF_READ
) {
6981 verbose(env
, "only read from %s is supported\n", tname
);
6985 /* Simulate access to a PTR_TO_BTF_ID */
6986 memset(&map_reg
, 0, sizeof(map_reg
));
6987 mark_btf_ld_reg(env
, &map_reg
, 0, PTR_TO_BTF_ID
, btf_vmlinux
, *map
->ops
->map_btf_id
, 0);
6988 ret
= btf_struct_access(&env
->log
, &map_reg
, off
, size
, atype
, &btf_id
, &flag
, NULL
);
6992 if (value_regno
>= 0)
6993 mark_btf_ld_reg(env
, regs
, value_regno
, ret
, btf_vmlinux
, btf_id
, flag
);
/* Check that the stack access at the given offset is within bounds. The
 * maximum valid offset is -1.
 *
 * The minimum valid offset is -MAX_BPF_STACK for writes, and
 * -state->allocated_stack for reads.
 */
static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
					  s64 off,
					  struct bpf_func_state *state,
					  enum bpf_access_type t)
{
	int min_valid_off;

	if (t == BPF_WRITE || env->allow_uninit_stack)
		min_valid_off = -MAX_BPF_STACK;
	else
		min_valid_off = -state->allocated_stack;

	if (off < min_valid_off || off > -1)
		return -EACCES;
	return 0;
}
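/* Worked example: with MAX_BPF_STACK of 512 a write at off = -8 or off = -512
 * is accepted, while off = 0 (at or above the frame) or off = -520 is
 * rejected; a read must additionally stay within the already grown
 * allocated_stack unless allow_uninit_stack is set.
 */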
7021 /* Check that the stack access at 'regno + off' falls within the maximum stack
7024 * 'off' includes `regno->offset`, but not its dynamic part (if any).
7026 static int check_stack_access_within_bounds(
7027 struct bpf_verifier_env
*env
,
7028 int regno
, int off
, int access_size
,
7029 enum bpf_access_src src
, enum bpf_access_type type
)
7031 struct bpf_reg_state
*regs
= cur_regs(env
);
7032 struct bpf_reg_state
*reg
= regs
+ regno
;
7033 struct bpf_func_state
*state
= func(env
, reg
);
7034 s64 min_off
, max_off
;
7038 if (src
== ACCESS_HELPER
)
7039 /* We don't know if helpers are reading or writing (or both). */
7040 err_extra
= " indirect access to";
7041 else if (type
== BPF_READ
)
7042 err_extra
= " read from";
7044 err_extra
= " write to";
7046 if (tnum_is_const(reg
->var_off
)) {
7047 min_off
= (s64
)reg
->var_off
.value
+ off
;
7048 max_off
= min_off
+ access_size
;
7050 if (reg
->smax_value
>= BPF_MAX_VAR_OFF
||
7051 reg
->smin_value
<= -BPF_MAX_VAR_OFF
) {
7052 verbose(env
, "invalid unbounded variable-offset%s stack R%d\n",
7056 min_off
= reg
->smin_value
+ off
;
7057 max_off
= reg
->smax_value
+ off
+ access_size
;
7060 err
= check_stack_slot_within_bounds(env
, min_off
, state
, type
);
7061 if (!err
&& max_off
> 0)
7062 err
= -EINVAL
; /* out of stack access into non-negative offsets */
7063 if (!err
&& access_size
< 0)
7064 /* access_size should not be negative (or overflow an int); others checks
7065 * along the way should have prevented such an access.
7067 err
= -EFAULT
; /* invalid negative access size; integer overflow? */
7070 if (tnum_is_const(reg
->var_off
)) {
7071 verbose(env
, "invalid%s stack R%d off=%d size=%d\n",
7072 err_extra
, regno
, off
, access_size
);
7076 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
7077 verbose(env
, "invalid variable-offset%s stack R%d var_off=%s off=%d size=%d\n",
7078 err_extra
, regno
, tn_buf
, off
, access_size
);
7083 /* Note that there is no stack access with offset zero, so the needed stack
7084 * size is -min_off, not -min_off+1.
7086 return grow_stack_state(env
, state
, -min_off
/* size */);
static bool get_func_retval_range(struct bpf_prog *prog,
				  struct bpf_retval_range *range)
{
	if (prog->type == BPF_PROG_TYPE_LSM &&
	    prog->expected_attach_type == BPF_LSM_MAC &&
	    !bpf_lsm_get_retval_range(prog, range)) {
		return true;
	}
	return false;
}
7100 /* check whether memory at (regno + off) is accessible for t = (read | write)
7101 * if t==write, value_regno is a register which value is stored into memory
7102 * if t==read, value_regno is a register which will receive the value from memory
7103 * if t==write && value_regno==-1, some unknown value is stored into memory
7104 * if t==read && value_regno==-1, don't care what we read from memory
7106 static int check_mem_access(struct bpf_verifier_env
*env
, int insn_idx
, u32 regno
,
7107 int off
, int bpf_size
, enum bpf_access_type t
,
7108 int value_regno
, bool strict_alignment_once
, bool is_ldsx
)
7110 struct bpf_reg_state
*regs
= cur_regs(env
);
7111 struct bpf_reg_state
*reg
= regs
+ regno
;
7114 size
= bpf_size_to_bytes(bpf_size
);
7118 /* alignment checks will add in reg->off themselves */
7119 err
= check_ptr_alignment(env
, reg
, off
, size
, strict_alignment_once
);
7123 /* for access checks, reg->off is just part of off */
7126 if (reg
->type
== PTR_TO_MAP_KEY
) {
7127 if (t
== BPF_WRITE
) {
7128 verbose(env
, "write to change key R%d not allowed\n", regno
);
7132 err
= check_mem_region_access(env
, regno
, off
, size
,
7133 reg
->map_ptr
->key_size
, false);
7136 if (value_regno
>= 0)
7137 mark_reg_unknown(env
, regs
, value_regno
);
7138 } else if (reg
->type
== PTR_TO_MAP_VALUE
) {
7139 struct btf_field
*kptr_field
= NULL
;
7141 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
7142 is_pointer_value(env
, value_regno
)) {
7143 verbose(env
, "R%d leaks addr into map\n", value_regno
);
7146 err
= check_map_access_type(env
, regno
, off
, size
, t
);
7149 err
= check_map_access(env
, regno
, off
, size
, false, ACCESS_DIRECT
);
7152 if (tnum_is_const(reg
->var_off
))
7153 kptr_field
= btf_record_find(reg
->map_ptr
->record
,
7154 off
+ reg
->var_off
.value
, BPF_KPTR
| BPF_UPTR
);
7156 err
= check_map_kptr_access(env
, regno
, value_regno
, insn_idx
, kptr_field
);
7157 } else if (t
== BPF_READ
&& value_regno
>= 0) {
7158 struct bpf_map
*map
= reg
->map_ptr
;
7160 /* if map is read-only, track its contents as scalars */
7161 if (tnum_is_const(reg
->var_off
) &&
7162 bpf_map_is_rdonly(map
) &&
7163 map
->ops
->map_direct_value_addr
) {
7164 int map_off
= off
+ reg
->var_off
.value
;
7167 err
= bpf_map_direct_read(map
, map_off
, size
,
7172 regs
[value_regno
].type
= SCALAR_VALUE
;
7173 __mark_reg_known(®s
[value_regno
], val
);
7175 mark_reg_unknown(env
, regs
, value_regno
);
7178 } else if (base_type(reg
->type
) == PTR_TO_MEM
) {
7179 bool rdonly_mem
= type_is_rdonly_mem(reg
->type
);
7181 if (type_may_be_null(reg
->type
)) {
7182 verbose(env
, "R%d invalid mem access '%s'\n", regno
,
7183 reg_type_str(env
, reg
->type
));
7187 if (t
== BPF_WRITE
&& rdonly_mem
) {
7188 verbose(env
, "R%d cannot write into %s\n",
7189 regno
, reg_type_str(env
, reg
->type
));
7193 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
7194 is_pointer_value(env
, value_regno
)) {
7195 verbose(env
, "R%d leaks addr into mem\n", value_regno
);
7199 err
= check_mem_region_access(env
, regno
, off
, size
,
7200 reg
->mem_size
, false);
7201 if (!err
&& value_regno
>= 0 && (t
== BPF_READ
|| rdonly_mem
))
7202 mark_reg_unknown(env
, regs
, value_regno
);
7203 } else if (reg
->type
== PTR_TO_CTX
) {
7204 bool is_retval
= false;
7205 struct bpf_retval_range range
;
7206 enum bpf_reg_type reg_type
= SCALAR_VALUE
;
7207 struct btf
*btf
= NULL
;
7210 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
7211 is_pointer_value(env
, value_regno
)) {
7212 verbose(env
, "R%d leaks addr into ctx\n", value_regno
);
7216 err
= check_ptr_off_reg(env
, reg
, regno
);
7220 err
= check_ctx_access(env
, insn_idx
, off
, size
, t
, ®_type
, &btf
,
7221 &btf_id
, &is_retval
, is_ldsx
);
7223 verbose_linfo(env
, insn_idx
, "; ");
7224 if (!err
&& t
== BPF_READ
&& value_regno
>= 0) {
7225 /* ctx access returns either a scalar, or a
7226 * PTR_TO_PACKET[_META,_END]. In the latter
7227 * case, we know the offset is zero.
7229 if (reg_type
== SCALAR_VALUE
) {
7230 if (is_retval
&& get_func_retval_range(env
->prog
, &range
)) {
7231 err
= __mark_reg_s32_range(env
, regs
, value_regno
,
7232 range
.minval
, range
.maxval
);
7236 mark_reg_unknown(env
, regs
, value_regno
);
7239 mark_reg_known_zero(env
, regs
,
7241 if (type_may_be_null(reg_type
))
7242 regs
[value_regno
].id
= ++env
->id_gen
;
7243 /* A load of ctx field could have different
7244 * actual load size with the one encoded in the
7245 * insn. When the dst is PTR, it is for sure not
7248 regs
[value_regno
].subreg_def
= DEF_NOT_SUBREG
;
7249 if (base_type(reg_type
) == PTR_TO_BTF_ID
) {
7250 regs
[value_regno
].btf
= btf
;
7251 regs
[value_regno
].btf_id
= btf_id
;
7254 regs
[value_regno
].type
= reg_type
;
7257 } else if (reg
->type
== PTR_TO_STACK
) {
7258 /* Basic bounds checks. */
7259 err
= check_stack_access_within_bounds(env
, regno
, off
, size
, ACCESS_DIRECT
, t
);
7264 err
= check_stack_read(env
, regno
, off
, size
,
7267 err
= check_stack_write(env
, regno
, off
, size
,
7268 value_regno
, insn_idx
);
7269 } else if (reg_is_pkt_pointer(reg
)) {
7270 if (t
== BPF_WRITE
&& !may_access_direct_pkt_data(env
, NULL
, t
)) {
7271 verbose(env
, "cannot write into packet\n");
7274 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
7275 is_pointer_value(env
, value_regno
)) {
7276 verbose(env
, "R%d leaks addr into packet\n",
7280 err
= check_packet_access(env
, regno
, off
, size
, false);
7281 if (!err
&& t
== BPF_READ
&& value_regno
>= 0)
7282 mark_reg_unknown(env
, regs
, value_regno
);
7283 } else if (reg
->type
== PTR_TO_FLOW_KEYS
) {
7284 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
7285 is_pointer_value(env
, value_regno
)) {
7286 verbose(env
, "R%d leaks addr into flow keys\n",
7291 err
= check_flow_keys_access(env
, off
, size
);
7292 if (!err
&& t
== BPF_READ
&& value_regno
>= 0)
7293 mark_reg_unknown(env
, regs
, value_regno
);
7294 } else if (type_is_sk_pointer(reg
->type
)) {
7295 if (t
== BPF_WRITE
) {
7296 verbose(env
, "R%d cannot write into %s\n",
7297 regno
, reg_type_str(env
, reg
->type
));
7300 err
= check_sock_access(env
, insn_idx
, regno
, off
, size
, t
);
7301 if (!err
&& value_regno
>= 0)
7302 mark_reg_unknown(env
, regs
, value_regno
);
7303 } else if (reg
->type
== PTR_TO_TP_BUFFER
) {
7304 err
= check_tp_buffer_access(env
, reg
, regno
, off
, size
);
7305 if (!err
&& t
== BPF_READ
&& value_regno
>= 0)
7306 mark_reg_unknown(env
, regs
, value_regno
);
7307 } else if (base_type(reg
->type
) == PTR_TO_BTF_ID
&&
7308 (mask_raw_tp_reg_cond(env
, reg
) || !type_may_be_null(reg
->type
))) {
7309 err
= check_ptr_to_btf_access(env
, regs
, regno
, off
, size
, t
,
7311 } else if (reg
->type
== CONST_PTR_TO_MAP
) {
7312 err
= check_ptr_to_map_access(env
, regs
, regno
, off
, size
, t
,
7314 } else if (base_type(reg
->type
) == PTR_TO_BUF
) {
7315 bool rdonly_mem
= type_is_rdonly_mem(reg
->type
);
7319 if (t
== BPF_WRITE
) {
7320 verbose(env
, "R%d cannot write into %s\n",
7321 regno
, reg_type_str(env
, reg
->type
));
7324 max_access
= &env
->prog
->aux
->max_rdonly_access
;
7326 max_access
= &env
->prog
->aux
->max_rdwr_access
;
7329 err
= check_buffer_access(env
, reg
, regno
, off
, size
, false,
7332 if (!err
&& value_regno
>= 0 && (rdonly_mem
|| t
== BPF_READ
))
7333 mark_reg_unknown(env
, regs
, value_regno
);
7334 } else if (reg
->type
== PTR_TO_ARENA
) {
7335 if (t
== BPF_READ
&& value_regno
>= 0)
7336 mark_reg_unknown(env
, regs
, value_regno
);
7338 verbose(env
, "R%d invalid mem access '%s'\n", regno
,
7339 reg_type_str(env
, reg
->type
));
7343 if (!err
&& size
< BPF_REG_SIZE
&& value_regno
>= 0 && t
== BPF_READ
&&
7344 regs
[value_regno
].type
== SCALAR_VALUE
) {
7346 /* b/h/w load zero-extends, mark upper bits as known 0 */
7347 coerce_reg_to_size(®s
[value_regno
], size
);
7349 coerce_reg_to_size_sx(®s
[value_regno
], size
);
7354 static int save_aux_ptr_type(struct bpf_verifier_env
*env
, enum bpf_reg_type type
,
7355 bool allow_trust_mismatch
);
7357 static int check_atomic(struct bpf_verifier_env
*env
, int insn_idx
, struct bpf_insn
*insn
)
7362 switch (insn
->imm
) {
7364 case BPF_ADD
| BPF_FETCH
:
7366 case BPF_AND
| BPF_FETCH
:
7368 case BPF_OR
| BPF_FETCH
:
7370 case BPF_XOR
| BPF_FETCH
:
7375 verbose(env
, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn
->imm
);
7379 if (BPF_SIZE(insn
->code
) != BPF_W
&& BPF_SIZE(insn
->code
) != BPF_DW
) {
7380 verbose(env
, "invalid atomic operand size\n");
7384 /* check src1 operand */
7385 err
= check_reg_arg(env
, insn
->src_reg
, SRC_OP
);
7389 /* check src2 operand */
7390 err
= check_reg_arg(env
, insn
->dst_reg
, SRC_OP
);
7394 if (insn
->imm
== BPF_CMPXCHG
) {
7395 /* Check comparison of R0 with memory location */
7396 const u32 aux_reg
= BPF_REG_0
;
7398 err
= check_reg_arg(env
, aux_reg
, SRC_OP
);
7402 if (is_pointer_value(env
, aux_reg
)) {
7403 verbose(env
, "R%d leaks addr into mem\n", aux_reg
);
7408 if (is_pointer_value(env
, insn
->src_reg
)) {
7409 verbose(env
, "R%d leaks addr into mem\n", insn
->src_reg
);
7413 if (is_ctx_reg(env
, insn
->dst_reg
) ||
7414 is_pkt_reg(env
, insn
->dst_reg
) ||
7415 is_flow_key_reg(env
, insn
->dst_reg
) ||
7416 is_sk_reg(env
, insn
->dst_reg
) ||
7417 (is_arena_reg(env
, insn
->dst_reg
) && !bpf_jit_supports_insn(insn
, true))) {
7418 verbose(env
, "BPF_ATOMIC stores into R%d %s is not allowed\n",
7420 reg_type_str(env
, reg_state(env
, insn
->dst_reg
)->type
));
7424 if (insn
->imm
& BPF_FETCH
) {
7425 if (insn
->imm
== BPF_CMPXCHG
)
7426 load_reg
= BPF_REG_0
;
7428 load_reg
= insn
->src_reg
;
7430 /* check and record load of old value */
7431 err
= check_reg_arg(env
, load_reg
, DST_OP
);
7435 /* This instruction accesses a memory location but doesn't
7436 * actually load it into a register.
7441 /* Check whether we can read the memory, with second call for fetch
7442 * case to simulate the register fill.
7444 err
= check_mem_access(env
, insn_idx
, insn
->dst_reg
, insn
->off
,
7445 BPF_SIZE(insn
->code
), BPF_READ
, -1, true, false);
7446 if (!err
&& load_reg
>= 0)
7447 err
= check_mem_access(env
, insn_idx
, insn
->dst_reg
, insn
->off
,
7448 BPF_SIZE(insn
->code
), BPF_READ
, load_reg
,
7453 if (is_arena_reg(env
, insn
->dst_reg
)) {
7454 err
= save_aux_ptr_type(env
, PTR_TO_ARENA
, false);
7458 /* Check whether we can write into the same memory. */
7459 err
= check_mem_access(env
, insn_idx
, insn
->dst_reg
, insn
->off
,
7460 BPF_SIZE(insn
->code
), BPF_WRITE
, -1, true, false);
7466 /* When register 'regno' is used to read the stack (either directly or through
7467 * a helper function) make sure that it's within stack boundary and, depending
7468 * on the access type and privileges, that all elements of the stack are
7471 * 'off' includes 'regno->off', but not its dynamic part (if any).
7473 * All registers that have been spilled on the stack in the slots within the
7474 * read offsets are marked as read.
7476 static int check_stack_range_initialized(
7477 struct bpf_verifier_env
*env
, int regno
, int off
,
7478 int access_size
, bool zero_size_allowed
,
7479 enum bpf_access_src type
, struct bpf_call_arg_meta
*meta
)
7481 struct bpf_reg_state
*reg
= reg_state(env
, regno
);
7482 struct bpf_func_state
*state
= func(env
, reg
);
7483 int err
, min_off
, max_off
, i
, j
, slot
, spi
;
7484 char *err_extra
= type
== ACCESS_HELPER
? " indirect" : "";
7485 enum bpf_access_type bounds_check_type
;
7486 /* Some accesses can write anything into the stack, others are
7489 bool clobber
= false;
7491 if (access_size
== 0 && !zero_size_allowed
) {
7492 verbose(env
, "invalid zero-sized read\n");
7496 if (type
== ACCESS_HELPER
) {
7497 /* The bounds checks for writes are more permissive than for
7498 * reads. However, if raw_mode is not set, we'll do extra
7501 bounds_check_type
= BPF_WRITE
;
7504 bounds_check_type
= BPF_READ
;
7506 err
= check_stack_access_within_bounds(env
, regno
, off
, access_size
,
7507 type
, bounds_check_type
);
7512 if (tnum_is_const(reg
->var_off
)) {
7513 min_off
= max_off
= reg
->var_off
.value
+ off
;
7515 /* Variable offset is prohibited for unprivileged mode for
7516 * simplicity since it requires corresponding support in
7517 * Spectre masking for stack ALU.
7518 * See also retrieve_ptr_limit().
7520 if (!env
->bypass_spec_v1
) {
7523 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
7524 verbose(env
, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
7525 regno
, err_extra
, tn_buf
);
7528 /* Only initialized buffer on stack is allowed to be accessed
7529 * with variable offset. With uninitialized buffer it's hard to
7530 * guarantee that whole memory is marked as initialized on
7531 * helper return since specific bounds are unknown what may
7532 * cause uninitialized stack leaking.
7534 if (meta
&& meta
->raw_mode
)
7537 min_off
= reg
->smin_value
+ off
;
7538 max_off
= reg
->smax_value
+ off
;
7541 if (meta
&& meta
->raw_mode
) {
7542 /* Ensure we won't be overwriting dynptrs when simulating byte
7543 * by byte access in check_helper_call using meta.access_size.
7544 * This would be a problem if we have a helper in the future
7547 * helper(uninit_mem, len, dynptr)
7549 * Now, uninint_mem may overlap with dynptr pointer. Hence, it
7550 * may end up writing to dynptr itself when touching memory from
7551 * arg 1. This can be relaxed on a case by case basis for known
7552 * safe cases, but reject due to the possibilitiy of aliasing by
7555 for (i
= min_off
; i
< max_off
+ access_size
; i
++) {
7556 int stack_off
= -i
- 1;
7559 /* raw_mode may write past allocated_stack */
7560 if (state
->allocated_stack
<= stack_off
)
7562 if (state
->stack
[spi
].slot_type
[stack_off
% BPF_REG_SIZE
] == STACK_DYNPTR
) {
7563 verbose(env
, "potential write to dynptr at off=%d disallowed\n", i
);
7567 meta
->access_size
= access_size
;
7568 meta
->regno
= regno
;
7572 for (i
= min_off
; i
< max_off
+ access_size
; i
++) {
7576 spi
= slot
/ BPF_REG_SIZE
;
7577 if (state
->allocated_stack
<= slot
) {
7578 verbose(env
, "verifier bug: allocated_stack too small");
7582 stype
= &state
->stack
[spi
].slot_type
[slot
% BPF_REG_SIZE
];
7583 if (*stype
== STACK_MISC
)
7585 if ((*stype
== STACK_ZERO
) ||
7586 (*stype
== STACK_INVALID
&& env
->allow_uninit_stack
)) {
7588 /* helper can write anything into the stack */
7589 *stype
= STACK_MISC
;
7594 if (is_spilled_reg(&state
->stack
[spi
]) &&
7595 (state
->stack
[spi
].spilled_ptr
.type
== SCALAR_VALUE
||
7596 env
->allow_ptr_leaks
)) {
7598 __mark_reg_unknown(env
, &state
->stack
[spi
].spilled_ptr
);
7599 for (j
= 0; j
< BPF_REG_SIZE
; j
++)
7600 scrub_spilled_slot(&state
->stack
[spi
].slot_type
[j
]);
7605 if (tnum_is_const(reg
->var_off
)) {
7606 verbose(env
, "invalid%s read from stack R%d off %d+%d size %d\n",
7607 err_extra
, regno
, min_off
, i
- min_off
, access_size
);
7611 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
7612 verbose(env
, "invalid%s read from stack R%d var_off %s+%d size %d\n",
7613 err_extra
, regno
, tn_buf
, i
- min_off
, access_size
);
7617 /* reading any byte out of 8-byte 'spill_slot' will cause
7618 * the whole slot to be marked as 'read'
7620 mark_reg_read(env
, &state
->stack
[spi
].spilled_ptr
,
7621 state
->stack
[spi
].spilled_ptr
.parent
,
7623 /* We do not set REG_LIVE_WRITTEN for stack slot, as we can not
7624 * be sure that whether stack slot is written to or not. Hence,
7625 * we must still conservatively propagate reads upwards even if
7626 * helper may write to the entire memory range.
static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
				   int access_size, enum bpf_access_type access_type,
				   bool zero_size_allowed,
				   struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	u32 *max_access;
	switch (base_type(reg->type)) {
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
		return check_packet_access(env, regno, reg->off, access_size,
					   zero_size_allowed);
	case PTR_TO_MAP_KEY:
		if (access_type == BPF_WRITE) {
			verbose(env, "R%d cannot write into %s\n", regno,
				reg_type_str(env, reg->type));
			return -EACCES;
		}
		return check_mem_region_access(env, regno, reg->off, access_size,
					       reg->map_ptr->key_size, false);
	case PTR_TO_MAP_VALUE:
		if (check_map_access_type(env, regno, reg->off, access_size, access_type))
			return -EACCES;
		return check_map_access(env, regno, reg->off, access_size,
					zero_size_allowed, ACCESS_HELPER);
	case PTR_TO_MEM:
		if (type_is_rdonly_mem(reg->type)) {
			if (access_type == BPF_WRITE) {
				verbose(env, "R%d cannot write into %s\n", regno,
					reg_type_str(env, reg->type));
				return -EACCES;
			}
		}
		return check_mem_region_access(env, regno, reg->off,
					       access_size, reg->mem_size,
					       zero_size_allowed);
	case PTR_TO_BUF:
		if (type_is_rdonly_mem(reg->type)) {
			if (access_type == BPF_WRITE) {
				verbose(env, "R%d cannot write into %s\n", regno,
					reg_type_str(env, reg->type));
				return -EACCES;
			}

			max_access = &env->prog->aux->max_rdonly_access;
		} else {
			max_access = &env->prog->aux->max_rdwr_access;
		}
		return check_buffer_access(env, reg, regno, reg->off,
					   access_size, zero_size_allowed,
					   max_access);
	case PTR_TO_STACK:
		return check_stack_range_initialized(
				env,
				regno, reg->off, access_size,
				zero_size_allowed, ACCESS_HELPER, meta);
	case PTR_TO_BTF_ID:
		return check_ptr_to_btf_access(env, regs, regno, reg->off,
					       access_size, BPF_READ, -1);
	case PTR_TO_CTX:
		/* in case the function doesn't know how to access the context,
		 * (because we are in a program of type SYSCALL for example), we
		 * can not statically check its size.
		 * Dynamically check it now.
		 */
		if (!env->ops->convert_ctx_access) {
			int offset = access_size - 1;

			/* Allow zero-byte read from PTR_TO_CTX */
			if (access_size == 0)
				return zero_size_allowed ? 0 : -EACCES;

			return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
						access_type, -1, false, false);
		}

		fallthrough;
	default: /* scalar_value or invalid ptr */
		/* Allow zero-byte read from NULL, regardless of pointer type */
		if (zero_size_allowed && access_size == 0 &&
		    register_is_null(reg))
			return 0;

		verbose(env, "R%d type=%s ", regno,
			reg_type_str(env, reg->type));
		verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
		return -EACCES;
	}
}
/* verify arguments to helpers or kfuncs consisting of a pointer and an access
 * size.
 *
 * @regno is the register containing the access size. regno-1 is the register
 * containing the pointer.
 */
static int check_mem_size_reg(struct bpf_verifier_env *env,
			      struct bpf_reg_state *reg, u32 regno,
			      enum bpf_access_type access_type,
			      bool zero_size_allowed,
			      struct bpf_call_arg_meta *meta)
{
	int err;

	/* This is used to refine r0 return value bounds for helpers
	 * that enforce this value as an upper bound on return values.
	 * See do_refine_retval_range() for helpers that can refine
	 * the return value. The C type of the helper argument is u32, so we
	 * pull the register bound from umax_value; if it can be negative the
	 * verifier errors out. Only upper bounds can be learned because the
	 * retval is an int type and negative retvals are allowed.
	 */
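	/* Illustrative sketch (not from the original source): for a call such as
	 *
	 *	bpf_probe_read_kernel_str(buf, sz, src);
	 *
	 * where the verifier knows sz is in [1, 64], msize_max_value records 64
	 * so that do_refine_retval_range() can later clamp r0's upper bound to
	 * 64 as well.
	 */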
	meta->msize_max_value = reg->umax_value;

	/* The register is SCALAR_VALUE; the access check happens using
	 * its boundaries. For unprivileged variable accesses, disable
	 * raw mode so that the program is required to initialize all
	 * the memory that the helper could just partially fill up.
	 */
	if (!tnum_is_const(reg->var_off))
		meta = NULL;

	if (reg->smin_value < 0) {
		verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
			regno);
		return -EACCES;
	}

	if (reg->umin_value == 0 && !zero_size_allowed) {
		verbose(env, "R%d invalid zero-sized read: u64=[%lld,%lld]\n",
			regno, reg->umin_value, reg->umax_value);
		return -EACCES;
	}

	if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
		verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
			regno);
		return -EACCES;
	}
	err = check_helper_mem_access(env, regno - 1, reg->umax_value,
				      access_type, zero_size_allowed, meta);
	if (!err)
		err = mark_chain_precision(env, regno);
	return err;
}
static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
			 u32 regno, u32 mem_size)
{
	bool may_be_null = type_may_be_null(reg->type);
	struct bpf_reg_state saved_reg;
	int err;

	if (register_is_null(reg))
		return 0;

	/* Assuming that the register contains a value check if the memory
	 * access is safe. Temporarily save and restore the register's state as
	 * the conversion shouldn't be visible to a caller.
	 */
	if (may_be_null) {
		saved_reg = *reg;
		mark_ptr_not_null_reg(reg);
	}

	err = check_helper_mem_access(env, regno, mem_size, BPF_READ, true, NULL);
	err = err ?: check_helper_mem_access(env, regno, mem_size, BPF_WRITE, true, NULL);

	if (may_be_null)
		*reg = saved_reg;

	return err;
}
static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				    u32 regno)
{
	struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
	bool may_be_null = type_may_be_null(mem_reg->type);
	struct bpf_reg_state saved_reg;
	struct bpf_call_arg_meta meta;
	int err;

	WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);

	memset(&meta, 0, sizeof(meta));

	if (may_be_null) {
		saved_reg = *mem_reg;
		mark_ptr_not_null_reg(mem_reg);
	}

	err = check_mem_size_reg(env, reg, regno, BPF_READ, true, &meta);
	err = err ?: check_mem_size_reg(env, reg, regno, BPF_WRITE, true, &meta);

	if (may_be_null)
		*mem_reg = saved_reg;

	return err;
}
/* Implementation details:
 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL.
 * bpf_obj_new returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL.
 * Two bpf_map_lookups (even with the same key) will have different reg->id.
 * Two separate bpf_obj_new will also have different reg->id.
 * For traditional PTR_TO_MAP_VALUE or PTR_TO_BTF_ID | MEM_ALLOC, the verifier
 * clears reg->id after value_or_null->value transition, since the verifier only
 * cares about the range of access to valid map value pointer and doesn't care
 * about actual address of the map element.
 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
 * reg->id > 0 after value_or_null->value transition. By doing so
 * two bpf_map_lookups will be considered two different pointers that
 * point to different bpf_spin_locks. Likewise for pointers to allocated objects
 * returned from bpf_obj_new.
 * The verifier allows taking only one bpf_spin_lock at a time to avoid
 * dead-locks.
 * Since only one bpf_spin_lock is allowed the checks are simpler than
 * reg_is_refcounted() logic. The verifier needs to remember only
 * one spin_lock instead of array of acquired_refs.
 * cur_func(env)->active_locks remembers which map value element or allocated
 * object got locked and clears it after bpf_spin_unlock.
 */
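/* Illustrative sketch (not from the original source; my_map, struct elem and
 * cnt are hypothetical names): the usage pattern these checks enforce is a
 * single lock, taken and released on the same map value element:
 *
 *	struct elem { struct bpf_spin_lock lock; int cnt; };
 *
 *	struct elem *val = bpf_map_lookup_elem(&my_map, &key);
 *	if (!val)
 *		return 0;
 *	bpf_spin_lock(&val->lock);
 *	val->cnt++;
 *	bpf_spin_unlock(&val->lock);
 */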
static int process_spin_lock(struct bpf_verifier_env *env, int regno,
			     bool is_lock)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	bool is_const = tnum_is_const(reg->var_off);
	struct bpf_func_state *cur = cur_func(env);
	u64 val = reg->var_off.value;
	struct bpf_map *map = NULL;
	struct btf *btf = NULL;
	struct btf_record *rec;
	int err;

	if (!is_const) {
		verbose(env,
			"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
			regno);
		return -EINVAL;
	}
	if (reg->type == PTR_TO_MAP_VALUE) {
		map = reg->map_ptr;
		if (!map->btf) {
			verbose(env,
				"map '%s' has to have BTF in order to use bpf_spin_lock\n",
				map->name);
			return -EINVAL;
		}
	} else {
		btf = reg->btf;
	}

	rec = reg_btf_record(reg);
	if (!btf_record_has_field(rec, BPF_SPIN_LOCK)) {
		verbose(env, "%s '%s' has no valid bpf_spin_lock\n", map ? "map" : "local",
			map ? map->name : "kptr");
		return -EINVAL;
	}
	if (rec->spin_lock_off != val + reg->off) {
		verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock' that is at %d\n",
			val + reg->off, rec->spin_lock_off);
		return -EINVAL;
	}
	if (is_lock) {
		void *ptr;

		if (map)
			ptr = map;
		else
			ptr = btf;

		if (cur->active_locks) {
			verbose(env,
				"Locking two bpf_spin_locks are not allowed\n");
			return -EINVAL;
		}
		err = acquire_lock_state(env, env->insn_idx, REF_TYPE_LOCK, reg->id, ptr);
		if (err < 0) {
			verbose(env, "Failed to acquire lock state\n");
			return err;
		}
	} else {
		void *ptr;

		if (map)
			ptr = map;
		else
			ptr = btf;

		if (!cur->active_locks) {
			verbose(env, "bpf_spin_unlock without taking a lock\n");
			return -EINVAL;
		}

		if (release_lock_state(cur_func(env), REF_TYPE_LOCK, reg->id, ptr)) {
			verbose(env, "bpf_spin_unlock of different lock\n");
			return -EINVAL;
		}

		invalidate_non_owning_refs(env);
	}
	return 0;
}
static int process_timer_func(struct bpf_verifier_env *env, int regno,
			      struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	bool is_const = tnum_is_const(reg->var_off);
	struct bpf_map *map = reg->map_ptr;
	u64 val = reg->var_off.value;

	if (!is_const) {
		verbose(env,
			"R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n",
			regno);
		return -EINVAL;
	}
	if (!map->btf) {
		verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n",
			map->name);
		return -EINVAL;
	}
	if (!btf_record_has_field(map->record, BPF_TIMER)) {
		verbose(env, "map '%s' has no valid bpf_timer\n", map->name);
		return -EINVAL;
	}
	if (map->record->timer_off != val + reg->off) {
		verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
			val + reg->off, map->record->timer_off);
		return -EINVAL;
	}
	if (meta->map_ptr) {
		verbose(env, "verifier bug. Two map pointers in a timer helper\n");
		return -EFAULT;
	}
	meta->map_uid = reg->map_uid;
	meta->map_ptr = map;
	return 0;
}
static int process_wq_func(struct bpf_verifier_env *env, int regno,
			   struct bpf_kfunc_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	struct bpf_map *map = reg->map_ptr;
	u64 val = reg->var_off.value;

	if (map->record->wq_off != val + reg->off) {
		verbose(env, "off %lld doesn't point to 'struct bpf_wq' that is at %d\n",
			val + reg->off, map->record->wq_off);
		return -EINVAL;
	}
	meta->map.uid = reg->map_uid;
	meta->map.ptr = map;
	return 0;
}
static int process_kptr_func(struct bpf_verifier_env *env, int regno,
			     struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	struct btf_field *kptr_field;
	struct bpf_map *map_ptr;
	struct btf_record *rec;
	u32 kptr_off;

	if (type_is_ptr_alloc_obj(reg->type)) {
		rec = reg_btf_record(reg);
	} else { /* PTR_TO_MAP_VALUE */
		map_ptr = reg->map_ptr;
		if (!map_ptr->btf) {
			verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
				map_ptr->name);
			return -EINVAL;
		}
		rec = map_ptr->record;
		meta->map_ptr = map_ptr;
	}

	if (!tnum_is_const(reg->var_off)) {
		verbose(env,
			"R%d doesn't have constant offset. kptr has to be at the constant offset\n",
			regno);
		return -EINVAL;
	}

	if (!btf_record_has_field(rec, BPF_KPTR)) {
		verbose(env, "R%d has no valid kptr\n", regno);
		return -EINVAL;
	}

	kptr_off = reg->off + reg->var_off.value;
	kptr_field = btf_record_find(rec, kptr_off, BPF_KPTR);
	if (!kptr_field) {
		verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
		return -EACCES;
	}
	if (kptr_field->type != BPF_KPTR_REF && kptr_field->type != BPF_KPTR_PERCPU) {
		verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off);
		return -EACCES;
	}
	meta->kptr_field = kptr_field;
	return 0;
}
/* There are two register types representing a bpf_dynptr, one is PTR_TO_STACK
 * which points to a stack slot, and the other is CONST_PTR_TO_DYNPTR.
 *
 * In both cases we deal with the first 8 bytes, but need to mark the next 8
 * bytes as STACK_DYNPTR in case of PTR_TO_STACK. In case of
 * CONST_PTR_TO_DYNPTR, we are guaranteed to get the beginning of the object.
 *
 * Mutability of bpf_dynptr is at two levels, one is at the level of struct
 * bpf_dynptr itself, i.e. whether the helper is receiving a pointer to struct
 * bpf_dynptr or pointer to const struct bpf_dynptr. In the former case, it can
 * mutate the view of the dynptr and also possibly destroy it. In the latter
 * case, it cannot mutate the bpf_dynptr itself but it can still mutate the
 * memory that dynptr points to.
 *
 * The verifier keeps track of both levels of mutation (the bpf_dynptr's in
 * reg->type and the memory's in reg->dynptr.type), but there is no support for
 * readonly dynptr view yet, hence only the first case is tracked and checked.
 *
 * This is consistent with how C applies the const modifier to a struct object,
 * where the pointer itself inside bpf_dynptr becomes const but not what it
 * points to.
 *
 * Helpers which do not mutate the bpf_dynptr set MEM_RDONLY in their argument
 * type, and declare it as 'const struct bpf_dynptr *' in their prototype.
 */
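/* Illustrative sketch (not from the original source): the two argument styles
 * described above roughly correspond to helper prototypes such as
 *
 *	// may mutate or destroy the dynptr view itself:
 *	long bpf_dynptr_from_mem(void *data, u32 size, u64 flags,
 *				 struct bpf_dynptr *ptr);
 *	// leaves the dynptr view alone, but may still write to the
 *	// memory it points to:
 *	long bpf_dynptr_write(const struct bpf_dynptr *dst, u32 offset,
 *			      void *src, u32 len, u64 flags);
 */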
static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn_idx,
			       enum bpf_arg_type arg_type, int clone_ref_obj_id)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	int err;

	if (reg->type != PTR_TO_STACK && reg->type != CONST_PTR_TO_DYNPTR) {
		verbose(env,
			"arg#%d expected pointer to stack or const struct bpf_dynptr\n",
			regno);
		return -EINVAL;
	}

	/* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an
	 * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*):
	 */
	if ((arg_type & (MEM_UNINIT | MEM_RDONLY)) == (MEM_UNINIT | MEM_RDONLY)) {
		verbose(env, "verifier internal error: misconfigured dynptr helper type flags\n");
		return -EFAULT;
	}

	/* MEM_UNINIT - Points to memory that is an appropriate candidate for
	 *		constructing a mutable bpf_dynptr object.
	 *
	 *		Currently, this is only possible with PTR_TO_STACK
	 *		pointing to a region of at least 16 bytes which doesn't
	 *		contain an existing bpf_dynptr.
	 *
	 * MEM_RDONLY - Points to an initialized bpf_dynptr that will not be
	 *		mutated or destroyed. However, the memory it points to
	 *		may be mutated.
	 *
	 * None       - Points to an initialized dynptr that can be mutated and
	 *		destroyed, including mutation of the memory it points
	 *		to.
	 */
	if (arg_type & MEM_UNINIT) {
		int i;

		if (!is_dynptr_reg_valid_uninit(env, reg)) {
			verbose(env, "Dynptr has to be an uninitialized dynptr\n");
			return -EINVAL;
		}

		/* we write BPF_DW bits (8 bytes) at a time */
		for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
			err = check_mem_access(env, insn_idx, regno,
					       i, BPF_DW, BPF_WRITE, -1, false, false);
			if (err)
				return err;
		}

		err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx, clone_ref_obj_id);
	} else /* MEM_RDONLY and None case from above */ {
		/* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */
		if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) {
			verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n");
			return -EINVAL;
		}

		if (!is_dynptr_reg_valid_init(env, reg)) {
			verbose(env,
				"Expected an initialized dynptr as arg #%d\n",
				regno);
			return -EINVAL;
		}

		/* Fold modifiers (in this case, MEM_RDONLY) when checking expected type */
		if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) {
			verbose(env,
				"Expected a dynptr of type %s as arg #%d\n",
				dynptr_type_str(arg_to_dynptr_type(arg_type)), regno);
			return -EINVAL;
		}

		err = mark_dynptr_read(env, reg);
	}
	return err;
}
static u32 iter_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int spi)
{
	struct bpf_func_state *state = func(env, reg);

	return state->stack[spi].spilled_ptr.ref_obj_id;
}

static bool is_iter_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
}

static bool is_iter_new_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & KF_ITER_NEW;
}

static bool is_iter_next_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & KF_ITER_NEXT;
}

static bool is_iter_destroy_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & KF_ITER_DESTROY;
}

static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg_idx,
			      const struct btf_param *arg)
{
	/* btf_check_iter_kfuncs() guarantees that first argument of any iter
	 * kfunc is iter state pointer
	 */
	if (is_iter_kfunc(meta))
		return arg_idx == 0;

	/* iter passed as an argument to a generic kfunc */
	return btf_param_match_suffix(meta->btf, arg, "__iter");
}
static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx,
			    struct bpf_kfunc_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	const struct btf_type *t;
	int spi, err, i, nr_slots, btf_id;

	/* For iter_{new,next,destroy} functions, btf_check_iter_kfuncs()
	 * ensures struct convention, so we wouldn't need to do any BTF
	 * validation here. But given iter state can be passed as a parameter
	 * to any kfunc, if arg has "__iter" suffix, we need to be a bit more
	 * conservative here.
	 */
	btf_id = btf_check_iter_arg(meta->btf, meta->func_proto, regno - 1);
	if (btf_id < 0) {
		verbose(env, "expected valid iter pointer as arg #%d\n", regno);
		return -EINVAL;
	}
	t = btf_type_by_id(meta->btf, btf_id);
	nr_slots = t->size / BPF_REG_SIZE;

	if (is_iter_new_kfunc(meta)) {
		/* bpf_iter_<type>_new() expects pointer to uninit iter state */
		if (!is_iter_reg_valid_uninit(env, reg, nr_slots)) {
			verbose(env, "expected uninitialized iter_%s as arg #%d\n",
				iter_type_str(meta->btf, btf_id), regno);
			return -EINVAL;
		}

		for (i = 0; i < nr_slots * 8; i += BPF_REG_SIZE) {
			err = check_mem_access(env, insn_idx, regno,
					       i, BPF_DW, BPF_WRITE, -1, false, false);
			if (err)
				return err;
		}

		err = mark_stack_slots_iter(env, meta, reg, insn_idx, meta->btf, btf_id, nr_slots);
		if (err)
			return err;
	} else {
		/* iter_next() or iter_destroy(), as well as any kfunc
		 * accepting iter argument, expect initialized iter state
		 */
		err = is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots);
		switch (err) {
		case 0:
			break;
		case -EINVAL:
			verbose(env, "expected an initialized iter_%s as arg #%d\n",
				iter_type_str(meta->btf, btf_id), regno);
			return err;
		case -EPROTO:
			verbose(env, "expected an RCU CS when using %s\n", meta->func_name);
			return err;
		default:
			return err;
		}

		spi = iter_get_spi(env, reg, nr_slots);
		if (spi < 0)
			return spi;

		err = mark_iter_read(env, reg, spi, nr_slots);
		if (err)
			return err;

		/* remember meta->iter info for process_iter_next_call() */
		meta->iter.spi = spi;
		meta->iter.frameno = reg->frameno;
		meta->ref_obj_id = iter_ref_obj_id(env, reg, spi);

		if (is_iter_destroy_kfunc(meta)) {
			err = unmark_stack_slots_iter(env, reg, nr_slots);
			if (err)
				return err;
		}
	}

	return 0;
}
/* Look for a previous loop entry at insn_idx: nearest parent state
 * stopped at insn_idx with callsites matching those in cur->frame.
 */
static struct bpf_verifier_state *find_prev_entry(struct bpf_verifier_env *env,
						  struct bpf_verifier_state *cur,
						  int insn_idx)
{
	struct bpf_verifier_state_list *sl;
	struct bpf_verifier_state *st;

	/* Explored states are pushed in stack order, most recent states come first */
	sl = *explored_state(env, insn_idx);
	for (; sl; sl = sl->next) {
		/* If st->branches != 0 state is a part of current DFS verification path,
		 * hence cur & st form a loop.
		 */
		st = &sl->state;
		if (st->insn_idx == insn_idx && st->branches && same_callsites(st, cur) &&
		    st->dfs_depth < cur->dfs_depth)
			return st;
	}

	return NULL;
}

static void reset_idmap_scratch(struct bpf_verifier_env *env);
static bool regs_exact(const struct bpf_reg_state *rold,
		       const struct bpf_reg_state *rcur,
		       struct bpf_idmap *idmap);

static void maybe_widen_reg(struct bpf_verifier_env *env,
			    struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
			    struct bpf_idmap *idmap)
{
	if (rold->type != SCALAR_VALUE)
		return;
	if (rold->type != rcur->type)
		return;
	if (rold->precise || rcur->precise || regs_exact(rold, rcur, idmap))
		return;
	__mark_reg_unknown(env, rcur);
}

static int widen_imprecise_scalars(struct bpf_verifier_env *env,
				   struct bpf_verifier_state *old,
				   struct bpf_verifier_state *cur)
{
	struct bpf_func_state *fold, *fcur;
	int i, fr;

	reset_idmap_scratch(env);
	for (fr = old->curframe; fr >= 0; fr--) {
		fold = old->frame[fr];
		fcur = cur->frame[fr];

		for (i = 0; i < MAX_BPF_REG; i++)
			maybe_widen_reg(env,
					&fold->regs[i],
					&fcur->regs[i],
					&env->idmap_scratch);

		for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) {
			if (!is_spilled_reg(&fold->stack[i]) ||
			    !is_spilled_reg(&fcur->stack[i]))
				continue;

			maybe_widen_reg(env,
					&fold->stack[i].spilled_ptr,
					&fcur->stack[i].spilled_ptr,
					&env->idmap_scratch);
		}
	}
	return 0;
}

static struct bpf_reg_state *get_iter_from_state(struct bpf_verifier_state *cur_st,
						 struct bpf_kfunc_call_arg_meta *meta)
{
	int iter_frameno = meta->iter.frameno;
	int iter_spi = meta->iter.spi;

	return &cur_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
}
/* process_iter_next_call() is called when verifier gets to iterator's next
 * "method" (e.g., bpf_iter_num_next() for numbers iterator) call. We'll refer
 * to it as just "iter_next()" in comments below.
 *
 * BPF verifier relies on a crucial contract for any iter_next()
 * implementation: it should *eventually* return NULL, and once that happens
 * it should keep returning NULL. That is, once iterator exhausts elements to
 * iterate, it should never reset or spuriously return new elements.
 *
 * With the assumption of such contract, process_iter_next_call() simulates
 * a fork in the verifier state to validate loop logic correctness and safety
 * without having to simulate an infinite amount of iterations.
 *
 * In current state, we first assume that iter_next() returned NULL and
 * iterator state is set to DRAINED (BPF_ITER_STATE_DRAINED). In such
 * conditions we should not form an infinite loop and should eventually reach
 * exit.
 *
 * Besides that, we also fork current state and enqueue it for later
 * verification. In a forked state we keep iterator state as ACTIVE
 * (BPF_ITER_STATE_ACTIVE) and assume non-NULL return from iter_next(). We
 * also bump iteration depth to prevent erroneous infinite loop detection
 * later on (see iter_active_depths_differ() comment for details). In this
 * state we assume that we'll eventually loop back to another iter_next()
 * call (it could be in exactly same location or in some other instruction,
 * it doesn't matter, we don't make any unnecessary assumptions about this,
 * everything revolves around iterator state in a stack slot, not which
 * instruction is calling iter_next()). When that happens, we either will come
 * to iter_next() with equivalent state and can conclude that next iteration
 * will proceed in exactly the same way as we just verified, so it's safe to
 * assume that loop converges. If not, we'll go on another iteration
 * simulation with a different input state, until all possible starting states
 * are validated or we reach maximum number of instructions limit.
 *
 * This way, we will either exhaustively discover all possible input states
 * that iterator loop can start with and eventually will converge, or we'll
 * effectively regress into bounded loop simulation logic and either reach
 * maximum number of instructions if loop is not provably convergent, or there
 * is some statically known limit on number of iterations (e.g., if there is
 * an explicit `if n > 100 then break;` statement somewhere in the loop).
 *
 * Iteration convergence logic in is_state_visited() relies on exact
 * states comparison, which ignores read and precision marks.
 * This is necessary because read and precision marks are not finalized
 * while in the loop. Exact comparison might preclude convergence for
 * simple programs like below:
 *
 *     int i = 0;
 *     while(iter_next(&it))
 *       i++;
 *
 * At each iteration step i++ would produce a new distinct state and
 * eventually instruction processing limit would be reached.
 *
 * To avoid such behavior speculatively forget (widen) range for
 * imprecise scalar registers, if those registers were not precise at the
 * end of the previous iteration and do not match exactly.
 *
 * This is a conservative heuristic that allows verifying a wide range of
 * programs, however it precludes verification of programs that conjure an
 * imprecise value on the first loop iteration and use it as precise on a second.
 * For example, the following safe program would fail to verify:
 *
 *     struct bpf_num_iter it;
 *     int arr[10];
 *     int i = 0, a = 0;
 *     bpf_iter_num_new(&it, 0, 10);
 *     while (bpf_iter_num_next(&it)) {
 *       if (a == 0) {
 *         a = 1;
 *         i = 7; // Because i changed verifier would forget
 *                // its range on second loop entry.
 *       } else {
 *         arr[i] = 42; // This would fail to verify.
 *       }
 *     }
 *     bpf_iter_num_destroy(&it);
 */
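/* Illustrative sketch (not from the original source): the common pattern that
 * does converge under this heuristic re-establishes the bounds of the
 * loop-carried scalar before every precise use, e.g.:
 *
 *	struct bpf_iter_num it;
 *	int arr[10], i = 0;
 *
 *	bpf_iter_num_new(&it, 0, 10);
 *	while (bpf_iter_num_next(&it)) {
 *		if (i >= 0 && i < 10)	// re-bound i on every iteration
 *			arr[i] = 42;
 *		i++;
 *	}
 *	bpf_iter_num_destroy(&it);
 */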
static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
				  struct bpf_kfunc_call_arg_meta *meta)
{
	struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st;
	struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr;
	struct bpf_reg_state *cur_iter, *queued_iter;

	BTF_TYPE_EMIT(struct bpf_iter);

	cur_iter = get_iter_from_state(cur_st, meta);

	if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE &&
	    cur_iter->iter.state != BPF_ITER_STATE_DRAINED) {
		verbose(env, "verifier internal error: unexpected iterator state %d (%s)\n",
			cur_iter->iter.state, iter_state_str(cur_iter->iter.state));
		return -EFAULT;
	}

	if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) {
		/* Because iter_next() call is a checkpoint is_state_visited()
		 * should guarantee parent state with same call sites and insn_idx.
		 */
		if (!cur_st->parent || cur_st->parent->insn_idx != insn_idx ||
		    !same_callsites(cur_st->parent, cur_st)) {
			verbose(env, "bug: bad parent state for iter next call");
			return -EFAULT;
		}
		/* Note cur_st->parent in the call below, it is necessary to skip
		 * checkpoint created for cur_st by is_state_visited()
		 * right at this instruction.
		 */
		prev_st = find_prev_entry(env, cur_st->parent, insn_idx);
		/* branch out active iter state */
		queued_st = push_stack(env, insn_idx + 1, insn_idx, false);
		if (!queued_st)
			return -ENOMEM;

		queued_iter = get_iter_from_state(queued_st, meta);
		queued_iter->iter.state = BPF_ITER_STATE_ACTIVE;
		queued_iter->iter.depth++;
		if (prev_st)
			widen_imprecise_scalars(env, prev_st, queued_st);

		queued_fr = queued_st->frame[queued_st->curframe];
		mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]);
	}

	/* switch to DRAINED state, but keep the depth unchanged */
	/* mark current iter state as drained and assume returned NULL */
	cur_iter->iter.state = BPF_ITER_STATE_DRAINED;
	__mark_reg_const_zero(env, &cur_fr->regs[BPF_REG_0]);

	return 0;
}
static bool arg_type_is_mem_size(enum bpf_arg_type type)
{
	return type == ARG_CONST_SIZE ||
	       type == ARG_CONST_SIZE_OR_ZERO;
}

static bool arg_type_is_raw_mem(enum bpf_arg_type type)
{
	return base_type(type) == ARG_PTR_TO_MEM &&
	       type & MEM_UNINIT;
}

static bool arg_type_is_release(enum bpf_arg_type type)
{
	return type & OBJ_RELEASE;
}

static bool arg_type_is_dynptr(enum bpf_arg_type type)
{
	return base_type(type) == ARG_PTR_TO_DYNPTR;
}
static int resolve_map_arg_type(struct bpf_verifier_env *env,
				const struct bpf_call_arg_meta *meta,
				enum bpf_arg_type *arg_type)
{
	if (!meta->map_ptr) {
		/* kernel subsystem misconfigured verifier */
		verbose(env, "invalid map_ptr to access map->type\n");
		return -EACCES;
	}

	switch (meta->map_ptr->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
		if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
			*arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
		} else {
			verbose(env, "invalid arg_type for sockmap/sockhash\n");
			return -EINVAL;
		}
		break;
	case BPF_MAP_TYPE_BLOOM_FILTER:
		if (meta->func_id == BPF_FUNC_map_peek_elem)
			*arg_type = ARG_PTR_TO_MAP_VALUE;
		break;
	default:
		break;
	}
	return 0;
}
8535 struct bpf_reg_types
{
8536 const enum bpf_reg_type types
[10];
8540 static const struct bpf_reg_types sock_types
= {
8550 static const struct bpf_reg_types btf_id_sock_common_types
= {
8557 PTR_TO_BTF_ID
| PTR_TRUSTED
,
8559 .btf_id
= &btf_sock_ids
[BTF_SOCK_TYPE_SOCK_COMMON
],
8563 static const struct bpf_reg_types mem_types
= {
8571 PTR_TO_MEM
| MEM_RINGBUF
,
8573 PTR_TO_BTF_ID
| PTR_TRUSTED
,
8577 static const struct bpf_reg_types spin_lock_types
= {
8580 PTR_TO_BTF_ID
| MEM_ALLOC
,
8584 static const struct bpf_reg_types fullsock_types
= { .types
= { PTR_TO_SOCKET
} };
8585 static const struct bpf_reg_types scalar_types
= { .types
= { SCALAR_VALUE
} };
8586 static const struct bpf_reg_types context_types
= { .types
= { PTR_TO_CTX
} };
8587 static const struct bpf_reg_types ringbuf_mem_types
= { .types
= { PTR_TO_MEM
| MEM_RINGBUF
} };
8588 static const struct bpf_reg_types const_map_ptr_types
= { .types
= { CONST_PTR_TO_MAP
} };
8589 static const struct bpf_reg_types btf_ptr_types
= {
8592 PTR_TO_BTF_ID
| PTR_TRUSTED
,
8593 PTR_TO_BTF_ID
| MEM_RCU
,
8596 static const struct bpf_reg_types percpu_btf_ptr_types
= {
8598 PTR_TO_BTF_ID
| MEM_PERCPU
,
8599 PTR_TO_BTF_ID
| MEM_PERCPU
| MEM_RCU
,
8600 PTR_TO_BTF_ID
| MEM_PERCPU
| PTR_TRUSTED
,
8603 static const struct bpf_reg_types func_ptr_types
= { .types
= { PTR_TO_FUNC
} };
8604 static const struct bpf_reg_types stack_ptr_types
= { .types
= { PTR_TO_STACK
} };
8605 static const struct bpf_reg_types const_str_ptr_types
= { .types
= { PTR_TO_MAP_VALUE
} };
8606 static const struct bpf_reg_types timer_types
= { .types
= { PTR_TO_MAP_VALUE
} };
8607 static const struct bpf_reg_types kptr_xchg_dest_types
= {
8610 PTR_TO_BTF_ID
| MEM_ALLOC
8613 static const struct bpf_reg_types dynptr_types
= {
8616 CONST_PTR_TO_DYNPTR
,
8620 static const struct bpf_reg_types
*compatible_reg_types
[__BPF_ARG_TYPE_MAX
] = {
8621 [ARG_PTR_TO_MAP_KEY
] = &mem_types
,
8622 [ARG_PTR_TO_MAP_VALUE
] = &mem_types
,
8623 [ARG_CONST_SIZE
] = &scalar_types
,
8624 [ARG_CONST_SIZE_OR_ZERO
] = &scalar_types
,
8625 [ARG_CONST_ALLOC_SIZE_OR_ZERO
] = &scalar_types
,
8626 [ARG_CONST_MAP_PTR
] = &const_map_ptr_types
,
8627 [ARG_PTR_TO_CTX
] = &context_types
,
8628 [ARG_PTR_TO_SOCK_COMMON
] = &sock_types
,
8630 [ARG_PTR_TO_BTF_ID_SOCK_COMMON
] = &btf_id_sock_common_types
,
8632 [ARG_PTR_TO_SOCKET
] = &fullsock_types
,
8633 [ARG_PTR_TO_BTF_ID
] = &btf_ptr_types
,
8634 [ARG_PTR_TO_SPIN_LOCK
] = &spin_lock_types
,
8635 [ARG_PTR_TO_MEM
] = &mem_types
,
8636 [ARG_PTR_TO_RINGBUF_MEM
] = &ringbuf_mem_types
,
8637 [ARG_PTR_TO_PERCPU_BTF_ID
] = &percpu_btf_ptr_types
,
8638 [ARG_PTR_TO_FUNC
] = &func_ptr_types
,
8639 [ARG_PTR_TO_STACK
] = &stack_ptr_types
,
8640 [ARG_PTR_TO_CONST_STR
] = &const_str_ptr_types
,
8641 [ARG_PTR_TO_TIMER
] = &timer_types
,
8642 [ARG_KPTR_XCHG_DEST
] = &kptr_xchg_dest_types
,
8643 [ARG_PTR_TO_DYNPTR
] = &dynptr_types
,
8646 static int check_reg_type(struct bpf_verifier_env
*env
, u32 regno
,
8647 enum bpf_arg_type arg_type
,
8648 const u32
*arg_btf_id
,
8649 struct bpf_call_arg_meta
*meta
)
8651 struct bpf_reg_state
*regs
= cur_regs(env
), *reg
= ®s
[regno
];
8652 enum bpf_reg_type expected
, type
= reg
->type
;
8653 const struct bpf_reg_types
*compatible
;
8656 compatible
= compatible_reg_types
[base_type(arg_type
)];
8658 verbose(env
, "verifier internal error: unsupported arg type %d\n", arg_type
);
	/* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
	 *
	 * Same for MAYBE_NULL:
	 *
	 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL
	 *
	 * ARG_PTR_TO_MEM is compatible with PTR_TO_MEM that is tagged with a dynptr type.
	 *
	 * Therefore we fold these flags depending on the arg_type before comparison.
	 */
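	/* Illustrative sketch (not from the original source): with the folding
	 * below, a helper declaring
	 *
	 *	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	 *
	 * accepts both PTR_TO_MEM and PTR_TO_MEM | MEM_RDONLY registers, while
	 * a plain ARG_PTR_TO_MEM keeps rejecting MEM_RDONLY registers because
	 * their MEM_RDONLY flag is left in place for the comparison.
	 */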
	if (arg_type & MEM_RDONLY)
		type &= ~MEM_RDONLY;
	if (arg_type & PTR_MAYBE_NULL)
		type &= ~PTR_MAYBE_NULL;
	if (base_type(arg_type) == ARG_PTR_TO_MEM)
		type &= ~DYNPTR_TYPE_FLAG_MASK;

	/* Local kptr types are allowed as the source argument of bpf_kptr_xchg */
	if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type) && regno == BPF_REG_2) {
		type &= ~MEM_ALLOC;
		type &= ~MEM_PERCPU;
	}
8687 for (i
= 0; i
< ARRAY_SIZE(compatible
->types
); i
++) {
8688 expected
= compatible
->types
[i
];
8689 if (expected
== NOT_INIT
)
8692 if (type
== expected
)
8696 verbose(env
, "R%d type=%s expected=", regno
, reg_type_str(env
, reg
->type
));
8697 for (j
= 0; j
+ 1 < i
; j
++)
8698 verbose(env
, "%s, ", reg_type_str(env
, compatible
->types
[j
]));
8699 verbose(env
, "%s\n", reg_type_str(env
, compatible
->types
[j
]));
8703 if (base_type(reg
->type
) != PTR_TO_BTF_ID
)
8706 if (compatible
== &mem_types
) {
8707 if (!(arg_type
& MEM_RDONLY
)) {
8709 "%s() may write into memory pointed by R%d type=%s\n",
8710 func_id_name(meta
->func_id
),
8711 regno
, reg_type_str(env
, reg
->type
));
8717 switch ((int)reg
->type
) {
8719 case PTR_TO_BTF_ID
| PTR_TRUSTED
:
8720 case PTR_TO_BTF_ID
| PTR_TRUSTED
| PTR_MAYBE_NULL
:
8721 case PTR_TO_BTF_ID
| MEM_RCU
:
8722 case PTR_TO_BTF_ID
| PTR_MAYBE_NULL
:
8723 case PTR_TO_BTF_ID
| PTR_MAYBE_NULL
| MEM_RCU
:
8725 /* For bpf_sk_release, it needs to match against first member
8726 * 'struct sock_common', hence make an exception for it. This
8727 * allows bpf_sk_release to work for multiple socket types.
8729 bool strict_type_match
= arg_type_is_release(arg_type
) &&
8730 meta
->func_id
!= BPF_FUNC_sk_release
;
8732 if (type_may_be_null(reg
->type
) &&
8733 (!type_may_be_null(arg_type
) || arg_type_is_release(arg_type
))) {
8734 verbose(env
, "Possibly NULL pointer passed to helper arg%d\n", regno
);
8739 if (!compatible
->btf_id
) {
8740 verbose(env
, "verifier internal error: missing arg compatible BTF ID\n");
8743 arg_btf_id
= compatible
->btf_id
;
8746 if (meta
->func_id
== BPF_FUNC_kptr_xchg
) {
8747 if (map_kptr_match_type(env
, meta
->kptr_field
, reg
, regno
))
8750 if (arg_btf_id
== BPF_PTR_POISON
) {
8751 verbose(env
, "verifier internal error:");
8752 verbose(env
, "R%d has non-overwritten BPF_PTR_POISON type\n",
8757 if (!btf_struct_ids_match(&env
->log
, reg
->btf
, reg
->btf_id
, reg
->off
,
8758 btf_vmlinux
, *arg_btf_id
,
8759 strict_type_match
)) {
8760 verbose(env
, "R%d is of type %s but %s is expected\n",
8761 regno
, btf_type_name(reg
->btf
, reg
->btf_id
),
8762 btf_type_name(btf_vmlinux
, *arg_btf_id
));
8768 case PTR_TO_BTF_ID
| MEM_ALLOC
:
8769 case PTR_TO_BTF_ID
| MEM_PERCPU
| MEM_ALLOC
:
8770 if (meta
->func_id
!= BPF_FUNC_spin_lock
&& meta
->func_id
!= BPF_FUNC_spin_unlock
&&
8771 meta
->func_id
!= BPF_FUNC_kptr_xchg
) {
8772 verbose(env
, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
8775 /* Check if local kptr in src arg matches kptr in dst arg */
8776 if (meta
->func_id
== BPF_FUNC_kptr_xchg
&& regno
== BPF_REG_2
) {
8777 if (map_kptr_match_type(env
, meta
->kptr_field
, reg
, regno
))
8781 case PTR_TO_BTF_ID
| MEM_PERCPU
:
8782 case PTR_TO_BTF_ID
| MEM_PERCPU
| MEM_RCU
:
8783 case PTR_TO_BTF_ID
| MEM_PERCPU
| PTR_TRUSTED
:
8784 /* Handled by helper specific checks */
8787 verbose(env
, "verifier internal error: invalid PTR_TO_BTF_ID register for type match\n");
8793 static struct btf_field
*
8794 reg_find_field_offset(const struct bpf_reg_state
*reg
, s32 off
, u32 fields
)
8796 struct btf_field
*field
;
8797 struct btf_record
*rec
;
8799 rec
= reg_btf_record(reg
);
8803 field
= btf_record_find(rec
, off
, fields
);
8810 static int check_func_arg_reg_off(struct bpf_verifier_env
*env
,
8811 const struct bpf_reg_state
*reg
, int regno
,
8812 enum bpf_arg_type arg_type
)
8814 u32 type
= reg
->type
;
8816 /* When referenced register is passed to release function, its fixed
8819 * We will check arg_type_is_release reg has ref_obj_id when storing
8820 * meta->release_regno.
8822 if (arg_type_is_release(arg_type
)) {
8823 /* ARG_PTR_TO_DYNPTR with OBJ_RELEASE is a bit special, as it
8824 * may not directly point to the object being released, but to
8825 * dynptr pointing to such object, which might be at some offset
8826 * on the stack. In that case, we simply to fallback to the
8829 if (arg_type_is_dynptr(arg_type
) && type
== PTR_TO_STACK
)
8832 /* Doing check_ptr_off_reg check for the offset will catch this
8833 * because fixed_off_ok is false, but checking here allows us
8834 * to give the user a better error message.
8837 verbose(env
, "R%d must have zero offset when passed to release func or trusted arg to kfunc\n",
8841 return __check_ptr_off_reg(env
, reg
, regno
, false);
8845 /* Pointer types where both fixed and variable offset is explicitly allowed: */
8848 case PTR_TO_PACKET_META
:
8849 case PTR_TO_MAP_KEY
:
8850 case PTR_TO_MAP_VALUE
:
8852 case PTR_TO_MEM
| MEM_RDONLY
:
8853 case PTR_TO_MEM
| MEM_RINGBUF
:
8855 case PTR_TO_BUF
| MEM_RDONLY
:
8859 /* All the rest must be rejected, except PTR_TO_BTF_ID which allows
8863 case PTR_TO_BTF_ID
| MEM_ALLOC
:
8864 case PTR_TO_BTF_ID
| PTR_TRUSTED
:
8865 case PTR_TO_BTF_ID
| MEM_RCU
:
8866 case PTR_TO_BTF_ID
| MEM_ALLOC
| NON_OWN_REF
:
8867 case PTR_TO_BTF_ID
| MEM_ALLOC
| NON_OWN_REF
| MEM_RCU
:
8868 /* When referenced PTR_TO_BTF_ID is passed to release function,
8869 * its fixed offset must be 0. In the other cases, fixed offset
8870 * can be non-zero. This was already checked above. So pass
8871 * fixed_off_ok as true to allow fixed offset for all other
8872 * cases. var_off always must be 0 for PTR_TO_BTF_ID, hence we
8873 * still need to do checks instead of returning.
8875 return __check_ptr_off_reg(env
, reg
, regno
, true);
8877 return __check_ptr_off_reg(env
, reg
, regno
, false);
8881 static struct bpf_reg_state
*get_dynptr_arg_reg(struct bpf_verifier_env
*env
,
8882 const struct bpf_func_proto
*fn
,
8883 struct bpf_reg_state
*regs
)
8885 struct bpf_reg_state
*state
= NULL
;
8888 for (i
= 0; i
< MAX_BPF_FUNC_REG_ARGS
; i
++)
8889 if (arg_type_is_dynptr(fn
->arg_type
[i
])) {
8891 verbose(env
, "verifier internal error: multiple dynptr args\n");
8894 state
= ®s
[BPF_REG_1
+ i
];
8898 verbose(env
, "verifier internal error: no dynptr arg found\n");
8903 static int dynptr_id(struct bpf_verifier_env
*env
, struct bpf_reg_state
*reg
)
8905 struct bpf_func_state
*state
= func(env
, reg
);
8908 if (reg
->type
== CONST_PTR_TO_DYNPTR
)
8910 spi
= dynptr_get_spi(env
, reg
);
8913 return state
->stack
[spi
].spilled_ptr
.id
;
8916 static int dynptr_ref_obj_id(struct bpf_verifier_env
*env
, struct bpf_reg_state
*reg
)
8918 struct bpf_func_state
*state
= func(env
, reg
);
8921 if (reg
->type
== CONST_PTR_TO_DYNPTR
)
8922 return reg
->ref_obj_id
;
8923 spi
= dynptr_get_spi(env
, reg
);
8926 return state
->stack
[spi
].spilled_ptr
.ref_obj_id
;
8929 static enum bpf_dynptr_type
dynptr_get_type(struct bpf_verifier_env
*env
,
8930 struct bpf_reg_state
*reg
)
8932 struct bpf_func_state
*state
= func(env
, reg
);
8935 if (reg
->type
== CONST_PTR_TO_DYNPTR
)
8936 return reg
->dynptr
.type
;
8938 spi
= __get_spi(reg
->off
);
8940 verbose(env
, "verifier internal error: invalid spi when querying dynptr type\n");
8941 return BPF_DYNPTR_TYPE_INVALID
;
8944 return state
->stack
[spi
].spilled_ptr
.dynptr
.type
;
8947 static int check_reg_const_str(struct bpf_verifier_env
*env
,
8948 struct bpf_reg_state
*reg
, u32 regno
)
8950 struct bpf_map
*map
= reg
->map_ptr
;
8956 if (reg
->type
!= PTR_TO_MAP_VALUE
)
8959 if (!bpf_map_is_rdonly(map
)) {
8960 verbose(env
, "R%d does not point to a readonly map'\n", regno
);
8964 if (!tnum_is_const(reg
->var_off
)) {
8965 verbose(env
, "R%d is not a constant address'\n", regno
);
8969 if (!map
->ops
->map_direct_value_addr
) {
8970 verbose(env
, "no direct value access support for this map type\n");
8974 err
= check_map_access(env
, regno
, reg
->off
,
8975 map
->value_size
- reg
->off
, false,
8980 map_off
= reg
->off
+ reg
->var_off
.value
;
8981 err
= map
->ops
->map_direct_value_addr(map
, &map_addr
, map_off
);
8983 verbose(env
, "direct value access on string failed\n");
8987 str_ptr
= (char *)(long)(map_addr
);
8988 if (!strnchr(str_ptr
+ map_off
, map
->value_size
- map_off
, 0)) {
8989 verbose(env
, "string is not zero-terminated\n");
8995 static int check_func_arg(struct bpf_verifier_env
*env
, u32 arg
,
8996 struct bpf_call_arg_meta
*meta
,
8997 const struct bpf_func_proto
*fn
,
9000 u32 regno
= BPF_REG_1
+ arg
;
9001 struct bpf_reg_state
*regs
= cur_regs(env
), *reg
= ®s
[regno
];
9002 enum bpf_arg_type arg_type
= fn
->arg_type
[arg
];
9003 enum bpf_reg_type type
= reg
->type
;
9004 u32
*arg_btf_id
= NULL
;
9008 if (arg_type
== ARG_DONTCARE
)
9011 err
= check_reg_arg(env
, regno
, SRC_OP
);
9015 if (arg_type
== ARG_ANYTHING
) {
9016 if (is_pointer_value(env
, regno
)) {
9017 verbose(env
, "R%d leaks addr into helper function\n",
9024 if (type_is_pkt_pointer(type
) &&
9025 !may_access_direct_pkt_data(env
, meta
, BPF_READ
)) {
9026 verbose(env
, "helper access to the packet is not allowed\n");
9030 if (base_type(arg_type
) == ARG_PTR_TO_MAP_VALUE
) {
9031 err
= resolve_map_arg_type(env
, meta
, &arg_type
);
9036 if (register_is_null(reg
) && type_may_be_null(arg_type
))
9037 /* A NULL register has a SCALAR_VALUE type, so skip
9040 goto skip_type_check
;
9042 /* arg_btf_id and arg_size are in a union. */
9043 if (base_type(arg_type
) == ARG_PTR_TO_BTF_ID
||
9044 base_type(arg_type
) == ARG_PTR_TO_SPIN_LOCK
)
9045 arg_btf_id
= fn
->arg_btf_id
[arg
];
9047 mask
= mask_raw_tp_reg(env
, reg
);
9048 err
= check_reg_type(env
, regno
, arg_type
, arg_btf_id
, meta
);
9050 err
= err
?: check_func_arg_reg_off(env
, reg
, regno
, arg_type
);
9051 unmask_raw_tp_reg(reg
, mask
);
9056 if (arg_type_is_release(arg_type
)) {
9057 if (arg_type_is_dynptr(arg_type
)) {
9058 struct bpf_func_state
*state
= func(env
, reg
);
9061 /* Only dynptr created on stack can be released, thus
9062 * the get_spi and stack state checks for spilled_ptr
9063 * should only be done before process_dynptr_func for
9066 if (reg
->type
== PTR_TO_STACK
) {
9067 spi
= dynptr_get_spi(env
, reg
);
9068 if (spi
< 0 || !state
->stack
[spi
].spilled_ptr
.ref_obj_id
) {
9069 verbose(env
, "arg %d is an unacquired reference\n", regno
);
9073 verbose(env
, "cannot release unowned const bpf_dynptr\n");
9076 } else if (!reg
->ref_obj_id
&& !register_is_null(reg
)) {
9077 verbose(env
, "R%d must be referenced when passed to release function\n",
9081 if (meta
->release_regno
) {
9082 verbose(env
, "verifier internal error: more than one release argument\n");
9085 meta
->release_regno
= regno
;
9088 if (reg
->ref_obj_id
&& base_type(arg_type
) != ARG_KPTR_XCHG_DEST
) {
9089 if (meta
->ref_obj_id
) {
9090 verbose(env
, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
9091 regno
, reg
->ref_obj_id
,
9095 meta
->ref_obj_id
= reg
->ref_obj_id
;
9098 switch (base_type(arg_type
)) {
9099 case ARG_CONST_MAP_PTR
:
9100 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
9101 if (meta
->map_ptr
) {
9102 /* Use map_uid (which is unique id of inner map) to reject:
9103 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
9104 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
9105 * if (inner_map1 && inner_map2) {
9106 * timer = bpf_map_lookup_elem(inner_map1);
9108 * // mismatch would have been allowed
9109 * bpf_timer_init(timer, inner_map2);
9112 * Comparing map_ptr is enough to distinguish normal and outer maps.
9114 if (meta
->map_ptr
!= reg
->map_ptr
||
9115 meta
->map_uid
!= reg
->map_uid
) {
9117 "timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
9118 meta
->map_uid
, reg
->map_uid
);
9122 meta
->map_ptr
= reg
->map_ptr
;
9123 meta
->map_uid
= reg
->map_uid
;
9125 case ARG_PTR_TO_MAP_KEY
:
9126 /* bpf_map_xxx(..., map_ptr, ..., key) call:
9127 * check that [key, key + map->key_size) are within
9128 * stack limits and initialized
9130 if (!meta
->map_ptr
) {
9131 /* in function declaration map_ptr must come before
9132 * map_key, so that it's verified and known before
9133 * we have to check map_key here. Otherwise it means
9134 * that kernel subsystem misconfigured verifier
9136 verbose(env
, "invalid map_ptr to access map->key\n");
9139 err
= check_helper_mem_access(env
, regno
, meta
->map_ptr
->key_size
,
9140 BPF_READ
, false, NULL
);
9142 case ARG_PTR_TO_MAP_VALUE
:
9143 if (type_may_be_null(arg_type
) && register_is_null(reg
))
9146 /* bpf_map_xxx(..., map_ptr, ..., value) call:
9147 * check [value, value + map->value_size) validity
9149 if (!meta
->map_ptr
) {
9150 /* kernel subsystem misconfigured verifier */
9151 verbose(env
, "invalid map_ptr to access map->value\n");
9154 meta
->raw_mode
= arg_type
& MEM_UNINIT
;
9155 err
= check_helper_mem_access(env
, regno
, meta
->map_ptr
->value_size
,
9156 arg_type
& MEM_WRITE
? BPF_WRITE
: BPF_READ
,
9159 case ARG_PTR_TO_PERCPU_BTF_ID
:
9161 verbose(env
, "Helper has invalid btf_id in R%d\n", regno
);
9164 meta
->ret_btf
= reg
->btf
;
9165 meta
->ret_btf_id
= reg
->btf_id
;
9167 case ARG_PTR_TO_SPIN_LOCK
:
9168 if (in_rbtree_lock_required_cb(env
)) {
9169 verbose(env
, "can't spin_{lock,unlock} in rbtree cb\n");
9172 if (meta
->func_id
== BPF_FUNC_spin_lock
) {
9173 err
= process_spin_lock(env
, regno
, true);
9176 } else if (meta
->func_id
== BPF_FUNC_spin_unlock
) {
9177 err
= process_spin_lock(env
, regno
, false);
9181 verbose(env
, "verifier internal error\n");
9185 case ARG_PTR_TO_TIMER
:
9186 err
= process_timer_func(env
, regno
, meta
);
9190 case ARG_PTR_TO_FUNC
:
9191 meta
->subprogno
= reg
->subprogno
;
9193 case ARG_PTR_TO_MEM
:
9194 /* The access to this pointer is only checked when we hit the
9195 * next is_mem_size argument below.
9197 meta
->raw_mode
= arg_type
& MEM_UNINIT
;
9198 if (arg_type
& MEM_FIXED_SIZE
) {
9199 err
= check_helper_mem_access(env
, regno
, fn
->arg_size
[arg
],
9200 arg_type
& MEM_WRITE
? BPF_WRITE
: BPF_READ
,
9204 if (arg_type
& MEM_ALIGNED
)
9205 err
= check_ptr_alignment(env
, reg
, 0, fn
->arg_size
[arg
], true);
9208 case ARG_CONST_SIZE
:
9209 err
= check_mem_size_reg(env
, reg
, regno
,
9210 fn
->arg_type
[arg
- 1] & MEM_WRITE
?
9211 BPF_WRITE
: BPF_READ
,
9214 case ARG_CONST_SIZE_OR_ZERO
:
9215 err
= check_mem_size_reg(env
, reg
, regno
,
9216 fn
->arg_type
[arg
- 1] & MEM_WRITE
?
9217 BPF_WRITE
: BPF_READ
,
9220 case ARG_PTR_TO_DYNPTR
:
9221 err
= process_dynptr_func(env
, regno
, insn_idx
, arg_type
, 0);
9225 case ARG_CONST_ALLOC_SIZE_OR_ZERO
:
9226 if (!tnum_is_const(reg
->var_off
)) {
9227 verbose(env
, "R%d is not a known constant'\n",
9231 meta
->mem_size
= reg
->var_off
.value
;
9232 err
= mark_chain_precision(env
, regno
);
9236 case ARG_PTR_TO_CONST_STR
:
9238 err
= check_reg_const_str(env
, reg
, regno
);
9243 case ARG_KPTR_XCHG_DEST
:
9244 err
= process_kptr_func(env
, regno
, meta
);
9253 static bool may_update_sockmap(struct bpf_verifier_env
*env
, int func_id
)
9255 enum bpf_attach_type eatype
= env
->prog
->expected_attach_type
;
9256 enum bpf_prog_type type
= resolve_prog_type(env
->prog
);
9258 if (func_id
!= BPF_FUNC_map_update_elem
&&
9259 func_id
!= BPF_FUNC_map_delete_elem
)
9262 /* It's not possible to get access to a locked struct sock in these
9263 * contexts, so updating is safe.
9266 case BPF_PROG_TYPE_TRACING
:
9267 if (eatype
== BPF_TRACE_ITER
)
9270 case BPF_PROG_TYPE_SOCK_OPS
:
9271 /* map_update allowed only via dedicated helpers with event type checks */
9272 if (func_id
== BPF_FUNC_map_delete_elem
)
9275 case BPF_PROG_TYPE_SOCKET_FILTER
:
9276 case BPF_PROG_TYPE_SCHED_CLS
:
9277 case BPF_PROG_TYPE_SCHED_ACT
:
9278 case BPF_PROG_TYPE_XDP
:
9279 case BPF_PROG_TYPE_SK_REUSEPORT
:
9280 case BPF_PROG_TYPE_FLOW_DISSECTOR
:
9281 case BPF_PROG_TYPE_SK_LOOKUP
:
9287 verbose(env
, "cannot update sockmap in this context\n");
9291 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env
*env
)
9293 return env
->prog
->jit_requested
&&
9294 bpf_jit_supports_subprog_tailcalls();
9297 static int check_map_func_compatibility(struct bpf_verifier_env
*env
,
9298 struct bpf_map
*map
, int func_id
)
9303 /* We need a two way check, first is from map perspective ... */
9304 switch (map
->map_type
) {
9305 case BPF_MAP_TYPE_PROG_ARRAY
:
9306 if (func_id
!= BPF_FUNC_tail_call
)
9309 case BPF_MAP_TYPE_PERF_EVENT_ARRAY
:
9310 if (func_id
!= BPF_FUNC_perf_event_read
&&
9311 func_id
!= BPF_FUNC_perf_event_output
&&
9312 func_id
!= BPF_FUNC_skb_output
&&
9313 func_id
!= BPF_FUNC_perf_event_read_value
&&
9314 func_id
!= BPF_FUNC_xdp_output
)
9317 case BPF_MAP_TYPE_RINGBUF
:
9318 if (func_id
!= BPF_FUNC_ringbuf_output
&&
9319 func_id
!= BPF_FUNC_ringbuf_reserve
&&
9320 func_id
!= BPF_FUNC_ringbuf_query
&&
9321 func_id
!= BPF_FUNC_ringbuf_reserve_dynptr
&&
9322 func_id
!= BPF_FUNC_ringbuf_submit_dynptr
&&
9323 func_id
!= BPF_FUNC_ringbuf_discard_dynptr
)
9326 case BPF_MAP_TYPE_USER_RINGBUF
:
9327 if (func_id
!= BPF_FUNC_user_ringbuf_drain
)
9330 case BPF_MAP_TYPE_STACK_TRACE
:
9331 if (func_id
!= BPF_FUNC_get_stackid
)
9334 case BPF_MAP_TYPE_CGROUP_ARRAY
:
9335 if (func_id
!= BPF_FUNC_skb_under_cgroup
&&
9336 func_id
!= BPF_FUNC_current_task_under_cgroup
)
9339 case BPF_MAP_TYPE_CGROUP_STORAGE
:
9340 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
:
9341 if (func_id
!= BPF_FUNC_get_local_storage
)
9344 case BPF_MAP_TYPE_DEVMAP
:
9345 case BPF_MAP_TYPE_DEVMAP_HASH
:
9346 if (func_id
!= BPF_FUNC_redirect_map
&&
9347 func_id
!= BPF_FUNC_map_lookup_elem
)
9350 /* Restrict bpf side of cpumap and xskmap, open when use-cases
9353 case BPF_MAP_TYPE_CPUMAP
:
9354 if (func_id
!= BPF_FUNC_redirect_map
)
9357 case BPF_MAP_TYPE_XSKMAP
:
9358 if (func_id
!= BPF_FUNC_redirect_map
&&
9359 func_id
!= BPF_FUNC_map_lookup_elem
)
9362 case BPF_MAP_TYPE_ARRAY_OF_MAPS
:
9363 case BPF_MAP_TYPE_HASH_OF_MAPS
:
9364 if (func_id
!= BPF_FUNC_map_lookup_elem
)
9367 case BPF_MAP_TYPE_SOCKMAP
:
9368 if (func_id
!= BPF_FUNC_sk_redirect_map
&&
9369 func_id
!= BPF_FUNC_sock_map_update
&&
9370 func_id
!= BPF_FUNC_msg_redirect_map
&&
9371 func_id
!= BPF_FUNC_sk_select_reuseport
&&
9372 func_id
!= BPF_FUNC_map_lookup_elem
&&
9373 !may_update_sockmap(env
, func_id
))
9376 case BPF_MAP_TYPE_SOCKHASH
:
9377 if (func_id
!= BPF_FUNC_sk_redirect_hash
&&
9378 func_id
!= BPF_FUNC_sock_hash_update
&&
9379 func_id
!= BPF_FUNC_msg_redirect_hash
&&
9380 func_id
!= BPF_FUNC_sk_select_reuseport
&&
9381 func_id
!= BPF_FUNC_map_lookup_elem
&&
9382 !may_update_sockmap(env
, func_id
))
9385 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY
:
9386 if (func_id
!= BPF_FUNC_sk_select_reuseport
)
9389 case BPF_MAP_TYPE_QUEUE
:
9390 case BPF_MAP_TYPE_STACK
:
9391 if (func_id
!= BPF_FUNC_map_peek_elem
&&
9392 func_id
!= BPF_FUNC_map_pop_elem
&&
9393 func_id
!= BPF_FUNC_map_push_elem
)
9396 case BPF_MAP_TYPE_SK_STORAGE
:
9397 if (func_id
!= BPF_FUNC_sk_storage_get
&&
9398 func_id
!= BPF_FUNC_sk_storage_delete
&&
9399 func_id
!= BPF_FUNC_kptr_xchg
)
9402 case BPF_MAP_TYPE_INODE_STORAGE
:
9403 if (func_id
!= BPF_FUNC_inode_storage_get
&&
9404 func_id
!= BPF_FUNC_inode_storage_delete
&&
9405 func_id
!= BPF_FUNC_kptr_xchg
)
9408 case BPF_MAP_TYPE_TASK_STORAGE
:
9409 if (func_id
!= BPF_FUNC_task_storage_get
&&
9410 func_id
!= BPF_FUNC_task_storage_delete
&&
9411 func_id
!= BPF_FUNC_kptr_xchg
)
9414 case BPF_MAP_TYPE_CGRP_STORAGE
:
9415 if (func_id
!= BPF_FUNC_cgrp_storage_get
&&
9416 func_id
!= BPF_FUNC_cgrp_storage_delete
&&
9417 func_id
!= BPF_FUNC_kptr_xchg
)
9420 case BPF_MAP_TYPE_BLOOM_FILTER
:
9421 if (func_id
!= BPF_FUNC_map_peek_elem
&&
9422 func_id
!= BPF_FUNC_map_push_elem
)
9429 /* ... and second from the function itself. */
9431 case BPF_FUNC_tail_call
:
9432 if (map
->map_type
!= BPF_MAP_TYPE_PROG_ARRAY
)
9434 if (env
->subprog_cnt
> 1 && !allow_tail_call_in_subprogs(env
)) {
9435 verbose(env
, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
9439 case BPF_FUNC_perf_event_read
:
9440 case BPF_FUNC_perf_event_output
:
9441 case BPF_FUNC_perf_event_read_value
:
9442 case BPF_FUNC_skb_output
:
9443 case BPF_FUNC_xdp_output
:
9444 if (map
->map_type
!= BPF_MAP_TYPE_PERF_EVENT_ARRAY
)
9447 case BPF_FUNC_ringbuf_output
:
9448 case BPF_FUNC_ringbuf_reserve
:
9449 case BPF_FUNC_ringbuf_query
:
9450 case BPF_FUNC_ringbuf_reserve_dynptr
:
9451 case BPF_FUNC_ringbuf_submit_dynptr
:
9452 case BPF_FUNC_ringbuf_discard_dynptr
:
9453 if (map
->map_type
!= BPF_MAP_TYPE_RINGBUF
)
9456 case BPF_FUNC_user_ringbuf_drain
:
9457 if (map
->map_type
!= BPF_MAP_TYPE_USER_RINGBUF
)
9460 case BPF_FUNC_get_stackid
:
9461 if (map
->map_type
!= BPF_MAP_TYPE_STACK_TRACE
)
9464 case BPF_FUNC_current_task_under_cgroup
:
9465 case BPF_FUNC_skb_under_cgroup
:
9466 if (map
->map_type
!= BPF_MAP_TYPE_CGROUP_ARRAY
)
9469 case BPF_FUNC_redirect_map
:
9470 if (map
->map_type
!= BPF_MAP_TYPE_DEVMAP
&&
9471 map
->map_type
!= BPF_MAP_TYPE_DEVMAP_HASH
&&
9472 map
->map_type
!= BPF_MAP_TYPE_CPUMAP
&&
9473 map
->map_type
!= BPF_MAP_TYPE_XSKMAP
)
9476 case BPF_FUNC_sk_redirect_map
:
9477 case BPF_FUNC_msg_redirect_map
:
9478 case BPF_FUNC_sock_map_update
:
9479 if (map
->map_type
!= BPF_MAP_TYPE_SOCKMAP
)
9482 case BPF_FUNC_sk_redirect_hash
:
9483 case BPF_FUNC_msg_redirect_hash
:
9484 case BPF_FUNC_sock_hash_update
:
9485 if (map
->map_type
!= BPF_MAP_TYPE_SOCKHASH
)
9488 case BPF_FUNC_get_local_storage
:
9489 if (map
->map_type
!= BPF_MAP_TYPE_CGROUP_STORAGE
&&
9490 map
->map_type
!= BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
)
9493 case BPF_FUNC_sk_select_reuseport
:
9494 if (map
->map_type
!= BPF_MAP_TYPE_REUSEPORT_SOCKARRAY
&&
9495 map
->map_type
!= BPF_MAP_TYPE_SOCKMAP
&&
9496 map
->map_type
!= BPF_MAP_TYPE_SOCKHASH
)
9499 case BPF_FUNC_map_pop_elem
:
9500 if (map
->map_type
!= BPF_MAP_TYPE_QUEUE
&&
9501 map
->map_type
!= BPF_MAP_TYPE_STACK
)
9504 case BPF_FUNC_map_peek_elem
:
9505 case BPF_FUNC_map_push_elem
:
9506 if (map
->map_type
!= BPF_MAP_TYPE_QUEUE
&&
9507 map
->map_type
!= BPF_MAP_TYPE_STACK
&&
9508 map
->map_type
!= BPF_MAP_TYPE_BLOOM_FILTER
)
9511 case BPF_FUNC_map_lookup_percpu_elem
:
9512 if (map
->map_type
!= BPF_MAP_TYPE_PERCPU_ARRAY
&&
9513 map
->map_type
!= BPF_MAP_TYPE_PERCPU_HASH
&&
9514 map
->map_type
!= BPF_MAP_TYPE_LRU_PERCPU_HASH
)
9517 case BPF_FUNC_sk_storage_get
:
9518 case BPF_FUNC_sk_storage_delete
:
9519 if (map
->map_type
!= BPF_MAP_TYPE_SK_STORAGE
)
9522 case BPF_FUNC_inode_storage_get
:
9523 case BPF_FUNC_inode_storage_delete
:
9524 if (map
->map_type
!= BPF_MAP_TYPE_INODE_STORAGE
)
9527 case BPF_FUNC_task_storage_get
:
9528 case BPF_FUNC_task_storage_delete
:
9529 if (map
->map_type
!= BPF_MAP_TYPE_TASK_STORAGE
)
9532 case BPF_FUNC_cgrp_storage_get
:
9533 case BPF_FUNC_cgrp_storage_delete
:
9534 if (map
->map_type
!= BPF_MAP_TYPE_CGRP_STORAGE
)
9543 verbose(env
, "cannot pass map_type %d into func %s#%d\n",
9544 map
->map_type
, func_id_name(func_id
), func_id
);
9548 static bool check_raw_mode_ok(const struct bpf_func_proto
*fn
)
9552 if (arg_type_is_raw_mem(fn
->arg1_type
))
9554 if (arg_type_is_raw_mem(fn
->arg2_type
))
9556 if (arg_type_is_raw_mem(fn
->arg3_type
))
9558 if (arg_type_is_raw_mem(fn
->arg4_type
))
9560 if (arg_type_is_raw_mem(fn
->arg5_type
))
9563 /* We only support one arg being in raw mode at the moment,
9564 * which is sufficient for the helper functions we have
static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
{
	bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE;
	bool has_size = fn->arg_size[arg] != 0;
	bool is_next_size = false;

	if (arg + 1 < ARRAY_SIZE(fn->arg_type))
		is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]);

	if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM)
		return is_next_size;

	return has_size == is_next_size || is_next_size == is_fixed;
}
static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
{
	/* bpf_xxx(..., buf, len) call will access 'len'
	 * bytes from memory 'buf'. Both arg types need
	 * to be paired, so make sure there's no buggy
	 * helper function specification.
	 */
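	/* Illustrative sketch (not from the original source): a well-formed
	 * pairing as checked below looks like
	 *
	 *	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	 *	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	 *
	 * whereas a size argument with no pointer argument in front of it (or
	 * the other way around) is treated as a buggy helper specification.
	 */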
	if (arg_type_is_mem_size(fn->arg1_type) ||
	    check_args_pair_invalid(fn, 0) ||
	    check_args_pair_invalid(fn, 1) ||
	    check_args_pair_invalid(fn, 2) ||
	    check_args_pair_invalid(fn, 3) ||
	    check_args_pair_invalid(fn, 4))
		return false;

	return true;
}
9603 static bool check_btf_id_ok(const struct bpf_func_proto
*fn
)
9607 for (i
= 0; i
< ARRAY_SIZE(fn
->arg_type
); i
++) {
9608 if (base_type(fn
->arg_type
[i
]) == ARG_PTR_TO_BTF_ID
)
9609 return !!fn
->arg_btf_id
[i
];
9610 if (base_type(fn
->arg_type
[i
]) == ARG_PTR_TO_SPIN_LOCK
)
9611 return fn
->arg_btf_id
[i
] == BPF_PTR_POISON
;
9612 if (base_type(fn
->arg_type
[i
]) != ARG_PTR_TO_BTF_ID
&& fn
->arg_btf_id
[i
] &&
9613 /* arg_btf_id and arg_size are in a union. */
9614 (base_type(fn
->arg_type
[i
]) != ARG_PTR_TO_MEM
||
9615 !(fn
->arg_type
[i
] & MEM_FIXED_SIZE
)))
9622 static int check_func_proto(const struct bpf_func_proto
*fn
, int func_id
)
9624 return check_raw_mode_ok(fn
) &&
9625 check_arg_pair_ok(fn
) &&
9626 check_btf_id_ok(fn
) ? 0 : -EINVAL
;
/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
 * are now invalid, so turn them into unknown SCALAR_VALUE.
 *
 * This also applies to dynptr slices belonging to skb and xdp dynptrs,
 * since these slices point to packet data.
 */
static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
	struct bpf_func_state *state;
	struct bpf_reg_state *reg;

	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
		if (reg_is_pkt_pointer_any(reg) || reg_is_dynptr_slice_pkt(reg))
			mark_reg_invalid(env, reg);

	BEYOND_PKT_END = -2,

static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *reg = &state->regs[regn];

	if (reg->type != PTR_TO_PACKET)
		/* PTR_TO_PACKET_META is not supported yet */

	/* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
	 * How far beyond pkt_end it goes is unknown.
	 * if (!range_open) it's the case of pkt >= pkt_end
	 * if (range_open) it's the case of pkt > pkt_end
	 * hence this pointer is at least 1 byte bigger than pkt_end
	 */
		reg->range = BEYOND_PKT_END;
		reg->range = AT_PKT_END;
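
/* Illustrative sketch (not part of the original file): the pkt vs. pkt_end
 * comparison that mark_pkt_end() models, as it typically appears in a BPF
 * program. After the bounds check the verifier knows the access is within
 * the packet.
 *
 *	void *data     = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *	struct ethhdr *eth = data;
 *
 *	if (data + sizeof(*eth) > data_end)	// the pkt > pkt_end branch bails out
 *		return TC_ACT_OK;
 *	// here eth can be dereferenced safely
 */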
/* The pointer with the specified id has released its reference to kernel
 * resources. Identify all copies of the same pointer and clear the reference.
 */
static int release_reference(struct bpf_verifier_env *env,
	struct bpf_func_state *state;
	struct bpf_reg_state *reg;

	err = release_reference_state(cur_func(env), ref_obj_id);

	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
		if (reg->ref_obj_id == ref_obj_id)
			mark_reg_invalid(env, reg);

static void invalidate_non_owning_refs(struct bpf_verifier_env *env)
	struct bpf_func_state *unused;
	struct bpf_reg_state *reg;

	bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
		if (type_is_non_owning_ref(reg->type))
			mark_reg_invalid(env, reg);

static void clear_caller_saved_regs(struct bpf_verifier_env *env,
				    struct bpf_reg_state *regs)
	/* after the call registers r0 - r5 were scratched */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		mark_reg_not_init(env, regs, caller_saved[i]);
		__check_reg_arg(env, regs, caller_saved[i], DST_OP_NO_MARK);

typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee,

static int set_callee_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *caller,
			    struct bpf_func_state *callee, int insn_idx);

static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int callsite,
			    set_callee_state_fn set_callee_state_cb,
			    struct bpf_verifier_state *state)
	struct bpf_func_state *caller, *callee;

	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
		verbose(env, "the call stack of %d frames is too deep\n",
			state->curframe + 2);

	if (state->frame[state->curframe + 1]) {
		verbose(env, "verifier bug. Frame %d already allocated\n",
			state->curframe + 1);

	caller = state->frame[state->curframe];
	callee = kzalloc(sizeof(*callee), GFP_KERNEL);

	state->frame[state->curframe + 1] = callee;

	/* callee cannot access r0, r6 - r9 for reading and has to write
	 * into its own stack before reading from it.
	 * callee can read/write into caller's stack
	 */
	init_func_state(env, callee,
			/* remember the callsite, it will be used by bpf_exit */
			state->curframe + 1 /* frameno within this callchain */,
			subprog /* subprog number within this prog */);
	/* Transfer references to the callee */
	err = copy_reference_state(callee, caller);
	err = err ?: set_callee_state_cb(env, caller, callee, callsite);

	/* only increment it after check_reg_arg() finished */

	free_func_state(callee);
	state->frame[state->curframe + 1] = NULL;
static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
				    const struct btf *btf,
				    struct bpf_reg_state *regs)
	struct bpf_subprog_info *sub = subprog_info(env, subprog);
	struct bpf_verifier_log *log = &env->log;

	ret = btf_prepare_func_args(env, subprog);

	/* check that BTF function arguments match actual types that the
	 */
	for (i = 0; i < sub->arg_cnt; i++) {
		struct bpf_reg_state *reg = &regs[regno];
		struct bpf_subprog_arg_info *arg = &sub->args[i];

		if (arg->arg_type == ARG_ANYTHING) {
			if (reg->type != SCALAR_VALUE) {
				bpf_log(log, "R%d is not a scalar\n", regno);
		} else if (arg->arg_type == ARG_PTR_TO_CTX) {
			ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE);
			/* If function expects ctx type in BTF check that caller
			 * is passing PTR_TO_CTX.
			 */
			if (reg->type != PTR_TO_CTX) {
				bpf_log(log, "arg#%d expects pointer to ctx\n", i);
		} else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) {
			ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE);
			if (check_mem_reg(env, reg, regno, arg->mem_size))
			if (!(arg->arg_type & PTR_MAYBE_NULL) && (reg->type & PTR_MAYBE_NULL)) {
				bpf_log(log, "arg#%d is expected to be non-NULL\n", i);
		} else if (base_type(arg->arg_type) == ARG_PTR_TO_ARENA) {
			/*
			 * Can pass any value and the kernel won't crash, but
			 * only PTR_TO_ARENA or SCALAR make sense. Everything
			 * else is a bug in the bpf program. Point it out to
			 * the user at the verification time instead of
			 * run-time debug nightmare.
			 */
			if (reg->type != PTR_TO_ARENA && reg->type != SCALAR_VALUE) {
				bpf_log(log, "R%d is not a pointer to arena or scalar.\n", regno);
		} else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) {
			ret = check_func_arg_reg_off(env, reg, regno, ARG_PTR_TO_DYNPTR);
			ret = process_dynptr_func(env, regno, -1, arg->arg_type, 0);
		} else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) {
			struct bpf_call_arg_meta meta;

			if (register_is_null(reg) && type_may_be_null(arg->arg_type))

			memset(&meta, 0, sizeof(meta)); /* leave func_id as zero */
			mask = mask_raw_tp_reg(env, reg);
			err = check_reg_type(env, regno, arg->arg_type, &arg->btf_id, &meta);
			err = err ?: check_func_arg_reg_off(env, reg, regno, arg->arg_type);
			unmask_raw_tp_reg(reg, mask);

			bpf_log(log, "verifier bug: unrecognized arg#%d type %d\n",

/* Compare BTF of a function call with given bpf_reg_state.
 *
 * EFAULT - there is a verifier bug. Abort verification.
 * EINVAL - there is a type mismatch or BTF is not available.
 * 0 - BTF matches with what bpf_reg_state expects.
 * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
 */
static int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
				  struct bpf_reg_state *regs)
	struct bpf_prog *prog = env->prog;
	struct btf *btf = prog->aux->btf;

	if (!prog->aux->func_info)

	btf_id = prog->aux->func_info[subprog].type_id;

	if (prog->aux->func_info_aux[subprog].unreliable)

	err = btf_check_func_arg_match(env, subprog, btf, regs);
	/* Compiler optimizations can remove arguments from static functions
	 * or mismatched type can be passed into a global function.
	 * In such cases mark the function as unreliable from BTF point of view.
	 */
	prog->aux->func_info_aux[subprog].unreliable = true;
static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			      int insn_idx, int subprog,
			      set_callee_state_fn set_callee_state_cb)
	struct bpf_verifier_state *state = env->cur_state, *callback_state;
	struct bpf_func_state *caller, *callee;

	caller = state->frame[state->curframe];
	err = btf_check_subprog_call(env, subprog, caller->regs);

	/* set_callee_state is used for direct subprog calls, but we are
	 * interested in validating only BPF helpers that can call subprogs as
	 */
	env->subprog_info[subprog].is_cb = true;
	if (bpf_pseudo_kfunc_call(insn) &&
	    !is_callback_calling_kfunc(insn->imm)) {
		verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
			func_id_name(insn->imm), insn->imm);
	} else if (!bpf_pseudo_kfunc_call(insn) &&
		   !is_callback_calling_function(insn->imm)) { /* helper */
		verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n",
			func_id_name(insn->imm), insn->imm);

	if (is_async_callback_calling_insn(insn)) {
		struct bpf_verifier_state *async_cb;

		/* there is no real recursion here. timer and workqueue callbacks are async */
		env->subprog_info[subprog].is_async_cb = true;
		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
					 is_bpf_wq_set_callback_impl_kfunc(insn->imm));

		callee = async_cb->frame[0];
		callee->async_entry_cnt = caller->async_entry_cnt + 1;

		/* Convert bpf_timer_set_callback() args into timer callback args */
		err = set_callee_state_cb(env, caller, callee, insn_idx);

	/* for callback functions enqueue entry to callback and
	 * proceed with next instruction within current frame.
	 */
	callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false);
	if (!callback_state)

	err = setup_func_entry(env, subprog, insn_idx, set_callee_state_cb,

	callback_state->callback_unroll_depth++;
	callback_state->frame[callback_state->curframe - 1]->callback_depth++;
	caller->callback_depth = 0;
static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_func_state *caller;
	int err, subprog, target_insn;

	target_insn = *insn_idx + insn->imm + 1;
	subprog = find_subprog(env, target_insn);
		verbose(env, "verifier bug. No program starts at insn %d\n", target_insn);

	caller = state->frame[state->curframe];
	err = btf_check_subprog_call(env, subprog, caller->regs);

	if (subprog_is_global(env, subprog)) {
		const char *sub_name = subprog_name(env, subprog);

		/* Only global subprogs cannot be called with a lock held. */
		if (cur_func(env)->active_locks) {
			verbose(env, "global function calls are not allowed while holding a lock,\n"
				     "use static function instead\n");

		/* Only global subprogs cannot be called with preemption disabled. */
		if (env->cur_state->active_preempt_lock) {
			verbose(env, "global function calls are not allowed with preemption disabled,\n"
				     "use static function instead\n");

			verbose(env, "Caller passes invalid args into func#%d ('%s')\n",
				subprog, sub_name);

			verbose(env, "Func#%d ('%s') is global and assumed valid.\n",
				subprog, sub_name);
		/* mark global subprog for verifying after main prog */
		subprog_aux(env, subprog)->called = true;
		clear_caller_saved_regs(env, caller->regs);

		/* All global functions return a 64-bit SCALAR_VALUE */
		mark_reg_unknown(env, caller->regs, BPF_REG_0);
		caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;

		/* continue with next insn after call */

	/* for regular function entry setup new frame and continue
	 */
	err = setup_func_entry(env, subprog, *insn_idx, set_callee_state, state);

	clear_caller_saved_regs(env, caller->regs);

	/* and go analyze first insn of the callee */
	*insn_idx = env->subprog_info[subprog].start - 1;

	if (env->log.level & BPF_LOG_LEVEL) {
		verbose(env, "caller:\n");
		print_verifier_state(env, caller, true);
		verbose(env, "callee:\n");
		print_verifier_state(env, state->frame[state->curframe], true);
int map_set_for_each_callback_args(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee)
	/* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
	 *			  void *callback_ctx, u64 flags);
	 * callback_fn(struct bpf_map *map, void *key, void *value,
	 *	       void *callback_ctx);
	 */
	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];

	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
	callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;

	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
	callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;

	/* pointer to stack or null */
	callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];

	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
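
/* Illustrative sketch (not part of the original file): how the callback
 * signature set up above looks from the BPF program side. The map and the
 * key/value types are made up; the callback receives the registers prepared
 * by map_set_for_each_callback_args().
 *
 *	static long count_elem(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
 *	{
 *		(*(long *)ctx)++;
 *		return 0;	// 0 = continue, 1 = stop iterating
 *	}
 *
 *	SEC("tc")
 *	int count_all(struct __sk_buff *skb)
 *	{
 *		long n = 0;
 *
 *		bpf_for_each_map_elem(&my_hash_map, count_elem, &n, 0);
 *		return 0;
 *	}
 */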
static int set_callee_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *caller,
			    struct bpf_func_state *callee, int insn_idx)
	/* copy r1 - r5 args that callee can access. The copy includes parent
	 * pointers, which connects us up to the liveness chain
	 */
	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
		callee->regs[i] = caller->regs[i];

static int set_map_elem_callback_state(struct bpf_verifier_env *env,
				       struct bpf_func_state *caller,
				       struct bpf_func_state *callee,
	struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx];
	struct bpf_map *map;

	/* valid map_ptr and poison value does not matter */
	map = insn_aux->map_ptr_state.map_ptr;
	if (!map->ops->map_set_for_each_callback_args ||
	    !map->ops->map_for_each_callback) {
		verbose(env, "callback function not allowed for map\n");

	err = map->ops->map_set_for_each_callback_args(env, caller, callee);

	callee->in_callback_fn = true;
	callee->callback_ret_range = retval_range(0, 1);

static int set_loop_callback_state(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee,
	/* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx,
	 * callback_fn(u64 index, void *callback_ctx);
	 */
	callee->regs[BPF_REG_1].type = SCALAR_VALUE;
	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];

	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);

	callee->in_callback_fn = true;
	callee->callback_ret_range = retval_range(0, 1);
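
/* Illustrative sketch (not part of the original file): bpf_loop() usage from
 * the BPF program side matching the callback layout above. Names are made up.
 *
 *	struct loop_ctx { __u64 sum; };
 *
 *	static long add_idx(__u64 index, void *ctx)
 *	{
 *		((struct loop_ctx *)ctx)->sum += index;
 *		return 0;	// 0 = continue, 1 = break out of the loop
 *	}
 *
 *	SEC("fentry/do_nanosleep")
 *	int sum_indices(void *ctx)
 *	{
 *		struct loop_ctx lc = { .sum = 0 };
 *
 *		bpf_loop(100, add_idx, &lc, 0);
 *		return 0;
 *	}
 */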
static int set_timer_callback_state(struct bpf_verifier_env *env,
				    struct bpf_func_state *caller,
				    struct bpf_func_state *callee,
	struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;

	/* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);
	 * callback_fn(struct bpf_map *map, void *key, void *value);
	 */
	callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP;
	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
	callee->regs[BPF_REG_1].map_ptr = map_ptr;

	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
	callee->regs[BPF_REG_2].map_ptr = map_ptr;

	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
	callee->regs[BPF_REG_3].map_ptr = map_ptr;

	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
	callee->in_async_callback_fn = true;
	callee->callback_ret_range = retval_range(0, 1);
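
/* Illustrative sketch (not part of the original file): the timer callback
 * matching the register setup above, as written on the BPF side. The map
 * value layout is made up; the bpf_timer must live inside a map value.
 *
 *	struct elem { struct bpf_timer t; };
 *
 *	static int timer_cb(void *map, int *key, struct elem *val)
 *	{
 *		return 0;	// async callback, return range is [0, 1]
 *	}
 *
 *	// after bpf_timer_init(&val->t, &timer_map, CLOCK_MONOTONIC):
 *	bpf_timer_set_callback(&val->t, timer_cb);
 *	bpf_timer_start(&val->t, 1000000, 0);	// fire in 1 ms
 */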
static int set_find_vma_callback_state(struct bpf_verifier_env *env,
				       struct bpf_func_state *caller,
				       struct bpf_func_state *callee,
	/* bpf_find_vma(struct task_struct *task, u64 addr,
	 *		void *callback_fn, void *callback_ctx, u64 flags)
	 * (callback_fn)(struct task_struct *task,
	 *		 struct vm_area_struct *vma, void *callback_ctx);
	 */
	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];

	callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
	callee->regs[BPF_REG_2].btf = btf_vmlinux;
	callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];

	/* pointer to stack or null */
	callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];

	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
	callee->in_callback_fn = true;
	callee->callback_ret_range = retval_range(0, 1);
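
/* Illustrative sketch (not part of the original file): bpf_find_vma() usage
 * matching the callback layout above. Names are made up; the vma argument is
 * the PTR_TO_BTF_ID (vm_area_struct) placed in R2.
 *
 *	static long check_vma(struct task_struct *task,
 *			      struct vm_area_struct *vma, void *ctx)
 *	{
 *		*(unsigned long *)ctx = vma->vm_start;
 *		return 0;
 *	}
 *
 *	// in the program body, for some address 'addr':
 *	unsigned long vm_start = 0;
 *	struct task_struct *task = bpf_get_current_task_btf();
 *
 *	bpf_find_vma(task, addr, check_vma, &vm_start, 0);
 */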
static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
					   struct bpf_func_state *caller,
					   struct bpf_func_state *callee,
	/* bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void
	 * callback_ctx, u64 flags);
	 * callback_fn(const struct bpf_dynptr_t* dynptr, void *callback_ctx);
	 */
	__mark_reg_not_init(env, &callee->regs[BPF_REG_0]);
	mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL);
	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];

	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);

	callee->in_callback_fn = true;
	callee->callback_ret_range = retval_range(0, 1);
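
/* Illustrative sketch (not part of the original file): draining a
 * BPF_MAP_TYPE_USER_RINGBUF with the callback signature above. The sample
 * layout and map name are made up.
 *
 *	static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
 *	{
 *		__u64 val;
 *
 *		if (bpf_dynptr_read(&val, sizeof(val), dynptr, 0, 0))
 *			return 1;	// malformed sample: stop draining
 *		return 0;		// keep draining
 *	}
 *
 *	// in the program body:
 *	bpf_user_ringbuf_drain(&user_rb, handle_sample, NULL, 0);
 */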
static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
					 struct bpf_func_state *caller,
					 struct bpf_func_state *callee,
	/* void bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
	 *			    bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b));
	 *
	 * 'struct bpf_rb_node *node' arg to bpf_rbtree_add_impl is the same PTR_TO_BTF_ID w/ offset
	 * that 'less' callback args will be receiving. However, 'node' arg was release_reference'd
	 * by this point, so look at 'root'
	 */
	struct btf_field *field;

	field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off,
	if (!field || !field->graph_root.value_btf_id)

	mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root);
	ref_set_non_owning(env, &callee->regs[BPF_REG_1]);
	mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root);
	ref_set_non_owning(env, &callee->regs[BPF_REG_2]);

	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
	callee->in_callback_fn = true;
	callee->callback_ret_range = retval_range(0, 1);
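
/* Illustrative sketch (not part of the original file): the 'less' callback
 * that receives the two non-owning bpf_rb_node pointers set up above. The
 * node_data layout is made up.
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_rb_node node;
 *	};
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 */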
static bool is_rbtree_lock_required_kfunc(u32 btf_id);

/* Are we currently verifying the callback for a rbtree helper that must
 * be called with lock held? If so, no need to complain about unreleased
 */
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_insn *insn = env->prog->insnsi;
	struct bpf_func_state *callee;

	if (!state->curframe)

	callee = state->frame[state->curframe];

	if (!callee->in_callback_fn)

	kfunc_btf_id = insn[callee->callsite].imm;
	return is_rbtree_lock_required_kfunc(kfunc_btf_id);

static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg,
		return range.minval <= reg->s32_min_value && reg->s32_max_value <= range.maxval;
	return range.minval <= reg->smin_value && reg->smax_value <= range.maxval;

static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
	struct bpf_verifier_state *state = env->cur_state, *prev_st;
	struct bpf_func_state *caller, *callee;
	struct bpf_reg_state *r0;
	bool in_callback_fn;

	callee = state->frame[state->curframe];
	r0 = &callee->regs[BPF_REG_0];
	if (r0->type == PTR_TO_STACK) {
		/* technically it's ok to return caller's stack pointer
		 * (or caller's caller's pointer) back to the caller,
		 * since these pointers are valid. Only current stack
		 * pointer will be invalid as soon as function exits,
		 * but let's be conservative
		 */
		verbose(env, "cannot return stack pointer to the caller\n");

	caller = state->frame[state->curframe - 1];
	if (callee->in_callback_fn) {
		if (r0->type != SCALAR_VALUE) {
			verbose(env, "R0 not a scalar value\n");

		/* we are going to rely on register's precise value */
		err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64);
		err = err ?: mark_chain_precision(env, BPF_REG_0);

		/* enforce R0 return value range, and bpf_callback_t returns 64bit */
		if (!retval_range_within(callee->callback_ret_range, r0, false)) {
			verbose_invalid_scalar(env, r0, callee->callback_ret_range,
					       "At callback return", "R0");
		if (!calls_callback(env, callee->callsite)) {
			verbose(env, "BUG: in callback at %d, callsite %d !calls_callback\n",
				*insn_idx, callee->callsite);

		/* return to the caller whatever r0 had in the callee */
		caller->regs[BPF_REG_0] = *r0;

	/* Transfer references to the caller */
	err = copy_reference_state(caller, callee);

	/* for callbacks like bpf_loop or bpf_for_each_map_elem go back to callsite,
	 * there function call logic would reschedule callback visit. If iteration
	 * converges is_state_visited() would prune that visit eventually.
	 */
	in_callback_fn = callee->in_callback_fn;
	if (in_callback_fn)
		*insn_idx = callee->callsite;
	else
		*insn_idx = callee->callsite + 1;

	if (env->log.level & BPF_LOG_LEVEL) {
		verbose(env, "returning from callee:\n");
		print_verifier_state(env, callee, true);
		verbose(env, "to caller at %d:\n", *insn_idx);
		print_verifier_state(env, caller, true);

	/* clear everything in the callee. In case of exceptional exits using
	 * bpf_throw, this will be done by copy_verifier_state for extra frames. */
	free_func_state(callee);
	state->frame[state->curframe--] = NULL;

	/* for callbacks widen imprecise scalars to make programs like below verify:
	 *
	 * struct ctx { int i; }
	 * void cb(int idx, struct ctx *ctx) { ctx->i++; ... }
	 * ...
	 * struct ctx = { .i = 0; }
	 * bpf_loop(100, cb, &ctx, 0);
	 *
	 * This is similar to what is done in process_iter_next_call() for open
	 */
	prev_st = in_callback_fn ? find_prev_entry(env, state, *insn_idx) : NULL;
	err = widen_imprecise_scalars(env, prev_st, state);
static int do_refine_retval_range(struct bpf_verifier_env *env,
				  struct bpf_reg_state *regs, int ret_type,
				  struct bpf_call_arg_meta *meta)
	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];

	if (ret_type != RET_INTEGER)

	case BPF_FUNC_get_stack:
	case BPF_FUNC_get_task_stack:
	case BPF_FUNC_probe_read_str:
	case BPF_FUNC_probe_read_kernel_str:
	case BPF_FUNC_probe_read_user_str:
		ret_reg->smax_value = meta->msize_max_value;
		ret_reg->s32_max_value = meta->msize_max_value;
		ret_reg->smin_value = -MAX_ERRNO;
		ret_reg->s32_min_value = -MAX_ERRNO;
		reg_bounds_sync(ret_reg);
	case BPF_FUNC_get_smp_processor_id:
		ret_reg->umax_value = nr_cpu_ids - 1;
		ret_reg->u32_max_value = nr_cpu_ids - 1;
		ret_reg->smax_value = nr_cpu_ids - 1;
		ret_reg->s32_max_value = nr_cpu_ids - 1;
		ret_reg->umin_value = 0;
		ret_reg->u32_min_value = 0;
		ret_reg->smin_value = 0;
		ret_reg->s32_min_value = 0;
		reg_bounds_sync(ret_reg);

	return reg_bounds_sanity_check(env, ret_reg, "retval");
record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
		int func_id, int insn_idx)
	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
	struct bpf_map *map = meta->map_ptr;

	if (func_id != BPF_FUNC_tail_call &&
	    func_id != BPF_FUNC_map_lookup_elem &&
	    func_id != BPF_FUNC_map_update_elem &&
	    func_id != BPF_FUNC_map_delete_elem &&
	    func_id != BPF_FUNC_map_push_elem &&
	    func_id != BPF_FUNC_map_pop_elem &&
	    func_id != BPF_FUNC_map_peek_elem &&
	    func_id != BPF_FUNC_for_each_map_elem &&
	    func_id != BPF_FUNC_redirect_map &&
	    func_id != BPF_FUNC_map_lookup_percpu_elem)

		verbose(env, "kernel subsystem misconfigured verifier\n");

	/* In case of read-only, some additional restrictions
	 * need to be applied in order to prevent altering the
	 * state of the map from program side.
	 */
	if ((map->map_flags & BPF_F_RDONLY_PROG) &&
	    (func_id == BPF_FUNC_map_delete_elem ||
	     func_id == BPF_FUNC_map_update_elem ||
	     func_id == BPF_FUNC_map_push_elem ||
	     func_id == BPF_FUNC_map_pop_elem)) {
		verbose(env, "write into map forbidden\n");

	if (!aux->map_ptr_state.map_ptr)
		bpf_map_ptr_store(aux, meta->map_ptr,
				  !meta->map_ptr->bypass_spec_v1, false);
	else if (aux->map_ptr_state.map_ptr != meta->map_ptr)
		bpf_map_ptr_store(aux, meta->map_ptr,
				  !meta->map_ptr->bypass_spec_v1, true);
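
/* Illustrative sketch (not part of the original file): a map declared with
 * BPF_F_RDONLY_PROG, for which the write-helper rejection above applies.
 * Only userspace may update it; bpf_map_update_elem() from the program is
 * refused at load time. The map name is made up.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 16);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *		__uint(map_flags, BPF_F_RDONLY_PROG);
 *	} config_map SEC(".maps");
 */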
record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
		int func_id, int insn_idx)
	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
	struct bpf_reg_state *regs = cur_regs(env), *reg;
	struct bpf_map *map = meta->map_ptr;

	if (func_id != BPF_FUNC_tail_call)

	if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
		verbose(env, "kernel subsystem misconfigured verifier\n");

	reg = &regs[BPF_REG_3];
	val = reg->var_off.value;
	max = map->max_entries;

	if (!(is_reg_const(reg, false) && val < max)) {
		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);

	err = mark_chain_precision(env, BPF_REG_3);

	if (bpf_map_key_unseen(aux))
		bpf_map_key_store(aux, val);
	else if (!bpf_map_key_poisoned(aux) &&
		 bpf_map_key_immediate(aux) != val)
		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);

static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exit)
	struct bpf_func_state *state = cur_func(env);
	bool refs_lingering = false;

	if (!exception_exit && state->frameno)

	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].type != REF_TYPE_PTR)
		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
			state->refs[i].id, state->refs[i].insn_idx);
		refs_lingering = true;

	return refs_lingering ? -EINVAL : 0;
static int check_resource_leak(struct bpf_verifier_env *env, bool exception_exit, bool check_lock, const char *prefix)
	if (check_lock && cur_func(env)->active_locks) {
		verbose(env, "%s cannot be used inside bpf_spin_lock-ed region\n", prefix);

	err = check_reference_leak(env, exception_exit);
		verbose(env, "%s would lead to reference leak\n", prefix);

	if (check_lock && env->cur_state->active_rcu_lock) {
		verbose(env, "%s cannot be used inside bpf_rcu_read_lock-ed region\n", prefix);

	if (check_lock && env->cur_state->active_preempt_lock) {
		verbose(env, "%s cannot be used inside bpf_preempt_disable-ed region\n", prefix);

static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
				   struct bpf_reg_state *regs)
	struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
	struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
	struct bpf_map *fmt_map = fmt_reg->map_ptr;
	struct bpf_bprintf_data data = {};
	int err, fmt_map_off, num_args;

	/* data must be an array of u64 */
	if (data_len_reg->var_off.value % 8)
	num_args = data_len_reg->var_off.value / 8;

	/* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
	 * and map_direct_value_addr is set.
	 */
	fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
	err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
		verbose(env, "verifier bug\n");
	fmt = (char *)(long)fmt_addr + fmt_map_off;

	/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we
	 * can focus on validating the format specifiers.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, num_args, &data);
		verbose(env, "Invalid format string\n");
static int check_get_func_ip(struct bpf_verifier_env *env)
	enum bpf_prog_type type = resolve_prog_type(env->prog);
	int func_id = BPF_FUNC_get_func_ip;

	if (type == BPF_PROG_TYPE_TRACING) {
		if (!bpf_prog_has_trampoline(env->prog)) {
			verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
				func_id_name(func_id), func_id);
	} else if (type == BPF_PROG_TYPE_KPROBE) {

	verbose(env, "func %s#%d not supported for program type %d\n",
		func_id_name(func_id), func_id, type);

static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
	return &env->insn_aux_data[env->insn_idx];

static bool loop_flag_is_zero(struct bpf_verifier_env *env)
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[BPF_REG_4];
	bool reg_is_null = register_is_null(reg);

	mark_chain_precision(env, BPF_REG_4);

	return reg_is_null;

static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
	struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state;

	if (!state->initialized) {
		state->initialized = 1;
		state->fit_for_inline = loop_flag_is_zero(env);
		state->callback_subprogno = subprogno;

	if (!state->fit_for_inline)

	state->fit_for_inline = (loop_flag_is_zero(env) &&
				 state->callback_subprogno == subprogno);

static int get_helper_proto(struct bpf_verifier_env *env, int func_id,
			    const struct bpf_func_proto **ptr)
	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID)

	if (!env->ops->get_func_proto)

	*ptr = env->ops->get_func_proto(func_id, env->prog);
	return *ptr ? 0 : -EINVAL;
static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
	bool returns_cpu_specific_alloc_ptr = false;
	const struct bpf_func_proto *fn = NULL;
	enum bpf_return_type ret_type;
	enum bpf_type_flag ret_flag;
	struct bpf_reg_state *regs;
	struct bpf_call_arg_meta meta;
	int insn_idx = *insn_idx_p;
	int i, err, func_id;

	/* find function prototype */
	func_id = insn->imm;
	err = get_helper_proto(env, insn->imm, &fn);
	if (err == -ERANGE) {
		verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id);

		verbose(env, "program of this type cannot use helper %s#%d\n",
			func_id_name(func_id), func_id);

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	if (!env->prog->gpl_compatible && fn->gpl_only) {
		verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");

	if (fn->allowed && !fn->allowed(env->prog)) {
		verbose(env, "helper call is not allowed in probe\n");

	if (!in_sleepable(env) && fn->might_sleep) {
		verbose(env, "helper call might sleep in a non-sleepable prog\n");

	/* With LD_ABS/IND some JITs save/restore skb from r1. */
	changes_data = bpf_helper_changes_pkt_data(fn->func);
	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
			func_id_name(func_id), func_id);

	memset(&meta, 0, sizeof(meta));
	meta.pkt_access = fn->pkt_access;

	err = check_func_proto(fn, func_id);
		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
			func_id_name(func_id), func_id);

	if (env->cur_state->active_rcu_lock) {
		if (fn->might_sleep) {
			verbose(env, "sleepable helper %s#%d in rcu_read_lock region\n",
				func_id_name(func_id), func_id);

		if (in_sleepable(env) && is_storage_get_function(func_id))
			env->insn_aux_data[insn_idx].storage_get_func_atomic = true;

	if (env->cur_state->active_preempt_lock) {
		if (fn->might_sleep) {
			verbose(env, "sleepable helper %s#%d in non-preemptible region\n",
				func_id_name(func_id), func_id);

		if (in_sleepable(env) && is_storage_get_function(func_id))
			env->insn_aux_data[insn_idx].storage_get_func_atomic = true;

	meta.func_id = func_id;
	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
		err = check_func_arg(env, i, &meta, fn, insn_idx);

	err = record_func_map(env, &meta, func_id, insn_idx);

	err = record_func_key(env, &meta, func_id, insn_idx);

	/* Mark slots with STACK_MISC in case of raw mode, stack offset
	 * is inferred from register state.
	 */
	for (i = 0; i < meta.access_size; i++) {
		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
				       BPF_WRITE, -1, false, false);

	regs = cur_regs(env);

	if (meta.release_regno) {
		/* This can only be set for PTR_TO_STACK, as CONST_PTR_TO_DYNPTR cannot
		 * be released by any dynptr helper. Hence, unmark_stack_slots_dynptr
		 * is safe to do directly.
		 */
		if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) {
			if (regs[meta.release_regno].type == CONST_PTR_TO_DYNPTR) {
				verbose(env, "verifier internal error: CONST_PTR_TO_DYNPTR cannot be released\n");
			err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
		} else if (func_id == BPF_FUNC_kptr_xchg && meta.ref_obj_id) {
			u32 ref_obj_id = meta.ref_obj_id;
			bool in_rcu = in_rcu_cs(env);
			struct bpf_func_state *state;
			struct bpf_reg_state *reg;

			err = release_reference_state(cur_func(env), ref_obj_id);
			bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
				if (reg->ref_obj_id == ref_obj_id) {
					if (in_rcu && (reg->type & MEM_ALLOC) && (reg->type & MEM_PERCPU)) {
						reg->ref_obj_id = 0;
						reg->type &= ~MEM_ALLOC;
						reg->type |= MEM_RCU;
						mark_reg_invalid(env, reg);
		} else if (meta.ref_obj_id) {
			err = release_reference(env, meta.ref_obj_id);
		} else if (register_is_null(&regs[meta.release_regno])) {
			/* meta.ref_obj_id can only be 0 if register that is meant to be
			 * released is NULL, which must be > R0.
			 */
			verbose(env, "func %s#%d reference has not been acquired before\n",
				func_id_name(func_id), func_id);
	case BPF_FUNC_tail_call:
		err = check_resource_leak(env, false, true, "tail_call");
	case BPF_FUNC_get_local_storage:
		/* check that flags argument in get_local_storage(map, flags) is 0,
		 * this is required because get_local_storage() can't return an error.
		 */
		if (!register_is_null(&regs[BPF_REG_2])) {
			verbose(env, "get_local_storage() doesn't support non-zero flags\n");
	case BPF_FUNC_for_each_map_elem:
		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
					 set_map_elem_callback_state);
	case BPF_FUNC_timer_set_callback:
		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
					 set_timer_callback_state);
	case BPF_FUNC_find_vma:
		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
					 set_find_vma_callback_state);
	case BPF_FUNC_snprintf:
		err = check_bpf_snprintf_call(env, regs);
	case BPF_FUNC_loop:
		update_loop_inline_state(env, meta.subprogno);
		/* Verifier relies on R1 value to determine if bpf_loop() iteration
		 * is finished, thus mark it precise.
		 */
		err = mark_chain_precision(env, BPF_REG_1);
		if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) {
			err = push_callback_call(env, insn, insn_idx, meta.subprogno,
						 set_loop_callback_state);
			cur_func(env)->callback_depth = 0;
			if (env->log.level & BPF_LOG_LEVEL2)
				verbose(env, "frame%d bpf_loop iteration limit reached\n",
					env->cur_state->curframe);
	case BPF_FUNC_dynptr_from_mem:
		if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
			verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
				reg_type_str(env, regs[BPF_REG_1].type));
	case BPF_FUNC_set_retval:
		if (prog_type == BPF_PROG_TYPE_LSM &&
		    env->prog->expected_attach_type == BPF_LSM_CGROUP) {
			if (!env->prog->aux->attach_func_proto->type) {
				/* Make sure programs that attach to void
				 * hooks don't try to modify return value.
				 */
				verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
	case BPF_FUNC_dynptr_data:
		struct bpf_reg_state *reg;
		int id, ref_obj_id;

		reg = get_dynptr_arg_reg(env, fn, regs);

		if (meta.dynptr_id) {
			verbose(env, "verifier internal error: meta.dynptr_id already set\n");
		if (meta.ref_obj_id) {
			verbose(env, "verifier internal error: meta.ref_obj_id already set\n");

		id = dynptr_id(env, reg);
			verbose(env, "verifier internal error: failed to obtain dynptr id\n");

		ref_obj_id = dynptr_ref_obj_id(env, reg);
		if (ref_obj_id < 0) {
			verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n");

		meta.dynptr_id = id;
		meta.ref_obj_id = ref_obj_id;
	case BPF_FUNC_dynptr_write:
		enum bpf_dynptr_type dynptr_type;
		struct bpf_reg_state *reg;

		reg = get_dynptr_arg_reg(env, fn, regs);

		dynptr_type = dynptr_get_type(env, reg);
		if (dynptr_type == BPF_DYNPTR_TYPE_INVALID)

		if (dynptr_type == BPF_DYNPTR_TYPE_SKB)
			/* this will trigger clear_all_pkt_pointers(), which will
			 * invalidate all dynptr slices associated with the skb
			 */
			changes_data = true;
	case BPF_FUNC_per_cpu_ptr:
	case BPF_FUNC_this_cpu_ptr:
		struct bpf_reg_state *reg = &regs[BPF_REG_1];
		const struct btf_type *type;

		if (reg->type & MEM_RCU) {
			type = btf_type_by_id(reg->btf, reg->btf_id);
			if (!type || !btf_type_is_struct(type)) {
				verbose(env, "Helper has invalid btf/btf_id in R1\n");
			returns_cpu_specific_alloc_ptr = true;
			env->insn_aux_data[insn_idx].call_with_percpu_alloc_ptr = true;
	case BPF_FUNC_user_ringbuf_drain:
		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
					 set_user_ringbuf_callback_state);
10967 for (i
= 0; i
< CALLER_SAVED_REGS
; i
++) {
10968 mark_reg_not_init(env
, regs
, caller_saved
[i
]);
10969 check_reg_arg(env
, caller_saved
[i
], DST_OP_NO_MARK
);
10972 /* helper call returns 64-bit value. */
10973 regs
[BPF_REG_0
].subreg_def
= DEF_NOT_SUBREG
;
10975 /* update return register (already marked as written above) */
10976 ret_type
= fn
->ret_type
;
10977 ret_flag
= type_flag(ret_type
);
10979 switch (base_type(ret_type
)) {
10981 /* sets type to SCALAR_VALUE */
10982 mark_reg_unknown(env
, regs
, BPF_REG_0
);
10985 regs
[BPF_REG_0
].type
= NOT_INIT
;
10987 case RET_PTR_TO_MAP_VALUE
:
10988 /* There is no offset yet applied, variable or fixed */
10989 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
10990 /* remember map_ptr, so that check_map_access()
10991 * can check 'value_size' boundary of memory access
10992 * to map element returned from bpf_map_lookup_elem()
10994 if (meta
.map_ptr
== NULL
) {
10996 "kernel subsystem misconfigured verifier\n");
10999 regs
[BPF_REG_0
].map_ptr
= meta
.map_ptr
;
11000 regs
[BPF_REG_0
].map_uid
= meta
.map_uid
;
11001 regs
[BPF_REG_0
].type
= PTR_TO_MAP_VALUE
| ret_flag
;
11002 if (!type_may_be_null(ret_type
) &&
11003 btf_record_has_field(meta
.map_ptr
->record
, BPF_SPIN_LOCK
)) {
11004 regs
[BPF_REG_0
].id
= ++env
->id_gen
;
11007 case RET_PTR_TO_SOCKET
:
11008 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
11009 regs
[BPF_REG_0
].type
= PTR_TO_SOCKET
| ret_flag
;
11011 case RET_PTR_TO_SOCK_COMMON
:
11012 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
11013 regs
[BPF_REG_0
].type
= PTR_TO_SOCK_COMMON
| ret_flag
;
11015 case RET_PTR_TO_TCP_SOCK
:
11016 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
11017 regs
[BPF_REG_0
].type
= PTR_TO_TCP_SOCK
| ret_flag
;
11019 case RET_PTR_TO_MEM
:
11020 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
11021 regs
[BPF_REG_0
].type
= PTR_TO_MEM
| ret_flag
;
11022 regs
[BPF_REG_0
].mem_size
= meta
.mem_size
;
11024 case RET_PTR_TO_MEM_OR_BTF_ID
:
11026 const struct btf_type
*t
;
11028 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
11029 t
= btf_type_skip_modifiers(meta
.ret_btf
, meta
.ret_btf_id
, NULL
);
11030 if (!btf_type_is_struct(t
)) {
11032 const struct btf_type
*ret
;
11035 /* resolve the type size of ksym. */
11036 ret
= btf_resolve_size(meta
.ret_btf
, t
, &tsize
);
11038 tname
= btf_name_by_offset(meta
.ret_btf
, t
->name_off
);
11039 verbose(env
, "unable to resolve the size of type '%s': %ld\n",
11040 tname
, PTR_ERR(ret
));
11043 regs
[BPF_REG_0
].type
= PTR_TO_MEM
| ret_flag
;
11044 regs
[BPF_REG_0
].mem_size
= tsize
;
11046 if (returns_cpu_specific_alloc_ptr
) {
11047 regs
[BPF_REG_0
].type
= PTR_TO_BTF_ID
| MEM_ALLOC
| MEM_RCU
;
11049 /* MEM_RDONLY may be carried from ret_flag, but it
11050 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
11051 * it will confuse the check of PTR_TO_BTF_ID in
11052 * check_mem_access().
11054 ret_flag
&= ~MEM_RDONLY
;
11055 regs
[BPF_REG_0
].type
= PTR_TO_BTF_ID
| ret_flag
;
11058 regs
[BPF_REG_0
].btf
= meta
.ret_btf
;
11059 regs
[BPF_REG_0
].btf_id
= meta
.ret_btf_id
;
11063 case RET_PTR_TO_BTF_ID
:
11065 struct btf
*ret_btf
;
11068 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
11069 regs
[BPF_REG_0
].type
= PTR_TO_BTF_ID
| ret_flag
;
11070 if (func_id
== BPF_FUNC_kptr_xchg
) {
11071 ret_btf
= meta
.kptr_field
->kptr
.btf
;
11072 ret_btf_id
= meta
.kptr_field
->kptr
.btf_id
;
11073 if (!btf_is_kernel(ret_btf
)) {
11074 regs
[BPF_REG_0
].type
|= MEM_ALLOC
;
11075 if (meta
.kptr_field
->type
== BPF_KPTR_PERCPU
)
11076 regs
[BPF_REG_0
].type
|= MEM_PERCPU
;
11079 if (fn
->ret_btf_id
== BPF_PTR_POISON
) {
11080 verbose(env
, "verifier internal error:");
11081 verbose(env
, "func %s has non-overwritten BPF_PTR_POISON return type\n",
11082 func_id_name(func_id
));
11085 ret_btf
= btf_vmlinux
;
11086 ret_btf_id
= *fn
->ret_btf_id
;
11088 if (ret_btf_id
== 0) {
11089 verbose(env
, "invalid return type %u of func %s#%d\n",
11090 base_type(ret_type
), func_id_name(func_id
),
11094 regs
[BPF_REG_0
].btf
= ret_btf
;
11095 regs
[BPF_REG_0
].btf_id
= ret_btf_id
;
11099 verbose(env
, "unknown return type %u of func %s#%d\n",
11100 base_type(ret_type
), func_id_name(func_id
), func_id
);
11104 if (type_may_be_null(regs
[BPF_REG_0
].type
))
11105 regs
[BPF_REG_0
].id
= ++env
->id_gen
;
11107 if (helper_multiple_ref_obj_use(func_id
, meta
.map_ptr
)) {
11108 verbose(env
, "verifier internal error: func %s#%d sets ref_obj_id more than once\n",
11109 func_id_name(func_id
), func_id
);
11113 if (is_dynptr_ref_function(func_id
))
11114 regs
[BPF_REG_0
].dynptr_id
= meta
.dynptr_id
;
11116 if (is_ptr_cast_function(func_id
) || is_dynptr_ref_function(func_id
)) {
11117 /* For release_reference() */
11118 regs
[BPF_REG_0
].ref_obj_id
= meta
.ref_obj_id
;
11119 } else if (is_acquire_function(func_id
, meta
.map_ptr
)) {
11120 int id
= acquire_reference_state(env
, insn_idx
);
11124 /* For mark_ptr_or_null_reg() */
11125 regs
[BPF_REG_0
].id
= id
;
11126 /* For release_reference() */
11127 regs
[BPF_REG_0
].ref_obj_id
= id
;
11130 err
= do_refine_retval_range(env
, regs
, fn
->ret_type
, func_id
, &meta
);
11134 err
= check_map_func_compatibility(env
, meta
.map_ptr
, func_id
);
11138 if ((func_id
== BPF_FUNC_get_stack
||
11139 func_id
== BPF_FUNC_get_task_stack
) &&
11140 !env
->prog
->has_callchain_buf
) {
11141 const char *err_str
;
11143 #ifdef CONFIG_PERF_EVENTS
11144 err
= get_callchain_buffers(sysctl_perf_event_max_stack
);
11145 err_str
= "cannot get callchain buffer for func %s#%d\n";
11148 err_str
= "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
11151 verbose(env
, err_str
, func_id_name(func_id
), func_id
);
11155 env
->prog
->has_callchain_buf
= true;
11158 if (func_id
== BPF_FUNC_get_stackid
|| func_id
== BPF_FUNC_get_stack
)
11159 env
->prog
->call_get_stack
= true;
11161 if (func_id
== BPF_FUNC_get_func_ip
) {
11162 if (check_get_func_ip(env
))
11164 env
->prog
->call_get_func_ip
= true;
11168 clear_all_pkt_pointers(env
);
/* mark_btf_func_reg_size() is used when the reg size is determined by
 * the BTF func_proto's return value size and argument.
 */
static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
	struct bpf_reg_state *reg = &cur_regs(env)[regno];

	if (regno == BPF_REG_0) {
		/* Function return value */
		reg->live |= REG_LIVE_WRITTEN;
		reg->subreg_def = reg_size == sizeof(u64) ?
			DEF_NOT_SUBREG : env->insn_idx + 1;
		/* Function argument */
		if (reg_size == sizeof(u64)) {
			mark_insn_zext(env, reg);
			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32);

static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta)
	return meta->kfunc_flags & KF_ACQUIRE;

static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta)
	return meta->kfunc_flags & KF_RELEASE;

static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta)
	return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta);

static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta)
	return meta->kfunc_flags & KF_SLEEPABLE;

static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta)
	return meta->kfunc_flags & KF_DESTRUCTIVE;

static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta)
	return meta->kfunc_flags & KF_RCU;

static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta)
	return meta->kfunc_flags & KF_RCU_PROTECTED;
static bool is_kfunc_arg_mem_size(const struct btf *btf,
				  const struct btf_param *arg,
				  const struct bpf_reg_state *reg)
	const struct btf_type *t;

	t = btf_type_skip_modifiers(btf, arg->type, NULL);
	if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)

	return btf_param_match_suffix(btf, arg, "__sz");

static bool is_kfunc_arg_const_mem_size(const struct btf *btf,
					const struct btf_param *arg,
					const struct bpf_reg_state *reg)
	const struct btf_type *t;

	t = btf_type_skip_modifiers(btf, arg->type, NULL);
	if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)

	return btf_param_match_suffix(btf, arg, "__szk");

static bool is_kfunc_arg_optional(const struct btf *btf, const struct btf_param *arg)
	return btf_param_match_suffix(btf, arg, "__opt");

static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg)
	return btf_param_match_suffix(btf, arg, "__k");

static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg)
	return btf_param_match_suffix(btf, arg, "__ign");

static bool is_kfunc_arg_map(const struct btf *btf, const struct btf_param *arg)
	return btf_param_match_suffix(btf, arg, "__map");

static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg)
	return btf_param_match_suffix(btf, arg, "__alloc");

static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *arg)
	return btf_param_match_suffix(btf, arg, "__uninit");

static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg)
	return btf_param_match_suffix(btf, arg, "__refcounted_kptr");

static bool is_kfunc_arg_nullable(const struct btf *btf, const struct btf_param *arg)
	return btf_param_match_suffix(btf, arg, "__nullable");

static bool is_kfunc_arg_const_str(const struct btf *btf, const struct btf_param *arg)
	return btf_param_match_suffix(btf, arg, "__str");

static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
					  const struct btf_param *arg,
	int len, target_len = strlen(name);
	const char *param_name;

	param_name = btf_name_by_offset(btf, arg->name_off);
	if (str_is_empty(param_name))
	len = strlen(param_name);
	if (len != target_len)
	if (strcmp(param_name, name))
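
/* Illustrative sketch (not part of the original file): how the argument-name
 * suffixes detected above look in a kfunc definition. The kfunc itself is
 * hypothetical; only the "__sz" / "__nullable" naming convention is the point.
 *
 *	__bpf_kfunc int bpf_example_fill(void *mem, u32 mem__sz,
 *					 struct task_struct *task__nullable)
 *	{
 *		// 'mem__sz' tells the verifier that 'mem' points to a buffer
 *		// of at least mem__sz bytes; 'task__nullable' may be NULL.
 *		return 0;
 *	}
 */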
	KF_ARG_LIST_HEAD_ID,
	KF_ARG_LIST_NODE_ID,
	KF_ARG_WORKQUEUE_ID,

BTF_ID_LIST(kf_arg_btf_ids)
BTF_ID(struct, bpf_dynptr)
BTF_ID(struct, bpf_list_head)
BTF_ID(struct, bpf_list_node)
BTF_ID(struct, bpf_rb_root)
BTF_ID(struct, bpf_rb_node)
BTF_ID(struct, bpf_wq)

static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
				    const struct btf_param *arg, int type)
	const struct btf_type *t;

	t = btf_type_skip_modifiers(btf, arg->type, NULL);
	if (!btf_type_is_ptr(t))
	t = btf_type_skip_modifiers(btf, t->type, &res_id);

	return btf_types_are_same(btf, res_id, btf_vmlinux, kf_arg_btf_ids[type]);

static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg)
	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_DYNPTR_ID);

static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg)
	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_HEAD_ID);

static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg)
	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID);

static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg)
	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_ROOT_ID);

static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg)
	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_NODE_ID);

static bool is_kfunc_arg_wq(const struct btf *btf, const struct btf_param *arg)
	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_WORKQUEUE_ID);

static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf,
				  const struct btf_param *arg)
	const struct btf_type *t;

	t = btf_type_resolve_func_ptr(btf, arg->type, NULL);
/* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
					const struct btf *btf,
					const struct btf_type *t, int rec)
	const struct btf_type *member_type;
	const struct btf_member *member;

	if (!btf_type_is_struct(t))

	for_each_member(i, t, member) {
		const struct btf_array *array;

		member_type = btf_type_skip_modifiers(btf, member->type, NULL);
		if (btf_type_is_struct(member_type)) {
				verbose(env, "max struct nesting depth exceeded\n");
			if (!__btf_type_is_scalar_struct(env, btf, member_type, rec + 1))

		if (btf_type_is_array(member_type)) {
			array = btf_array(member_type);
			if (!array->nelems)
			member_type = btf_type_skip_modifiers(btf, array->type, NULL);
			if (!btf_type_is_scalar(member_type))

		if (!btf_type_is_scalar(member_type))
enum kfunc_ptr_arg_type {
	KF_ARG_PTR_TO_ALLOC_BTF_ID,    /* Allocated object */
	KF_ARG_PTR_TO_REFCOUNTED_KPTR, /* Refcounted local kptr */
	KF_ARG_PTR_TO_DYNPTR,
	KF_ARG_PTR_TO_ITER,
	KF_ARG_PTR_TO_LIST_HEAD,
	KF_ARG_PTR_TO_LIST_NODE,
	KF_ARG_PTR_TO_BTF_ID,	       /* Also covers reg2btf_ids conversions */
	KF_ARG_PTR_TO_MEM_SIZE,	       /* Size derived from next argument, skip it */
	KF_ARG_PTR_TO_CALLBACK,
	KF_ARG_PTR_TO_RB_ROOT,
	KF_ARG_PTR_TO_RB_NODE,
	KF_ARG_PTR_TO_NULL,
	KF_ARG_PTR_TO_CONST_STR,
	KF_ARG_PTR_TO_WORKQUEUE,

enum special_kfunc_type {
	KF_bpf_obj_new_impl,
	KF_bpf_obj_drop_impl,
	KF_bpf_refcount_acquire_impl,
	KF_bpf_list_push_front_impl,
	KF_bpf_list_push_back_impl,
	KF_bpf_list_pop_front,
	KF_bpf_list_pop_back,
	KF_bpf_cast_to_kern_ctx,
	KF_bpf_rdonly_cast,
	KF_bpf_rcu_read_lock,
	KF_bpf_rcu_read_unlock,
	KF_bpf_rbtree_remove,
	KF_bpf_rbtree_add_impl,
	KF_bpf_rbtree_first,
	KF_bpf_dynptr_from_skb,
	KF_bpf_dynptr_from_xdp,
	KF_bpf_dynptr_slice,
	KF_bpf_dynptr_slice_rdwr,
	KF_bpf_dynptr_clone,
	KF_bpf_percpu_obj_new_impl,
	KF_bpf_percpu_obj_drop_impl,
	KF_bpf_wq_set_callback_impl,
	KF_bpf_preempt_disable,
	KF_bpf_preempt_enable,
	KF_bpf_iter_css_task_new,
	KF_bpf_session_cookie,
	KF_bpf_get_kmem_cache,

BTF_SET_START(special_kfunc_set)
BTF_ID(func, bpf_obj_new_impl)
BTF_ID(func, bpf_obj_drop_impl)
BTF_ID(func, bpf_refcount_acquire_impl)
BTF_ID(func, bpf_list_push_front_impl)
BTF_ID(func, bpf_list_push_back_impl)
BTF_ID(func, bpf_list_pop_front)
BTF_ID(func, bpf_list_pop_back)
BTF_ID(func, bpf_cast_to_kern_ctx)
BTF_ID(func, bpf_rdonly_cast)
BTF_ID(func, bpf_rbtree_remove)
BTF_ID(func, bpf_rbtree_add_impl)
BTF_ID(func, bpf_rbtree_first)
BTF_ID(func, bpf_dynptr_from_skb)
BTF_ID(func, bpf_dynptr_from_xdp)
BTF_ID(func, bpf_dynptr_slice)
BTF_ID(func, bpf_dynptr_slice_rdwr)
BTF_ID(func, bpf_dynptr_clone)
BTF_ID(func, bpf_percpu_obj_new_impl)
BTF_ID(func, bpf_percpu_obj_drop_impl)
BTF_ID(func, bpf_throw)
BTF_ID(func, bpf_wq_set_callback_impl)
#ifdef CONFIG_CGROUPS
BTF_ID(func, bpf_iter_css_task_new)
BTF_SET_END(special_kfunc_set)

BTF_ID_LIST(special_kfunc_list)
BTF_ID(func, bpf_obj_new_impl)
BTF_ID(func, bpf_obj_drop_impl)
BTF_ID(func, bpf_refcount_acquire_impl)
BTF_ID(func, bpf_list_push_front_impl)
BTF_ID(func, bpf_list_push_back_impl)
BTF_ID(func, bpf_list_pop_front)
BTF_ID(func, bpf_list_pop_back)
BTF_ID(func, bpf_cast_to_kern_ctx)
BTF_ID(func, bpf_rdonly_cast)
BTF_ID(func, bpf_rcu_read_lock)
BTF_ID(func, bpf_rcu_read_unlock)
BTF_ID(func, bpf_rbtree_remove)
BTF_ID(func, bpf_rbtree_add_impl)
BTF_ID(func, bpf_rbtree_first)
BTF_ID(func, bpf_dynptr_from_skb)
BTF_ID(func, bpf_dynptr_from_xdp)
BTF_ID(func, bpf_dynptr_slice)
BTF_ID(func, bpf_dynptr_slice_rdwr)
BTF_ID(func, bpf_dynptr_clone)
BTF_ID(func, bpf_percpu_obj_new_impl)
BTF_ID(func, bpf_percpu_obj_drop_impl)
BTF_ID(func, bpf_throw)
BTF_ID(func, bpf_wq_set_callback_impl)
BTF_ID(func, bpf_preempt_disable)
BTF_ID(func, bpf_preempt_enable)
#ifdef CONFIG_CGROUPS
BTF_ID(func, bpf_iter_css_task_new)
#ifdef CONFIG_BPF_EVENTS
BTF_ID(func, bpf_session_cookie)
BTF_ID(func, bpf_get_kmem_cache)

static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
	if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] &&
	    meta->arg_owning_ref) {

	return meta->kfunc_flags & KF_RET_NULL;

static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
	return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock];

static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta)
	return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock];

static bool is_kfunc_bpf_preempt_disable(struct bpf_kfunc_call_arg_meta *meta)
	return meta->func_id == special_kfunc_list[KF_bpf_preempt_disable];

static bool is_kfunc_bpf_preempt_enable(struct bpf_kfunc_call_arg_meta *meta)
	return meta->func_id == special_kfunc_list[KF_bpf_preempt_enable];
static enum kfunc_ptr_arg_type
get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
		       struct bpf_kfunc_call_arg_meta *meta,
		       const struct btf_type *t, const struct btf_type *ref_t,
		       const char *ref_tname, const struct btf_param *args,
		       int argno, int nargs)
{
	u32 regno = argno + 1;
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];
	bool arg_mem_size = false;

	if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
		return KF_ARG_PTR_TO_CTX;

	/* In this function, we verify the kfunc's BTF as per the argument type,
	 * leaving the rest of the verification with respect to the register
	 * type to our caller. When a set of conditions hold in the BTF type of
	 * arguments, we resolve it to a known kfunc_ptr_arg_type.
	 */
	if (btf_is_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno))
		return KF_ARG_PTR_TO_CTX;

	if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg))
		return KF_ARG_PTR_TO_NULL;

	if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_ALLOC_BTF_ID;

	if (is_kfunc_arg_refcounted_kptr(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_REFCOUNTED_KPTR;

	if (is_kfunc_arg_dynptr(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_DYNPTR;

	if (is_kfunc_arg_iter(meta, argno, &args[argno]))
		return KF_ARG_PTR_TO_ITER;

	if (is_kfunc_arg_list_head(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_LIST_HEAD;

	if (is_kfunc_arg_list_node(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_LIST_NODE;

	if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_RB_ROOT;

	if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_RB_NODE;

	if (is_kfunc_arg_const_str(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_CONST_STR;

	if (is_kfunc_arg_map(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_MAP;

	if (is_kfunc_arg_wq(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_WORKQUEUE;

	if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
		if (!btf_type_is_struct(ref_t)) {
			verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
				meta->func_name, argno, btf_type_str(ref_t), ref_tname);
			return -EINVAL;
		}
		return KF_ARG_PTR_TO_BTF_ID;
	}

	if (is_kfunc_arg_callback(env, meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_CALLBACK;

	if (argno + 1 < nargs &&
	    (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]) ||
	     is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1])))
		arg_mem_size = true;

	/* This is the catch all argument type of register types supported by
	 * check_helper_mem_access. However, we only allow when argument type is
	 * pointer to scalar, or struct composed (recursively) of scalars. When
	 * arg_mem_size is true, the pointer can be void *.
	 */
	if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) &&
	    (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
		verbose(env, "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
			argno, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
		return -EINVAL;
	}

	return arg_mem_size ? KF_ARG_PTR_TO_MEM_SIZE : KF_ARG_PTR_TO_MEM;
}
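
/* Illustrative only: the BTF-level cues this function keys on come from
 * argument-name suffixes in the kfunc declaration. A hypothetical kfunc
 * (not in the tree; the name and arguments are made up for the example)
 * showing a few of them:
 *
 *	__bpf_kfunc int bpf_example_copy(void *dst, u32 dst__sz,
 *					 const char *fmt__str,
 *					 struct bpf_map *map__map);
 *
 * "dst"/"dst__sz" resolve to KF_ARG_PTR_TO_MEM_SIZE (a pointer + size pair),
 * "fmt__str" to KF_ARG_PTR_TO_CONST_STR, and "map__map" to KF_ARG_PTR_TO_MAP.
 */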
static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
					struct bpf_reg_state *reg,
					const struct btf_type *ref_t,
					const char *ref_tname, u32 ref_id,
					struct bpf_kfunc_call_arg_meta *meta,
					int argno)
{
	const struct btf_type *reg_ref_t;
	bool strict_type_match = false;
	const struct btf *reg_btf;
	const char *reg_ref_tname;
	bool taking_projection;
	bool struct_same;
	u32 reg_ref_id;

	if (base_type(reg->type) == PTR_TO_BTF_ID) {
		reg_btf = reg->btf;
		reg_ref_id = reg->btf_id;
	} else {
		reg_btf = btf_vmlinux;
		reg_ref_id = *reg2btf_ids[base_type(reg->type)];
	}

	/* Enforce strict type matching for calls to kfuncs that are acquiring
	 * or releasing a reference, or are no-cast aliases. We do _not_
	 * enforce strict matching for plain KF_TRUSTED_ARGS kfuncs by default,
	 * as we want to enable BPF programs to pass types that are bitwise
	 * equivalent without forcing them to explicitly cast with something
	 * like bpf_cast_to_kern_ctx().
	 *
	 * For example, say we had a type like the following:
	 *
	 * struct bpf_cpumask {
	 *	cpumask_t cpumask;
	 *	refcount_t usage;
	 * };
	 *
	 * Note that as specified in <linux/cpumask.h>, cpumask_t is typedef'ed
	 * to a struct cpumask, so it would be safe to pass a struct
	 * bpf_cpumask * to a kfunc expecting a struct cpumask *.
	 *
	 * The philosophy here is similar to how we allow scalars of different
	 * types to be passed to kfuncs as long as the size is the same. The
	 * only difference here is that we're simply allowing
	 * btf_struct_ids_match() to walk the struct at the 0th offset, and
	 * resolve types.
	 */
	if ((is_kfunc_release(meta) && reg->ref_obj_id) ||
	    btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id))
		strict_type_match = true;

	WARN_ON_ONCE(is_kfunc_release(meta) &&
		     (reg->off || !tnum_is_const(reg->var_off) ||
		      reg->var_off.value));

	reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id);
	reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off);
	struct_same = btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match);
	/* If kfunc is accepting a projection type (ie. __sk_buff), it cannot
	 * actually use it -- it must cast to the underlying type. So we allow
	 * caller to pass in the underlying type.
	 */
	taking_projection = btf_is_projection_of(ref_tname, reg_ref_tname);
	if (!taking_projection && !struct_same) {
		verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
			meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1,
			btf_type_str(reg_ref_t), reg_ref_tname);
		return -EINVAL;
	}
	return 0;
}
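
/* Illustrative only: the relaxed matching above is what lets a BPF program
 * pass a struct bpf_cpumask * (whose first member is a cpumask_t) straight
 * to a kfunc declared against struct cpumask *, e.g.:
 *
 *	struct bpf_cpumask *mask = bpf_cpumask_create();
 *
 *	if (mask) {
 *		bpf_cpumask_set_cpu(0, mask);
 *		// bpf_cpumask_test_cpu() takes a const struct cpumask *; the
 *		// cast matches because btf_struct_ids_match() walks the
 *		// struct at offset 0 and cpumask_t is a typedef of cpumask.
 *		if (bpf_cpumask_test_cpu(0, (const struct cpumask *)mask))
 *			bpf_printk("cpu0 set");
 *		bpf_cpumask_release(mask);
 *	}
 */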
static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	struct btf_record *rec = reg_btf_record(reg);

	if (!cur_func(env)->active_locks) {
		verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
		return -EFAULT;
	}

	if (type_flag(reg->type) & NON_OWN_REF) {
		verbose(env, "verifier internal error: NON_OWN_REF already set\n");
		return -EFAULT;
	}

	reg->type |= NON_OWN_REF;
	if (rec->refcount_off >= 0)
		reg->type |= MEM_RCU;

	return 0;
}

static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
{
	struct bpf_func_state *state, *unused;
	struct bpf_reg_state *reg;
	int i;

	state = cur_func(env);

	if (!ref_obj_id) {
		verbose(env, "verifier internal error: ref_obj_id is zero for "
			     "owning -> non-owning conversion\n");
		return -EFAULT;
	}

	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id != ref_obj_id)
			continue;

		/* Clear ref_obj_id here so release_reference doesn't clobber
		 * it for the remaining registers.
		 */
		bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
			if (reg->ref_obj_id == ref_obj_id) {
				reg->ref_obj_id = 0;
				ref_set_non_owning(env, reg);
			}
		}));
		return 0;
	}

	verbose(env, "verifier internal error: ref state missing for ref_obj_id\n");
	return -EFAULT;
}
/* Implementation details:
 *
 * Each register points to some region of memory, which we define as an
 * allocation. Each allocation may embed a bpf_spin_lock which protects any
 * special BPF objects (bpf_list_head, bpf_rb_root, etc.) part of the same
 * allocation. The lock and the data it protects are colocated in the same
 * allocation.
 *
 * Hence, every time a register holds a pointer value pointing to such an
 * allocation, the verifier preserves a unique reg->id for it.
 *
 * The verifier remembers the lock 'ptr' and the lock 'id' whenever
 * bpf_spin_lock is called.
 *
 * To enable this, lock state in the verifier captures two values:
 *	active_lock.ptr = Register's type specific pointer
 *	active_lock.id  = A unique ID for each register pointer value
 *
 * Currently, PTR_TO_MAP_VALUE and PTR_TO_BTF_ID | MEM_ALLOC are the two
 * supported register types.
 *
 * The active_lock.ptr in case of map values is the reg->map_ptr, and in case
 * of allocated objects is the reg->btf pointer.
 *
 * The active_lock.id is non-unique for maps supporting direct_value_addr, as
 * we can establish the provenance of the map value statically for each
 * distinct lookup into such maps. They always contain a single map value,
 * hence unique IDs for each pseudo load pessimizes the algorithm and rejects
 * valid programs.
 *
 * So, in case of global variables, they use array maps with max_entries = 1,
 * hence their active_lock.ptr becomes map_ptr and id = 0 (since they all
 * point into the same map value as max_entries is 1, as described above).
 *
 * In case of inner map lookups, the inner map pointer has the same map_ptr
 * as the outer map pointer (in verifier context), but each lookup into an
 * inner map assigns a fresh reg->id to the lookup, so while lookups into
 * distinct inner maps from the same outer map share the same map_ptr as
 * active_lock.ptr, they will get different reg->id assigned to each lookup,
 * hence different active_lock.id.
 *
 * In case of allocated objects, active_lock.ptr is the reg->btf, and the
 * reg->id is a unique ID preserved after the NULL pointer check on the
 * pointer returned from bpf_obj_new. Each allocation receives a new reg->id.
 */
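
/* Illustrative only: a minimal BPF-side layout matching the scheme above,
 * where the lock and the list it protects live in the same allocation
 * (here, a single-element array map value; names are example-only):
 *
 *	struct elem {
 *		struct bpf_spin_lock lock;
 *		struct bpf_list_head head __contains(foo, node);
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, struct elem);
 *	} array_map SEC(".maps");
 *
 * check_reg_allocation_locked() below then only accepts list/rbtree kfuncs
 * on &elem->head while bpf_spin_lock(&elem->lock) from the same map value is
 * held.
 */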
static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	struct bpf_reference_state *s;
	void *ptr;
	u32 id;

	switch ((int)reg->type) {
	case PTR_TO_MAP_VALUE:
		ptr = reg->map_ptr;
		break;
	case PTR_TO_BTF_ID | MEM_ALLOC:
		ptr = reg->btf;
		break;
	default:
		verbose(env, "verifier internal error: unknown reg type for lock check\n");
		return -EFAULT;
	}
	id = reg->id;

	if (!cur_func(env)->active_locks)
		return -EINVAL;
	s = find_lock_state(env, REF_TYPE_LOCK, id, ptr);
	if (!s) {
		verbose(env, "held lock and object are not in the same allocation\n");
		return -EINVAL;
	}
	return 0;
}
static bool is_bpf_list_api_kfunc(u32 btf_id)
{
	return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
	       btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
	       btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
	       btf_id == special_kfunc_list[KF_bpf_list_pop_back];
}

static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
{
	return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
	       btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
	       btf_id == special_kfunc_list[KF_bpf_rbtree_first];
}

static bool is_bpf_graph_api_kfunc(u32 btf_id)
{
	return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id) ||
	       btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl];
}

static bool is_sync_callback_calling_kfunc(u32 btf_id)
{
	return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
}

static bool is_async_callback_calling_kfunc(u32 btf_id)
{
	return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
}

static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
{
	return bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
	       insn->imm == special_kfunc_list[KF_bpf_throw];
}

static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id)
{
	return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
}

static bool is_callback_calling_kfunc(u32 btf_id)
{
	return is_sync_callback_calling_kfunc(btf_id) ||
	       is_async_callback_calling_kfunc(btf_id);
}

static bool is_rbtree_lock_required_kfunc(u32 btf_id)
{
	return is_bpf_rbtree_api_kfunc(btf_id);
}
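
/* Illustrative only: the predicates above gate the list/rbtree kfuncs, which
 * a BPF program is expected to call only while holding the bpf_spin_lock
 * protecting the root (type and field names are example-only):
 *
 *	bpf_spin_lock(&glock);
 *	bpf_rbtree_add(&groot, &n->node, node_less);
 *	m = bpf_rbtree_first(&groot);
 *	if (m)
 *		m = bpf_rbtree_remove(&groot, m);
 *	bpf_spin_unlock(&glock);
 *	if (m)
 *		bpf_obj_drop(container_of(m, struct node_data, node));
 */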
11926 static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env
*env
,
11927 enum btf_field_type head_field_type
,
11932 switch (head_field_type
) {
11933 case BPF_LIST_HEAD
:
11934 ret
= is_bpf_list_api_kfunc(kfunc_btf_id
);
11937 ret
= is_bpf_rbtree_api_kfunc(kfunc_btf_id
);
11940 verbose(env
, "verifier internal error: unexpected graph root argument type %s\n",
11941 btf_field_type_name(head_field_type
));
11946 verbose(env
, "verifier internal error: %s head arg for unknown kfunc\n",
11947 btf_field_type_name(head_field_type
));
11951 static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env
*env
,
11952 enum btf_field_type node_field_type
,
11957 switch (node_field_type
) {
11958 case BPF_LIST_NODE
:
11959 ret
= (kfunc_btf_id
== special_kfunc_list
[KF_bpf_list_push_front_impl
] ||
11960 kfunc_btf_id
== special_kfunc_list
[KF_bpf_list_push_back_impl
]);
11963 ret
= (kfunc_btf_id
== special_kfunc_list
[KF_bpf_rbtree_remove
] ||
11964 kfunc_btf_id
== special_kfunc_list
[KF_bpf_rbtree_add_impl
]);
11967 verbose(env
, "verifier internal error: unexpected graph node argument type %s\n",
11968 btf_field_type_name(node_field_type
));
11973 verbose(env
, "verifier internal error: %s node arg for unknown kfunc\n",
11974 btf_field_type_name(node_field_type
));
11979 __process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env
*env
,
11980 struct bpf_reg_state
*reg
, u32 regno
,
11981 struct bpf_kfunc_call_arg_meta
*meta
,
11982 enum btf_field_type head_field_type
,
11983 struct btf_field
**head_field
)
11985 const char *head_type_name
;
11986 struct btf_field
*field
;
11987 struct btf_record
*rec
;
11990 if (meta
->btf
!= btf_vmlinux
) {
11991 verbose(env
, "verifier internal error: unexpected btf mismatch in kfunc call\n");
11995 if (!check_kfunc_is_graph_root_api(env
, head_field_type
, meta
->func_id
))
11998 head_type_name
= btf_field_type_name(head_field_type
);
11999 if (!tnum_is_const(reg
->var_off
)) {
12001 "R%d doesn't have constant offset. %s has to be at the constant offset\n",
12002 regno
, head_type_name
);
12006 rec
= reg_btf_record(reg
);
12007 head_off
= reg
->off
+ reg
->var_off
.value
;
12008 field
= btf_record_find(rec
, head_off
, head_field_type
);
12010 verbose(env
, "%s not found at offset=%u\n", head_type_name
, head_off
);
12014 /* All functions require bpf_list_head to be protected using a bpf_spin_lock */
12015 if (check_reg_allocation_locked(env
, reg
)) {
12016 verbose(env
, "bpf_spin_lock at off=%d must be held for %s\n",
12017 rec
->spin_lock_off
, head_type_name
);
12022 verbose(env
, "verifier internal error: repeating %s arg\n", head_type_name
);
12025 *head_field
= field
;
12029 static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env
*env
,
12030 struct bpf_reg_state
*reg
, u32 regno
,
12031 struct bpf_kfunc_call_arg_meta
*meta
)
12033 return __process_kf_arg_ptr_to_graph_root(env
, reg
, regno
, meta
, BPF_LIST_HEAD
,
12034 &meta
->arg_list_head
.field
);
12037 static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env
*env
,
12038 struct bpf_reg_state
*reg
, u32 regno
,
12039 struct bpf_kfunc_call_arg_meta
*meta
)
12041 return __process_kf_arg_ptr_to_graph_root(env
, reg
, regno
, meta
, BPF_RB_ROOT
,
12042 &meta
->arg_rbtree_root
.field
);
12046 __process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env
*env
,
12047 struct bpf_reg_state
*reg
, u32 regno
,
12048 struct bpf_kfunc_call_arg_meta
*meta
,
12049 enum btf_field_type head_field_type
,
12050 enum btf_field_type node_field_type
,
12051 struct btf_field
**node_field
)
12053 const char *node_type_name
;
12054 const struct btf_type
*et
, *t
;
12055 struct btf_field
*field
;
12058 if (meta
->btf
!= btf_vmlinux
) {
12059 verbose(env
, "verifier internal error: unexpected btf mismatch in kfunc call\n");
12063 if (!check_kfunc_is_graph_node_api(env
, node_field_type
, meta
->func_id
))
12066 node_type_name
= btf_field_type_name(node_field_type
);
12067 if (!tnum_is_const(reg
->var_off
)) {
12069 "R%d doesn't have constant offset. %s has to be at the constant offset\n",
12070 regno
, node_type_name
);
12074 node_off
= reg
->off
+ reg
->var_off
.value
;
12075 field
= reg_find_field_offset(reg
, node_off
, node_field_type
);
12077 verbose(env
, "%s not found at offset=%u\n", node_type_name
, node_off
);
12081 field
= *node_field
;
12083 et
= btf_type_by_id(field
->graph_root
.btf
, field
->graph_root
.value_btf_id
);
12084 t
= btf_type_by_id(reg
->btf
, reg
->btf_id
);
12085 if (!btf_struct_ids_match(&env
->log
, reg
->btf
, reg
->btf_id
, 0, field
->graph_root
.btf
,
12086 field
->graph_root
.value_btf_id
, true)) {
12087 verbose(env
, "operation on %s expects arg#1 %s at offset=%d "
12088 "in struct %s, but arg is at offset=%d in struct %s\n",
12089 btf_field_type_name(head_field_type
),
12090 btf_field_type_name(node_field_type
),
12091 field
->graph_root
.node_offset
,
12092 btf_name_by_offset(field
->graph_root
.btf
, et
->name_off
),
12093 node_off
, btf_name_by_offset(reg
->btf
, t
->name_off
));
12096 meta
->arg_btf
= reg
->btf
;
12097 meta
->arg_btf_id
= reg
->btf_id
;
12099 if (node_off
!= field
->graph_root
.node_offset
) {
12100 verbose(env
, "arg#1 offset=%d, but expected %s at offset=%d in struct %s\n",
12101 node_off
, btf_field_type_name(node_field_type
),
12102 field
->graph_root
.node_offset
,
12103 btf_name_by_offset(field
->graph_root
.btf
, et
->name_off
));
12110 static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env
*env
,
12111 struct bpf_reg_state
*reg
, u32 regno
,
12112 struct bpf_kfunc_call_arg_meta
*meta
)
12114 return __process_kf_arg_ptr_to_graph_node(env
, reg
, regno
, meta
,
12115 BPF_LIST_HEAD
, BPF_LIST_NODE
,
12116 &meta
->arg_list_head
.field
);
12119 static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env
*env
,
12120 struct bpf_reg_state
*reg
, u32 regno
,
12121 struct bpf_kfunc_call_arg_meta
*meta
)
12123 return __process_kf_arg_ptr_to_graph_node(env
, reg
, regno
, meta
,
12124 BPF_RB_ROOT
, BPF_RB_NODE
,
12125 &meta
->arg_rbtree_root
.field
);
/*
 * css_task iter allowlist is needed to avoid dead locking on css_set_lock.
 * LSM hooks and iters (both sleepable and non-sleepable) are safe.
 * Any sleepable progs are also safe, since bpf_check_attach_target() enforces
 * that they can only be attached to specific hook points.
 */
static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
{
	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);

	switch (prog_type) {
	case BPF_PROG_TYPE_LSM:
		return true;
	case BPF_PROG_TYPE_TRACING:
		if (env->prog->expected_attach_type == BPF_TRACE_ITER)
			return true;
		fallthrough;
	default:
		return in_sleepable(env);
	}
}
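
/* Illustrative only: the allowlist above is what allows e.g. a sleepable LSM
 * or iter program to walk the tasks of a cgroup with the open-coded iterator
 * macro used by the BPF selftests ('css' obtained elsewhere in the program):
 *
 *	struct task_struct *task;
 *
 *	bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS)
 *		bpf_printk("pid %d", task->pid);
 *
 * The same construct in, say, a plain non-sleepable kprobe program is
 * rejected with "css_task_iter is only allowed in bpf_lsm, bpf_iter and
 * sleepable progs".
 */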
12150 static int check_kfunc_args(struct bpf_verifier_env
*env
, struct bpf_kfunc_call_arg_meta
*meta
,
12153 const char *func_name
= meta
->func_name
, *ref_tname
;
12154 const struct btf
*btf
= meta
->btf
;
12155 const struct btf_param
*args
;
12156 struct btf_record
*rec
;
12160 args
= (const struct btf_param
*)(meta
->func_proto
+ 1);
12161 nargs
= btf_type_vlen(meta
->func_proto
);
12162 if (nargs
> MAX_BPF_FUNC_REG_ARGS
) {
12163 verbose(env
, "Function %s has %d > %d args\n", func_name
, nargs
,
12164 MAX_BPF_FUNC_REG_ARGS
);
12168 /* Check that BTF function arguments match actual types that the
12171 for (i
= 0; i
< nargs
; i
++) {
12172 struct bpf_reg_state
*regs
= cur_regs(env
), *reg
= ®s
[i
+ 1];
12173 const struct btf_type
*t
, *ref_t
, *resolve_ret
;
12174 enum bpf_arg_type arg_type
= ARG_DONTCARE
;
12175 u32 regno
= i
+ 1, ref_id
, type_size
;
12176 bool is_ret_buf_sz
= false;
12180 t
= btf_type_skip_modifiers(btf
, args
[i
].type
, NULL
);
12182 if (is_kfunc_arg_ignore(btf
, &args
[i
]))
12185 if (btf_type_is_scalar(t
)) {
12186 if (reg
->type
!= SCALAR_VALUE
) {
12187 verbose(env
, "R%d is not a scalar\n", regno
);
12191 if (is_kfunc_arg_constant(meta
->btf
, &args
[i
])) {
12192 if (meta
->arg_constant
.found
) {
12193 verbose(env
, "verifier internal error: only one constant argument permitted\n");
12196 if (!tnum_is_const(reg
->var_off
)) {
12197 verbose(env
, "R%d must be a known constant\n", regno
);
12200 ret
= mark_chain_precision(env
, regno
);
12203 meta
->arg_constant
.found
= true;
12204 meta
->arg_constant
.value
= reg
->var_off
.value
;
12205 } else if (is_kfunc_arg_scalar_with_name(btf
, &args
[i
], "rdonly_buf_size")) {
12206 meta
->r0_rdonly
= true;
12207 is_ret_buf_sz
= true;
12208 } else if (is_kfunc_arg_scalar_with_name(btf
, &args
[i
], "rdwr_buf_size")) {
12209 is_ret_buf_sz
= true;
12212 if (is_ret_buf_sz
) {
12213 if (meta
->r0_size
) {
12214 verbose(env
, "2 or more rdonly/rdwr_buf_size parameters for kfunc");
12218 if (!tnum_is_const(reg
->var_off
)) {
12219 verbose(env
, "R%d is not a const\n", regno
);
12223 meta
->r0_size
= reg
->var_off
.value
;
12224 ret
= mark_chain_precision(env
, regno
);
12231 if (!btf_type_is_ptr(t
)) {
12232 verbose(env
, "Unrecognized arg#%d type %s\n", i
, btf_type_str(t
));
12236 mask
= mask_raw_tp_reg(env
, reg
);
12237 if ((is_kfunc_trusted_args(meta
) || is_kfunc_rcu(meta
)) &&
12238 (register_is_null(reg
) || type_may_be_null(reg
->type
)) &&
12239 !is_kfunc_arg_nullable(meta
->btf
, &args
[i
])) {
12240 verbose(env
, "Possibly NULL pointer passed to trusted arg%d\n", i
);
12241 unmask_raw_tp_reg(reg
, mask
);
12244 unmask_raw_tp_reg(reg
, mask
);
12246 if (reg
->ref_obj_id
) {
12247 if (is_kfunc_release(meta
) && meta
->ref_obj_id
) {
12248 verbose(env
, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
12249 regno
, reg
->ref_obj_id
,
12253 meta
->ref_obj_id
= reg
->ref_obj_id
;
12254 if (is_kfunc_release(meta
))
12255 meta
->release_regno
= regno
;
12258 ref_t
= btf_type_skip_modifiers(btf
, t
->type
, &ref_id
);
12259 ref_tname
= btf_name_by_offset(btf
, ref_t
->name_off
);
12261 kf_arg_type
= get_kfunc_ptr_arg_type(env
, meta
, t
, ref_t
, ref_tname
, args
, i
, nargs
);
12262 if (kf_arg_type
< 0)
12263 return kf_arg_type
;
12265 switch (kf_arg_type
) {
12266 case KF_ARG_PTR_TO_NULL
:
12268 case KF_ARG_PTR_TO_MAP
:
12269 if (!reg
->map_ptr
) {
12270 verbose(env
, "pointer in R%d isn't map pointer\n", regno
);
12273 if (meta
->map
.ptr
&& reg
->map_ptr
->record
->wq_off
>= 0) {
12274 /* Use map_uid (which is unique id of inner map) to reject:
12275 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
12276 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
12277 * if (inner_map1 && inner_map2) {
12278 * wq = bpf_map_lookup_elem(inner_map1);
12280 * // mismatch would have been allowed
12281 * bpf_wq_init(wq, inner_map2);
12284 * Comparing map_ptr is enough to distinguish normal and outer maps.
12286 if (meta
->map
.ptr
!= reg
->map_ptr
||
12287 meta
->map
.uid
!= reg
->map_uid
) {
12289 "workqueue pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
12290 meta
->map
.uid
, reg
->map_uid
);
12294 meta
->map
.ptr
= reg
->map_ptr
;
12295 meta
->map
.uid
= reg
->map_uid
;
12297 case KF_ARG_PTR_TO_ALLOC_BTF_ID
:
12298 case KF_ARG_PTR_TO_BTF_ID
:
12299 if (!is_kfunc_trusted_args(meta
) && !is_kfunc_rcu(meta
))
12302 /* Allow passing maybe NULL raw_tp arguments to
12303 * kfuncs for compatibility. Don't apply this to
12304 * arguments with ref_obj_id > 0.
12306 mask
= mask_raw_tp_reg(env
, reg
);
12307 if (!is_trusted_reg(reg
)) {
12308 if (!is_kfunc_rcu(meta
)) {
12309 verbose(env
, "R%d must be referenced or trusted\n", regno
);
12310 unmask_raw_tp_reg(reg
, mask
);
12313 if (!is_rcu_reg(reg
)) {
12314 verbose(env
, "R%d must be a rcu pointer\n", regno
);
12315 unmask_raw_tp_reg(reg
, mask
);
12319 unmask_raw_tp_reg(reg
, mask
);
12321 case KF_ARG_PTR_TO_CTX
:
12322 case KF_ARG_PTR_TO_DYNPTR
:
12323 case KF_ARG_PTR_TO_ITER
:
12324 case KF_ARG_PTR_TO_LIST_HEAD
:
12325 case KF_ARG_PTR_TO_LIST_NODE
:
12326 case KF_ARG_PTR_TO_RB_ROOT
:
12327 case KF_ARG_PTR_TO_RB_NODE
:
12328 case KF_ARG_PTR_TO_MEM
:
12329 case KF_ARG_PTR_TO_MEM_SIZE
:
12330 case KF_ARG_PTR_TO_CALLBACK
:
12331 case KF_ARG_PTR_TO_REFCOUNTED_KPTR
:
12332 case KF_ARG_PTR_TO_CONST_STR
:
12333 case KF_ARG_PTR_TO_WORKQUEUE
:
12340 if (is_kfunc_release(meta
) && reg
->ref_obj_id
)
12341 arg_type
|= OBJ_RELEASE
;
12342 mask
= mask_raw_tp_reg(env
, reg
);
12343 ret
= check_func_arg_reg_off(env
, reg
, regno
, arg_type
);
12344 unmask_raw_tp_reg(reg
, mask
);
12348 switch (kf_arg_type
) {
12349 case KF_ARG_PTR_TO_CTX
:
12350 if (reg
->type
!= PTR_TO_CTX
) {
12351 verbose(env
, "arg#%d expected pointer to ctx, but got %s\n",
12352 i
, reg_type_str(env
, reg
->type
));
12356 if (meta
->func_id
== special_kfunc_list
[KF_bpf_cast_to_kern_ctx
]) {
12357 ret
= get_kern_ctx_btf_id(&env
->log
, resolve_prog_type(env
->prog
));
12360 meta
->ret_btf_id
= ret
;
12363 case KF_ARG_PTR_TO_ALLOC_BTF_ID
:
12364 if (reg
->type
== (PTR_TO_BTF_ID
| MEM_ALLOC
)) {
12365 if (meta
->func_id
!= special_kfunc_list
[KF_bpf_obj_drop_impl
]) {
12366 verbose(env
, "arg#%d expected for bpf_obj_drop_impl()\n", i
);
12369 } else if (reg
->type
== (PTR_TO_BTF_ID
| MEM_ALLOC
| MEM_PERCPU
)) {
12370 if (meta
->func_id
!= special_kfunc_list
[KF_bpf_percpu_obj_drop_impl
]) {
12371 verbose(env
, "arg#%d expected for bpf_percpu_obj_drop_impl()\n", i
);
12375 verbose(env
, "arg#%d expected pointer to allocated object\n", i
);
12378 if (!reg
->ref_obj_id
) {
12379 verbose(env
, "allocated object must be referenced\n");
12382 if (meta
->btf
== btf_vmlinux
) {
12383 meta
->arg_btf
= reg
->btf
;
12384 meta
->arg_btf_id
= reg
->btf_id
;
12387 case KF_ARG_PTR_TO_DYNPTR
:
12389 enum bpf_arg_type dynptr_arg_type
= ARG_PTR_TO_DYNPTR
;
12390 int clone_ref_obj_id
= 0;
12392 if (reg
->type
== CONST_PTR_TO_DYNPTR
)
12393 dynptr_arg_type
|= MEM_RDONLY
;
12395 if (is_kfunc_arg_uninit(btf
, &args
[i
]))
12396 dynptr_arg_type
|= MEM_UNINIT
;
12398 if (meta
->func_id
== special_kfunc_list
[KF_bpf_dynptr_from_skb
]) {
12399 dynptr_arg_type
|= DYNPTR_TYPE_SKB
;
12400 } else if (meta
->func_id
== special_kfunc_list
[KF_bpf_dynptr_from_xdp
]) {
12401 dynptr_arg_type
|= DYNPTR_TYPE_XDP
;
12402 } else if (meta
->func_id
== special_kfunc_list
[KF_bpf_dynptr_clone
] &&
12403 (dynptr_arg_type
& MEM_UNINIT
)) {
12404 enum bpf_dynptr_type parent_type
= meta
->initialized_dynptr
.type
;
12406 if (parent_type
== BPF_DYNPTR_TYPE_INVALID
) {
12407 verbose(env
, "verifier internal error: no dynptr type for parent of clone\n");
12411 dynptr_arg_type
|= (unsigned int)get_dynptr_type_flag(parent_type
);
12412 clone_ref_obj_id
= meta
->initialized_dynptr
.ref_obj_id
;
12413 if (dynptr_type_refcounted(parent_type
) && !clone_ref_obj_id
) {
12414 verbose(env
, "verifier internal error: missing ref obj id for parent of clone\n");
12419 ret
= process_dynptr_func(env
, regno
, insn_idx
, dynptr_arg_type
, clone_ref_obj_id
);
12423 if (!(dynptr_arg_type
& MEM_UNINIT
)) {
12424 int id
= dynptr_id(env
, reg
);
12427 verbose(env
, "verifier internal error: failed to obtain dynptr id\n");
12430 meta
->initialized_dynptr
.id
= id
;
12431 meta
->initialized_dynptr
.type
= dynptr_get_type(env
, reg
);
12432 meta
->initialized_dynptr
.ref_obj_id
= dynptr_ref_obj_id(env
, reg
);
12437 case KF_ARG_PTR_TO_ITER
:
12438 if (meta
->func_id
== special_kfunc_list
[KF_bpf_iter_css_task_new
]) {
12439 if (!check_css_task_iter_allowlist(env
)) {
12440 verbose(env
, "css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs\n");
12444 ret
= process_iter_arg(env
, regno
, insn_idx
, meta
);
12448 case KF_ARG_PTR_TO_LIST_HEAD
:
12449 if (reg
->type
!= PTR_TO_MAP_VALUE
&&
12450 reg
->type
!= (PTR_TO_BTF_ID
| MEM_ALLOC
)) {
12451 verbose(env
, "arg#%d expected pointer to map value or allocated object\n", i
);
12454 if (reg
->type
== (PTR_TO_BTF_ID
| MEM_ALLOC
) && !reg
->ref_obj_id
) {
12455 verbose(env
, "allocated object must be referenced\n");
12458 ret
= process_kf_arg_ptr_to_list_head(env
, reg
, regno
, meta
);
12462 case KF_ARG_PTR_TO_RB_ROOT
:
12463 if (reg
->type
!= PTR_TO_MAP_VALUE
&&
12464 reg
->type
!= (PTR_TO_BTF_ID
| MEM_ALLOC
)) {
12465 verbose(env
, "arg#%d expected pointer to map value or allocated object\n", i
);
12468 if (reg
->type
== (PTR_TO_BTF_ID
| MEM_ALLOC
) && !reg
->ref_obj_id
) {
12469 verbose(env
, "allocated object must be referenced\n");
12472 ret
= process_kf_arg_ptr_to_rbtree_root(env
, reg
, regno
, meta
);
12476 case KF_ARG_PTR_TO_LIST_NODE
:
12477 if (reg
->type
!= (PTR_TO_BTF_ID
| MEM_ALLOC
)) {
12478 verbose(env
, "arg#%d expected pointer to allocated object\n", i
);
12481 if (!reg
->ref_obj_id
) {
12482 verbose(env
, "allocated object must be referenced\n");
12485 ret
= process_kf_arg_ptr_to_list_node(env
, reg
, regno
, meta
);
12489 case KF_ARG_PTR_TO_RB_NODE
:
12490 if (meta
->func_id
== special_kfunc_list
[KF_bpf_rbtree_remove
]) {
12491 if (!type_is_non_owning_ref(reg
->type
) || reg
->ref_obj_id
) {
12492 verbose(env
, "rbtree_remove node input must be non-owning ref\n");
12495 if (in_rbtree_lock_required_cb(env
)) {
12496 verbose(env
, "rbtree_remove not allowed in rbtree cb\n");
12500 if (reg
->type
!= (PTR_TO_BTF_ID
| MEM_ALLOC
)) {
12501 verbose(env
, "arg#%d expected pointer to allocated object\n", i
);
12504 if (!reg
->ref_obj_id
) {
12505 verbose(env
, "allocated object must be referenced\n");
12510 ret
= process_kf_arg_ptr_to_rbtree_node(env
, reg
, regno
, meta
);
12514 case KF_ARG_PTR_TO_MAP
:
12515 /* If argument has '__map' suffix expect 'struct bpf_map *' */
12516 ref_id
= *reg2btf_ids
[CONST_PTR_TO_MAP
];
12517 ref_t
= btf_type_by_id(btf_vmlinux
, ref_id
);
12518 ref_tname
= btf_name_by_offset(btf
, ref_t
->name_off
);
12520 case KF_ARG_PTR_TO_BTF_ID
:
12521 mask
= mask_raw_tp_reg(env
, reg
);
12522 /* Only base_type is checked, further checks are done here */
12523 if ((base_type(reg
->type
) != PTR_TO_BTF_ID
||
12524 (bpf_type_has_unsafe_modifiers(reg
->type
) && !is_rcu_reg(reg
))) &&
12525 !reg2btf_ids
[base_type(reg
->type
)]) {
12526 verbose(env
, "arg#%d is %s ", i
, reg_type_str(env
, reg
->type
));
12527 verbose(env
, "expected %s or socket\n",
12528 reg_type_str(env
, base_type(reg
->type
) |
12529 (type_flag(reg
->type
) & BPF_REG_TRUSTED_MODIFIERS
)));
12530 unmask_raw_tp_reg(reg
, mask
);
12533 ret
= process_kf_arg_ptr_to_btf_id(env
, reg
, ref_t
, ref_tname
, ref_id
, meta
, i
);
12534 unmask_raw_tp_reg(reg
, mask
);
12538 case KF_ARG_PTR_TO_MEM
:
12539 resolve_ret
= btf_resolve_size(btf
, ref_t
, &type_size
);
12540 if (IS_ERR(resolve_ret
)) {
12541 verbose(env
, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
12542 i
, btf_type_str(ref_t
), ref_tname
, PTR_ERR(resolve_ret
));
12545 ret
= check_mem_reg(env
, reg
, regno
, type_size
);
12549 case KF_ARG_PTR_TO_MEM_SIZE
:
12551 struct bpf_reg_state
*buff_reg
= ®s
[regno
];
12552 const struct btf_param
*buff_arg
= &args
[i
];
12553 struct bpf_reg_state
*size_reg
= ®s
[regno
+ 1];
12554 const struct btf_param
*size_arg
= &args
[i
+ 1];
12556 if (!register_is_null(buff_reg
) || !is_kfunc_arg_optional(meta
->btf
, buff_arg
)) {
12557 ret
= check_kfunc_mem_size_reg(env
, size_reg
, regno
+ 1);
12559 verbose(env
, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i
, i
+ 1);
12564 if (is_kfunc_arg_const_mem_size(meta
->btf
, size_arg
, size_reg
)) {
12565 if (meta
->arg_constant
.found
) {
12566 verbose(env
, "verifier internal error: only one constant argument permitted\n");
12569 if (!tnum_is_const(size_reg
->var_off
)) {
12570 verbose(env
, "R%d must be a known constant\n", regno
+ 1);
12573 meta
->arg_constant
.found
= true;
12574 meta
->arg_constant
.value
= size_reg
->var_off
.value
;
12577 /* Skip next '__sz' or '__szk' argument */
12581 case KF_ARG_PTR_TO_CALLBACK
:
12582 if (reg
->type
!= PTR_TO_FUNC
) {
12583 verbose(env
, "arg%d expected pointer to func\n", i
);
12586 meta
->subprogno
= reg
->subprogno
;
12588 case KF_ARG_PTR_TO_REFCOUNTED_KPTR
:
12589 if (!type_is_ptr_alloc_obj(reg
->type
)) {
12590 verbose(env
, "arg#%d is neither owning or non-owning ref\n", i
);
12593 if (!type_is_non_owning_ref(reg
->type
))
12594 meta
->arg_owning_ref
= true;
12596 rec
= reg_btf_record(reg
);
12598 verbose(env
, "verifier internal error: Couldn't find btf_record\n");
12602 if (rec
->refcount_off
< 0) {
12603 verbose(env
, "arg#%d doesn't point to a type with bpf_refcount field\n", i
);
12607 meta
->arg_btf
= reg
->btf
;
12608 meta
->arg_btf_id
= reg
->btf_id
;
12610 case KF_ARG_PTR_TO_CONST_STR
:
12611 if (reg
->type
!= PTR_TO_MAP_VALUE
) {
12612 verbose(env
, "arg#%d doesn't point to a const string\n", i
);
12615 ret
= check_reg_const_str(env
, reg
, regno
);
12619 case KF_ARG_PTR_TO_WORKQUEUE
:
12620 if (reg
->type
!= PTR_TO_MAP_VALUE
) {
12621 verbose(env
, "arg#%d doesn't point to a map value\n", i
);
12624 ret
= process_wq_func(env
, regno
, meta
);
12631 if (is_kfunc_release(meta
) && !meta
->release_regno
) {
12632 verbose(env
, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n",
12640 static int fetch_kfunc_meta(struct bpf_verifier_env
*env
,
12641 struct bpf_insn
*insn
,
12642 struct bpf_kfunc_call_arg_meta
*meta
,
12643 const char **kfunc_name
)
12645 const struct btf_type
*func
, *func_proto
;
12646 u32 func_id
, *kfunc_flags
;
12647 const char *func_name
;
12648 struct btf
*desc_btf
;
12651 *kfunc_name
= NULL
;
12656 desc_btf
= find_kfunc_desc_btf(env
, insn
->off
);
12657 if (IS_ERR(desc_btf
))
12658 return PTR_ERR(desc_btf
);
12660 func_id
= insn
->imm
;
12661 func
= btf_type_by_id(desc_btf
, func_id
);
12662 func_name
= btf_name_by_offset(desc_btf
, func
->name_off
);
12664 *kfunc_name
= func_name
;
12665 func_proto
= btf_type_by_id(desc_btf
, func
->type
);
12667 kfunc_flags
= btf_kfunc_id_set_contains(desc_btf
, func_id
, env
->prog
);
12668 if (!kfunc_flags
) {
12672 memset(meta
, 0, sizeof(*meta
));
12673 meta
->btf
= desc_btf
;
12674 meta
->func_id
= func_id
;
12675 meta
->kfunc_flags
= *kfunc_flags
;
12676 meta
->func_proto
= func_proto
;
12677 meta
->func_name
= func_name
;
12682 static int check_return_code(struct bpf_verifier_env
*env
, int regno
, const char *reg_name
);
12684 static int check_kfunc_call(struct bpf_verifier_env
*env
, struct bpf_insn
*insn
,
12687 bool sleepable
, rcu_lock
, rcu_unlock
, preempt_disable
, preempt_enable
;
12688 u32 i
, nargs
, ptr_type_id
, release_ref_obj_id
;
12689 struct bpf_reg_state
*regs
= cur_regs(env
);
12690 const char *func_name
, *ptr_type_name
;
12691 const struct btf_type
*t
, *ptr_type
;
12692 struct bpf_kfunc_call_arg_meta meta
;
12693 struct bpf_insn_aux_data
*insn_aux
;
12694 int err
, insn_idx
= *insn_idx_p
;
12695 const struct btf_param
*args
;
12696 const struct btf_type
*ret_t
;
12697 struct btf
*desc_btf
;
12699 /* skip for now, but return error when we find this in fixup_kfunc_call */
12703 err
= fetch_kfunc_meta(env
, insn
, &meta
, &func_name
);
12704 if (err
== -EACCES
&& func_name
)
12705 verbose(env
, "calling kernel function %s is not allowed\n", func_name
);
12708 desc_btf
= meta
.btf
;
12709 insn_aux
= &env
->insn_aux_data
[insn_idx
];
12711 insn_aux
->is_iter_next
= is_iter_next_kfunc(&meta
);
12713 if (is_kfunc_destructive(&meta
) && !capable(CAP_SYS_BOOT
)) {
12714 verbose(env
, "destructive kfunc calls require CAP_SYS_BOOT capability\n");
12718 sleepable
= is_kfunc_sleepable(&meta
);
12719 if (sleepable
&& !in_sleepable(env
)) {
12720 verbose(env
, "program must be sleepable to call sleepable kfunc %s\n", func_name
);
12724 /* Check the arguments */
12725 err
= check_kfunc_args(env
, &meta
, insn_idx
);
12729 if (meta
.func_id
== special_kfunc_list
[KF_bpf_rbtree_add_impl
]) {
12730 err
= push_callback_call(env
, insn
, insn_idx
, meta
.subprogno
,
12731 set_rbtree_add_callback_state
);
12733 verbose(env
, "kfunc %s#%d failed callback verification\n",
12734 func_name
, meta
.func_id
);
12739 if (meta
.func_id
== special_kfunc_list
[KF_bpf_session_cookie
]) {
12740 meta
.r0_size
= sizeof(u64
);
12741 meta
.r0_rdonly
= false;
12744 if (is_bpf_wq_set_callback_impl_kfunc(meta
.func_id
)) {
12745 err
= push_callback_call(env
, insn
, insn_idx
, meta
.subprogno
,
12746 set_timer_callback_state
);
12748 verbose(env
, "kfunc %s#%d failed callback verification\n",
12749 func_name
, meta
.func_id
);
12754 rcu_lock
= is_kfunc_bpf_rcu_read_lock(&meta
);
12755 rcu_unlock
= is_kfunc_bpf_rcu_read_unlock(&meta
);
12757 preempt_disable
= is_kfunc_bpf_preempt_disable(&meta
);
12758 preempt_enable
= is_kfunc_bpf_preempt_enable(&meta
);
12760 if (env
->cur_state
->active_rcu_lock
) {
12761 struct bpf_func_state
*state
;
12762 struct bpf_reg_state
*reg
;
12763 u32 clear_mask
= (1 << STACK_SPILL
) | (1 << STACK_ITER
);
12765 if (in_rbtree_lock_required_cb(env
) && (rcu_lock
|| rcu_unlock
)) {
12766 verbose(env
, "Calling bpf_rcu_read_{lock,unlock} in unnecessary rbtree callback\n");
12771 verbose(env
, "nested rcu read lock (kernel function %s)\n", func_name
);
12773 } else if (rcu_unlock
) {
12774 bpf_for_each_reg_in_vstate_mask(env
->cur_state
, state
, reg
, clear_mask
, ({
12775 if (reg
->type
& MEM_RCU
) {
12776 reg
->type
&= ~(MEM_RCU
| PTR_MAYBE_NULL
);
12777 reg
->type
|= PTR_UNTRUSTED
;
12780 env
->cur_state
->active_rcu_lock
= false;
12781 } else if (sleepable
) {
12782 verbose(env
, "kernel func %s is sleepable within rcu_read_lock region\n", func_name
);
12785 } else if (rcu_lock
) {
12786 env
->cur_state
->active_rcu_lock
= true;
12787 } else if (rcu_unlock
) {
12788 verbose(env
, "unmatched rcu read unlock (kernel function %s)\n", func_name
);
12792 if (env
->cur_state
->active_preempt_lock
) {
12793 if (preempt_disable
) {
12794 env
->cur_state
->active_preempt_lock
++;
12795 } else if (preempt_enable
) {
12796 env
->cur_state
->active_preempt_lock
--;
12797 } else if (sleepable
) {
12798 verbose(env
, "kernel func %s is sleepable within non-preemptible region\n", func_name
);
12801 } else if (preempt_disable
) {
12802 env
->cur_state
->active_preempt_lock
++;
12803 } else if (preempt_enable
) {
12804 verbose(env
, "unmatched attempt to enable preemption (kernel function %s)\n", func_name
);
12808 /* In case of release function, we get register number of refcounted
12809 * PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now.
12811 if (meta
.release_regno
) {
12812 err
= release_reference(env
, regs
[meta
.release_regno
].ref_obj_id
);
12814 verbose(env
, "kfunc %s#%d reference has not been acquired before\n",
12815 func_name
, meta
.func_id
);
12820 if (meta
.func_id
== special_kfunc_list
[KF_bpf_list_push_front_impl
] ||
12821 meta
.func_id
== special_kfunc_list
[KF_bpf_list_push_back_impl
] ||
12822 meta
.func_id
== special_kfunc_list
[KF_bpf_rbtree_add_impl
]) {
12823 release_ref_obj_id
= regs
[BPF_REG_2
].ref_obj_id
;
12824 insn_aux
->insert_off
= regs
[BPF_REG_2
].off
;
12825 insn_aux
->kptr_struct_meta
= btf_find_struct_meta(meta
.arg_btf
, meta
.arg_btf_id
);
12826 err
= ref_convert_owning_non_owning(env
, release_ref_obj_id
);
12828 verbose(env
, "kfunc %s#%d conversion of owning ref to non-owning failed\n",
12829 func_name
, meta
.func_id
);
12833 err
= release_reference(env
, release_ref_obj_id
);
12835 verbose(env
, "kfunc %s#%d reference has not been acquired before\n",
12836 func_name
, meta
.func_id
);
12841 if (meta
.func_id
== special_kfunc_list
[KF_bpf_throw
]) {
12842 if (!bpf_jit_supports_exceptions()) {
12843 verbose(env
, "JIT does not support calling kfunc %s#%d\n",
12844 func_name
, meta
.func_id
);
12847 env
->seen_exception
= true;
12849 /* In the case of the default callback, the cookie value passed
12850 * to bpf_throw becomes the return value of the program.
12852 if (!env
->exception_callback_subprog
) {
12853 err
= check_return_code(env
, BPF_REG_1
, "R1");
12859 for (i
= 0; i
< CALLER_SAVED_REGS
; i
++)
12860 mark_reg_not_init(env
, regs
, caller_saved
[i
]);
12862 /* Check return type */
12863 t
= btf_type_skip_modifiers(desc_btf
, meta
.func_proto
->type
, NULL
);
12865 if (is_kfunc_acquire(&meta
) && !btf_type_is_struct_ptr(meta
.btf
, t
)) {
12866 /* Only exception is bpf_obj_new_impl */
12867 if (meta
.btf
!= btf_vmlinux
||
12868 (meta
.func_id
!= special_kfunc_list
[KF_bpf_obj_new_impl
] &&
12869 meta
.func_id
!= special_kfunc_list
[KF_bpf_percpu_obj_new_impl
] &&
12870 meta
.func_id
!= special_kfunc_list
[KF_bpf_refcount_acquire_impl
])) {
12871 verbose(env
, "acquire kernel function does not return PTR_TO_BTF_ID\n");
12876 if (btf_type_is_scalar(t
)) {
12877 mark_reg_unknown(env
, regs
, BPF_REG_0
);
12878 mark_btf_func_reg_size(env
, BPF_REG_0
, t
->size
);
12879 } else if (btf_type_is_ptr(t
)) {
12880 ptr_type
= btf_type_skip_modifiers(desc_btf
, t
->type
, &ptr_type_id
);
12882 if (meta
.btf
== btf_vmlinux
&& btf_id_set_contains(&special_kfunc_set
, meta
.func_id
)) {
12883 if (meta
.func_id
== special_kfunc_list
[KF_bpf_obj_new_impl
] ||
12884 meta
.func_id
== special_kfunc_list
[KF_bpf_percpu_obj_new_impl
]) {
12885 struct btf_struct_meta
*struct_meta
;
12886 struct btf
*ret_btf
;
12889 if (meta
.func_id
== special_kfunc_list
[KF_bpf_obj_new_impl
] && !bpf_global_ma_set
)
12892 if (((u64
)(u32
)meta
.arg_constant
.value
) != meta
.arg_constant
.value
) {
12893 verbose(env
, "local type ID argument must be in range [0, U32_MAX]\n");
12897 ret_btf
= env
->prog
->aux
->btf
;
12898 ret_btf_id
= meta
.arg_constant
.value
;
12900 /* This may be NULL due to user not supplying a BTF */
12902 verbose(env
, "bpf_obj_new/bpf_percpu_obj_new requires prog BTF\n");
12906 ret_t
= btf_type_by_id(ret_btf
, ret_btf_id
);
12907 if (!ret_t
|| !__btf_type_is_struct(ret_t
)) {
12908 verbose(env
, "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct\n");
12912 if (meta
.func_id
== special_kfunc_list
[KF_bpf_percpu_obj_new_impl
]) {
12913 if (ret_t
->size
> BPF_GLOBAL_PERCPU_MA_MAX_SIZE
) {
12914 verbose(env
, "bpf_percpu_obj_new type size (%d) is greater than %d\n",
12915 ret_t
->size
, BPF_GLOBAL_PERCPU_MA_MAX_SIZE
);
12919 if (!bpf_global_percpu_ma_set
) {
12920 mutex_lock(&bpf_percpu_ma_lock
);
12921 if (!bpf_global_percpu_ma_set
) {
12922 /* Charge memory allocated with bpf_global_percpu_ma to
12923 * root memcg. The obj_cgroup for root memcg is NULL.
12925 err
= bpf_mem_alloc_percpu_init(&bpf_global_percpu_ma
, NULL
);
12927 bpf_global_percpu_ma_set
= true;
12929 mutex_unlock(&bpf_percpu_ma_lock
);
12934 mutex_lock(&bpf_percpu_ma_lock
);
12935 err
= bpf_mem_alloc_percpu_unit_init(&bpf_global_percpu_ma
, ret_t
->size
);
12936 mutex_unlock(&bpf_percpu_ma_lock
);
12941 struct_meta
= btf_find_struct_meta(ret_btf
, ret_btf_id
);
12942 if (meta
.func_id
== special_kfunc_list
[KF_bpf_percpu_obj_new_impl
]) {
12943 if (!__btf_type_is_scalar_struct(env
, ret_btf
, ret_t
, 0)) {
12944 verbose(env
, "bpf_percpu_obj_new type ID argument must be of a struct of scalars\n");
12949 verbose(env
, "bpf_percpu_obj_new type ID argument must not contain special fields\n");
12954 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
12955 regs
[BPF_REG_0
].type
= PTR_TO_BTF_ID
| MEM_ALLOC
;
12956 regs
[BPF_REG_0
].btf
= ret_btf
;
12957 regs
[BPF_REG_0
].btf_id
= ret_btf_id
;
12958 if (meta
.func_id
== special_kfunc_list
[KF_bpf_percpu_obj_new_impl
])
12959 regs
[BPF_REG_0
].type
|= MEM_PERCPU
;
12961 insn_aux
->obj_new_size
= ret_t
->size
;
12962 insn_aux
->kptr_struct_meta
= struct_meta
;
12963 } else if (meta
.func_id
== special_kfunc_list
[KF_bpf_refcount_acquire_impl
]) {
12964 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
12965 regs
[BPF_REG_0
].type
= PTR_TO_BTF_ID
| MEM_ALLOC
;
12966 regs
[BPF_REG_0
].btf
= meta
.arg_btf
;
12967 regs
[BPF_REG_0
].btf_id
= meta
.arg_btf_id
;
12969 insn_aux
->kptr_struct_meta
=
12970 btf_find_struct_meta(meta
.arg_btf
,
12972 } else if (meta
.func_id
== special_kfunc_list
[KF_bpf_list_pop_front
] ||
12973 meta
.func_id
== special_kfunc_list
[KF_bpf_list_pop_back
]) {
12974 struct btf_field
*field
= meta
.arg_list_head
.field
;
12976 mark_reg_graph_node(regs
, BPF_REG_0
, &field
->graph_root
);
12977 } else if (meta
.func_id
== special_kfunc_list
[KF_bpf_rbtree_remove
] ||
12978 meta
.func_id
== special_kfunc_list
[KF_bpf_rbtree_first
]) {
12979 struct btf_field
*field
= meta
.arg_rbtree_root
.field
;
12981 mark_reg_graph_node(regs
, BPF_REG_0
, &field
->graph_root
);
12982 } else if (meta
.func_id
== special_kfunc_list
[KF_bpf_cast_to_kern_ctx
]) {
12983 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
12984 regs
[BPF_REG_0
].type
= PTR_TO_BTF_ID
| PTR_TRUSTED
;
12985 regs
[BPF_REG_0
].btf
= desc_btf
;
12986 regs
[BPF_REG_0
].btf_id
= meta
.ret_btf_id
;
12987 } else if (meta
.func_id
== special_kfunc_list
[KF_bpf_rdonly_cast
]) {
12988 ret_t
= btf_type_by_id(desc_btf
, meta
.arg_constant
.value
);
12989 if (!ret_t
|| !btf_type_is_struct(ret_t
)) {
12991 "kfunc bpf_rdonly_cast type ID argument must be of a struct\n");
12995 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
12996 regs
[BPF_REG_0
].type
= PTR_TO_BTF_ID
| PTR_UNTRUSTED
;
12997 regs
[BPF_REG_0
].btf
= desc_btf
;
12998 regs
[BPF_REG_0
].btf_id
= meta
.arg_constant
.value
;
12999 } else if (meta
.func_id
== special_kfunc_list
[KF_bpf_dynptr_slice
] ||
13000 meta
.func_id
== special_kfunc_list
[KF_bpf_dynptr_slice_rdwr
]) {
13001 enum bpf_type_flag type_flag
= get_dynptr_type_flag(meta
.initialized_dynptr
.type
);
13003 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
13005 if (!meta
.arg_constant
.found
) {
13006 verbose(env
, "verifier internal error: bpf_dynptr_slice(_rdwr) no constant size\n");
13010 regs
[BPF_REG_0
].mem_size
= meta
.arg_constant
.value
;
13012 /* PTR_MAYBE_NULL will be added when is_kfunc_ret_null is checked */
13013 regs
[BPF_REG_0
].type
= PTR_TO_MEM
| type_flag
;
13015 if (meta
.func_id
== special_kfunc_list
[KF_bpf_dynptr_slice
]) {
13016 regs
[BPF_REG_0
].type
|= MEM_RDONLY
;
13018 /* this will set env->seen_direct_write to true */
13019 if (!may_access_direct_pkt_data(env
, NULL
, BPF_WRITE
)) {
13020 verbose(env
, "the prog does not allow writes to packet data\n");
13025 if (!meta
.initialized_dynptr
.id
) {
13026 verbose(env
, "verifier internal error: no dynptr id\n");
13029 regs
[BPF_REG_0
].dynptr_id
= meta
.initialized_dynptr
.id
;
13031 /* we don't need to set BPF_REG_0's ref obj id
13032 * because packet slices are not refcounted (see
13033 * dynptr_type_refcounted)
13036 verbose(env
, "kernel function %s unhandled dynamic return type\n",
13040 } else if (btf_type_is_void(ptr_type
)) {
13041 /* kfunc returning 'void *' is equivalent to returning scalar */
13042 mark_reg_unknown(env
, regs
, BPF_REG_0
);
13043 } else if (!__btf_type_is_struct(ptr_type
)) {
13044 if (!meta
.r0_size
) {
13047 if (!IS_ERR(btf_resolve_size(desc_btf
, ptr_type
, &sz
))) {
13049 meta
.r0_rdonly
= true;
13052 if (!meta
.r0_size
) {
13053 ptr_type_name
= btf_name_by_offset(desc_btf
,
13054 ptr_type
->name_off
);
13056 "kernel function %s returns pointer type %s %s is not supported\n",
13058 btf_type_str(ptr_type
),
13063 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
13064 regs
[BPF_REG_0
].type
= PTR_TO_MEM
;
13065 regs
[BPF_REG_0
].mem_size
= meta
.r0_size
;
13067 if (meta
.r0_rdonly
)
13068 regs
[BPF_REG_0
].type
|= MEM_RDONLY
;
13070 /* Ensures we don't access the memory after a release_reference() */
13071 if (meta
.ref_obj_id
)
13072 regs
[BPF_REG_0
].ref_obj_id
= meta
.ref_obj_id
;
13074 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
13075 regs
[BPF_REG_0
].btf
= desc_btf
;
13076 regs
[BPF_REG_0
].type
= PTR_TO_BTF_ID
;
13077 regs
[BPF_REG_0
].btf_id
= ptr_type_id
;
13079 if (meta
.func_id
== special_kfunc_list
[KF_bpf_get_kmem_cache
])
13080 regs
[BPF_REG_0
].type
|= PTR_UNTRUSTED
;
13082 if (is_iter_next_kfunc(&meta
)) {
13083 struct bpf_reg_state
*cur_iter
;
13085 cur_iter
= get_iter_from_state(env
->cur_state
, &meta
);
13087 if (cur_iter
->type
& MEM_RCU
) /* KF_RCU_PROTECTED */
13088 regs
[BPF_REG_0
].type
|= MEM_RCU
;
13090 regs
[BPF_REG_0
].type
|= PTR_TRUSTED
;
13094 if (is_kfunc_ret_null(&meta
)) {
13095 regs
[BPF_REG_0
].type
|= PTR_MAYBE_NULL
;
13096 /* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
13097 regs
[BPF_REG_0
].id
= ++env
->id_gen
;
13099 mark_btf_func_reg_size(env
, BPF_REG_0
, sizeof(void *));
13100 if (is_kfunc_acquire(&meta
)) {
13101 int id
= acquire_reference_state(env
, insn_idx
);
13105 if (is_kfunc_ret_null(&meta
))
13106 regs
[BPF_REG_0
].id
= id
;
13107 regs
[BPF_REG_0
].ref_obj_id
= id
;
13108 } else if (meta
.func_id
== special_kfunc_list
[KF_bpf_rbtree_first
]) {
13109 ref_set_non_owning(env
, ®s
[BPF_REG_0
]);
13112 if (reg_may_point_to_spin_lock(®s
[BPF_REG_0
]) && !regs
[BPF_REG_0
].id
)
13113 regs
[BPF_REG_0
].id
= ++env
->id_gen
;
13114 } else if (btf_type_is_void(t
)) {
13115 if (meta
.btf
== btf_vmlinux
&& btf_id_set_contains(&special_kfunc_set
, meta
.func_id
)) {
13116 if (meta
.func_id
== special_kfunc_list
[KF_bpf_obj_drop_impl
] ||
13117 meta
.func_id
== special_kfunc_list
[KF_bpf_percpu_obj_drop_impl
]) {
13118 insn_aux
->kptr_struct_meta
=
13119 btf_find_struct_meta(meta
.arg_btf
,
13125 nargs
= btf_type_vlen(meta
.func_proto
);
13126 args
= (const struct btf_param
*)(meta
.func_proto
+ 1);
13127 for (i
= 0; i
< nargs
; i
++) {
13130 t
= btf_type_skip_modifiers(desc_btf
, args
[i
].type
, NULL
);
13131 if (btf_type_is_ptr(t
))
13132 mark_btf_func_reg_size(env
, regno
, sizeof(void *));
13134 /* scalar. ensured by btf_check_kfunc_arg_match() */
13135 mark_btf_func_reg_size(env
, regno
, t
->size
);
13138 if (is_iter_next_kfunc(&meta
)) {
13139 err
= process_iter_next_call(env
, insn_idx
, &meta
);
static bool check_reg_sane_offset(struct bpf_verifier_env *env,
				  const struct bpf_reg_state *reg,
				  enum bpf_reg_type type)
{
	bool known = tnum_is_const(reg->var_off);
	s64 val = reg->var_off.value;
	s64 smin = reg->smin_value;

	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
		verbose(env, "math between %s pointer and %lld is not allowed\n",
			reg_type_str(env, type), val);
		return false;
	}

	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
		verbose(env, "%s pointer offset %d is not allowed\n",
			reg_type_str(env, type), reg->off);
		return false;
	}

	if (smin == S64_MIN) {
		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
			reg_type_str(env, type));
		return false;
	}

	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
		verbose(env, "value %lld makes %s pointer be out of bounds\n",
			smin, reg_type_str(env, type));
		return false;
	}

	return true;
}
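
/* Illustrative only: the checks above put a hard cap (BPF_MAX_VAR_OFF) on how
 * far pointer arithmetic may move a pointer. A sketch of a rejected program
 * fragment, assuming BPF_MAX_VAR_OFF is 1 << 29:
 *
 *	char *p = bpf_map_lookup_elem(&m, &key);
 *	if (!p)
 *		return 0;
 *	p += (1 << 30);		// "math between map_value pointer and
 *				//  1073741824 is not allowed"
 */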
13183 REASON_BOUNDS
= -1,
13190 static int retrieve_ptr_limit(const struct bpf_reg_state
*ptr_reg
,
13191 u32
*alu_limit
, bool mask_to_left
)
13193 u32 max
= 0, ptr_limit
= 0;
13195 switch (ptr_reg
->type
) {
13197 /* Offset 0 is out-of-bounds, but acceptable start for the
13198 * left direction, see BPF_REG_FP. Also, unknown scalar
13199 * offset where we would need to deal with min/max bounds is
13200 * currently prohibited for unprivileged.
13202 max
= MAX_BPF_STACK
+ mask_to_left
;
13203 ptr_limit
= -(ptr_reg
->var_off
.value
+ ptr_reg
->off
);
13205 case PTR_TO_MAP_VALUE
:
13206 max
= ptr_reg
->map_ptr
->value_size
;
13207 ptr_limit
= (mask_to_left
?
13208 ptr_reg
->smin_value
:
13209 ptr_reg
->umax_value
) + ptr_reg
->off
;
13212 return REASON_TYPE
;
13215 if (ptr_limit
>= max
)
13216 return REASON_LIMIT
;
13217 *alu_limit
= ptr_limit
;
13221 static bool can_skip_alu_sanitation(const struct bpf_verifier_env
*env
,
13222 const struct bpf_insn
*insn
)
13224 return env
->bypass_spec_v1
|| BPF_SRC(insn
->code
) == BPF_K
;
13227 static int update_alu_sanitation_state(struct bpf_insn_aux_data
*aux
,
13228 u32 alu_state
, u32 alu_limit
)
13230 /* If we arrived here from different branches with different
13231 * state or limits to sanitize, then this won't work.
13233 if (aux
->alu_state
&&
13234 (aux
->alu_state
!= alu_state
||
13235 aux
->alu_limit
!= alu_limit
))
13236 return REASON_PATHS
;
13238 /* Corresponding fixup done in do_misc_fixups(). */
13239 aux
->alu_state
= alu_state
;
13240 aux
->alu_limit
= alu_limit
;
13244 static int sanitize_val_alu(struct bpf_verifier_env
*env
,
13245 struct bpf_insn
*insn
)
13247 struct bpf_insn_aux_data
*aux
= cur_aux(env
);
13249 if (can_skip_alu_sanitation(env
, insn
))
13252 return update_alu_sanitation_state(aux
, BPF_ALU_NON_POINTER
, 0);
13255 static bool sanitize_needed(u8 opcode
)
13257 return opcode
== BPF_ADD
|| opcode
== BPF_SUB
;
13260 struct bpf_sanitize_info
{
13261 struct bpf_insn_aux_data aux
;
13265 static struct bpf_verifier_state
*
13266 sanitize_speculative_path(struct bpf_verifier_env
*env
,
13267 const struct bpf_insn
*insn
,
13268 u32 next_idx
, u32 curr_idx
)
13270 struct bpf_verifier_state
*branch
;
13271 struct bpf_reg_state
*regs
;
13273 branch
= push_stack(env
, next_idx
, curr_idx
, true);
13274 if (branch
&& insn
) {
13275 regs
= branch
->frame
[branch
->curframe
]->regs
;
13276 if (BPF_SRC(insn
->code
) == BPF_K
) {
13277 mark_reg_unknown(env
, regs
, insn
->dst_reg
);
13278 } else if (BPF_SRC(insn
->code
) == BPF_X
) {
13279 mark_reg_unknown(env
, regs
, insn
->dst_reg
);
13280 mark_reg_unknown(env
, regs
, insn
->src_reg
);
13286 static int sanitize_ptr_alu(struct bpf_verifier_env
*env
,
13287 struct bpf_insn
*insn
,
13288 const struct bpf_reg_state
*ptr_reg
,
13289 const struct bpf_reg_state
*off_reg
,
13290 struct bpf_reg_state
*dst_reg
,
13291 struct bpf_sanitize_info
*info
,
13292 const bool commit_window
)
13294 struct bpf_insn_aux_data
*aux
= commit_window
? cur_aux(env
) : &info
->aux
;
13295 struct bpf_verifier_state
*vstate
= env
->cur_state
;
13296 bool off_is_imm
= tnum_is_const(off_reg
->var_off
);
13297 bool off_is_neg
= off_reg
->smin_value
< 0;
13298 bool ptr_is_dst_reg
= ptr_reg
== dst_reg
;
13299 u8 opcode
= BPF_OP(insn
->code
);
13300 u32 alu_state
, alu_limit
;
13301 struct bpf_reg_state tmp
;
13305 if (can_skip_alu_sanitation(env
, insn
))
13308 /* We already marked aux for masking from non-speculative
13309 * paths, thus we got here in the first place. We only care
13310 * to explore bad access from here.
13312 if (vstate
->speculative
)
13315 if (!commit_window
) {
13316 if (!tnum_is_const(off_reg
->var_off
) &&
13317 (off_reg
->smin_value
< 0) != (off_reg
->smax_value
< 0))
13318 return REASON_BOUNDS
;
13320 info
->mask_to_left
= (opcode
== BPF_ADD
&& off_is_neg
) ||
13321 (opcode
== BPF_SUB
&& !off_is_neg
);
13324 err
= retrieve_ptr_limit(ptr_reg
, &alu_limit
, info
->mask_to_left
);
13328 if (commit_window
) {
13329 /* In commit phase we narrow the masking window based on
13330 * the observed pointer move after the simulated operation.
13332 alu_state
= info
->aux
.alu_state
;
13333 alu_limit
= abs(info
->aux
.alu_limit
- alu_limit
);
13335 alu_state
= off_is_neg
? BPF_ALU_NEG_VALUE
: 0;
13336 alu_state
|= off_is_imm
? BPF_ALU_IMMEDIATE
: 0;
13337 alu_state
|= ptr_is_dst_reg
?
13338 BPF_ALU_SANITIZE_SRC
: BPF_ALU_SANITIZE_DST
;
13340 /* Limit pruning on unknown scalars to enable deep search for
13341 * potential masking differences from other program paths.
13344 env
->explore_alu_limits
= true;
13347 err
= update_alu_sanitation_state(aux
, alu_state
, alu_limit
);
13351 /* If we're in commit phase, we're done here given we already
13352 * pushed the truncated dst_reg into the speculative verification
13355 * Also, when register is a known constant, we rewrite register-based
13356 * operation to immediate-based, and thus do not need masking (and as
13357 * a consequence, do not need to simulate the zero-truncation either).
13359 if (commit_window
|| off_is_imm
)
13362 /* Simulate and find potential out-of-bounds access under
13363 * speculative execution from truncation as a result of
13364 * masking when off was not within expected range. If off
13365 * sits in dst, then we temporarily need to move ptr there
13366 * to simulate dst (== 0) +/-= ptr. Needed, for example,
13367 * for cases where we use K-based arithmetic in one direction
13368 * and truncated reg-based in the other in order to explore
13371 if (!ptr_is_dst_reg
) {
13373 copy_register_state(dst_reg
, ptr_reg
);
13375 ret
= sanitize_speculative_path(env
, NULL
, env
->insn_idx
+ 1,
13377 if (!ptr_is_dst_reg
&& ret
)
13379 return !ret
? REASON_STACK
: 0;
13382 static void sanitize_mark_insn_seen(struct bpf_verifier_env
*env
)
13384 struct bpf_verifier_state
*vstate
= env
->cur_state
;
13386 /* If we simulate paths under speculation, we don't update the
13387 * insn as 'seen' such that when we verify unreachable paths in
13388 * the non-speculative domain, sanitize_dead_code() can still
13389 * rewrite/sanitize them.
13391 if (!vstate
->speculative
)
13392 env
->insn_aux_data
[env
->insn_idx
].seen
= env
->pass_cnt
;
static int sanitize_err(struct bpf_verifier_env *env,
			const struct bpf_insn *insn, int reason,
			const struct bpf_reg_state *off_reg,
			const struct bpf_reg_state *dst_reg)
{
	static const char *err = "pointer arithmetic with it prohibited for !root";
	const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
	u32 dst = insn->dst_reg, src = insn->src_reg;

	switch (reason) {
	case REASON_BOUNDS:
		verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
			off_reg == dst_reg ? dst : src, err);
		break;
	case REASON_TYPE:
		verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
			off_reg == dst_reg ? src : dst, err);
		break;
	case REASON_PATHS:
		verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
			dst, op, err);
		break;
	case REASON_LIMIT:
		verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
			dst, op, err);
		break;
	case REASON_STACK:
		verbose(env, "R%d could not be pushed for speculative verification, %s\n",
			dst, err);
		break;
	default:
		verbose(env, "verifier internal error: unknown reason (%d)\n",
			reason);
		break;
	}

	return -EACCES;
}
/* check that stack access falls within stack limits and that 'reg' doesn't
 * have a variable offset.
 *
 * Variable offset is prohibited for unprivileged mode for simplicity since it
 * requires corresponding support in Spectre masking for stack ALU. See also
 * retrieve_ptr_limit().
 *
 * 'off' includes 'reg->off'.
 */
static int check_stack_access_for_ptr_arithmetic(
				struct bpf_verifier_env *env,
				int regno,
				const struct bpf_reg_state *reg,
				int off)
{
	if (!tnum_is_const(reg->var_off)) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
			regno, tn_buf, off);
		return -EACCES;
	}

	if (off >= 0 || off < -MAX_BPF_STACK) {
		verbose(env, "R%d stack pointer arithmetic goes out of range, "
			"prohibited for !root; off=%d\n", regno, off);
		return -EACCES;
	}

	return 0;
}
static int sanitize_check_bounds(struct bpf_verifier_env *env,
				 const struct bpf_insn *insn,
				 const struct bpf_reg_state *dst_reg)
{
	u32 dst = insn->dst_reg;

	/* For unprivileged we require that resulting offset must be in bounds
	 * in order to be able to sanitize access later on.
	 */
	if (env->bypass_spec_v1)
		return 0;

	switch (dst_reg->type) {
	case PTR_TO_STACK:
		if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
					dst_reg->off + dst_reg->var_off.value))
			return -EACCES;
		break;
	case PTR_TO_MAP_VALUE:
		if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) {
			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
				"prohibited for !root\n", dst);
			return -EACCES;
		}
		break;
	default:
		break;
	}

	return 0;
}
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
 * Caller should also handle BPF_MOV case separately.
 * If we return -EACCES, caller may want to try again treating pointer as a
 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
 */
static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
				   struct bpf_insn *insn,
				   struct bpf_reg_state *ptr_reg,
				   const struct bpf_reg_state *off_reg)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *regs = state->regs, *dst_reg;
	bool known = tnum_is_const(off_reg->var_off);
	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
	struct bpf_sanitize_info info = {};
	u8 opcode = BPF_OP(insn->code);
	u32 dst = insn->dst_reg;
	bool mask;
	int ret;

	dst_reg = &regs[dst];

	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
	    smin_val > smax_val || umin_val > umax_val) {
		/* Taint dst register if offset had invalid bounds derived from
		 * e.g. dead branches.
		 */
		__mark_reg_unknown(env, dst_reg);
		return 0;
	}

	if (BPF_CLASS(insn->code) != BPF_ALU64) {
		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
		if (opcode == BPF_SUB && env->allow_ptr_leaks) {
			__mark_reg_unknown(env, dst_reg);
			return 0;
		}

		verbose(env,
			"R%d 32-bit pointer arithmetic prohibited\n",
			dst);
		return -EACCES;
	}

	mask = mask_raw_tp_reg(env, ptr_reg);
	if (ptr_reg->type & PTR_MAYBE_NULL) {
		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
			dst, reg_type_str(env, ptr_reg->type));
		unmask_raw_tp_reg(ptr_reg, mask);
		return -EACCES;
	}
	unmask_raw_tp_reg(ptr_reg, mask);

	switch (base_type(ptr_reg->type)) {
	case PTR_TO_MAP_VALUE:
	case PTR_TO_MAP_KEY:
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET:
	case PTR_TO_TP_BUFFER:
	case PTR_TO_BTF_ID:
	case CONST_PTR_TO_DYNPTR:
	case PTR_TO_FLOW_KEYS:
		break;
	case CONST_PTR_TO_MAP:
		/* smin_val represents the known value */
		if (known && smin_val == 0 && opcode == BPF_ADD)
			break;
		fallthrough;
	default:
		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
			dst, reg_type_str(env, ptr_reg->type));
		return -EACCES;
	}

	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
	 * The id may be overwritten later if we create a new variable offset.
	 */
	dst_reg->type = ptr_reg->type;
	dst_reg->id = ptr_reg->id;

	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
		return -EINVAL;

	/* pointer types do not carry 32-bit bounds at the moment. */
	__mark_reg32_unbounded(dst_reg);

	if (sanitize_needed(opcode)) {
		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
				       &info, false);
		if (ret < 0)
			return sanitize_err(env, insn, ret, off_reg, dst_reg);
	}

	switch (opcode) {
	case BPF_ADD:
		/* We can take a fixed offset as long as it doesn't overflow
		 * the s32 'off' field
		 */
		if (known && (ptr_reg->off + smin_val ==
			      (s64)(s32)(ptr_reg->off + smin_val))) {
			/* pointer += K. Accumulate it into fixed offset */
			dst_reg->smin_value = smin_ptr;
			dst_reg->smax_value = smax_ptr;
			dst_reg->umin_value = umin_ptr;
			dst_reg->umax_value = umax_ptr;
			dst_reg->var_off = ptr_reg->var_off;
			dst_reg->off = ptr_reg->off + smin_val;
			dst_reg->raw = ptr_reg->raw;
			break;
		}
		/* A new variable offset is created. Note that off_reg->off
		 * == 0, since it's a scalar.
		 * dst_reg gets the pointer type and since some positive
		 * integer value was added to the pointer, give it a new 'id'
		 * if it's a PTR_TO_PACKET.
		 * this creates a new 'base' pointer, off_reg (variable) gets
		 * added into the variable offset, and we copy the fixed offset
		 * from ptr_reg.
		 */
		if (check_add_overflow(smin_ptr, smin_val, &dst_reg->smin_value) ||
		    check_add_overflow(smax_ptr, smax_val, &dst_reg->smax_value)) {
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		}
		if (check_add_overflow(umin_ptr, umin_val, &dst_reg->umin_value) ||
		    check_add_overflow(umax_ptr, umax_val, &dst_reg->umax_value)) {
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		}
		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
		dst_reg->off = ptr_reg->off;
		dst_reg->raw = ptr_reg->raw;
		if (reg_is_pkt_pointer(ptr_reg)) {
			dst_reg->id = ++env->id_gen;
			/* something was added to pkt_ptr, set range to zero */
			memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
		}
		break;
	case BPF_SUB:
		if (dst_reg == off_reg) {
			/* scalar -= pointer. Creates an unknown scalar */
			verbose(env, "R%d tried to subtract pointer from scalar\n",
				dst);
			return -EACCES;
		}
		/* We don't allow subtraction from FP, because (according to
		 * test_verifier.c test "invalid fp arithmetic", JITs might not
		 * be able to deal with it.
		 */
		if (ptr_reg->type == PTR_TO_STACK) {
			verbose(env, "R%d subtraction from stack pointer prohibited\n",
				dst);
			return -EACCES;
		}
		if (known && (ptr_reg->off - smin_val ==
			      (s64)(s32)(ptr_reg->off - smin_val))) {
			/* pointer -= K. Subtract it from fixed offset */
			dst_reg->smin_value = smin_ptr;
			dst_reg->smax_value = smax_ptr;
			dst_reg->umin_value = umin_ptr;
			dst_reg->umax_value = umax_ptr;
			dst_reg->var_off = ptr_reg->var_off;
			dst_reg->id = ptr_reg->id;
			dst_reg->off = ptr_reg->off - smin_val;
			dst_reg->raw = ptr_reg->raw;
			break;
		}
		/* A new variable offset is created. If the subtrahend is known
		 * nonnegative, then any reg->range we had before is still good.
		 */
		if (check_sub_overflow(smin_ptr, smax_val, &dst_reg->smin_value) ||
		    check_sub_overflow(smax_ptr, smin_val, &dst_reg->smax_value)) {
			/* Overflow possible, we know nothing */
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		}
		if (umin_ptr < umax_val) {
			/* Overflow possible, we know nothing */
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		} else {
			/* Cannot overflow (as long as bounds are consistent) */
			dst_reg->umin_value = umin_ptr - umax_val;
			dst_reg->umax_value = umax_ptr - umin_val;
		}
		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
		dst_reg->off = ptr_reg->off;
		dst_reg->raw = ptr_reg->raw;
		if (reg_is_pkt_pointer(ptr_reg)) {
			dst_reg->id = ++env->id_gen;
			/* something was added to pkt_ptr, set range to zero */
			if (smin_val < 0)
				memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
		}
		break;
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* bitwise ops on pointers are troublesome, prohibit. */
		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
			dst, bpf_alu_string[opcode >> 4]);
		return -EACCES;
	default:
		/* other operators (e.g. MUL,LSH) produce non-pointer results */
		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
			dst, bpf_alu_string[opcode >> 4]);
		return -EACCES;
	}

	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
		return -EINVAL;
	reg_bounds_sync(dst_reg);
	if (sanitize_check_bounds(env, insn, dst_reg) < 0)
		return -EACCES;
	if (sanitize_needed(opcode)) {
		ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
				       &info, true);
		if (ret < 0)
			return sanitize_err(env, insn, ret, off_reg, dst_reg);
	}

	return 0;
}
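
/* Worked example (illustrative only): with r1 = PTR_TO_MAP_VALUE, off = 0,
 * and r2 a scalar known to lie in [0, 15]:
 *
 *	r1 += 8		known constant, accumulated into the fixed offset: off = 8
 *	r1 += r2	new variable offset: r2's var_off/umin/umax are folded
 *			into r1's bounds, the fixed offset stays at 8, and a
 *			later bounds check must cover offsets 8..23.
 *
 * 'scalar -= pointer' and any bitwise op on a pointer are rejected above.
 */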
static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{
	s32 *dst_smin = &dst_reg->s32_min_value;
	s32 *dst_smax = &dst_reg->s32_max_value;
	u32 *dst_umin = &dst_reg->u32_min_value;
	u32 *dst_umax = &dst_reg->u32_max_value;

	if (check_add_overflow(*dst_smin, src_reg->s32_min_value, dst_smin) ||
	    check_add_overflow(*dst_smax, src_reg->s32_max_value, dst_smax)) {
		*dst_smin = S32_MIN;
		*dst_smax = S32_MAX;
	}
	if (check_add_overflow(*dst_umin, src_reg->u32_min_value, dst_umin) ||
	    check_add_overflow(*dst_umax, src_reg->u32_max_value, dst_umax)) {
		*dst_umin = 0;
		*dst_umax = U32_MAX;
	}
}

static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{
	s64 *dst_smin = &dst_reg->smin_value;
	s64 *dst_smax = &dst_reg->smax_value;
	u64 *dst_umin = &dst_reg->umin_value;
	u64 *dst_umax = &dst_reg->umax_value;

	if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) ||
	    check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) {
		*dst_smin = S64_MIN;
		*dst_smax = S64_MAX;
	}
	if (check_add_overflow(*dst_umin, src_reg->umin_value, dst_umin) ||
	    check_add_overflow(*dst_umax, src_reg->umax_value, dst_umax)) {
		*dst_umin = 0;
		*dst_umax = U64_MAX;
	}
}
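
/* Example of the saturating behaviour above (illustrative): if dst holds
 * [10, 20] and src holds [5, U64_MAX - 5], the umin addition is fine but the
 * umax addition overflows, so check_add_overflow() trips and the unsigned
 * bounds collapse to [0, U64_MAX]; the signed bounds are handled by the same
 * pattern independently.
 */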
static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{
	s32 *dst_smin = &dst_reg->s32_min_value;
	s32 *dst_smax = &dst_reg->s32_max_value;
	u32 umin_val = src_reg->u32_min_value;
	u32 umax_val = src_reg->u32_max_value;

	if (check_sub_overflow(*dst_smin, src_reg->s32_max_value, dst_smin) ||
	    check_sub_overflow(*dst_smax, src_reg->s32_min_value, dst_smax)) {
		/* Overflow possible, we know nothing */
		*dst_smin = S32_MIN;
		*dst_smax = S32_MAX;
	}
	if (dst_reg->u32_min_value < umax_val) {
		/* Overflow possible, we know nothing */
		dst_reg->u32_min_value = 0;
		dst_reg->u32_max_value = U32_MAX;
	} else {
		/* Cannot overflow (as long as bounds are consistent) */
		dst_reg->u32_min_value -= umax_val;
		dst_reg->u32_max_value -= umin_val;
	}
}

static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{
	s64 *dst_smin = &dst_reg->smin_value;
	s64 *dst_smax = &dst_reg->smax_value;
	u64 umin_val = src_reg->umin_value;
	u64 umax_val = src_reg->umax_value;

	if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) ||
	    check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) {
		/* Overflow possible, we know nothing */
		*dst_smin = S64_MIN;
		*dst_smax = S64_MAX;
	}
	if (dst_reg->umin_value < umax_val) {
		/* Overflow possible, we know nothing */
		dst_reg->umin_value = 0;
		dst_reg->umax_value = U64_MAX;
	} else {
		/* Cannot overflow (as long as bounds are consistent) */
		dst_reg->umin_value -= umax_val;
		dst_reg->umax_value -= umin_val;
	}
}
static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{
	s32 smin_val = src_reg->s32_min_value;
	u32 umin_val = src_reg->u32_min_value;
	u32 umax_val = src_reg->u32_max_value;

	if (smin_val < 0 || dst_reg->s32_min_value < 0) {
		/* Ain't nobody got time to multiply that sign */
		__mark_reg32_unbounded(dst_reg);
		return;
	}
	/* Both values are positive, so we can work with unsigned and
	 * copy the result to signed (unless it exceeds S32_MAX).
	 */
	if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
		/* Potential overflow, we know nothing */
		__mark_reg32_unbounded(dst_reg);
		return;
	}
	dst_reg->u32_min_value *= umin_val;
	dst_reg->u32_max_value *= umax_val;
	if (dst_reg->u32_max_value > S32_MAX) {
		/* Overflow possible, we know nothing */
		dst_reg->s32_min_value = S32_MIN;
		dst_reg->s32_max_value = S32_MAX;
	} else {
		dst_reg->s32_min_value = dst_reg->u32_min_value;
		dst_reg->s32_max_value = dst_reg->u32_max_value;
	}
}

static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{
	s64 smin_val = src_reg->smin_value;
	u64 umin_val = src_reg->umin_value;
	u64 umax_val = src_reg->umax_value;

	if (smin_val < 0 || dst_reg->smin_value < 0) {
		/* Ain't nobody got time to multiply that sign */
		__mark_reg64_unbounded(dst_reg);
		return;
	}
	/* Both values are positive, so we can work with unsigned and
	 * copy the result to signed (unless it exceeds S64_MAX).
	 */
	if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
		/* Potential overflow, we know nothing */
		__mark_reg64_unbounded(dst_reg);
		return;
	}
	dst_reg->umin_value *= umin_val;
	dst_reg->umax_value *= umax_val;
	if (dst_reg->umax_value > S64_MAX) {
		/* Overflow possible, we know nothing */
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
	} else {
		dst_reg->smin_value = dst_reg->umin_value;
		dst_reg->smax_value = dst_reg->umax_value;
	}
}
static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{
	bool src_known = tnum_subreg_is_const(src_reg->var_off);
	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
	u32 umax_val = src_reg->u32_max_value;

	if (src_known && dst_known) {
		__mark_reg32_known(dst_reg, var32_off.value);
		return;
	}

	/* We get our minimum from the var_off, since that's inherently
	 * bitwise. Our maximum is the minimum of the operands' maxima.
	 */
	dst_reg->u32_min_value = var32_off.value;
	dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);

	/* Safe to set s32 bounds by casting u32 result into s32 when u32
	 * doesn't cross sign boundary. Otherwise set s32 bounds to unbounded.
	 */
	if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) {
		dst_reg->s32_min_value = dst_reg->u32_min_value;
		dst_reg->s32_max_value = dst_reg->u32_max_value;
	} else {
		dst_reg->s32_min_value = S32_MIN;
		dst_reg->s32_max_value = S32_MAX;
	}
}

static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{
	bool src_known = tnum_is_const(src_reg->var_off);
	bool dst_known = tnum_is_const(dst_reg->var_off);
	u64 umax_val = src_reg->umax_value;

	if (src_known && dst_known) {
		__mark_reg_known(dst_reg, dst_reg->var_off.value);
		return;
	}

	/* We get our minimum from the var_off, since that's inherently
	 * bitwise. Our maximum is the minimum of the operands' maxima.
	 */
	dst_reg->umin_value = dst_reg->var_off.value;
	dst_reg->umax_value = min(dst_reg->umax_value, umax_val);

	/* Safe to set s64 bounds by casting u64 result into s64 when u64
	 * doesn't cross sign boundary. Otherwise set s64 bounds to unbounded.
	 */
	if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) {
		dst_reg->smin_value = dst_reg->umin_value;
		dst_reg->smax_value = dst_reg->umax_value;
	} else {
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
	}
	/* We may learn something more from the var_off */
	__update_reg_bounds(dst_reg);
}
static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
				struct bpf_reg_state *src_reg)
{
	bool src_known = tnum_subreg_is_const(src_reg->var_off);
	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
	u32 umin_val = src_reg->u32_min_value;

	if (src_known && dst_known) {
		__mark_reg32_known(dst_reg, var32_off.value);
		return;
	}

	/* We get our maximum from the var_off, and our minimum is the
	 * maximum of the operands' minima
	 */
	dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
	dst_reg->u32_max_value = var32_off.value | var32_off.mask;

	/* Safe to set s32 bounds by casting u32 result into s32 when u32
	 * doesn't cross sign boundary. Otherwise set s32 bounds to unbounded.
	 */
	if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) {
		dst_reg->s32_min_value = dst_reg->u32_min_value;
		dst_reg->s32_max_value = dst_reg->u32_max_value;
	} else {
		dst_reg->s32_min_value = S32_MIN;
		dst_reg->s32_max_value = S32_MAX;
	}
}

static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
			      struct bpf_reg_state *src_reg)
{
	bool src_known = tnum_is_const(src_reg->var_off);
	bool dst_known = tnum_is_const(dst_reg->var_off);
	u64 umin_val = src_reg->umin_value;

	if (src_known && dst_known) {
		__mark_reg_known(dst_reg, dst_reg->var_off.value);
		return;
	}

	/* We get our maximum from the var_off, and our minimum is the
	 * maximum of the operands' minima
	 */
	dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;

	/* Safe to set s64 bounds by casting u64 result into s64 when u64
	 * doesn't cross sign boundary. Otherwise set s64 bounds to unbounded.
	 */
	if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) {
		dst_reg->smin_value = dst_reg->umin_value;
		dst_reg->smax_value = dst_reg->umax_value;
	} else {
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
	}
	/* We may learn something more from the var_off */
	__update_reg_bounds(dst_reg);
}
static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{
	bool src_known = tnum_subreg_is_const(src_reg->var_off);
	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
	struct tnum var32_off = tnum_subreg(dst_reg->var_off);

	if (src_known && dst_known) {
		__mark_reg32_known(dst_reg, var32_off.value);
		return;
	}

	/* We get both minimum and maximum from the var32_off. */
	dst_reg->u32_min_value = var32_off.value;
	dst_reg->u32_max_value = var32_off.value | var32_off.mask;

	/* Safe to set s32 bounds by casting u32 result into s32 when u32
	 * doesn't cross sign boundary. Otherwise set s32 bounds to unbounded.
	 */
	if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) {
		dst_reg->s32_min_value = dst_reg->u32_min_value;
		dst_reg->s32_max_value = dst_reg->u32_max_value;
	} else {
		dst_reg->s32_min_value = S32_MIN;
		dst_reg->s32_max_value = S32_MAX;
	}
}

static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{
	bool src_known = tnum_is_const(src_reg->var_off);
	bool dst_known = tnum_is_const(dst_reg->var_off);

	if (src_known && dst_known) {
		/* dst_reg->var_off.value has been updated earlier */
		__mark_reg_known(dst_reg, dst_reg->var_off.value);
		return;
	}

	/* We get both minimum and maximum from the var_off. */
	dst_reg->umin_value = dst_reg->var_off.value;
	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;

	/* Safe to set s64 bounds by casting u64 result into s64 when u64
	 * doesn't cross sign boundary. Otherwise set s64 bounds to unbounded.
	 */
	if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) {
		dst_reg->smin_value = dst_reg->umin_value;
		dst_reg->smax_value = dst_reg->umax_value;
	} else {
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
	}

	__update_reg_bounds(dst_reg);
}
static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
				   u64 umin_val, u64 umax_val)
{
	/* We lose all sign bit information (except what we can pick
	 * up from var_off)
	 */
	dst_reg->s32_min_value = S32_MIN;
	dst_reg->s32_max_value = S32_MAX;
	/* If we might shift our top bit out, then we know nothing */
	if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
		dst_reg->u32_min_value = 0;
		dst_reg->u32_max_value = U32_MAX;
	} else {
		dst_reg->u32_min_value <<= umin_val;
		dst_reg->u32_max_value <<= umax_val;
	}
}

static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{
	u32 umax_val = src_reg->u32_max_value;
	u32 umin_val = src_reg->u32_min_value;
	/* u32 alu operation will zext upper bits */
	struct tnum subreg = tnum_subreg(dst_reg->var_off);

	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
	dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
	/* Not required but being careful mark reg64 bounds as unknown so
	 * that we are forced to pick them up from tnum and zext later and
	 * if some path skips this step we are still safe.
	 */
	__mark_reg64_unbounded(dst_reg);
	__update_reg32_bounds(dst_reg);
}

static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
				   u64 umin_val, u64 umax_val)
{
	/* Special case <<32 because it is a common compiler pattern to sign
	 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
	 * positive we know this shift will also be positive so we can track
	 * bounds correctly. Otherwise we lose all sign bit information except
	 * what we can pick up from var_off. Perhaps we can generalize this
	 * later to shifts of any length.
	 */
	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
		dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
	else
		dst_reg->smax_value = S64_MAX;

	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
		dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
	else
		dst_reg->smin_value = S64_MIN;

	/* If we might shift our top bit out, then we know nothing */
	if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
		dst_reg->umin_value = 0;
		dst_reg->umax_value = U64_MAX;
	} else {
		dst_reg->umin_value <<= umin_val;
		dst_reg->umax_value <<= umax_val;
	}
}

static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{
	u64 umax_val = src_reg->umax_value;
	u64 umin_val = src_reg->umin_value;

	/* scalar64 calc uses 32bit unshifted bounds so must be called first */
	__scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);

	dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
	/* We may learn something more from the var_off */
	__update_reg_bounds(dst_reg);
}
static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
				 struct bpf_reg_state *src_reg)
{
	struct tnum subreg = tnum_subreg(dst_reg->var_off);
	u32 umax_val = src_reg->u32_max_value;
	u32 umin_val = src_reg->u32_min_value;

	/* BPF_RSH is an unsigned shift. If the value in dst_reg might
	 * be negative, then either:
	 * 1) src_reg might be zero, so the sign bit of the result is
	 *    unknown, so we lose our signed bounds
	 * 2) it's known negative, thus the unsigned bounds capture the
	 *    signed bounds
	 * 3) the signed bounds cross zero, so they tell us nothing
	 *    about the result
	 * If the value in dst_reg is known nonnegative, then again the
	 * unsigned bounds capture the signed bounds.
	 * Thus, in all cases it suffices to blow away our signed bounds
	 * and rely on inferring new ones from the unsigned bounds and
	 * var_off of the result.
	 */
	dst_reg->s32_min_value = S32_MIN;
	dst_reg->s32_max_value = S32_MAX;

	dst_reg->var_off = tnum_rshift(subreg, umin_val);
	dst_reg->u32_min_value >>= umax_val;
	dst_reg->u32_max_value >>= umin_val;

	__mark_reg64_unbounded(dst_reg);
	__update_reg32_bounds(dst_reg);
}

static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
			       struct bpf_reg_state *src_reg)
{
	u64 umax_val = src_reg->umax_value;
	u64 umin_val = src_reg->umin_value;

	/* BPF_RSH is an unsigned shift. If the value in dst_reg might
	 * be negative, then either:
	 * 1) src_reg might be zero, so the sign bit of the result is
	 *    unknown, so we lose our signed bounds
	 * 2) it's known negative, thus the unsigned bounds capture the
	 *    signed bounds
	 * 3) the signed bounds cross zero, so they tell us nothing
	 *    about the result
	 * If the value in dst_reg is known nonnegative, then again the
	 * unsigned bounds capture the signed bounds.
	 * Thus, in all cases it suffices to blow away our signed bounds
	 * and rely on inferring new ones from the unsigned bounds and
	 * var_off of the result.
	 */
	dst_reg->smin_value = S64_MIN;
	dst_reg->smax_value = S64_MAX;
	dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
	dst_reg->umin_value >>= umax_val;
	dst_reg->umax_value >>= umin_val;

	/* Its not easy to operate on alu32 bounds here because it depends
	 * on bits being shifted in. Take easy way out and mark unbounded
	 * so we can recalculate later from tnum.
	 */
	__mark_reg32_unbounded(dst_reg);
	__update_reg_bounds(dst_reg);
}
static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
				  struct bpf_reg_state *src_reg)
{
	u64 umin_val = src_reg->u32_min_value;

	/* Upon reaching here, src_known is true and
	 * umax_val is equal to umin_val.
	 */
	dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
	dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);

	dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);

	/* blow away the dst_reg umin_value/umax_value and rely on
	 * dst_reg var_off to refine the result.
	 */
	dst_reg->u32_min_value = 0;
	dst_reg->u32_max_value = U32_MAX;

	__mark_reg64_unbounded(dst_reg);
	__update_reg32_bounds(dst_reg);
}

static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
				struct bpf_reg_state *src_reg)
{
	u64 umin_val = src_reg->umin_value;

	/* Upon reaching here, src_known is true and umax_val is equal
	 * to umin_val.
	 */
	dst_reg->smin_value >>= umin_val;
	dst_reg->smax_value >>= umin_val;

	dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);

	/* blow away the dst_reg umin_value/umax_value and rely on
	 * dst_reg var_off to refine the result.
	 */
	dst_reg->umin_value = 0;
	dst_reg->umax_value = U64_MAX;

	/* Its not easy to operate on alu32 bounds here because it depends
	 * on bits being shifted in from upper 32-bits. Take easy way out
	 * and mark unbounded so we can recalculate later from tnum.
	 */
	__mark_reg32_unbounded(dst_reg);
	__update_reg_bounds(dst_reg);
}
static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn,
					     const struct bpf_reg_state *src_reg)
{
	bool src_is_const = false;
	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;

	if (insn_bitness == 32) {
		if (tnum_subreg_is_const(src_reg->var_off)
		    && src_reg->s32_min_value == src_reg->s32_max_value
		    && src_reg->u32_min_value == src_reg->u32_max_value)
			src_is_const = true;
	} else {
		if (tnum_is_const(src_reg->var_off)
		    && src_reg->smin_value == src_reg->smax_value
		    && src_reg->umin_value == src_reg->umax_value)
			src_is_const = true;
	}

	switch (BPF_OP(insn->code)) {
	case BPF_ADD:
	case BPF_SUB:
	case BPF_AND:
	case BPF_XOR:
	case BPF_OR:
	case BPF_MUL:
		return true;

	/* Shift operators range is only computable if shift dimension operand
	 * is a constant. Shifts greater than 31 or 63 are undefined. This
	 * includes shifts by a negative number.
	 */
	case BPF_LSH:
	case BPF_RSH:
	case BPF_ARSH:
		return (src_is_const && src_reg->umax_value < insn_bitness);
	default:
		return false;
	}
}
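
/* Consequence of the check above (illustrative): for "r1 <<= r2" where r2 is
 * not a known constant, or where the constant is >= 64 for ALU64 (>= 32 for
 * ALU32), the caller simply marks r1 as unknown instead of trying to model an
 * undefined shift.
 */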
/* WARNING: This function does calculations on 64-bit values, but the actual
 * execution may occur on 32-bit values. Therefore, things like bitshifts
 * need extra checks in the 32-bit case.
 */
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
				      struct bpf_insn *insn,
				      struct bpf_reg_state *dst_reg,
				      struct bpf_reg_state src_reg)
{
	u8 opcode = BPF_OP(insn->code);
	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
	int ret;

	if (!is_safe_to_compute_dst_reg_range(insn, &src_reg)) {
		__mark_reg_unknown(env, dst_reg);
		return 0;
	}

	if (sanitize_needed(opcode)) {
		ret = sanitize_val_alu(env, insn);
		if (ret < 0)
			return sanitize_err(env, insn, ret, NULL, NULL);
	}

	/* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
	 * There are two classes of instructions: The first class we track both
	 * alu32 and alu64 sign/unsigned bounds independently this provides the
	 * greatest amount of precision when alu operations are mixed with jmp32
	 * operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_AND,
	 * and BPF_OR. This is possible because these ops have fairly easy to
	 * understand and calculate behavior in both 32-bit and 64-bit alu ops.
	 * See alu32 verifier tests for examples. The second class of
	 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy
	 * with regards to tracking sign/unsigned bounds because the bits may
	 * cross subreg boundaries in the alu64 case. When this happens we mark
	 * the reg unbounded in the subreg bound space and use the resulting
	 * tnum to calculate an approximation of the sign/unsigned bounds.
	 */
	switch (opcode) {
	case BPF_ADD:
		scalar32_min_max_add(dst_reg, &src_reg);
		scalar_min_max_add(dst_reg, &src_reg);
		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
		break;
	case BPF_SUB:
		scalar32_min_max_sub(dst_reg, &src_reg);
		scalar_min_max_sub(dst_reg, &src_reg);
		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
		break;
	case BPF_MUL:
		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
		scalar32_min_max_mul(dst_reg, &src_reg);
		scalar_min_max_mul(dst_reg, &src_reg);
		break;
	case BPF_AND:
		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
		scalar32_min_max_and(dst_reg, &src_reg);
		scalar_min_max_and(dst_reg, &src_reg);
		break;
	case BPF_OR:
		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
		scalar32_min_max_or(dst_reg, &src_reg);
		scalar_min_max_or(dst_reg, &src_reg);
		break;
	case BPF_XOR:
		dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
		scalar32_min_max_xor(dst_reg, &src_reg);
		scalar_min_max_xor(dst_reg, &src_reg);
		break;
	case BPF_LSH:
		if (alu32)
			scalar32_min_max_lsh(dst_reg, &src_reg);
		else
			scalar_min_max_lsh(dst_reg, &src_reg);
		break;
	case BPF_RSH:
		if (alu32)
			scalar32_min_max_rsh(dst_reg, &src_reg);
		else
			scalar_min_max_rsh(dst_reg, &src_reg);
		break;
	case BPF_ARSH:
		if (alu32)
			scalar32_min_max_arsh(dst_reg, &src_reg);
		else
			scalar_min_max_arsh(dst_reg, &src_reg);
		break;
	default:
		break;
	}

	/* ALU32 ops are zero extended into 64bit register */
	if (alu32)
		zext_32_to_64(dst_reg);
	reg_bounds_sync(dst_reg);
	return 0;
}
/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
 * and var_off.
 */
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
				   struct bpf_insn *insn)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
	u8 opcode = BPF_OP(insn->code);
	int err;

	dst_reg = &regs[insn->dst_reg];
	src_reg = NULL;

	if (dst_reg->type == PTR_TO_ARENA) {
		struct bpf_insn_aux_data *aux = cur_aux(env);

		if (BPF_CLASS(insn->code) == BPF_ALU64)
			/*
			 * 32-bit operations zero upper bits automatically.
			 * 64-bit operations need to be converted to 32.
			 */
			aux->needs_zext = true;

		/* Any arithmetic operations are allowed on arena pointers */
		return 0;
	}

	if (dst_reg->type != SCALAR_VALUE)
		ptr_reg = dst_reg;

	if (BPF_SRC(insn->code) == BPF_X) {
		src_reg = &regs[insn->src_reg];
		if (src_reg->type != SCALAR_VALUE) {
			if (dst_reg->type != SCALAR_VALUE) {
				/* Combining two pointers by any ALU op yields
				 * an arbitrary scalar. Disallow all math except
				 * pointer subtraction
				 */
				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
					mark_reg_unknown(env, regs, insn->dst_reg);
					return 0;
				}
				verbose(env, "R%d pointer %s pointer prohibited\n",
					insn->dst_reg,
					bpf_alu_string[opcode >> 4]);
				return -EACCES;
			} else {
				/* scalar += pointer
				 * This is legal, but we have to reverse our
				 * src/dest handling in computing the range
				 */
				err = mark_chain_precision(env, insn->dst_reg);
				if (err)
					return err;
				return adjust_ptr_min_max_vals(env, insn,
							       src_reg, dst_reg);
			}
		} else if (ptr_reg) {
			/* pointer += scalar */
			err = mark_chain_precision(env, insn->src_reg);
			if (err)
				return err;
			return adjust_ptr_min_max_vals(env, insn,
						       dst_reg, src_reg);
		} else if (dst_reg->precise) {
			/* if dst_reg is precise, src_reg should be precise as well */
			err = mark_chain_precision(env, insn->src_reg);
			if (err)
				return err;
		}
	} else {
		/* Pretend the src is a reg with a known value, since we only
		 * need to be able to read from this state.
		 */
		off_reg.type = SCALAR_VALUE;
		__mark_reg_known(&off_reg, insn->imm);
		src_reg = &off_reg;
		if (ptr_reg) /* pointer += K */
			return adjust_ptr_min_max_vals(env, insn,
						       ptr_reg, src_reg);
	}

	/* Got here implies adding two SCALAR_VALUEs */
	if (WARN_ON_ONCE(ptr_reg)) {
		print_verifier_state(env, state, true);
		verbose(env, "verifier internal error: unexpected ptr_reg\n");
		return -EINVAL;
	}
	if (WARN_ON(!src_reg)) {
		print_verifier_state(env, state, true);
		verbose(env, "verifier internal error: no src_reg\n");
		return -EINVAL;
	}
	err = adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
	if (err)
		return err;
	/*
	 * Compilers can generate the code
	 * r1 = r2
	 * r1 += 0x1
	 * if r2 < 1000 goto ...
	 * use r1 in memory access
	 * So for 64-bit alu remember constant delta between r2 and r1 and
	 * update r1 after 'if' condition.
	 */
	if (env->bpf_capable &&
	    BPF_OP(insn->code) == BPF_ADD && !alu32 &&
	    dst_reg->id && is_reg_const(src_reg, false)) {
		u64 val = reg_const_value(src_reg, false);

		if ((dst_reg->id & BPF_ADD_CONST) ||
		    /* prevent overflow in sync_linked_regs() later */
		    val > (u32)S32_MAX) {
			/*
			 * If the register already went through rX += val
			 * we cannot accumulate another val into rx->off.
			 */
			dst_reg->off = 0;
			dst_reg->id = 0;
		} else {
			dst_reg->id |= BPF_ADD_CONST;
			dst_reg->off = val;
		}
	} else {
		/*
		 * Make sure ID is cleared otherwise dst_reg min/max could be
		 * incorrectly propagated into other registers by sync_linked_regs()
		 */
		dst_reg->id = 0;
	}

	return 0;
}
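
/* Example of the linked-register tracking above (illustrative):
 *
 *	r1 = r2			; r1 and r2 share an id
 *	r1 += 8			; BPF_ADD_CONST is set, dst_reg->off = 8
 *	if r2 < 1000 goto l
 *	*(u8 *)(r1 + 0)
 *
 * After the branch, sync_linked_regs() can transfer the "r2 < 1000" knowledge
 * to r1 while compensating for the constant delta of 8.
 */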
/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_reg_state *regs = cur_regs(env);
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode == BPF_END || opcode == BPF_NEG) {
		if (opcode == BPF_NEG) {
			if (BPF_SRC(insn->code) != BPF_K ||
			    insn->src_reg != BPF_REG_0 ||
			    insn->off != 0 || insn->imm != 0) {
				verbose(env, "BPF_NEG uses reserved fields\n");
				return -EINVAL;
			}
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
			    (BPF_CLASS(insn->code) == BPF_ALU64 &&
			     BPF_SRC(insn->code) != BPF_TO_LE)) {
				verbose(env, "BPF_END uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src operand */
		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->dst_reg)) {
			verbose(env, "R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		}

		/* check dest operand */
		err = check_reg_arg(env, insn->dst_reg, DST_OP);
		if (err)
			return err;

	} else if (opcode == BPF_MOV) {

		if (BPF_SRC(insn->code) == BPF_X) {
			if (BPF_CLASS(insn->code) == BPF_ALU) {
				if ((insn->off != 0 && insn->off != 8 && insn->off != 16) ||
				    insn->imm) {
					verbose(env, "BPF_MOV uses reserved fields\n");
					return -EINVAL;
				}
			} else if (insn->off == BPF_ADDR_SPACE_CAST) {
				if (insn->imm != 1 && insn->imm != 1u << 16) {
					verbose(env, "addr_space_cast insn can only convert between address space 1 and 0\n");
					return -EINVAL;
				}
				if (!env->prog->aux->arena) {
					verbose(env, "addr_space_cast insn can only be used in a program that has an associated arena\n");
					return -EINVAL;
				}
			} else {
				if ((insn->off != 0 && insn->off != 8 && insn->off != 16 &&
				     insn->off != 32) || insn->imm) {
					verbose(env, "BPF_MOV uses reserved fields\n");
					return -EINVAL;
				}
			}

			/* check src operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose(env, "BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check dest operand, mark as required later */
		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
		if (err)
			return err;

		if (BPF_SRC(insn->code) == BPF_X) {
			struct bpf_reg_state *src_reg = regs + insn->src_reg;
			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;

			if (BPF_CLASS(insn->code) == BPF_ALU64) {
				if (insn->imm) {
					/* off == BPF_ADDR_SPACE_CAST */
					mark_reg_unknown(env, regs, insn->dst_reg);
					if (insn->imm == 1) { /* cast from as(1) to as(0) */
						dst_reg->type = PTR_TO_ARENA;
						/* PTR_TO_ARENA is 32-bit */
						dst_reg->subreg_def = env->insn_idx + 1;
					}
				} else if (insn->off == 0) {
					/* case: R1 = R2
					 * copy register state to dest reg
					 */
					assign_scalar_id_before_mov(env, src_reg);
					copy_register_state(dst_reg, src_reg);
					dst_reg->live |= REG_LIVE_WRITTEN;
					dst_reg->subreg_def = DEF_NOT_SUBREG;
				} else {
					/* case: R1 = (s8, s16 s32)R2 */
					if (is_pointer_value(env, insn->src_reg)) {
						verbose(env,
							"R%d sign-extension part of pointer\n",
							insn->src_reg);
						return -EACCES;
					} else if (src_reg->type == SCALAR_VALUE) {
						bool no_sext;

						no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
						if (no_sext)
							assign_scalar_id_before_mov(env, src_reg);
						copy_register_state(dst_reg, src_reg);
						if (!no_sext)
							dst_reg->id = 0;
						coerce_reg_to_size_sx(dst_reg, insn->off >> 3);
						dst_reg->live |= REG_LIVE_WRITTEN;
						dst_reg->subreg_def = DEF_NOT_SUBREG;
					} else {
						mark_reg_unknown(env, regs, insn->dst_reg);
					}
				}
			} else {
				/* R1 = (u32) R2 */
				if (is_pointer_value(env, insn->src_reg)) {
					verbose(env,
						"R%d partial copy of pointer\n",
						insn->src_reg);
					return -EACCES;
				} else if (src_reg->type == SCALAR_VALUE) {
					if (insn->off == 0) {
						bool is_src_reg_u32 = get_reg_width(src_reg) <= 32;

						if (is_src_reg_u32)
							assign_scalar_id_before_mov(env, src_reg);
						copy_register_state(dst_reg, src_reg);
						/* Make sure ID is cleared if src_reg is not in u32
						 * range otherwise dst_reg min/max could be incorrectly
						 * propagated into src_reg by sync_linked_regs()
						 */
						if (!is_src_reg_u32)
							dst_reg->id = 0;
						dst_reg->live |= REG_LIVE_WRITTEN;
						dst_reg->subreg_def = env->insn_idx + 1;
					} else {
						/* case: W1 = (s8, s16)W2 */
						bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));

						if (no_sext)
							assign_scalar_id_before_mov(env, src_reg);
						copy_register_state(dst_reg, src_reg);
						if (!no_sext)
							dst_reg->id = 0;
						dst_reg->live |= REG_LIVE_WRITTEN;
						dst_reg->subreg_def = env->insn_idx + 1;
						coerce_subreg_to_size_sx(dst_reg, insn->off >> 3);
					}
				} else {
					mark_reg_unknown(env, regs,
							 insn->dst_reg);
				}
				zext_32_to_64(dst_reg);
				reg_bounds_sync(dst_reg);
			}
		} else {
			/* case: R = imm
			 * remember the value we stored into this reg
			 */
			/* clear any state __mark_reg_known doesn't set */
			mark_reg_unknown(env, regs, insn->dst_reg);
			regs[insn->dst_reg].type = SCALAR_VALUE;
			if (BPF_CLASS(insn->code) == BPF_ALU64) {
				__mark_reg_known(regs + insn->dst_reg,
						 insn->imm);
			} else {
				__mark_reg_known(regs + insn->dst_reg,
						 (u32)insn->imm);
			}
		}

	} else if (opcode > BPF_END) {
		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
		return -EINVAL;

	} else {	/* all other ALU ops: and, sub, xor, add, ... */

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off > 1 ||
			    (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) {
				verbose(env, "BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
			/* check src1 operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off > 1 ||
			    (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) {
				verbose(env, "BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src2 operand */
		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
			verbose(env, "div by zero\n");
			return -EINVAL;
		}

		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;

			if (insn->imm < 0 || insn->imm >= size) {
				verbose(env, "invalid shift %d\n", insn->imm);
				return -EINVAL;
			}
		}

		/* check dest operand */
		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
		err = err ?: adjust_reg_min_max_vals(env, insn);
		if (err)
			return err;
	}

	return reg_bounds_sanity_check(env, &regs[insn->dst_reg], "alu");
}
static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
				   struct bpf_reg_state *dst_reg,
				   enum bpf_reg_type type,
				   bool range_right_open)
{
	struct bpf_func_state *state;
	struct bpf_reg_state *reg;
	int new_range;

	if (dst_reg->off < 0 ||
	    (dst_reg->off == 0 && range_right_open))
		/* This doesn't give us any range */
		return;

	if (dst_reg->umax_value > MAX_PACKET_OFF ||
	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
		/* Risk of overflow. For instance, ptr + (1<<63) may be less
		 * than pkt_end, but that's because it's also less than pkt.
		 */
		return;

	new_range = dst_reg->off;
	if (range_right_open)
		new_range++;

	/* Examples for register markings:
	 *
	 * pkt_data in dst register:
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (r2 > pkt_end) goto <handle exception>
	 *   <access okay>
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (r2 < pkt_end) goto <access okay>
	 *   <handle exception>
	 *
	 *   Where:
	 *     r2 == dst_reg, pkt_end == src_reg
	 *     r2=pkt(id=n,off=8,r=0)
	 *     r3=pkt(id=n,off=0,r=0)
	 *
	 * pkt_data in src register:
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (pkt_end >= r2) goto <access okay>
	 *   <handle exception>
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (pkt_end <= r2) goto <handle exception>
	 *   <access okay>
	 *
	 *   Where:
	 *     pkt_end == dst_reg, r2 == src_reg
	 *     r2=pkt(id=n,off=8,r=0)
	 *     r3=pkt(id=n,off=0,r=0)
	 *
	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
	 * and [r3, r3 + 8-1) respectively is safe to access depending on
	 * the check.
	 */

	/* If our ids match, then we must have the same max_value. And we
	 * don't care about the other reg's fixed offset, since if it's too big
	 * the range won't allow anything.
	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
	 */
	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
		if (reg->type == type && reg->id == dst_reg->id)
			/* keep the maximum range already checked */
			reg->range = max(reg->range, new_range);
	}));
}
/*
 * <reg1> <op> <reg2>, currently assuming reg2 is a constant
 */
static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
				  u8 opcode, bool is_jmp32)
{
	struct tnum t1 = is_jmp32 ? tnum_subreg(reg1->var_off) : reg1->var_off;
	struct tnum t2 = is_jmp32 ? tnum_subreg(reg2->var_off) : reg2->var_off;
	u64 umin1 = is_jmp32 ? (u64)reg1->u32_min_value : reg1->umin_value;
	u64 umax1 = is_jmp32 ? (u64)reg1->u32_max_value : reg1->umax_value;
	s64 smin1 = is_jmp32 ? (s64)reg1->s32_min_value : reg1->smin_value;
	s64 smax1 = is_jmp32 ? (s64)reg1->s32_max_value : reg1->smax_value;
	u64 umin2 = is_jmp32 ? (u64)reg2->u32_min_value : reg2->umin_value;
	u64 umax2 = is_jmp32 ? (u64)reg2->u32_max_value : reg2->umax_value;
	s64 smin2 = is_jmp32 ? (s64)reg2->s32_min_value : reg2->smin_value;
	s64 smax2 = is_jmp32 ? (s64)reg2->s32_max_value : reg2->smax_value;

	switch (opcode) {
	case BPF_JEQ:
		/* constants, umin/umax and smin/smax checks would be
		 * redundant in this case because they all should match
		 */
		if (tnum_is_const(t1) && tnum_is_const(t2))
			return t1.value == t2.value;
		/* non-overlapping ranges */
		if (umin1 > umax2 || umax1 < umin2)
			return 0;
		if (smin1 > smax2 || smax1 < smin2)
			return 0;
		if (!is_jmp32) {
			/* if 64-bit ranges are inconclusive, see if we can
			 * utilize 32-bit subrange knowledge to eliminate
			 * branches that can't be taken a priori
			 */
			if (reg1->u32_min_value > reg2->u32_max_value ||
			    reg1->u32_max_value < reg2->u32_min_value)
				return 0;
			if (reg1->s32_min_value > reg2->s32_max_value ||
			    reg1->s32_max_value < reg2->s32_min_value)
				return 0;
		}
		break;
	case BPF_JNE:
		/* constants, umin/umax and smin/smax checks would be
		 * redundant in this case because they all should match
		 */
		if (tnum_is_const(t1) && tnum_is_const(t2))
			return t1.value != t2.value;
		/* non-overlapping ranges */
		if (umin1 > umax2 || umax1 < umin2)
			return 1;
		if (smin1 > smax2 || smax1 < smin2)
			return 1;
		if (!is_jmp32) {
			/* if 64-bit ranges are inconclusive, see if we can
			 * utilize 32-bit subrange knowledge to eliminate
			 * branches that can't be taken a priori
			 */
			if (reg1->u32_min_value > reg2->u32_max_value ||
			    reg1->u32_max_value < reg2->u32_min_value)
				return 1;
			if (reg1->s32_min_value > reg2->s32_max_value ||
			    reg1->s32_max_value < reg2->s32_min_value)
				return 1;
		}
		break;
	case BPF_JSET:
		if (!is_reg_const(reg2, is_jmp32)) {
			swap(reg1, reg2);
			swap(t1, t2);
		}
		if (!is_reg_const(reg2, is_jmp32))
			return -1;
		if ((~t1.mask & t1.value) & t2.value)
			return 1;
		if (!((t1.mask | t1.value) & t2.value))
			return 0;
		break;
	case BPF_JGT:
		if (umin1 > umax2)
			return 1;
		else if (umax1 <= umin2)
			return 0;
		break;
	case BPF_JSGT:
		if (smin1 > smax2)
			return 1;
		else if (smax1 <= smin2)
			return 0;
		break;
	case BPF_JLT:
		if (umax1 < umin2)
			return 1;
		else if (umin1 >= umax2)
			return 0;
		break;
	case BPF_JSLT:
		if (smax1 < smin2)
			return 1;
		else if (smin1 >= smax2)
			return 0;
		break;
	case BPF_JGE:
		if (umin1 >= umax2)
			return 1;
		else if (umax1 < umin2)
			return 0;
		break;
	case BPF_JSGE:
		if (smin1 >= smax2)
			return 1;
		else if (smax1 < smin2)
			return 0;
		break;
	case BPF_JLE:
		if (umax1 <= umin2)
			return 1;
		else if (umin1 > umax2)
			return 0;
		break;
	case BPF_JSLE:
		if (smax1 <= smin2)
			return 1;
		else if (smin1 > smax2)
			return 0;
		break;
	}

	return -1;
}
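
/* Example (illustrative): with reg1 bounded to [5, 10] and reg2 a constant 20,
 * BPF_JLT is always taken (umax1 < umin2 -> return 1), BPF_JGE can never be
 * taken (umax1 < umin2 -> return 0), and overlapping ranges return -1 so the
 * verifier explores both branches.
 */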
static int flip_opcode(u32 opcode)
{
	/* How can we transform "a <op> b" into "b <op> a"? */
	static const u8 opcode_flip[16] = {
		/* these stay the same */
		[BPF_JEQ  >> 4] = BPF_JEQ,
		[BPF_JNE  >> 4] = BPF_JNE,
		[BPF_JSET >> 4] = BPF_JSET,
		/* these swap "lesser" and "greater" (L and G in the opcodes) */
		[BPF_JGE  >> 4] = BPF_JLE,
		[BPF_JGT  >> 4] = BPF_JLT,
		[BPF_JLE  >> 4] = BPF_JGE,
		[BPF_JLT  >> 4] = BPF_JGT,
		[BPF_JSGE >> 4] = BPF_JSLE,
		[BPF_JSGT >> 4] = BPF_JSLT,
		[BPF_JSLE >> 4] = BPF_JSGE,
		[BPF_JSLT >> 4] = BPF_JSGT
	};
	return opcode_flip[opcode >> 4];
}
static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
				   struct bpf_reg_state *src_reg,
				   u8 opcode)
{
	struct bpf_reg_state *pkt;

	if (src_reg->type == PTR_TO_PACKET_END) {
		pkt = dst_reg;
	} else if (dst_reg->type == PTR_TO_PACKET_END) {
		pkt = src_reg;
		opcode = flip_opcode(opcode);
	} else {
		return -1;
	}

	if (pkt->range >= 0)
		return -1;

	switch (opcode) {
	case BPF_JLE:
		/* pkt <= pkt_end */
		fallthrough;
	case BPF_JGT:
		/* pkt > pkt_end */
		if (pkt->range == BEYOND_PKT_END)
			/* pkt has at least one extra byte beyond pkt_end */
			return opcode == BPF_JGT;
		break;
	case BPF_JLT:
		/* pkt < pkt_end */
		fallthrough;
	case BPF_JGE:
		/* pkt >= pkt_end */
		if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
			return opcode == BPF_JGE;
		break;
	}
	return -1;
}
/* compute branch direction of the expression "if (<reg1> opcode <reg2>) goto target;"
 * and return:
 *  1 - branch will be taken and "goto target" will be executed
 *  0 - branch will not be taken and fall-through to next insn
 * -1 - unknown. Example: "if (reg1 < 5)" is unknown when register value
 *      range [0,10]
 */
static int is_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
			   u8 opcode, bool is_jmp32)
{
	if (reg_is_pkt_pointer_any(reg1) && reg_is_pkt_pointer_any(reg2) && !is_jmp32)
		return is_pkt_ptr_branch_taken(reg1, reg2, opcode);

	if (__is_pointer_value(false, reg1) || __is_pointer_value(false, reg2)) {
		u64 val;

		/* arrange that reg2 is a scalar, and reg1 is a pointer */
		if (!is_reg_const(reg2, is_jmp32)) {
			opcode = flip_opcode(opcode);
			swap(reg1, reg2);
		}
		/* and ensure that reg2 is a constant */
		if (!is_reg_const(reg2, is_jmp32))
			return -1;

		if (!reg_not_null(reg1))
			return -1;

		/* If pointer is valid tests against zero will fail so we can
		 * use this to direct branch taken.
		 */
		val = reg_const_value(reg2, is_jmp32);
		if (val != 0)
			return -1;

		switch (opcode) {
		case BPF_JEQ:
			return 0;
		case BPF_JNE:
			return 1;
		default:
			return -1;
		}
	}

	/* now deal with two scalars, but not necessarily constants */
	return is_scalar_branch_taken(reg1, reg2, opcode, is_jmp32);
}
/* Opcode that corresponds to a *false* branch condition.
 * E.g., if r1 < r2, then reverse (false) condition is r1 >= r2
 */
static u8 rev_opcode(u8 opcode)
{
	switch (opcode) {
	case BPF_JEQ:		return BPF_JNE;
	case BPF_JNE:		return BPF_JEQ;
	/* JSET doesn't have its reverse opcode in BPF, so add
	 * BPF_X flag to denote the reverse of that operation
	 */
	case BPF_JSET:		return BPF_JSET | BPF_X;
	case BPF_JSET | BPF_X:	return BPF_JSET;
	case BPF_JGE:		return BPF_JLT;
	case BPF_JGT:		return BPF_JLE;
	case BPF_JLE:		return BPF_JGT;
	case BPF_JLT:		return BPF_JGE;
	case BPF_JSGE:		return BPF_JSLT;
	case BPF_JSGT:		return BPF_JSLE;
	case BPF_JSLE:		return BPF_JSGT;
	case BPF_JSLT:		return BPF_JSGE;
	default:		return 0;
	}
}
/* Refine range knowledge for <reg1> <op> <reg2> conditional operation. */
static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
				u8 opcode, bool is_jmp32)
{
	struct tnum t;
	u64 val;

	/* In case of GE/GT/SGE/SGT, reuse LE/LT/SLE/SLT logic from below */
	switch (opcode) {
	case BPF_JGE:
	case BPF_JGT:
	case BPF_JSGE:
	case BPF_JSGT:
		opcode = flip_opcode(opcode);
		swap(reg1, reg2);
		break;
	default:
		break;
	}

	switch (opcode) {
	case BPF_JEQ:
		if (is_jmp32) {
			reg1->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value);
			reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value);
			reg1->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value);
			reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value);
			reg2->u32_min_value = reg1->u32_min_value;
			reg2->u32_max_value = reg1->u32_max_value;
			reg2->s32_min_value = reg1->s32_min_value;
			reg2->s32_max_value = reg1->s32_max_value;

			t = tnum_intersect(tnum_subreg(reg1->var_off), tnum_subreg(reg2->var_off));
			reg1->var_off = tnum_with_subreg(reg1->var_off, t);
			reg2->var_off = tnum_with_subreg(reg2->var_off, t);
		} else {
			reg1->umin_value = max(reg1->umin_value, reg2->umin_value);
			reg1->umax_value = min(reg1->umax_value, reg2->umax_value);
			reg1->smin_value = max(reg1->smin_value, reg2->smin_value);
			reg1->smax_value = min(reg1->smax_value, reg2->smax_value);
			reg2->umin_value = reg1->umin_value;
			reg2->umax_value = reg1->umax_value;
			reg2->smin_value = reg1->smin_value;
			reg2->smax_value = reg1->smax_value;

			reg1->var_off = tnum_intersect(reg1->var_off, reg2->var_off);
			reg2->var_off = reg1->var_off;
		}
		break;
	case BPF_JNE:
		if (!is_reg_const(reg2, is_jmp32))
			swap(reg1, reg2);
		if (!is_reg_const(reg2, is_jmp32))
			break;

		/* try to recompute the bound of reg1 if reg2 is a const and
		 * is exactly the edge of reg1.
		 */
		val = reg_const_value(reg2, is_jmp32);
		if (is_jmp32) {
			/* u32_min_value is not equal to 0xffffffff at this point,
			 * because otherwise u32_max_value is 0xffffffff as well,
			 * in such a case both reg1 and reg2 would be constants,
			 * jump would be predicted and reg_set_min_max() won't
			 * be called.
			 *
			 * Same reasoning works for all {u,s}{min,max}{32,64} cases
			 * below.
			 */
			if (reg1->u32_min_value == (u32)val)
				reg1->u32_min_value++;
			if (reg1->u32_max_value == (u32)val)
				reg1->u32_max_value--;
			if (reg1->s32_min_value == (s32)val)
				reg1->s32_min_value++;
			if (reg1->s32_max_value == (s32)val)
				reg1->s32_max_value--;
		} else {
			if (reg1->umin_value == (u64)val)
				reg1->umin_value++;
			if (reg1->umax_value == (u64)val)
				reg1->umax_value--;
			if (reg1->smin_value == (s64)val)
				reg1->smin_value++;
			if (reg1->smax_value == (s64)val)
				reg1->smax_value--;
		}
		break;
	case BPF_JSET:
		if (!is_reg_const(reg2, is_jmp32))
			swap(reg1, reg2);
		if (!is_reg_const(reg2, is_jmp32))
			break;
		val = reg_const_value(reg2, is_jmp32);
		/* BPF_JSET (i.e., TRUE branch, *not* BPF_JSET | BPF_X)
		 * requires single bit to learn something useful. E.g., if we
		 * know that `r1 & 0x3` is true, then which bits (0, 1, or both)
		 * are actually set? We can learn something definite only if
		 * it's a single-bit value to begin with.
		 *
		 * BPF_JSET | BPF_X (i.e., negation of BPF_JSET) doesn't have
		 * this restriction. I.e., !(r1 & 0x3) means neither bit 0 nor
		 * bit 1 is set, which we can readily use in adjustments.
		 */
		if (!is_power_of_2(val))
			break;
		if (is_jmp32) {
			t = tnum_or(tnum_subreg(reg1->var_off), tnum_const(val));
			reg1->var_off = tnum_with_subreg(reg1->var_off, t);
		} else {
			reg1->var_off = tnum_or(reg1->var_off, tnum_const(val));
		}
		break;
	case BPF_JSET | BPF_X: /* reverse of BPF_JSET, see rev_opcode() */
		if (!is_reg_const(reg2, is_jmp32))
			swap(reg1, reg2);
		if (!is_reg_const(reg2, is_jmp32))
			break;
		val = reg_const_value(reg2, is_jmp32);
		if (is_jmp32) {
			t = tnum_and(tnum_subreg(reg1->var_off), tnum_const(~val));
			reg1->var_off = tnum_with_subreg(reg1->var_off, t);
		} else {
			reg1->var_off = tnum_and(reg1->var_off, tnum_const(~val));
		}
		break;
	case BPF_JLE:
		if (is_jmp32) {
			reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value);
			reg2->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value);
		} else {
			reg1->umax_value = min(reg1->umax_value, reg2->umax_value);
			reg2->umin_value = max(reg1->umin_value, reg2->umin_value);
		}
		break;
	case BPF_JLT:
		if (is_jmp32) {
			reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value - 1);
			reg2->u32_min_value = max(reg1->u32_min_value + 1, reg2->u32_min_value);
		} else {
			reg1->umax_value = min(reg1->umax_value, reg2->umax_value - 1);
			reg2->umin_value = max(reg1->umin_value + 1, reg2->umin_value);
		}
		break;
	case BPF_JSLE:
		if (is_jmp32) {
			reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value);
			reg2->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value);
		} else {
			reg1->smax_value = min(reg1->smax_value, reg2->smax_value);
			reg2->smin_value = max(reg1->smin_value, reg2->smin_value);
		}
		break;
	case BPF_JSLT:
		if (is_jmp32) {
			reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value - 1);
			reg2->s32_min_value = max(reg1->s32_min_value + 1, reg2->s32_min_value);
		} else {
			reg1->smax_value = min(reg1->smax_value, reg2->smax_value - 1);
			reg2->smin_value = max(reg1->smin_value + 1, reg2->smin_value);
		}
		break;
	default:
		break;
	}
}
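
/* Example (illustrative): for "if r1 != 10" with r1 in [0, 10] and r2 a
 * constant 10, the TRUE branch hits the BPF_JNE case above and trims the edge
 * so r1 is in [0, 9]; the FALSE branch is refined via rev_opcode() (BPF_JEQ),
 * which intersects the ranges and leaves r1 known to be exactly 10.
 */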
/* Adjusts the register min/max values in the case that the dst_reg and
 * src_reg are both SCALAR_VALUE registers (or we are simply doing a BPF_K
 * check, in which case we have a fake SCALAR_VALUE representing insn->imm).
 * Technically we can do similar adjustments for pointers to the same object,
 * but we don't support that right now.
 */
static int reg_set_min_max(struct bpf_verifier_env *env,
			   struct bpf_reg_state *true_reg1,
			   struct bpf_reg_state *true_reg2,
			   struct bpf_reg_state *false_reg1,
			   struct bpf_reg_state *false_reg2,
			   u8 opcode, bool is_jmp32)
{
	int err;

	/* If either register is a pointer, we can't learn anything about its
	 * variable offset from the compare (unless they were a pointer into
	 * the same object, but we don't bother with that).
	 */
	if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE)
		return 0;

	/* fallthrough (FALSE) branch */
	regs_refine_cond_op(false_reg1, false_reg2, rev_opcode(opcode), is_jmp32);
	reg_bounds_sync(false_reg1);
	reg_bounds_sync(false_reg2);

	/* jump (TRUE) branch */
	regs_refine_cond_op(true_reg1, true_reg2, opcode, is_jmp32);
	reg_bounds_sync(true_reg1);
	reg_bounds_sync(true_reg2);

	err = reg_bounds_sanity_check(env, true_reg1, "true_reg1");
	err = err ?: reg_bounds_sanity_check(env, true_reg2, "true_reg2");
	err = err ?: reg_bounds_sanity_check(env, false_reg1, "false_reg1");
	err = err ?: reg_bounds_sanity_check(env, false_reg2, "false_reg2");

	return err;
}
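/* Illustrative example (not from the original source): for the conditional
 *
 *   if r1 < 10 goto l;    // r1 is a scalar known to be in [0, 100]
 *
 * the jump (TRUE) branch refines r1 to [0, 9], while the fall-through
 * (FALSE) branch, via the reversed opcode BPF_JGE, refines r1 to [10, 100].
 * The same refinement is applied to the 32-bit sub-range when is_jmp32 is
 * set.
 */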
static void mark_ptr_or_null_reg(struct bpf_func_state *state,
				 struct bpf_reg_state *reg, u32 id,
				 bool is_null)
{
	if (type_may_be_null(reg->type) && reg->id == id &&
	    (is_rcu_reg(reg) || !WARN_ON_ONCE(!reg->id))) {
		/* Old offset (both fixed and variable parts) should have been
		 * known-zero, because we don't allow pointer arithmetic on
		 * pointers that might be NULL. If we see this happening, don't
		 * convert the register.
		 *
		 * But in some cases, some helpers that return local kptrs
		 * advance offset for the returned pointer. In those cases, it
		 * is fine to expect to see reg->off.
		 */
		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0)))
			return;
		if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) &&
		    WARN_ON_ONCE(reg->off))
			return;

		if (is_null) {
			reg->type = SCALAR_VALUE;
			/* We don't need id and ref_obj_id from this point
			 * onwards anymore, thus we should better reset it,
			 * so that state pruning has chances to take effect.
			 */
			reg->id = 0;
			reg->ref_obj_id = 0;

			return;
		}

		mark_ptr_not_null_reg(reg);

		if (!reg_may_point_to_spin_lock(reg)) {
			/* For not-NULL ptr, reg->ref_obj_id will be reset
			 * in release_reference().
			 *
			 * reg->id is still used by spin_lock ptr. Other
			 * than spin_lock ptr type, reg->id can be reset.
			 */
			reg->id = 0;
		}
	}
}
/* The logic is similar to find_good_pkt_pointers(), both could eventually
 * be folded together at some point.
 */
static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
				  bool is_null)
{
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *regs = state->regs, *reg;
	u32 ref_obj_id = regs[regno].ref_obj_id;
	u32 id = regs[regno].id;

	if (ref_obj_id && ref_obj_id == id && is_null)
		/* regs[regno] is in the " == NULL" branch.
		 * No one could have freed the reference state before
		 * doing the NULL check.
		 */
		WARN_ON_ONCE(release_reference_state(state, id));

	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
		mark_ptr_or_null_reg(state, reg, id, is_null);
	}));
}
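/* Illustrative example (not from the original source): the nullness
 * propagation above is what makes the canonical map lookup pattern verify:
 *
 *   val = bpf_map_lookup_elem(&map, &key);  // R0: map value or NULL, id = N
 *   if (!val)                               // JEQ R0, 0
 *           return 0;                       // == NULL branch: id N regs become scalars
 *   *val = 1;                               // != NULL branch: id N regs are PTR_TO_MAP_VALUE
 *
 * Every register that shares id N (e.g. a copy made before the check) is
 * converted consistently in each branch.
 */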
static bool try_match_pkt_pointers(const struct bpf_insn *insn,
				   struct bpf_reg_state *dst_reg,
				   struct bpf_reg_state *src_reg,
				   struct bpf_verifier_state *this_branch,
				   struct bpf_verifier_state *other_branch)
{
	if (BPF_SRC(insn->code) != BPF_X)
		return false;

	/* Pointers are always 64-bit. */
	if (BPF_CLASS(insn->code) == BPF_JMP32)
		return false;

	switch (BPF_OP(insn->code)) {
	case BPF_JGT:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
			find_good_pkt_pointers(this_branch, dst_reg,
					       dst_reg->type, false);
			mark_pkt_end(other_branch, insn->dst_reg, true);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
			find_good_pkt_pointers(other_branch, src_reg,
					       src_reg->type, true);
			mark_pkt_end(this_branch, insn->src_reg, false);
		} else {
			return false;
		}
		break;
	case BPF_JLT:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
			find_good_pkt_pointers(other_branch, dst_reg,
					       dst_reg->type, true);
			mark_pkt_end(this_branch, insn->dst_reg, false);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end < pkt_data', pkt_data > pkt_meta' */
			find_good_pkt_pointers(this_branch, src_reg,
					       src_reg->type, false);
			mark_pkt_end(other_branch, insn->src_reg, true);
		} else {
			return false;
		}
		break;
	case BPF_JGE:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
			find_good_pkt_pointers(this_branch, dst_reg,
					       dst_reg->type, true);
			mark_pkt_end(other_branch, insn->dst_reg, false);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
			find_good_pkt_pointers(other_branch, src_reg,
					       src_reg->type, false);
			mark_pkt_end(this_branch, insn->src_reg, true);
		} else {
			return false;
		}
		break;
	case BPF_JLE:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
			find_good_pkt_pointers(other_branch, dst_reg,
					       dst_reg->type, false);
			mark_pkt_end(this_branch, insn->dst_reg, true);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
			find_good_pkt_pointers(this_branch, src_reg,
					       src_reg->type, true);
			mark_pkt_end(other_branch, insn->src_reg, false);
		} else {
			return false;
		}
		break;
	default:
		return false;
	}

	return true;
}
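/* Illustrative sketch (not part of the original source): the pointer
 * comparisons handled above are what a typical XDP/TC bounds check
 * compiles to, e.g.:
 *
 *   void *data     = (void *)(long)ctx->data;
 *   void *data_end = (void *)(long)ctx->data_end;
 *   struct ethhdr *eth = data;
 *
 *   if (data + sizeof(*eth) > data_end)    // pkt_data' > pkt_end
 *           return XDP_DROP;
 *   // in the fall-through branch the verifier now knows that
 *   // sizeof(*eth) bytes starting at data are within the packet
 */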
static void __collect_linked_regs(struct linked_regs *reg_set, struct bpf_reg_state *reg,
				  u32 id, u32 frameno, u32 spi_or_reg, bool is_reg)
{
	struct linked_reg *e;

	if (reg->type != SCALAR_VALUE || (reg->id & ~BPF_ADD_CONST) != id)
		return;

	e = linked_regs_push(reg_set);
	if (e) {
		e->frameno = frameno;
		e->is_reg = is_reg;
		e->regno = spi_or_reg;
	} else {
		reg->id = 0;
	}
}
/* For all R being scalar registers or spilled scalar registers
 * in verifier state, save R in linked_regs if R->id == id.
 * If there are too many Rs sharing same id, reset id for leftover Rs.
 */
static void collect_linked_regs(struct bpf_verifier_state *vstate, u32 id,
				struct linked_regs *linked_regs)
{
	struct bpf_func_state *func;
	struct bpf_reg_state *reg;
	int i, j;

	id = id & ~BPF_ADD_CONST;
	for (i = vstate->curframe; i >= 0; i--) {
		func = vstate->frame[i];
		for (j = 0; j < BPF_REG_FP; j++) {
			reg = &func->regs[j];
			__collect_linked_regs(linked_regs, reg, id, i, j, true);
		}
		for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
			if (!is_spilled_reg(&func->stack[j]))
				continue;
			reg = &func->stack[j].spilled_ptr;
			__collect_linked_regs(linked_regs, reg, id, i, j, false);
		}
	}
}
/* For all R in linked_regs, copy known_reg range into R
 * if R->id == known_reg->id.
 */
static void sync_linked_regs(struct bpf_verifier_state *vstate, struct bpf_reg_state *known_reg,
			     struct linked_regs *linked_regs)
{
	struct bpf_reg_state fake_reg;
	struct bpf_reg_state *reg;
	struct linked_reg *e;
	int i;

	for (i = 0; i < linked_regs->cnt; ++i) {
		e = &linked_regs->entries[i];
		reg = e->is_reg ? &vstate->frame[e->frameno]->regs[e->regno]
				: &vstate->frame[e->frameno]->stack[e->spi].spilled_ptr;
		if (reg->type != SCALAR_VALUE || reg == known_reg)
			continue;
		if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST))
			continue;
		if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) ||
		    reg->off == known_reg->off) {
			s32 saved_subreg_def = reg->subreg_def;

			copy_register_state(reg, known_reg);
			reg->subreg_def = saved_subreg_def;
		} else {
			s32 saved_subreg_def = reg->subreg_def;
			s32 saved_off = reg->off;

			fake_reg.type = SCALAR_VALUE;
			__mark_reg_known(&fake_reg, (s32)reg->off - (s32)known_reg->off);

			/* reg = known_reg; reg += delta */
			copy_register_state(reg, known_reg);

			/*
			 * Must preserve off, id and add_const flag,
			 * otherwise another sync_linked_regs() will be incorrect.
			 */
			reg->off = saved_off;
			reg->subreg_def = saved_subreg_def;

			scalar32_min_max_add(reg, &fake_reg);
			scalar_min_max_add(reg, &fake_reg);
			reg->var_off = tnum_add(reg->var_off, fake_reg.var_off);
		}
	}
}
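/* Illustrative example (not from the original source) of why linked
 * registers are tracked. Given:
 *
 *   r2 = r1;                 // r1 and r2 now share an id
 *   r2 += 8;                 // r2 keeps the id with BPF_ADD_CONST, off = 8
 *   if r1 > 100 goto err;
 *
 * sync_linked_regs() lets the fall-through branch refine r2 as well:
 * r1 is known to be in [0, 100], so r2 is known to be in [8, 108].
 */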
15606 static int check_cond_jmp_op(struct bpf_verifier_env
*env
,
15607 struct bpf_insn
*insn
, int *insn_idx
)
15609 struct bpf_verifier_state
*this_branch
= env
->cur_state
;
15610 struct bpf_verifier_state
*other_branch
;
15611 struct bpf_reg_state
*regs
= this_branch
->frame
[this_branch
->curframe
]->regs
;
15612 struct bpf_reg_state
*dst_reg
, *other_branch_regs
, *src_reg
= NULL
;
15613 struct bpf_reg_state
*eq_branch_regs
;
15614 struct linked_regs linked_regs
= {};
15615 u8 opcode
= BPF_OP(insn
->code
);
15620 /* Only conditional jumps are expected to reach here. */
15621 if (opcode
== BPF_JA
|| opcode
> BPF_JCOND
) {
15622 verbose(env
, "invalid BPF_JMP/JMP32 opcode %x\n", opcode
);
15626 if (opcode
== BPF_JCOND
) {
15627 struct bpf_verifier_state
*cur_st
= env
->cur_state
, *queued_st
, *prev_st
;
15628 int idx
= *insn_idx
;
15630 if (insn
->code
!= (BPF_JMP
| BPF_JCOND
) ||
15631 insn
->src_reg
!= BPF_MAY_GOTO
||
15632 insn
->dst_reg
|| insn
->imm
|| insn
->off
== 0) {
15633 verbose(env
, "invalid may_goto off %d imm %d\n",
15634 insn
->off
, insn
->imm
);
15637 prev_st
= find_prev_entry(env
, cur_st
->parent
, idx
);
15639 /* branch out 'fallthrough' insn as a new state to explore */
15640 queued_st
= push_stack(env
, idx
+ 1, idx
, false);
15644 queued_st
->may_goto_depth
++;
15646 widen_imprecise_scalars(env
, prev_st
, queued_st
);
15647 *insn_idx
+= insn
->off
;
15651 /* check src2 operand */
15652 err
= check_reg_arg(env
, insn
->dst_reg
, SRC_OP
);
15656 dst_reg
= ®s
[insn
->dst_reg
];
15657 if (BPF_SRC(insn
->code
) == BPF_X
) {
15658 if (insn
->imm
!= 0) {
15659 verbose(env
, "BPF_JMP/JMP32 uses reserved fields\n");
15663 /* check src1 operand */
15664 err
= check_reg_arg(env
, insn
->src_reg
, SRC_OP
);
15668 src_reg
= ®s
[insn
->src_reg
];
15669 if (!(reg_is_pkt_pointer_any(dst_reg
) && reg_is_pkt_pointer_any(src_reg
)) &&
15670 is_pointer_value(env
, insn
->src_reg
)) {
15671 verbose(env
, "R%d pointer comparison prohibited\n",
15676 if (insn
->src_reg
!= BPF_REG_0
) {
15677 verbose(env
, "BPF_JMP/JMP32 uses reserved fields\n");
15680 src_reg
= &env
->fake_reg
[0];
15681 memset(src_reg
, 0, sizeof(*src_reg
));
15682 src_reg
->type
= SCALAR_VALUE
;
15683 __mark_reg_known(src_reg
, insn
->imm
);
15686 is_jmp32
= BPF_CLASS(insn
->code
) == BPF_JMP32
;
15687 pred
= is_branch_taken(dst_reg
, src_reg
, opcode
, is_jmp32
);
15689 /* If we get here with a dst_reg pointer type it is because
15690 * above is_branch_taken() special cased the 0 comparison.
15692 if (!__is_pointer_value(false, dst_reg
))
15693 err
= mark_chain_precision(env
, insn
->dst_reg
);
15694 if (BPF_SRC(insn
->code
) == BPF_X
&& !err
&&
15695 !__is_pointer_value(false, src_reg
))
15696 err
= mark_chain_precision(env
, insn
->src_reg
);
15702 /* Only follow the goto, ignore fall-through. If needed, push
15703 * the fall-through branch for simulation under speculative
15706 if (!env
->bypass_spec_v1
&&
15707 !sanitize_speculative_path(env
, insn
, *insn_idx
+ 1,
15710 if (env
->log
.level
& BPF_LOG_LEVEL
)
15711 print_insn_state(env
, this_branch
->frame
[this_branch
->curframe
]);
15712 *insn_idx
+= insn
->off
;
15714 } else if (pred
== 0) {
15715 /* Only follow the fall-through branch, since that's where the
15716 * program will go. If needed, push the goto branch for
15717 * simulation under speculative execution.
15719 if (!env
->bypass_spec_v1
&&
15720 !sanitize_speculative_path(env
, insn
,
15721 *insn_idx
+ insn
->off
+ 1,
15724 if (env
->log
.level
& BPF_LOG_LEVEL
)
15725 print_insn_state(env
, this_branch
->frame
[this_branch
->curframe
]);
15729 /* Push scalar registers sharing same ID to jump history,
15730 * do this before creating 'other_branch', so that both
15731 * 'this_branch' and 'other_branch' share this history
15732 * if parent state is created.
15734 if (BPF_SRC(insn
->code
) == BPF_X
&& src_reg
->type
== SCALAR_VALUE
&& src_reg
->id
)
15735 collect_linked_regs(this_branch
, src_reg
->id
, &linked_regs
);
15736 if (dst_reg
->type
== SCALAR_VALUE
&& dst_reg
->id
)
15737 collect_linked_regs(this_branch
, dst_reg
->id
, &linked_regs
);
15738 if (linked_regs
.cnt
> 1) {
15739 err
= push_insn_history(env
, this_branch
, 0, linked_regs_pack(&linked_regs
));
15744 other_branch
= push_stack(env
, *insn_idx
+ insn
->off
+ 1, *insn_idx
,
15748 other_branch_regs
= other_branch
->frame
[other_branch
->curframe
]->regs
;
15750 if (BPF_SRC(insn
->code
) == BPF_X
) {
15751 err
= reg_set_min_max(env
,
15752 &other_branch_regs
[insn
->dst_reg
],
15753 &other_branch_regs
[insn
->src_reg
],
15754 dst_reg
, src_reg
, opcode
, is_jmp32
);
15755 } else /* BPF_SRC(insn->code) == BPF_K */ {
15756 /* reg_set_min_max() can mangle the fake_reg. Make a copy
15757 * so that these are two different memory locations. The
15758 * src_reg is not used beyond here in context of K.
15760 memcpy(&env
->fake_reg
[1], &env
->fake_reg
[0],
15761 sizeof(env
->fake_reg
[0]));
15762 err
= reg_set_min_max(env
,
15763 &other_branch_regs
[insn
->dst_reg
],
15765 dst_reg
, &env
->fake_reg
[1],
15771 if (BPF_SRC(insn
->code
) == BPF_X
&&
15772 src_reg
->type
== SCALAR_VALUE
&& src_reg
->id
&&
15773 !WARN_ON_ONCE(src_reg
->id
!= other_branch_regs
[insn
->src_reg
].id
)) {
15774 sync_linked_regs(this_branch
, src_reg
, &linked_regs
);
15775 sync_linked_regs(other_branch
, &other_branch_regs
[insn
->src_reg
], &linked_regs
);
15777 if (dst_reg
->type
== SCALAR_VALUE
&& dst_reg
->id
&&
15778 !WARN_ON_ONCE(dst_reg
->id
!= other_branch_regs
[insn
->dst_reg
].id
)) {
15779 sync_linked_regs(this_branch
, dst_reg
, &linked_regs
);
15780 sync_linked_regs(other_branch
, &other_branch_regs
[insn
->dst_reg
], &linked_regs
);
15783 /* if one pointer register is compared to another pointer
15784 * register check if PTR_MAYBE_NULL could be lifted.
15785 * E.g. register A - maybe null
15786 * register B - not null
15787 * for JNE A, B, ... - A is not null in the false branch;
15788 * for JEQ A, B, ... - A is not null in the true branch.
15790 * Since PTR_TO_BTF_ID points to a kernel struct that does
15791 * not need to be null checked by the BPF program, i.e.,
15792 * could be null even without PTR_MAYBE_NULL marking, so
15793 * only propagate nullness when neither reg is that type.
15795 if (!is_jmp32
&& BPF_SRC(insn
->code
) == BPF_X
&&
15796 __is_pointer_value(false, src_reg
) && __is_pointer_value(false, dst_reg
) &&
15797 type_may_be_null(src_reg
->type
) != type_may_be_null(dst_reg
->type
) &&
15798 base_type(src_reg
->type
) != PTR_TO_BTF_ID
&&
15799 base_type(dst_reg
->type
) != PTR_TO_BTF_ID
) {
15800 eq_branch_regs
= NULL
;
15803 eq_branch_regs
= other_branch_regs
;
15806 eq_branch_regs
= regs
;
15812 if (eq_branch_regs
) {
15813 if (type_may_be_null(src_reg
->type
))
15814 mark_ptr_not_null_reg(&eq_branch_regs
[insn
->src_reg
]);
15816 mark_ptr_not_null_reg(&eq_branch_regs
[insn
->dst_reg
]);
15820 /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
15821 * NOTE: these optimizations below are related with pointer comparison
15822 * which will never be JMP32.
15824 if (!is_jmp32
&& BPF_SRC(insn
->code
) == BPF_K
&&
15825 insn
->imm
== 0 && (opcode
== BPF_JEQ
|| opcode
== BPF_JNE
) &&
15826 type_may_be_null(dst_reg
->type
)) {
15827 /* Mark all identical registers in each branch as either
15828 * safe or unknown depending R == 0 or R != 0 conditional.
15830 mark_ptr_or_null_regs(this_branch
, insn
->dst_reg
,
15831 opcode
== BPF_JNE
);
15832 mark_ptr_or_null_regs(other_branch
, insn
->dst_reg
,
15833 opcode
== BPF_JEQ
);
15834 } else if (!try_match_pkt_pointers(insn
, dst_reg
, ®s
[insn
->src_reg
],
15835 this_branch
, other_branch
) &&
15836 is_pointer_value(env
, insn
->dst_reg
)) {
15837 verbose(env
, "R%d pointer comparison prohibited\n",
15841 if (env
->log
.level
& BPF_LOG_LEVEL
)
15842 print_insn_state(env
, this_branch
->frame
[this_branch
->curframe
]);
15846 /* verify BPF_LD_IMM64 instruction */
15847 static int check_ld_imm(struct bpf_verifier_env
*env
, struct bpf_insn
*insn
)
15849 struct bpf_insn_aux_data
*aux
= cur_aux(env
);
15850 struct bpf_reg_state
*regs
= cur_regs(env
);
15851 struct bpf_reg_state
*dst_reg
;
15852 struct bpf_map
*map
;
15855 if (BPF_SIZE(insn
->code
) != BPF_DW
) {
15856 verbose(env
, "invalid BPF_LD_IMM insn\n");
15859 if (insn
->off
!= 0) {
15860 verbose(env
, "BPF_LD_IMM64 uses reserved fields\n");
15864 err
= check_reg_arg(env
, insn
->dst_reg
, DST_OP
);
15868 dst_reg
= ®s
[insn
->dst_reg
];
15869 if (insn
->src_reg
== 0) {
15870 u64 imm
= ((u64
)(insn
+ 1)->imm
<< 32) | (u32
)insn
->imm
;
15872 dst_reg
->type
= SCALAR_VALUE
;
15873 __mark_reg_known(®s
[insn
->dst_reg
], imm
);
15877 /* All special src_reg cases are listed below. From this point onwards
15878 * we either succeed and assign a corresponding dst_reg->type after
15879 * zeroing the offset, or fail and reject the program.
15881 mark_reg_known_zero(env
, regs
, insn
->dst_reg
);
15883 if (insn
->src_reg
== BPF_PSEUDO_BTF_ID
) {
15884 dst_reg
->type
= aux
->btf_var
.reg_type
;
15885 switch (base_type(dst_reg
->type
)) {
15887 dst_reg
->mem_size
= aux
->btf_var
.mem_size
;
15889 case PTR_TO_BTF_ID
:
15890 dst_reg
->btf
= aux
->btf_var
.btf
;
15891 dst_reg
->btf_id
= aux
->btf_var
.btf_id
;
15894 verbose(env
, "bpf verifier is misconfigured\n");
15900 if (insn
->src_reg
== BPF_PSEUDO_FUNC
) {
15901 struct bpf_prog_aux
*aux
= env
->prog
->aux
;
15902 u32 subprogno
= find_subprog(env
,
15903 env
->insn_idx
+ insn
->imm
+ 1);
15905 if (!aux
->func_info
) {
15906 verbose(env
, "missing btf func_info\n");
15909 if (aux
->func_info_aux
[subprogno
].linkage
!= BTF_FUNC_STATIC
) {
15910 verbose(env
, "callback function not static\n");
15914 dst_reg
->type
= PTR_TO_FUNC
;
15915 dst_reg
->subprogno
= subprogno
;
15919 map
= env
->used_maps
[aux
->map_index
];
15920 dst_reg
->map_ptr
= map
;
15922 if (insn
->src_reg
== BPF_PSEUDO_MAP_VALUE
||
15923 insn
->src_reg
== BPF_PSEUDO_MAP_IDX_VALUE
) {
15924 if (map
->map_type
== BPF_MAP_TYPE_ARENA
) {
15925 __mark_reg_unknown(env
, dst_reg
);
15928 dst_reg
->type
= PTR_TO_MAP_VALUE
;
15929 dst_reg
->off
= aux
->map_off
;
15930 WARN_ON_ONCE(map
->max_entries
!= 1);
15931 /* We want reg->id to be same (0) as map_value is not distinct */
15932 } else if (insn
->src_reg
== BPF_PSEUDO_MAP_FD
||
15933 insn
->src_reg
== BPF_PSEUDO_MAP_IDX
) {
15934 dst_reg
->type
= CONST_PTR_TO_MAP
;
15936 verbose(env
, "bpf verifier is misconfigured\n");
15943 static bool may_access_skb(enum bpf_prog_type type
)
15946 case BPF_PROG_TYPE_SOCKET_FILTER
:
15947 case BPF_PROG_TYPE_SCHED_CLS
:
15948 case BPF_PROG_TYPE_SCHED_ACT
:
15955 /* verify safety of LD_ABS|LD_IND instructions:
15956 * - they can only appear in the programs where ctx == skb
15957 * - since they are wrappers of function calls, they scratch R1-R5 registers,
15958 * preserve R6-R9, and store return value into R0
15961 * ctx == skb == R6 == CTX
15964 * SRC == any register
15965 * IMM == 32-bit immediate
15968 * R0 - 8/16/32-bit skb data converted to cpu endianness
15970 static int check_ld_abs(struct bpf_verifier_env
*env
, struct bpf_insn
*insn
)
15972 struct bpf_reg_state
*regs
= cur_regs(env
);
15973 static const int ctx_reg
= BPF_REG_6
;
15974 u8 mode
= BPF_MODE(insn
->code
);
15977 if (!may_access_skb(resolve_prog_type(env
->prog
))) {
15978 verbose(env
, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
15982 if (!env
->ops
->gen_ld_abs
) {
15983 verbose(env
, "bpf verifier is misconfigured\n");
15987 if (insn
->dst_reg
!= BPF_REG_0
|| insn
->off
!= 0 ||
15988 BPF_SIZE(insn
->code
) == BPF_DW
||
15989 (mode
== BPF_ABS
&& insn
->src_reg
!= BPF_REG_0
)) {
15990 verbose(env
, "BPF_LD_[ABS|IND] uses reserved fields\n");
15994 /* check whether implicit source operand (register R6) is readable */
15995 err
= check_reg_arg(env
, ctx_reg
, SRC_OP
);
15999 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
16000 * gen_ld_abs() may terminate the program at runtime, leading to
16003 err
= check_resource_leak(env
, false, true, "BPF_LD_[ABS|IND]");
16007 if (regs
[ctx_reg
].type
!= PTR_TO_CTX
) {
16009 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
16013 if (mode
== BPF_IND
) {
16014 /* check explicit source operand */
16015 err
= check_reg_arg(env
, insn
->src_reg
, SRC_OP
);
16020 err
= check_ptr_off_reg(env
, ®s
[ctx_reg
], ctx_reg
);
16024 /* reset caller saved regs to unreadable */
16025 for (i
= 0; i
< CALLER_SAVED_REGS
; i
++) {
16026 mark_reg_not_init(env
, regs
, caller_saved
[i
]);
16027 check_reg_arg(env
, caller_saved
[i
], DST_OP_NO_MARK
);
16030 /* mark destination R0 register as readable, since it contains
16031 * the value fetched from the packet.
16032 * Already marked as written above.
16034 mark_reg_unknown(env
, regs
, BPF_REG_0
);
16035 /* ld_abs load up to 32-bit skb data. */
16036 regs
[BPF_REG_0
].subreg_def
= env
->insn_idx
+ 1;
16040 static int check_return_code(struct bpf_verifier_env
*env
, int regno
, const char *reg_name
)
16042 const char *exit_ctx
= "At program exit";
16043 struct tnum enforce_attach_type_range
= tnum_unknown
;
16044 const struct bpf_prog
*prog
= env
->prog
;
16045 struct bpf_reg_state
*reg
;
16046 struct bpf_retval_range range
= retval_range(0, 1);
16047 enum bpf_prog_type prog_type
= resolve_prog_type(env
->prog
);
16049 struct bpf_func_state
*frame
= env
->cur_state
->frame
[0];
16050 const bool is_subprog
= frame
->subprogno
;
16051 bool return_32bit
= false;
16053 /* LSM and struct_ops func-ptr's return type could be "void" */
16054 if (!is_subprog
|| frame
->in_exception_callback_fn
) {
16055 switch (prog_type
) {
16056 case BPF_PROG_TYPE_LSM
:
16057 if (prog
->expected_attach_type
== BPF_LSM_CGROUP
)
16058 /* See below, can be 0 or 0-1 depending on hook. */
16061 case BPF_PROG_TYPE_STRUCT_OPS
:
16062 if (!prog
->aux
->attach_func_proto
->type
)
16070 /* eBPF calling convention is such that R0 is used
16071 * to return the value from eBPF program.
16072 * Make sure that it's readable at this time
16073 * of bpf_exit, which means that program wrote
16074 * something into it earlier
16076 err
= check_reg_arg(env
, regno
, SRC_OP
);
16080 if (is_pointer_value(env
, regno
)) {
16081 verbose(env
, "R%d leaks addr as return value\n", regno
);
16085 reg
= cur_regs(env
) + regno
;
16087 if (frame
->in_async_callback_fn
) {
16088 /* enforce return zero from async callbacks like timer */
16089 exit_ctx
= "At async callback return";
16090 range
= retval_range(0, 0);
16091 goto enforce_retval
;
16094 if (is_subprog
&& !frame
->in_exception_callback_fn
) {
16095 if (reg
->type
!= SCALAR_VALUE
) {
16096 verbose(env
, "At subprogram exit the register R%d is not a scalar value (%s)\n",
16097 regno
, reg_type_str(env
, reg
->type
));
16103 switch (prog_type
) {
16104 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR
:
16105 if (env
->prog
->expected_attach_type
== BPF_CGROUP_UDP4_RECVMSG
||
16106 env
->prog
->expected_attach_type
== BPF_CGROUP_UDP6_RECVMSG
||
16107 env
->prog
->expected_attach_type
== BPF_CGROUP_UNIX_RECVMSG
||
16108 env
->prog
->expected_attach_type
== BPF_CGROUP_INET4_GETPEERNAME
||
16109 env
->prog
->expected_attach_type
== BPF_CGROUP_INET6_GETPEERNAME
||
16110 env
->prog
->expected_attach_type
== BPF_CGROUP_UNIX_GETPEERNAME
||
16111 env
->prog
->expected_attach_type
== BPF_CGROUP_INET4_GETSOCKNAME
||
16112 env
->prog
->expected_attach_type
== BPF_CGROUP_INET6_GETSOCKNAME
||
16113 env
->prog
->expected_attach_type
== BPF_CGROUP_UNIX_GETSOCKNAME
)
16114 range
= retval_range(1, 1);
16115 if (env
->prog
->expected_attach_type
== BPF_CGROUP_INET4_BIND
||
16116 env
->prog
->expected_attach_type
== BPF_CGROUP_INET6_BIND
)
16117 range
= retval_range(0, 3);
16119 case BPF_PROG_TYPE_CGROUP_SKB
:
16120 if (env
->prog
->expected_attach_type
== BPF_CGROUP_INET_EGRESS
) {
16121 range
= retval_range(0, 3);
16122 enforce_attach_type_range
= tnum_range(2, 3);
16125 case BPF_PROG_TYPE_CGROUP_SOCK
:
16126 case BPF_PROG_TYPE_SOCK_OPS
:
16127 case BPF_PROG_TYPE_CGROUP_DEVICE
:
16128 case BPF_PROG_TYPE_CGROUP_SYSCTL
:
16129 case BPF_PROG_TYPE_CGROUP_SOCKOPT
:
16131 case BPF_PROG_TYPE_RAW_TRACEPOINT
:
16132 if (!env
->prog
->aux
->attach_btf_id
)
16134 range
= retval_range(0, 0);
16136 case BPF_PROG_TYPE_TRACING
:
16137 switch (env
->prog
->expected_attach_type
) {
16138 case BPF_TRACE_FENTRY
:
16139 case BPF_TRACE_FEXIT
:
16140 range
= retval_range(0, 0);
16142 case BPF_TRACE_RAW_TP
:
16143 case BPF_MODIFY_RETURN
:
16145 case BPF_TRACE_ITER
:
16151 case BPF_PROG_TYPE_KPROBE
:
16152 switch (env
->prog
->expected_attach_type
) {
16153 case BPF_TRACE_KPROBE_SESSION
:
16154 case BPF_TRACE_UPROBE_SESSION
:
16155 range
= retval_range(0, 1);
16161 case BPF_PROG_TYPE_SK_LOOKUP
:
16162 range
= retval_range(SK_DROP
, SK_PASS
);
16165 case BPF_PROG_TYPE_LSM
:
16166 if (env
->prog
->expected_attach_type
!= BPF_LSM_CGROUP
) {
16167 /* no range found, any return value is allowed */
16168 if (!get_func_retval_range(env
->prog
, &range
))
16170 /* no restricted range, any return value is allowed */
16171 if (range
.minval
== S32_MIN
&& range
.maxval
== S32_MAX
)
16173 return_32bit
= true;
16174 } else if (!env
->prog
->aux
->attach_func_proto
->type
) {
16175 /* Make sure programs that attach to void
16176 * hooks don't try to modify return value.
16178 range
= retval_range(1, 1);
16182 case BPF_PROG_TYPE_NETFILTER
:
16183 range
= retval_range(NF_DROP
, NF_ACCEPT
);
16185 case BPF_PROG_TYPE_EXT
:
16186 /* freplace program can return anything as its return value
16187 * depends on the to-be-replaced kernel func or bpf program.
16194 if (reg
->type
!= SCALAR_VALUE
) {
16195 verbose(env
, "%s the register R%d is not a known value (%s)\n",
16196 exit_ctx
, regno
, reg_type_str(env
, reg
->type
));
16200 err
= mark_chain_precision(env
, regno
);
16204 if (!retval_range_within(range
, reg
, return_32bit
)) {
16205 verbose_invalid_scalar(env
, reg
, range
, exit_ctx
, reg_name
);
16207 prog
->expected_attach_type
== BPF_LSM_CGROUP
&&
16208 prog_type
== BPF_PROG_TYPE_LSM
&&
16209 !prog
->aux
->attach_func_proto
->type
)
16210 verbose(env
, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
16214 if (!tnum_is_unknown(enforce_attach_type_range
) &&
16215 tnum_in(enforce_attach_type_range
, reg
->var_off
))
16216 env
->prog
->enforce_expected_attach_type
= 1;
/* non-recursive DFS pseudo code
 * 1  procedure DFS-iterative(G,v):
 * 2      label v as discovered
 * 3      let S be a stack
 * 4      S.push(v)
 * 5      while S is not empty
 * 6            t <- S.peek()
 * 7            if t is what we're looking for:
 * 8                return t
 * 9            for all edges e in G.adjacentEdges(t) do
 * 10               if edge e is already labelled
 * 11                   continue with the next edge
 * 12               w <- G.adjacentVertex(t,e)
 * 13               if vertex w is not discovered and not explored
 * 14                   label e as tree-edge
 * 15                   label w as discovered
 * 16                   S.push(w)
 * 17                   continue
 * 18           else if vertex w is discovered
 * 19               label e as back-edge
 * 20           else
 * 21               // vertex w is explored
 * 22               label e as forward- or cross-edge
 * 23           label t as explored
 * 24           S.pop()
 *
 * convention:
 * 0x10 - discovered
 * 0x11 - discovered and fall-through edge labelled
 * 0x12 - discovered and fall-through and branch edges labelled
 * 0x20 - explored
 */
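/* Illustrative walk-through (not part of the original source): for a
 * two-insn program  0: r0 = 0;  1: goto -2;  insn 0 is first marked
 * DISCOVERED (0x10), its fall-through edge to insn 1 is labelled (0x11)
 * and insn 1 becomes DISCOVERED. Exploring insn 1 then pushes the edge
 * 1 -> 0, but insn 0 is still only DISCOVERED, so the edge is classified
 * as a back-edge; for programs that may not contain loops
 * (!env->bpf_capable) this is reported as "back-edge from insn 1 to 0".
 */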
16260 static void mark_prune_point(struct bpf_verifier_env
*env
, int idx
)
16262 env
->insn_aux_data
[idx
].prune_point
= true;
16265 static bool is_prune_point(struct bpf_verifier_env
*env
, int insn_idx
)
16267 return env
->insn_aux_data
[insn_idx
].prune_point
;
16270 static void mark_force_checkpoint(struct bpf_verifier_env
*env
, int idx
)
16272 env
->insn_aux_data
[idx
].force_checkpoint
= true;
16275 static bool is_force_checkpoint(struct bpf_verifier_env
*env
, int insn_idx
)
16277 return env
->insn_aux_data
[insn_idx
].force_checkpoint
;
16280 static void mark_calls_callback(struct bpf_verifier_env
*env
, int idx
)
16282 env
->insn_aux_data
[idx
].calls_callback
= true;
16285 static bool calls_callback(struct bpf_verifier_env
*env
, int insn_idx
)
16287 return env
->insn_aux_data
[insn_idx
].calls_callback
;
16291 DONE_EXPLORING
= 0,
16292 KEEP_EXPLORING
= 1,
16295 /* t, w, e - match pseudo-code above:
16296 * t - index of current instruction
16297 * w - next instruction
16300 static int push_insn(int t
, int w
, int e
, struct bpf_verifier_env
*env
)
16302 int *insn_stack
= env
->cfg
.insn_stack
;
16303 int *insn_state
= env
->cfg
.insn_state
;
16305 if (e
== FALLTHROUGH
&& insn_state
[t
] >= (DISCOVERED
| FALLTHROUGH
))
16306 return DONE_EXPLORING
;
16308 if (e
== BRANCH
&& insn_state
[t
] >= (DISCOVERED
| BRANCH
))
16309 return DONE_EXPLORING
;
16311 if (w
< 0 || w
>= env
->prog
->len
) {
16312 verbose_linfo(env
, t
, "%d: ", t
);
16313 verbose(env
, "jump out of range from insn %d to %d\n", t
, w
);
16318 /* mark branch target for state pruning */
16319 mark_prune_point(env
, w
);
16320 mark_jmp_point(env
, w
);
16323 if (insn_state
[w
] == 0) {
16325 insn_state
[t
] = DISCOVERED
| e
;
16326 insn_state
[w
] = DISCOVERED
;
16327 if (env
->cfg
.cur_stack
>= env
->prog
->len
)
16329 insn_stack
[env
->cfg
.cur_stack
++] = w
;
16330 return KEEP_EXPLORING
;
16331 } else if ((insn_state
[w
] & 0xF0) == DISCOVERED
) {
16332 if (env
->bpf_capable
)
16333 return DONE_EXPLORING
;
16334 verbose_linfo(env
, t
, "%d: ", t
);
16335 verbose_linfo(env
, w
, "%d: ", w
);
16336 verbose(env
, "back-edge from insn %d to %d\n", t
, w
);
16338 } else if (insn_state
[w
] == EXPLORED
) {
16339 /* forward- or cross-edge */
16340 insn_state
[t
] = DISCOVERED
| e
;
16342 verbose(env
, "insn state internal bug\n");
16345 return DONE_EXPLORING
;
16348 static int visit_func_call_insn(int t
, struct bpf_insn
*insns
,
16349 struct bpf_verifier_env
*env
,
16354 insn_sz
= bpf_is_ldimm64(&insns
[t
]) ? 2 : 1;
16355 ret
= push_insn(t
, t
+ insn_sz
, FALLTHROUGH
, env
);
16359 mark_prune_point(env
, t
+ insn_sz
);
16360 /* when we exit from subprog, we need to record non-linear history */
16361 mark_jmp_point(env
, t
+ insn_sz
);
16363 if (visit_callee
) {
16364 mark_prune_point(env
, t
);
16365 ret
= push_insn(t
, t
+ insns
[t
].imm
+ 1, BRANCH
, env
);
/* Bitmask with 1s for all caller saved registers */
#define ALL_CALLER_SAVED_REGS ((1u << CALLER_SAVED_REGS) - 1)

/* Return a bitmask specifying which caller saved registers are
 * clobbered by a call to a helper *as if* this helper follows
 * bpf_fastcall contract:
 * - includes R0 if function is non-void;
 * - includes R1-R5 if the corresponding parameter is described
 *   in the function prototype.
 */
static u32 helper_fastcall_clobber_mask(const struct bpf_func_proto *fn)
{
	u32 mask;
	int i;

	mask = 0;
	if (fn->ret_type != RET_VOID)
		mask |= BIT(BPF_REG_0);
	for (i = 0; i < ARRAY_SIZE(fn->arg_type); ++i)
		if (fn->arg_type[i] != ARG_DONTCARE)
			mask |= BIT(BPF_REG_1 + i);
	return mask;
}
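/* Illustrative example (not from the original source): for a hypothetical
 * helper with prototype  u64 helper(void *ctx, u32 len)  the mask computed
 * above would be BIT(BPF_REG_0) | BIT(BPF_REG_1) | BIT(BPF_REG_2).
 * R3-R5 are treated as preserved under the bpf_fastcall contract, so
 * spill/fill pairs around such a call are only expected (and removable)
 * for R3-R5 values that are live across it.
 */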
16394 /* True if do_misc_fixups() replaces calls to helper number 'imm',
16395 * replacement patch is presumed to follow bpf_fastcall contract
16396 * (see mark_fastcall_pattern_for_call() below).
16398 static bool verifier_inlines_helper_call(struct bpf_verifier_env
*env
, s32 imm
)
16401 #ifdef CONFIG_X86_64
16402 case BPF_FUNC_get_smp_processor_id
:
16403 return env
->prog
->jit_requested
&& bpf_jit_supports_percpu_insn();
16410 /* Same as helper_fastcall_clobber_mask() but for kfuncs, see comment above */
16411 static u32
kfunc_fastcall_clobber_mask(struct bpf_kfunc_call_arg_meta
*meta
)
16415 vlen
= btf_type_vlen(meta
->func_proto
);
16417 if (!btf_type_is_void(btf_type_by_id(meta
->btf
, meta
->func_proto
->type
)))
16418 mask
|= BIT(BPF_REG_0
);
16419 for (i
= 0; i
< vlen
; ++i
)
16420 mask
|= BIT(BPF_REG_1
+ i
);
16424 /* Same as verifier_inlines_helper_call() but for kfuncs, see comment above */
16425 static bool is_fastcall_kfunc_call(struct bpf_kfunc_call_arg_meta
*meta
)
16427 return meta
->kfunc_flags
& KF_FASTCALL
;
16430 /* LLVM define a bpf_fastcall function attribute.
16431 * This attribute means that function scratches only some of
16432 * the caller saved registers defined by ABI.
16433 * For BPF the set of such registers could be defined as follows:
16434 * - R0 is scratched only if function is non-void;
16435 * - R1-R5 are scratched only if corresponding parameter type is defined
16436 * in the function prototype.
16438 * The contract between kernel and clang allows to simultaneously use
16439 * such functions and maintain backwards compatibility with old
16440 * kernels that don't understand bpf_fastcall calls:
16442 * - for bpf_fastcall calls clang allocates registers as-if relevant r0-r5
16443 * registers are not scratched by the call;
16445 * - as a post-processing step, clang visits each bpf_fastcall call and adds
16446 * spill/fill for every live r0-r5;
16448 * - stack offsets used for the spill/fill are allocated as lowest
16449 * stack offsets in whole function and are not used for any other
16452 * - when kernel loads a program, it looks for such patterns
16453 * (bpf_fastcall function surrounded by spills/fills) and checks if
16454 * spill/fill stack offsets are used exclusively in fastcall patterns;
16456 * - if so, and if verifier or current JIT inlines the call to the
16457 * bpf_fastcall function (e.g. a helper call), kernel removes unnecessary
16458 * spill/fill pairs;
16460 * - when old kernel loads a program, presence of spill/fill pairs
16461 * keeps BPF program valid, albeit slightly less efficient.
16467 * *(u64 *)(r10 - 8) = r1; r1 = 1;
16468 * *(u64 *)(r10 - 16) = r2; r2 = 2;
16469 * call %[to_be_inlined] --> call %[to_be_inlined]
16470 * r2 = *(u64 *)(r10 - 16); r0 = r1;
16471 * r1 = *(u64 *)(r10 - 8); r0 += r2;
16476 * The purpose of mark_fastcall_pattern_for_call is to:
16477 * - look for such patterns;
16478 * - mark spill and fill instructions in env->insn_aux_data[*].fastcall_pattern;
16479 * - mark set env->insn_aux_data[*].fastcall_spills_num for call instruction;
16480 * - update env->subprog_info[*]->fastcall_stack_off to find an offset
16481 * at which bpf_fastcall spill/fill stack slots start;
16482 * - update env->subprog_info[*]->keep_fastcall_stack.
16484 * The .fastcall_pattern and .fastcall_stack_off are used by
16485 * check_fastcall_stack_contract() to check if every stack access to
16486 * fastcall spill/fill stack slot originates from spill/fill
16487 * instructions, members of fastcall patterns.
16489 * If such condition holds true for a subprogram, fastcall patterns could
16490 * be rewritten by remove_fastcall_spills_fills().
16491 * Otherwise bpf_fastcall patterns are not changed in the subprogram
16492 * (code, presumably, generated by an older clang version).
16494 * For example, it is *not* safe to remove spill/fill below:
16497 * *(u64 *)(r10 - 8) = r1; r1 = 1;
16498 * call %[to_be_inlined] --> call %[to_be_inlined]
16499 * r1 = *(u64 *)(r10 - 8); r0 = *(u64 *)(r10 - 8); <---- wrong !!!
16500 * r0 = *(u64 *)(r10 - 8); r0 += r1;
16504 static void mark_fastcall_pattern_for_call(struct bpf_verifier_env
*env
,
16505 struct bpf_subprog_info
*subprog
,
16506 int insn_idx
, s16 lowest_off
)
16508 struct bpf_insn
*insns
= env
->prog
->insnsi
, *stx
, *ldx
;
16509 struct bpf_insn
*call
= &env
->prog
->insnsi
[insn_idx
];
16510 const struct bpf_func_proto
*fn
;
16511 u32 clobbered_regs_mask
= ALL_CALLER_SAVED_REGS
;
16512 u32 expected_regs_mask
;
16513 bool can_be_inlined
= false;
16517 if (bpf_helper_call(call
)) {
16518 if (get_helper_proto(env
, call
->imm
, &fn
) < 0)
16519 /* error would be reported later */
16521 clobbered_regs_mask
= helper_fastcall_clobber_mask(fn
);
16522 can_be_inlined
= fn
->allow_fastcall
&&
16523 (verifier_inlines_helper_call(env
, call
->imm
) ||
16524 bpf_jit_inlines_helper_call(call
->imm
));
16527 if (bpf_pseudo_kfunc_call(call
)) {
16528 struct bpf_kfunc_call_arg_meta meta
;
16531 err
= fetch_kfunc_meta(env
, call
, &meta
, NULL
);
16533 /* error would be reported later */
16536 clobbered_regs_mask
= kfunc_fastcall_clobber_mask(&meta
);
16537 can_be_inlined
= is_fastcall_kfunc_call(&meta
);
16540 if (clobbered_regs_mask
== ALL_CALLER_SAVED_REGS
)
16543 /* e.g. if helper call clobbers r{0,1}, expect r{2,3,4,5} in the pattern */
16544 expected_regs_mask
= ~clobbered_regs_mask
& ALL_CALLER_SAVED_REGS
;
16546 /* match pairs of form:
16548 * *(u64 *)(r10 - Y) = rX (where Y % 8 == 0)
16550 * call %[to_be_inlined]
16552 * rX = *(u64 *)(r10 - Y)
16554 for (i
= 1, off
= lowest_off
; i
<= ARRAY_SIZE(caller_saved
); ++i
, off
+= BPF_REG_SIZE
) {
16555 if (insn_idx
- i
< 0 || insn_idx
+ i
>= env
->prog
->len
)
16557 stx
= &insns
[insn_idx
- i
];
16558 ldx
= &insns
[insn_idx
+ i
];
16559 /* must be a stack spill/fill pair */
16560 if (stx
->code
!= (BPF_STX
| BPF_MEM
| BPF_DW
) ||
16561 ldx
->code
!= (BPF_LDX
| BPF_MEM
| BPF_DW
) ||
16562 stx
->dst_reg
!= BPF_REG_10
||
16563 ldx
->src_reg
!= BPF_REG_10
)
16565 /* must be a spill/fill for the same reg */
16566 if (stx
->src_reg
!= ldx
->dst_reg
)
16568 /* must be one of the previously unseen registers */
16569 if ((BIT(stx
->src_reg
) & expected_regs_mask
) == 0)
16571 /* must be a spill/fill for the same expected offset,
16572 * no need to check offset alignment, BPF_DW stack access
16573 * is always 8-byte aligned.
16575 if (stx
->off
!= off
|| ldx
->off
!= off
)
16577 expected_regs_mask
&= ~BIT(stx
->src_reg
);
16578 env
->insn_aux_data
[insn_idx
- i
].fastcall_pattern
= 1;
16579 env
->insn_aux_data
[insn_idx
+ i
].fastcall_pattern
= 1;
16584 /* Conditionally set 'fastcall_spills_num' to allow forward
16585 * compatibility when more helper functions are marked as
16586 * bpf_fastcall at compile time than current kernel supports, e.g:
16588 * 1: *(u64 *)(r10 - 8) = r1
16589 * 2: call A ;; assume A is bpf_fastcall for current kernel
16590 * 3: r1 = *(u64 *)(r10 - 8)
16591 * 4: *(u64 *)(r10 - 8) = r1
16592 * 5: call B ;; assume B is not bpf_fastcall for current kernel
16593 * 6: r1 = *(u64 *)(r10 - 8)
16595 * There is no need to block bpf_fastcall rewrite for such program.
16596 * Set 'fastcall_pattern' for both calls to keep check_fastcall_stack_contract() happy,
16597 * don't set 'fastcall_spills_num' for call B so that remove_fastcall_spills_fills()
16598 * does not remove spill/fill pair {4,6}.
16600 if (can_be_inlined
)
16601 env
->insn_aux_data
[insn_idx
].fastcall_spills_num
= i
- 1;
16603 subprog
->keep_fastcall_stack
= 1;
16604 subprog
->fastcall_stack_off
= min(subprog
->fastcall_stack_off
, off
);
16607 static int mark_fastcall_patterns(struct bpf_verifier_env
*env
)
16609 struct bpf_subprog_info
*subprog
= env
->subprog_info
;
16610 struct bpf_insn
*insn
;
16614 for (s
= 0; s
< env
->subprog_cnt
; ++s
, ++subprog
) {
16615 /* find lowest stack spill offset used in this subprog */
16617 for (i
= subprog
->start
; i
< (subprog
+ 1)->start
; ++i
) {
16618 insn
= env
->prog
->insnsi
+ i
;
16619 if (insn
->code
!= (BPF_STX
| BPF_MEM
| BPF_DW
) ||
16620 insn
->dst_reg
!= BPF_REG_10
)
16622 lowest_off
= min(lowest_off
, insn
->off
);
16624 /* use this offset to find fastcall patterns */
16625 for (i
= subprog
->start
; i
< (subprog
+ 1)->start
; ++i
) {
16626 insn
= env
->prog
->insnsi
+ i
;
16627 if (insn
->code
!= (BPF_JMP
| BPF_CALL
))
16629 mark_fastcall_pattern_for_call(env
, subprog
, i
, lowest_off
);
16635 /* Visits the instruction at index t and returns one of the following:
16636 * < 0 - an error occurred
16637 * DONE_EXPLORING - the instruction was fully explored
16638 * KEEP_EXPLORING - there is still work to be done before it is fully explored
16640 static int visit_insn(int t
, struct bpf_verifier_env
*env
)
16642 struct bpf_insn
*insns
= env
->prog
->insnsi
, *insn
= &insns
[t
];
16643 int ret
, off
, insn_sz
;
16645 if (bpf_pseudo_func(insn
))
16646 return visit_func_call_insn(t
, insns
, env
, true);
16648 /* All non-branch instructions have a single fall-through edge. */
16649 if (BPF_CLASS(insn
->code
) != BPF_JMP
&&
16650 BPF_CLASS(insn
->code
) != BPF_JMP32
) {
16651 insn_sz
= bpf_is_ldimm64(insn
) ? 2 : 1;
16652 return push_insn(t
, t
+ insn_sz
, FALLTHROUGH
, env
);
16655 switch (BPF_OP(insn
->code
)) {
16657 return DONE_EXPLORING
;
16660 if (is_async_callback_calling_insn(insn
))
16661 /* Mark this call insn as a prune point to trigger
16662 * is_state_visited() check before call itself is
16663 * processed by __check_func_call(). Otherwise new
16664 * async state will be pushed for further exploration.
16666 mark_prune_point(env
, t
);
16667 /* For functions that invoke callbacks it is not known how many times
16668 * callback would be called. Verifier models callback calling functions
16669 * by repeatedly visiting callback bodies and returning to origin call
16671 * In order to stop such iteration verifier needs to identify when a
16672 * state identical some state from a previous iteration is reached.
16673 * Check below forces creation of checkpoint before callback calling
16674 * instruction to allow search for such identical states.
16676 if (is_sync_callback_calling_insn(insn
)) {
16677 mark_calls_callback(env
, t
);
16678 mark_force_checkpoint(env
, t
);
16679 mark_prune_point(env
, t
);
16680 mark_jmp_point(env
, t
);
16682 if (insn
->src_reg
== BPF_PSEUDO_KFUNC_CALL
) {
16683 struct bpf_kfunc_call_arg_meta meta
;
16685 ret
= fetch_kfunc_meta(env
, insn
, &meta
, NULL
);
16686 if (ret
== 0 && is_iter_next_kfunc(&meta
)) {
16687 mark_prune_point(env
, t
);
16688 /* Checking and saving state checkpoints at iter_next() call
16689 * is crucial for fast convergence of open-coded iterator loop
16690 * logic, so we need to force it. If we don't do that,
16691 * is_state_visited() might skip saving a checkpoint, causing
16692 * unnecessarily long sequence of not checkpointed
16693 * instructions and jumps, leading to exhaustion of jump
16694 * history buffer, and potentially other undesired outcomes.
16695 * It is expected that with correct open-coded iterators
16696 * convergence will happen quickly, so we don't run a risk of
16697 * exhausting memory.
16699 mark_force_checkpoint(env
, t
);
16702 return visit_func_call_insn(t
, insns
, env
, insn
->src_reg
== BPF_PSEUDO_CALL
);
16705 if (BPF_SRC(insn
->code
) != BPF_K
)
16708 if (BPF_CLASS(insn
->code
) == BPF_JMP
)
16713 /* unconditional jump with single edge */
16714 ret
= push_insn(t
, t
+ off
+ 1, FALLTHROUGH
, env
);
16718 mark_prune_point(env
, t
+ off
+ 1);
16719 mark_jmp_point(env
, t
+ off
+ 1);
16724 /* conditional jump with two edges */
16725 mark_prune_point(env
, t
);
16726 if (is_may_goto_insn(insn
))
16727 mark_force_checkpoint(env
, t
);
16729 ret
= push_insn(t
, t
+ 1, FALLTHROUGH
, env
);
16733 return push_insn(t
, t
+ insn
->off
+ 1, BRANCH
, env
);
16737 /* non-recursive depth-first-search to detect loops in BPF program
16738 * loop == back-edge in directed graph
16740 static int check_cfg(struct bpf_verifier_env
*env
)
16742 int insn_cnt
= env
->prog
->len
;
16743 int *insn_stack
, *insn_state
;
16744 int ex_insn_beg
, i
, ret
= 0;
16745 bool ex_done
= false;
16747 insn_state
= env
->cfg
.insn_state
= kvcalloc(insn_cnt
, sizeof(int), GFP_KERNEL
);
16751 insn_stack
= env
->cfg
.insn_stack
= kvcalloc(insn_cnt
, sizeof(int), GFP_KERNEL
);
16753 kvfree(insn_state
);
16757 insn_state
[0] = DISCOVERED
; /* mark 1st insn as discovered */
16758 insn_stack
[0] = 0; /* 0 is the first instruction */
16759 env
->cfg
.cur_stack
= 1;
16762 while (env
->cfg
.cur_stack
> 0) {
16763 int t
= insn_stack
[env
->cfg
.cur_stack
- 1];
16765 ret
= visit_insn(t
, env
);
16767 case DONE_EXPLORING
:
16768 insn_state
[t
] = EXPLORED
;
16769 env
->cfg
.cur_stack
--;
16771 case KEEP_EXPLORING
:
16775 verbose(env
, "visit_insn internal bug\n");
16782 if (env
->cfg
.cur_stack
< 0) {
16783 verbose(env
, "pop stack internal bug\n");
16788 if (env
->exception_callback_subprog
&& !ex_done
) {
16789 ex_insn_beg
= env
->subprog_info
[env
->exception_callback_subprog
].start
;
16791 insn_state
[ex_insn_beg
] = DISCOVERED
;
16792 insn_stack
[0] = ex_insn_beg
;
16793 env
->cfg
.cur_stack
= 1;
16798 for (i
= 0; i
< insn_cnt
; i
++) {
16799 struct bpf_insn
*insn
= &env
->prog
->insnsi
[i
];
16801 if (insn_state
[i
] != EXPLORED
) {
16802 verbose(env
, "unreachable insn %d\n", i
);
16806 if (bpf_is_ldimm64(insn
)) {
16807 if (insn_state
[i
+ 1] != 0) {
16808 verbose(env
, "jump into the middle of ldimm64 insn %d\n", i
);
16812 i
++; /* skip second half of ldimm64 */
16815 ret
= 0; /* cfg looks good */
16818 kvfree(insn_state
);
16819 kvfree(insn_stack
);
16820 env
->cfg
.insn_state
= env
->cfg
.insn_stack
= NULL
;
16824 static int check_abnormal_return(struct bpf_verifier_env
*env
)
16828 for (i
= 1; i
< env
->subprog_cnt
; i
++) {
16829 if (env
->subprog_info
[i
].has_ld_abs
) {
16830 verbose(env
, "LD_ABS is not allowed in subprogs without BTF\n");
16833 if (env
->subprog_info
[i
].has_tail_call
) {
16834 verbose(env
, "tail_call is not allowed in subprogs without BTF\n");
16841 /* The minimum supported BTF func info size */
16842 #define MIN_BPF_FUNCINFO_SIZE 8
16843 #define MAX_FUNCINFO_REC_SIZE 252
16845 static int check_btf_func_early(struct bpf_verifier_env
*env
,
16846 const union bpf_attr
*attr
,
16849 u32 krec_size
= sizeof(struct bpf_func_info
);
16850 const struct btf_type
*type
, *func_proto
;
16851 u32 i
, nfuncs
, urec_size
, min_size
;
16852 struct bpf_func_info
*krecord
;
16853 struct bpf_prog
*prog
;
16854 const struct btf
*btf
;
16855 u32 prev_offset
= 0;
16859 nfuncs
= attr
->func_info_cnt
;
16861 if (check_abnormal_return(env
))
16866 urec_size
= attr
->func_info_rec_size
;
16867 if (urec_size
< MIN_BPF_FUNCINFO_SIZE
||
16868 urec_size
> MAX_FUNCINFO_REC_SIZE
||
16869 urec_size
% sizeof(u32
)) {
16870 verbose(env
, "invalid func info rec size %u\n", urec_size
);
16875 btf
= prog
->aux
->btf
;
16877 urecord
= make_bpfptr(attr
->func_info
, uattr
.is_kernel
);
16878 min_size
= min_t(u32
, krec_size
, urec_size
);
16880 krecord
= kvcalloc(nfuncs
, krec_size
, GFP_KERNEL
| __GFP_NOWARN
);
16884 for (i
= 0; i
< nfuncs
; i
++) {
16885 ret
= bpf_check_uarg_tail_zero(urecord
, krec_size
, urec_size
);
16887 if (ret
== -E2BIG
) {
16888 verbose(env
, "nonzero tailing record in func info");
16889 /* set the size kernel expects so loader can zero
16890 * out the rest of the record.
16892 if (copy_to_bpfptr_offset(uattr
,
16893 offsetof(union bpf_attr
, func_info_rec_size
),
16894 &min_size
, sizeof(min_size
)))
16900 if (copy_from_bpfptr(&krecord
[i
], urecord
, min_size
)) {
16905 /* check insn_off */
16908 if (krecord
[i
].insn_off
) {
16910 "nonzero insn_off %u for the first func info record",
16911 krecord
[i
].insn_off
);
16914 } else if (krecord
[i
].insn_off
<= prev_offset
) {
16916 "same or smaller insn offset (%u) than previous func info record (%u)",
16917 krecord
[i
].insn_off
, prev_offset
);
16921 /* check type_id */
16922 type
= btf_type_by_id(btf
, krecord
[i
].type_id
);
16923 if (!type
|| !btf_type_is_func(type
)) {
16924 verbose(env
, "invalid type id %d in func info",
16925 krecord
[i
].type_id
);
16929 func_proto
= btf_type_by_id(btf
, type
->type
);
16930 if (unlikely(!func_proto
|| !btf_type_is_func_proto(func_proto
)))
16931 /* btf_func_check() already verified it during BTF load */
16934 prev_offset
= krecord
[i
].insn_off
;
16935 bpfptr_add(&urecord
, urec_size
);
16938 prog
->aux
->func_info
= krecord
;
16939 prog
->aux
->func_info_cnt
= nfuncs
;
16947 static int check_btf_func(struct bpf_verifier_env
*env
,
16948 const union bpf_attr
*attr
,
16951 const struct btf_type
*type
, *func_proto
, *ret_type
;
16952 u32 i
, nfuncs
, urec_size
;
16953 struct bpf_func_info
*krecord
;
16954 struct bpf_func_info_aux
*info_aux
= NULL
;
16955 struct bpf_prog
*prog
;
16956 const struct btf
*btf
;
16958 bool scalar_return
;
16961 nfuncs
= attr
->func_info_cnt
;
16963 if (check_abnormal_return(env
))
16967 if (nfuncs
!= env
->subprog_cnt
) {
16968 verbose(env
, "number of funcs in func_info doesn't match number of subprogs\n");
16972 urec_size
= attr
->func_info_rec_size
;
16975 btf
= prog
->aux
->btf
;
16977 urecord
= make_bpfptr(attr
->func_info
, uattr
.is_kernel
);
16979 krecord
= prog
->aux
->func_info
;
16980 info_aux
= kcalloc(nfuncs
, sizeof(*info_aux
), GFP_KERNEL
| __GFP_NOWARN
);
16984 for (i
= 0; i
< nfuncs
; i
++) {
16985 /* check insn_off */
16988 if (env
->subprog_info
[i
].start
!= krecord
[i
].insn_off
) {
16989 verbose(env
, "func_info BTF section doesn't match subprog layout in BPF program\n");
16993 /* Already checked type_id */
16994 type
= btf_type_by_id(btf
, krecord
[i
].type_id
);
16995 info_aux
[i
].linkage
= BTF_INFO_VLEN(type
->info
);
16996 /* Already checked func_proto */
16997 func_proto
= btf_type_by_id(btf
, type
->type
);
16999 ret_type
= btf_type_skip_modifiers(btf
, func_proto
->type
, NULL
);
17001 btf_type_is_small_int(ret_type
) || btf_is_any_enum(ret_type
);
17002 if (i
&& !scalar_return
&& env
->subprog_info
[i
].has_ld_abs
) {
17003 verbose(env
, "LD_ABS is only allowed in functions that return 'int'.\n");
17006 if (i
&& !scalar_return
&& env
->subprog_info
[i
].has_tail_call
) {
17007 verbose(env
, "tail_call is only allowed in functions that return 'int'.\n");
17011 bpfptr_add(&urecord
, urec_size
);
17014 prog
->aux
->func_info_aux
= info_aux
;
17022 static void adjust_btf_func(struct bpf_verifier_env
*env
)
17024 struct bpf_prog_aux
*aux
= env
->prog
->aux
;
17027 if (!aux
->func_info
)
17030 /* func_info is not available for hidden subprogs */
17031 for (i
= 0; i
< env
->subprog_cnt
- env
->hidden_subprog_cnt
; i
++)
17032 aux
->func_info
[i
].insn_off
= env
->subprog_info
[i
].start
;
17035 #define MIN_BPF_LINEINFO_SIZE offsetofend(struct bpf_line_info, line_col)
17036 #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
17038 static int check_btf_line(struct bpf_verifier_env
*env
,
17039 const union bpf_attr
*attr
,
17042 u32 i
, s
, nr_linfo
, ncopy
, expected_size
, rec_size
, prev_offset
= 0;
17043 struct bpf_subprog_info
*sub
;
17044 struct bpf_line_info
*linfo
;
17045 struct bpf_prog
*prog
;
17046 const struct btf
*btf
;
17050 nr_linfo
= attr
->line_info_cnt
;
17053 if (nr_linfo
> INT_MAX
/ sizeof(struct bpf_line_info
))
17056 rec_size
= attr
->line_info_rec_size
;
17057 if (rec_size
< MIN_BPF_LINEINFO_SIZE
||
17058 rec_size
> MAX_LINEINFO_REC_SIZE
||
17059 rec_size
& (sizeof(u32
) - 1))
17062 /* Need to zero it in case the userspace may
17063 * pass in a smaller bpf_line_info object.
17065 linfo
= kvcalloc(nr_linfo
, sizeof(struct bpf_line_info
),
17066 GFP_KERNEL
| __GFP_NOWARN
);
17071 btf
= prog
->aux
->btf
;
17074 sub
= env
->subprog_info
;
17075 ulinfo
= make_bpfptr(attr
->line_info
, uattr
.is_kernel
);
17076 expected_size
= sizeof(struct bpf_line_info
);
17077 ncopy
= min_t(u32
, expected_size
, rec_size
);
17078 for (i
= 0; i
< nr_linfo
; i
++) {
17079 err
= bpf_check_uarg_tail_zero(ulinfo
, expected_size
, rec_size
);
17081 if (err
== -E2BIG
) {
17082 verbose(env
, "nonzero tailing record in line_info");
17083 if (copy_to_bpfptr_offset(uattr
,
17084 offsetof(union bpf_attr
, line_info_rec_size
),
17085 &expected_size
, sizeof(expected_size
)))
17091 if (copy_from_bpfptr(&linfo
[i
], ulinfo
, ncopy
)) {
17097 * Check insn_off to ensure
17098 * 1) strictly increasing AND
17099 * 2) bounded by prog->len
17101 * The linfo[0].insn_off == 0 check logically falls into
17102 * the later "missing bpf_line_info for func..." case
17103 * because the first linfo[0].insn_off must be the
17104 * first sub also and the first sub must have
17105 * subprog_info[0].start == 0.
17107 if ((i
&& linfo
[i
].insn_off
<= prev_offset
) ||
17108 linfo
[i
].insn_off
>= prog
->len
) {
17109 verbose(env
, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
17110 i
, linfo
[i
].insn_off
, prev_offset
,
17116 if (!prog
->insnsi
[linfo
[i
].insn_off
].code
) {
17118 "Invalid insn code at line_info[%u].insn_off\n",
17124 if (!btf_name_by_offset(btf
, linfo
[i
].line_off
) ||
17125 !btf_name_by_offset(btf
, linfo
[i
].file_name_off
)) {
17126 verbose(env
, "Invalid line_info[%u].line_off or .file_name_off\n", i
);
17131 if (s
!= env
->subprog_cnt
) {
17132 if (linfo
[i
].insn_off
== sub
[s
].start
) {
17133 sub
[s
].linfo_idx
= i
;
17135 } else if (sub
[s
].start
< linfo
[i
].insn_off
) {
17136 verbose(env
, "missing bpf_line_info for func#%u\n", s
);
17142 prev_offset
= linfo
[i
].insn_off
;
17143 bpfptr_add(&ulinfo
, rec_size
);
17146 if (s
!= env
->subprog_cnt
) {
17147 verbose(env
, "missing bpf_line_info for %u funcs starting from func#%u\n",
17148 env
->subprog_cnt
- s
, s
);
17153 prog
->aux
->linfo
= linfo
;
17154 prog
->aux
->nr_linfo
= nr_linfo
;
17163 #define MIN_CORE_RELO_SIZE sizeof(struct bpf_core_relo)
17164 #define MAX_CORE_RELO_SIZE MAX_FUNCINFO_REC_SIZE
17166 static int check_core_relo(struct bpf_verifier_env
*env
,
17167 const union bpf_attr
*attr
,
17170 u32 i
, nr_core_relo
, ncopy
, expected_size
, rec_size
;
17171 struct bpf_core_relo core_relo
= {};
17172 struct bpf_prog
*prog
= env
->prog
;
17173 const struct btf
*btf
= prog
->aux
->btf
;
17174 struct bpf_core_ctx ctx
= {
17178 bpfptr_t u_core_relo
;
17181 nr_core_relo
= attr
->core_relo_cnt
;
17184 if (nr_core_relo
> INT_MAX
/ sizeof(struct bpf_core_relo
))
17187 rec_size
= attr
->core_relo_rec_size
;
17188 if (rec_size
< MIN_CORE_RELO_SIZE
||
17189 rec_size
> MAX_CORE_RELO_SIZE
||
17190 rec_size
% sizeof(u32
))
17193 u_core_relo
= make_bpfptr(attr
->core_relos
, uattr
.is_kernel
);
17194 expected_size
= sizeof(struct bpf_core_relo
);
17195 ncopy
= min_t(u32
, expected_size
, rec_size
);
17197 /* Unlike func_info and line_info, copy and apply each CO-RE
17198 * relocation record one at a time.
17200 for (i
= 0; i
< nr_core_relo
; i
++) {
17201 /* future proofing when sizeof(bpf_core_relo) changes */
17202 err
= bpf_check_uarg_tail_zero(u_core_relo
, expected_size
, rec_size
);
17204 if (err
== -E2BIG
) {
17205 verbose(env
, "nonzero tailing record in core_relo");
17206 if (copy_to_bpfptr_offset(uattr
,
17207 offsetof(union bpf_attr
, core_relo_rec_size
),
17208 &expected_size
, sizeof(expected_size
)))
17214 if (copy_from_bpfptr(&core_relo
, u_core_relo
, ncopy
)) {
17219 if (core_relo
.insn_off
% 8 || core_relo
.insn_off
/ 8 >= prog
->len
) {
17220 verbose(env
, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n",
17221 i
, core_relo
.insn_off
, prog
->len
);
17226 err
= bpf_core_apply(&ctx
, &core_relo
, i
,
17227 &prog
->insnsi
[core_relo
.insn_off
/ 8]);
17230 bpfptr_add(&u_core_relo
, rec_size
);
17235 static int check_btf_info_early(struct bpf_verifier_env
*env
,
17236 const union bpf_attr
*attr
,
17242 if (!attr
->func_info_cnt
&& !attr
->line_info_cnt
) {
17243 if (check_abnormal_return(env
))
17248 btf
= btf_get_by_fd(attr
->prog_btf_fd
);
17250 return PTR_ERR(btf
);
17251 if (btf_is_kernel(btf
)) {
17255 env
->prog
->aux
->btf
= btf
;
17257 err
= check_btf_func_early(env
, attr
, uattr
);
static int check_btf_info(struct bpf_verifier_env *env,
			  const union bpf_attr *attr,
			  bpfptr_t uattr)
{
	int err;

	if (!attr->func_info_cnt && !attr->line_info_cnt) {
		if (check_abnormal_return(env))
			return -EINVAL;
		return 0;
	}

	err = check_btf_func(env, attr, uattr);
	if (err)
		return err;

	err = check_btf_line(env, attr, uattr);
	if (err)
		return err;

	err = check_core_relo(env, attr, uattr);
	return err;
}
/* check %cur's range satisfies %old's */
static bool range_within(const struct bpf_reg_state *old,
			 const struct bpf_reg_state *cur)
{
	return old->umin_value <= cur->umin_value &&
	       old->umax_value >= cur->umax_value &&
	       old->smin_value <= cur->smin_value &&
	       old->smax_value >= cur->smax_value &&
	       old->u32_min_value <= cur->u32_min_value &&
	       old->u32_max_value >= cur->u32_max_value &&
	       old->s32_min_value <= cur->s32_min_value &&
	       old->s32_max_value >= cur->s32_max_value;
}
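
/* Illustrative sketch (compiled out): what range_within() checks, shown on
 * the unsigned 64-bit bounds only. The old, already-verified register must
 * describe a superset of the current one: old = [0, 100] accepts
 * cur = [10, 50], but old = [10, 50] does not accept cur = [0, 100].
 * The struct and function names below are made up for the example.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

struct demo_range { uint64_t umin, umax; };

static bool demo_range_within(struct demo_range old, struct demo_range cur)
{
	/* the old range must fully enclose the current range */
	return old.umin <= cur.umin && old.umax >= cur.umax;
}
#endif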
/* If in the old state two registers had the same id, then they need to have
 * the same id in the new state as well. But that id could be different from
 * the old state, so we need to track the mapping from old to new ids.
 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
 * regs with old id 5 must also have new id 9 for the new state to be safe. But
 * regs with a different old id could still have new id 9, we don't care about
 * that.
 * So we look through our idmap to see if this old id has been seen before. If
 * so, we require the new id to match; otherwise, we add the id pair to the map.
 */
static bool check_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
{
	struct bpf_id_pair *map = idmap->map;
	unsigned int i;

	/* either both IDs should be set or both should be zero */
	if (!!old_id != !!cur_id)
		return false;

	if (old_id == 0) /* cur_id == 0 as well */
		return true;

	for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
		if (!map[i].old) {
			/* Reached an empty slot; haven't seen this id before */
			map[i].old = old_id;
			map[i].cur = cur_id;
			return true;
		}
		if (map[i].old == old_id)
			return map[i].cur == cur_id;
		if (map[i].cur == cur_id)
			return false;
	}
	/* We ran out of idmap slots, which should be impossible */
	WARN_ON_ONCE(1);
	return false;
}
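
/* Illustrative sketch (compiled out): how the pairing rule above plays out in
 * practice. With an empty map, matching old id 5 against new id 9 succeeds and
 * records the pair; a second register with old id 5 must then also carry new
 * id 9, while new id 7 is rejected. demo_check_ids() is only a stand-alone
 * restatement of that rule; its names and sizes are made up for the example.
 */
#if 0
#include <stdbool.h>

#define DEMO_MAP_SIZE 16

struct demo_pair { unsigned int old, cur; };

static bool demo_check_ids(unsigned int old_id, unsigned int cur_id,
			   struct demo_pair *map)
{
	int i;

	for (i = 0; i < DEMO_MAP_SIZE; i++) {
		if (!map[i].old) {
			/* first time this old id is seen: record the pair */
			map[i].old = old_id;
			map[i].cur = cur_id;
			return true;
		}
		if (map[i].old == old_id)
			return map[i].cur == cur_id;
	}
	return false;	/* out of slots */
}

/* usage: demo_check_ids(5, 9, map) -> true (pair recorded),
 *        demo_check_ids(5, 9, map) -> true (consistent),
 *        demo_check_ids(5, 7, map) -> false (inconsistent mapping)
 */
#endif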
/* Similar to check_ids(), but allocate a unique temporary ID
 * for 'old_id' or 'cur_id' of zero.
 * This makes pairs like '0 vs unique ID', 'unique ID vs 0' valid.
 */
static bool check_scalar_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
{
	old_id = old_id ? old_id : ++idmap->tmp_id_gen;
	cur_id = cur_id ? cur_id : ++idmap->tmp_id_gen;

	return check_ids(old_id, cur_id, idmap);
}
static void clean_func_state(struct bpf_verifier_env *env,
			     struct bpf_func_state *st)
{
	enum bpf_reg_liveness live;
	int i, j;

	for (i = 0; i < BPF_REG_FP; i++) {
		live = st->regs[i].live;
		/* liveness must not touch this register anymore */
		st->regs[i].live |= REG_LIVE_DONE;
		if (!(live & REG_LIVE_READ))
			/* since the register is unused, clear its state
			 * to make further comparison simpler
			 */
			__mark_reg_not_init(env, &st->regs[i]);
	}

	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
		live = st->stack[i].spilled_ptr.live;
		/* liveness must not touch this stack slot anymore */
		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
		if (!(live & REG_LIVE_READ)) {
			__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
			for (j = 0; j < BPF_REG_SIZE; j++)
				st->stack[i].slot_type[j] = STACK_INVALID;
		}
	}
}
static void clean_verifier_state(struct bpf_verifier_env *env,
				 struct bpf_verifier_state *st)
{
	int i;

	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
		/* all regs in this state in all frames were already marked */
		return;

	for (i = 0; i <= st->curframe; i++)
		clean_func_state(env, st->frame[i]);
}
/* the parentage chains form a tree.
 * the verifier states are added to state lists at given insn and
 * pushed into state stack for future exploration.
 * when the verifier reaches bpf_exit insn some of the verifier states
 * stored in the state lists have their final liveness state already,
 * but a lot of states will get revised from liveness point of view when
 * the verifier explores other branches.
 * Example:
 * 2: if r1 == 100 goto pc+1
 * when the verifier reaches exit insn the register r0 in the state list of
 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
 * of insn 2 and goes exploring further. At the insn 4 it will walk the
 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
 *
 * Since the verifier pushes the branch states as it sees them while exploring
 * the program the condition of walking the branch instruction for the second
 * time means that all states below this branch were already explored and
 * their final liveness marks are already propagated.
 * Hence when the verifier completes the search of state list in is_state_visited()
 * we can call this clean_live_states() function to mark all liveness states
 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
 * will not be used.
 * This function also clears the registers and stack for states that !READ
 * to simplify state merging.
 *
 * Important note here that walking the same branch instruction in the callee
 * doesn't mean that the states are DONE. The verifier has to compare
 * the states.
 */
static void clean_live_states(struct bpf_verifier_env *env, int insn,
			      struct bpf_verifier_state *cur)
{
	struct bpf_verifier_state_list *sl;

	sl = *explored_state(env, insn);
	while (sl) {
		if (sl->state.branches)
			goto next;
		if (sl->state.insn_idx != insn ||
		    !same_callsites(&sl->state, cur))
			goto next;
		clean_verifier_state(env, &sl->state);
next:
		sl = sl->next;
	}
}
static bool regs_exact(const struct bpf_reg_state *rold,
		       const struct bpf_reg_state *rcur,
		       struct bpf_idmap *idmap)
{
	return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
	       check_ids(rold->id, rcur->id, idmap) &&
	       check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
}
/* Returns true if (rold safe implies rcur safe) */
static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
		    struct bpf_reg_state *rcur, struct bpf_idmap *idmap,
		    enum exact_level exact)
{
	if (exact == EXACT)
		return regs_exact(rold, rcur, idmap);

	if (!(rold->live & REG_LIVE_READ) && exact == NOT_EXACT)
		/* explored state didn't use this */
		return true;
	if (rold->type == NOT_INIT) {
		if (exact == NOT_EXACT || rcur->type == NOT_INIT)
			/* explored state can't have used this */
			return true;
	}

	/* Enforce that register types have to match exactly, including their
	 * modifiers (like PTR_MAYBE_NULL, MEM_RDONLY, etc), as a general
	 * rule.
	 *
	 * One can make a point that using a pointer register as unbounded
	 * SCALAR would be technically acceptable, but this could lead to
	 * pointer leaks because scalars are allowed to leak while pointers
	 * are not. We could make this safe in special cases if root is
	 * calling us, but it's probably not worth the hassle.
	 *
	 * Also, register types that are *not* MAYBE_NULL could technically be
	 * safe to use as their MAYBE_NULL variants (e.g., PTR_TO_MAP_VALUE
	 * is safe to be used as PTR_TO_MAP_VALUE_OR_NULL, provided both point
	 * to the same map).
	 * However, if the old MAYBE_NULL register then got NULL checked,
	 * doing so could have affected others with the same id, and we can't
	 * check for that because we lost the id when we converted to
	 * a non-MAYBE_NULL variant.
	 * So, as a general rule we don't allow mixing MAYBE_NULL and
	 * non-MAYBE_NULL registers as well.
	 */
	if (rold->type != rcur->type)
		return false;

	switch (base_type(rold->type)) {
	case SCALAR_VALUE:
		if (env->explore_alu_limits) {
			/* explore_alu_limits disables tnum_in() and range_within()
			 * logic and requires everything to be strict
			 */
			return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
			       check_scalar_ids(rold->id, rcur->id, idmap);
		}
		if (!rold->precise && exact == NOT_EXACT)
			return true;
		if ((rold->id & BPF_ADD_CONST) != (rcur->id & BPF_ADD_CONST))
			return false;
		if ((rold->id & BPF_ADD_CONST) && (rold->off != rcur->off))
			return false;
		/* Why check_ids() for scalar registers?
		 *
		 * Consider the following BPF code:
		 *   1: r6 = ... unbound scalar, ID=a ...
		 *   2: r7 = ... unbound scalar, ID=b ...
		 *   3: if (r6 > r7) goto +1
		 *   4: r6 = r7
		 *   5: if (r6 > X) goto ...
		 *   6: ... memory operation using r7 ...
		 *
		 * First verification path is [1-6]:
		 * - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7;
		 * - at (5) r6 would be marked <= X, sync_linked_regs() would also mark
		 *   r7 <= X, because r6 and r7 share same id.
		 * Next verification path is [1-4, 6].
		 *
		 * Instruction (6) would be reached in two states:
		 *   I.  r6{.id=b}, r7{.id=b} via path 1-6;
		 *   II. r6{.id=a}, r7{.id=b} via path 1-4, 6.
		 *
		 * Use check_ids() to distinguish these states.
		 *
		 * Also verify that new value satisfies old value range knowledge.
		 */
		return range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off) &&
		       check_scalar_ids(rold->id, rcur->id, idmap);
	case PTR_TO_MAP_KEY:
	case PTR_TO_MAP_VALUE:
	case PTR_TO_TP_BUFFER:
		/* If the new min/max/var_off satisfy the old ones and
		 * everything else matches, we are OK.
		 */
		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 &&
		       range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off) &&
		       check_ids(rold->id, rcur->id, idmap) &&
		       check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET:
		/* We must have at least as much range as the old ptr
		 * did, so that any accesses which were safe before are
		 * still safe. This is true even if old range < old off,
		 * since someone could have accessed through (ptr - k), or
		 * even done ptr -= k in a register, to get a safe access.
		 */
		if (rold->range > rcur->range)
			return false;
		/* If the offsets don't match, we can't trust our alignment;
		 * nor can we be sure that we won't fall out of range.
		 */
		if (rold->off != rcur->off)
			return false;
		/* id relations must be preserved */
		if (!check_ids(rold->id, rcur->id, idmap))
			return false;
		/* new val must satisfy old val knowledge */
		return range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off);
	case PTR_TO_STACK:
		/* two stack pointers are equal only if they're pointing to
		 * the same stack frame, since fp-8 in foo != fp-8 in bar
		 */
		return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno;
	default:
		return regs_exact(rold, rcur, idmap);
	}
}
static struct bpf_reg_state unbound_reg;

static __init int unbound_reg_init(void)
{
	__mark_reg_unknown_imprecise(&unbound_reg);
	unbound_reg.live |= REG_LIVE_READ;
	return 0;
}
late_initcall(unbound_reg_init);
static bool is_stack_all_misc(struct bpf_verifier_env *env,
			      struct bpf_stack_state *stack)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(stack->slot_type); ++i) {
		if ((stack->slot_type[i] == STACK_MISC) ||
		    (stack->slot_type[i] == STACK_INVALID && env->allow_uninit_stack))
			continue;
		return false;
	}

	return true;
}

static struct bpf_reg_state *scalar_reg_for_stack(struct bpf_verifier_env *env,
						  struct bpf_stack_state *stack)
{
	if (is_spilled_scalar_reg64(stack))
		return &stack->spilled_ptr;

	if (is_stack_all_misc(env, stack))
		return &unbound_reg;

	return NULL;
}
static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
		      struct bpf_func_state *cur, struct bpf_idmap *idmap,
		      enum exact_level exact)
{
	int i, spi;

	/* walk slots of the explored stack and ignore any additional
	 * slots in the current stack, since explored(safe) state
	 * didn't use them
	 */
	for (i = 0; i < old->allocated_stack; i++) {
		struct bpf_reg_state *old_reg, *cur_reg;

		spi = i / BPF_REG_SIZE;

		if (exact != NOT_EXACT &&
		    (i >= cur->allocated_stack ||
		     old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
		     cur->stack[spi].slot_type[i % BPF_REG_SIZE]))
			return false;

		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)
		    && exact == NOT_EXACT) {
			i += BPF_REG_SIZE - 1;
			/* explored state didn't use this */
			continue;
		}

		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
			continue;

		if (env->allow_uninit_stack &&
		    old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
			continue;

		/* explored stack has more populated slots than current stack
		 * and these slots were used
		 */
		if (i >= cur->allocated_stack)
			return false;

		/* 64-bit scalar spill vs all slots MISC and vice versa.
		 * Load from all slots MISC produces unbound scalar.
		 * Construct a fake register for such stack and call
		 * regsafe() to ensure scalar ids are compared.
		 */
		old_reg = scalar_reg_for_stack(env, &old->stack[spi]);
		cur_reg = scalar_reg_for_stack(env, &cur->stack[spi]);
		if (old_reg && cur_reg) {
			if (!regsafe(env, old_reg, cur_reg, idmap, exact))
				return false;
			i += BPF_REG_SIZE - 1;
			continue;
		}

		/* if old state was safe with misc data in the stack
		 * it will be safe with zero-initialized stack.
		 * The opposite is not true
		 */
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
			continue;
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
			continue;
		/* Both old and cur have the same slot_type */
		switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) {
		case STACK_SPILL:
			/* when explored and current stack slot are both storing
			 * spilled registers, check that stored pointers types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
			 * but current path has stored:
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
			 * such verifier states are not equivalent.
			 * return false to continue verification of this path
			 */
			if (!regsafe(env, &old->stack[spi].spilled_ptr,
				     &cur->stack[spi].spilled_ptr, idmap, exact))
				return false;
			break;
		case STACK_DYNPTR:
			old_reg = &old->stack[spi].spilled_ptr;
			cur_reg = &cur->stack[spi].spilled_ptr;
			if (old_reg->dynptr.type != cur_reg->dynptr.type ||
			    old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot ||
			    !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
				return false;
			break;
		case STACK_ITER:
			old_reg = &old->stack[spi].spilled_ptr;
			cur_reg = &cur->stack[spi].spilled_ptr;
			/* iter.depth is not compared between states as it
			 * doesn't matter for correctness and would otherwise
			 * prevent convergence; we maintain it only to prevent
			 * infinite loop check triggering, see
			 * iter_active_depths_differ()
			 */
			if (old_reg->iter.btf != cur_reg->iter.btf ||
			    old_reg->iter.btf_id != cur_reg->iter.btf_id ||
			    old_reg->iter.state != cur_reg->iter.state ||
			    /* ignore {old_reg,cur_reg}->iter.depth, see above */
			    !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
				return false;
			break;
		case STACK_INVALID:
			continue;
		/* Ensure that new unhandled slot types return false by default */
		default:
			return false;
		}
	}
	return true;
}
static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
		    struct bpf_idmap *idmap)
{
	int i;

	if (old->acquired_refs != cur->acquired_refs)
		return false;

	for (i = 0; i < old->acquired_refs; i++) {
		if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) ||
		    old->refs[i].type != cur->refs[i].type)
			return false;
		switch (old->refs[i].type) {
		case REF_TYPE_LOCK:
			if (old->refs[i].ptr != cur->refs[i].ptr)
				return false;
			break;
		default:
			WARN_ONCE(1, "Unhandled enum type for reference state: %d\n", old->refs[i].type);
			return false;
		}
	}

	return true;
}
/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when verifier exploring different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state then this execution
 * branch doesn't need to be explored further, since verifier already
 * concluded that more strict state leads to valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
 * Example:
 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
 *
 * In other words if current stack state (one being explored) has more
 * valid slots than old one that already passed validation, it means
 * the verifier can stop exploring and conclude that current state is valid too
 *
 * Similarly with registers. If explored state has register type as invalid
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
			      struct bpf_func_state *cur, enum exact_level exact)
{
	int i;

	if (old->callback_depth > cur->callback_depth)
		return false;

	for (i = 0; i < MAX_BPF_REG; i++)
		if (!regsafe(env, &old->regs[i], &cur->regs[i],
			     &env->idmap_scratch, exact))
			return false;

	if (!stacksafe(env, old, cur, &env->idmap_scratch, exact))
		return false;

	if (!refsafe(old, cur, &env->idmap_scratch))
		return false;

	return true;
}
static void reset_idmap_scratch(struct bpf_verifier_env *env)
{
	env->idmap_scratch.tmp_id_gen = env->id_gen;
	memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map));
}
static bool states_equal(struct bpf_verifier_env *env,
			 struct bpf_verifier_state *old,
			 struct bpf_verifier_state *cur,
			 enum exact_level exact)
{
	int i;

	if (old->curframe != cur->curframe)
		return false;

	reset_idmap_scratch(env);

	/* Verification state from speculative execution simulation
	 * must never prune a non-speculative execution one.
	 */
	if (old->speculative && !cur->speculative)
		return false;

	if (old->active_rcu_lock != cur->active_rcu_lock)
		return false;

	if (old->active_preempt_lock != cur->active_preempt_lock)
		return false;

	if (old->in_sleepable != cur->in_sleepable)
		return false;

	/* for states to be equal callsites have to be the same
	 * and all frame states need to be equivalent
	 */
	for (i = 0; i <= old->curframe; i++) {
		if (old->frame[i]->callsite != cur->frame[i]->callsite)
			return false;
		if (!func_states_equal(env, old->frame[i], cur->frame[i], exact))
			return false;
	}
	return true;
}
/* Return 0 if no propagation happened. Return negative error code if error
 * happened. Otherwise, return the propagated bit.
 */
static int propagate_liveness_reg(struct bpf_verifier_env *env,
				  struct bpf_reg_state *reg,
				  struct bpf_reg_state *parent_reg)
{
	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
	u8 flag = reg->live & REG_LIVE_READ;
	int err;

	/* When comes here, read flags of PARENT_REG or REG could be any of
	 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
	 * of propagation if PARENT_REG has strongest REG_LIVE_READ64.
	 */
	if (parent_flag == REG_LIVE_READ64 ||
	    /* Or if there is no read flag from REG. */
	    !flag ||
	    /* Or if the read flag from REG is the same as PARENT_REG. */
	    parent_flag == flag)
		return 0;

	err = mark_reg_read(env, reg, parent_reg, flag);
	if (err)
		return err;

	return flag;
}
/* A write screens off any subsequent reads; but write marks come from the
 * straight-line code between a state and its parent. When we arrive at an
 * equivalent state (jump target or such) we didn't arrive by the straight-line
 * code, so read marks in the state must propagate to the parent regardless
 * of the state's write marks. That's what 'parent == state->parent' comparison
 * in mark_reg_read() is for.
 */
static int propagate_liveness(struct bpf_verifier_env *env,
			      const struct bpf_verifier_state *vstate,
			      struct bpf_verifier_state *vparent)
{
	struct bpf_reg_state *state_reg, *parent_reg;
	struct bpf_func_state *state, *parent;
	int i, frame, err = 0;

	if (vparent->curframe != vstate->curframe) {
		WARN(1, "propagate_live: parent frame %d current frame %d\n",
		     vparent->curframe, vstate->curframe);
		return -EFAULT;
	}
	/* Propagate read liveness of registers... */
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
	for (frame = 0; frame <= vstate->curframe; frame++) {
		parent = vparent->frame[frame];
		state = vstate->frame[frame];
		parent_reg = parent->regs;
		state_reg = state->regs;
		/* We don't need to worry about FP liveness, it's read-only */
		for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
			err = propagate_liveness_reg(env, &state_reg[i],
						     &parent_reg[i]);
			if (err < 0)
				return err;
			if (err == REG_LIVE_READ64)
				mark_insn_zext(env, &parent_reg[i]);
		}

		/* Propagate stack slots. */
		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
			parent_reg = &parent->stack[i].spilled_ptr;
			state_reg = &state->stack[i].spilled_ptr;
			err = propagate_liveness_reg(env, state_reg,
						     parent_reg);
			if (err < 0)
				return err;
		}
	}
	return 0;
}
/* find precise scalars in the previous equivalent state and
 * propagate them into the current state
 */
static int propagate_precision(struct bpf_verifier_env *env,
			       const struct bpf_verifier_state *old)
{
	struct bpf_reg_state *state_reg;
	struct bpf_func_state *state;
	int i, err = 0, fr;
	bool first;

	for (fr = old->curframe; fr >= 0; fr--) {
		state = old->frame[fr];
		state_reg = state->regs;
		first = true;
		for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
			if (state_reg->type != SCALAR_VALUE ||
			    !state_reg->precise ||
			    !(state_reg->live & REG_LIVE_READ))
				continue;
			if (env->log.level & BPF_LOG_LEVEL2) {
				if (first)
					verbose(env, "frame %d: propagating r%d", fr, i);
				else
					verbose(env, ",r%d", i);
			}
			bt_set_frame_reg(&env->bt, fr, i);
			first = false;
		}

		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
			if (!is_spilled_reg(&state->stack[i]))
				continue;
			state_reg = &state->stack[i].spilled_ptr;
			if (state_reg->type != SCALAR_VALUE ||
			    !state_reg->precise ||
			    !(state_reg->live & REG_LIVE_READ))
				continue;
			if (env->log.level & BPF_LOG_LEVEL2) {
				if (first)
					verbose(env, "frame %d: propagating fp%d",
						fr, (-i - 1) * BPF_REG_SIZE);
				else
					verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE);
			}
			bt_set_frame_slot(&env->bt, fr, i);
			first = false;
		}
		if (!first)
			verbose(env, "\n");
	}

	err = mark_chain_precision_batch(env);
	if (err < 0)
		return err;

	return 0;
}
static bool states_maybe_looping(struct bpf_verifier_state *old,
				 struct bpf_verifier_state *cur)
{
	struct bpf_func_state *fold, *fcur;
	int i, fr = cur->curframe;

	if (old->curframe != fr)
		return false;

	fold = old->frame[fr];
	fcur = cur->frame[fr];
	for (i = 0; i < MAX_BPF_REG; i++)
		if (memcmp(&fold->regs[i], &fcur->regs[i],
			   offsetof(struct bpf_reg_state, parent)))
			return false;
	return true;
}
static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
{
	return env->insn_aux_data[insn_idx].is_iter_next;
}
/* is_state_visited() handles iter_next() (see process_iter_next_call() for
 * terminology) calls specially: as opposed to bounded BPF loops, it *expects*
 * states to match, which otherwise would look like an infinite loop. So while
 * iter_next() calls are taken care of, we still need to be careful and
 * prevent erroneous and too eager declaration of "infinite loop", when
 * iterators are involved.
 *
 * Here's a situation in pseudo-BPF assembly form:
 *
 * 0: again:                       ; set up iter_next() call args
 * 1:   r1 = &it                   ; <CHECKPOINT HERE>
 * 2:   call bpf_iter_num_next     ; this is iter_next() call
 * 3:   if r0 == 0 goto done
 * 4:   ... something useful here ...
 * 5:   goto again                 ; another iteration
 * 8:   call bpf_iter_num_destroy  ; clean up iter state
 *
 * This is a typical loop. Let's assume that we have a prune point at 1:,
 * before we get to `call bpf_iter_num_next` (e.g., because of that `goto
 * again`, assuming other heuristics don't get in the way).
 *
 * When we first come to 1:, let's say we have some state X. We proceed
 * to 2:, fork states, enqueue ACTIVE, validate NULL case successfully, exit.
 * Now we come back to validate that forked ACTIVE state. We proceed through
 * 3-5, come to goto, jump to 1:. Let's assume our state didn't change, so we
 * are converging. But the problem is that we don't know that yet, as this
 * convergence has to happen at iter_next() call site only. So if nothing is
 * done, at 1: verifier will use bounded loop logic and declare infinite
 * looping (and would be *technically* correct, if not for iterator's
 * "eventual sticky NULL" contract, see process_iter_next_call()). But we
 * don't want that. So what we do in process_iter_next_call() when we go on
 * another ACTIVE iteration, we bump slot->iter.depth, to mark that it's
 * a different iteration. So when we suspect an infinite loop, we additionally
 * check if any of the *ACTIVE* iterator states depths differ. If yes, we
 * pretend we are not looping and wait for next iter_next() call.
 *
 * This only applies to ACTIVE state. In DRAINED state we don't expect to
 * loop, because that would actually mean infinite loop, as DRAINED state is
 * "sticky", and so we'll keep returning into the same instruction with the
 * same state (at least in one of possible code paths).
 *
 * This approach allows to keep infinite loop heuristic even in the face of
 * active iterator. E.g., C snippet below is and will be detected as
 * infinitely looping:
 *
 *   struct bpf_iter_num it;
 *
 *   bpf_iter_num_new(&it, 0, 10);
 *   while ((p = bpf_iter_num_next(&it))) {
 *           while (x--) {} // <<-- infinite loop here
 *   }
 */
static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
{
	struct bpf_reg_state *slot, *cur_slot;
	struct bpf_func_state *state;
	int i, fr;

	for (fr = old->curframe; fr >= 0; fr--) {
		state = old->frame[fr];
		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
			if (state->stack[i].slot_type[0] != STACK_ITER)
				continue;

			slot = &state->stack[i].spilled_ptr;
			if (slot->iter.state != BPF_ITER_STATE_ACTIVE)
				continue;

			cur_slot = &cur->frame[fr]->stack[i].spilled_ptr;
			if (cur_slot->iter.depth != slot->iter.depth)
				return true;
		}
	}
	return false;
}
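
/* Illustrative sketch (compiled out): the open-coded iterator pattern that the
 * comment above reasons about, as it typically looks on the BPF program side.
 * The verifier expects states at the bpf_iter_num_next() call site to
 * converge; each new ACTIVE iteration bumps iter.depth so the bounded-loop
 * heuristic does not misfire. The three kfunc names are the real numeric
 * iterator kfuncs; the surrounding function is made up for the example and in
 * a real program those kfuncs would be declared via vmlinux.h/bpf_helpers.
 */
#if 0
extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end);
extern int *bpf_iter_num_next(struct bpf_iter_num *it);
extern void bpf_iter_num_destroy(struct bpf_iter_num *it);

static int demo_iter_sum(void)
{
	struct bpf_iter_num it;
	int *v, sum = 0;

	bpf_iter_num_new(&it, 0, 10);
	while ((v = bpf_iter_num_next(&it)))	/* checkpoint + convergence point */
		sum += *v;
	bpf_iter_num_destroy(&it);
	return sum;
}
#endif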
static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_verifier_state_list *new_sl;
	struct bpf_verifier_state_list *sl, **pprev;
	struct bpf_verifier_state *cur = env->cur_state, *new, *loop_entry;
	int i, j, n, err, states_cnt = 0;
	bool force_new_state, add_new_state, force_exact;

	force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx) ||
			  /* Avoid accumulating infinitely long jmp history */
			  cur->insn_hist_end - cur->insn_hist_start > 40;

	/* bpf progs typically have pruning point every 4 instructions
	 * http://vger.kernel.org/bpfconf2019.html#session-1
	 * Do not add new state for future pruning if the verifier hasn't seen
	 * at least 2 jumps and at least 8 instructions.
	 * This heuristic helps decrease 'total_states' and 'peak_states' metric.
	 * In tests that amounts to up to 50% reduction into total verifier
	 * memory consumption and 20% verifier time speedup.
	 */
	add_new_state = force_new_state;
	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
	    env->insn_processed - env->prev_insn_processed >= 8)
		add_new_state = true;

	pprev = explored_state(env, insn_idx);
	sl = *pprev;

	clean_live_states(env, insn_idx, cur);

	while (sl) {
		states_cnt++;
		if (sl->state.insn_idx != insn_idx)
			goto next;

		if (sl->state.branches) {
			struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];

			if (frame->in_async_callback_fn &&
			    frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
				/* Different async_entry_cnt means that the verifier is
				 * processing another entry into async callback.
				 * Seeing the same state is not an indication of infinite
				 * loop or infinite recursion.
				 * But finding the same state doesn't mean that it's safe
				 * to stop processing the current state. The previous state
				 * hasn't yet reached bpf_exit, since state.branches > 0.
				 * Checking in_async_callback_fn alone is not enough either.
				 * Since the verifier still needs to catch infinite loops
				 * inside async callbacks.
				 */
				goto skip_inf_loop_check;
			}
			/* BPF open-coded iterators loop detection is special.
			 * states_maybe_looping() logic is too simplistic in detecting
			 * states that *might* be equivalent, because it doesn't know
			 * about ID remapping, so don't even perform it.
			 * See process_iter_next_call() and iter_active_depths_differ()
			 * for overview of the logic. When current and one of parent
			 * states are detected as equivalent, it's a good thing: we prove
			 * convergence and can stop simulating further iterations.
			 * It's safe to assume that iterator loop will finish, taking into
			 * account iter_next() contract of eventually returning
			 * sticky NULL result.
			 *
			 * Note, that states have to be compared exactly in this case because
			 * read and precision marks might not be finalized inside the loop.
			 * E.g. as in the program below:
			 *
			 *  1. r7 = -16
			 *  2. r6 = bpf_get_prandom_u32()
			 *  3. while (bpf_iter_num_next(&fp[-8])) {
			 *  4.   if (r6 != 42) {
			 *  5.     r7 = -32
			 *  6.     r6 = bpf_get_prandom_u32()
			 *       }
			 * 11.   r8 = *(u64 *)(r0 + 0)
			 * 12.   r6 = bpf_get_prandom_u32()
			 *     }
			 *
			 * Here verifier would first visit path 1-3, create a checkpoint at 3
			 * with r7=-16, continue to 4-7,3. Existing checkpoint at 3 does
			 * not have read or precision mark for r7 yet, thus inexact states
			 * comparison would discard current state with r7=-32
			 * => unsafe memory access at 11 would not be caught.
			 */
			if (is_iter_next_insn(env, insn_idx)) {
				if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
					struct bpf_func_state *cur_frame;
					struct bpf_reg_state *iter_state, *iter_reg;
					int spi;

					cur_frame = cur->frame[cur->curframe];
					/* btf_check_iter_kfuncs() enforces that
					 * iter state pointer is always the first arg
					 */
					iter_reg = &cur_frame->regs[BPF_REG_1];
					/* current state is valid due to states_equal(),
					 * so we can assume valid iter and reg state,
					 * no need for extra (re-)validations
					 */
					spi = __get_spi(iter_reg->off + iter_reg->var_off.value);
					iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr;
					if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) {
						update_loop_entry(cur, &sl->state);
						goto hit;
					}
				}
				goto skip_inf_loop_check;
			}
			if (is_may_goto_insn_at(env, insn_idx)) {
				if (sl->state.may_goto_depth != cur->may_goto_depth &&
				    states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
					update_loop_entry(cur, &sl->state);
					goto hit;
				}
			}
			if (calls_callback(env, insn_idx)) {
				if (states_equal(env, &sl->state, cur, RANGE_WITHIN))
					goto hit;
				goto skip_inf_loop_check;
			}
			/* attempt to detect infinite loop to avoid unnecessary doomed work */
			if (states_maybe_looping(&sl->state, cur) &&
			    states_equal(env, &sl->state, cur, EXACT) &&
			    !iter_active_depths_differ(&sl->state, cur) &&
			    sl->state.may_goto_depth == cur->may_goto_depth &&
			    sl->state.callback_unroll_depth == cur->callback_unroll_depth) {
				verbose_linfo(env, insn_idx, "; ");
				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
				verbose(env, "cur state:");
				print_verifier_state(env, cur->frame[cur->curframe], true);
				verbose(env, "old state:");
				print_verifier_state(env, sl->state.frame[cur->curframe], true);
				return -EINVAL;
			}
			/* if the verifier is processing a loop, avoid adding new state
			 * too often, since different loop iterations have distinct
			 * states and may not help future pruning.
			 * This threshold shouldn't be too low to make sure that
			 * a loop with large bound will be rejected quickly.
			 * The most abusive loop will be:
			 * r1 += 1
			 * if r1 < 1000000 goto pc-2
			 * 1M insn_processed limit / 100 == 10k peak states.
			 * This threshold shouldn't be too high either, since states
			 * at the end of the loop are likely to be useful in pruning.
			 */
skip_inf_loop_check:
			if (!force_new_state &&
			    env->jmps_processed - env->prev_jmps_processed < 20 &&
			    env->insn_processed - env->prev_insn_processed < 100)
				add_new_state = false;
		}
		/* If sl->state is a part of a loop and this loop's entry is a part of
		 * current verification path then states have to be compared exactly.
		 * 'force_exact' is needed to catch the following case:
		 *
		 *  initial     Here state 'succ' was processed first,
		 *    |         it was eventually tracked to produce a
		 *    V         state identical to 'hdr'.
		 *   .---------> hdr  All branches from 'succ' had been explored
		 *   |            |   and thus 'succ' has its .branches == 0.
		 *   |            V
		 *   |    .------...   Suppose states 'cur' and 'succ' correspond
		 *   |    |      |     to the same instruction + callsites.
		 *   |    V      V     In such case it is necessary to check
		 *   |   ...    ...    if 'succ' and 'cur' are states_equal().
		 *   |    |      |     If 'succ' and 'cur' are a part of the
		 *   |    V      V     same loop exact flag has to be set.
		 *   |   succ <- cur   To check if that is the case, verify
		 *   |    |            if loop entry of 'succ' is in current
		 *   |    V            DFS path.
		 *
		 * Additional details are in the comment before get_loop_entry().
		 */
		loop_entry = get_loop_entry(&sl->state);
		force_exact = loop_entry && loop_entry->branches > 0;
		if (states_equal(env, &sl->state, cur, force_exact ? RANGE_WITHIN : NOT_EXACT)) {
			if (force_exact)
				update_loop_entry(cur, loop_entry);
hit:
			sl->hit_cnt++;
			/* reached equivalent register/stack state,
			 * prune the search.
			 * Registers read by the continuation are read by us.
			 * If we have any write marks in env->cur_state, they
			 * will prevent corresponding reads in the continuation
			 * from reaching our parent (an explored_state). Our
			 * own state will get the read marks recorded, but
			 * they'll be immediately forgotten as we're pruning
			 * this state and will pop a new one.
			 */
			err = propagate_liveness(env, &sl->state, cur);

			/* if previous state reached the exit with precision and
			 * current state is equivalent to it (except precision marks)
			 * the precision needs to be propagated back in
			 * the current state.
			 */
			if (is_jmp_point(env, env->insn_idx))
				err = err ? : push_insn_history(env, cur, 0, 0);
			err = err ? : propagate_precision(env, &sl->state);
			if (err)
				return err;
			return 1;
		}
		/* when new state is not going to be added do not increase miss count.
		 * Otherwise several loop iterations will remove the state
		 * recorded earlier. The goal of these heuristics is to have
		 * states from some iterations of the loop (some in the beginning
		 * and some at the end) to help pruning.
		 */
		if (add_new_state)
			sl->miss_cnt++;
		/* heuristic to determine whether this state is beneficial
		 * to keep checking from state equivalence point of view.
		 * Higher numbers increase max_states_per_insn and verification time,
		 * but do not meaningfully decrease insn_processed.
		 * 'n' controls how many times state could miss before eviction.
		 * Use bigger 'n' for checkpoints because evicting checkpoint states
		 * too early would hinder iterator convergence.
		 */
		n = is_force_checkpoint(env, insn_idx) && sl->state.branches > 0 ? 64 : 3;
		if (sl->miss_cnt > sl->hit_cnt * n + n) {
			/* the state is unlikely to be useful. Remove it to
			 * speed up verification
			 */
			*pprev = sl->next;
			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE &&
			    !sl->state.used_as_loop_entry) {
				u32 br = sl->state.branches;

				WARN_ONCE(br,
					  "BUG live_done but branches_to_explore %d\n",
					  br);
				free_verifier_state(&sl->state, false);
				kfree(sl);
				env->peak_states--;
			} else {
				/* cannot free this state, since parentage chain may
				 * walk it later. Add it for free_list instead to
				 * be freed at the end of verification
				 */
				sl->next = env->free_list;
				env->free_list = sl;
			}
			sl = *pprev;
			continue;
		}
next:
		pprev = &sl->next;
		sl = *pprev;
	}

	if (env->max_states_per_insn < states_cnt)
		env->max_states_per_insn = states_cnt;

	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
		return 0;

	if (!add_new_state)
		return 0;

	/* There were no equivalent states, remember the current one.
	 * Technically the current state is not proven to be safe yet,
	 * but it will either reach outer most bpf_exit (which means it's safe)
	 * or it will be rejected. When there are no loops the verifier won't be
	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
	 * again on the way to bpf_exit.
	 * When looping the sl->state.branches will be > 0 and this state
	 * will not be considered for equivalence until branches == 0.
	 */
	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
	if (!new_sl)
		return -ENOMEM;
	env->total_states++;
	env->peak_states++;
	env->prev_jmps_processed = env->jmps_processed;
	env->prev_insn_processed = env->insn_processed;

	/* forget precise markings we inherited, see __mark_chain_precision */
	if (env->bpf_capable)
		mark_all_scalars_imprecise(env, cur);

	/* add new state to the head of linked list */
	new = &new_sl->state;
	err = copy_verifier_state(new, cur);
	if (err) {
		free_verifier_state(new, false);
		kfree(new_sl);
		return err;
	}
	new->insn_idx = insn_idx;
	WARN_ONCE(new->branches != 1,
		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);

	cur->parent = new;
	cur->first_insn_idx = insn_idx;
	cur->insn_hist_start = cur->insn_hist_end;
	cur->dfs_depth = new->dfs_depth + 1;
	new_sl->next = *explored_state(env, insn_idx);
	*explored_state(env, insn_idx) = new_sl;
	/* connect new state to parentage chain. Current frame needs all
	 * registers connected. Only r6 - r9 of the callers are alive (pushed
	 * to the stack implicitly by JITs) so in callers' frames connect just
	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
	 * the state of the call instruction (with WRITTEN set), and r0 comes
	 * from callee with its full parentage chain, anyway.
	 */
	/* clear write marks in current state: the writes we did are not writes
	 * our child did, so they don't screen off its reads from us.
	 * (There are no read marks in current state, because reads always mark
	 * their parent and current state never has children yet. Only
	 * explored_states can get read marks.)
	 */
	for (j = 0; j <= cur->curframe; j++) {
		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
		for (i = 0; i < BPF_REG_FP; i++)
			cur->frame[j]->regs[i].live = REG_LIVE_NONE;
	}

	/* all stack frames are accessible from callee, clear them all */
	for (j = 0; j <= cur->curframe; j++) {
		struct bpf_func_state *frame = cur->frame[j];
		struct bpf_func_state *newframe = new->frame[j];

		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
			frame->stack[i].spilled_ptr.parent =
						&newframe->stack[i].spilled_ptr;
		}
	}
	return 0;
}
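
/* Illustrative sketch (compiled out): the eviction rule used above. With the
 * default n = 3, a stored state that has never produced a pruning hit is
 * dropped once it has missed 4 times (4 > 0 * 3 + 3); with one hit it survives
 * up to 6 misses. Forced checkpoints inside loops use n = 64 so they stay
 * around long enough for iterator convergence. The function name is made up
 * for the example.
 */
#if 0
static int demo_should_evict(unsigned int miss_cnt, unsigned int hit_cnt, unsigned int n)
{
	return miss_cnt > hit_cnt * n + n;
}
#endif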
/* Return true if it's OK to have the same insn return a different type. */
static bool reg_type_mismatch_ok(enum bpf_reg_type type)
{
	switch (base_type(type)) {
	case PTR_TO_CTX:
	case PTR_TO_SOCKET:
	case PTR_TO_SOCK_COMMON:
	case PTR_TO_TCP_SOCK:
	case PTR_TO_XDP_SOCK:
	case PTR_TO_BTF_ID:
		return false;
	default:
		return true;
	}
}
/* If an instruction was previously used with particular pointer types, then we
 * need to be careful to avoid cases such as the below, where it may be ok
 * for one branch accessing the pointer, but not ok for the other branch:
 *
 * R1 = sock_ptr
 * goto X;
 * ...
 * R1 = some_other_valid_ptr;
 * goto X;
 * ...
 * R2 = *(u32 *)(R1 + 0);
 */
static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
{
	return src != prev && (!reg_type_mismatch_ok(src) ||
			       !reg_type_mismatch_ok(prev));
}
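
/* Illustrative sketch (compiled out): the situation reg_type_mismatch() guards
 * against, expressed in C form. Both branches funnel into one load
 * instruction; if one path reaches it with a ctx pointer and the other with a
 * map-value pointer, the same insn would need two different memory-access
 * checks, so the verifier rejects it ("same insn cannot be used with different
 * pointers"). All names here are made up for the example.
 */
#if 0
static unsigned int demo_two_ptr_types(int cond, void *ctx, void *map_value)
{
	void *p = cond ? ctx : map_value;	/* PTR_TO_CTX vs PTR_TO_MAP_VALUE */

	return *(unsigned int *)p;	/* one load insn reached with two pointer types */
}
#endif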
static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type,
			     bool allow_trust_mismatch)
{
	enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type;

	if (*prev_type == NOT_INIT) {
		/* Saw a valid insn
		 * dst_reg = *(u32 *)(src_reg + off)
		 * save type to validate intersecting paths
		 */
		*prev_type = type;
	} else if (reg_type_mismatch(type, *prev_type)) {
		/* Abuser program is trying to use the same insn
		 * dst_reg = *(u32*) (src_reg + off)
		 * with different pointer types:
		 * src_reg == ctx in one branch and
		 * src_reg == stack|map in some other branch.
		 */
		if (allow_trust_mismatch &&
		    base_type(type) == PTR_TO_BTF_ID &&
		    base_type(*prev_type) == PTR_TO_BTF_ID) {
			/*
			 * Have to support a use case when one path through
			 * the program yields TRUSTED pointer while another
			 * is UNTRUSTED. Fallback to UNTRUSTED to generate
			 * BPF_PROBE_MEM/BPF_PROBE_MEMSX.
			 */
			*prev_type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
		} else {
			verbose(env, "same insn cannot be used with different pointers\n");
			return -EINVAL;
		}
	}

	return 0;
}
static int do_check(struct bpf_verifier_env *env)
{
	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_insn *insns = env->prog->insnsi;
	struct bpf_reg_state *regs;
	int insn_cnt = env->prog->len;
	bool do_print_state = false;
	int prev_insn_idx = -1;
	int err;
	bool exception_exit = false;
	struct bpf_insn *insn;
	u8 class;

	for (;;) {
		/* reset current history entry on each new instruction */
		env->cur_hist_ent = NULL;

		env->prev_insn_idx = prev_insn_idx;
		if (env->insn_idx >= insn_cnt) {
			verbose(env, "invalid insn idx %d insn_cnt %d\n",
				env->insn_idx, insn_cnt);
			return -EFAULT;
		}

		insn = &insns[env->insn_idx];
		class = BPF_CLASS(insn->code);

		if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
			verbose(env,
				"BPF program is too large. Processed %d insn\n",
				env->insn_processed);
			return -E2BIG;
		}

		state->last_insn_idx = env->prev_insn_idx;

		if (is_prune_point(env, env->insn_idx)) {
			err = is_state_visited(env, env->insn_idx);
			if (err < 0)
				return err;
			if (err == 1) {
				/* found equivalent state, can prune the search */
				if (env->log.level & BPF_LOG_LEVEL) {
					if (do_print_state)
						verbose(env, "\nfrom %d to %d%s: safe\n",
							env->prev_insn_idx, env->insn_idx,
							env->cur_state->speculative ?
							" (speculative execution)" : "");
					else
						verbose(env, "%d: safe\n", env->insn_idx);
				}
				goto process_bpf_exit;
			}
		}

		if (is_jmp_point(env, env->insn_idx)) {
			err = push_insn_history(env, state, 0, 0);
			if (err)
				return err;
		}

		if (signal_pending(current))
			return -EAGAIN;

		if (need_resched())
			cond_resched();

		if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) {
			verbose(env, "\nfrom %d to %d%s:",
				env->prev_insn_idx, env->insn_idx,
				env->cur_state->speculative ?
				" (speculative execution)" : "");
			print_verifier_state(env, state->frame[state->curframe], true);
			do_print_state = false;
		}

		if (env->log.level & BPF_LOG_LEVEL) {
			const struct bpf_insn_cbs cbs = {
				.cb_call	= disasm_kfunc_name,
				.cb_print	= verbose,
				.private_data	= env,
			};

			if (verifier_state_scratched(env))
				print_insn_state(env, state->frame[state->curframe]);

			verbose_linfo(env, env->insn_idx, "; ");
			env->prev_log_pos = env->log.end_pos;
			verbose(env, "%d: ", env->insn_idx);
			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
			env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos;
			env->prev_log_pos = env->log.end_pos;
		}

		if (bpf_prog_is_offloaded(env->prog->aux)) {
			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
							   env->prev_insn_idx);
			if (err)
				return err;
		}

		regs = cur_regs(env);
		sanitize_mark_insn_seen(env);
		prev_insn_idx = env->insn_idx;

		if (class == BPF_ALU || class == BPF_ALU64) {
			err = check_alu_op(env, insn);
			if (err)
				return err;

		} else if (class == BPF_LDX) {
			enum bpf_reg_type src_reg_type;

			/* check for reserved fields is already done */

			/* check src operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;

			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
			if (err)
				return err;

			src_reg_type = regs[insn->src_reg].type;

			/* check that memory (src_reg + off) is readable,
			 * the state of dst_reg will be updated by this func
			 */
			err = check_mem_access(env, env->insn_idx, insn->src_reg,
					       insn->off, BPF_SIZE(insn->code),
					       BPF_READ, insn->dst_reg, false,
					       BPF_MODE(insn->code) == BPF_MEMSX);
			err = err ?: save_aux_ptr_type(env, src_reg_type, true);
			err = err ?: reg_bounds_sanity_check(env, &regs[insn->dst_reg], "ldx");
			if (err)
				return err;
		} else if (class == BPF_STX) {
			enum bpf_reg_type dst_reg_type;

			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
				err = check_atomic(env, env->insn_idx, insn);
				if (err)
					return err;
				env->insn_idx++;
				continue;
			}

			if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
				verbose(env, "BPF_STX uses reserved fields\n");
				return -EINVAL;
			}

			/* check src1 operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;
			/* check src2 operand */
			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			dst_reg_type = regs[insn->dst_reg].type;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
					       insn->off, BPF_SIZE(insn->code),
					       BPF_WRITE, insn->src_reg, false, false);
			if (err)
				return err;

			err = save_aux_ptr_type(env, dst_reg_type, false);
			if (err)
				return err;
		} else if (class == BPF_ST) {
			enum bpf_reg_type dst_reg_type;

			if (BPF_MODE(insn->code) != BPF_MEM ||
			    insn->src_reg != BPF_REG_0) {
				verbose(env, "BPF_ST uses reserved fields\n");
				return -EINVAL;
			}
			/* check src operand */
			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			dst_reg_type = regs[insn->dst_reg].type;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
					       insn->off, BPF_SIZE(insn->code),
					       BPF_WRITE, -1, false, false);
			if (err)
				return err;

			err = save_aux_ptr_type(env, dst_reg_type, false);
			if (err)
				return err;
		} else if (class == BPF_JMP || class == BPF_JMP32) {
			u8 opcode = BPF_OP(insn->code);

			env->jmps_processed++;
			if (opcode == BPF_CALL) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    (insn->src_reg != BPF_PSEUDO_KFUNC_CALL
				     && insn->off != 0) ||
				    (insn->src_reg != BPF_REG_0 &&
				     insn->src_reg != BPF_PSEUDO_CALL &&
				     insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
				    insn->dst_reg != BPF_REG_0 ||
				    class == BPF_JMP32) {
					verbose(env, "BPF_CALL uses reserved fields\n");
					return -EINVAL;
				}

				if (cur_func(env)->active_locks) {
					if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
					    (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
					     (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {
						verbose(env, "function calls are not allowed while holding a lock\n");
						return -EINVAL;
					}
				}
				if (insn->src_reg == BPF_PSEUDO_CALL) {
					err = check_func_call(env, insn, &env->insn_idx);
				} else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
					err = check_kfunc_call(env, insn, &env->insn_idx);
					if (!err && is_bpf_throw_kfunc(insn)) {
						exception_exit = true;
						goto process_bpf_exit_full;
					}
				} else {
					err = check_helper_call(env, insn, &env->insn_idx);
				}
				if (err)
					return err;

				mark_reg_scratched(env, BPF_REG_0);
			} else if (opcode == BPF_JA) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0 ||
				    (class == BPF_JMP && insn->imm != 0) ||
				    (class == BPF_JMP32 && insn->off != 0)) {
					verbose(env, "BPF_JA uses reserved fields\n");
					return -EINVAL;
				}

				if (class == BPF_JMP)
					env->insn_idx += insn->off + 1;
				else
					env->insn_idx += insn->imm + 1;
				continue;

			} else if (opcode == BPF_EXIT) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0 ||
				    class == BPF_JMP32) {
					verbose(env, "BPF_EXIT uses reserved fields\n");
					return -EINVAL;
				}
process_bpf_exit_full:
				/* We must do check_reference_leak here before
				 * prepare_func_exit to handle the case when
				 * state->curframe > 0, it may be a callback
				 * function, for which reference_state must
				 * match caller reference state when it exits.
				 */
				err = check_resource_leak(env, exception_exit, !env->cur_state->curframe,
							  "BPF_EXIT instruction");
				if (err)
					return err;

				/* The side effect of the prepare_func_exit
				 * which is being skipped is that it frees
				 * bpf_func_state. Typically, process_bpf_exit
				 * will only be hit with outermost exit.
				 * copy_verifier_state in pop_stack will handle
				 * freeing of any extra bpf_func_state left over
				 * from not processing all nested function
				 * exits. We also skip return code checks as
				 * they are not needed for exceptional exits.
				 */
				if (exception_exit)
					goto process_bpf_exit;

				if (state->curframe) {
					/* exit from nested function */
					err = prepare_func_exit(env, &env->insn_idx);
					if (err)
						return err;
					do_print_state = true;
					continue;
				}

				err = check_return_code(env, BPF_REG_0, "R0");
				if (err)
					return err;
process_bpf_exit:
				mark_verifier_state_scratched(env);
				update_branch_counts(env, env->cur_state);
				err = pop_stack(env, &prev_insn_idx,
						&env->insn_idx, pop_log);
				if (err < 0) {
					if (err != -ENOENT)
						return err;
					break;
				} else {
					do_print_state = true;
					continue;
				}
			} else {
				err = check_cond_jmp_op(env, insn, &env->insn_idx);
				if (err)
					return err;
			}
		} else if (class == BPF_LD) {
			u8 mode = BPF_MODE(insn->code);

			if (mode == BPF_ABS || mode == BPF_IND) {
				err = check_ld_abs(env, insn);
				if (err)
					return err;

			} else if (mode == BPF_IMM) {
				err = check_ld_imm(env, insn);
				if (err)
					return err;

				env->insn_idx++;
				sanitize_mark_insn_seen(env);
			} else {
				verbose(env, "invalid BPF_LD mode\n");
				return -EINVAL;
			}
		} else {
			verbose(env, "unknown insn class %d\n", class);
			return -EINVAL;
		}

		env->insn_idx++;
	}

	return 0;
}
static int find_btf_percpu_datasec(struct btf *btf)
{
	const struct btf_type *t;
	const char *tname;
	int n, i;

	/*
	 * Both vmlinux and module each have their own ".data..percpu"
	 * DATASECs in BTF. So for module's case, we need to skip vmlinux BTF
	 * types to look at only module's own BTF types.
	 */
	n = btf_nr_types(btf);
	if (btf_is_module(btf))
		i = btf_nr_types(btf_vmlinux);
	else
		i = 1;

	for (; i < n; i++) {
		t = btf_type_by_id(btf, i);
		if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
			continue;

		tname = btf_name_by_offset(btf, t->name_off);
		if (!strcmp(tname, ".data..percpu"))
			return i;
	}

	return -ENOENT;
}
/* replace pseudo btf_id with kernel symbol address */
static int check_pseudo_btf_id(struct bpf_verifier_env *env,
			       struct bpf_insn *insn,
			       struct bpf_insn_aux_data *aux)
{
	const struct btf_var_secinfo *vsi;
	const struct btf_type *datasec;
	struct btf_mod_pair *btf_mod;
	const struct btf_type *t;
	const char *sym_name;
	bool percpu = false;
	u32 type, id = insn->imm;
	struct btf *btf;
	s32 datasec_id;
	u64 addr;
	int i, btf_fd, err;

	btf_fd = insn[1].imm;
	if (btf_fd) {
		btf = btf_get_by_fd(btf_fd);
		if (IS_ERR(btf)) {
			verbose(env, "invalid module BTF object FD specified.\n");
			return -EINVAL;
		}
	} else {
		if (!btf_vmlinux) {
			verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
			return -EINVAL;
		}
		btf = btf_vmlinux;
		btf_get(btf);
	}

	t = btf_type_by_id(btf, id);
	if (!t) {
		verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
		err = -ENOENT;
		goto err_put;
	}

	if (!btf_type_is_var(t) && !btf_type_is_func(t)) {
		verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR or KIND_FUNC\n", id);
		err = -EINVAL;
		goto err_put;
	}

	sym_name = btf_name_by_offset(btf, t->name_off);
	addr = kallsyms_lookup_name(sym_name);
	if (!addr) {
		verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
			sym_name);
		err = -ENOENT;
		goto err_put;
	}
	insn[0].imm = (u32)addr;
	insn[1].imm = addr >> 32;

	if (btf_type_is_func(t)) {
		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
		aux->btf_var.mem_size = 0;
		goto check_btf;
	}

	datasec_id = find_btf_percpu_datasec(btf);
	if (datasec_id > 0) {
		datasec = btf_type_by_id(btf, datasec_id);
		for_each_vsi(i, datasec, vsi) {
			if (vsi->type == id) {
				percpu = true;
				break;
			}
		}
	}

	type = t->type;
	t = btf_type_skip_modifiers(btf, type, NULL);
	if (percpu) {
		aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU;
		aux->btf_var.btf = btf;
		aux->btf_var.btf_id = type;
	} else if (!btf_type_is_struct(t)) {
		const struct btf_type *ret;
		const char *tname;
		u32 tsize;

		/* resolve the type size of ksym. */
		ret = btf_resolve_size(btf, t, &tsize);
		if (IS_ERR(ret)) {
			tname = btf_name_by_offset(btf, t->name_off);
			verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
				tname, PTR_ERR(ret));
			err = -EINVAL;
			goto err_put;
		}
		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
		aux->btf_var.mem_size = tsize;
	} else {
		aux->btf_var.reg_type = PTR_TO_BTF_ID;
		aux->btf_var.btf = btf;
		aux->btf_var.btf_id = type;
	}
check_btf:
	/* check whether we recorded this BTF (and maybe module) already */
	for (i = 0; i < env->used_btf_cnt; i++) {
		if (env->used_btfs[i].btf == btf) {
			btf_put(btf);
			return 0;
		}
	}

	if (env->used_btf_cnt >= MAX_USED_BTFS) {
		err = -E2BIG;
		goto err_put;
	}

	btf_mod = &env->used_btfs[env->used_btf_cnt];
	btf_mod->btf = btf;
	btf_mod->module = NULL;

	/* if we reference variables from kernel module, bump its refcount */
	if (btf_is_module(btf)) {
		btf_mod->module = btf_try_get_module(btf);
		if (!btf_mod->module) {
			err = -ENXIO;
			goto err_put;
		}
	}

	env->used_btf_cnt++;
	return 0;
err_put:
	btf_put(btf);
	return err;
}
static bool is_tracing_prog_type(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
		return true;
	default:
		return false;
	}
}
static int check_map_prog_compatibility(struct bpf_verifier_env *env,
					struct bpf_map *map,
					struct bpf_prog *prog)
{
	enum bpf_prog_type prog_type = resolve_prog_type(prog);

	if (btf_record_has_field(map->record, BPF_LIST_HEAD) ||
	    btf_record_has_field(map->record, BPF_RB_ROOT)) {
		if (is_tracing_prog_type(prog_type)) {
			verbose(env, "tracing progs cannot use bpf_{list_head,rb_root} yet\n");

	if (btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
			verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");

		if (is_tracing_prog_type(prog_type)) {
			verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");

	if (btf_record_has_field(map->record, BPF_TIMER)) {
		if (is_tracing_prog_type(prog_type)) {
			verbose(env, "tracing progs cannot use bpf_timer yet\n");

	if (btf_record_has_field(map->record, BPF_WORKQUEUE)) {
		if (is_tracing_prog_type(prog_type)) {
			verbose(env, "tracing progs cannot use bpf_wq yet\n");

	if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) &&
	    !bpf_offload_prog_map_match(prog, map)) {
		verbose(env, "offload device mismatch between prog and map\n");

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		verbose(env, "bpf_struct_ops map cannot be used in prog\n");

	if (prog->sleepable)
		switch (map->map_type) {
		case BPF_MAP_TYPE_HASH:
		case BPF_MAP_TYPE_LRU_HASH:
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERCPU_HASH:
		case BPF_MAP_TYPE_PERCPU_ARRAY:
		case BPF_MAP_TYPE_LRU_PERCPU_HASH:
		case BPF_MAP_TYPE_ARRAY_OF_MAPS:
		case BPF_MAP_TYPE_HASH_OF_MAPS:
		case BPF_MAP_TYPE_RINGBUF:
		case BPF_MAP_TYPE_USER_RINGBUF:
		case BPF_MAP_TYPE_INODE_STORAGE:
		case BPF_MAP_TYPE_SK_STORAGE:
		case BPF_MAP_TYPE_TASK_STORAGE:
		case BPF_MAP_TYPE_CGRP_STORAGE:
		case BPF_MAP_TYPE_QUEUE:
		case BPF_MAP_TYPE_STACK:
		case BPF_MAP_TYPE_ARENA:
				"Sleepable programs can only use array, hash, ringbuf and local storage maps\n");
static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
{
	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
/* Add map behind fd to used maps list, if it's not already there, and return
 * its index. Also set *reused to true if this map was already in the list of
 * Returns <0 on error, or >= 0 index, on success.
 */
static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reused)
{
	struct bpf_map *map;

	map = __bpf_map_get(f);
		verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
		return PTR_ERR(map);

	/* check whether we recorded this map already */
	for (i = 0; i < env->used_map_cnt; i++) {
		if (env->used_maps[i] == map) {

	if (env->used_map_cnt >= MAX_USED_MAPS) {
		verbose(env, "The total number of maps per program has reached the limit of %u\n",

	if (env->prog->sleepable)
		atomic64_inc(&map->sleepable_refcnt);

	/* hold the map. If the program is rejected by verifier,
	 * the map will be released by release_maps() or it
	 * will be used by the valid program until it's unloaded
	 * and all maps are released in bpf_free_used_maps()
	 */
	env->used_maps[env->used_map_cnt++] = map;

	return env->used_map_cnt - 1;
/* find and rewrite pseudo imm in ld_imm64 instructions:
 *
 * 1. if it accesses map FD, replace it with actual map pointer.
 * 2. if it accesses btf_id of a VAR, replace it with pointer to the var.
 *
 * NOTE: btf_vmlinux is required for converting pseudo btf_id.
 */
static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;

	err = bpf_prog_calc_tag(env->prog);

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (BPF_CLASS(insn->code) == BPF_LDX &&
		    ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) ||
			verbose(env, "BPF_LDX uses reserved fields\n");

		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
			struct bpf_insn_aux_data *aux;
			struct bpf_map *map;

			if (i == insn_cnt - 1 || insn[1].code != 0 ||
			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
			    insn[1].off != 0) {
				verbose(env, "invalid bpf_ld_imm64 insn\n");

			if (insn[0].src_reg == 0)
				/* valid generic load 64-bit imm */

			if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
				aux = &env->insn_aux_data[i];
				err = check_pseudo_btf_id(env, insn, aux);

			if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
				aux = &env->insn_aux_data[i];
				aux->ptr_type = PTR_TO_FUNC;

			/* In final convert_pseudo_ld_imm64() step, this is
			 * converted into regular 64-bit imm load insn.
			 */
			switch (insn[0].src_reg) {
			case BPF_PSEUDO_MAP_VALUE:
			case BPF_PSEUDO_MAP_IDX_VALUE:
			case BPF_PSEUDO_MAP_FD:
			case BPF_PSEUDO_MAP_IDX:
				if (insn[1].imm == 0)
				verbose(env, "unrecognized bpf_ld_imm64 insn\n");

			switch (insn[0].src_reg) {
			case BPF_PSEUDO_MAP_IDX_VALUE:
			case BPF_PSEUDO_MAP_IDX:
				if (bpfptr_is_null(env->fd_array)) {
					verbose(env, "fd_idx without fd_array is invalid\n");
				if (copy_from_bpfptr_offset(&fd, env->fd_array,
							    insn[0].imm * sizeof(fd),

			map_idx = add_used_map_from_fd(env, fd, &reused);
			map = env->used_maps[map_idx];

			aux = &env->insn_aux_data[i];
			aux->map_index = map_idx;

			err = check_map_prog_compatibility(env, map, env->prog);

			if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
			    insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
				addr = (unsigned long)map;
				u32 off = insn[1].imm;

				if (off >= BPF_MAX_VAR_OFF) {
					verbose(env, "direct value offset of %u is not allowed\n", off);
				if (!map->ops->map_direct_value_addr) {
					verbose(env, "no direct value access support for this map type\n");
				err = map->ops->map_direct_value_addr(map, &addr, off);
					verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
						map->value_size, off);
				aux->map_off = off;

			insn[0].imm = (u32)addr;
			insn[1].imm = addr >> 32;

			/* proceed with extra checks only if its newly added used map */

			if (bpf_map_is_cgroup_storage(map) &&
			    bpf_cgroup_storage_assign(env->prog->aux, map)) {
				verbose(env, "only one cgroup storage of each type is allowed\n");

			if (map->map_type == BPF_MAP_TYPE_ARENA) {
				if (env->prog->aux->arena) {
					verbose(env, "Only one arena per program\n");
				if (!env->allow_ptr_leaks || !env->bpf_capable) {
					verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n");
				if (!env->prog->jit_requested) {
					verbose(env, "JIT is required to use arena\n");
					return -EOPNOTSUPP;
				if (!bpf_jit_supports_arena()) {
					verbose(env, "JIT doesn't support arena\n");
					return -EOPNOTSUPP;
				env->prog->aux->arena = (void *)map;
				if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
					verbose(env, "arena's user address must be set via map_extra or mmap()\n");

		/* Basic sanity check before we invest more work here. */
		if (!bpf_opcode_in_insntable(insn->code)) {
			verbose(env, "unknown opcode %02x\n", insn->code);

	/* now all pseudo BPF_LD_IMM64 instructions load valid
	 * 'struct bpf_map *' into a register instead of user map_fd.
	 * These pointers will be used later by verifier to validate map access.
	 */
/* drop refcnt of maps used by the rejected program */
static void release_maps(struct bpf_verifier_env *env)
{
	__bpf_free_used_maps(env->prog->aux, env->used_maps,
			     env->used_map_cnt);
/* drop refcnt of btfs used by the rejected program */
static void release_btfs(struct bpf_verifier_env *env)
{
	__bpf_free_used_btfs(env->used_btfs, env->used_btf_cnt);
/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
		if (insn->src_reg == BPF_PSEUDO_FUNC)
/* single env->prog->insni[off] instruction was replaced with the range
 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
 * [0, off) and [off, end) to new locations, so the patched range stays zero
 */
static void adjust_insn_aux_data(struct bpf_verifier_env *env,
				 struct bpf_insn_aux_data *new_data,
				 struct bpf_prog *new_prog, u32 off, u32 cnt)
{
	struct bpf_insn_aux_data *old_data = env->insn_aux_data;
	struct bpf_insn *insn = new_prog->insnsi;
	u32 old_seen = old_data[off].seen;

	/* aux info at OFF always needs adjustment, no matter fast path
	 * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the
	 * original insn at old prog.
	 */
	old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);

	prog_len = new_prog->len;

	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
	memcpy(new_data + off + cnt - 1, old_data + off,
	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
	for (i = off; i < off + cnt - 1; i++) {
		/* Expand insni[off]'s seen count to the patched range. */
		new_data[i].seen = old_seen;
		new_data[i].zext_dst = insn_has_def32(env, insn + i);

	env->insn_aux_data = new_data;
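
/* After patching 'len' insns in place of a single insn at 'off', shift the
 * start of every subprog that begins after 'off' by len - 1.
 */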
static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
{
	/* NOTE: fake 'exit' subprog should be updated as well. */
	for (i = 0; i <= env->subprog_cnt; i++) {
		if (env->subprog_info[i].start <= off)
		env->subprog_info[i].start += len - 1;
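
/* Same adjustment as above, but for the JIT poke descriptors that record
 * the instruction indexes of tail calls.
 */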
static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
{
	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
	int i, sz = prog->aux->size_poke_tab;
	struct bpf_jit_poke_descriptor *desc;

	for (i = 0; i < sz; i++) {
		if (desc->insn_idx <= off)
		desc->insn_idx += len - 1;
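
/* Replace the single insn at 'off' with the 'len' insns in 'patch' and keep
 * the verifier bookkeeping (insn_aux_data, subprog starts, poke descs) in sync.
 */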
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
					    const struct bpf_insn *patch, u32 len)
{
	struct bpf_prog *new_prog;
	struct bpf_insn_aux_data *new_data = NULL;

	new_data = vzalloc(array_size(env->prog->len + len - 1,
				      sizeof(struct bpf_insn_aux_data)));

	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
	if (IS_ERR(new_prog)) {
		if (PTR_ERR(new_prog) == -ERANGE)
				"insn %d cannot be patched due to 16-bit range\n",
				env->insn_aux_data[off].orig_idx);

	adjust_insn_aux_data(env, new_data, new_prog, off, len);
	adjust_subprog_starts(env, off, len);
	adjust_poke_descs(new_prog, off, len);
/*
 * For all jmp insns in a given 'prog' that point to 'tgt_idx' insn adjust the
 * jump offset by 'delta'.
 */
static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
{
	struct bpf_insn *insn = prog->insnsi;
	u32 insn_cnt = prog->len, i;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code = insn->code;

		if (tgt_idx <= i && i < tgt_idx + delta)

		if ((BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_CALL || BPF_OP(code) == BPF_EXIT)

		if (insn->code == (BPF_JMP32 | BPF_JA)) {
			if (i + 1 + insn->imm != tgt_idx)
			if (check_add_overflow(insn->imm, delta, &imm))
			if (i + 1 + insn->off != tgt_idx)
			if (check_add_overflow(insn->off, delta, &off))
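
/* Counterpart of adjust_subprog_starts() for instruction removal: drop the
 * subprogs fully contained in the removed range and shift the remaining ones.
 */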
static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,

	/* find first prog starting at or after off (first to remove) */
	for (i = 0; i < env->subprog_cnt; i++)
		if (env->subprog_info[i].start >= off)
	/* find first prog starting at or after off + cnt (first to stay) */
	for (j = i; j < env->subprog_cnt; j++)
		if (env->subprog_info[j].start >= off + cnt)
	/* if j doesn't start exactly at off + cnt, we are just removing
	 * the front of previous prog
	 */
	if (env->subprog_info[j].start != off + cnt)

		struct bpf_prog_aux *aux = env->prog->aux;

		/* move fake 'exit' subprog as well */
		move = env->subprog_cnt + 1 - j;

		memmove(env->subprog_info + i,
			env->subprog_info + j,
			sizeof(*env->subprog_info) * move);
		env->subprog_cnt -= j - i;

		/* remove func_info */
		if (aux->func_info) {
			move = aux->func_info_cnt - j;

			memmove(aux->func_info + i,
				aux->func_info + j,
				sizeof(*aux->func_info) * move);
			aux->func_info_cnt -= j - i;
			/* func_info->insn_off is set after all code rewrites,
			 * in adjust_btf_func() - no need to adjust
			 */

	/* convert i from "first prog to remove" to "first to adjust" */
	if (env->subprog_info[i].start == off)

	/* update fake 'exit' subprog as well */
	for (; i <= env->subprog_cnt; i++)
		env->subprog_info[i].start -= cnt;
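
/* Keep the BTF line info tables consistent after removing 'cnt' insns at 'off'. */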
static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,

	struct bpf_prog *prog = env->prog;
	u32 i, l_off, l_cnt, nr_linfo;
	struct bpf_line_info *linfo;

	nr_linfo = prog->aux->nr_linfo;

	linfo = prog->aux->linfo;

	/* find first line info to remove, count lines to be removed */
	for (i = 0; i < nr_linfo; i++)
		if (linfo[i].insn_off >= off)

	for (; i < nr_linfo; i++)
		if (linfo[i].insn_off < off + cnt)

	/* First live insn doesn't match first live linfo, it needs to "inherit"
	 * last removed linfo. prog is already modified, so prog->len == off
	 * means no live instructions after (tail of the program was removed).
	 */
	if (prog->len != off && l_cnt &&
	    (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
		linfo[--i].insn_off = off + cnt;

	/* remove the line info which refer to the removed instructions */
	memmove(linfo + l_off, linfo + i,
		sizeof(*linfo) * (nr_linfo - i));
	prog->aux->nr_linfo -= l_cnt;
	nr_linfo = prog->aux->nr_linfo;

	/* pull all linfo[i].insn_off >= off + cnt in by cnt */
	for (i = l_off; i < nr_linfo; i++)
		linfo[i].insn_off -= cnt;

	/* fix up all subprogs (incl. 'exit') which start >= off */
	for (i = 0; i <= env->subprog_cnt; i++)
		if (env->subprog_info[i].linfo_idx > l_off) {
			/* program may have started in the removed region but
			 * may not be fully removed
			 */
			if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
				env->subprog_info[i].linfo_idx -= l_cnt;
				env->subprog_info[i].linfo_idx = l_off;
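
/* Remove 'cnt' insns starting at 'off' and fix up every per-insn table the
 * verifier maintains for them (subprog starts, line info, insn_aux_data).
 */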
static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	unsigned int orig_prog_len = env->prog->len;

	if (bpf_prog_is_offloaded(env->prog->aux))
		bpf_prog_offload_remove_insns(env, off, cnt);

	err = bpf_remove_insns(env->prog, off, cnt);

	err = adjust_subprog_starts_after_remove(env, off, cnt);

	err = bpf_adj_linfo_after_remove(env, off, cnt);

	memmove(aux_data + off, aux_data + off + cnt,
		sizeof(*aux_data) * (orig_prog_len - off - cnt));
/* The verifier does more data flow analysis than llvm and will not
 * explore branches that are dead at run time. Malicious programs can
 * have dead code too. Therefore replace all dead at-run-time code
 *
 * Just nops are not optimal, e.g. if they would sit at the end of the
 * program and through another bug we would manage to jump there, then
 * we'd execute beyond program memory otherwise. Returning exception
 * code also wouldn't work since we can have subprogs where the dead
 * code could be located.
 */
static void sanitize_dead_code(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
	struct bpf_insn *insn = env->prog->insnsi;
	const int insn_cnt = env->prog->len;

	for (i = 0; i < insn_cnt; i++) {
		if (aux_data[i].seen)
		memcpy(insn + i, &trap, sizeof(trap));
		aux_data[i].zext_dst = false;
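
/* JA, EXIT and CALL are the only unconditional opcodes in the jump classes. */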
static bool insn_is_cond_jump(u8 code)
{
	if (BPF_CLASS(code) == BPF_JMP32)
		return op != BPF_JA;

	if (BPF_CLASS(code) != BPF_JMP)

	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
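
/* If one side of a conditional jump was never seen during verification,
 * rewrite the insn into an unconditional jump to the live side.
 */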
static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
	struct bpf_insn *insn = env->prog->insnsi;
	const int insn_cnt = env->prog->len;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (!insn_is_cond_jump(insn->code))

		if (!aux_data[i + 1].seen)
			ja.off = insn->off;
		else if (!aux_data[i + 1 + insn->off].seen)

		if (bpf_prog_is_offloaded(env->prog->aux))
			bpf_prog_offload_replace_insn(env, i, &ja);

		memcpy(insn, &ja, sizeof(ja));
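
/* Remove whole runs of insns that verification never reached. */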
static int opt_remove_dead_code(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	int insn_cnt = env->prog->len;

	for (i = 0; i < insn_cnt; i++) {
		while (i + j < insn_cnt && !aux_data[i + j].seen)

		err = verifier_remove_insns(env, i, j);

		insn_cnt = env->prog->len;
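
/* A JA with offset 0 is the canonical nop; drop any left behind by rewrites. */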
static const struct bpf_insn NOP = BPF_JMP_IMM(BPF_JA, 0, 0, 0);

static int opt_remove_nops(struct bpf_verifier_env *env)
{
	const struct bpf_insn ja = NOP;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;

	for (i = 0; i < insn_cnt; i++) {
		if (memcmp(&insn[i], &ja, sizeof(ja)))

		err = verifier_remove_insns(env, i, 1);
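
/* For 32-bit subregister defs, insert an explicit zero-extension where the
 * JIT needs one, and optionally poison the upper 32 bits with a random value
 * when BPF_F_TEST_RND_HI32 is set.
 */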
static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
					 const union bpf_attr *attr)
{
	struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
	struct bpf_insn_aux_data *aux = env->insn_aux_data;
	int i, patch_len, delta = 0, len = env->prog->len;
	struct bpf_insn *insns = env->prog->insnsi;
	struct bpf_prog *new_prog;

	rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
	zext_patch[1] = BPF_ZEXT_REG(0);
	rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
	rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
	rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
	for (i = 0; i < len; i++) {
		int adj_idx = i + delta;
		struct bpf_insn insn;

		insn = insns[adj_idx];
		load_reg = insn_def_regno(&insn);
		if (!aux[adj_idx].zext_dst) {

			class = BPF_CLASS(code);
			if (load_reg == -1)

			/* NOTE: arg "reg" (the fourth one) is only used for
			 * BPF_STX + SRC_OP, so it is safe to pass NULL
			 */
			if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
				if (class == BPF_LD &&
				    BPF_MODE(code) == BPF_IMM)

			/* ctx load could be transformed into wider load. */
			if (class == BPF_LDX &&
			    aux[adj_idx].ptr_type == PTR_TO_CTX)

			imm_rnd = get_random_u32();
			rnd_hi32_patch[0] = insn;
			rnd_hi32_patch[1].imm = imm_rnd;
			rnd_hi32_patch[3].dst_reg = load_reg;
			patch = rnd_hi32_patch;
			goto apply_patch_buffer;

		/* Add in an zero-extend instruction if a) the JIT has requested
		 * it or b) it's a CMPXCHG.
		 *
		 * The latter is because: BPF_CMPXCHG always loads a value into
		 * R0, therefore always zero-extends. However some archs'
		 * equivalent instruction only does this load when the
		 * comparison is successful. This detail of CMPXCHG is
		 * orthogonal to the general zero-extension behaviour of the
		 * CPU, so it's treated independently of bpf_jit_needs_zext.
		 */
		if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))

		/* Zero-extension is done by the caller. */
		if (bpf_pseudo_kfunc_call(&insn))

		if (WARN_ON(load_reg == -1)) {
			verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");

		zext_patch[0] = insn;
		zext_patch[1].dst_reg = load_reg;
		zext_patch[1].src_reg = load_reg;
		patch = zext_patch;
apply_patch_buffer:
		new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);

		env->prog = new_prog;
		insns = new_prog->insnsi;
		aux = env->insn_aux_data;
		delta += patch_len - 1;
/* convert load instructions that access fields of a context type into a
 * sequence of instructions that access fields of the underlying structure:
 *     struct __sk_buff    -> struct sk_buff
 *     struct bpf_sock_ops -> struct sock
 */
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
	struct bpf_subprog_info *subprogs = env->subprog_info;
	const struct bpf_verifier_ops *ops = env->ops;
	int i, cnt, size, ctx_field_size, delta = 0, epilogue_cnt = 0;
	const int insn_cnt = env->prog->len;
	struct bpf_insn *epilogue_buf = env->epilogue_buf;
	struct bpf_insn *insn_buf = env->insn_buf;
	struct bpf_insn *insn;
	u32 target_size, size_default, off;
	struct bpf_prog *new_prog;
	enum bpf_access_type type;
	bool is_narrower_load;
	int epilogue_idx = 0;

	if (ops->gen_epilogue) {
		epilogue_cnt = ops->gen_epilogue(epilogue_buf, env->prog,
						 -(subprogs[0].stack_depth + 8));
		if (epilogue_cnt >= INSN_BUF_SIZE) {
			verbose(env, "bpf verifier is misconfigured\n");
		} else if (epilogue_cnt) {
			/* Save the ARG_PTR_TO_CTX for the epilogue to use */
			subprogs[0].stack_depth += 8;
			insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_1,
						      -subprogs[0].stack_depth);
			insn_buf[cnt++] = env->prog->insnsi[0];
			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);

			env->prog = new_prog;

	if (ops->gen_prologue || env->seen_direct_write) {
		if (!ops->gen_prologue) {
			verbose(env, "bpf verifier is misconfigured\n");
		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
		if (cnt >= INSN_BUF_SIZE) {
			verbose(env, "bpf verifier is misconfigured\n");
		new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);

		env->prog = new_prog;

	WARN_ON(adjust_jmp_off(env->prog, 0, delta));

	if (bpf_prog_is_offloaded(env->prog->aux))

	insn = env->prog->insnsi + delta;

	for (i = 0; i < insn_cnt; i++, insn++) {
		bpf_convert_ctx_access_t convert_ctx_access;

		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW) ||
		    insn->code == (BPF_LDX | BPF_MEMSX | BPF_B) ||
		    insn->code == (BPF_LDX | BPF_MEMSX | BPF_H) ||
		    insn->code == (BPF_LDX | BPF_MEMSX | BPF_W)) {
		} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
			   insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
			   insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
			   insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
			   insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
			   insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
		} else if ((insn->code == (BPF_STX | BPF_ATOMIC | BPF_W) ||
			    insn->code == (BPF_STX | BPF_ATOMIC | BPF_DW)) &&
			   env->insn_aux_data[i + delta].ptr_type == PTR_TO_ARENA) {
			insn->code = BPF_STX | BPF_PROBE_ATOMIC | BPF_SIZE(insn->code);
			env->prog->aux->num_exentries++;
		} else if (insn->code == (BPF_JMP | BPF_EXIT) &&
			   i + delta < subprogs[1].start) {
			/* Generate epilogue for the main prog */
			if (epilogue_idx) {
				/* jump back to the earlier generated epilogue */
				insn_buf[0] = BPF_JMP32_A(epilogue_idx - i - delta - 1);
				memcpy(insn_buf, epilogue_buf,
				       epilogue_cnt * sizeof(*epilogue_buf));
				cnt = epilogue_cnt;
				/* epilogue_idx cannot be 0. It must have at
				 * least one ctx ptr saving insn before the
				 */
				epilogue_idx = i + delta;
			goto patch_insn_buf;

		if (type == BPF_WRITE &&
		    env->insn_aux_data[i + delta].sanitize_stack_spill) {
			struct bpf_insn patch[] = {
			cnt = ARRAY_SIZE(patch);
			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);

			env->prog = new_prog;
			insn = new_prog->insnsi + i + delta;

		switch ((int)env->insn_aux_data[i + delta].ptr_type) {
			if (!ops->convert_ctx_access)
			convert_ctx_access = ops->convert_ctx_access;
		case PTR_TO_SOCKET:
		case PTR_TO_SOCK_COMMON:
			convert_ctx_access = bpf_sock_convert_ctx_access;
		case PTR_TO_TCP_SOCK:
			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
		case PTR_TO_XDP_SOCK:
			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
		case PTR_TO_BTF_ID:
		case PTR_TO_BTF_ID | PTR_UNTRUSTED:
		/* PTR_TO_BTF_ID | MEM_ALLOC always has a valid lifetime, unlike
		 * PTR_TO_BTF_ID, and an active ref_obj_id, but the same cannot
		 * be said once it is marked PTR_UNTRUSTED, hence we must handle
		 * any faults for loads into such types. BPF_WRITE is disallowed
		 */
		case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED:
		case PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL:
			if (type == BPF_READ) {
				if (BPF_MODE(insn->code) == BPF_MEM)
					insn->code = BPF_LDX | BPF_PROBE_MEM |
						     BPF_SIZE((insn)->code);
					insn->code = BPF_LDX | BPF_PROBE_MEMSX |
						     BPF_SIZE((insn)->code);
				env->prog->aux->num_exentries++;

			if (BPF_MODE(insn->code) == BPF_MEMSX) {
				verbose(env, "sign extending loads from arena are not supported yet\n");
				return -EOPNOTSUPP;
			insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32 | BPF_SIZE(insn->code);
			env->prog->aux->num_exentries++;

		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
		size = BPF_LDST_BYTES(insn);
		mode = BPF_MODE(insn->code);

		/* If the read access is a narrower load of the field,
		 * convert to a 4/8-byte load, to minimum program type specific
		 * convert_ctx_access changes. If conversion is successful,
		 * we will apply proper mask to the result.
		 */
		is_narrower_load = size < ctx_field_size;
		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);

		if (is_narrower_load) {
			if (type == BPF_WRITE) {
				verbose(env, "bpf verifier narrow ctx access misconfigured\n");

			if (ctx_field_size == 4)
			else if (ctx_field_size == 8)
				size_code = BPF_DW;

			insn->off = off & ~(size_default - 1);
			insn->code = BPF_LDX | BPF_MEM | size_code;

		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
		if (cnt == 0 || cnt >= INSN_BUF_SIZE ||
		    (ctx_field_size && !target_size)) {
			verbose(env, "bpf verifier is misconfigured\n");

		if (is_narrower_load && size < target_size) {
			u8 shift = bpf_ctx_narrow_access_offset(
				off, size, size_default) * 8;
			if (shift && cnt + 1 >= INSN_BUF_SIZE) {
				verbose(env, "bpf verifier narrow ctx load misconfigured\n");
			if (ctx_field_size <= 4) {
				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
								(1 << size * 8) - 1);
				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
								(1ULL << size * 8) - 1);

		if (mode == BPF_MEMSX)
			insn_buf[cnt++] = BPF_RAW_INSN(BPF_ALU64 | BPF_MOV | BPF_X,
						       insn->dst_reg, insn->dst_reg,

		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);

		/* keep walking new program and skip insns we just inserted */
		env->prog = new_prog;
		insn = new_prog->insnsi + i + delta;
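
/* JIT each subprog separately, then wire the bpf-to-bpf calls up with the
 * addresses of the JITed images. On failure, fall back to the interpreter
 * where that is still possible.
 */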
static int jit_subprogs(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog, **func, *tmp;
	int i, j, subprog_start, subprog_end = 0, len, subprog;
	struct bpf_map *map_ptr;
	struct bpf_insn *insn;
	void *old_bpf_func;
	int err, num_exentries;

	if (env->subprog_cnt <= 1)

	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))

		/* Upon error here we cannot fall back to interpreter but
		 * need a hard reject of the program. Thus -EFAULT is
		 * propagated in any case.
		 */
		subprog = find_subprog(env, i + insn->imm + 1);
			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
				  i + insn->imm + 1);
		/* temporarily remember subprog id inside insn instead of
		 * aux_data, since next loop will split up all insns into funcs
		 */
		insn->off = subprog;
		/* remember original imm in case JIT fails and fallback
		 * to interpreter will be needed
		 */
		env->insn_aux_data[i].call_imm = insn->imm;
		/* point imm to __bpf_call_base+1 from JITs point of view */
		if (bpf_pseudo_func(insn)) {
#if defined(MODULES_VADDR)
			u64 addr = MODULES_VADDR;
			u64 addr = VMALLOC_START;
			/* jit (e.g. x86_64) may emit fewer instructions
			 * if it learns a u32 imm is the same as a u64 imm.
			 * Set close enough to possible prog address.
			 */
			insn[0].imm = (u32)addr;
			insn[1].imm = addr >> 32;

	err = bpf_prog_alloc_jited_linfo(prog);
		goto out_undo_insn;

	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
		goto out_undo_insn;

	for (i = 0; i < env->subprog_cnt; i++) {
		subprog_start = subprog_end;
		subprog_end = env->subprog_info[i + 1].start;

		len = subprog_end - subprog_start;
		/* bpf_prog_run() doesn't call subprogs directly,
		 * hence main prog stats include the runtime of subprogs.
		 * subprogs don't have IDs and not reachable via prog_get_next_id
		 * func[i]->stats will never be accessed and stays NULL
		 */
		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
		       len * sizeof(struct bpf_insn));
		func[i]->type = prog->type;
		func[i]->len = len;
		if (bpf_prog_calc_tag(func[i]))
		func[i]->is_func = 1;
		func[i]->sleepable = prog->sleepable;
		func[i]->aux->func_idx = i;
		/* Below members will be freed only at prog->aux */
		func[i]->aux->btf = prog->aux->btf;
		func[i]->aux->func_info = prog->aux->func_info;
		func[i]->aux->func_info_cnt = prog->aux->func_info_cnt;
		func[i]->aux->poke_tab = prog->aux->poke_tab;
		func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;

		for (j = 0; j < prog->aux->size_poke_tab; j++) {
			struct bpf_jit_poke_descriptor *poke;

			poke = &prog->aux->poke_tab[j];
			if (poke->insn_idx < subprog_end &&
			    poke->insn_idx >= subprog_start)
				poke->aux = func[i]->aux;

		func[i]->aux->name[0] = 'F';
		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
		if (env->subprog_info[i].priv_stack_mode == PRIV_STACK_ADAPTIVE)
			func[i]->aux->jits_use_priv_stack = true;

		func[i]->jit_requested = 1;
		func[i]->blinding_requested = prog->blinding_requested;
		func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
		func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
		func[i]->aux->linfo = prog->aux->linfo;
		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
		func[i]->aux->arena = prog->aux->arena;

		insn = func[i]->insnsi;
		for (j = 0; j < func[i]->len; j++, insn++) {
			if (BPF_CLASS(insn->code) == BPF_LDX &&
			    (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
			     BPF_MODE(insn->code) == BPF_PROBE_MEM32 ||
			     BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
			if ((BPF_CLASS(insn->code) == BPF_STX ||
			     BPF_CLASS(insn->code) == BPF_ST) &&
			    BPF_MODE(insn->code) == BPF_PROBE_MEM32)
			if (BPF_CLASS(insn->code) == BPF_STX &&
			    BPF_MODE(insn->code) == BPF_PROBE_ATOMIC)

		func[i]->aux->num_exentries = num_exentries;
		func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
		func[i]->aux->exception_cb = env->subprog_info[i].is_exception_cb;
		func[i]->aux->exception_boundary = env->seen_exception;
		func[i] = bpf_int_jit_compile(func[i]);
		if (!func[i]->jited) {

	/* at this point all bpf functions were successfully JITed
	 * now populate all bpf_calls with correct addresses and
	 * run last pass of JIT
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		insn = func[i]->insnsi;
		for (j = 0; j < func[i]->len; j++, insn++) {
			if (bpf_pseudo_func(insn)) {
				subprog = insn->off;
				insn[0].imm = (u32)(long)func[subprog]->bpf_func;
				insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
			if (!bpf_pseudo_call(insn))
			subprog = insn->off;
			insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);

		/* we use the aux data to keep a list of the start addresses
		 * of the JITed images for each function in the program
		 *
		 * for some architectures, such as powerpc64, the imm field
		 * might not be large enough to hold the offset of the start
		 * address of the callee's JITed image from __bpf_call_base
		 *
		 * in such cases, we can lookup the start address of a callee
		 * by using its subprog id, available from the off field of
		 * the call instruction, as an index for this list
		 */
		func[i]->aux->func = func;
		func[i]->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt;
		func[i]->aux->real_func_cnt = env->subprog_cnt;

	for (i = 0; i < env->subprog_cnt; i++) {
		old_bpf_func = func[i]->bpf_func;
		tmp = bpf_int_jit_compile(func[i]);
		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");

	/* finally lock prog and jit images for all functions and
	 * populate kallsysm. Begin at the first subprogram, since
	 * bpf_prog_load will add the kallsyms for the main program.
	 */
	for (i = 1; i < env->subprog_cnt; i++) {
		err = bpf_prog_lock_ro(func[i]);
	for (i = 1; i < env->subprog_cnt; i++)
		bpf_prog_kallsyms_add(func[i]);

	/* Last step: make now unused interpreter insns from main
	 * prog consistent for later dump requests, so they can
	 * later look the same as if they were interpreted only.
	 */
	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (bpf_pseudo_func(insn)) {
			insn[0].imm = env->insn_aux_data[i].call_imm;
			insn[1].imm = insn->off;
		if (!bpf_pseudo_call(insn))
		insn->off = env->insn_aux_data[i].call_imm;
		subprog = find_subprog(env, i + insn->off + 1);
		insn->imm = subprog;

	prog->bpf_func = func[0]->bpf_func;
	prog->jited_len = func[0]->jited_len;
	prog->aux->extable = func[0]->aux->extable;
	prog->aux->num_exentries = func[0]->aux->num_exentries;
	prog->aux->func = func;
	prog->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt;
	prog->aux->real_func_cnt = env->subprog_cnt;
	prog->aux->bpf_exception_cb = (void *)func[env->exception_callback_subprog]->bpf_func;
	prog->aux->exception_boundary = func[0]->aux->exception_boundary;
	bpf_prog_jit_attempt_done(prog);

	/* We failed JIT'ing, so at this point we need to unregister poke
	 * descriptors from subprogs, so that kernel is not attempting to
	 * patch it anymore as we're freeing the subprog JIT memory.
	 */
	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		map_ptr = prog->aux->poke_tab[i].tail_call.map;
		map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
	/* At this point we're guaranteed that poke descriptors are not
	 * live anymore. We can just unlink its descriptor table as it's
	 * released with the main prog.
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		func[i]->aux->poke_tab = NULL;
		bpf_jit_free(func[i]);
	/* cleanup main prog to be interpreted */
	prog->jit_requested = 0;
	prog->blinding_requested = 0;
	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (!bpf_pseudo_call(insn))
		insn->imm = env->insn_aux_data[i].call_imm;
	bpf_prog_jit_attempt_done(prog);
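
/* Resolve bpf-to-bpf calls: either JIT all subprogs or, for the interpreter,
 * patch the call arguments with the callee's stack depth.
 */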
static int fixup_call_args(struct bpf_verifier_env *env)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	struct bpf_prog *prog = env->prog;
	struct bpf_insn *insn = prog->insnsi;
	bool has_kfunc_call = bpf_prog_has_kfunc_call(prog);

	if (env->prog->jit_requested &&
	    !bpf_prog_is_offloaded(env->prog->aux)) {
		err = jit_subprogs(env);
		if (err == -EFAULT)
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	if (has_kfunc_call) {
		verbose(env, "calling kernel functions are not allowed in non-JITed programs\n");
	if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
		/* When JIT fails the progs with bpf2bpf calls and tail_calls
		 * have to be rejected, since interpreter doesn't support them yet.
		 */
		verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
	for (i = 0; i < prog->len; i++, insn++) {
		if (bpf_pseudo_func(insn)) {
			/* When JIT fails the progs with callback calls
			 * have to be rejected, since interpreter doesn't support them yet.
			 */
			verbose(env, "callbacks are not allowed in non-JITed programs\n");

		if (!bpf_pseudo_call(insn))
		depth = get_callee_stack_depth(env, insn, i);
		bpf_patch_call_args(insn, depth);
/* replace a generic kfunc with a specialized version if necessary */
static void specialize_kfunc(struct bpf_verifier_env *env,
			     u32 func_id, u16 offset, unsigned long *addr)
{
	struct bpf_prog *prog = env->prog;
	bool seen_direct_write;

	if (bpf_dev_bound_kfunc_id(func_id)) {
		xdp_kfunc = bpf_dev_bound_resolve_kfunc(prog, func_id);
			*addr = (unsigned long)xdp_kfunc;
		/* fallback to default kfunc when not supported by netdev */

	if (func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
		seen_direct_write = env->seen_direct_write;
		is_rdonly = !may_access_direct_pkt_data(env, NULL, BPF_WRITE);

			*addr = (unsigned long)bpf_dynptr_from_skb_rdonly;

		/* restore env->seen_direct_write to its original value, since
		 * may_access_direct_pkt_data mutates it
		 */
		env->seen_direct_write = seen_direct_write;
static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux,
					    u16 struct_meta_reg,
					    u16 node_offset_reg,
					    struct bpf_insn *insn,
					    struct bpf_insn *insn_buf,

	struct btf_struct_meta *kptr_struct_meta = insn_aux->kptr_struct_meta;
	struct bpf_insn addr[2] = { BPF_LD_IMM64(struct_meta_reg, (long)kptr_struct_meta) };

	insn_buf[0] = addr[0];
	insn_buf[1] = addr[1];
	insn_buf[2] = BPF_MOV64_IMM(node_offset_reg, insn_aux->insert_off);
	insn_buf[3] = *insn;
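
/* Rewrite a kfunc call insn: fix up insn->imm for the JIT and expand calls
 * to the special kfuncs that take hidden struct_meta/size arguments.
 */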
static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			    struct bpf_insn *insn_buf, int insn_idx, int *cnt)
{
	const struct bpf_kfunc_desc *desc;

		verbose(env, "invalid kernel function call not eliminated in verifier pass\n");

	/* insn->imm has the btf func_id. Replace it with an offset relative to
	 * __bpf_call_base, unless the JIT needs to call functions that are
	 * further than 32 bits away (bpf_jit_supports_far_kfunc_call()).
	 */
	desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
		verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",

	if (!bpf_jit_supports_far_kfunc_call())
		insn->imm = BPF_CALL_IMM(desc->addr);

	if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
	    desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
		u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size;

		if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && kptr_struct_meta) {
			verbose(env, "verifier internal error: NULL kptr_struct_meta expected at insn_idx %d\n",

		insn_buf[0] = BPF_MOV64_IMM(BPF_REG_1, obj_new_size);
		insn_buf[1] = addr[0];
		insn_buf[2] = addr[1];
		insn_buf[3] = *insn;
	} else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] ||
		   desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] ||
		   desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) {
		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };

		if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] && kptr_struct_meta) {
			verbose(env, "verifier internal error: NULL kptr_struct_meta expected at insn_idx %d\n",

		if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] &&
		    !kptr_struct_meta) {
			verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n",

		insn_buf[0] = addr[0];
		insn_buf[1] = addr[1];
		insn_buf[2] = *insn;
	} else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
		   desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
		   desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
		int struct_meta_reg = BPF_REG_3;
		int node_offset_reg = BPF_REG_4;

		/* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */
		if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
			struct_meta_reg = BPF_REG_4;
			node_offset_reg = BPF_REG_5;

		if (!kptr_struct_meta) {
			verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n",

		__fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg,
						node_offset_reg, insn, insn_buf, cnt);
	} else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
		   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
		insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
	} else if (is_bpf_wq_set_callback_impl_kfunc(desc->func_id)) {
		struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(BPF_REG_4, (long)env->prog->aux) };

		insn_buf[0] = ld_addrs[0];
		insn_buf[1] = ld_addrs[1];
		insn_buf[2] = *insn;
/* The function requires that first instruction in 'patch' is insnsi[prog->len - 1] */
static int add_hidden_subprog(struct bpf_verifier_env *env, struct bpf_insn *patch, int len)
{
	struct bpf_subprog_info *info = env->subprog_info;
	int cnt = env->subprog_cnt;
	struct bpf_prog *prog;

	/* We only reserve one slot for hidden subprogs in subprog_info. */
	if (env->hidden_subprog_cnt) {
		verbose(env, "verifier internal error: only one hidden subprog supported\n");

	/* We're not patching any existing instruction, just appending the new
	 * ones for the hidden subprog. Hence all of the adjustment operations
	 * in bpf_patch_insn_data are no-ops.
	 */
	prog = bpf_patch_insn_data(env, env->prog->len - 1, patch, len);

	info[cnt + 1].start = info[cnt].start;
	info[cnt].start = prog->len - len + 1;
	env->subprog_cnt++;
	env->hidden_subprog_cnt++;
20670 /* Do various post-verification rewrites in a single program pass.
20671 * These rewrites simplify JIT and interpreter implementations.
20673 static int do_misc_fixups(struct bpf_verifier_env
*env
)
20675 struct bpf_prog
*prog
= env
->prog
;
20676 enum bpf_attach_type eatype
= prog
->expected_attach_type
;
20677 enum bpf_prog_type prog_type
= resolve_prog_type(prog
);
20678 struct bpf_insn
*insn
= prog
->insnsi
;
20679 const struct bpf_func_proto
*fn
;
20680 const int insn_cnt
= prog
->len
;
20681 const struct bpf_map_ops
*ops
;
20682 struct bpf_insn_aux_data
*aux
;
20683 struct bpf_insn
*insn_buf
= env
->insn_buf
;
20684 struct bpf_prog
*new_prog
;
20685 struct bpf_map
*map_ptr
;
20686 int i
, ret
, cnt
, delta
= 0, cur_subprog
= 0;
20687 struct bpf_subprog_info
*subprogs
= env
->subprog_info
;
20688 u16 stack_depth
= subprogs
[cur_subprog
].stack_depth
;
20689 u16 stack_depth_extra
= 0;
20691 if (env
->seen_exception
&& !env
->exception_callback_subprog
) {
20692 struct bpf_insn patch
[] = {
20693 env
->prog
->insnsi
[insn_cnt
- 1],
20694 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
20698 ret
= add_hidden_subprog(env
, patch
, ARRAY_SIZE(patch
));
20702 insn
= prog
->insnsi
;
20704 env
->exception_callback_subprog
= env
->subprog_cnt
- 1;
20705 /* Don't update insn_cnt, as add_hidden_subprog always appends insns */
20706 mark_subprog_exc_cb(env
, env
->exception_callback_subprog
);
20709 for (i
= 0; i
< insn_cnt
;) {
20710 if (insn
->code
== (BPF_ALU64
| BPF_MOV
| BPF_X
) && insn
->imm
) {
20711 if ((insn
->off
== BPF_ADDR_SPACE_CAST
&& insn
->imm
== 1) ||
20712 (((struct bpf_map
*)env
->prog
->aux
->arena
)->map_flags
& BPF_F_NO_USER_CONV
)) {
20713 /* convert to 32-bit mov that clears upper 32-bit */
20714 insn
->code
= BPF_ALU
| BPF_MOV
| BPF_X
;
20715 /* clear off and imm, so it's a normal 'wX = wY' from JIT pov */
20718 } /* cast from as(0) to as(1) should be handled by JIT */
20722 if (env
->insn_aux_data
[i
+ delta
].needs_zext
)
20723 /* Convert BPF_CLASS(insn->code) == BPF_ALU64 to 32-bit ALU */
20724 insn
->code
= BPF_ALU
| BPF_OP(insn
->code
) | BPF_SRC(insn
->code
);
20726 /* Make sdiv/smod divide-by-minus-one exceptions impossible. */
20727 if ((insn
->code
== (BPF_ALU64
| BPF_MOD
| BPF_K
) ||
20728 insn
->code
== (BPF_ALU64
| BPF_DIV
| BPF_K
) ||
20729 insn
->code
== (BPF_ALU
| BPF_MOD
| BPF_K
) ||
20730 insn
->code
== (BPF_ALU
| BPF_DIV
| BPF_K
)) &&
20731 insn
->off
== 1 && insn
->imm
== -1) {
20732 bool is64
= BPF_CLASS(insn
->code
) == BPF_ALU64
;
20733 bool isdiv
= BPF_OP(insn
->code
) == BPF_DIV
;
20734 struct bpf_insn
*patchlet
;
20735 struct bpf_insn chk_and_sdiv
[] = {
20736 BPF_RAW_INSN((is64
? BPF_ALU64
: BPF_ALU
) |
20737 BPF_NEG
| BPF_K
, insn
->dst_reg
,
20740 struct bpf_insn chk_and_smod
[] = {
20741 BPF_MOV32_IMM(insn
->dst_reg
, 0),
20744 patchlet
= isdiv
? chk_and_sdiv
: chk_and_smod
;
20745 cnt
= isdiv
? ARRAY_SIZE(chk_and_sdiv
) : ARRAY_SIZE(chk_and_smod
);
20747 new_prog
= bpf_patch_insn_data(env
, i
+ delta
, patchlet
, cnt
);
20752 env
->prog
= prog
= new_prog
;
20753 insn
= new_prog
->insnsi
+ i
+ delta
;
20757 /* Make divide-by-zero and divide-by-minus-one exceptions impossible. */
20758 if (insn
->code
== (BPF_ALU64
| BPF_MOD
| BPF_X
) ||
20759 insn
->code
== (BPF_ALU64
| BPF_DIV
| BPF_X
) ||
20760 insn
->code
== (BPF_ALU
| BPF_MOD
| BPF_X
) ||
20761 insn
->code
== (BPF_ALU
| BPF_DIV
| BPF_X
)) {
20762 bool is64
= BPF_CLASS(insn
->code
) == BPF_ALU64
;
20763 bool isdiv
= BPF_OP(insn
->code
) == BPF_DIV
;
20764 bool is_sdiv
= isdiv
&& insn
->off
== 1;
20765 bool is_smod
= !isdiv
&& insn
->off
== 1;
20766 struct bpf_insn
*patchlet
;
20767 struct bpf_insn chk_and_div
[] = {
20768 /* [R,W]x div 0 -> 0 */
20769 BPF_RAW_INSN((is64
? BPF_JMP
: BPF_JMP32
) |
20770 BPF_JNE
| BPF_K
, insn
->src_reg
,
20772 BPF_ALU32_REG(BPF_XOR
, insn
->dst_reg
, insn
->dst_reg
),
20773 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
20776 struct bpf_insn chk_and_mod
[] = {
20777 /* [R,W]x mod 0 -> [R,W]x */
20778 BPF_RAW_INSN((is64
? BPF_JMP
: BPF_JMP32
) |
20779 BPF_JEQ
| BPF_K
, insn
->src_reg
,
20780 0, 1 + (is64
? 0 : 1), 0),
20782 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
20783 BPF_MOV32_REG(insn
->dst_reg
, insn
->dst_reg
),
20785 struct bpf_insn chk_and_sdiv
[] = {
20786 /* [R,W]x sdiv 0 -> 0
20787 * LLONG_MIN sdiv -1 -> LLONG_MIN
20788 * INT_MIN sdiv -1 -> INT_MIN
20790 BPF_MOV64_REG(BPF_REG_AX
, insn
->src_reg
),
20791 BPF_RAW_INSN((is64
? BPF_ALU64
: BPF_ALU
) |
20792 BPF_ADD
| BPF_K
, BPF_REG_AX
,
20794 BPF_RAW_INSN((is64
? BPF_JMP
: BPF_JMP32
) |
20795 BPF_JGT
| BPF_K
, BPF_REG_AX
,
20797 BPF_RAW_INSN((is64
? BPF_JMP
: BPF_JMP32
) |
20798 BPF_JEQ
| BPF_K
, BPF_REG_AX
,
20800 BPF_RAW_INSN((is64
? BPF_ALU64
: BPF_ALU
) |
20801 BPF_MOV
| BPF_K
, insn
->dst_reg
,
20803 /* BPF_NEG(LLONG_MIN) == -LLONG_MIN == LLONG_MIN */
20804 BPF_RAW_INSN((is64
? BPF_ALU64
: BPF_ALU
) |
20805 BPF_NEG
| BPF_K
, insn
->dst_reg
,
20807 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
20810 struct bpf_insn chk_and_smod
[] = {
20811 /* [R,W]x mod 0 -> [R,W]x */
20812 /* [R,W]x mod -1 -> 0 */
20813 BPF_MOV64_REG(BPF_REG_AX
, insn
->src_reg
),
20814 BPF_RAW_INSN((is64
? BPF_ALU64
: BPF_ALU
) |
20815 BPF_ADD
| BPF_K
, BPF_REG_AX
,
20817 BPF_RAW_INSN((is64
? BPF_JMP
: BPF_JMP32
) |
20818 BPF_JGT
| BPF_K
, BPF_REG_AX
,
20820 BPF_RAW_INSN((is64
? BPF_JMP
: BPF_JMP32
) |
20821 BPF_JEQ
| BPF_K
, BPF_REG_AX
,
20822 0, 3 + (is64
? 0 : 1), 1),
20823 BPF_MOV32_IMM(insn
->dst_reg
, 0),
20824 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
20826 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
20827 BPF_MOV32_REG(insn
->dst_reg
, insn
->dst_reg
),
20831 patchlet
= chk_and_sdiv
;
20832 cnt
= ARRAY_SIZE(chk_and_sdiv
);
20833 } else if (is_smod
) {
20834 patchlet
= chk_and_smod
;
20835 cnt
= ARRAY_SIZE(chk_and_smod
) - (is64
? 2 : 0);
20837 patchlet
= isdiv
? chk_and_div
: chk_and_mod
;
20838 cnt
= isdiv
? ARRAY_SIZE(chk_and_div
) :
20839 ARRAY_SIZE(chk_and_mod
) - (is64
? 2 : 0);
20842 new_prog
= bpf_patch_insn_data(env
, i
+ delta
, patchlet
, cnt
);
20847 env
->prog
= prog
= new_prog
;
20848 insn
= new_prog
->insnsi
+ i
+ delta
;
20852 /* Make it impossible to de-reference a userspace address */
20853 if (BPF_CLASS(insn
->code
) == BPF_LDX
&&
20854 (BPF_MODE(insn
->code
) == BPF_PROBE_MEM
||
20855 BPF_MODE(insn
->code
) == BPF_PROBE_MEMSX
)) {
20856 struct bpf_insn
*patch
= &insn_buf
[0];
20857 u64 uaddress_limit
= bpf_arch_uaddress_limit();
20859 if (!uaddress_limit
)
20862 *patch
++ = BPF_MOV64_REG(BPF_REG_AX
, insn
->src_reg
);
20864 *patch
++ = BPF_ALU64_IMM(BPF_ADD
, BPF_REG_AX
, insn
->off
);
20865 *patch
++ = BPF_ALU64_IMM(BPF_RSH
, BPF_REG_AX
, 32);
20866 *patch
++ = BPF_JMP_IMM(BPF_JLE
, BPF_REG_AX
, uaddress_limit
>> 32, 2);
20868 *patch
++ = BPF_JMP_IMM(BPF_JA
, 0, 0, 1);
20869 *patch
++ = BPF_MOV64_IMM(insn
->dst_reg
, 0);
20871 cnt
= patch
- insn_buf
;
20872 new_prog
= bpf_patch_insn_data(env
, i
+ delta
, insn_buf
, cnt
);
20877 env
->prog
= prog
= new_prog
;
20878 insn
= new_prog
->insnsi
+ i
+ delta
;
20882 /* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
20883 if (BPF_CLASS(insn
->code
) == BPF_LD
&&
20884 (BPF_MODE(insn
->code
) == BPF_ABS
||
20885 BPF_MODE(insn
->code
) == BPF_IND
)) {
20886 cnt
= env
->ops
->gen_ld_abs(insn
, insn_buf
);
20887 if (cnt
== 0 || cnt
>= INSN_BUF_SIZE
) {
20888 verbose(env
, "bpf verifier is misconfigured\n");
20892 new_prog
= bpf_patch_insn_data(env
, i
+ delta
, insn_buf
, cnt
);
20897 env
->prog
= prog
= new_prog
;
20898 insn
= new_prog
->insnsi
+ i
+ delta
;
20902 /* Rewrite pointer arithmetic to mitigate speculation attacks. */
20903 if (insn
->code
== (BPF_ALU64
| BPF_ADD
| BPF_X
) ||
20904 insn
->code
== (BPF_ALU64
| BPF_SUB
| BPF_X
)) {
20905 const u8 code_add
= BPF_ALU64
| BPF_ADD
| BPF_X
;
20906 const u8 code_sub
= BPF_ALU64
| BPF_SUB
| BPF_X
;
20907 struct bpf_insn
*patch
= &insn_buf
[0];
20908 bool issrc
, isneg
, isimm
;
20911 aux
= &env
->insn_aux_data
[i
+ delta
];
20912 if (!aux
->alu_state
||
20913 aux
->alu_state
== BPF_ALU_NON_POINTER
)
20916 isneg
= aux
->alu_state
& BPF_ALU_NEG_VALUE
;
20917 issrc
= (aux
->alu_state
& BPF_ALU_SANITIZE
) ==
20918 BPF_ALU_SANITIZE_SRC
;
20919 isimm
= aux
->alu_state
& BPF_ALU_IMMEDIATE
;
20921 off_reg
= issrc
? insn
->src_reg
: insn
->dst_reg
;
20923 *patch
++ = BPF_MOV32_IMM(BPF_REG_AX
, aux
->alu_limit
);
20926 *patch
++ = BPF_ALU64_IMM(BPF_MUL
, off_reg
, -1);
20927 *patch
++ = BPF_MOV32_IMM(BPF_REG_AX
, aux
->alu_limit
);
20928 *patch
++ = BPF_ALU64_REG(BPF_SUB
, BPF_REG_AX
, off_reg
);
20929 *patch
++ = BPF_ALU64_REG(BPF_OR
, BPF_REG_AX
, off_reg
);
20930 *patch
++ = BPF_ALU64_IMM(BPF_NEG
, BPF_REG_AX
, 0);
20931 *patch
++ = BPF_ALU64_IMM(BPF_ARSH
, BPF_REG_AX
, 63);
20932 *patch
++ = BPF_ALU64_REG(BPF_AND
, BPF_REG_AX
, off_reg
);
20935 *patch
++ = BPF_MOV64_REG(insn
->dst_reg
, insn
->src_reg
);
20936 insn
->src_reg
= BPF_REG_AX
;
20938 insn
->code
= insn
->code
== code_add
?
20939 code_sub
: code_add
;
20941 if (issrc
&& isneg
&& !isimm
)
20942 *patch
++ = BPF_ALU64_IMM(BPF_MUL
, off_reg
, -1);
20943 cnt
= patch
- insn_buf
;
20945 new_prog
= bpf_patch_insn_data(env
, i
+ delta
, insn_buf
, cnt
);
20950 env
->prog
= prog
= new_prog
;
20951 insn
= new_prog
->insnsi
+ i
+ delta
;
20955 if (is_may_goto_insn(insn
)) {
20956 int stack_off
= -stack_depth
- 8;
20958 stack_depth_extra
= 8;
20959 insn_buf
[0] = BPF_LDX_MEM(BPF_DW
, BPF_REG_AX
, BPF_REG_10
, stack_off
);
20960 if (insn
->off
>= 0)
20961 insn_buf
[1] = BPF_JMP_IMM(BPF_JEQ
, BPF_REG_AX
, 0, insn
->off
+ 2);
20963 insn_buf
[1] = BPF_JMP_IMM(BPF_JEQ
, BPF_REG_AX
, 0, insn
->off
- 1);
20964 insn_buf
[2] = BPF_ALU64_IMM(BPF_SUB
, BPF_REG_AX
, 1);
20965 insn_buf
[3] = BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_AX
, stack_off
);
20968 new_prog
= bpf_patch_insn_data(env
, i
+ delta
, insn_buf
, cnt
);
20973 env
->prog
= prog
= new_prog
;
20974 insn
= new_prog
->insnsi
+ i
+ delta
;
20978 if (insn
->code
!= (BPF_JMP
| BPF_CALL
))
20980 if (insn
->src_reg
== BPF_PSEUDO_CALL
)
20982 if (insn
->src_reg
== BPF_PSEUDO_KFUNC_CALL
) {
20983 ret
= fixup_kfunc_call(env
, insn
, insn_buf
, i
+ delta
, &cnt
);
20989 new_prog
= bpf_patch_insn_data(env
, i
+ delta
, insn_buf
, cnt
);
20994 env
->prog
= prog
= new_prog
;
20995 insn
= new_prog
->insnsi
+ i
+ delta
;
20999 /* Skip inlining the helper call if the JIT does it. */
21000 if (bpf_jit_inlines_helper_call(insn
->imm
))
21003 if (insn
->imm
== BPF_FUNC_get_route_realm
)
21004 prog
->dst_needed
= 1;
21005 if (insn
->imm
== BPF_FUNC_get_prandom_u32
)
21006 bpf_user_rnd_init_once();
21007 if (insn
->imm
== BPF_FUNC_override_return
)
21008 prog
->kprobe_override
= 1;
21009 if (insn
->imm
== BPF_FUNC_tail_call
) {
21010 /* If we tail call into other programs, we
21011 * cannot make any assumptions since they can
21012 * be replaced dynamically during runtime in
21013 * the program array.
21015 prog
->cb_access
= 1;
21016 if (!allow_tail_call_in_subprogs(env
))
21017 prog
->aux
->stack_depth
= MAX_BPF_STACK
;
21018 prog
->aux
->max_pkt_offset
= MAX_PACKET_OFF
;
21020 /* mark bpf_tail_call as different opcode to avoid
21021 * conditional branch in the interpreter for every normal
21022 * call and to prevent accidental JITing by JIT compiler
21023 * that doesn't support bpf_tail_call yet
21026 insn
->code
= BPF_JMP
| BPF_TAIL_CALL
;
21028 aux
= &env
->insn_aux_data
[i
+ delta
];
21029 if (env
->bpf_capable
&& !prog
->blinding_requested
&&
21030 prog
->jit_requested
&&
21031 !bpf_map_key_poisoned(aux
) &&
21032 !bpf_map_ptr_poisoned(aux
) &&
21033 !bpf_map_ptr_unpriv(aux
)) {
21034 struct bpf_jit_poke_descriptor desc
= {
21035 .reason
= BPF_POKE_REASON_TAIL_CALL
,
21036 .tail_call
.map
= aux
->map_ptr_state
.map_ptr
,
21037 .tail_call
.key
= bpf_map_key_immediate(aux
),
21038 .insn_idx
= i
+ delta
,
21041 ret
= bpf_jit_add_poke_descriptor(prog
, &desc
);
21043 verbose(env
, "adding tail call poke descriptor failed\n");
21047 insn
->imm
= ret
+ 1;
21051 if (!bpf_map_ptr_unpriv(aux
))
21054 /* instead of changing every JIT dealing with tail_call
21055 * emit two extra insns:
21056 * if (index >= max_entries) goto out;
21057 * index &= array->index_mask;
21058 * to avoid out-of-bounds cpu speculation
21060 if (bpf_map_ptr_poisoned(aux
)) {
21061 verbose(env
, "tail_call abusing map_ptr\n");
21065 map_ptr
= aux
->map_ptr_state
.map_ptr
;
21066 insn_buf
[0] = BPF_JMP_IMM(BPF_JGE
, BPF_REG_3
,
21067 map_ptr
->max_entries
, 2);
21068 insn_buf
[1] = BPF_ALU32_IMM(BPF_AND
, BPF_REG_3
,
21069 container_of(map_ptr
,
21072 insn_buf
[2] = *insn
;
21074 new_prog
= bpf_patch_insn_data(env
, i
+ delta
, insn_buf
, cnt
);
21079 env
->prog
= prog
= new_prog
;
21080 insn
= new_prog
->insnsi
+ i
+ delta
;
		if (insn->imm == BPF_FUNC_timer_set_callback) {
			/* The verifier will process callback_fn as many times as necessary
			 * with different maps and the register states prepared by
			 * set_timer_callback_state will be accurate.
			 *
			 * The following use case is valid:
			 *   map1 is shared by prog1, prog2, prog3.
			 *   prog1 calls bpf_timer_init for some map1 elements
			 *   prog2 calls bpf_timer_set_callback for some map1 elements.
			 *     Those that were not bpf_timer_init-ed will return -EINVAL.
			 *   prog3 calls bpf_timer_start for some map1 elements.
			 *     Those that were not both bpf_timer_init-ed and
			 *     bpf_timer_set_callback-ed will return -EINVAL.
			 */
			struct bpf_insn ld_addrs[2] = {
				BPF_LD_IMM64(BPF_REG_3, (long)prog->aux),
			};

			insn_buf[0] = ld_addrs[0];
			insn_buf[1] = ld_addrs[1];
			insn_buf[2] = *insn;
			cnt = 3;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto patch_call_imm;
		}
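		/* Note on the patch above: bpf_timer_set_callback() takes the
		 * owning program's aux pointer as an extra argument on the
		 * kernel side, so the verifier materializes prog->aux into R3
		 * with a 64-bit load-immediate right before the call.
		 */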
		if (is_storage_get_function(insn->imm)) {
			if (!in_sleepable(env) ||
			    env->insn_aux_data[i + delta].storage_get_func_atomic)
				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
			else
				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
			insn_buf[1] = *insn;
			cnt = 2;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto patch_call_imm;
		}
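		/* The MOV into R5 above supplies the gfp flags that the
		 * storage_get helpers expect as a hidden extra argument:
		 * GFP_ATOMIC when the program (or this particular call site)
		 * cannot sleep, GFP_KERNEL otherwise.
		 */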
		/* bpf_per_cpu_ptr() and bpf_this_cpu_ptr() */
		if (env->insn_aux_data[i + delta].call_with_percpu_alloc_ptr) {
			/* patch with 'r1 = *(u64 *)(r1 + 0)' since for percpu data,
			 * bpf_mem_alloc() returns a ptr to the percpu data ptr.
			 */
			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
			insn_buf[1] = *insn;
			cnt = 2;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto patch_call_imm;
		}
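		/* In other words, the extra LDX above performs roughly
		 * "r1 = *(void **)r1" so the helper receives the real percpu
		 * address rather than the address of the slot where
		 * bpf_mem_alloc() stored it.
		 */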
		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
		 * and other inlining handlers are currently limited to 64 bit
		 * only.
		 */
		if (prog->jit_requested && BITS_PER_LONG == 64 &&
		    (insn->imm == BPF_FUNC_map_lookup_elem ||
		     insn->imm == BPF_FUNC_map_update_elem ||
		     insn->imm == BPF_FUNC_map_delete_elem ||
		     insn->imm == BPF_FUNC_map_push_elem   ||
		     insn->imm == BPF_FUNC_map_pop_elem    ||
		     insn->imm == BPF_FUNC_map_peek_elem   ||
		     insn->imm == BPF_FUNC_redirect_map    ||
		     insn->imm == BPF_FUNC_for_each_map_elem ||
		     insn->imm == BPF_FUNC_map_lookup_percpu_elem)) {
			aux = &env->insn_aux_data[i + delta];
			if (bpf_map_ptr_poisoned(aux))
				goto patch_call_imm;

			map_ptr = aux->map_ptr_state.map_ptr;
			ops = map_ptr->ops;
			if (insn->imm == BPF_FUNC_map_lookup_elem &&
			    ops->map_gen_lookup) {
				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
				if (cnt == -EOPNOTSUPP)
					goto patch_map_ops_generic;
				if (cnt <= 0 || cnt >= INSN_BUF_SIZE) {
					verbose(env, "bpf verifier is misconfigured\n");
					return -EINVAL;
				}

				new_prog = bpf_patch_insn_data(env, i + delta,
							       insn_buf, cnt);
				env->prog = prog = new_prog;
				insn = new_prog->insnsi + i + delta;
				goto next_insn;
			}

			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
				     (void *(*)(struct bpf_map *map, void *key))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
				     (long (*)(struct bpf_map *map, void *key))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
				     (long (*)(struct bpf_map *map, void *key, void *value,
					       u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
				     (long (*)(struct bpf_map *map, void *value,
					       u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
				     (long (*)(struct bpf_map *map, void *value))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
				     (long (*)(struct bpf_map *map, void *value))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_redirect,
				     (long (*)(struct bpf_map *map, u64 index, u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
				     (long (*)(struct bpf_map *map,
					       bpf_callback_t callback_fn,
					       void *callback_ctx,
					       u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem,
				     (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));

patch_map_ops_generic:
			switch (insn->imm) {
			case BPF_FUNC_map_lookup_elem:
				insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
				goto next_insn;
			case BPF_FUNC_map_update_elem:
				insn->imm = BPF_CALL_IMM(ops->map_update_elem);
				goto next_insn;
			case BPF_FUNC_map_delete_elem:
				insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
				goto next_insn;
			case BPF_FUNC_map_push_elem:
				insn->imm = BPF_CALL_IMM(ops->map_push_elem);
				goto next_insn;
			case BPF_FUNC_map_pop_elem:
				insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
				goto next_insn;
			case BPF_FUNC_map_peek_elem:
				insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
				goto next_insn;
			case BPF_FUNC_redirect_map:
				insn->imm = BPF_CALL_IMM(ops->map_redirect);
				goto next_insn;
			case BPF_FUNC_for_each_map_elem:
				insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
				goto next_insn;
			case BPF_FUNC_map_lookup_percpu_elem:
				insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
				goto next_insn;
			}

			goto patch_call_imm;
		}
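		/* The BPF_CALL_IMM() rewrites above turn the generic helper
		 * id in insn->imm into a direct call to the map's own ops
		 * callback, encoded (like the fn->func assignment further
		 * below) as an offset relative to __bpf_call_base.
		 */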
		/* Implement bpf_jiffies64 inline. */
		if (prog->jit_requested && BITS_PER_LONG == 64 &&
		    insn->imm == BPF_FUNC_jiffies64) {
			struct bpf_insn ld_jiffies_addr[2] = {
				BPF_LD_IMM64(BPF_REG_0,
					     (unsigned long)&jiffies),
			};

			insn_buf[0] = ld_jiffies_addr[0];
			insn_buf[1] = ld_jiffies_addr[1];
			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
						  BPF_REG_0, 0);
			cnt = 3;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
						       cnt);
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}
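		/* The three instructions built above replace the helper call
		 * with a load-immediate of &jiffies followed by a plain
		 * 64-bit read, which is why the inlining is limited to 64-bit
		 * hosts where that single load observes the full counter.
		 */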
#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
		/* Implement bpf_get_smp_processor_id() inline. */
		if (insn->imm == BPF_FUNC_get_smp_processor_id &&
		    verifier_inlines_helper_call(env, insn->imm)) {
			/* BPF_FUNC_get_smp_processor_id inlining is an
			 * optimization, so if pcpu_hot.cpu_number is ever
			 * changed in some incompatible and hard to support
			 * way, it's fine to back out this inlining logic
			 */
			insn_buf[0] = BPF_MOV32_IMM(BPF_REG_0, (u32)(unsigned long)&pcpu_hot.cpu_number);
			insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
			insn_buf[2] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0);
			cnt = 3;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}
#endif
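		/* The sequence above loads the percpu address of
		 * pcpu_hot.cpu_number (truncated to a 32-bit immediate),
		 * converts it to this CPU's address with
		 * BPF_MOV64_PERCPU_REG and reads the 32-bit CPU number,
		 * avoiding a helper call on x86-64.
		 */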
		/* Implement bpf_get_func_arg inline. */
		if (prog_type == BPF_PROG_TYPE_TRACING &&
		    insn->imm == BPF_FUNC_get_func_arg) {
			/* Load nr_args from ctx - 8 */
			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
			insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
			insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
			insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
			insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
			insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
			insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
			insn_buf[7] = BPF_JMP_A(1);
			insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
			cnt = 9;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}
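		/* Roughly equivalent C for the sequence above:
		 *
		 *	nr_args = *(u64 *)(ctx - 8);
		 *	if ((u32)n >= nr_args)
		 *		return -EINVAL;
		 *	*value = ((u64 *)ctx)[n];
		 *	return 0;
		 *
		 * where n is R2 and value is R3.
		 */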
		/* Implement bpf_get_func_ret inline. */
		if (prog_type == BPF_PROG_TYPE_TRACING &&
		    insn->imm == BPF_FUNC_get_func_ret) {
			if (eatype == BPF_TRACE_FEXIT ||
			    eatype == BPF_MODIFY_RETURN) {
				/* Load nr_args from ctx - 8 */
				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
				insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
				insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
				insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
				insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
				insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
				cnt = 6;
			} else {
				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
				cnt = 1;
			}

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}
		/* Implement get_func_arg_cnt inline. */
		if (prog_type == BPF_PROG_TYPE_TRACING &&
		    insn->imm == BPF_FUNC_get_func_arg_cnt) {
			/* Load nr_args from ctx - 8 */
			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}
		/* Implement bpf_get_func_ip inline. */
		if (prog_type == BPF_PROG_TYPE_TRACING &&
		    insn->imm == BPF_FUNC_get_func_ip) {
			/* Load IP address from ctx - 16 */
			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}
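		/* The tracing-prog inlines above rely on the trampoline ctx
		 * layout: saved arguments start at ctx[0], the argument count
		 * sits at ctx - 8, and (when the trampoline saved it) the
		 * traced function's IP sits at ctx - 16.
		 */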
		/* Implement bpf_get_branch_snapshot inline. */
		if (IS_ENABLED(CONFIG_PERF_EVENTS) &&
		    prog->jit_requested && BITS_PER_LONG == 64 &&
		    insn->imm == BPF_FUNC_get_branch_snapshot) {
			/* We are dealing with the following func protos:
			 * u64 bpf_get_branch_snapshot(void *buf, u32 size, u64 flags);
			 * int perf_snapshot_branch_stack(struct perf_branch_entry *entries, u32 cnt);
			 */
			const u32 br_entry_size = sizeof(struct perf_branch_entry);

			/* struct perf_branch_entry is part of UAPI and is
			 * used as an array element, so extremely unlikely to
			 * ever grow or shrink
			 */
			BUILD_BUG_ON(br_entry_size != 24);

			/* if (unlikely(flags)) return -EINVAL */
			insn_buf[0] = BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 0, 7);

			/* Transform size (bytes) into number of entries (cnt = size / 24).
			 * But to avoid expensive division instruction, we implement
			 * divide-by-3 through multiplication, followed by further
			 * division by 8 through 3-bit right shift.
			 * Refer to book "Hacker's Delight, 2nd ed." by Henry S. Warren, Jr.,
			 * p. 227, chapter "Unsigned Division by 3" for details and proofs.
			 *
			 * N / 3 <=> M * N / 2^33, where M = (2^33 + 1) / 3 = 0xaaaaaaab.
			 */
			insn_buf[1] = BPF_MOV32_IMM(BPF_REG_0, 0xaaaaaaab);
			insn_buf[2] = BPF_ALU64_REG(BPF_MUL, BPF_REG_2, BPF_REG_0);
			insn_buf[3] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36);
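			/* Worked example: for a 72-byte buffer,
			 * 72 * 0xaaaaaaab = 0x3000000018, and shifting right
			 * by 36 leaves 3, i.e. 72 / 24 entries.
			 */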
			/* call perf_snapshot_branch_stack implementation */
			insn_buf[4] = BPF_EMIT_CALL(static_call_query(perf_snapshot_branch_stack));
			/* if (entry_cnt == 0) return -ENOENT */
			insn_buf[5] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4);
			/* return entry_cnt * sizeof(struct perf_branch_entry) */
			insn_buf[6] = BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, br_entry_size);
			insn_buf[7] = BPF_JMP_A(3);
			/* return -EINVAL; */
			insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
			insn_buf[9] = BPF_JMP_A(1);
			/* return -ENOENT; */
			insn_buf[10] = BPF_MOV64_IMM(BPF_REG_0, -ENOENT);
			cnt = 11;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}
		/* Implement bpf_kptr_xchg inline */
		if (prog->jit_requested && BITS_PER_LONG == 64 &&
		    insn->imm == BPF_FUNC_kptr_xchg &&
		    bpf_jit_supports_ptr_xchg()) {
			insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
			insn_buf[1] = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
			cnt = 2;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			goto next_insn;
		}
patch_call_imm:
		fn = env->ops->get_func_proto(insn->imm, env->prog);
		/* all functions that have prototype and verifier allowed
		 * programs to call them, must be real in-kernel functions
		 */
		if (!fn->func) {
			verbose(env,
				"kernel subsystem misconfigured func %s#%d\n",
				func_id_name(insn->imm), insn->imm);
			return -EFAULT;
		}
		insn->imm = fn->func - __bpf_call_base;
next_insn:
		if (subprogs[cur_subprog + 1].start == i + delta + 1) {
			subprogs[cur_subprog].stack_depth += stack_depth_extra;
			subprogs[cur_subprog].stack_extra = stack_depth_extra;
			cur_subprog++;
			stack_depth = subprogs[cur_subprog].stack_depth;
			stack_depth_extra = 0;
		}
	}

	env->prog->aux->stack_depth = subprogs[0].stack_depth;
	for (i = 0; i < env->subprog_cnt; i++) {
		int subprog_start = subprogs[i].start;
		int stack_slots = subprogs[i].stack_extra / 8;

		if (!stack_slots)
			continue;
		if (stack_slots > 1) {
			verbose(env, "verifier bug: stack_slots supports may_goto only\n");
			return -EFAULT;
		}

		/* Add ST insn to subprog prologue to init extra stack */
		insn_buf[0] = BPF_ST_MEM(BPF_DW, BPF_REG_FP,
					 -subprogs[i].stack_depth, BPF_MAX_LOOPS);
		/* Copy first actual insn to preserve it */
		insn_buf[1] = env->prog->insnsi[subprog_start];

		new_prog = bpf_patch_insn_data(env, subprog_start, insn_buf, 2);
		env->prog = prog = new_prog;
		/*
		 * If may_goto is a first insn of a prog there could be a jmp
		 * insn that points to it, hence adjust all such jmps to point
		 * to insn after BPF_ST that inits may_goto count.
		 * Adjustment will succeed because bpf_patch_insn_data() didn't fail.
		 */
		WARN_ON(adjust_jmp_off(env->prog, subprog_start, 1));
	}
	/* Since poke tab is now finalized, publish aux to tracker. */
	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		map_ptr = prog->aux->poke_tab[i].tail_call.map;
		if (!map_ptr->ops->map_poke_track ||
		    !map_ptr->ops->map_poke_untrack ||
		    !map_ptr->ops->map_poke_run) {
			verbose(env, "bpf verifier is misconfigured\n");
			return -EINVAL;
		}

		ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
		if (ret < 0) {
			verbose(env, "tracking tail call prog failed\n");
			return ret;
		}
	}

	sort_kfunc_descs_by_imm_off(env->prog);

	return 0;
}
static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
					int position,
					s32 stack_base,
					u32 callback_subprogno,
					u32 *total_cnt)
{
	s32 r6_offset = stack_base + 0 * BPF_REG_SIZE;
	s32 r7_offset = stack_base + 1 * BPF_REG_SIZE;
	s32 r8_offset = stack_base + 2 * BPF_REG_SIZE;
	int reg_loop_max = BPF_REG_6;
	int reg_loop_cnt = BPF_REG_7;
	int reg_loop_ctx = BPF_REG_8;

	struct bpf_insn *insn_buf = env->insn_buf;
	struct bpf_prog *new_prog;
	u32 callback_start;
	u32 call_insn_offset;
	s32 callback_offset;
	u32 cnt = 0;

	/* This represents an inlined version of bpf_iter.c:bpf_loop,
	 * be careful to modify this code in sync.
	 */

	/* Return error and jump to the end of the patch if
	 * expected number of iterations is too big.
	 */
	insn_buf[cnt++] = BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2);
	insn_buf[cnt++] = BPF_MOV32_IMM(BPF_REG_0, -E2BIG);
	insn_buf[cnt++] = BPF_JMP_IMM(BPF_JA, 0, 0, 16);
	/* spill R6, R7, R8 to use these as loop vars */
	insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset);
	insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset);
	insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset);
	/* initialize loop vars */
	insn_buf[cnt++] = BPF_MOV64_REG(reg_loop_max, BPF_REG_1);
	insn_buf[cnt++] = BPF_MOV32_IMM(reg_loop_cnt, 0);
	insn_buf[cnt++] = BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3);
	/* loop header,
	 * if reg_loop_cnt >= reg_loop_max skip the loop body
	 */
	insn_buf[cnt++] = BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5);
	/* callback call,
	 * correct callback offset would be set after patching
	 */
	insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt);
	insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx);
	insn_buf[cnt++] = BPF_CALL_REL(0);
	/* increment loop counter */
	insn_buf[cnt++] = BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1);
	/* jump to loop header if callback returned 0 */
	insn_buf[cnt++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6);
	/* return value of bpf_loop,
	 * set R0 to the number of iterations
	 */
	insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt);
	/* restore original values of R6, R7, R8 */
	insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset);
	insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset);
	insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset);

	*total_cnt = cnt;
	new_prog = bpf_patch_insn_data(env, position, insn_buf, cnt);
	if (!new_prog)
		return new_prog;

	/* callback start is known only after patching */
	callback_start = env->subprog_info[callback_subprogno].start;
	/* Note: insn_buf[12] is an offset of BPF_CALL_REL instruction */
	call_insn_offset = position + 12;
	callback_offset = callback_start - call_insn_offset - 1;
	new_prog->insnsi[call_insn_offset].imm = callback_offset;

	return new_prog;
}
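/* The patch emitted by inline_bpf_loop() behaves roughly like:
 *
 *	if (nr_loops > BPF_MAX_LOOPS)
 *		return -E2BIG;
 *	for (i = 0; i < nr_loops; ) {
 *		ret = callback_fn(i, callback_ctx);
 *		i++;
 *		if (ret)
 *			break;
 *	}
 *	return i;
 *
 * with R6-R8 spilled around the loop so they can serve as the bound,
 * counter and context registers.
 */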
static bool is_bpf_loop_call(struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
		insn->src_reg == 0 &&
		insn->imm == BPF_FUNC_loop;
}
/* For all sub-programs in the program (including main) check
 * insn_aux_data to see if there are bpf_loop calls that require
 * inlining. If such calls are found the calls are replaced with a
 * sequence of instructions produced by `inline_bpf_loop` function and
 * subprog stack_depth is increased by the size of 3 registers.
 * This stack space is used to spill values of the R6, R7, R8.  These
 * registers are used to store the loop bound, counter and context
 * variables.
 */
static int optimize_bpf_loop(struct bpf_verifier_env *env)
{
	struct bpf_subprog_info *subprogs = env->subprog_info;
	int i, cur_subprog = 0, cnt, delta = 0;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	u16 stack_depth = subprogs[cur_subprog].stack_depth;
	u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
	u16 stack_depth_extra = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		struct bpf_loop_inline_state *inline_state =
			&env->insn_aux_data[i + delta].loop_inline_state;

		if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) {
			struct bpf_prog *new_prog;

			stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup;
			new_prog = inline_bpf_loop(env,
						   i + delta,
						   -(stack_depth + stack_depth_extra),
						   inline_state->callback_subprogno,
						   &cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = new_prog;
			insn = new_prog->insnsi + i + delta;
		}

		if (subprogs[cur_subprog + 1].start == i + delta + 1) {
			subprogs[cur_subprog].stack_depth += stack_depth_extra;
			cur_subprog++;
			stack_depth = subprogs[cur_subprog].stack_depth;
			stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
			stack_depth_extra = 0;
		}
	}

	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;

	return 0;
}
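/* Each inlined bpf_loop reserves 3 * BPF_REG_SIZE bytes below the
 * subprog's current stack for the R6-R8 spill slots; the roundup term
 * keeps that spill area 8-byte aligned when the existing stack depth
 * is not already a multiple of 8.
 */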
/* Remove unnecessary spill/fill pairs, members of fastcall pattern,
 * adjust subprograms stack depth when possible.
 */
static int remove_fastcall_spills_fills(struct bpf_verifier_env *env)
{
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn_aux_data *aux = env->insn_aux_data;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	u32 spills_num;
	bool modified = false;
	int i, j;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (aux[i].fastcall_spills_num > 0) {
			spills_num = aux[i].fastcall_spills_num;
			/* NOPs would be removed by opt_remove_nops() */
			for (j = 1; j <= spills_num; ++j) {
				*(insn - j) = NOP;
				*(insn + j) = NOP;
			}
			modified = true;
		}
		if ((subprog + 1)->start == i + 1) {
			if (modified && !subprog->keep_fastcall_stack)
				subprog->stack_depth = -subprog->fastcall_stack_off;
			subprog++;
			modified = false;
		}
	}

	return 0;
}
static void free_states(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state_list *sl, *sln;
	int i;

	sl = env->free_list;
	while (sl) {
		sln = sl->next;
		free_verifier_state(&sl->state, false);
		kfree(sl);
		sl = sln;
	}
	env->free_list = NULL;

	if (!env->explored_states)
		return;

	for (i = 0; i < state_htab_size(env); i++) {
		sl = env->explored_states[i];

		while (sl) {
			sln = sl->next;
			free_verifier_state(&sl->state, false);
			kfree(sl);
			sl = sln;
		}
		env->explored_states[i] = NULL;
	}
}
static int do_check_common(struct bpf_verifier_env *env, int subprog)
{
	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
	struct bpf_subprog_info *sub = subprog_info(env, subprog);
	struct bpf_verifier_state *state;
	struct bpf_reg_state *regs;
	int ret, i;

	env->prev_linfo = NULL;

	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;
	state->curframe = 0;
	state->speculative = false;
	state->branches = 1;
	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
	if (!state->frame[0]) {
		kfree(state);
		return -ENOMEM;
	}
	env->cur_state = state;
	init_func_state(env, state->frame[0],
			BPF_MAIN_FUNC /* callsite */,
			0 /* frameno */,
			subprog);
	state->first_insn_idx = env->subprog_info[subprog].start;
	state->last_insn_idx = -1;

	regs = state->frame[state->curframe]->regs;
	if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
		const char *sub_name = subprog_name(env, subprog);
		struct bpf_subprog_arg_info *arg;
		struct bpf_reg_state *reg;

		verbose(env, "Validating %s() func#%d...\n", sub_name, subprog);
		ret = btf_prepare_func_args(env, subprog);
		if (ret)
			goto out;

		if (subprog_is_exc_cb(env, subprog)) {
			state->frame[0]->in_exception_callback_fn = true;
			/* We have already ensured that the callback returns an integer, just
			 * like all global subprogs. We need to determine it only has a single
			 * scalar argument.
			 */
			if (sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_ANYTHING) {
				verbose(env, "exception cb only supports single integer argument\n");
				ret = -EINVAL;
				goto out;
			}
		}
		for (i = BPF_REG_1; i <= sub->arg_cnt; i++) {
			arg = &sub->args[i - BPF_REG_1];
			reg = &regs[i];

			if (arg->arg_type == ARG_PTR_TO_CTX) {
				reg->type = PTR_TO_CTX;
				mark_reg_known_zero(env, regs, i);
			} else if (arg->arg_type == ARG_ANYTHING) {
				reg->type = SCALAR_VALUE;
				mark_reg_unknown(env, regs, i);
			} else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) {
				/* assume unspecial LOCAL dynptr type */
				__mark_dynptr_reg(reg, BPF_DYNPTR_TYPE_LOCAL, true, ++env->id_gen);
			} else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) {
				reg->type = PTR_TO_MEM;
				if (arg->arg_type & PTR_MAYBE_NULL)
					reg->type |= PTR_MAYBE_NULL;
				mark_reg_known_zero(env, regs, i);
				reg->mem_size = arg->mem_size;
				reg->id = ++env->id_gen;
			} else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) {
				reg->type = PTR_TO_BTF_ID;
				if (arg->arg_type & PTR_MAYBE_NULL)
					reg->type |= PTR_MAYBE_NULL;
				if (arg->arg_type & PTR_UNTRUSTED)
					reg->type |= PTR_UNTRUSTED;
				if (arg->arg_type & PTR_TRUSTED)
					reg->type |= PTR_TRUSTED;
				mark_reg_known_zero(env, regs, i);
				reg->btf = bpf_get_btf_vmlinux(); /* can't fail at this point */
				reg->btf_id = arg->btf_id;
				reg->id = ++env->id_gen;
			} else if (base_type(arg->arg_type) == ARG_PTR_TO_ARENA) {
				/* caller can pass either PTR_TO_ARENA or SCALAR */
				mark_reg_unknown(env, regs, i);
			} else {
				WARN_ONCE(1, "BUG: unhandled arg#%d type %d\n",
					  i - BPF_REG_1, arg->arg_type);
				ret = -EFAULT;
				goto out;
			}
		}
	} else {
		/* if main BPF program has associated BTF info, validate that
		 * it's matching expected signature, and otherwise mark BTF
		 * info for main program as unreliable
		 */
		if (env->prog->aux->func_info_aux) {
			ret = btf_prepare_func_args(env, 0);
			if (ret || sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_PTR_TO_CTX)
				env->prog->aux->func_info_aux[0].unreliable = true;
		}

		/* 1st arg to a function */
		regs[BPF_REG_1].type = PTR_TO_CTX;
		mark_reg_known_zero(env, regs, BPF_REG_1);
	}

	ret = do_check(env);
out:
	/* check for NULL is necessary, since cur_state can be freed inside
	 * do_check() under memory pressure.
	 */
	if (env->cur_state) {
		free_verifier_state(env->cur_state, true);
		env->cur_state = NULL;
	}
	while (!pop_stack(env, NULL, NULL, false));
	if (!ret && pop_log)
		bpf_vlog_reset(&env->log, 0);
	free_states(env);
	return ret;
}
/* Lazily verify all global functions based on their BTF, if they are called
 * from main BPF program or any of subprograms transitively.
 * BPF global subprogs called from dead code are not validated.
 * All callable global functions must pass verification.
 * Otherwise the whole program is rejected.
 * Consider:
 * int bar(int);
 * int foo(int f)
 * {
 *    return bar(f);
 * }
 * int bar(int b)
 * {
 *    ...
 * }
 * foo() will be verified first for R1=any_scalar_value. During verification it
 * will be assumed that bar() already verified successfully and call to bar()
 * from foo() will be checked for type match only. Later bar() will be verified
 * independently to check that it's safe for R1=any_scalar_value.
 */
static int do_check_subprogs(struct bpf_verifier_env *env)
{
	struct bpf_prog_aux *aux = env->prog->aux;
	struct bpf_func_info_aux *sub_aux;
	int i, ret, new_cnt;

	if (!aux->func_info)
		return 0;

	/* exception callback is presumed to be always called */
	if (env->exception_callback_subprog)
		subprog_aux(env, env->exception_callback_subprog)->called = true;

again:
	new_cnt = 0;
	for (i = 1; i < env->subprog_cnt; i++) {
		if (!subprog_is_global(env, i))
			continue;

		sub_aux = subprog_aux(env, i);
		if (!sub_aux->called || sub_aux->verified)
			continue;

		env->insn_idx = env->subprog_info[i].start;
		WARN_ON_ONCE(env->insn_idx == 0);
		ret = do_check_common(env, i);
		if (ret) {
			return ret;
		} else if (env->log.level & BPF_LOG_LEVEL) {
			verbose(env, "Func#%d ('%s') is safe for any args that match its prototype\n",
				i, subprog_name(env, i));
		}

		/* We verified new global subprog, it might have called some
		 * more global subprogs that we haven't verified yet, so we
		 * need to do another pass over subprogs to verify those.
		 */
		sub_aux->verified = true;
		new_cnt++;
	}

	/* We can't loop forever as we verify at least one global subprog on
	 * each pass.
	 */
	if (new_cnt)
		goto again;

	return 0;
}
static int do_check_main(struct bpf_verifier_env *env)
{
	int ret;

	env->insn_idx = 0;
	ret = do_check_common(env, 0);
	if (!ret)
		env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
	return ret;
}
static void print_verification_stats(struct bpf_verifier_env *env)
{
	int i;

	if (env->log.level & BPF_LOG_STATS) {
		verbose(env, "verification time %lld usec\n",
			div_u64(env->verification_time, 1000));
		verbose(env, "stack depth ");
		for (i = 0; i < env->subprog_cnt; i++) {
			u32 depth = env->subprog_info[i].stack_depth;

			verbose(env, "%d", depth);
			if (i + 1 < env->subprog_cnt)
				verbose(env, "+");
		}
		verbose(env, "\n");
	}
	verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
		"total_states %d peak_states %d mark_read %d\n",
		env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
		env->max_states_per_insn, env->total_states,
		env->peak_states, env->longest_mark_read_walk);
}
static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
{
	const struct btf_type *t, *func_proto;
	const struct bpf_struct_ops_desc *st_ops_desc;
	const struct bpf_struct_ops *st_ops;
	const struct btf_member *member;
	struct bpf_prog *prog = env->prog;
	u32 btf_id, member_idx;
	struct btf *btf;
	const char *mname;
	int err;

	if (!prog->gpl_compatible) {
		verbose(env, "struct ops programs must have a GPL compatible license\n");
		return -EINVAL;
	}

	if (!prog->aux->attach_btf_id)
		return -ENOTSUPP;

	btf = prog->aux->attach_btf;
	if (btf_is_module(btf)) {
		/* Make sure st_ops is valid through the lifetime of env */
		env->attach_btf_mod = btf_try_get_module(btf);
		if (!env->attach_btf_mod) {
			verbose(env, "struct_ops module %s is not found\n",
				btf_get_name(btf));
			return -ENOTSUPP;
		}
	}

	btf_id = prog->aux->attach_btf_id;
	st_ops_desc = bpf_struct_ops_find(btf, btf_id);
	if (!st_ops_desc) {
		verbose(env, "attach_btf_id %u is not a supported struct\n",
			btf_id);
		return -ENOTSUPP;
	}
	st_ops = st_ops_desc->st_ops;

	t = st_ops_desc->type;
	member_idx = prog->expected_attach_type;
	if (member_idx >= btf_type_vlen(t)) {
		verbose(env, "attach to invalid member idx %u of struct %s\n",
			member_idx, st_ops->name);
		return -EINVAL;
	}

	member = &btf_type_member(t)[member_idx];
	mname = btf_name_by_offset(btf, member->name_off);
	func_proto = btf_type_resolve_func_ptr(btf, member->type,
					       NULL);
	if (!func_proto) {
		verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
			mname, member_idx, st_ops->name);
		return -EINVAL;
	}

	err = bpf_struct_ops_supported(st_ops, __btf_member_bit_offset(t, member) / 8);
	if (err) {
		verbose(env, "attach to unsupported member %s of struct %s\n",
			mname, st_ops->name);
		return err;
	}

	if (st_ops->check_member) {
		err = st_ops->check_member(t, member, prog);
		if (err) {
			verbose(env, "attach to unsupported member %s of struct %s\n",
				mname, st_ops->name);
			return err;
		}
	}

	if (prog->aux->priv_stack_requested && !bpf_jit_supports_private_stack()) {
		verbose(env, "Private stack not supported by jit\n");
		return -EACCES;
	}

	/* btf_ctx_access() used this to provide argument type info */
	prog->aux->ctx_arg_info =
		st_ops_desc->arg_info[member_idx].info;
	prog->aux->ctx_arg_info_size =
		st_ops_desc->arg_info[member_idx].cnt;

	prog->aux->attach_func_proto = func_proto;
	prog->aux->attach_func_name = mname;
	env->ops = st_ops->verifier_ops;

	return 0;
}
#define SECURITY_PREFIX "security_"

static int check_attach_modify_return(unsigned long addr, const char *func_name)
{
	if (within_error_injection_list(addr) ||
	    !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
		return 0;

	return -EINVAL;
}

/* list of non-sleepable functions that are otherwise on
 * ALLOW_ERROR_INJECTION list
 */
BTF_SET_START(btf_non_sleepable_error_inject)
/* Three functions below can be called from sleepable and non-sleepable context.
 * Assume non-sleepable from bpf safety point of view.
 */
BTF_ID(func, __filemap_add_folio)
#ifdef CONFIG_FAIL_PAGE_ALLOC
BTF_ID(func, should_fail_alloc_page)
#endif
#ifdef CONFIG_FAILSLAB
BTF_ID(func, should_failslab)
#endif
BTF_SET_END(btf_non_sleepable_error_inject)

static int check_non_sleepable_error_inject(u32 btf_id)
{
	return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
}
int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info)
{
	bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
	bool prog_tracing = prog->type == BPF_PROG_TYPE_TRACING;
	char trace_symbol[KSYM_SYMBOL_LEN];
	const char prefix[] = "btf_trace_";
	struct bpf_raw_event_map *btp;
	int ret = 0, subprog = -1, i;
	const struct btf_type *t;
	bool conservative = true;
	const char *tname, *fname;
	struct btf *btf;
	long addr = 0;
	struct module *mod = NULL;

	if (!btf_id) {
		bpf_log(log, "Tracing programs must provide btf_id\n");
		return -EINVAL;
	}
	btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
	if (!btf) {
		bpf_log(log,
			"FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
		return -EINVAL;
	}
	t = btf_type_by_id(btf, btf_id);
	if (!t) {
		bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
		return -EINVAL;
	}
	tname = btf_name_by_offset(btf, t->name_off);
	if (!tname) {
		bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
		return -EINVAL;
	}
	if (tgt_prog) {
		struct bpf_prog_aux *aux = tgt_prog->aux;

		if (bpf_prog_is_dev_bound(prog->aux) &&
		    !bpf_prog_dev_bound_match(prog, tgt_prog)) {
			bpf_log(log, "Target program bound device mismatch");
			return -EINVAL;
		}

		for (i = 0; i < aux->func_info_cnt; i++)
			if (aux->func_info[i].type_id == btf_id) {
				subprog = i;
				break;
			}
		if (subprog == -1) {
			bpf_log(log, "Subprog %s doesn't exist\n", tname);
			return -EINVAL;
		}
		if (aux->func && aux->func[subprog]->aux->exception_cb) {
			bpf_log(log,
				"%s programs cannot attach to exception callback\n",
				prog_extension ? "Extension" : "FENTRY/FEXIT");
			return -EINVAL;
		}
		conservative = aux->func_info_aux[subprog].unreliable;
		if (prog_extension) {
			if (conservative) {
				bpf_log(log,
					"Cannot replace static functions\n");
				return -EINVAL;
			}
			if (!prog->jit_requested) {
				bpf_log(log,
					"Extension programs should be JITed\n");
				return -EINVAL;
			}
		}
		if (!tgt_prog->jited) {
			bpf_log(log, "Can attach to only JITed progs\n");
			return -EINVAL;
		}
		if (prog_tracing) {
			if (aux->attach_tracing_prog) {
				/*
				 * Target program is an fentry/fexit which is already attached
				 * to another tracing program. More levels of nesting
				 * attachment are not allowed.
				 */
				bpf_log(log, "Cannot nest tracing program attach more than once\n");
				return -EINVAL;
			}
		} else if (tgt_prog->type == prog->type) {
			/*
			 * To avoid potential call chain cycles, prevent attaching of a
			 * program extension to another extension. It's ok to attach
			 * fentry/fexit to extension program.
			 */
			bpf_log(log, "Cannot recursively attach\n");
			return -EINVAL;
		}
		if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
		    prog_extension &&
		    (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
		     tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
			/* Program extensions can extend all program types
			 * except fentry/fexit. The reason is the following.
			 * The fentry/fexit programs are used for performance
			 * analysis, stats and can be attached to any program
			 * type. When extension program is replacing XDP function
			 * it is necessary to allow performance analysis of all
			 * functions. Both original XDP program and its program
			 * extension. Hence attaching fentry/fexit to
			 * BPF_PROG_TYPE_EXT is allowed. If extending of
			 * fentry/fexit was allowed it would be possible to create
			 * long call chain fentry->extension->fentry->extension
			 * beyond reasonable stack size. Hence extending fentry
			 * is not allowed.
			 */
			bpf_log(log, "Cannot extend fentry/fexit\n");
			return -EINVAL;
		}
	} else {
		if (prog_extension) {
			bpf_log(log, "Cannot replace kernel functions\n");
			return -EINVAL;
		}
	}

	switch (prog->expected_attach_type) {
	case BPF_TRACE_RAW_TP:
		if (tgt_prog) {
			bpf_log(log,
				"Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
			return -EINVAL;
		}
		if (!btf_type_is_typedef(t)) {
			bpf_log(log, "attach_btf_id %u is not a typedef\n",
				btf_id);
			return -EINVAL;
		}
		if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
			bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
				btf_id, tname);
			return -EINVAL;
		}
		tname += sizeof(prefix) - 1;

		/* The func_proto of "btf_trace_##tname" is generated from typedef without argument
		 * names. Thus using bpf_raw_event_map to get argument names.
		 */
		btp = bpf_get_raw_tracepoint(tname);
		if (!btp)
			return -EINVAL;
		fname = kallsyms_lookup((unsigned long)btp->bpf_func, NULL, NULL, NULL,
					trace_symbol);
		bpf_put_raw_tracepoint(btp);

		if (fname)
			ret = btf_find_by_name_kind(btf, fname, BTF_KIND_FUNC);

		if (!fname || ret < 0) {
			bpf_log(log, "Cannot find btf of tracepoint template, fall back to %s%s.\n",
				prefix, tname);
			t = btf_type_by_id(btf, t->type);
			if (!btf_type_is_ptr(t))
				/* should never happen in valid vmlinux build */
				return -EINVAL;
		} else {
			t = btf_type_by_id(btf, ret);
			if (!btf_type_is_func(t))
				/* should never happen in valid vmlinux build */
				return -EINVAL;
		}

		t = btf_type_by_id(btf, t->type);
		if (!btf_type_is_func_proto(t))
			/* should never happen in valid vmlinux build */
			return -EINVAL;

		break;
	case BPF_TRACE_ITER:
		if (!btf_type_is_func(t)) {
			bpf_log(log, "attach_btf_id %u is not a function\n",
				btf_id);
			return -EINVAL;
		}
		t = btf_type_by_id(btf, t->type);
		if (!btf_type_is_func_proto(t))
			return -EINVAL;
		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
		if (ret)
			return ret;
		break;
	default:
		if (!prog_extension)
			return -EINVAL;
		fallthrough;
	case BPF_MODIFY_RETURN:
	case BPF_LSM_MAC:
	case BPF_LSM_CGROUP:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (!btf_type_is_func(t)) {
			bpf_log(log, "attach_btf_id %u is not a function\n",
				btf_id);
			return -EINVAL;
		}
		if (prog_extension &&
		    btf_check_type_match(log, prog, btf, t))
			return -EINVAL;
		t = btf_type_by_id(btf, t->type);
		if (!btf_type_is_func_proto(t))
			return -EINVAL;

		if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
		    (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
		     prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
			return -EINVAL;

		if (tgt_prog && conservative)
			t = NULL;

		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
		if (ret < 0)
			return ret;

		if (tgt_prog) {
			if (subprog == 0)
				addr = (long) tgt_prog->bpf_func;
			else
				addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
		} else {
			if (btf_is_module(btf)) {
				mod = btf_try_get_module(btf);
				if (mod)
					addr = find_kallsyms_symbol_value(mod, tname);
				else
					addr = 0;
			} else {
				addr = kallsyms_lookup_name(tname);
			}
			if (!addr) {
				module_put(mod);
				bpf_log(log,
					"The address of function %s cannot be found\n",
					tname);
				return -ENOENT;
			}
		}

		if (prog->sleepable) {
			ret = -EINVAL;
			switch (prog->type) {
			case BPF_PROG_TYPE_TRACING:

				/* fentry/fexit/fmod_ret progs can be sleepable if they are
				 * attached to ALLOW_ERROR_INJECTION and are not in denylist.
				 */
				if (!check_non_sleepable_error_inject(btf_id) &&
				    within_error_injection_list(addr))
					ret = 0;
				/* fentry/fexit/fmod_ret progs can also be sleepable if they are
				 * in the fmodret id set with the KF_SLEEPABLE flag.
				 */
				else {
					u32 *flags = btf_kfunc_is_modify_return(btf, btf_id,
										prog);

					if (flags && (*flags & KF_SLEEPABLE))
						ret = 0;
				}
				break;
			case BPF_PROG_TYPE_LSM:
				/* LSM progs check that they are attached to bpf_lsm_*() funcs.
				 * Only some of them are sleepable.
				 */
				if (bpf_lsm_is_sleepable_hook(btf_id))
					ret = 0;
				break;
			default:
				break;
			}
			if (ret) {
				module_put(mod);
				bpf_log(log, "%s is not sleepable\n", tname);
				return ret;
			}
		} else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
			if (tgt_prog) {
				module_put(mod);
				bpf_log(log, "can't modify return codes of BPF programs\n");
				return -EINVAL;
			}
			ret = -EINVAL;
			if (btf_kfunc_is_modify_return(btf, btf_id, prog) ||
			    !check_attach_modify_return(addr, tname))
				ret = 0;
			if (ret) {
				module_put(mod);
				bpf_log(log, "%s() is not modifiable\n", tname);
				return ret;
			}
		}

		break;
	}
	tgt_info->tgt_addr = addr;
	tgt_info->tgt_name = tname;
	tgt_info->tgt_type = t;
	tgt_info->tgt_mod = mod;
	return 0;
}
BTF_SET_START(btf_id_deny)
BTF_ID(func, migrate_disable)
BTF_ID(func, migrate_enable)
#if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
BTF_ID(func, rcu_read_unlock_strict)
#endif
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
BTF_ID(func, preempt_count_add)
BTF_ID(func, preempt_count_sub)
#endif
#ifdef CONFIG_PREEMPT_RCU
BTF_ID(func, __rcu_read_lock)
BTF_ID(func, __rcu_read_unlock)
#endif
BTF_SET_END(btf_id_deny)

static bool can_be_sleepable(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING) {
		switch (prog->expected_attach_type) {
		case BPF_TRACE_FENTRY:
		case BPF_TRACE_FEXIT:
		case BPF_MODIFY_RETURN:
		case BPF_TRACE_ITER:
			return true;
		default:
			return false;
		}
	}
	return prog->type == BPF_PROG_TYPE_LSM ||
	       prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ ||
	       prog->type == BPF_PROG_TYPE_STRUCT_OPS;
}
static int check_attach_btf_id(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog;
	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
	struct bpf_attach_target_info tgt_info = {};
	u32 btf_id = prog->aux->attach_btf_id;
	struct bpf_trampoline *tr;
	int ret;
	u64 key;

	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
		if (prog->sleepable)
			/* attach_btf_id checked to be zero already */
			return 0;
		verbose(env, "Syscall programs can only be sleepable\n");
		return -EINVAL;
	}

	if (prog->sleepable && !can_be_sleepable(prog)) {
		verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n");
		return -EINVAL;
	}

	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
		return check_struct_ops_btf_id(env);

	if (prog->type != BPF_PROG_TYPE_TRACING &&
	    prog->type != BPF_PROG_TYPE_LSM &&
	    prog->type != BPF_PROG_TYPE_EXT)
		return 0;

	ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
	if (ret)
		return ret;

	if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
		/* to make freplace equivalent to their targets, they need to
		 * inherit env->ops and expected_attach_type for the rest of the
		 * verification
		 */
		env->ops = bpf_verifier_ops[tgt_prog->type];
		prog->expected_attach_type = tgt_prog->expected_attach_type;
	}

	/* store info about the attachment target that will be used later */
	prog->aux->attach_func_proto = tgt_info.tgt_type;
	prog->aux->attach_func_name = tgt_info.tgt_name;
	prog->aux->mod = tgt_info.tgt_mod;

	if (tgt_prog) {
		prog->aux->saved_dst_prog_type = tgt_prog->type;
		prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
	}

	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
		prog->aux->attach_btf_trace = true;
		return 0;
	} else if (prog->expected_attach_type == BPF_TRACE_ITER) {
		if (!bpf_iter_prog_supported(prog))
			return -EINVAL;
		return 0;
	}

	if (prog->type == BPF_PROG_TYPE_LSM) {
		ret = bpf_lsm_verify_prog(&env->log, prog);
		if (ret < 0)
			return ret;
	} else if (prog->type == BPF_PROG_TYPE_TRACING &&
		   btf_id_set_contains(&btf_id_deny, btf_id)) {
		return -EINVAL;
	}

	key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
	tr = bpf_trampoline_get(key, &tgt_info);
	if (!tr)
		return -ENOMEM;

	if (tgt_prog && tgt_prog->aux->tail_call_reachable)
		tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;

	prog->aux->dst_trampoline = tr;
	return 0;
}
struct btf *bpf_get_btf_vmlinux(void)
{
	if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
		mutex_lock(&bpf_verifier_lock);
		if (!btf_vmlinux)
			btf_vmlinux = btf_parse_vmlinux();
		mutex_unlock(&bpf_verifier_lock);
	}
	return btf_vmlinux;
}
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
{
	u64 start_time = ktime_get_ns();
	struct bpf_verifier_env *env;
	int i, len, ret = -EINVAL, err;
	u32 log_true_size;
	bool is_priv;

	/* no program is valid */
	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
		return -EINVAL;

	/* 'struct bpf_verifier_env' can be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kvzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	len = (*prog)->len;
	env->insn_aux_data =
		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
	ret = -ENOMEM;
	if (!env->insn_aux_data)
		goto err_free_env;
	for (i = 0; i < len; i++)
		env->insn_aux_data[i].orig_idx = i;
	env->prog = *prog;
	env->ops = bpf_verifier_ops[env->prog->type];
	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);

	env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token);
	env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token);
	env->bypass_spec_v1 = bpf_bypass_spec_v1(env->prog->aux->token);
	env->bypass_spec_v4 = bpf_bypass_spec_v4(env->prog->aux->token);
	env->bpf_capable = is_priv = bpf_token_capable(env->prog->aux->token, CAP_BPF);

	bpf_get_btf_vmlinux();

	/* grab the mutex to protect few globals used by verifier */
	if (!is_priv)
		mutex_lock(&bpf_verifier_lock);

	/* user could have requested verbose verifier output
	 * and supplied buffer to store the verification trace
	 */
	ret = bpf_vlog_init(&env->log, attr->log_level,
			    (char __user *) (unsigned long) attr->log_buf,
			    attr->log_size);
	if (ret)
		goto err_unlock;

	mark_verifier_state_clean(env);

	if (IS_ERR(btf_vmlinux)) {
		/* Either gcc or pahole or kernel are broken. */
		verbose(env, "in-kernel BTF is malformed\n");
		ret = PTR_ERR(btf_vmlinux);
		goto skip_full_check;
	}

	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		env->strict_alignment = true;
	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
		env->strict_alignment = false;

	if (is_priv)
		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
	env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS;

	env->explored_states = kvcalloc(state_htab_size(env),
					sizeof(struct bpf_verifier_state_list *),
					GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	ret = check_btf_info_early(env, attr, uattr);
	if (ret < 0)
		goto skip_full_check;

	ret = add_subprog_and_kfunc(env);
	if (ret < 0)
		goto skip_full_check;

	ret = check_subprogs(env);
	if (ret < 0)
		goto skip_full_check;

	ret = check_btf_info(env, attr, uattr);
	if (ret < 0)
		goto skip_full_check;

	ret = check_attach_btf_id(env);
	if (ret)
		goto skip_full_check;

	ret = resolve_pseudo_ldimm64(env);
	if (ret < 0)
		goto skip_full_check;

	if (bpf_prog_is_offloaded(env->prog->aux)) {
		ret = bpf_prog_offload_verifier_prep(env->prog);
		if (ret)
			goto skip_full_check;
	}

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	ret = mark_fastcall_patterns(env);
	if (ret < 0)
		goto skip_full_check;

	ret = do_check_main(env);
	ret = ret ?: do_check_subprogs(env);

	if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux))
		ret = bpf_prog_offload_finalize(env);

skip_full_check:
	kvfree(env->explored_states);

	/* might decrease stack depth, keep it before passes that
	 * allocate additional slots.
	 */
	if (ret == 0)
		ret = remove_fastcall_spills_fills(env);

	if (ret == 0)
		ret = check_max_stack_depth(env);

	/* instruction rewrites happen after this point */
	if (ret == 0)
		ret = optimize_bpf_loop(env);

	if (is_priv) {
		if (ret == 0)
			opt_hard_wire_dead_code_branches(env);
		if (ret == 0)
			ret = opt_remove_dead_code(env);
		if (ret == 0)
			ret = opt_remove_nops(env);
	} else {
		if (ret == 0)
			sanitize_dead_code(env);
	}

	if (ret == 0)
		/* program is valid, convert *(u32*)(ctx + off) accesses */
		ret = convert_ctx_accesses(env);

	if (ret == 0)
		ret = do_misc_fixups(env);

	/* do 32-bit optimization after insn patching has done so those patched
	 * insns could be handled correctly.
	 */
	if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) {
		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
								     : false;
	}

	if (ret == 0)
		ret = fixup_call_args(env);

	env->verification_time = ktime_get_ns() - start_time;
	print_verification_stats(env);
	env->prog->aux->verified_insns = env->insn_processed;

	/* preserve original error even if log finalization is successful */
	err = bpf_vlog_finalize(&env->log, &log_true_size);
	if (err)
		ret = err;

	if (uattr_size >= offsetofend(union bpf_attr, log_true_size) &&
	    copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, log_true_size),
				  &log_true_size, sizeof(log_true_size))) {
		ret = -EFAULT;
		goto err_release_maps;
	}

	if (ret)
		goto err_release_maps;

	if (env->used_map_cnt) {
		/* if program passed verifier, update used_maps in bpf_prog_info */
		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
							  sizeof(env->used_maps[0]),
							  GFP_KERNEL);

		if (!env->prog->aux->used_maps) {
			ret = -ENOMEM;
			goto err_release_maps;
		}

		memcpy(env->prog->aux->used_maps, env->used_maps,
		       sizeof(env->used_maps[0]) * env->used_map_cnt);
		env->prog->aux->used_map_cnt = env->used_map_cnt;
	}
	if (env->used_btf_cnt) {
		/* if program passed verifier, update used_btfs in bpf_prog_aux */
		env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
							  sizeof(env->used_btfs[0]),
							  GFP_KERNEL);
		if (!env->prog->aux->used_btfs) {
			ret = -ENOMEM;
			goto err_release_maps;
		}

		memcpy(env->prog->aux->used_btfs, env->used_btfs,
		       sizeof(env->used_btfs[0]) * env->used_btf_cnt);
		env->prog->aux->used_btf_cnt = env->used_btf_cnt;
	}
	if (env->used_map_cnt || env->used_btf_cnt) {
		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
		 * bpf_ld_imm64 instructions
		 */
		convert_pseudo_ld_imm64(env);
	}

	adjust_btf_func(env);

err_release_maps:
	if (!env->prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_info, release
		 * them now. Otherwise free_used_maps() will release them.
		 */
		release_maps(env);
	if (!env->prog->aux->used_btfs)
		release_btfs(env);

	/* extension progs temporarily inherit the attach_type of their targets
	   for verification purposes, so set it back to zero before returning
	 */
	if (env->prog->type == BPF_PROG_TYPE_EXT)
		env->prog->expected_attach_type = 0;

	*prog = env->prog;

	module_put(env->attach_btf_mod);
err_unlock:
	if (!is_priv)
		mutex_unlock(&bpf_verifier_lock);
	vfree(env->insn_aux_data);
	kvfree(env->insn_hist);