// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};
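/* Note (added, not in the original source): the BPF_PROG_TYPE() X-macro above
 * expands each entry of <linux/bpf_types.h> into an initializer such as
 *	[BPF_PROG_TYPE_SOCKET_FILTER] = &sk_filter_verifier_ops,
 * so the table maps a program type to its verifier callbacks, while map-type
 * entries are expanded to nothing here.
 */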
/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
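/* Illustrative sketch (added, not from the original source): a program that
 * satisfies the reference tracking rules above might look like
 *
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), 0, 0);
 *	if (!sk)		// NULL check: PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET
 *		return 0;	// reference released on the NULL branch
 *	...			// safe accesses to *sk
 *	bpf_sk_release(sk);	// reference released, state cleaned up
 *
 * Omitting the bpf_sk_release() call leaves an unreleased reference at
 * bpf_exit and the program is rejected.
 */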
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
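/* Descriptive note (added): map_ptr_state stores the struct bpf_map pointer
 * with the "unpriv" flag folded into its low bit (map pointers are at least
 * word aligned, so bit 0 is free), and BPF_MAP_PTR_POISON is an out-of-band
 * sentinel meaning no single map pointer can be trusted at that instruction.
 * BPF_MAP_PTR() masks the flag back off.  map_key_state similarly keeps the
 * "seen"/"poisoned" markers in its two topmost bits.
 */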
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	s64 msize_smax_value;
	u64 msize_umax_value;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);
static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}
void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (log->level == BPF_LOG_KERNEL) {
		pr_err("BPF:%s\n", log->kbuf);
		return;
	}
	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}
/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}
static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}
static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCK_COMMON ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_XDP_SOCK;
}

static bool reg_type_may_be_null(enum bpf_reg_type type)
{
	return type == PTR_TO_MAP_VALUE_OR_NULL ||
	       type == PTR_TO_SOCKET_OR_NULL ||
	       type == PTR_TO_SOCK_COMMON_OR_NULL ||
	       type == PTR_TO_TCP_SOCK_OR_NULL;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return reg->type == PTR_TO_MAP_VALUE &&
		map_value_has_spin_lock(reg->map_ptr);
}

static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCKET_OR_NULL ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_TCP_SOCK_OR_NULL;
}

static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_SOCK_COMMON;
}

/* Determine whether the function releases some resources allocated by another
 * function call. The first reference type argument will be assumed to be
 * released by release_reference().
 */
static bool is_release_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_release;
}

static bool is_acquire_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_lookup_tcp ||
		func_id == BPF_FUNC_sk_lookup_udp ||
		func_id == BPF_FUNC_skc_lookup_tcp;
}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_tcp_sock ||
		func_id == BPF_FUNC_sk_fullsock;
}
/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
	[PTR_TO_FLOW_KEYS]	= "flow_keys",
	[PTR_TO_SOCKET]		= "sock",
	[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
	[PTR_TO_SOCK_COMMON]	= "sock_common",
	[PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
	[PTR_TO_TCP_SOCK]	= "tcp_sock",
	[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
	[PTR_TO_TP_BUFFER]	= "tp_buffer",
	[PTR_TO_XDP_SOCK]	= "xdp_sock",
	[PTR_TO_BTF_ID]		= "ptr_",
};

static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
};
static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

const char *kernel_type_name(u32 id)
{
	return btf_name_by_offset(btf_vmlinux,
				  btf_type_by_id(btf_vmlinux, id)->name_off);
}
static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=%s", reg_type_str[t]);
		if (t == SCALAR_VALUE && reg->precise)
			verbose(env, "P");
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			if (t == PTR_TO_BTF_ID)
				verbose(env, "%s", kernel_type_name(reg->btf_id));
			verbose(env, "(id=%d", reg->id);
			if (reg_type_may_be_refcounted_or_null(t))
				verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
			if (t != SCALAR_VALUE)
				verbose(env, ",off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose(env, ",r=%d", reg->range);
			else if (t == CONST_PTR_TO_MAP ||
				 t == PTR_TO_MAP_VALUE ||
				 t == PTR_TO_MAP_VALUE_OR_NULL)
				verbose(env, ",ks=%d,vs=%d",
					reg->map_ptr->key_size,
					reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose(env, ",imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose(env, ",smin_value=%lld",
						(long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose(env, ",smax_value=%lld",
						(long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose(env, ",umin_value=%llu",
						(unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose(env, ",umax_value=%llu",
						(unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose(env, ",var_off=%s", tn_buf);
				}
			}
			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		bool valid = false;
		int j;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			if (state->stack[i].slot_type[j] != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[
					state->stack[i].slot_type[j]];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;
		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
		print_liveness(env, state->stack[i].spilled_ptr.live);
		if (state->stack[i].slot_type[0] == STACK_SPILL) {
			reg = &state->stack[i].spilled_ptr;
			t = reg->type;
			verbose(env, "=%s", reg_type_str[t]);
			if (t == SCALAR_VALUE && reg->precise)
				verbose(env, "P");
			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
				verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			verbose(env, "=%s", types_buf);
		}
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	verbose(env, "\n");
}
#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE)				\
static int copy_##NAME##_state(struct bpf_func_state *dst,		\
			       const struct bpf_func_state *src)	\
{									\
	if (!src->FIELD)						\
		return 0;						\
	if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) {			\
		/* internal bug, make state invalid to reject the program */ \
		memset(dst, 0, sizeof(*dst));				\
		return -EFAULT;						\
	}								\
	memcpy(dst->FIELD, src->FIELD,					\
	       sizeof(*src->FIELD) * (src->COUNT / SIZE));		\
	return 0;							\
}
/* copy_reference_state() */
COPY_STATE_FN(reference, acquired_refs, refs, 1)
/* copy_stack_state() */
COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef COPY_STATE_FN

#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE)			\
static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
				  bool copy_old)			\
{									\
	u32 old_size = state->COUNT;					\
	struct bpf_##NAME##_state *new_##FIELD;				\
	int slot = size / SIZE;						\
									\
	if (size <= old_size || !size) {				\
		if (copy_old)						\
			return 0;					\
		state->COUNT = slot * SIZE;				\
		if (!size && old_size) {				\
			kfree(state->FIELD);				\
			state->FIELD = NULL;				\
		}							\
		return 0;						\
	}								\
	new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
				    GFP_KERNEL);			\
	if (!new_##FIELD)						\
		return -ENOMEM;						\
	if (copy_old) {							\
		if (state->FIELD)					\
			memcpy(new_##FIELD, state->FIELD,		\
			       sizeof(*new_##FIELD) * (old_size / SIZE)); \
		memset(new_##FIELD + old_size / SIZE, 0,		\
		       sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
	}								\
	state->COUNT = slot * SIZE;					\
	kfree(state->FIELD);						\
	state->FIELD = new_##FIELD;					\
	return 0;							\
}
/* realloc_reference_state() */
REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
/* realloc_stack_state() */
REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef REALLOC_STATE_FN
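/* Note (added for clarity): the two macro instantiations above generate
 * copy_reference_state()/copy_stack_state() and
 * realloc_reference_state()/realloc_stack_state().  The helpers below combine
 * them so a bpf_func_state's refs[] and stack[] arrays grow on demand instead
 * of being allocated at their maximum size up front.
 */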
/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 * make it consume minimal amount of memory. check_stack_write() access from
 * the program calls into realloc_func_state() to grow the stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which realloc_stack_state() copies over. It points to previous
 * bpf_verifier_state which is never reallocated.
 */
static int realloc_func_state(struct bpf_func_state *state, int stack_size,
			      int refs_size, bool copy_old)
{
	int err = realloc_reference_state(state, refs_size, copy_old);
	if (err)
		return err;
	return realloc_stack_state(state, stack_size, copy_old);
}

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = realloc_reference_state(state, state->acquired_refs + 1, true);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;

	return id;
}
/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
	int i, last_idx;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id == ptr_id) {
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			return 0;
		}
	}
	return -EINVAL;
}

static int transfer_reference_state(struct bpf_func_state *dst,
				    struct bpf_func_state *src)
{
	int err = realloc_reference_state(dst, src->acquired_refs, false);
	if (err)
		return err;
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return 0;
}
static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->refs);
	kfree(state->stack);
	kfree(state);
}

static void clear_jmp_history(struct bpf_verifier_state *state)
{
	kfree(state->jmp_history);
	state->jmp_history = NULL;
	state->jmp_history_cnt = 0;
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	clear_jmp_history(state);
	if (free_self)
		kfree(state);
}
/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
				 false);
	if (err)
		return err;
	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
	int i, err;

	if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
		kfree(dst_state->jmp_history);
		dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
		if (!dst_state->jmp_history)
			return -ENOMEM;
	}
	memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
	dst_state->jmp_history_cnt = src->jmp_history_cnt;

	/* if dst has more stack frames than src, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->speculative = src->speculative;
	dst_state->curframe = src->curframe;
	dst_state->active_spin_lock = src->active_spin_lock;
	dst_state->branches = src->branches;
	dst_state->parent = src->parent;
	dst_state->first_insn_idx = src->first_insn_idx;
	dst_state->last_insn_idx = src->last_insn_idx;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}
static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	while (st) {
		u32 br = --st->branches;

		/* WARN_ON(br > 1) technically makes sense here,
		 * but see comment in push_stack(), hence:
		 */
		WARN_ONCE((int)br < 0,
			  "BUG update_branch_counts:branches_to_explore=%d\n",
			  br);
		if (br)
			break;
		st = st->parent;
	}
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}
static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	elem->st.speculative |= speculative;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
		verbose(env, "The sequence of %d jumps is too complex.\n",
			env->stack_size);
		goto err;
	}
	if (elem->st.parent) {
		++elem->st.parent->branches;
		/* WARN_ON(branches > 2) technically makes sense here,
		 * but
		 * 1. speculative states will bump 'branches' for non-branch
		 *    instructions
		 * 2. is_state_visited() heuristics may decide not to create
		 *    a new state for a sequence of branches and all such current
		 *    and cloned states will be pointing to a single parent state
		 *    which might have large 'branches' count.
		 */
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL));
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(struct bpf_reg_state *reg);
/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	/* Clear id, off, and union(map_ptr, range) */
	memset(((u8 *)reg) + sizeof(reg->type), 0,
	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;
}

/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->type = SCALAR_VALUE;
}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}
static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
	 */
	return reg->type == which &&
	       reg->id == 0 &&
	       reg->off == 0 &&
	       tnum_equals_const(reg->var_off, 0);
}
/* Attempts to improve min/max values based on var_off information */
static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine.  This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive.  We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative.  We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	reg->var_off = tnum_intersect(reg->var_off,
				      tnum_range(reg->umin_value,
						 reg->umax_value));
}

static void __reg_bound_offset32(struct bpf_reg_state *reg)
{
	u64 mask = 0xffffFFFF;
	struct tnum range = tnum_range(reg->umin_value & mask,
				       reg->umax_value & mask);
	struct tnum lo32 = tnum_cast(reg->var_off, 4);
	struct tnum hi32 = tnum_lshift(tnum_rshift(reg->var_off, 32), 32);

	reg->var_off = tnum_or(hi32, tnum_intersect(lo32, range));
}
/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;
}

/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(struct bpf_reg_state *reg)
{
	/*
	 * Clear type, id, off, and union(map_ptr, range) and
	 * padding between 'type' and union
	 */
	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
	reg->type = SCALAR_VALUE;
	reg->var_off = tnum_unknown;
	reg->frameno = 0;
	__mark_reg_unbounded(reg);
}

static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	regs += regno;
	__mark_reg_unknown(regs);
	/* constant backtracking is enabled for root without bpf2bpf calls */
	regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
			true : false;
}

static void __mark_reg_not_init(struct bpf_reg_state *reg)
{
	__mark_reg_unknown(reg);
	reg->type = NOT_INIT;
}

static void mark_reg_not_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_not_init(regs + regno);
}
#define DEF_NOT_SUBREG	(0)
static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(env, regs, i);
		regs[i].live = REG_LIVE_NONE;
		regs[i].parent = NULL;
		regs[i].subreg_def = DEF_NOT_SUBREG;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(env, regs, BPF_REG_FP);
	regs[BPF_REG_FP].frameno = state->frameno;

	/* 1st arg to a function */
	regs[BPF_REG_1].type = PTR_TO_CTX;
	mark_reg_known_zero(env, regs, BPF_REG_1);
}

#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *state,
			    int callsite, int frameno, int subprogno)
{
	state->callsite = callsite;
	state->frameno = frameno;
	state->subprogno = subprogno;
	init_reg_state(env, state);
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};
static int cmp_subprogs(const void *a, const void *b)
{
	return ((struct bpf_subprog_info *)a)->start -
	       ((struct bpf_subprog_info *)b)->start;
}

static int find_subprog(struct bpf_verifier_env *env, int off)
{
	struct bpf_subprog_info *p;

	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
		    sizeof(env->subprog_info[0]), cmp_subprogs);
	if (!p)
		return -ENOENT;
	return p - env->subprog_info;
}

static int add_subprog(struct bpf_verifier_env *env, int off)
{
	int insn_cnt = env->prog->len;
	int ret;

	if (off >= insn_cnt || off < 0) {
		verbose(env, "call to invalid destination\n");
		return -EINVAL;
	}
	ret = find_subprog(env, off);
	if (ret >= 0)
		return 0;
	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
		verbose(env, "too many subprograms\n");
		return -E2BIG;
	}
	env->subprog_info[env->subprog_cnt++].start = off;
	sort(env->subprog_info, env->subprog_cnt,
	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
	return 0;
}
static int check_subprogs(struct bpf_verifier_env *env)
{
	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;

	/* Add entry function. */
	ret = add_subprog(env, 0);
	if (ret < 0)
		return ret;

	/* determine subprog starts. The end is one before the next starts */
	for (i = 0; i < insn_cnt; i++) {
		if (insn[i].code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn[i].src_reg != BPF_PSEUDO_CALL)
			continue;
		if (!env->allow_ptr_leaks) {
			verbose(env, "function calls to other bpf functions are allowed for root only\n");
			return -EPERM;
		}
		ret = add_subprog(env, i + insn[i].imm + 1);
		if (ret < 0)
			return ret;
	}

	/* Add a fake 'exit' subprog which could simplify subprog iteration
	 * logic. 'subprog_cnt' should not be increased.
	 */
	subprog[env->subprog_cnt].start = insn_cnt;

	if (env->log.level & BPF_LOG_LEVEL2)
		for (i = 0; i < env->subprog_cnt; i++)
			verbose(env, "func#%d @%d\n", i, subprog[i].start);

	/* now check that all jumps are within the same subprog */
	subprog_start = subprog[cur_subprog].start;
	subprog_end = subprog[cur_subprog + 1].start;
	for (i = 0; i < insn_cnt; i++) {
		u8 code = insn[i].code;

		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
			goto next;
		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
			goto next;
		off = i + insn[i].off + 1;
		if (off < subprog_start || off >= subprog_end) {
			verbose(env, "jump out of range from insn %d to %d\n", i, off);
			return -EINVAL;
		}
next:
		if (i == subprog_end - 1) {
			/* to avoid fall-through from one subprog into another
			 * the last insn of the subprog should be either exit
			 * or unconditional jump back
			 */
			if (code != (BPF_JMP | BPF_EXIT) &&
			    code != (BPF_JMP | BPF_JA)) {
				verbose(env, "last insn is not an exit or jmp\n");
				return -EINVAL;
			}
			subprog_start = subprog_end;
			cur_subprog++;
			if (cur_subprog < env->subprog_cnt)
				subprog_end = subprog[cur_subprog + 1].start;
		}
	}
	return 0;
}
/* Parentage chain of this register (or stack slot) should take care of all
 * issues like callee-saved registers, stack slot allocation time, etc.
 */
static int mark_reg_read(struct bpf_verifier_env *env,
			 const struct bpf_reg_state *state,
			 struct bpf_reg_state *parent, u8 flag)
{
	bool writes = parent == state->parent; /* Observe write marks */
	int cnt = 0;

	while (parent) {
		/* if read wasn't screened by an earlier write ... */
		if (writes && state->live & REG_LIVE_WRITTEN)
			break;
		if (parent->live & REG_LIVE_DONE) {
			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
				reg_type_str[parent->type],
				parent->var_off.value, parent->off);
			return -EFAULT;
		}
		/* The first condition is more likely to be true than the
		 * second, so check it first.
		 */
		if ((parent->live & REG_LIVE_READ) == flag ||
		    parent->live & REG_LIVE_READ64)
			/* The parentage chain never changes and
			 * this parent was already marked as LIVE_READ.
			 * There is no need to keep walking the chain again and
			 * keep re-marking all parents as LIVE_READ.
			 * This case happens when the same register is read
			 * multiple times without writes into it in-between.
			 * Also, if parent has the stronger REG_LIVE_READ64 set,
			 * then no need to set the weak REG_LIVE_READ32.
			 */
			break;
		/* ... then we depend on parent's value */
		parent->live |= flag;
		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
		if (flag == REG_LIVE_READ64)
			parent->live &= ~REG_LIVE_READ32;
		state = parent;
		parent = state->parent;
		writes = true;
		cnt++;
	}

	if (env->longest_mark_read_walk < cnt)
		env->longest_mark_read_walk = cnt;
	return 0;
}
/* This function is supposed to be used by the following 32-bit optimization
 * code only. It returns TRUE if the source or destination register operates
 * on 64-bit, otherwise return FALSE.
 */
static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
{
	u8 code, class, op;

	code = insn->code;
	class = BPF_CLASS(code);
	op = BPF_OP(code);
	if (class == BPF_JMP) {
		/* BPF_EXIT for "main" will reach here. Return TRUE
		 * conservatively.
		 */
		if (op == BPF_EXIT)
			return true;
		if (op == BPF_CALL) {
			/* BPF to BPF call will reach here because of marking
			 * caller saved clobber with DST_OP_NO_MARK for which we
			 * don't care the register def because they are anyway
			 * marked as NOT_INIT already.
			 */
			if (insn->src_reg == BPF_PSEUDO_CALL)
				return false;
			/* Helper call will reach here because of arg type
			 * check, conservatively return TRUE.
			 */
			if (t == SRC_OP)
				return true;

			return false;
		}
	}

	if (class == BPF_ALU64 || class == BPF_JMP ||
	    /* BPF_END always use BPF_ALU class. */
	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
		return true;

	if (class == BPF_ALU || class == BPF_JMP32)
		return false;

	if (class == BPF_LDX) {
		if (t != SRC_OP)
			return BPF_SIZE(code) == BPF_DW;
		/* LDX source must be ptr. */
		return true;
	}

	if (class == BPF_STX) {
		if (reg->type != SCALAR_VALUE)
			return true;
		return BPF_SIZE(code) == BPF_DW;
	}

	if (class == BPF_LD) {
		u8 mode = BPF_MODE(code);

		/* LD_IMM64 */
		if (mode == BPF_IMM)
			return true;

		/* Both LD_IND and LD_ABS return 32-bit data. */
		if (t != SRC_OP)
			return false;

		/* Implicit ctx ptr. */
		if (regno == BPF_REG_6)
			return true;

		/* Explicit source could be any width. */
		return true;
	}

	if (class == BPF_ST)
		/* The only source register for BPF_ST is a ptr. */
		return true;

	/* Conservatively return true at default. */
	return true;
}

/* Return TRUE if INSN doesn't have explicit value define. */
static bool insn_no_def(struct bpf_insn *insn)
{
	u8 class = BPF_CLASS(insn->code);

	return (class == BPF_JMP || class == BPF_JMP32 ||
		class == BPF_STX || class == BPF_ST);
}

/* Return TRUE if INSN has defined any 32-bit value explicitly. */
static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	if (insn_no_def(insn))
		return false;

	return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
}

static void mark_insn_zext(struct bpf_verifier_env *env,
			   struct bpf_reg_state *reg)
{
	s32 def_idx = reg->subreg_def;

	if (def_idx == DEF_NOT_SUBREG)
		return;

	env->insn_aux_data[def_idx - 1].zext_dst = true;
	/* The dst will be zero extended, so won't be sub-register anymore. */
	reg->subreg_def = DEF_NOT_SUBREG;
}
static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
			 enum reg_arg_type t)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
	struct bpf_reg_state *reg, *regs = state->regs;
	bool rw64;

	if (regno >= MAX_BPF_REG) {
		verbose(env, "R%d is invalid\n", regno);
		return -EINVAL;
	}

	reg = &regs[regno];
	rw64 = is_reg64(env, insn, regno, reg, t);
	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (reg->type == NOT_INIT) {
			verbose(env, "R%d !read_ok\n", regno);
			return -EACCES;
		}
		/* We don't need to worry about FP liveness because it's read-only */
		if (regno == BPF_REG_FP)
			return 0;

		if (rw64)
			mark_insn_zext(env, reg);

		return mark_reg_read(env, reg, reg->parent,
				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose(env, "frame pointer is read only\n");
			return -EACCES;
		}
		reg->live |= REG_LIVE_WRITTEN;
		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
		if (t == DST_OP)
			mark_reg_unknown(env, regs, regno);
	}
	return 0;
}
/* for any branch, call, exit record the history of jmps in the given state */
static int push_jmp_history(struct bpf_verifier_env *env,
			    struct bpf_verifier_state *cur)
{
	u32 cnt = cur->jmp_history_cnt;
	struct bpf_idx_pair *p;

	cnt++;
	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
	if (!p)
		return -ENOMEM;
	p[cnt - 1].idx = env->insn_idx;
	p[cnt - 1].prev_idx = env->prev_insn_idx;
	cur->jmp_history = p;
	cur->jmp_history_cnt = cnt;
	return 0;
}

/* Backtrack one insn at a time. If idx is not at the top of recorded
 * history then previous instruction came from straight line execution.
 */
static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
			     u32 *history)
{
	u32 cnt = *history;

	if (cnt && st->jmp_history[cnt - 1].idx == i) {
		i = st->jmp_history[cnt - 1].prev_idx;
		(*history)--;
	} else {
		i--;
	}
	return i;
}
/* For given verifier state backtrack_insn() is called from the last insn to
 * the first insn. Its purpose is to compute a bitmask of registers and
 * stack slots that needs precision in the parent verifier state.
 */
static int backtrack_insn(struct bpf_verifier_env *env, int idx,
			  u32 *reg_mask, u64 *stack_mask)
{
	const struct bpf_insn_cbs cbs = {
		.cb_print	= verbose,
		.private_data	= env,
	};
	struct bpf_insn *insn = env->prog->insnsi + idx;
	u8 class = BPF_CLASS(insn->code);
	u8 opcode = BPF_OP(insn->code);
	u8 mode = BPF_MODE(insn->code);
	u32 dreg = 1u << insn->dst_reg;
	u32 sreg = 1u << insn->src_reg;
	u32 spi;

	if (insn->code == 0)
		return 0;
	if (env->log.level & BPF_LOG_LEVEL) {
		verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
		verbose(env, "%d: ", idx);
		print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
	}

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (!(*reg_mask & dreg))
			return 0;
		if (opcode == BPF_MOV) {
			if (BPF_SRC(insn->code) == BPF_X) {
				/* dreg = sreg
				 * dreg needs precision after this insn
				 * sreg needs precision before this insn
				 */
				*reg_mask &= ~dreg;
				*reg_mask |= sreg;
			} else {
				/* dreg = K
				 * dreg needs precision after this insn.
				 * Corresponding register is already marked
				 * as precise=true in this verifier state.
				 * No further markings in parent are necessary
				 */
				*reg_mask &= ~dreg;
			}
		} else {
			if (BPF_SRC(insn->code) == BPF_X) {
				/* dreg += sreg
				 * both dreg and sreg need precision
				 * before this insn
				 */
				*reg_mask |= sreg;
			} /* else dreg += K
			   * dreg still needs precision before this insn
			   */
		}
	} else if (class == BPF_LDX) {
		if (!(*reg_mask & dreg))
			return 0;
		*reg_mask &= ~dreg;

		/* scalars can only be spilled into stack w/o losing precision.
		 * Load from any other memory can be zero extended.
		 * The desire to keep that precision is already indicated
		 * by 'precise' mark in corresponding register of this state.
		 * No further tracking necessary.
		 */
		if (insn->src_reg != BPF_REG_FP)
			return 0;
		if (BPF_SIZE(insn->code) != BPF_DW)
			return 0;

		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
		 * that [fp - off] slot contains scalar that needs to be
		 * tracked with precision
		 */
		spi = (-insn->off - 1) / BPF_REG_SIZE;
		if (spi >= 64) {
			verbose(env, "BUG spi %d\n", spi);
			WARN_ONCE(1, "verifier backtracking bug");
			return -EFAULT;
		}
		*stack_mask |= 1ull << spi;
	} else if (class == BPF_STX || class == BPF_ST) {
		if (*reg_mask & dreg)
			/* stx & st shouldn't be using _scalar_ dst_reg
			 * to access memory. It means backtracking
			 * encountered a case of pointer subtraction.
			 */
			return -ENOTSUPP;
		/* scalars can only be spilled into stack */
		if (insn->dst_reg != BPF_REG_FP)
			return 0;
		if (BPF_SIZE(insn->code) != BPF_DW)
			return 0;
		spi = (-insn->off - 1) / BPF_REG_SIZE;
		if (spi >= 64) {
			verbose(env, "BUG spi %d\n", spi);
			WARN_ONCE(1, "verifier backtracking bug");
			return -EFAULT;
		}
		if (!(*stack_mask & (1ull << spi)))
			return 0;
		*stack_mask &= ~(1ull << spi);
		if (class == BPF_STX)
			*reg_mask |= sreg;
	} else if (class == BPF_JMP || class == BPF_JMP32) {
		if (opcode == BPF_CALL) {
			if (insn->src_reg == BPF_PSEUDO_CALL)
				return -ENOTSUPP;
			/* regular helper call sets R0 */
			*reg_mask &= ~1;
			if (*reg_mask & 0x3f) {
				/* if backtracking was looking for registers R1-R5
				 * they should have been found already.
				 */
				verbose(env, "BUG regs %x\n", *reg_mask);
				WARN_ONCE(1, "verifier backtracking bug");
				return -EFAULT;
			}
		} else if (opcode == BPF_EXIT) {
			return -ENOTSUPP;
		}
	} else if (class == BPF_LD) {
		if (!(*reg_mask & dreg))
			return 0;
		*reg_mask &= ~dreg;
		/* It's ld_imm64 or ld_abs or ld_ind.
		 * For ld_imm64 no further tracking of precision
		 * into parent is necessary
		 */
		if (mode == BPF_IND || mode == BPF_ABS)
			/* to be analyzed */
			return -ENOTSUPP;
	}
	return 0;
}
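/* Worked example (added, illustrative only): backtracking the straight-line
 * block
 *	r6 = 5
 *	r7 = r6
 *	r2 = r10
 *	r2 += r7	<- r7 must be precise (scalar added to a pointer)
 * starts with reg_mask = {r7}; the "r7 = r6" MOV transfers the mark to r6,
 * and "r6 = 5" (a MOV of a constant) clears it, so backtracking stops in this
 * state without having to walk into parent states.
 */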
/* the scalar precision tracking algorithm:
 * . at the start all registers have precise=false.
 * . scalar ranges are tracked as normal through alu and jmp insns.
 * . once precise value of the scalar register is used in:
 *   . ptr + scalar alu
 *   . if (scalar cond K|scalar)
 *   . helper_call(.., scalar, ...) where ARG_CONST is expected
 *   backtrack through the verifier states and mark all registers and
 *   stack slots with spilled constants that these scalar registers
 *   should be precise.
 * . during state pruning two registers (or spilled stack slots)
 *   are equivalent if both are not precise.
 *
 * Note the verifier cannot simply walk register parentage chain,
 * since many different registers and stack slots could have been
 * used to compute single precise scalar.
 *
 * The approach of starting with precise=true for all registers and then
 * backtrack to mark a register as not precise when the verifier detects
 * that program doesn't care about specific value (e.g., when helper
 * takes register as ARG_ANYTHING parameter) is not safe.
 *
 * It's ok to walk single parentage chain of the verifier states.
 * It's possible that this backtracking will go all the way till 1st insn.
 * All other branches will be explored for needing precision later.
 *
 * The backtracking needs to deal with cases like:
 *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
 * r9 -= r8
 * r5 = r9
 * if r5 > 0x79f goto pc+7
 *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
 * r5 += 1
 * ...
 * call bpf_perf_event_output#25
 *   where .arg5_type = ARG_CONST_SIZE_OR_ZERO
 *
 * and this case:
 * r6 = 1
 * call foo // uses callee's r6 inside to compute r0
 * r0 += r6
 * if r0 == 0 goto
 *
 * to track above reg_mask/stack_mask needs to be independent for each frame.
 *
 * Also if parent's curframe > frame where backtracking started,
 * the verifier needs to mark registers in both frames, otherwise callees
 * may incorrectly prune callers. This is similar to
 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
 *
 * For now backtracking falls back into conservative marking.
 */
static void mark_all_scalars_precise(struct bpf_verifier_env *env,
				     struct bpf_verifier_state *st)
{
	struct bpf_func_state *func;
	struct bpf_reg_state *reg;
	int i, j;

	/* big hammer: mark all scalars precise in this path.
	 * pop_stack may still get !precise scalars.
	 */
	for (; st; st = st->parent)
		for (i = 0; i <= st->curframe; i++) {
			func = st->frame[i];
			for (j = 0; j < BPF_REG_FP; j++) {
				reg = &func->regs[j];
				if (reg->type != SCALAR_VALUE)
					continue;
				reg->precise = true;
			}
			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
				if (func->stack[j].slot_type[0] != STACK_SPILL)
					continue;
				reg = &func->stack[j].spilled_ptr;
				if (reg->type != SCALAR_VALUE)
					continue;
				reg->precise = true;
			}
		}
}
static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
				  int spi)
{
	struct bpf_verifier_state *st = env->cur_state;
	int first_idx = st->first_insn_idx;
	int last_idx = env->insn_idx;
	struct bpf_func_state *func;
	struct bpf_reg_state *reg;
	u32 reg_mask = regno >= 0 ? 1u << regno : 0;
	u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
	bool skip_first = true;
	bool new_marks = false;
	int i, err;

	if (!env->allow_ptr_leaks)
		/* backtracking is root only for now */
		return 0;

	func = st->frame[st->curframe];
	if (regno >= 0) {
		reg = &func->regs[regno];
		if (reg->type != SCALAR_VALUE) {
			WARN_ONCE(1, "backtracking misuse");
			return -EFAULT;
		}
		if (!reg->precise)
			new_marks = true;
		else
			reg_mask = 0;
		reg->precise = true;
	}

	while (spi >= 0) {
		if (func->stack[spi].slot_type[0] != STACK_SPILL) {
			stack_mask = 0;
			break;
		}
		reg = &func->stack[spi].spilled_ptr;
		if (reg->type != SCALAR_VALUE) {
			stack_mask = 0;
			break;
		}
		if (!reg->precise)
			new_marks = true;
		else
			stack_mask = 0;
		reg->precise = true;
		break;
	}

	if (!new_marks)
		return 0;
	if (!reg_mask && !stack_mask)
		return 0;
	for (;;) {
		DECLARE_BITMAP(mask, 64);
		u32 history = st->jmp_history_cnt;

		if (env->log.level & BPF_LOG_LEVEL)
			verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
		for (i = last_idx;;) {
			if (skip_first) {
				err = 0;
				skip_first = false;
			} else {
				err = backtrack_insn(env, i, &reg_mask, &stack_mask);
			}
			if (err == -ENOTSUPP) {
				mark_all_scalars_precise(env, st);
				return 0;
			} else if (err) {
				return err;
			}
			if (!reg_mask && !stack_mask)
				/* Found assignment(s) into tracked register in this state.
				 * Since this state is already marked, just return.
				 * Nothing to be tracked further in the parent state.
				 */
				return 0;
			if (i == first_idx)
				break;
			i = get_prev_insn_idx(st, i, &history);
			if (i >= env->prog->len) {
				/* This can happen if backtracking reached insn 0
				 * and there are still reg_mask or stack_mask
				 * to backtrack.
				 * It means the backtracking missed the spot where
				 * particular register was initialized with a constant.
				 */
				verbose(env, "BUG backtracking idx %d\n", i);
				WARN_ONCE(1, "verifier backtracking bug");
				return -EFAULT;
			}
		}
		st = st->parent;
		if (!st)
			break;

		new_marks = false;
		func = st->frame[st->curframe];
		bitmap_from_u64(mask, reg_mask);
		for_each_set_bit(i, mask, 32) {
			reg = &func->regs[i];
			if (reg->type != SCALAR_VALUE) {
				reg_mask &= ~(1u << i);
				continue;
			}
			if (!reg->precise)
				new_marks = true;
			reg->precise = true;
		}

		bitmap_from_u64(mask, stack_mask);
		for_each_set_bit(i, mask, 64) {
			if (i >= func->allocated_stack / BPF_REG_SIZE) {
				/* the sequence of instructions:
				 * 2: (bf) r3 = r10
				 * 3: (7b) *(u64 *)(r3 -8) = r0
				 * 4: (79) r4 = *(u64 *)(r10 -8)
				 * doesn't contain jmps. It's backtracked
				 * as a single block.
				 * During backtracking insn 3 is not recognized as
				 * stack access, so at the end of backtracking
				 * stack slot fp-8 is still marked in stack_mask.
				 * However the parent state may not have accessed
				 * fp-8 and it's "unallocated" stack space.
				 * In such case fallback to conservative.
				 */
				mark_all_scalars_precise(env, st);
				return 0;
			}

			if (func->stack[i].slot_type[0] != STACK_SPILL) {
				stack_mask &= ~(1ull << i);
				continue;
			}
			reg = &func->stack[i].spilled_ptr;
			if (reg->type != SCALAR_VALUE) {
				stack_mask &= ~(1ull << i);
				continue;
			}
			if (!reg->precise)
				new_marks = true;
			reg->precise = true;
		}
		if (env->log.level & BPF_LOG_LEVEL) {
			print_verifier_state(env, func);
			verbose(env, "parent %s regs=%x stack=%llx marks\n",
				new_marks ? "didn't have" : "already had",
				reg_mask, stack_mask);
		}

		if (!reg_mask && !stack_mask)
			break;
		if (!new_marks)
			break;

		last_idx = st->last_insn_idx;
		first_idx = st->first_insn_idx;
	}
	return 0;
}

static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
{
	return __mark_chain_precision(env, regno, -1);
}

static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
{
	return __mark_chain_precision(env, -1, spi);
}
static bool is_spillable_regtype(enum bpf_reg_type type)
{
	switch (type) {
	case PTR_TO_MAP_VALUE:
	case PTR_TO_MAP_VALUE_OR_NULL:
	case PTR_TO_STACK:
	case PTR_TO_CTX:
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET_END:
	case PTR_TO_FLOW_KEYS:
	case CONST_PTR_TO_MAP:
	case PTR_TO_SOCKET:
	case PTR_TO_SOCKET_OR_NULL:
	case PTR_TO_SOCK_COMMON:
	case PTR_TO_SOCK_COMMON_OR_NULL:
	case PTR_TO_TCP_SOCK:
	case PTR_TO_TCP_SOCK_OR_NULL:
	case PTR_TO_XDP_SOCK:
		return true;
	default:
		return false;
	}
}

/* Does this register contain a constant zero? */
static bool register_is_null(struct bpf_reg_state *reg)
{
	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
}

static bool register_is_const(struct bpf_reg_state *reg)
{
	return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
}

static void save_register_state(struct bpf_func_state *state,
				int spi, struct bpf_reg_state *reg)
{
	int i;

	state->stack[spi].spilled_ptr = *reg;
	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;

	for (i = 0; i < BPF_REG_SIZE; i++)
		state->stack[spi].slot_type[i] = STACK_SPILL;
}
1947 /* check_stack_read/write functions track spill/fill of registers,
1948 * stack boundary and alignment are checked in check_mem_access()
1950 static int check_stack_write(struct bpf_verifier_env
*env
,
1951 struct bpf_func_state
*state
, /* func where register points to */
1952 int off
, int size
, int value_regno
, int insn_idx
)
1954 struct bpf_func_state
*cur
; /* state of the current function */
1955 int i
, slot
= -off
- 1, spi
= slot
/ BPF_REG_SIZE
, err
;
1956 u32 dst_reg
= env
->prog
->insnsi
[insn_idx
].dst_reg
;
1957 struct bpf_reg_state
*reg
= NULL
;
1959 err
= realloc_func_state(state
, round_up(slot
+ 1, BPF_REG_SIZE
),
1960 state
->acquired_refs
, true);
1963 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
1964 * so it's aligned access and [off, off + size) are within stack limits
1966 if (!env
->allow_ptr_leaks
&&
1967 state
->stack
[spi
].slot_type
[0] == STACK_SPILL
&&
1968 size
!= BPF_REG_SIZE
) {
1969 verbose(env
, "attempt to corrupt spilled pointer on stack\n");
1973 cur
= env
->cur_state
->frame
[env
->cur_state
->curframe
];
1974 if (value_regno
>= 0)
1975 reg
= &cur
->regs
[value_regno
];
1977 if (reg
&& size
== BPF_REG_SIZE
&& register_is_const(reg
) &&
1978 !register_is_null(reg
) && env
->allow_ptr_leaks
) {
1979 if (dst_reg
!= BPF_REG_FP
) {
1980 /* The backtracking logic can only recognize explicit
1981 * stack slot address like [fp - 8]. Other spill of
1982 * scalar via different register has to be conervative.
1983 * Backtrack from here and mark all registers as precise
1984 * that contributed into 'reg' being a constant.
1986 err
= mark_chain_precision(env
, value_regno
);
1990 save_register_state(state
, spi
, reg
);
1991 } else if (reg
&& is_spillable_regtype(reg
->type
)) {
1992 /* register containing pointer is being spilled into stack */
1993 if (size
!= BPF_REG_SIZE
) {
1994 verbose_linfo(env
, insn_idx
, "; ");
1995 verbose(env
, "invalid size of register spill\n");
1999 if (state
!= cur
&& reg
->type
== PTR_TO_STACK
) {
2000 verbose(env
, "cannot spill pointers to stack into stack frame of the caller\n");
2004 if (!env
->allow_ptr_leaks
) {
2005 bool sanitize
= false;
2007 if (state
->stack
[spi
].slot_type
[0] == STACK_SPILL
&&
2008 register_is_const(&state
->stack
[spi
].spilled_ptr
))
2010 for (i
= 0; i
< BPF_REG_SIZE
; i
++)
2011 if (state
->stack
[spi
].slot_type
[i
] == STACK_MISC
) {
2016 int *poff
= &env
->insn_aux_data
[insn_idx
].sanitize_stack_off
;
2017 int soff
= (-spi
- 1) * BPF_REG_SIZE
;
2019 /* detected reuse of integer stack slot with a pointer
2020 * which means either llvm is reusing stack slot or
2021 * an attacker is trying to exploit CVE-2018-3639
2022 * (speculative store bypass)
2023 * Have to sanitize that slot with preemptive
2026 if (*poff
&& *poff
!= soff
) {
2027 /* disallow programs where single insn stores
2028 * into two different stack slots, since verifier
2029 * cannot sanitize them
2032 "insn %d cannot access two stack slots fp%d and fp%d",
2033 insn_idx
, *poff
, soff
);
2039 save_register_state(state
, spi
, reg
);
	} else {
		u8 type = STACK_MISC;

		/* regular write of data into stack destroys any spilled ptr */
		state->stack[spi].spilled_ptr.type = NOT_INIT;
		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
		if (state->stack[spi].slot_type[0] == STACK_SPILL)
			for (i = 0; i < BPF_REG_SIZE; i++)
				state->stack[spi].slot_type[i] = STACK_MISC;
		/* only mark the slot as written if all 8 bytes were written
		 * otherwise read propagation may incorrectly stop too soon
		 * when stack slots are partially written.
		 * This heuristic means that read propagation will be
		 * conservative, since it will add reg_live_read marks
		 * to stack slots all the way to first state when programs
		 * writes+reads less than 8 bytes
		 */
		if (size == BPF_REG_SIZE)
			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;

		/* when we zero initialize stack slots mark them as such */
		if (reg && register_is_null(reg)) {
			/* backtracking doesn't work for STACK_ZERO yet. */
			err = mark_chain_precision(env, value_regno);
			if (err)
				return err;
			type = STACK_ZERO;
		}

		/* Mark slots affected by this stack write. */
		for (i = 0; i < size; i++)
			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
				type;
	}
	return 0;
}
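/* Rough illustration of the spill path handled above (example only):
 *
 *   BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),            // R6 = R1 (PTR_TO_CTX)
 *   BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), // spill R6 to fp-8
 *
 * An 8-byte store of a spillable pointer type marks all eight slot_type
 * bytes as STACK_SPILL and copies the register state into spilled_ptr, so a
 * later fill can restore the pointer exactly.
 */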
2078 static int check_stack_read(struct bpf_verifier_env
*env
,
2079 struct bpf_func_state
*reg_state
/* func where register points to */,
2080 int off
, int size
, int value_regno
)
2082 struct bpf_verifier_state
*vstate
= env
->cur_state
;
2083 struct bpf_func_state
*state
= vstate
->frame
[vstate
->curframe
];
2084 int i
, slot
= -off
- 1, spi
= slot
/ BPF_REG_SIZE
;
2085 struct bpf_reg_state
*reg
;
2088 if (reg_state
->allocated_stack
<= slot
) {
2089 verbose(env
, "invalid read from stack off %d+0 size %d\n",
2093 stype
= reg_state
->stack
[spi
].slot_type
;
2094 reg
= ®_state
->stack
[spi
].spilled_ptr
;
2096 if (stype
[0] == STACK_SPILL
) {
2097 if (size
!= BPF_REG_SIZE
) {
2098 if (reg
->type
!= SCALAR_VALUE
) {
2099 verbose_linfo(env
, env
->insn_idx
, "; ");
2100 verbose(env
, "invalid size of register fill\n");
2103 if (value_regno
>= 0) {
2104 mark_reg_unknown(env
, state
->regs
, value_regno
);
2105 state
->regs
[value_regno
].live
|= REG_LIVE_WRITTEN
;
2107 mark_reg_read(env
, reg
, reg
->parent
, REG_LIVE_READ64
);
2110 for (i
= 1; i
< BPF_REG_SIZE
; i
++) {
2111 if (stype
[(slot
- i
) % BPF_REG_SIZE
] != STACK_SPILL
) {
2112 verbose(env
, "corrupted spill memory\n");
2117 if (value_regno
>= 0) {
2118 /* restore register state from stack */
2119 state
->regs
[value_regno
] = *reg
;
2120 /* mark reg as written since spilled pointer state likely
2121 * has its liveness marks cleared by is_state_visited()
2122 * which resets stack/reg liveness for state transitions
2124 state
->regs
[value_regno
].live
|= REG_LIVE_WRITTEN
;
2126 mark_reg_read(env
, reg
, reg
->parent
, REG_LIVE_READ64
);
2130 for (i
= 0; i
< size
; i
++) {
2131 if (stype
[(slot
- i
) % BPF_REG_SIZE
] == STACK_MISC
)
2133 if (stype
[(slot
- i
) % BPF_REG_SIZE
] == STACK_ZERO
) {
2137 verbose(env
, "invalid read from stack off %d+%d size %d\n",
2141 mark_reg_read(env
, reg
, reg
->parent
, REG_LIVE_READ64
);
2142 if (value_regno
>= 0) {
2143 if (zeros
== size
) {
2144 /* any size read into register is zero extended,
2145 * so the whole register == const_zero
2147 __mark_reg_const_zero(&state
->regs
[value_regno
]);
2148 /* backtracking doesn't support STACK_ZERO yet,
2149 * so mark it precise here, so that later
2150 * backtracking can stop here.
2151 * Backtracking may not need this if this register
2152 * doesn't participate in pointer adjustment.
2153 * Forward propagation of precise flag is not
2154 * necessary either. This mark is only to stop
2155 * backtracking. Any register that contributed
2156 * to const 0 was marked precise before spill.
2158 state
->regs
[value_regno
].precise
= true;
2160 /* have read misc data from the stack */
2161 mark_reg_unknown(env
, state
->regs
, value_regno
);
2163 state
->regs
[value_regno
].live
|= REG_LIVE_WRITTEN
;
static int check_stack_access(struct bpf_verifier_env *env,
			      const struct bpf_reg_state *reg,
			      int off, int size)
{
	/* Stack accesses must be at a fixed offset, so that we
	 * can determine what type of data were returned. See
	 * check_stack_read().
	 */
	if (!tnum_is_const(reg->var_off)) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
			tn_buf, off, size);
		return -EACCES;
	}

	if (off >= 0 || off < -MAX_BPF_STACK) {
		verbose(env, "invalid stack off=%d size=%d\n", off, size);
		return -EACCES;
	}

	return 0;
}
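/* For instance, a load like r2 = *(u64 *)(r10 - 8) has a constant var_off and
 * passes the check above, while an access through fp plus an unbounded scalar
 * has a variable var_off and is rejected, so check_stack_read() always knows
 * exactly which slot was touched.
 */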
static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
				 int off, int size, enum bpf_access_type type)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_map *map = regs[regno].map_ptr;
	u32 cap = bpf_map_flags_to_cap(map);

	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
		verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}

	if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
		verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}

	return 0;
}
/* check read/write into map element returned by bpf_map_lookup_elem() */
static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
			      int size, bool zero_size_allowed)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_map *map = regs[regno].map_ptr;

	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
	    off + size > map->value_size) {
		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}
	return 0;
}
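/* Worked example of the bound above: with value_size == 64, an access at
 * off == 60 with size == 8 fails because off + size == 68 > 64, while
 * off == 56 with size == 8 is accepted.
 */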
2232 /* check read/write into a map element with possible variable offset */
2233 static int check_map_access(struct bpf_verifier_env
*env
, u32 regno
,
2234 int off
, int size
, bool zero_size_allowed
)
2236 struct bpf_verifier_state
*vstate
= env
->cur_state
;
2237 struct bpf_func_state
*state
= vstate
->frame
[vstate
->curframe
];
2238 struct bpf_reg_state
*reg
= &state
->regs
[regno
];
2241 /* We may have adjusted the register to this map value, so we
2242 * need to try adding each of min_value and max_value to off
2243 * to make sure our theoretical access will be safe.
2245 if (env
->log
.level
& BPF_LOG_LEVEL
)
2246 print_verifier_state(env
, state
);
2248 /* The minimum value is only important with signed
2249 * comparisons where we can't assume the floor of a
2250 * value is 0. If we are using signed variables for our
2251 * index'es we need to make sure that whatever we use
2252 * will have a set floor within our range.
2254 if (reg
->smin_value
< 0 &&
2255 (reg
->smin_value
== S64_MIN
||
2256 (off
+ reg
->smin_value
!= (s64
)(s32
)(off
+ reg
->smin_value
)) ||
2257 reg
->smin_value
+ off
< 0)) {
2258 verbose(env
, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2262 err
= __check_map_access(env
, regno
, reg
->smin_value
+ off
, size
,
2265 verbose(env
, "R%d min value is outside of the array range\n",
2270 /* If we haven't set a max value then we need to bail since we can't be
2271 * sure we won't do bad things.
2272 * If reg->umax_value + off could overflow, treat that as unbounded too.
2274 if (reg
->umax_value
>= BPF_MAX_VAR_OFF
) {
2275 verbose(env
, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
2279 err
= __check_map_access(env
, regno
, reg
->umax_value
+ off
, size
,
2282 verbose(env
, "R%d max value is outside of the array range\n",
2285 if (map_value_has_spin_lock(reg
->map_ptr
)) {
2286 u32 lock
= reg
->map_ptr
->spin_lock_off
;
2288 /* if any part of struct bpf_spin_lock can be touched by
2289 * load/store reject this program.
2290 * To check that [x1, x2) overlaps with [y1, y2)
2291 * it is sufficient to check x1 < y2 && y1 < x2.
2293 if (reg
->smin_value
+ off
< lock
+ sizeof(struct bpf_spin_lock
) &&
2294 lock
< reg
->umax_value
+ off
+ size
) {
2295 verbose(env
, "bpf_spin_lock cannot be accessed directly by load/store\n");
2302 #define MAX_PACKET_OFF 0xffff
2304 static bool may_access_direct_pkt_data(struct bpf_verifier_env
*env
,
2305 const struct bpf_call_arg_meta
*meta
,
2306 enum bpf_access_type t
)
2308 switch (env
->prog
->type
) {
2309 /* Program types only with direct read access go here! */
2310 case BPF_PROG_TYPE_LWT_IN
:
2311 case BPF_PROG_TYPE_LWT_OUT
:
2312 case BPF_PROG_TYPE_LWT_SEG6LOCAL
:
2313 case BPF_PROG_TYPE_SK_REUSEPORT
:
2314 case BPF_PROG_TYPE_FLOW_DISSECTOR
:
2315 case BPF_PROG_TYPE_CGROUP_SKB
:
2320 /* Program types with direct read + write access go here! */
2321 case BPF_PROG_TYPE_SCHED_CLS
:
2322 case BPF_PROG_TYPE_SCHED_ACT
:
2323 case BPF_PROG_TYPE_XDP
:
2324 case BPF_PROG_TYPE_LWT_XMIT
:
2325 case BPF_PROG_TYPE_SK_SKB
:
2326 case BPF_PROG_TYPE_SK_MSG
:
2328 return meta
->pkt_access
;
2330 env
->seen_direct_write
= true;
2333 case BPF_PROG_TYPE_CGROUP_SOCKOPT
:
2335 env
->seen_direct_write
= true;
2344 static int __check_packet_access(struct bpf_verifier_env
*env
, u32 regno
,
2345 int off
, int size
, bool zero_size_allowed
)
2347 struct bpf_reg_state
*regs
= cur_regs(env
);
2348 struct bpf_reg_state
*reg
= ®s
[regno
];
2350 if (off
< 0 || size
< 0 || (size
== 0 && !zero_size_allowed
) ||
2351 (u64
)off
+ size
> reg
->range
) {
2352 verbose(env
, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
2353 off
, size
, regno
, reg
->id
, reg
->off
, reg
->range
);
2359 static int check_packet_access(struct bpf_verifier_env
*env
, u32 regno
, int off
,
2360 int size
, bool zero_size_allowed
)
2362 struct bpf_reg_state
*regs
= cur_regs(env
);
2363 struct bpf_reg_state
*reg
= ®s
[regno
];
2366 /* We may have added a variable offset to the packet pointer; but any
2367 * reg->range we have comes after that. We are only checking the fixed
2371 /* We don't allow negative numbers, because we aren't tracking enough
2372 * detail to prove they're safe.
2374 if (reg
->smin_value
< 0) {
2375 verbose(env
, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2379 err
= __check_packet_access(env
, regno
, off
, size
, zero_size_allowed
);
2381 verbose(env
, "R%d offset is outside of the packet\n", regno
);
2385 /* __check_packet_access has made sure "off + size - 1" is within u16.
2386 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
2387 * otherwise find_good_pkt_pointers would have refused to set range info
2388 * that __check_packet_access would have rejected this pkt access.
2389 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
2391 env
->prog
->aux
->max_pkt_offset
=
2392 max_t(u32
, env
->prog
->aux
->max_pkt_offset
,
2393 off
+ reg
->umax_value
+ size
- 1);
2398 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
2399 static int check_ctx_access(struct bpf_verifier_env
*env
, int insn_idx
, int off
, int size
,
2400 enum bpf_access_type t
, enum bpf_reg_type
*reg_type
,
2403 struct bpf_insn_access_aux info
= {
2404 .reg_type
= *reg_type
,
2408 if (env
->ops
->is_valid_access
&&
2409 env
->ops
->is_valid_access(off
, size
, t
, env
->prog
, &info
)) {
2410 /* A non zero info.ctx_field_size indicates that this field is a
2411 * candidate for later verifier transformation to load the whole
2412 * field and then apply a mask when accessed with a narrower
2413 * access than actual ctx access size. A zero info.ctx_field_size
2414 * will only allow for whole field access and rejects any other
2415 * type of narrower access.
2417 *reg_type
= info
.reg_type
;
2419 if (*reg_type
== PTR_TO_BTF_ID
)
2420 *btf_id
= info
.btf_id
;
2422 env
->insn_aux_data
[insn_idx
].ctx_field_size
= info
.ctx_field_size
;
2423 /* remember the offset of last byte accessed in ctx */
2424 if (env
->prog
->aux
->max_ctx_offset
< off
+ size
)
2425 env
->prog
->aux
->max_ctx_offset
= off
+ size
;
2429 verbose(env
, "invalid bpf_context access off=%d size=%d\n", off
, size
);
2433 static int check_flow_keys_access(struct bpf_verifier_env
*env
, int off
,
2436 if (size
< 0 || off
< 0 ||
2437 (u64
)off
+ size
> sizeof(struct bpf_flow_keys
)) {
2438 verbose(env
, "invalid access to flow keys off=%d size=%d\n",
2445 static int check_sock_access(struct bpf_verifier_env
*env
, int insn_idx
,
2446 u32 regno
, int off
, int size
,
2447 enum bpf_access_type t
)
2449 struct bpf_reg_state
*regs
= cur_regs(env
);
2450 struct bpf_reg_state
*reg
= ®s
[regno
];
2451 struct bpf_insn_access_aux info
= {};
2454 if (reg
->smin_value
< 0) {
2455 verbose(env
, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2460 switch (reg
->type
) {
2461 case PTR_TO_SOCK_COMMON
:
2462 valid
= bpf_sock_common_is_valid_access(off
, size
, t
, &info
);
2465 valid
= bpf_sock_is_valid_access(off
, size
, t
, &info
);
2467 case PTR_TO_TCP_SOCK
:
2468 valid
= bpf_tcp_sock_is_valid_access(off
, size
, t
, &info
);
2470 case PTR_TO_XDP_SOCK
:
2471 valid
= bpf_xdp_sock_is_valid_access(off
, size
, t
, &info
);
2479 env
->insn_aux_data
[insn_idx
].ctx_field_size
=
2480 info
.ctx_field_size
;
2484 verbose(env
, "R%d invalid %s access off=%d size=%d\n",
2485 regno
, reg_type_str
[reg
->type
], off
, size
);
static bool __is_pointer_value(bool allow_ptr_leaks,
			       const struct bpf_reg_state *reg)
{
	if (allow_ptr_leaks)
		return false;

	return reg->type != SCALAR_VALUE;
}

static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
{
	return cur_regs(env) + regno;
}

static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
}

static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return reg->type == PTR_TO_CTX;
}

static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return type_is_sk_pointer(reg->type);
}

static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return type_is_pkt_pointer(reg->type);
}

static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	/* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
	return reg->type == PTR_TO_FLOW_KEYS;
}
2538 static int check_pkt_ptr_alignment(struct bpf_verifier_env
*env
,
2539 const struct bpf_reg_state
*reg
,
2540 int off
, int size
, bool strict
)
2542 struct tnum reg_off
;
2545 /* Byte size accesses are always allowed. */
2546 if (!strict
|| size
== 1)
2549 /* For platforms that do not have a Kconfig enabling
2550 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
2551 * NET_IP_ALIGN is universally set to '2'. And on platforms
2552 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
2553 * to this code only in strict mode where we want to emulate
2554 * the NET_IP_ALIGN==2 checking. Therefore use an
2555 * unconditional IP align value of '2'.
2559 reg_off
= tnum_add(reg
->var_off
, tnum_const(ip_align
+ reg
->off
+ off
));
2560 if (!tnum_is_aligned(reg_off
, size
)) {
2563 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
2565 "misaligned packet access off %d+%s+%d+%d size %d\n",
2566 ip_align
, tn_buf
, reg
->off
, off
, size
);
2573 static int check_generic_ptr_alignment(struct bpf_verifier_env
*env
,
2574 const struct bpf_reg_state
*reg
,
2575 const char *pointer_desc
,
2576 int off
, int size
, bool strict
)
2578 struct tnum reg_off
;
2580 /* Byte size accesses are always allowed. */
2581 if (!strict
|| size
== 1)
2584 reg_off
= tnum_add(reg
->var_off
, tnum_const(reg
->off
+ off
));
2585 if (!tnum_is_aligned(reg_off
, size
)) {
2588 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
2589 verbose(env
, "misaligned %saccess off %s+%d+%d size %d\n",
2590 pointer_desc
, tn_buf
, reg
->off
, off
, size
);
2597 static int check_ptr_alignment(struct bpf_verifier_env
*env
,
2598 const struct bpf_reg_state
*reg
, int off
,
2599 int size
, bool strict_alignment_once
)
2601 bool strict
= env
->strict_alignment
|| strict_alignment_once
;
2602 const char *pointer_desc
= "";
2604 switch (reg
->type
) {
2606 case PTR_TO_PACKET_META
:
2607 /* Special case, because of NET_IP_ALIGN. Given metadata sits
2608 * right in front, treat it the very same way.
2610 return check_pkt_ptr_alignment(env
, reg
, off
, size
, strict
);
2611 case PTR_TO_FLOW_KEYS
:
2612 pointer_desc
= "flow keys ";
2614 case PTR_TO_MAP_VALUE
:
2615 pointer_desc
= "value ";
2618 pointer_desc
= "context ";
2621 pointer_desc
= "stack ";
2622 /* The stack spill tracking logic in check_stack_write()
2623 * and check_stack_read() relies on stack accesses being
2629 pointer_desc
= "sock ";
2631 case PTR_TO_SOCK_COMMON
:
2632 pointer_desc
= "sock_common ";
2634 case PTR_TO_TCP_SOCK
:
2635 pointer_desc
= "tcp_sock ";
2637 case PTR_TO_XDP_SOCK
:
2638 pointer_desc
= "xdp_sock ";
2643 return check_generic_ptr_alignment(env
, reg
, pointer_desc
, off
, size
,
2647 static int update_stack_depth(struct bpf_verifier_env
*env
,
2648 const struct bpf_func_state
*func
,
2651 u16 stack
= env
->subprog_info
[func
->subprogno
].stack_depth
;
2656 /* update known max for given subprogram */
2657 env
->subprog_info
[func
->subprogno
].stack_depth
= -off
;
2661 /* starting from main bpf function walk all instructions of the function
2662 * and recursively walk all callees that given function can call.
2663 * Ignore jump and exit insns.
2664 * Since recursion is prevented by check_cfg() this algorithm
2665 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
2667 static int check_max_stack_depth(struct bpf_verifier_env
*env
)
2669 int depth
= 0, frame
= 0, idx
= 0, i
= 0, subprog_end
;
2670 struct bpf_subprog_info
*subprog
= env
->subprog_info
;
2671 struct bpf_insn
*insn
= env
->prog
->insnsi
;
2672 int ret_insn
[MAX_CALL_FRAMES
];
2673 int ret_prog
[MAX_CALL_FRAMES
];
2676 /* round up to 32-bytes, since this is granularity
2677 * of interpreter stack size
2679 depth
+= round_up(max_t(u32
, subprog
[idx
].stack_depth
, 1), 32);
2680 if (depth
> MAX_BPF_STACK
) {
2681 verbose(env
, "combined stack size of %d calls is %d. Too large\n",
2686 subprog_end
= subprog
[idx
+ 1].start
;
2687 for (; i
< subprog_end
; i
++) {
2688 if (insn
[i
].code
!= (BPF_JMP
| BPF_CALL
))
2690 if (insn
[i
].src_reg
!= BPF_PSEUDO_CALL
)
2692 /* remember insn and function to return to */
2693 ret_insn
[frame
] = i
+ 1;
2694 ret_prog
[frame
] = idx
;
2696 /* find the callee */
2697 i
= i
+ insn
[i
].imm
+ 1;
2698 idx
= find_subprog(env
, i
);
2700 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
2705 if (frame
>= MAX_CALL_FRAMES
) {
2706 verbose(env
, "the call stack of %d frames is too deep !\n",
2712 /* end of for() loop means the last insn of the 'subprog'
2713 * was reached. Doesn't matter whether it was JA or EXIT
2717 depth
-= round_up(max_t(u32
, subprog
[idx
].stack_depth
, 1), 32);
2719 i
= ret_insn
[frame
];
2720 idx
= ret_prog
[frame
];
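/* Example of the accounting above: a main program using 96 bytes of stack
 * that calls a subprog using 448 bytes reaches depth = 96 + 448 = 544, which
 * exceeds MAX_BPF_STACK (512) and is rejected; returning from the callee
 * subtracts its rounded-up stack_depth again.
 */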
2724 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2725 static int get_callee_stack_depth(struct bpf_verifier_env
*env
,
2726 const struct bpf_insn
*insn
, int idx
)
2728 int start
= idx
+ insn
->imm
+ 1, subprog
;
2730 subprog
= find_subprog(env
, start
);
2732 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
2736 return env
->subprog_info
[subprog
].stack_depth
;
2740 static int check_ctx_reg(struct bpf_verifier_env
*env
,
2741 const struct bpf_reg_state
*reg
, int regno
)
2743 /* Access to ctx or passing it to a helper is only allowed in
2744 * its original, unmodified form.
2748 verbose(env
, "dereference of modified ctx ptr R%d off=%d disallowed\n",
2753 if (!tnum_is_const(reg
->var_off
) || reg
->var_off
.value
) {
2756 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
2757 verbose(env
, "variable ctx access var_off=%s disallowed\n", tn_buf
);
2764 static int check_tp_buffer_access(struct bpf_verifier_env
*env
,
2765 const struct bpf_reg_state
*reg
,
2766 int regno
, int off
, int size
)
2770 "R%d invalid tracepoint buffer access: off=%d, size=%d",
2774 if (!tnum_is_const(reg
->var_off
) || reg
->var_off
.value
) {
2777 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
2779 "R%d invalid variable buffer offset: off=%d, var_off=%s",
2780 regno
, off
, tn_buf
);
2783 if (off
+ size
> env
->prog
->aux
->max_tp_access
)
2784 env
->prog
->aux
->max_tp_access
= off
+ size
;
/* truncate register to smaller size (in bytes)
 * must be called with size < BPF_REG_SIZE
 */
static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
{
	u64 mask;

	/* clear high bits in bit representation */
	reg->var_off = tnum_cast(reg->var_off, size);

	/* fix arithmetic bounds */
	mask = ((u64)1 << (size * 8)) - 1;
	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
		reg->umin_value &= mask;
		reg->umax_value &= mask;
	} else {
		reg->umin_value = 0;
		reg->umax_value = mask;
	}
	reg->smin_value = reg->umin_value;
	reg->smax_value = reg->umax_value;
}
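/* Example: truncating a register known to be in [0x0, 0x1ff] to size == 1
 * keeps only the low byte in var_off; the old bounds disagree above the
 * 0xff mask, so the range is widened to [0, 0xff] instead of being masked.
 */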
static bool bpf_map_is_rdonly(const struct bpf_map *map)
{
	return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
}
2818 static int bpf_map_direct_read(struct bpf_map
*map
, int off
, int size
, u64
*val
)
2824 err
= map
->ops
->map_direct_value_addr(map
, &addr
, off
);
2827 ptr
= (void *)(long)addr
+ off
;
2831 *val
= (u64
)*(u8
*)ptr
;
2834 *val
= (u64
)*(u16
*)ptr
;
2837 *val
= (u64
)*(u32
*)ptr
;
2848 static int check_ptr_to_btf_access(struct bpf_verifier_env
*env
,
2849 struct bpf_reg_state
*regs
,
2850 int regno
, int off
, int size
,
2851 enum bpf_access_type atype
,
2854 struct bpf_reg_state
*reg
= regs
+ regno
;
2855 const struct btf_type
*t
= btf_type_by_id(btf_vmlinux
, reg
->btf_id
);
2856 const char *tname
= btf_name_by_offset(btf_vmlinux
, t
->name_off
);
2860 if (atype
!= BPF_READ
) {
2861 verbose(env
, "only read is supported\n");
2867 "R%d is ptr_%s invalid negative access: off=%d\n",
2871 if (!tnum_is_const(reg
->var_off
) || reg
->var_off
.value
) {
2874 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
2876 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
2877 regno
, tname
, off
, tn_buf
);
2881 ret
= btf_struct_access(&env
->log
, t
, off
, size
, atype
, &btf_id
);
2885 if (ret
== SCALAR_VALUE
) {
2886 mark_reg_unknown(env
, regs
, value_regno
);
2889 mark_reg_known_zero(env
, regs
, value_regno
);
2890 regs
[value_regno
].type
= PTR_TO_BTF_ID
;
2891 regs
[value_regno
].btf_id
= btf_id
;
2895 /* check whether memory at (regno + off) is accessible for t = (read | write)
2896 * if t==write, value_regno is a register which value is stored into memory
2897 * if t==read, value_regno is a register which will receive the value from memory
2898 * if t==write && value_regno==-1, some unknown value is stored into memory
2899 * if t==read && value_regno==-1, don't care what we read from memory
2901 static int check_mem_access(struct bpf_verifier_env
*env
, int insn_idx
, u32 regno
,
2902 int off
, int bpf_size
, enum bpf_access_type t
,
2903 int value_regno
, bool strict_alignment_once
)
2905 struct bpf_reg_state
*regs
= cur_regs(env
);
2906 struct bpf_reg_state
*reg
= regs
+ regno
;
2907 struct bpf_func_state
*state
;
2910 size
= bpf_size_to_bytes(bpf_size
);
2914 /* alignment checks will add in reg->off themselves */
2915 err
= check_ptr_alignment(env
, reg
, off
, size
, strict_alignment_once
);
2919 /* for access checks, reg->off is just part of off */
2922 if (reg
->type
== PTR_TO_MAP_VALUE
) {
2923 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
2924 is_pointer_value(env
, value_regno
)) {
2925 verbose(env
, "R%d leaks addr into map\n", value_regno
);
2928 err
= check_map_access_type(env
, regno
, off
, size
, t
);
2931 err
= check_map_access(env
, regno
, off
, size
, false);
2932 if (!err
&& t
== BPF_READ
&& value_regno
>= 0) {
2933 struct bpf_map
*map
= reg
->map_ptr
;
2935 /* if map is read-only, track its contents as scalars */
2936 if (tnum_is_const(reg
->var_off
) &&
2937 bpf_map_is_rdonly(map
) &&
2938 map
->ops
->map_direct_value_addr
) {
2939 int map_off
= off
+ reg
->var_off
.value
;
2942 err
= bpf_map_direct_read(map
, map_off
, size
,
2947 regs
[value_regno
].type
= SCALAR_VALUE
;
2948 __mark_reg_known(®s
[value_regno
], val
);
2950 mark_reg_unknown(env
, regs
, value_regno
);
2953 } else if (reg
->type
== PTR_TO_CTX
) {
2954 enum bpf_reg_type reg_type
= SCALAR_VALUE
;
2957 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
2958 is_pointer_value(env
, value_regno
)) {
2959 verbose(env
, "R%d leaks addr into ctx\n", value_regno
);
2963 err
= check_ctx_reg(env
, reg
, regno
);
2967 err
= check_ctx_access(env
, insn_idx
, off
, size
, t
, ®_type
, &btf_id
);
2969 verbose_linfo(env
, insn_idx
, "; ");
2970 if (!err
&& t
== BPF_READ
&& value_regno
>= 0) {
2971 /* ctx access returns either a scalar, or a
2972 * PTR_TO_PACKET[_META,_END]. In the latter
2973 * case, we know the offset is zero.
2975 if (reg_type
== SCALAR_VALUE
) {
2976 mark_reg_unknown(env
, regs
, value_regno
);
2978 mark_reg_known_zero(env
, regs
,
2980 if (reg_type_may_be_null(reg_type
))
2981 regs
[value_regno
].id
= ++env
->id_gen
;
2982 /* A load of ctx field could have different
2983 * actual load size with the one encoded in the
2984 * insn. When the dst is PTR, it is for sure not
2987 regs
[value_regno
].subreg_def
= DEF_NOT_SUBREG
;
2988 if (reg_type
== PTR_TO_BTF_ID
)
2989 regs
[value_regno
].btf_id
= btf_id
;
2991 regs
[value_regno
].type
= reg_type
;
2994 } else if (reg
->type
== PTR_TO_STACK
) {
2995 off
+= reg
->var_off
.value
;
2996 err
= check_stack_access(env
, reg
, off
, size
);
3000 state
= func(env
, reg
);
3001 err
= update_stack_depth(env
, state
, off
);
3006 err
= check_stack_write(env
, state
, off
, size
,
3007 value_regno
, insn_idx
);
3009 err
= check_stack_read(env
, state
, off
, size
,
3011 } else if (reg_is_pkt_pointer(reg
)) {
3012 if (t
== BPF_WRITE
&& !may_access_direct_pkt_data(env
, NULL
, t
)) {
3013 verbose(env
, "cannot write into packet\n");
3016 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
3017 is_pointer_value(env
, value_regno
)) {
3018 verbose(env
, "R%d leaks addr into packet\n",
3022 err
= check_packet_access(env
, regno
, off
, size
, false);
3023 if (!err
&& t
== BPF_READ
&& value_regno
>= 0)
3024 mark_reg_unknown(env
, regs
, value_regno
);
3025 } else if (reg
->type
== PTR_TO_FLOW_KEYS
) {
3026 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
3027 is_pointer_value(env
, value_regno
)) {
3028 verbose(env
, "R%d leaks addr into flow keys\n",
3033 err
= check_flow_keys_access(env
, off
, size
);
3034 if (!err
&& t
== BPF_READ
&& value_regno
>= 0)
3035 mark_reg_unknown(env
, regs
, value_regno
);
3036 } else if (type_is_sk_pointer(reg
->type
)) {
3037 if (t
== BPF_WRITE
) {
3038 verbose(env
, "R%d cannot write into %s\n",
3039 regno
, reg_type_str
[reg
->type
]);
3042 err
= check_sock_access(env
, insn_idx
, regno
, off
, size
, t
);
3043 if (!err
&& value_regno
>= 0)
3044 mark_reg_unknown(env
, regs
, value_regno
);
3045 } else if (reg
->type
== PTR_TO_TP_BUFFER
) {
3046 err
= check_tp_buffer_access(env
, reg
, regno
, off
, size
);
3047 if (!err
&& t
== BPF_READ
&& value_regno
>= 0)
3048 mark_reg_unknown(env
, regs
, value_regno
);
3049 } else if (reg
->type
== PTR_TO_BTF_ID
) {
3050 err
= check_ptr_to_btf_access(env
, regs
, regno
, off
, size
, t
,
3053 verbose(env
, "R%d invalid mem access '%s'\n", regno
,
3054 reg_type_str
[reg
->type
]);
3058 if (!err
&& size
< BPF_REG_SIZE
&& value_regno
>= 0 && t
== BPF_READ
&&
3059 regs
[value_regno
].type
== SCALAR_VALUE
) {
3060 /* b/h/w load zero-extends, mark upper bits as known 0 */
3061 coerce_reg_to_size(®s
[value_regno
], size
);
3066 static int check_xadd(struct bpf_verifier_env
*env
, int insn_idx
, struct bpf_insn
*insn
)
3070 if ((BPF_SIZE(insn
->code
) != BPF_W
&& BPF_SIZE(insn
->code
) != BPF_DW
) ||
3072 verbose(env
, "BPF_XADD uses reserved fields\n");
3076 /* check src1 operand */
3077 err
= check_reg_arg(env
, insn
->src_reg
, SRC_OP
);
3081 /* check src2 operand */
3082 err
= check_reg_arg(env
, insn
->dst_reg
, SRC_OP
);
3086 if (is_pointer_value(env
, insn
->src_reg
)) {
3087 verbose(env
, "R%d leaks addr into mem\n", insn
->src_reg
);
3091 if (is_ctx_reg(env
, insn
->dst_reg
) ||
3092 is_pkt_reg(env
, insn
->dst_reg
) ||
3093 is_flow_key_reg(env
, insn
->dst_reg
) ||
3094 is_sk_reg(env
, insn
->dst_reg
)) {
3095 verbose(env
, "BPF_XADD stores into R%d %s is not allowed\n",
3097 reg_type_str
[reg_state(env
, insn
->dst_reg
)->type
]);
3101 /* check whether atomic_add can read the memory */
3102 err
= check_mem_access(env
, insn_idx
, insn
->dst_reg
, insn
->off
,
3103 BPF_SIZE(insn
->code
), BPF_READ
, -1, true);
3107 /* check whether atomic_add can write into the same memory */
3108 return check_mem_access(env
, insn_idx
, insn
->dst_reg
, insn
->off
,
3109 BPF_SIZE(insn
->code
), BPF_WRITE
, -1, true);
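/* A typical instruction handled here (illustration only):
 *
 *   BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2, 0)   // lock *(u64 *)(r1 + 0) += r2
 *
 * Both a read and a write access are verified above because the atomic add
 * performs both on the same location.
 */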
3112 static int __check_stack_boundary(struct bpf_verifier_env
*env
, u32 regno
,
3113 int off
, int access_size
,
3114 bool zero_size_allowed
)
3116 struct bpf_reg_state
*reg
= reg_state(env
, regno
);
3118 if (off
>= 0 || off
< -MAX_BPF_STACK
|| off
+ access_size
> 0 ||
3119 access_size
< 0 || (access_size
== 0 && !zero_size_allowed
)) {
3120 if (tnum_is_const(reg
->var_off
)) {
3121 verbose(env
, "invalid stack type R%d off=%d access_size=%d\n",
3122 regno
, off
, access_size
);
3126 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
3127 verbose(env
, "invalid stack type R%d var_off=%s access_size=%d\n",
3128 regno
, tn_buf
, access_size
);
3135 /* when register 'regno' is passed into function that will read 'access_size'
3136 * bytes from that pointer, make sure that it's within stack boundary
3137 * and all elements of stack are initialized.
3138 * Unlike most pointer bounds-checking functions, this one doesn't take an
3139 * 'off' argument, so it has to add in reg->off itself.
3141 static int check_stack_boundary(struct bpf_verifier_env
*env
, int regno
,
3142 int access_size
, bool zero_size_allowed
,
3143 struct bpf_call_arg_meta
*meta
)
3145 struct bpf_reg_state
*reg
= reg_state(env
, regno
);
3146 struct bpf_func_state
*state
= func(env
, reg
);
3147 int err
, min_off
, max_off
, i
, j
, slot
, spi
;
3149 if (reg
->type
!= PTR_TO_STACK
) {
3150 /* Allow zero-byte read from NULL, regardless of pointer type */
3151 if (zero_size_allowed
&& access_size
== 0 &&
3152 register_is_null(reg
))
3155 verbose(env
, "R%d type=%s expected=%s\n", regno
,
3156 reg_type_str
[reg
->type
],
3157 reg_type_str
[PTR_TO_STACK
]);
3161 if (tnum_is_const(reg
->var_off
)) {
3162 min_off
= max_off
= reg
->var_off
.value
+ reg
->off
;
3163 err
= __check_stack_boundary(env
, regno
, min_off
, access_size
,
3168 /* Variable offset is prohibited for unprivileged mode for
3169 * simplicity since it requires corresponding support in
3170 * Spectre masking for stack ALU.
3171 * See also retrieve_ptr_limit().
3173 if (!env
->allow_ptr_leaks
) {
3176 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
3177 verbose(env
, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
3181 /* Only initialized buffer on stack is allowed to be accessed
3182 * with variable offset. With uninitialized buffer it's hard to
3183 * guarantee that whole memory is marked as initialized on
3184 * helper return since specific bounds are unknown what may
3185 * cause uninitialized stack leaking.
3187 if (meta
&& meta
->raw_mode
)
3190 if (reg
->smax_value
>= BPF_MAX_VAR_OFF
||
3191 reg
->smax_value
<= -BPF_MAX_VAR_OFF
) {
3192 verbose(env
, "R%d unbounded indirect variable offset stack access\n",
3196 min_off
= reg
->smin_value
+ reg
->off
;
3197 max_off
= reg
->smax_value
+ reg
->off
;
3198 err
= __check_stack_boundary(env
, regno
, min_off
, access_size
,
3201 verbose(env
, "R%d min value is outside of stack bound\n",
3205 err
= __check_stack_boundary(env
, regno
, max_off
, access_size
,
3208 verbose(env
, "R%d max value is outside of stack bound\n",
3214 if (meta
&& meta
->raw_mode
) {
3215 meta
->access_size
= access_size
;
3216 meta
->regno
= regno
;
3220 for (i
= min_off
; i
< max_off
+ access_size
; i
++) {
3224 spi
= slot
/ BPF_REG_SIZE
;
3225 if (state
->allocated_stack
<= slot
)
3227 stype
= &state
->stack
[spi
].slot_type
[slot
% BPF_REG_SIZE
];
3228 if (*stype
== STACK_MISC
)
3230 if (*stype
== STACK_ZERO
) {
3231 /* helper can write anything into the stack */
3232 *stype
= STACK_MISC
;
3235 if (state
->stack
[spi
].slot_type
[0] == STACK_SPILL
&&
3236 state
->stack
[spi
].spilled_ptr
.type
== SCALAR_VALUE
) {
3237 __mark_reg_unknown(&state
->stack
[spi
].spilled_ptr
);
3238 for (j
= 0; j
< BPF_REG_SIZE
; j
++)
3239 state
->stack
[spi
].slot_type
[j
] = STACK_MISC
;
3244 if (tnum_is_const(reg
->var_off
)) {
3245 verbose(env
, "invalid indirect read from stack off %d+%d size %d\n",
3246 min_off
, i
- min_off
, access_size
);
3250 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
3251 verbose(env
, "invalid indirect read from stack var_off %s+%d size %d\n",
3252 tn_buf
, i
- min_off
, access_size
);
3256 /* reading any byte out of 8-byte 'spill_slot' will cause
3257 * the whole slot to be marked as 'read'
3259 mark_reg_read(env
, &state
->stack
[spi
].spilled_ptr
,
3260 state
->stack
[spi
].spilled_ptr
.parent
,
3263 return update_stack_depth(env
, state
, min_off
);
3266 static int check_helper_mem_access(struct bpf_verifier_env
*env
, int regno
,
3267 int access_size
, bool zero_size_allowed
,
3268 struct bpf_call_arg_meta
*meta
)
3270 struct bpf_reg_state
*regs
= cur_regs(env
), *reg
= ®s
[regno
];
3272 switch (reg
->type
) {
3274 case PTR_TO_PACKET_META
:
3275 return check_packet_access(env
, regno
, reg
->off
, access_size
,
3277 case PTR_TO_MAP_VALUE
:
3278 if (check_map_access_type(env
, regno
, reg
->off
, access_size
,
3279 meta
&& meta
->raw_mode
? BPF_WRITE
:
3282 return check_map_access(env
, regno
, reg
->off
, access_size
,
3284 default: /* scalar_value|ptr_to_stack or invalid ptr */
3285 return check_stack_boundary(env
, regno
, access_size
,
3286 zero_size_allowed
, meta
);
/* Implementation details:
 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
 * Two bpf_map_lookups (even with the same key) will have different reg->id.
 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
 * value_or_null->value transition, since the verifier only cares about
 * the range of access to valid map value pointer and doesn't care about actual
 * address of the map element.
 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
 * reg->id > 0 after value_or_null->value transition. By doing so
 * two bpf_map_lookups will be considered two different pointers that
 * point to different bpf_spin_locks.
 * The verifier allows taking only one bpf_spin_lock at a time to avoid
 * dead-locks.
 * Since only one bpf_spin_lock is allowed the checks are simpler than
 * reg_is_refcounted() logic. The verifier needs to remember only
 * one spin_lock instead of array of acquired_refs.
 * cur_state->active_spin_lock remembers which map value element got locked
 * and clears it after bpf_spin_unlock.
 */
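/* Rough sketch of the program pattern being verified (example only; assumes
 * a map whose value contains a 'struct bpf_spin_lock' member named 'lock'):
 *
 *   val = bpf_map_lookup_elem(&map, &key);
 *   if (!val)
 *       return 0;
 *   bpf_spin_lock(&val->lock);
 *   val->counter++;
 *   bpf_spin_unlock(&val->lock);
 *
 * Between lock and unlock the verifier keeps val's reg->id in
 * cur_state->active_spin_lock.
 */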
3309 static int process_spin_lock(struct bpf_verifier_env
*env
, int regno
,
3312 struct bpf_reg_state
*regs
= cur_regs(env
), *reg
= ®s
[regno
];
3313 struct bpf_verifier_state
*cur
= env
->cur_state
;
3314 bool is_const
= tnum_is_const(reg
->var_off
);
3315 struct bpf_map
*map
= reg
->map_ptr
;
3316 u64 val
= reg
->var_off
.value
;
3318 if (reg
->type
!= PTR_TO_MAP_VALUE
) {
3319 verbose(env
, "R%d is not a pointer to map_value\n", regno
);
3324 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
3330 "map '%s' has to have BTF in order to use bpf_spin_lock\n",
3334 if (!map_value_has_spin_lock(map
)) {
3335 if (map
->spin_lock_off
== -E2BIG
)
3337 "map '%s' has more than one 'struct bpf_spin_lock'\n",
3339 else if (map
->spin_lock_off
== -ENOENT
)
3341 "map '%s' doesn't have 'struct bpf_spin_lock'\n",
3345 "map '%s' is not a struct type or bpf_spin_lock is mangled\n",
3349 if (map
->spin_lock_off
!= val
+ reg
->off
) {
3350 verbose(env
, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
3355 if (cur
->active_spin_lock
) {
3357 "Locking two bpf_spin_locks are not allowed\n");
3360 cur
->active_spin_lock
= reg
->id
;
3362 if (!cur
->active_spin_lock
) {
3363 verbose(env
, "bpf_spin_unlock without taking a lock\n");
3366 if (cur
->active_spin_lock
!= reg
->id
) {
3367 verbose(env
, "bpf_spin_unlock of different lock\n");
3370 cur
->active_spin_lock
= 0;
static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_MEM ||
	       type == ARG_PTR_TO_MEM_OR_NULL ||
	       type == ARG_PTR_TO_UNINIT_MEM;
}

static bool arg_type_is_mem_size(enum bpf_arg_type type)
{
	return type == ARG_CONST_SIZE ||
	       type == ARG_CONST_SIZE_OR_ZERO;
}

static bool arg_type_is_int_ptr(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_INT ||
	       type == ARG_PTR_TO_LONG;
}

static int int_ptr_type_to_size(enum bpf_arg_type type)
{
	if (type == ARG_PTR_TO_INT)
		return sizeof(u32);
	else if (type == ARG_PTR_TO_LONG)
		return sizeof(u64);

	return -EINVAL;
}
3404 static int check_func_arg(struct bpf_verifier_env
*env
, u32 regno
,
3405 enum bpf_arg_type arg_type
,
3406 struct bpf_call_arg_meta
*meta
)
3408 struct bpf_reg_state
*regs
= cur_regs(env
), *reg
= ®s
[regno
];
3409 enum bpf_reg_type expected_type
, type
= reg
->type
;
3412 if (arg_type
== ARG_DONTCARE
)
3415 err
= check_reg_arg(env
, regno
, SRC_OP
);
3419 if (arg_type
== ARG_ANYTHING
) {
3420 if (is_pointer_value(env
, regno
)) {
3421 verbose(env
, "R%d leaks addr into helper function\n",
3428 if (type_is_pkt_pointer(type
) &&
3429 !may_access_direct_pkt_data(env
, meta
, BPF_READ
)) {
3430 verbose(env
, "helper access to the packet is not allowed\n");
3434 if (arg_type
== ARG_PTR_TO_MAP_KEY
||
3435 arg_type
== ARG_PTR_TO_MAP_VALUE
||
3436 arg_type
== ARG_PTR_TO_UNINIT_MAP_VALUE
||
3437 arg_type
== ARG_PTR_TO_MAP_VALUE_OR_NULL
) {
3438 expected_type
= PTR_TO_STACK
;
3439 if (register_is_null(reg
) &&
3440 arg_type
== ARG_PTR_TO_MAP_VALUE_OR_NULL
)
3441 /* final test in check_stack_boundary() */;
3442 else if (!type_is_pkt_pointer(type
) &&
3443 type
!= PTR_TO_MAP_VALUE
&&
3444 type
!= expected_type
)
3446 } else if (arg_type
== ARG_CONST_SIZE
||
3447 arg_type
== ARG_CONST_SIZE_OR_ZERO
) {
3448 expected_type
= SCALAR_VALUE
;
3449 if (type
!= expected_type
)
3451 } else if (arg_type
== ARG_CONST_MAP_PTR
) {
3452 expected_type
= CONST_PTR_TO_MAP
;
3453 if (type
!= expected_type
)
3455 } else if (arg_type
== ARG_PTR_TO_CTX
) {
3456 expected_type
= PTR_TO_CTX
;
3457 if (type
!= expected_type
)
3459 err
= check_ctx_reg(env
, reg
, regno
);
3462 } else if (arg_type
== ARG_PTR_TO_SOCK_COMMON
) {
3463 expected_type
= PTR_TO_SOCK_COMMON
;
3464 /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
3465 if (!type_is_sk_pointer(type
))
3467 if (reg
->ref_obj_id
) {
3468 if (meta
->ref_obj_id
) {
3469 verbose(env
, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
3470 regno
, reg
->ref_obj_id
,
3474 meta
->ref_obj_id
= reg
->ref_obj_id
;
3476 } else if (arg_type
== ARG_PTR_TO_SOCKET
) {
3477 expected_type
= PTR_TO_SOCKET
;
3478 if (type
!= expected_type
)
3480 } else if (arg_type
== ARG_PTR_TO_BTF_ID
) {
3481 expected_type
= PTR_TO_BTF_ID
;
3482 if (type
!= expected_type
)
3484 if (reg
->btf_id
!= meta
->btf_id
) {
3485 verbose(env
, "Helper has type %s got %s in R%d\n",
3486 kernel_type_name(meta
->btf_id
),
3487 kernel_type_name(reg
->btf_id
), regno
);
3491 if (!tnum_is_const(reg
->var_off
) || reg
->var_off
.value
|| reg
->off
) {
3492 verbose(env
, "R%d is a pointer to in-kernel struct with non-zero offset\n",
3496 } else if (arg_type
== ARG_PTR_TO_SPIN_LOCK
) {
3497 if (meta
->func_id
== BPF_FUNC_spin_lock
) {
3498 if (process_spin_lock(env
, regno
, true))
3500 } else if (meta
->func_id
== BPF_FUNC_spin_unlock
) {
3501 if (process_spin_lock(env
, regno
, false))
3504 verbose(env
, "verifier internal error\n");
3507 } else if (arg_type_is_mem_ptr(arg_type
)) {
3508 expected_type
= PTR_TO_STACK
;
3509 /* One exception here. In case function allows for NULL to be
3510 * passed in as argument, it's a SCALAR_VALUE type. Final test
3511 * happens during stack boundary checking.
3513 if (register_is_null(reg
) &&
3514 arg_type
== ARG_PTR_TO_MEM_OR_NULL
)
3515 /* final test in check_stack_boundary() */;
3516 else if (!type_is_pkt_pointer(type
) &&
3517 type
!= PTR_TO_MAP_VALUE
&&
3518 type
!= expected_type
)
3520 meta
->raw_mode
= arg_type
== ARG_PTR_TO_UNINIT_MEM
;
3521 } else if (arg_type_is_int_ptr(arg_type
)) {
3522 expected_type
= PTR_TO_STACK
;
3523 if (!type_is_pkt_pointer(type
) &&
3524 type
!= PTR_TO_MAP_VALUE
&&
3525 type
!= expected_type
)
3528 verbose(env
, "unsupported arg_type %d\n", arg_type
);
3532 if (arg_type
== ARG_CONST_MAP_PTR
) {
3533 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
3534 meta
->map_ptr
= reg
->map_ptr
;
3535 } else if (arg_type
== ARG_PTR_TO_MAP_KEY
) {
3536 /* bpf_map_xxx(..., map_ptr, ..., key) call:
3537 * check that [key, key + map->key_size) are within
3538 * stack limits and initialized
3540 if (!meta
->map_ptr
) {
3541 /* in function declaration map_ptr must come before
3542 * map_key, so that it's verified and known before
3543 * we have to check map_key here. Otherwise it means
3544 * that kernel subsystem misconfigured verifier
3546 verbose(env
, "invalid map_ptr to access map->key\n");
3549 err
= check_helper_mem_access(env
, regno
,
3550 meta
->map_ptr
->key_size
, false,
3552 } else if (arg_type
== ARG_PTR_TO_MAP_VALUE
||
3553 (arg_type
== ARG_PTR_TO_MAP_VALUE_OR_NULL
&&
3554 !register_is_null(reg
)) ||
3555 arg_type
== ARG_PTR_TO_UNINIT_MAP_VALUE
) {
3556 /* bpf_map_xxx(..., map_ptr, ..., value) call:
3557 * check [value, value + map->value_size) validity
3559 if (!meta
->map_ptr
) {
3560 /* kernel subsystem misconfigured verifier */
3561 verbose(env
, "invalid map_ptr to access map->value\n");
3564 meta
->raw_mode
= (arg_type
== ARG_PTR_TO_UNINIT_MAP_VALUE
);
3565 err
= check_helper_mem_access(env
, regno
,
3566 meta
->map_ptr
->value_size
, false,
3568 } else if (arg_type_is_mem_size(arg_type
)) {
3569 bool zero_size_allowed
= (arg_type
== ARG_CONST_SIZE_OR_ZERO
);
3571 /* remember the mem_size which may be used later
3572 * to refine return values.
3574 meta
->msize_smax_value
= reg
->smax_value
;
3575 meta
->msize_umax_value
= reg
->umax_value
;
3577 /* The register is SCALAR_VALUE; the access check
3578 * happens using its boundaries.
3580 if (!tnum_is_const(reg
->var_off
))
3581 /* For unprivileged variable accesses, disable raw
3582 * mode so that the program is required to
3583 * initialize all the memory that the helper could
3584 * just partially fill up.
3588 if (reg
->smin_value
< 0) {
3589 verbose(env
, "R%d min value is negative, either use unsigned or 'var &= const'\n",
3594 if (reg
->umin_value
== 0) {
3595 err
= check_helper_mem_access(env
, regno
- 1, 0,
3602 if (reg
->umax_value
>= BPF_MAX_VAR_SIZ
) {
3603 verbose(env
, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
3607 err
= check_helper_mem_access(env
, regno
- 1,
3609 zero_size_allowed
, meta
);
3611 err
= mark_chain_precision(env
, regno
);
3612 } else if (arg_type_is_int_ptr(arg_type
)) {
3613 int size
= int_ptr_type_to_size(arg_type
);
3615 err
= check_helper_mem_access(env
, regno
, size
, false, meta
);
3618 err
= check_ptr_alignment(env
, reg
, 0, size
, true);
3623 verbose(env
, "R%d type=%s expected=%s\n", regno
,
3624 reg_type_str
[type
], reg_type_str
[expected_type
]);
3628 static int check_map_func_compatibility(struct bpf_verifier_env
*env
,
3629 struct bpf_map
*map
, int func_id
)
3634 /* We need a two way check, first is from map perspective ... */
3635 switch (map
->map_type
) {
3636 case BPF_MAP_TYPE_PROG_ARRAY
:
3637 if (func_id
!= BPF_FUNC_tail_call
)
3640 case BPF_MAP_TYPE_PERF_EVENT_ARRAY
:
3641 if (func_id
!= BPF_FUNC_perf_event_read
&&
3642 func_id
!= BPF_FUNC_perf_event_output
&&
3643 func_id
!= BPF_FUNC_skb_output
&&
3644 func_id
!= BPF_FUNC_perf_event_read_value
)
3647 case BPF_MAP_TYPE_STACK_TRACE
:
3648 if (func_id
!= BPF_FUNC_get_stackid
)
3651 case BPF_MAP_TYPE_CGROUP_ARRAY
:
3652 if (func_id
!= BPF_FUNC_skb_under_cgroup
&&
3653 func_id
!= BPF_FUNC_current_task_under_cgroup
)
3656 case BPF_MAP_TYPE_CGROUP_STORAGE
:
3657 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
:
3658 if (func_id
!= BPF_FUNC_get_local_storage
)
3661 case BPF_MAP_TYPE_DEVMAP
:
3662 case BPF_MAP_TYPE_DEVMAP_HASH
:
3663 if (func_id
!= BPF_FUNC_redirect_map
&&
3664 func_id
!= BPF_FUNC_map_lookup_elem
)
3667 /* Restrict bpf side of cpumap and xskmap, open when use-cases
3670 case BPF_MAP_TYPE_CPUMAP
:
3671 if (func_id
!= BPF_FUNC_redirect_map
)
3674 case BPF_MAP_TYPE_XSKMAP
:
3675 if (func_id
!= BPF_FUNC_redirect_map
&&
3676 func_id
!= BPF_FUNC_map_lookup_elem
)
3679 case BPF_MAP_TYPE_ARRAY_OF_MAPS
:
3680 case BPF_MAP_TYPE_HASH_OF_MAPS
:
3681 if (func_id
!= BPF_FUNC_map_lookup_elem
)
3684 case BPF_MAP_TYPE_SOCKMAP
:
3685 if (func_id
!= BPF_FUNC_sk_redirect_map
&&
3686 func_id
!= BPF_FUNC_sock_map_update
&&
3687 func_id
!= BPF_FUNC_map_delete_elem
&&
3688 func_id
!= BPF_FUNC_msg_redirect_map
)
3691 case BPF_MAP_TYPE_SOCKHASH
:
3692 if (func_id
!= BPF_FUNC_sk_redirect_hash
&&
3693 func_id
!= BPF_FUNC_sock_hash_update
&&
3694 func_id
!= BPF_FUNC_map_delete_elem
&&
3695 func_id
!= BPF_FUNC_msg_redirect_hash
)
3698 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY
:
3699 if (func_id
!= BPF_FUNC_sk_select_reuseport
)
3702 case BPF_MAP_TYPE_QUEUE
:
3703 case BPF_MAP_TYPE_STACK
:
3704 if (func_id
!= BPF_FUNC_map_peek_elem
&&
3705 func_id
!= BPF_FUNC_map_pop_elem
&&
3706 func_id
!= BPF_FUNC_map_push_elem
)
3709 case BPF_MAP_TYPE_SK_STORAGE
:
3710 if (func_id
!= BPF_FUNC_sk_storage_get
&&
3711 func_id
!= BPF_FUNC_sk_storage_delete
)
3718 /* ... and second from the function itself. */
3720 case BPF_FUNC_tail_call
:
3721 if (map
->map_type
!= BPF_MAP_TYPE_PROG_ARRAY
)
3723 if (env
->subprog_cnt
> 1) {
3724 verbose(env
, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
3728 case BPF_FUNC_perf_event_read
:
3729 case BPF_FUNC_perf_event_output
:
3730 case BPF_FUNC_perf_event_read_value
:
3731 case BPF_FUNC_skb_output
:
3732 if (map
->map_type
!= BPF_MAP_TYPE_PERF_EVENT_ARRAY
)
3735 case BPF_FUNC_get_stackid
:
3736 if (map
->map_type
!= BPF_MAP_TYPE_STACK_TRACE
)
3739 case BPF_FUNC_current_task_under_cgroup
:
3740 case BPF_FUNC_skb_under_cgroup
:
3741 if (map
->map_type
!= BPF_MAP_TYPE_CGROUP_ARRAY
)
3744 case BPF_FUNC_redirect_map
:
3745 if (map
->map_type
!= BPF_MAP_TYPE_DEVMAP
&&
3746 map
->map_type
!= BPF_MAP_TYPE_DEVMAP_HASH
&&
3747 map
->map_type
!= BPF_MAP_TYPE_CPUMAP
&&
3748 map
->map_type
!= BPF_MAP_TYPE_XSKMAP
)
3751 case BPF_FUNC_sk_redirect_map
:
3752 case BPF_FUNC_msg_redirect_map
:
3753 case BPF_FUNC_sock_map_update
:
3754 if (map
->map_type
!= BPF_MAP_TYPE_SOCKMAP
)
3757 case BPF_FUNC_sk_redirect_hash
:
3758 case BPF_FUNC_msg_redirect_hash
:
3759 case BPF_FUNC_sock_hash_update
:
3760 if (map
->map_type
!= BPF_MAP_TYPE_SOCKHASH
)
3763 case BPF_FUNC_get_local_storage
:
3764 if (map
->map_type
!= BPF_MAP_TYPE_CGROUP_STORAGE
&&
3765 map
->map_type
!= BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
)
3768 case BPF_FUNC_sk_select_reuseport
:
3769 if (map
->map_type
!= BPF_MAP_TYPE_REUSEPORT_SOCKARRAY
)
3772 case BPF_FUNC_map_peek_elem
:
3773 case BPF_FUNC_map_pop_elem
:
3774 case BPF_FUNC_map_push_elem
:
3775 if (map
->map_type
!= BPF_MAP_TYPE_QUEUE
&&
3776 map
->map_type
!= BPF_MAP_TYPE_STACK
)
3779 case BPF_FUNC_sk_storage_get
:
3780 case BPF_FUNC_sk_storage_delete
:
3781 if (map
->map_type
!= BPF_MAP_TYPE_SK_STORAGE
)
3790 verbose(env
, "cannot pass map_type %d into func %s#%d\n",
3791 map
->map_type
, func_id_name(func_id
), func_id
);
static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
{
	int count = 0;

	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
		count++;

	/* We only support one arg being in raw mode at the moment,
	 * which is sufficient for the helper functions we have
	 * right now.
	 */
	return count <= 1;
}
static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
				    enum bpf_arg_type arg_next)
{
	return (arg_type_is_mem_ptr(arg_curr) &&
		!arg_type_is_mem_size(arg_next)) ||
	       (!arg_type_is_mem_ptr(arg_curr) &&
		arg_type_is_mem_size(arg_next));
}

static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
{
	/* bpf_xxx(..., buf, len) call will access 'len'
	 * bytes from memory 'buf'. Both arg types need
	 * to be paired, so make sure there's no buggy
	 * helper function specification.
	 */
	if (arg_type_is_mem_size(fn->arg1_type) ||
	    arg_type_is_mem_ptr(fn->arg5_type)  ||
	    check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
	    check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
	    check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
	    check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
		return false;

	return true;
}
static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
{
	int count = 0;

	if (arg_type_may_be_refcounted(fn->arg1_type))
		count++;
	if (arg_type_may_be_refcounted(fn->arg2_type))
		count++;
	if (arg_type_may_be_refcounted(fn->arg3_type))
		count++;
	if (arg_type_may_be_refcounted(fn->arg4_type))
		count++;
	if (arg_type_may_be_refcounted(fn->arg5_type))
		count++;

	/* A reference acquiring function cannot acquire
	 * another refcounted ptr.
	 */
	if (is_acquire_function(func_id) && count)
		return false;

	/* We only support one arg being unreferenced at the moment,
	 * which is sufficient for the helper functions we have right now.
	 */
	return count <= 1;
}

static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
{
	return check_raw_mode_ok(fn) &&
	       check_arg_pair_ok(fn) &&
	       check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
}
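/* Example of the pairing enforced by check_arg_pair_ok(): in a proto like
 * bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr), arg1 is
 * ARG_PTR_TO_UNINIT_MEM and arg2 is its size (ARG_CONST_SIZE or
 * ARG_CONST_SIZE_OR_ZERO), so every memory pointer argument is immediately
 * followed by the argument bounding its length.
 */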
/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
 * are now invalid, so turn them into unknown SCALAR_VALUE.
 */
static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
				     struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs, *reg;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++)
		if (reg_is_pkt_pointer_any(&regs[i]))
			mark_reg_unknown(env, regs, i);

	bpf_for_each_spilled_reg(i, state, reg) {
		if (!reg)
			continue;
		if (reg_is_pkt_pointer_any(reg))
			__mark_reg_unknown(reg);
	}
}

static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	int i;

	for (i = 0; i <= vstate->curframe; i++)
		__clear_all_pkt_pointers(env, vstate->frame[i]);
}
static void release_reg_references(struct bpf_verifier_env *env,
				   struct bpf_func_state *state,
				   int ref_obj_id)
{
	struct bpf_reg_state *regs = state->regs, *reg;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++)
		if (regs[i].ref_obj_id == ref_obj_id)
			mark_reg_unknown(env, regs, i);

	bpf_for_each_spilled_reg(i, state, reg) {
		if (!reg)
			continue;
		if (reg->ref_obj_id == ref_obj_id)
			__mark_reg_unknown(reg);
	}
}

/* The pointer with the specified id has released its reference to kernel
 * resources. Identify all copies of the same pointer and clear the reference.
 */
static int release_reference(struct bpf_verifier_env *env,
			     int ref_obj_id)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	int err;
	int i;

	err = release_reference_state(cur_func(env), ref_obj_id);
	if (err)
		return err;

	for (i = 0; i <= vstate->curframe; i++)
		release_reg_references(env, vstate->frame[i], ref_obj_id);

	return 0;
}
3947 static int check_func_call(struct bpf_verifier_env
*env
, struct bpf_insn
*insn
,
3950 struct bpf_verifier_state
*state
= env
->cur_state
;
3951 struct bpf_func_state
*caller
, *callee
;
3952 int i
, err
, subprog
, target_insn
;
3954 if (state
->curframe
+ 1 >= MAX_CALL_FRAMES
) {
3955 verbose(env
, "the call stack of %d frames is too deep\n",
3956 state
->curframe
+ 2);
3960 target_insn
= *insn_idx
+ insn
->imm
;
3961 subprog
= find_subprog(env
, target_insn
+ 1);
3963 verbose(env
, "verifier bug. No program starts at insn %d\n",
3968 caller
= state
->frame
[state
->curframe
];
3969 if (state
->frame
[state
->curframe
+ 1]) {
3970 verbose(env
, "verifier bug. Frame %d already allocated\n",
3971 state
->curframe
+ 1);
3975 callee
= kzalloc(sizeof(*callee
), GFP_KERNEL
);
3978 state
->frame
[state
->curframe
+ 1] = callee
;
3980 /* callee cannot access r0, r6 - r9 for reading and has to write
3981 * into its own stack before reading from it.
3982 * callee can read/write into caller's stack
3984 init_func_state(env
, callee
,
3985 /* remember the callsite, it will be used by bpf_exit */
3986 *insn_idx
/* callsite */,
3987 state
->curframe
+ 1 /* frameno within this callchain */,
3988 subprog
/* subprog number within this prog */);
3990 /* Transfer references to the callee */
3991 err
= transfer_reference_state(callee
, caller
);
3995 /* copy r1 - r5 args that callee can access. The copy includes parent
3996 * pointers, which connects us up to the liveness chain
3998 for (i
= BPF_REG_1
; i
<= BPF_REG_5
; i
++)
3999 callee
->regs
[i
] = caller
->regs
[i
];
4001 /* after the call registers r0 - r5 were scratched */
4002 for (i
= 0; i
< CALLER_SAVED_REGS
; i
++) {
4003 mark_reg_not_init(env
, caller
->regs
, caller_saved
[i
]);
4004 check_reg_arg(env
, caller_saved
[i
], DST_OP_NO_MARK
);
4007 /* only increment it after check_reg_arg() finished */
4010 if (btf_check_func_arg_match(env
, subprog
))
4013 /* and go analyze first insn of the callee */
4014 *insn_idx
= target_insn
;
4016 if (env
->log
.level
& BPF_LOG_LEVEL
) {
4017 verbose(env
, "caller:\n");
4018 print_verifier_state(env
, caller
);
4019 verbose(env
, "callee:\n");
4020 print_verifier_state(env
, callee
);
4025 static int prepare_func_exit(struct bpf_verifier_env
*env
, int *insn_idx
)
4027 struct bpf_verifier_state
*state
= env
->cur_state
;
4028 struct bpf_func_state
*caller
, *callee
;
4029 struct bpf_reg_state
*r0
;
4032 callee
= state
->frame
[state
->curframe
];
4033 r0
= &callee
->regs
[BPF_REG_0
];
4034 if (r0
->type
== PTR_TO_STACK
) {
4035 /* technically it's ok to return caller's stack pointer
4036 * (or caller's caller's pointer) back to the caller,
4037 * since these pointers are valid. Only current stack
4038 * pointer will be invalid as soon as function exits,
4039 * but let's be conservative
4041 verbose(env
, "cannot return stack pointer to the caller\n");
4046 caller
= state
->frame
[state
->curframe
];
4047 /* return to the caller whatever r0 had in the callee */
4048 caller
->regs
[BPF_REG_0
] = *r0
;
4050 /* Transfer references to the caller */
4051 err
= transfer_reference_state(caller
, callee
);
4055 *insn_idx
= callee
->callsite
+ 1;
4056 if (env
->log
.level
& BPF_LOG_LEVEL
) {
4057 verbose(env
, "returning from callee:\n");
4058 print_verifier_state(env
, callee
);
4059 verbose(env
, "to caller at %d:\n", *insn_idx
);
4060 print_verifier_state(env
, caller
);
4062 /* clear everything in the callee */
4063 free_func_state(callee
);
4064 state
->frame
[state
->curframe
+ 1] = NULL
;
static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
				   int func_id,
				   struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];

	if (ret_type != RET_INTEGER ||
	    (func_id != BPF_FUNC_get_stack &&
	     func_id != BPF_FUNC_probe_read_str))
		return;

	ret_reg->smax_value = meta->msize_smax_value;
	ret_reg->umax_value = meta->msize_umax_value;
	__reg_deduce_bounds(ret_reg);
	__reg_bound_offset(ret_reg);
}
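/* Example: after bpf_get_stack(ctx, buf, sizeof(buf), 0) the helper cannot
 * return more bytes than the 'size' argument allowed, so R0's upper bounds
 * are tightened above to the size recorded in meta by check_func_arg().
 */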
record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
                int func_id, int insn_idx)
    struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
    struct bpf_map *map = meta->map_ptr;

    if (func_id != BPF_FUNC_tail_call &&
        func_id != BPF_FUNC_map_lookup_elem &&
        func_id != BPF_FUNC_map_update_elem &&
        func_id != BPF_FUNC_map_delete_elem &&
        func_id != BPF_FUNC_map_push_elem &&
        func_id != BPF_FUNC_map_pop_elem &&
        func_id != BPF_FUNC_map_peek_elem)

        verbose(env, "kernel subsystem misconfigured verifier\n");

    /* In case of read-only, some additional restrictions
     * need to be applied in order to prevent altering the
     * state of the map from program side.
    if ((map->map_flags & BPF_F_RDONLY_PROG) &&
        (func_id == BPF_FUNC_map_delete_elem ||
         func_id == BPF_FUNC_map_update_elem ||
         func_id == BPF_FUNC_map_push_elem ||
         func_id == BPF_FUNC_map_pop_elem)) {
        verbose(env, "write into map forbidden\n");

    if (!BPF_MAP_PTR(aux->map_ptr_state))
        bpf_map_ptr_store(aux, meta->map_ptr,
                          meta->map_ptr->unpriv_array);
    else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
        bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
                          meta->map_ptr->unpriv_array);
record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
                int func_id, int insn_idx)
    struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
    struct bpf_reg_state *regs = cur_regs(env), *reg;
    struct bpf_map *map = meta->map_ptr;

    if (func_id != BPF_FUNC_tail_call)

    if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
        verbose(env, "kernel subsystem misconfigured verifier\n");

    range = tnum_range(0, map->max_entries - 1);
    reg = &regs[BPF_REG_3];

    if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
        bpf_map_key_store(aux, BPF_MAP_KEY_POISON);

    val = reg->var_off.value;
    if (bpf_map_key_unseen(aux))
        bpf_map_key_store(aux, val);
    else if (!bpf_map_key_poisoned(aux) &&
             bpf_map_key_immediate(aux) != val)
        bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
static int check_reference_leak(struct bpf_verifier_env *env)
    struct bpf_func_state *state = cur_func(env);

    for (i = 0; i < state->acquired_refs; i++) {
        verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
                state->refs[i].id, state->refs[i].insn_idx);

    return state->acquired_refs ? -EINVAL : 0;
static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
    const struct bpf_func_proto *fn = NULL;
    struct bpf_reg_state *regs;
    struct bpf_call_arg_meta meta;

    /* find function prototype */
    if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
        verbose(env, "invalid func %s#%d\n", func_id_name(func_id),

    if (env->ops->get_func_proto)
        fn = env->ops->get_func_proto(func_id, env->prog);
        verbose(env, "unknown func %s#%d\n", func_id_name(func_id),

    /* eBPF programs must be GPL compatible to use GPL-ed functions */
    if (!env->prog->gpl_compatible && fn->gpl_only) {
        verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");

    /* With LD_ABS/IND some JITs save/restore skb from r1. */
    changes_data = bpf_helper_changes_pkt_data(fn->func);
    if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
        verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
                func_id_name(func_id), func_id);

    memset(&meta, 0, sizeof(meta));
    meta.pkt_access = fn->pkt_access;

    err = check_func_proto(fn, func_id);
        verbose(env, "kernel subsystem misconfigured func %s#%d\n",
                func_id_name(func_id), func_id);

    meta.func_id = func_id;
    for (i = 0; i < 5; i++) {
        err = btf_resolve_helper_id(&env->log, fn, i);
        err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta);

    err = record_func_map(env, &meta, func_id, insn_idx);

    err = record_func_key(env, &meta, func_id, insn_idx);

    /* Mark slots with STACK_MISC in case of raw mode, stack offset
     * is inferred from register state.
    for (i = 0; i < meta.access_size; i++) {
        err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
                               BPF_WRITE, -1, false);

    if (func_id == BPF_FUNC_tail_call) {
        err = check_reference_leak(env);
            verbose(env, "tail_call would lead to reference leak\n");
    } else if (is_release_function(func_id)) {
        err = release_reference(env, meta.ref_obj_id);
            verbose(env, "func %s#%d reference has not been acquired before\n",
                    func_id_name(func_id), func_id);

    regs = cur_regs(env);

    /* check that flags argument in get_local_storage(map, flags) is 0,
     * this is required because get_local_storage() can't return an error.
    if (func_id == BPF_FUNC_get_local_storage &&
        !register_is_null(&regs[BPF_REG_2])) {
        verbose(env, "get_local_storage() doesn't support non-zero flags\n");

    /* reset caller saved regs */
    for (i = 0; i < CALLER_SAVED_REGS; i++) {
        mark_reg_not_init(env, regs, caller_saved[i]);
        check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);

    /* helper call returns 64-bit value. */
    regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;

    /* update return register (already marked as written above) */
    if (fn->ret_type == RET_INTEGER) {
        /* sets type to SCALAR_VALUE */
        mark_reg_unknown(env, regs, BPF_REG_0);
    } else if (fn->ret_type == RET_VOID) {
        regs[BPF_REG_0].type = NOT_INIT;
    } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
               fn->ret_type == RET_PTR_TO_MAP_VALUE) {
        /* There is no offset yet applied, variable or fixed */
        mark_reg_known_zero(env, regs, BPF_REG_0);
        /* remember map_ptr, so that check_map_access()
         * can check 'value_size' boundary of memory access
         * to map element returned from bpf_map_lookup_elem()
        if (meta.map_ptr == NULL) {
                "kernel subsystem misconfigured verifier\n");
        regs[BPF_REG_0].map_ptr = meta.map_ptr;
        if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
            regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
            if (map_value_has_spin_lock(meta.map_ptr))
                regs[BPF_REG_0].id = ++env->id_gen;
            regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
            regs[BPF_REG_0].id = ++env->id_gen;
    } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
        mark_reg_known_zero(env, regs, BPF_REG_0);
        regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
        regs[BPF_REG_0].id = ++env->id_gen;
    } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
        mark_reg_known_zero(env, regs, BPF_REG_0);
        regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
        regs[BPF_REG_0].id = ++env->id_gen;
    } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
        mark_reg_known_zero(env, regs, BPF_REG_0);
        regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
        regs[BPF_REG_0].id = ++env->id_gen;
        verbose(env, "unknown return type %d of func %s#%d\n",
                fn->ret_type, func_id_name(func_id), func_id);

    if (is_ptr_cast_function(func_id)) {
        /* For release_reference() */
        regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
    } else if (is_acquire_function(func_id)) {
        int id = acquire_reference_state(env, insn_idx);
        /* For mark_ptr_or_null_reg() */
        regs[BPF_REG_0].id = id;
        /* For release_reference() */
        regs[BPF_REG_0].ref_obj_id = id;

    do_refine_retval_range(regs, fn->ret_type, func_id, &meta);

    err = check_map_func_compatibility(env, meta.map_ptr, func_id);

    if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
        const char *err_str;
#ifdef CONFIG_PERF_EVENTS
        err = get_callchain_buffers(sysctl_perf_event_max_stack);
        err_str = "cannot get callchain buffer for func %s#%d\n";
        err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
            verbose(env, err_str, func_id_name(func_id), func_id);

        env->prog->has_callchain_buf = true;

        clear_all_pkt_pointers(env);
static bool signed_add_overflows(s64 a, s64 b)
    /* Do the add in u64, where overflow is well-defined */
    s64 res = (s64)((u64)a + (u64)b);

static bool signed_sub_overflows(s64 a, s64 b)
    /* Do the sub in u64, where overflow is well-defined */
    s64 res = (s64)((u64)a - (u64)b);
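/*
 * Illustrative sketch (hypothetical helper, not part of this file): the two
 * helpers above detect signed 64-bit overflow by doing the arithmetic in u64,
 * where wraparound is well-defined, and then checking whether the result
 * moved in the "wrong" direction relative to the first operand.
 */
static bool example_signed_add_overflows(s64 a, s64 b)
{
	s64 res = (s64)((u64)a + (u64)b);

	/* adding a negative value must not increase the result,
	 * adding a non-negative value must not decrease it
	 */
	if (b < 0)
		return res > a;
	return res < a;
}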
static bool check_reg_sane_offset(struct bpf_verifier_env *env,
                                  const struct bpf_reg_state *reg,
                                  enum bpf_reg_type type)
    bool known = tnum_is_const(reg->var_off);
    s64 val = reg->var_off.value;
    s64 smin = reg->smin_value;

    if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
        verbose(env, "math between %s pointer and %lld is not allowed\n",
                reg_type_str[type], val);

    if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
        verbose(env, "%s pointer offset %d is not allowed\n",
                reg_type_str[type], reg->off);

    if (smin == S64_MIN) {
        verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
                reg_type_str[type]);

    if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
        verbose(env, "value %lld makes %s pointer be out of bounds\n",
                smin, reg_type_str[type]);
static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
    return &env->insn_aux_data[env->insn_idx];
static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
                              u32 *ptr_limit, u8 opcode, bool off_is_neg)
    bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
                        (opcode == BPF_SUB && !off_is_neg);

    switch (ptr_reg->type) {
        /* Indirect variable offset stack access is prohibited in
         * unprivileged mode so it's not handled here.
        off = ptr_reg->off + ptr_reg->var_off.value;
            *ptr_limit = MAX_BPF_STACK + off;
    case PTR_TO_MAP_VALUE:
            *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
            off = ptr_reg->smin_value + ptr_reg->off;
            *ptr_limit = ptr_reg->map_ptr->value_size - off;
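/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * "limit" computed above is simply the number of bytes an ALU op may
 * legitimately move the pointer before it leaves the object.  For example,
 * a map value pointer at fixed offset 16 into a 64-byte value has
 * 64 - 16 = 48 bytes of room towards the end of the value.
 */
static u32 example_map_value_ptr_limit(u32 value_size, u32 fixed_off)
{
	/* distance from the current position to the end of the value */
	return value_size - fixed_off;
}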
static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
                                    const struct bpf_insn *insn)
    return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;

static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
                                       u32 alu_state, u32 alu_limit)
    /* If we arrived here from different branches with different
     * state or limits to sanitize, then this won't work.
    if (aux->alu_state &&
        (aux->alu_state != alu_state ||
         aux->alu_limit != alu_limit))

    /* Corresponding fixup done in fixup_bpf_calls(). */
    aux->alu_state = alu_state;
    aux->alu_limit = alu_limit;

static int sanitize_val_alu(struct bpf_verifier_env *env,
                            struct bpf_insn *insn)
    struct bpf_insn_aux_data *aux = cur_aux(env);

    if (can_skip_alu_sanitation(env, insn))

    return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
static int sanitize_ptr_alu(struct bpf_verifier_env *env,
                            struct bpf_insn *insn,
                            const struct bpf_reg_state *ptr_reg,
                            struct bpf_reg_state *dst_reg,
    struct bpf_verifier_state *vstate = env->cur_state;
    struct bpf_insn_aux_data *aux = cur_aux(env);
    bool ptr_is_dst_reg = ptr_reg == dst_reg;
    u8 opcode = BPF_OP(insn->code);
    u32 alu_state, alu_limit;
    struct bpf_reg_state tmp;

    if (can_skip_alu_sanitation(env, insn))

    /* We already marked aux for masking from non-speculative
     * paths, thus we got here in the first place. We only care
     * to explore bad access from here.
    if (vstate->speculative)

    alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
    alu_state |= ptr_is_dst_reg ?
                 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;

    if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
    if (update_alu_sanitation_state(aux, alu_state, alu_limit))

    /* Simulate and find potential out-of-bounds access under
     * speculative execution from truncation as a result of
     * masking when off was not within expected range. If off
     * sits in dst, then we temporarily need to move ptr there
     * to simulate dst (== 0) +/-= ptr. Needed, for example,
     * for cases where we use K-based arithmetic in one direction
     * and truncated reg-based in the other in order to explore
    if (!ptr_is_dst_reg) {
        *dst_reg = *ptr_reg;
    ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
    if (!ptr_is_dst_reg && ret)
    return !ret ? -EFAULT : 0;
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
 * Caller should also handle BPF_MOV case separately.
 * If we return -EACCES, caller may want to try again treating pointer as a
 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                                   struct bpf_insn *insn,
                                   const struct bpf_reg_state *ptr_reg,
                                   const struct bpf_reg_state *off_reg)
    struct bpf_verifier_state *vstate = env->cur_state;
    struct bpf_func_state *state = vstate->frame[vstate->curframe];
    struct bpf_reg_state *regs = state->regs, *dst_reg;
    bool known = tnum_is_const(off_reg->var_off);
    s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
        smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
    u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
        umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
    u32 dst = insn->dst_reg, src = insn->src_reg;
    u8 opcode = BPF_OP(insn->code);

    dst_reg = &regs[dst];

    if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
        smin_val > smax_val || umin_val > umax_val) {
        /* Taint dst register if offset had invalid bounds derived from
         * e.g. dead branches.
        __mark_reg_unknown(dst_reg);

    if (BPF_CLASS(insn->code) != BPF_ALU64) {
        /* 32-bit ALU ops on pointers produce (meaningless) scalars */
            "R%d 32-bit pointer arithmetic prohibited\n",

    switch (ptr_reg->type) {
    case PTR_TO_MAP_VALUE_OR_NULL:
        verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
                dst, reg_type_str[ptr_reg->type]);
    case CONST_PTR_TO_MAP:
    case PTR_TO_PACKET_END:
    case PTR_TO_SOCKET_OR_NULL:
    case PTR_TO_SOCK_COMMON:
    case PTR_TO_SOCK_COMMON_OR_NULL:
    case PTR_TO_TCP_SOCK:
    case PTR_TO_TCP_SOCK_OR_NULL:
    case PTR_TO_XDP_SOCK:
        verbose(env, "R%d pointer arithmetic on %s prohibited\n",
                dst, reg_type_str[ptr_reg->type]);
    case PTR_TO_MAP_VALUE:
        if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
            verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
                    off_reg == dst_reg ? dst : src);

    /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
     * The id may be overwritten later if we create a new variable offset.
    dst_reg->type = ptr_reg->type;
    dst_reg->id = ptr_reg->id;

    if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
        !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))

        ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
            verbose(env, "R%d tried to add from different maps or paths\n", dst);
        /* We can take a fixed offset as long as it doesn't overflow
         * the s32 'off' field
        if (known && (ptr_reg->off + smin_val ==
                      (s64)(s32)(ptr_reg->off + smin_val))) {
            /* pointer += K. Accumulate it into fixed offset */
            dst_reg->smin_value = smin_ptr;
            dst_reg->smax_value = smax_ptr;
            dst_reg->umin_value = umin_ptr;
            dst_reg->umax_value = umax_ptr;
            dst_reg->var_off = ptr_reg->var_off;
            dst_reg->off = ptr_reg->off + smin_val;
            dst_reg->raw = ptr_reg->raw;
        /* A new variable offset is created. Note that off_reg->off
         * == 0, since it's a scalar.
         * dst_reg gets the pointer type and since some positive
         * integer value was added to the pointer, give it a new 'id'
         * if it's a PTR_TO_PACKET.
         * this creates a new 'base' pointer, off_reg (variable) gets
         * added into the variable offset, and we copy the fixed offset
        if (signed_add_overflows(smin_ptr, smin_val) ||
            signed_add_overflows(smax_ptr, smax_val)) {
            dst_reg->smin_value = S64_MIN;
            dst_reg->smax_value = S64_MAX;
            dst_reg->smin_value = smin_ptr + smin_val;
            dst_reg->smax_value = smax_ptr + smax_val;
        if (umin_ptr + umin_val < umin_ptr ||
            umax_ptr + umax_val < umax_ptr) {
            dst_reg->umin_value = 0;
            dst_reg->umax_value = U64_MAX;
            dst_reg->umin_value = umin_ptr + umin_val;
            dst_reg->umax_value = umax_ptr + umax_val;
        dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
        dst_reg->off = ptr_reg->off;
        dst_reg->raw = ptr_reg->raw;
        if (reg_is_pkt_pointer(ptr_reg)) {
            dst_reg->id = ++env->id_gen;
            /* something was added to pkt_ptr, set range to zero */

        ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
            verbose(env, "R%d tried to sub from different maps or paths\n", dst);
        if (dst_reg == off_reg) {
            /* scalar -= pointer.  Creates an unknown scalar */
            verbose(env, "R%d tried to subtract pointer from scalar\n",
        /* We don't allow subtraction from FP, because (according to
         * test_verifier.c test "invalid fp arithmetic", JITs might not
         * be able to deal with it.
        if (ptr_reg->type == PTR_TO_STACK) {
            verbose(env, "R%d subtraction from stack pointer prohibited\n",
        if (known && (ptr_reg->off - smin_val ==
                      (s64)(s32)(ptr_reg->off - smin_val))) {
            /* pointer -= K. Subtract it from fixed offset */
            dst_reg->smin_value = smin_ptr;
            dst_reg->smax_value = smax_ptr;
            dst_reg->umin_value = umin_ptr;
            dst_reg->umax_value = umax_ptr;
            dst_reg->var_off = ptr_reg->var_off;
            dst_reg->id = ptr_reg->id;
            dst_reg->off = ptr_reg->off - smin_val;
            dst_reg->raw = ptr_reg->raw;
        /* A new variable offset is created. If the subtrahend is known
         * nonnegative, then any reg->range we had before is still good.
        if (signed_sub_overflows(smin_ptr, smax_val) ||
            signed_sub_overflows(smax_ptr, smin_val)) {
            /* Overflow possible, we know nothing */
            dst_reg->smin_value = S64_MIN;
            dst_reg->smax_value = S64_MAX;
            dst_reg->smin_value = smin_ptr - smax_val;
            dst_reg->smax_value = smax_ptr - smin_val;
        if (umin_ptr < umax_val) {
            /* Overflow possible, we know nothing */
            dst_reg->umin_value = 0;
            dst_reg->umax_value = U64_MAX;
            /* Cannot overflow (as long as bounds are consistent) */
            dst_reg->umin_value = umin_ptr - umax_val;
            dst_reg->umax_value = umax_ptr - umin_val;
        dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
        dst_reg->off = ptr_reg->off;
        dst_reg->raw = ptr_reg->raw;
        if (reg_is_pkt_pointer(ptr_reg)) {
            dst_reg->id = ++env->id_gen;
            /* something was added to pkt_ptr, set range to zero */

        /* bitwise ops on pointers are troublesome, prohibit. */
        verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
                dst, bpf_alu_string[opcode >> 4]);
        /* other operators (e.g. MUL,LSH) produce non-pointer results */
        verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
                dst, bpf_alu_string[opcode >> 4]);

    if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))

    __update_reg_bounds(dst_reg);
    __reg_deduce_bounds(dst_reg);
    __reg_bound_offset(dst_reg);

    /* For unprivileged we require that resulting offset must be in bounds
     * in order to be able to sanitize access later on.
    if (!env->allow_ptr_leaks) {
        if (dst_reg->type == PTR_TO_MAP_VALUE &&
            check_map_access(env, dst, dst_reg->off, 1, false)) {
            verbose(env, "R%d pointer arithmetic of map value goes out of range, "
                    "prohibited for !root\n", dst);
        } else if (dst_reg->type == PTR_TO_STACK &&
                   check_stack_access(env, dst_reg, dst_reg->off +
                                      dst_reg->var_off.value, 1)) {
            verbose(env, "R%d stack pointer arithmetic goes out of range, "
                    "prohibited for !root\n", dst);
/* WARNING: This function does calculations on 64-bit values, but the actual
 * execution may occur on 32-bit values. Therefore, things like bitshifts
 * need extra checks in the 32-bit case.
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                                      struct bpf_insn *insn,
                                      struct bpf_reg_state *dst_reg,
                                      struct bpf_reg_state src_reg)
    struct bpf_reg_state *regs = cur_regs(env);
    u8 opcode = BPF_OP(insn->code);
    bool src_known, dst_known;
    s64 smin_val, smax_val;
    u64 umin_val, umax_val;
    u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
    u32 dst = insn->dst_reg;

    if (insn_bitness == 32) {
        /* Relevant for 32-bit RSH: Information can propagate towards
         * LSB, so it isn't sufficient to only truncate the output to
        coerce_reg_to_size(dst_reg, 4);
        coerce_reg_to_size(&src_reg, 4);

    smin_val = src_reg.smin_value;
    smax_val = src_reg.smax_value;
    umin_val = src_reg.umin_value;
    umax_val = src_reg.umax_value;
    src_known = tnum_is_const(src_reg.var_off);
    dst_known = tnum_is_const(dst_reg->var_off);

    if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
        smin_val > smax_val || umin_val > umax_val) {
        /* Taint dst register if offset had invalid bounds derived from
         * e.g. dead branches.
        __mark_reg_unknown(dst_reg);

        opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
        __mark_reg_unknown(dst_reg);

        ret = sanitize_val_alu(env, insn);
            verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
        if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
            signed_add_overflows(dst_reg->smax_value, smax_val)) {
            dst_reg->smin_value = S64_MIN;
            dst_reg->smax_value = S64_MAX;
            dst_reg->smin_value += smin_val;
            dst_reg->smax_value += smax_val;
        if (dst_reg->umin_value + umin_val < umin_val ||
            dst_reg->umax_value + umax_val < umax_val) {
            dst_reg->umin_value = 0;
            dst_reg->umax_value = U64_MAX;
            dst_reg->umin_value += umin_val;
            dst_reg->umax_value += umax_val;
        dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);

        ret = sanitize_val_alu(env, insn);
            verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
        if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
            signed_sub_overflows(dst_reg->smax_value, smin_val)) {
            /* Overflow possible, we know nothing */
            dst_reg->smin_value = S64_MIN;
            dst_reg->smax_value = S64_MAX;
            dst_reg->smin_value -= smax_val;
            dst_reg->smax_value -= smin_val;
        if (dst_reg->umin_value < umax_val) {
            /* Overflow possible, we know nothing */
            dst_reg->umin_value = 0;
            dst_reg->umax_value = U64_MAX;
            /* Cannot overflow (as long as bounds are consistent) */
            dst_reg->umin_value -= umax_val;
            dst_reg->umax_value -= umin_val;
        dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);

        dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
        if (smin_val < 0 || dst_reg->smin_value < 0) {
            /* Ain't nobody got time to multiply that sign */
            __mark_reg_unbounded(dst_reg);
            __update_reg_bounds(dst_reg);
        /* Both values are positive, so we can work with unsigned and
         * copy the result to signed (unless it exceeds S64_MAX).
        if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
            /* Potential overflow, we know nothing */
            __mark_reg_unbounded(dst_reg);
            /* (except what we can learn from the var_off) */
            __update_reg_bounds(dst_reg);
        dst_reg->umin_value *= umin_val;
        dst_reg->umax_value *= umax_val;
        if (dst_reg->umax_value > S64_MAX) {
            /* Overflow possible, we know nothing */
            dst_reg->smin_value = S64_MIN;
            dst_reg->smax_value = S64_MAX;
            dst_reg->smin_value = dst_reg->umin_value;
            dst_reg->smax_value = dst_reg->umax_value;

        if (src_known && dst_known) {
            __mark_reg_known(dst_reg, dst_reg->var_off.value &
                                      src_reg.var_off.value);
        /* We get our minimum from the var_off, since that's inherently
         * bitwise.  Our maximum is the minimum of the operands' maxima.
        dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
        dst_reg->umin_value = dst_reg->var_off.value;
        dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
        if (dst_reg->smin_value < 0 || smin_val < 0) {
            /* Lose signed bounds when ANDing negative numbers,
             * ain't nobody got time for that.
            dst_reg->smin_value = S64_MIN;
            dst_reg->smax_value = S64_MAX;
            /* ANDing two positives gives a positive, so safe to
             * cast result into s64.
            dst_reg->smin_value = dst_reg->umin_value;
            dst_reg->smax_value = dst_reg->umax_value;
        /* We may learn something more from the var_off */
        __update_reg_bounds(dst_reg);

        if (src_known && dst_known) {
            __mark_reg_known(dst_reg, dst_reg->var_off.value |
                                      src_reg.var_off.value);
        /* We get our maximum from the var_off, and our minimum is the
         * maximum of the operands' minima
        dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
        dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
        dst_reg->umax_value = dst_reg->var_off.value |
                              dst_reg->var_off.mask;
        if (dst_reg->smin_value < 0 || smin_val < 0) {
            /* Lose signed bounds when ORing negative numbers,
             * ain't nobody got time for that.
            dst_reg->smin_value = S64_MIN;
            dst_reg->smax_value = S64_MAX;
            /* ORing two positives gives a positive, so safe to
             * cast result into s64.
            dst_reg->smin_value = dst_reg->umin_value;
            dst_reg->smax_value = dst_reg->umax_value;
        /* We may learn something more from the var_off */
        __update_reg_bounds(dst_reg);

        if (umax_val >= insn_bitness) {
            /* Shifts greater than 31 or 63 are undefined.
             * This includes shifts by a negative number.
            mark_reg_unknown(env, regs, insn->dst_reg);
        /* We lose all sign bit information (except what we can pick
        dst_reg->smin_value = S64_MIN;
        dst_reg->smax_value = S64_MAX;
        /* If we might shift our top bit out, then we know nothing */
        if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
            dst_reg->umin_value = 0;
            dst_reg->umax_value = U64_MAX;
            dst_reg->umin_value <<= umin_val;
            dst_reg->umax_value <<= umax_val;
        dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
        /* We may learn something more from the var_off */
        __update_reg_bounds(dst_reg);

        if (umax_val >= insn_bitness) {
            /* Shifts greater than 31 or 63 are undefined.
             * This includes shifts by a negative number.
            mark_reg_unknown(env, regs, insn->dst_reg);
        /* BPF_RSH is an unsigned shift.  If the value in dst_reg might
         * be negative, then either:
         * 1) src_reg might be zero, so the sign bit of the result is
         *    unknown, so we lose our signed bounds
         * 2) it's known negative, thus the unsigned bounds capture the
         * 3) the signed bounds cross zero, so they tell us nothing
         * If the value in dst_reg is known nonnegative, then again the
         * unsigned bounds capture the signed bounds.
         * Thus, in all cases it suffices to blow away our signed bounds
         * and rely on inferring new ones from the unsigned bounds and
         * var_off of the result.
        dst_reg->smin_value = S64_MIN;
        dst_reg->smax_value = S64_MAX;
        dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
        dst_reg->umin_value >>= umax_val;
        dst_reg->umax_value >>= umin_val;
        /* We may learn something more from the var_off */
        __update_reg_bounds(dst_reg);

        if (umax_val >= insn_bitness) {
            /* Shifts greater than 31 or 63 are undefined.
             * This includes shifts by a negative number.
            mark_reg_unknown(env, regs, insn->dst_reg);
        /* Upon reaching here, src_known is true and
         * umax_val is equal to umin_val.
        dst_reg->smin_value >>= umin_val;
        dst_reg->smax_value >>= umin_val;
        dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
        /* blow away the dst_reg umin_value/umax_value and rely on
         * dst_reg var_off to refine the result.
        dst_reg->umin_value = 0;
        dst_reg->umax_value = U64_MAX;
        __update_reg_bounds(dst_reg);

        mark_reg_unknown(env, regs, insn->dst_reg);

    if (BPF_CLASS(insn->code) != BPF_ALU64) {
        /* 32-bit ALU ops are (32,32)->32 */
        coerce_reg_to_size(dst_reg, 4);

    __reg_deduce_bounds(dst_reg);
    __reg_bound_offset(dst_reg);
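/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * scalar BPF_ADD case above is interval arithmetic with a conservative
 * fallback.  If either unsigned bound could wrap around, the bounds are reset
 * to "unknown" ([0, U64_MAX]) rather than keeping a wrong interval.
 */
static void example_track_unsigned_add(u64 *umin, u64 *umax,
				       u64 src_umin, u64 src_umax)
{
	if (*umin + src_umin < src_umin || *umax + src_umax < src_umax) {
		/* potential wraparound: forget everything */
		*umin = 0;
		*umax = U64_MAX;
	} else {
		*umin += src_umin;
		*umax += src_umax;
	}
}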
/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                                   struct bpf_insn *insn)
    struct bpf_verifier_state *vstate = env->cur_state;
    struct bpf_func_state *state = vstate->frame[vstate->curframe];
    struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
    struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
    u8 opcode = BPF_OP(insn->code);

    dst_reg = &regs[insn->dst_reg];
    if (dst_reg->type != SCALAR_VALUE)
    if (BPF_SRC(insn->code) == BPF_X) {
        src_reg = &regs[insn->src_reg];
        if (src_reg->type != SCALAR_VALUE) {
            if (dst_reg->type != SCALAR_VALUE) {
                /* Combining two pointers by any ALU op yields
                 * an arbitrary scalar. Disallow all math except
                 * pointer subtraction
                if (opcode == BPF_SUB && env->allow_ptr_leaks) {
                    mark_reg_unknown(env, regs, insn->dst_reg);
                verbose(env, "R%d pointer %s pointer prohibited\n",
                        bpf_alu_string[opcode >> 4]);
            /* scalar += pointer
             * This is legal, but we have to reverse our
             * src/dest handling in computing the range
            err = mark_chain_precision(env, insn->dst_reg);
            return adjust_ptr_min_max_vals(env, insn,
        } else if (ptr_reg) {
            /* pointer += scalar */
            err = mark_chain_precision(env, insn->src_reg);
            return adjust_ptr_min_max_vals(env, insn,
        /* Pretend the src is a reg with a known value, since we only
         * need to be able to read from this state.
        off_reg.type = SCALAR_VALUE;
        __mark_reg_known(&off_reg, insn->imm);
        if (ptr_reg) /* pointer += K */
            return adjust_ptr_min_max_vals(env, insn,

    /* Got here implies adding two SCALAR_VALUEs */
    if (WARN_ON_ONCE(ptr_reg)) {
        print_verifier_state(env, state);
        verbose(env, "verifier internal error: unexpected ptr_reg\n");
    if (WARN_ON(!src_reg)) {
        print_verifier_state(env, state);
        verbose(env, "verifier internal error: no src_reg\n");
    return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
    struct bpf_reg_state *regs = cur_regs(env);
    u8 opcode = BPF_OP(insn->code);

    if (opcode == BPF_END || opcode == BPF_NEG) {
        if (opcode == BPF_NEG) {
            if (BPF_SRC(insn->code) != 0 ||
                insn->src_reg != BPF_REG_0 ||
                insn->off != 0 || insn->imm != 0) {
                verbose(env, "BPF_NEG uses reserved fields\n");
            if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
                (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
                BPF_CLASS(insn->code) == BPF_ALU64) {
                verbose(env, "BPF_END uses reserved fields\n");

        /* check src operand */
        err = check_reg_arg(env, insn->dst_reg, SRC_OP);

        if (is_pointer_value(env, insn->dst_reg)) {
            verbose(env, "R%d pointer arithmetic prohibited\n",

        /* check dest operand */
        err = check_reg_arg(env, insn->dst_reg, DST_OP);

    } else if (opcode == BPF_MOV) {
        if (BPF_SRC(insn->code) == BPF_X) {
            if (insn->imm != 0 || insn->off != 0) {
                verbose(env, "BPF_MOV uses reserved fields\n");
            /* check src operand */
            err = check_reg_arg(env, insn->src_reg, SRC_OP);
            if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
                verbose(env, "BPF_MOV uses reserved fields\n");

        /* check dest operand, mark as required later */
        err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);

        if (BPF_SRC(insn->code) == BPF_X) {
            struct bpf_reg_state *src_reg = regs + insn->src_reg;
            struct bpf_reg_state *dst_reg = regs + insn->dst_reg;

            if (BPF_CLASS(insn->code) == BPF_ALU64) {
                 * copy register state to dest reg
                *dst_reg = *src_reg;
                dst_reg->live |= REG_LIVE_WRITTEN;
                dst_reg->subreg_def = DEF_NOT_SUBREG;
                if (is_pointer_value(env, insn->src_reg)) {
                        "R%d partial copy of pointer\n",
                } else if (src_reg->type == SCALAR_VALUE) {
                    *dst_reg = *src_reg;
                    dst_reg->live |= REG_LIVE_WRITTEN;
                    dst_reg->subreg_def = env->insn_idx + 1;
                    mark_reg_unknown(env, regs,
                coerce_reg_to_size(dst_reg, 4);
             * remember the value we stored into this reg
            /* clear any state __mark_reg_known doesn't set */
            mark_reg_unknown(env, regs, insn->dst_reg);
            regs[insn->dst_reg].type = SCALAR_VALUE;
            if (BPF_CLASS(insn->code) == BPF_ALU64) {
                __mark_reg_known(regs + insn->dst_reg,
                __mark_reg_known(regs + insn->dst_reg,

    } else if (opcode > BPF_END) {
        verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
    } else {    /* all other ALU ops: and, sub, xor, add, ... */
        if (BPF_SRC(insn->code) == BPF_X) {
            if (insn->imm != 0 || insn->off != 0) {
                verbose(env, "BPF_ALU uses reserved fields\n");
            /* check src1 operand */
            err = check_reg_arg(env, insn->src_reg, SRC_OP);
            if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
                verbose(env, "BPF_ALU uses reserved fields\n");

        /* check src2 operand */
        err = check_reg_arg(env, insn->dst_reg, SRC_OP);

        if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
            BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
            verbose(env, "div by zero\n");

        if ((opcode == BPF_LSH || opcode == BPF_RSH ||
             opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
            int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
            if (insn->imm < 0 || insn->imm >= size) {
                verbose(env, "invalid shift %d\n", insn->imm);

        /* check dest operand */
        err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);

        return adjust_reg_min_max_vals(env, insn);
static void __find_good_pkt_pointers(struct bpf_func_state *state,
                                     struct bpf_reg_state *dst_reg,
                                     enum bpf_reg_type type, u16 new_range)
    struct bpf_reg_state *reg;

    for (i = 0; i < MAX_BPF_REG; i++) {
        reg = &state->regs[i];
        if (reg->type == type && reg->id == dst_reg->id)
            /* keep the maximum range already checked */
            reg->range = max(reg->range, new_range);

    bpf_for_each_spilled_reg(i, state, reg) {
        if (reg->type == type && reg->id == dst_reg->id)
            reg->range = max(reg->range, new_range);

static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
                                   struct bpf_reg_state *dst_reg,
                                   enum bpf_reg_type type,
                                   bool range_right_open)
    if (dst_reg->off < 0 ||
        (dst_reg->off == 0 && range_right_open))
        /* This doesn't give us any range */

    if (dst_reg->umax_value > MAX_PACKET_OFF ||
        dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
        /* Risk of overflow.  For instance, ptr + (1<<63) may be less
         * than pkt_end, but that's because it's also less than pkt.

    new_range = dst_reg->off;
    if (range_right_open)

    /* Examples for register markings:
     *
     * pkt_data in dst register:
     *   if (r2 > pkt_end) goto <handle exception>
     *   if (r2 < pkt_end) goto <access okay>
     *   <handle exception>
     *   r2 == dst_reg, pkt_end == src_reg
     *   r2=pkt(id=n,off=8,r=0)
     *   r3=pkt(id=n,off=0,r=0)
     *
     * pkt_data in src register:
     *   if (pkt_end >= r2) goto <access okay>
     *   <handle exception>
     *   if (pkt_end <= r2) goto <handle exception>
     *   pkt_end == dst_reg, r2 == src_reg
     *   r2=pkt(id=n,off=8,r=0)
     *   r3=pkt(id=n,off=0,r=0)
     *
     * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
     * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
     * and [r3, r3 + 8-1) respectively is safe to access depending on

    /* If our ids match, then we must have the same max_value.  And we
     * don't care about the other reg's fixed offset, since if it's too big
     * the range won't allow anything.
     * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
    for (i = 0; i <= vstate->curframe; i++)
        __find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
/* compute branch direction of the expression "if (reg opcode val) goto target;"
 *  1 - branch will be taken and "goto target" will be executed
 *  0 - branch will not be taken and fall-through to next insn
 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range is [0,10]
static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
    struct bpf_reg_state reg_lo;

    if (__is_pointer_value(false, reg))

        /* For JMP32, only low 32 bits are compared, coerce_reg_to_size
         * could truncate high bits and update umin/umax according to
         * information of low bits.
        coerce_reg_to_size(reg, 4);
        /* smin/smax need special handling. For example, after coerce,
         * if smin_value is 0x00000000ffffffffLL, the value is -1 when
         * used as operand to JMP32. It is a negative number from s32's
         * point of view, while it is a positive number when seen as
         * s64. The smin/smax are kept as s64, therefore, when used with
         * JMP32, they need to be transformed into s32, then sign
         * extended back to s64.
         *
         * Also, smin/smax were copied from umin/umax. If umin/umax has
         * different sign bit, then min/max relationship doesn't
         * maintain after casting into s32, for this case, set smin/smax
        if ((reg->umax_value ^ reg->umin_value) &
            reg->smin_value = S32_MIN;
            reg->smax_value = S32_MAX;
            reg->smin_value = (s64)(s32)reg->smin_value;
            reg->smax_value = (s64)(s32)reg->smax_value;
        sval = (s64)(s32)val;

        if (tnum_is_const(reg->var_off))
            return !!tnum_equals_const(reg->var_off, val);
        if (tnum_is_const(reg->var_off))
            return !tnum_equals_const(reg->var_off, val);
        if ((~reg->var_off.mask & reg->var_off.value) & val)
        if (!((reg->var_off.mask | reg->var_off.value) & val))
        if (reg->umin_value > val)
        else if (reg->umax_value <= val)
        if (reg->smin_value > sval)
        else if (reg->smax_value < sval)
        if (reg->umax_value < val)
        else if (reg->umin_value >= val)
        if (reg->smax_value < sval)
        else if (reg->smin_value >= sval)
        if (reg->umin_value >= val)
        else if (reg->umax_value < val)
        if (reg->smin_value >= sval)
        else if (reg->smax_value < sval)
        if (reg->umax_value <= val)
        else if (reg->umin_value > val)
        if (reg->smax_value <= sval)
        else if (reg->smin_value > sval)
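/*
 * Illustrative sketch (hypothetical helper, not part of this file): for an
 * unsigned "if (reg > val)" the three-way answer above falls straight out of
 * the tracked bounds: always taken if even the smallest possible value is
 * above val, never taken if even the largest possible value is at or below
 * val, and unknown otherwise.
 */
static int example_is_jgt_taken(u64 umin, u64 umax, u64 val)
{
	if (umin > val)
		return 1;	/* always taken */
	if (umax <= val)
		return 0;	/* never taken */
	return -1;		/* depends on the runtime value */
}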
/* Generate min value of the high 32-bit from TNUM info. */
static u64 gen_hi_min(struct tnum var)
    return var.value & ~0xffffffffULL;

/* Generate max value of the high 32-bit from TNUM info. */
static u64 gen_hi_max(struct tnum var)
    return (var.value | var.mask) & ~0xffffffffULL;

/* Return true if VAL is compared with a s64 sign extended from s32, and they
 * are with the same signedness.
static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
    return ((s32)sval >= 0 &&
            reg->smin_value >= 0 && reg->smax_value <= S32_MAX) ||
            reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
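/*
 * Illustrative sketch (hypothetical helper, not part of this file): a tnum
 * describes a value as (value, mask), where mask bits are unknown.  Masking
 * off the low 32 bits gives the smallest value the upper half can take (all
 * unknown high bits as 0) and the largest (all unknown high bits as 1), which
 * is what gen_hi_min()/gen_hi_max() feed into the JMP32 bound adjustments
 * below.
 */
static void example_hi32_bounds(struct tnum var, u64 *hi_min, u64 *hi_max)
{
	*hi_min = var.value & ~0xffffffffULL;		   /* unknown bits as 0 */
	*hi_max = (var.value | var.mask) & ~0xffffffffULL; /* unknown bits as 1 */
}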
/* Adjusts the register min/max values in the case that the dst_reg is the
 * variable register that we are working on, and src_reg is a constant or we're
 * simply doing a BPF_K check.
 * In JEQ/JNE cases we also adjust the var_off values.
static void reg_set_min_max(struct bpf_reg_state *true_reg,
                            struct bpf_reg_state *false_reg, u64 val,
                            u8 opcode, bool is_jmp32)
    /* If the dst_reg is a pointer, we can't learn anything about its
     * variable offset from the compare (unless src_reg were a pointer into
     * the same object, but we don't bother with that).
     * Since false_reg and true_reg have the same type by construction, we
     * only need to check one of them for pointerness.
    if (__is_pointer_value(false, false_reg))

    val = is_jmp32 ? (u32)val : val;
    sval = is_jmp32 ? (s64)(s32)val : (s64)val;

        struct bpf_reg_state *reg =
                opcode == BPF_JEQ ? true_reg : false_reg;

        /* For BPF_JEQ, if this is false we know nothing Jon Snow, but
         * if it is true we know the value for sure. Likewise for
            u64 old_v = reg->var_off.value;
            u64 hi_mask = ~0xffffffffULL;

            reg->var_off.value = (old_v & hi_mask) | val;
            reg->var_off.mask &= hi_mask;
            __mark_reg_known(reg, val);

        false_reg->var_off = tnum_and(false_reg->var_off,
        if (is_power_of_2(val))
            true_reg->var_off = tnum_or(true_reg->var_off,

        u64 false_umax = opcode == BPF_JGT ? val    : val - 1;
        u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
            false_umax += gen_hi_max(false_reg->var_off);
            true_umin += gen_hi_min(true_reg->var_off);
        false_reg->umax_value = min(false_reg->umax_value, false_umax);
        true_reg->umin_value = max(true_reg->umin_value, true_umin);

        s64 false_smax = opcode == BPF_JSGT ? sval    : sval - 1;
        s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
        /* If the full s64 was not sign-extended from s32 then don't
         * deduce further info.
        if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
        false_reg->smax_value = min(false_reg->smax_value, false_smax);
        true_reg->smin_value = max(true_reg->smin_value, true_smin);

        u64 false_umin = opcode == BPF_JLT ? val    : val + 1;
        u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
            false_umin += gen_hi_min(false_reg->var_off);
            true_umax += gen_hi_max(true_reg->var_off);
        false_reg->umin_value = max(false_reg->umin_value, false_umin);
        true_reg->umax_value = min(true_reg->umax_value, true_umax);

        s64 false_smin = opcode == BPF_JSLT ? sval    : sval + 1;
        s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
        if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
        false_reg->smin_value = max(false_reg->smin_value, false_smin);
        true_reg->smax_value = min(true_reg->smax_value, true_smax);

    __reg_deduce_bounds(false_reg);
    __reg_deduce_bounds(true_reg);
    /* We might have learned some bits from the bounds. */
    __reg_bound_offset(false_reg);
    __reg_bound_offset(true_reg);
        __reg_bound_offset32(false_reg);
        __reg_bound_offset32(true_reg);
    /* Intersecting with the old var_off might have improved our bounds
     * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
     * then new var_off is (0; 0x7f...fc) which improves our umax.
    __update_reg_bounds(false_reg);
    __update_reg_bounds(true_reg);
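/*
 * Illustrative sketch (hypothetical helper, not part of this file): for
 * "if (reg > val) goto L" with val < U64_MAX, the fall-through copy of the
 * register can assume reg <= val and the taken copy can assume reg >= val + 1.
 * reg_set_min_max() above is this idea generalised over all compare opcodes
 * and combined with the signed and var_off views of the value.
 */
static void example_refine_jgt_bounds(u64 *false_umax, u64 *true_umin, u64 val)
{
	if (*false_umax > val)
		*false_umax = val;	/* branch not taken: reg <= val */
	if (*true_umin < val + 1)
		*true_umin = val + 1;	/* branch taken: reg > val */
}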
/* Same as above, but for the case that dst_reg holds a constant and src_reg is
static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
                                struct bpf_reg_state *false_reg, u64 val,
                                u8 opcode, bool is_jmp32)
    if (__is_pointer_value(false, false_reg))

    val = is_jmp32 ? (u32)val : val;
    sval = is_jmp32 ? (s64)(s32)val : (s64)val;

        struct bpf_reg_state *reg =
                opcode == BPF_JEQ ? true_reg : false_reg;

            u64 old_v = reg->var_off.value;
            u64 hi_mask = ~0xffffffffULL;

            reg->var_off.value = (old_v & hi_mask) | val;
            reg->var_off.mask &= hi_mask;
            __mark_reg_known(reg, val);

        false_reg->var_off = tnum_and(false_reg->var_off,
        if (is_power_of_2(val))
            true_reg->var_off = tnum_or(true_reg->var_off,

        u64 false_umin = opcode == BPF_JGT ? val    : val + 1;
        u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
            false_umin += gen_hi_min(false_reg->var_off);
            true_umax += gen_hi_max(true_reg->var_off);
        false_reg->umin_value = max(false_reg->umin_value, false_umin);
        true_reg->umax_value = min(true_reg->umax_value, true_umax);

        s64 false_smin = opcode == BPF_JSGT ? sval    : sval + 1;
        s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval;
        if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
        false_reg->smin_value = max(false_reg->smin_value, false_smin);
        true_reg->smax_value = min(true_reg->smax_value, true_smax);

        u64 false_umax = opcode == BPF_JLT ? val    : val - 1;
        u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
            false_umax += gen_hi_max(false_reg->var_off);
            true_umin += gen_hi_min(true_reg->var_off);
        false_reg->umax_value = min(false_reg->umax_value, false_umax);
        true_reg->umin_value = max(true_reg->umin_value, true_umin);

        s64 false_smax = opcode == BPF_JSLT ? sval    : sval - 1;
        s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval;
        if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
        false_reg->smax_value = min(false_reg->smax_value, false_smax);
        true_reg->smin_value = max(true_reg->smin_value, true_smin);

    __reg_deduce_bounds(false_reg);
    __reg_deduce_bounds(true_reg);
    /* We might have learned some bits from the bounds. */
    __reg_bound_offset(false_reg);
    __reg_bound_offset(true_reg);
        __reg_bound_offset32(false_reg);
        __reg_bound_offset32(true_reg);
    /* Intersecting with the old var_off might have improved our bounds
     * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
     * then new var_off is (0; 0x7f...fc) which improves our umax.
    __update_reg_bounds(false_reg);
    __update_reg_bounds(true_reg);
/* Regs are known to be equal, so intersect their min/max/var_off */
static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
                                  struct bpf_reg_state *dst_reg)
    src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
                                                    dst_reg->umin_value);
    src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
                                                    dst_reg->umax_value);
    src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
                                                    dst_reg->smin_value);
    src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
                                                    dst_reg->smax_value);
    src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
    /* We might have learned new bounds from the var_off. */
    __update_reg_bounds(src_reg);
    __update_reg_bounds(dst_reg);
    /* We might have learned something about the sign bit. */
    __reg_deduce_bounds(src_reg);
    __reg_deduce_bounds(dst_reg);
    /* We might have learned some bits from the bounds. */
    __reg_bound_offset(src_reg);
    __reg_bound_offset(dst_reg);
    /* Intersecting with the old var_off might have improved our bounds
     * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
     * then new var_off is (0; 0x7f...fc) which improves our umax.
    __update_reg_bounds(src_reg);
    __update_reg_bounds(dst_reg);

static void reg_combine_min_max(struct bpf_reg_state *true_src,
                                struct bpf_reg_state *true_dst,
                                struct bpf_reg_state *false_src,
                                struct bpf_reg_state *false_dst,
        __reg_combine_min_max(true_src, true_dst);
        __reg_combine_min_max(false_src, false_dst);
static void mark_ptr_or_null_reg(struct bpf_func_state *state,
                                 struct bpf_reg_state *reg, u32 id,
    if (reg_type_may_be_null(reg->type) && reg->id == id) {
        /* Old offset (both fixed and variable parts) should
         * have been known-zero, because we don't allow pointer
         * arithmetic on pointers that might be NULL.
        if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
                         !tnum_equals_const(reg->var_off, 0) ||
            __mark_reg_known_zero(reg);

            reg->type = SCALAR_VALUE;
        } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
            if (reg->map_ptr->inner_map_meta) {
                reg->type = CONST_PTR_TO_MAP;
                reg->map_ptr = reg->map_ptr->inner_map_meta;
            } else if (reg->map_ptr->map_type ==
                       BPF_MAP_TYPE_XSKMAP) {
                reg->type = PTR_TO_XDP_SOCK;
                reg->type = PTR_TO_MAP_VALUE;
        } else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
            reg->type = PTR_TO_SOCKET;
        } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) {
            reg->type = PTR_TO_SOCK_COMMON;
        } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
            reg->type = PTR_TO_TCP_SOCK;

            /* We don't need id and ref_obj_id from this point
             * onwards anymore, thus we should better reset it,
             * so that state pruning has chances to take effect.
            reg->ref_obj_id = 0;
        } else if (!reg_may_point_to_spin_lock(reg)) {
            /* For not-NULL ptr, reg->ref_obj_id will be reset
             * in release_reg_references().
             *
             * reg->id is still used by spin_lock ptr. Other
             * than spin_lock ptr type, reg->id can be reset.

static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
    struct bpf_reg_state *reg;

    for (i = 0; i < MAX_BPF_REG; i++)
        mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);

    bpf_for_each_spilled_reg(i, state, reg) {
        mark_ptr_or_null_reg(state, reg, id, is_null);

/* The logic is similar to find_good_pkt_pointers(), both could eventually
 * be folded together at some point.
static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
    struct bpf_func_state *state = vstate->frame[vstate->curframe];
    struct bpf_reg_state *regs = state->regs;
    u32 ref_obj_id = regs[regno].ref_obj_id;
    u32 id = regs[regno].id;

    if (ref_obj_id && ref_obj_id == id && is_null)
        /* regs[regno] is in the " == NULL" branch.
         * No one could have freed the reference state before
         * doing the NULL check.
        WARN_ON_ONCE(release_reference_state(state, id));

    for (i = 0; i <= vstate->curframe; i++)
        __mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
static bool try_match_pkt_pointers(const struct bpf_insn *insn,
                                   struct bpf_reg_state *dst_reg,
                                   struct bpf_reg_state *src_reg,
                                   struct bpf_verifier_state *this_branch,
                                   struct bpf_verifier_state *other_branch)
    if (BPF_SRC(insn->code) != BPF_X)

    /* Pointers are always 64-bit. */
    if (BPF_CLASS(insn->code) == BPF_JMP32)

    switch (BPF_OP(insn->code)) {
        if ((dst_reg->type == PTR_TO_PACKET &&
             src_reg->type == PTR_TO_PACKET_END) ||
            (dst_reg->type == PTR_TO_PACKET_META &&
             reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
            /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
            find_good_pkt_pointers(this_branch, dst_reg,
                                   dst_reg->type, false);
        } else if ((dst_reg->type == PTR_TO_PACKET_END &&
                    src_reg->type == PTR_TO_PACKET) ||
                   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
                    src_reg->type == PTR_TO_PACKET_META)) {
            /* pkt_end > pkt_data', pkt_data > pkt_meta' */
            find_good_pkt_pointers(other_branch, src_reg,
                                   src_reg->type, true);

        if ((dst_reg->type == PTR_TO_PACKET &&
             src_reg->type == PTR_TO_PACKET_END) ||
            (dst_reg->type == PTR_TO_PACKET_META &&
             reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
            /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
            find_good_pkt_pointers(other_branch, dst_reg,
                                   dst_reg->type, true);
        } else if ((dst_reg->type == PTR_TO_PACKET_END &&
                    src_reg->type == PTR_TO_PACKET) ||
                   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
                    src_reg->type == PTR_TO_PACKET_META)) {
            /* pkt_end < pkt_data', pkt_data > pkt_meta' */
            find_good_pkt_pointers(this_branch, src_reg,
                                   src_reg->type, false);

        if ((dst_reg->type == PTR_TO_PACKET &&
             src_reg->type == PTR_TO_PACKET_END) ||
            (dst_reg->type == PTR_TO_PACKET_META &&
             reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
            /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
            find_good_pkt_pointers(this_branch, dst_reg,
                                   dst_reg->type, true);
        } else if ((dst_reg->type == PTR_TO_PACKET_END &&
                    src_reg->type == PTR_TO_PACKET) ||
                   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
                    src_reg->type == PTR_TO_PACKET_META)) {
            /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
            find_good_pkt_pointers(other_branch, src_reg,
                                   src_reg->type, false);

        if ((dst_reg->type == PTR_TO_PACKET &&
             src_reg->type == PTR_TO_PACKET_END) ||
            (dst_reg->type == PTR_TO_PACKET_META &&
             reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
            /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
            find_good_pkt_pointers(other_branch, dst_reg,
                                   dst_reg->type, false);
        } else if ((dst_reg->type == PTR_TO_PACKET_END &&
                    src_reg->type == PTR_TO_PACKET) ||
                   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
                    src_reg->type == PTR_TO_PACKET_META)) {
            /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
            find_good_pkt_pointers(this_branch, src_reg,
                                   src_reg->type, true);
static int check_cond_jmp_op(struct bpf_verifier_env *env,
                             struct bpf_insn *insn, int *insn_idx)
    struct bpf_verifier_state *this_branch = env->cur_state;
    struct bpf_verifier_state *other_branch;
    struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
    struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
    u8 opcode = BPF_OP(insn->code);

    /* Only conditional jumps are expected to reach here. */
    if (opcode == BPF_JA || opcode > BPF_JSLE) {
        verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);

    if (BPF_SRC(insn->code) == BPF_X) {
        if (insn->imm != 0) {
            verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");

        /* check src1 operand */
        err = check_reg_arg(env, insn->src_reg, SRC_OP);

        if (is_pointer_value(env, insn->src_reg)) {
            verbose(env, "R%d pointer comparison prohibited\n",
        src_reg = &regs[insn->src_reg];
        if (insn->src_reg != BPF_REG_0) {
            verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");

    /* check src2 operand */
    err = check_reg_arg(env, insn->dst_reg, SRC_OP);

    dst_reg = &regs[insn->dst_reg];
    is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;

    if (BPF_SRC(insn->code) == BPF_K)
        pred = is_branch_taken(dst_reg, insn->imm,
    else if (src_reg->type == SCALAR_VALUE &&
             tnum_is_const(src_reg->var_off))
        pred = is_branch_taken(dst_reg, src_reg->var_off.value,
        err = mark_chain_precision(env, insn->dst_reg);
        if (BPF_SRC(insn->code) == BPF_X && !err)
            err = mark_chain_precision(env, insn->src_reg);
        /* only follow the goto, ignore fall-through */
        *insn_idx += insn->off;
    } else if (pred == 0) {
        /* only follow fall-through branch, since
         * that's where the program will go

    other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
    other_branch_regs = other_branch->frame[other_branch->curframe]->regs;

    /* detect if we are comparing against a constant value so we can adjust
     * our min/max values for our dst register.
     * this is only legit if both are scalars (or pointers to the same
     * object, I suppose, but we don't support that right now), because
     * otherwise the different base pointers mean the offsets aren't
    if (BPF_SRC(insn->code) == BPF_X) {
        struct bpf_reg_state *src_reg = &regs[insn->src_reg];
        struct bpf_reg_state lo_reg0 = *dst_reg;
        struct bpf_reg_state lo_reg1 = *src_reg;
        struct bpf_reg_state *src_lo, *dst_lo;

        coerce_reg_to_size(dst_lo, 4);
        coerce_reg_to_size(src_lo, 4);

        if (dst_reg->type == SCALAR_VALUE &&
            src_reg->type == SCALAR_VALUE) {
            if (tnum_is_const(src_reg->var_off) ||
                (is_jmp32 && tnum_is_const(src_lo->var_off)))
                reg_set_min_max(&other_branch_regs[insn->dst_reg],
                                ? src_lo->var_off.value
                                : src_reg->var_off.value,
            else if (tnum_is_const(dst_reg->var_off) ||
                     (is_jmp32 && tnum_is_const(dst_lo->var_off)))
                reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
                                    ? dst_lo->var_off.value
                                    : dst_reg->var_off.value,
            else if (!is_jmp32 &&
                     (opcode == BPF_JEQ || opcode == BPF_JNE))
                /* Comparing for equality, we can combine knowledge */
                reg_combine_min_max(&other_branch_regs[insn->src_reg],
                                    &other_branch_regs[insn->dst_reg],
                                    src_reg, dst_reg, opcode);
    } else if (dst_reg->type == SCALAR_VALUE) {
        reg_set_min_max(&other_branch_regs[insn->dst_reg],
                        dst_reg, insn->imm, opcode, is_jmp32);

    /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
     * NOTE: these optimizations below are related with pointer comparison
     *       which will never be JMP32.
    if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
        insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
        reg_type_may_be_null(dst_reg->type)) {
        /* Mark all identical registers in each branch as either
         * safe or unknown depending R == 0 or R != 0 conditional.
        mark_ptr_or_null_regs(this_branch, insn->dst_reg,
        mark_ptr_or_null_regs(other_branch, insn->dst_reg,
    } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
                                       this_branch, other_branch) &&
               is_pointer_value(env, insn->dst_reg)) {
        verbose(env, "R%d pointer comparison prohibited\n",

    if (env->log.level & BPF_LOG_LEVEL)
        print_verifier_state(env, this_branch->frame[this_branch->curframe]);
/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_insn_aux_data *aux = cur_aux(env);
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_map *map;

	if (BPF_SIZE(insn->code) != BPF_DW) {
		verbose(env, "invalid BPF_LD_IMM insn\n");

	if (insn->off != 0) {
		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");

	err = check_reg_arg(env, insn->dst_reg, DST_OP);

	if (insn->src_reg == 0) {
		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;

		regs[insn->dst_reg].type = SCALAR_VALUE;
		__mark_reg_known(&regs[insn->dst_reg], imm);

	map = env->used_maps[aux->map_index];
	mark_reg_known_zero(env, regs, insn->dst_reg);
	regs[insn->dst_reg].map_ptr = map;

	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
		regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
		regs[insn->dst_reg].off = aux->map_off;
		if (map_value_has_spin_lock(map))
			regs[insn->dst_reg].id = ++env->id_gen;
	} else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
		regs[insn->dst_reg].type = CONST_PTR_TO_MAP;

		verbose(env, "bpf verifier is misconfigured\n");
static bool may_access_skb(enum bpf_prog_type type)
{
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
/* verify safety of LD_ABS|LD_IND instructions:
 * - they can only appear in the programs where ctx == skb
 * - since they are wrappers of function calls, they scratch R1-R5 registers,
 *   preserve R6-R9, and store return value into R0
 *
 * ctx == skb == R6 == CTX
 *
 * SRC == any register
 * IMM == 32-bit immediate
 *
 * R0 - 8/16/32-bit skb data converted to cpu endianness
 */
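/* Illustrative example, not part of the verifier itself: a minimal insn
 * sequence of the shape check_ld_abs() accepts, assuming a socket filter
 * program whose ctx (the skb) arrives in R1:
 *
 *   BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),   R6 = ctx, the implicit LD_ABS input
 *   BPF_LD_ABS(BPF_H, 12),                 R0 = 16-bit load at skb offset 12
 *   BPF_MOV64_IMM(BPF_REG_0, 0),
 *   BPF_EXIT_INSN(),
 *
 * Because LD_ABS is a hidden function call, anything live in R1-R5 before it
 * is clobbered and must be kept in R6-R9 or on the stack.
 */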
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_reg_state *regs = cur_regs(env);
	u8 mode = BPF_MODE(insn->code);

	if (!may_access_skb(env->prog->type)) {
		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");

	if (!env->ops->gen_ld_abs) {
		verbose(env, "bpf verifier is misconfigured\n");

	if (env->subprog_cnt > 1) {
		/* when the program has an LD_ABS insn, JITs and interpreter assume
		 * that r1 == ctx == skb which is not the case for callees
		 * that can have arbitrary arguments. It's problematic
		 * for main prog as well since JITs would need to analyze
		 * all functions in order to make proper register save/restore
		 * decisions in the main prog. Hence disallow LD_ABS with calls
		 */
		verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");

	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
	    BPF_SIZE(insn->code) == BPF_DW ||
	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");

	/* check whether implicit source operand (register R6) is readable */
	err = check_reg_arg(env, BPF_REG_6, SRC_OP);

	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
	 * gen_ld_abs() may terminate the program at runtime, leading to
	 */
	err = check_reference_leak(env);
		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");

	if (env->cur_state->active_spin_lock) {
		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");

	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");

	if (mode == BPF_IND) {
		/* check explicit source operand */
		err = check_reg_arg(env, insn->src_reg, SRC_OP);

	/* reset caller saved regs to unreadable */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		mark_reg_not_init(env, regs, caller_saved[i]);
		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);

	/* mark destination R0 register as readable, since it contains
	 * the value fetched from the packet.
	 * Already marked as written above.
	 */
	mark_reg_unknown(env, regs, BPF_REG_0);
	/* ld_abs loads up to 32-bit skb data. */
	regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
static int check_return_code(struct bpf_verifier_env *env)
{
	struct tnum enforce_attach_type_range = tnum_unknown;
	struct bpf_reg_state *reg;
	struct tnum range = tnum_range(0, 1);

	switch (env->prog->type) {
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
		    env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG)
			range = tnum_range(1, 1);
	case BPF_PROG_TYPE_CGROUP_SKB:
		if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
			range = tnum_range(0, 3);
			enforce_attach_type_range = tnum_range(2, 3);
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
		if (!env->prog->aux->attach_btf_id)
		range = tnum_const(0);

	reg = cur_regs(env) + BPF_REG_0;
	if (reg->type != SCALAR_VALUE) {
		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
			reg_type_str[reg->type]);

	if (!tnum_in(range, reg->var_off)) {
		verbose(env, "At program exit the register R0 ");
		if (!tnum_is_unknown(reg->var_off)) {
			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
			verbose(env, "has value %s", tn_buf);
			verbose(env, "has unknown scalar value");
		tnum_strn(tn_buf, sizeof(tn_buf), range);
		verbose(env, " should have been in %s\n", tn_buf);

	if (!tnum_is_unknown(enforce_attach_type_range) &&
	    tnum_in(enforce_attach_type_range, reg->var_off))
		env->prog->enforce_expected_attach_type = 1;
/* non-recursive DFS pseudo code
 * 1  procedure DFS-iterative(G,v):
 * 2      label v as discovered
 * 3      let S be a stack
 * 5      while S is not empty
 * 7          if t is what we're looking for:
 * 9          for all edges e in G.adjacentEdges(t) do
 * 10             if edge e is already labelled
 * 11                 continue with the next edge
 * 12             w <- G.adjacentVertex(t,e)
 * 13             if vertex w is not discovered and not explored
 * 14                 label e as tree-edge
 * 15                 label w as discovered
 * 18             else if vertex w is discovered
 * 19                 label e as back-edge
 * 21             // vertex w is explored
 * 22                 label e as forward- or cross-edge
 * 23         label t as explored
 *
 * 0x11 - discovered and fall-through edge labelled
 * 0x12 - discovered and fall-through and branch edges labelled
 */
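/* Illustrative sketch only, assuming the conventional edge/state encoding
 * (DISCOVERED = 0x10, EXPLORED = 0x20, FALLTHROUGH = 1, BRANCH = 2) from the
 * defines elided above: a conditional jump at insn t typically moves through
 * insn_state[t] = 0x10 -> 0x11 (fall-through edge pushed) -> 0x12 (branch
 * edge pushed) and finally EXPLORED once it is popped off the DFS stack.
 */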
static u32 state_htab_size(struct bpf_verifier_env *env)
{
	return env->prog->len;
}

static struct bpf_verifier_state_list **explored_state(
					struct bpf_verifier_env *env,
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_func_state *state = cur->frame[cur->curframe];

	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
}

static void init_explored_state(struct bpf_verifier_env *env, int idx)
{
	env->insn_aux_data[idx].prune_point = true;
}
/* t, w, e - match pseudo-code above:
 * t - index of current instruction
 * w - next instruction
 */
static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
	int *insn_stack = env->cfg.insn_stack;
	int *insn_state = env->cfg.insn_state;

	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))

	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))

	if (w < 0 || w >= env->prog->len) {
		verbose_linfo(env, t, "%d: ", t);
		verbose(env, "jump out of range from insn %d to %d\n", t, w);

	/* mark branch target for state pruning */
	init_explored_state(env, w);

	if (insn_state[w] == 0) {
		insn_state[t] = DISCOVERED | e;
		insn_state[w] = DISCOVERED;
		if (env->cfg.cur_stack >= env->prog->len)
		insn_stack[env->cfg.cur_stack++] = w;
	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
		if (loop_ok && env->allow_ptr_leaks)
		verbose_linfo(env, t, "%d: ", t);
		verbose_linfo(env, w, "%d: ", w);
		verbose(env, "back-edge from insn %d to %d\n", t, w);
	} else if (insn_state[w] == EXPLORED) {
		/* forward- or cross-edge */
		insn_state[t] = DISCOVERED | e;

		verbose(env, "insn state internal bug\n");
/* non-recursive depth-first-search to detect loops in BPF program
 * loop == back-edge in directed graph
 */
static int check_cfg(struct bpf_verifier_env *env)
{
	struct bpf_insn *insns = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int *insn_stack, *insn_state;

	insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);

	insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);

	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
	insn_stack[0] = 0; /* 0 is the first instruction */
	env->cfg.cur_stack = 1;

	if (env->cfg.cur_stack == 0)
	t = insn_stack[env->cfg.cur_stack - 1];

	if (BPF_CLASS(insns[t].code) == BPF_JMP ||
	    BPF_CLASS(insns[t].code) == BPF_JMP32) {
		u8 opcode = BPF_OP(insns[t].code);

		if (opcode == BPF_EXIT) {
		} else if (opcode == BPF_CALL) {
			ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
			if (t + 1 < insn_cnt)
				init_explored_state(env, t + 1);
			if (insns[t].src_reg == BPF_PSEUDO_CALL) {
				init_explored_state(env, t);
				ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
		} else if (opcode == BPF_JA) {
			if (BPF_SRC(insns[t].code) != BPF_K) {

			/* unconditional jump with single edge */
			ret = push_insn(t, t + insns[t].off + 1,
					FALLTHROUGH, env, true);

			/* unconditional jmp is not a good pruning point,
			 * but it's marked, since backtracking needs
			 * to record jmp history in is_state_visited().
			 */
			init_explored_state(env, t + insns[t].off + 1);
			/* tell verifier to check for equivalent states
			 * after every call and jump
			 */
			if (t + 1 < insn_cnt)
				init_explored_state(env, t + 1);

			/* conditional jump with two edges */
			init_explored_state(env, t);
			ret = push_insn(t, t + 1, FALLTHROUGH, env, true);

			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true);

		/* all other non-branch instructions with single
		 */
		ret = push_insn(t, t + 1, FALLTHROUGH, env, false);

	insn_state[t] = EXPLORED;
	if (env->cfg.cur_stack-- <= 0) {
		verbose(env, "pop stack internal bug\n");

	for (i = 0; i < insn_cnt; i++) {
		if (insn_state[i] != EXPLORED) {
			verbose(env, "unreachable insn %d\n", i);

	ret = 0; /* cfg looks good */

	env->cfg.insn_state = env->cfg.insn_stack = NULL;
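/* Illustrative example, not part of the original file: a program whose first
 * insn is BPF_JMP_A(-1) jumps back to itself, so push_insn() finds the target
 * already DISCOVERED and, unless the bounded-loop path applies
 * (loop_ok && env->allow_ptr_leaks), check_cfg() rejects it with
 * "back-edge from insn 0 to 0".
 */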
/* The minimum supported BTF func info size */
#define MIN_BPF_FUNCINFO_SIZE	8
#define MAX_FUNCINFO_REC_SIZE	252
static int check_btf_func(struct bpf_verifier_env *env,
			  const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	u32 i, nfuncs, urec_size, min_size;
	u32 krec_size = sizeof(struct bpf_func_info);
	struct bpf_func_info *krecord;
	struct bpf_func_info_aux *info_aux = NULL;
	const struct btf_type *type;
	struct bpf_prog *prog;
	const struct btf *btf;
	void __user *urecord;
	u32 prev_offset = 0;

	nfuncs = attr->func_info_cnt;

	if (nfuncs != env->subprog_cnt) {
		verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");

	urec_size = attr->func_info_rec_size;
	if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
	    urec_size > MAX_FUNCINFO_REC_SIZE ||
	    urec_size % sizeof(u32)) {
		verbose(env, "invalid func info rec size %u\n", urec_size);

	btf = prog->aux->btf;

	urecord = u64_to_user_ptr(attr->func_info);
	min_size = min_t(u32, krec_size, urec_size);

	krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
	info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);

	for (i = 0; i < nfuncs; i++) {
		ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
		if (ret == -E2BIG) {
			verbose(env, "nonzero tailing record in func info");
			/* set the size kernel expects so loader can zero
			 * out the rest of the record.
			 */
			if (put_user(min_size, &uattr->func_info_rec_size))

		if (copy_from_user(&krecord[i], urecord, min_size)) {

		/* check insn_off */
		if (krecord[i].insn_off) {
				"nonzero insn_off %u for the first func info record",
				krecord[i].insn_off);
		} else if (krecord[i].insn_off <= prev_offset) {
				"same or smaller insn offset (%u) than previous func info record (%u)",
				krecord[i].insn_off, prev_offset);

		if (env->subprog_info[i].start != krecord[i].insn_off) {
			verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");

		type = btf_type_by_id(btf, krecord[i].type_id);
		if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) {
			verbose(env, "invalid type id %d in func info",
				krecord[i].type_id);

		prev_offset = krecord[i].insn_off;
		urecord += urec_size;

	prog->aux->func_info = krecord;
	prog->aux->func_info_cnt = nfuncs;
	prog->aux->func_info_aux = info_aux;
static void adjust_btf_func(struct bpf_verifier_env *env)
{
	struct bpf_prog_aux *aux = env->prog->aux;

	if (!aux->func_info)

	for (i = 0; i < env->subprog_cnt; i++)
		aux->func_info[i].insn_off = env->subprog_info[i].start;
}
#define MIN_BPF_LINEINFO_SIZE	(offsetof(struct bpf_line_info, line_col) + \
				 sizeof(((struct bpf_line_info *)(0))->line_col))
#define MAX_LINEINFO_REC_SIZE	MAX_FUNCINFO_REC_SIZE
static int check_btf_line(struct bpf_verifier_env *env,
			  const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
	struct bpf_subprog_info *sub;
	struct bpf_line_info *linfo;
	struct bpf_prog *prog;
	const struct btf *btf;
	void __user *ulinfo;

	nr_linfo = attr->line_info_cnt;

	rec_size = attr->line_info_rec_size;
	if (rec_size < MIN_BPF_LINEINFO_SIZE ||
	    rec_size > MAX_LINEINFO_REC_SIZE ||
	    rec_size & (sizeof(u32) - 1))

	/* Need to zero it in case the userspace may
	 * pass in a smaller bpf_line_info object.
	 */
	linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
			 GFP_KERNEL | __GFP_NOWARN);

	btf = prog->aux->btf;

	sub = env->subprog_info;
	ulinfo = u64_to_user_ptr(attr->line_info);
	expected_size = sizeof(struct bpf_line_info);
	ncopy = min_t(u32, expected_size, rec_size);
	for (i = 0; i < nr_linfo; i++) {
		err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
		if (err == -E2BIG) {
			verbose(env, "nonzero tailing record in line_info");
			if (put_user(expected_size,
				     &uattr->line_info_rec_size))

		if (copy_from_user(&linfo[i], ulinfo, ncopy)) {

		/*
		 * Check insn_off to ensure
		 * 1) strictly increasing AND
		 * 2) bounded by prog->len
		 *
		 * The linfo[0].insn_off == 0 check logically falls into
		 * the later "missing bpf_line_info for func..." case
		 * because the first linfo[0].insn_off must be the
		 * first sub also and the first sub must have
		 * subprog_info[0].start == 0.
		 */
		if ((i && linfo[i].insn_off <= prev_offset) ||
		    linfo[i].insn_off >= prog->len) {
			verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
				i, linfo[i].insn_off, prev_offset,

		if (!prog->insnsi[linfo[i].insn_off].code) {
				"Invalid insn code at line_info[%u].insn_off\n",

		if (!btf_name_by_offset(btf, linfo[i].line_off) ||
		    !btf_name_by_offset(btf, linfo[i].file_name_off)) {
			verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);

		if (s != env->subprog_cnt) {
			if (linfo[i].insn_off == sub[s].start) {
				sub[s].linfo_idx = i;
			} else if (sub[s].start < linfo[i].insn_off) {
				verbose(env, "missing bpf_line_info for func#%u\n", s);

		prev_offset = linfo[i].insn_off;

	if (s != env->subprog_cnt) {
		verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
			env->subprog_cnt - s, s);

	prog->aux->linfo = linfo;
	prog->aux->nr_linfo = nr_linfo;
static int check_btf_info(struct bpf_verifier_env *env,
			  const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	if (!attr->func_info_cnt && !attr->line_info_cnt)

	btf = btf_get_by_fd(attr->prog_btf_fd);
		return PTR_ERR(btf);
	env->prog->aux->btf = btf;

	err = check_btf_func(env, attr, uattr);

	err = check_btf_line(env, attr, uattr);
/* check %cur's range satisfies %old's */
static bool range_within(struct bpf_reg_state *old,
			 struct bpf_reg_state *cur)
{
	return old->umin_value <= cur->umin_value &&
	       old->umax_value >= cur->umax_value &&
	       old->smin_value <= cur->smin_value &&
	       old->smax_value >= cur->smax_value;
}
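/* Illustrative example, not part of the original file: if the already-explored
 * state proved a scalar safe with bounds [0, 100] (both signed and unsigned),
 * then a current state whose scalar sits within [8, 16] is inside that range,
 * so every access that was safe under the old, wider bounds stays safe here.
 */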
/* Maximum number of register states that can exist at once */
#define ID_MAP_SIZE	(MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
/* If in the old state two registers had the same id, then they need to have
 * the same id in the new state as well. But that id could be different from
 * the old state, so we need to track the mapping from old to new ids.
 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
 * regs with old id 5 must also have new id 9 for the new state to be safe. But
 * regs with a different old id could still have new id 9, we don't care about
 * that.
 * So we look through our idmap to see if this old id has been seen before. If
 * so, we require the new id to match; otherwise, we add the id pair to the map.
 */
static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
{
	for (i = 0; i < ID_MAP_SIZE; i++) {
		if (!idmap[i].old) {
			/* Reached an empty slot; haven't seen this id before */
			idmap[i].old = old_id;
			idmap[i].cur = cur_id;
		if (idmap[i].old == old_id)
			return idmap[i].cur == cur_id;
	}
	/* We ran out of idmap slots, which should be impossible */
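/* Illustrative example, not part of the original file: starting from an empty
 * idmap, check_ids(5, 9, idmap) records the pair (old 5 -> cur 9) and returns
 * true; a later check_ids(5, 7, idmap) then fails because old id 5 is already
 * bound to 9, while check_ids(6, 9, idmap) is still fine since old id 6 has
 * not been seen yet.
 */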
static void clean_func_state(struct bpf_verifier_env *env,
			     struct bpf_func_state *st)
{
	enum bpf_reg_liveness live;

	for (i = 0; i < BPF_REG_FP; i++) {
		live = st->regs[i].live;
		/* liveness must not touch this register anymore */
		st->regs[i].live |= REG_LIVE_DONE;
		if (!(live & REG_LIVE_READ))
			/* since the register is unused, clear its state
			 * to make further comparison simpler
			 */
			__mark_reg_not_init(&st->regs[i]);
	}

	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
		live = st->stack[i].spilled_ptr.live;
		/* liveness must not touch this stack slot anymore */
		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
		if (!(live & REG_LIVE_READ)) {
			__mark_reg_not_init(&st->stack[i].spilled_ptr);
			for (j = 0; j < BPF_REG_SIZE; j++)
				st->stack[i].slot_type[j] = STACK_INVALID;
static void clean_verifier_state(struct bpf_verifier_env *env,
				 struct bpf_verifier_state *st)
{
	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
		/* all regs in this state in all frames were already marked */

	for (i = 0; i <= st->curframe; i++)
		clean_func_state(env, st->frame[i]);
}
/* the parentage chains form a tree.
 * the verifier states are added to state lists at given insn and
 * pushed into state stack for future exploration.
 * when the verifier reaches bpf_exit insn some of the verifier states
 * stored in the state lists have their final liveness state already,
 * but a lot of states will get revised from liveness point of view when
 * the verifier explores other branches.
 *
 * 2: if r1 == 100 goto pc+1
 *
 * when the verifier reaches exit insn the register r0 in the state list of
 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
 * of insn 2 and goes exploring further. At the insn 4 it will walk the
 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
 *
 * Since the verifier pushes the branch states as it sees them while exploring
 * the program the condition of walking the branch instruction for the second
 * time means that all states below this branch were already explored and
 * their final liveness marks are already propagated.
 * Hence when the verifier completes the search of state list in is_state_visited()
 * we can call this clean_live_states() function to mark all liveness states
 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
 * This function also clears the registers and stack for states that !READ
 * to simplify state merging.
 *
 * Important note here that walking the same branch instruction in the callee
 * doesn't mean that the states are DONE. The verifier has to compare
 */
static void clean_live_states(struct bpf_verifier_env *env, int insn,
			      struct bpf_verifier_state *cur)
{
	struct bpf_verifier_state_list *sl;

	sl = *explored_state(env, insn);
		if (sl->state.branches)
		if (sl->state.insn_idx != insn ||
		    sl->state.curframe != cur->curframe)
		for (i = 0; i <= cur->curframe; i++)
			if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
		clean_verifier_state(env, &sl->state);
/* Returns true if (rold safe implies rcur safe) */
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
		    struct idpair *idmap)
{
	if (!(rold->live & REG_LIVE_READ))
		/* explored state didn't use this */

	equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;

	if (rold->type == PTR_TO_STACK)
		/* two stack pointers are equal only if they're pointing to
		 * the same stack frame, since fp-8 in foo != fp-8 in bar
		 */
		return equal && rold->frameno == rcur->frameno;

	if (rold->type == NOT_INIT)
		/* explored state can't have used this */
	if (rcur->type == NOT_INIT)

	switch (rold->type) {
		if (rcur->type == SCALAR_VALUE) {
			if (!rold->precise && !rcur->precise)
			/* new val must satisfy old val knowledge */
			return range_within(rold, rcur) &&
			       tnum_in(rold->var_off, rcur->var_off);
			/* We're trying to use a pointer in place of a scalar.
			 * Even if the scalar was unbounded, this could lead to
			 * pointer leaks because scalars are allowed to leak
			 * while pointers are not. We could make this safe in
			 * special cases if root is calling us, but it's
			 * probably not worth the hassle.
			 */
	case PTR_TO_MAP_VALUE:
		/* If the new min/max/var_off satisfy the old ones and
		 * everything else matches, we are OK.
		 * 'id' is not compared, since it's only used for maps with
		 * bpf_spin_lock inside map element and in such cases if
		 * the rest of the prog is valid for one map element then
		 * it's valid for all map elements regardless of the key
		 * used in bpf_map_lookup()
		 */
		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
		       range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off);
	case PTR_TO_MAP_VALUE_OR_NULL:
		/* a PTR_TO_MAP_VALUE could be safe to use as a
		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
		 * checked, doing so could have affected others with the same
		 * id, and we can't check for that because we lost the id when
		 * we converted to a PTR_TO_MAP_VALUE.
		 */
		if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
		if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
		/* Check our ids match any regs they're supposed to */
		return check_ids(rold->id, rcur->id, idmap);
	case PTR_TO_PACKET_META:
		if (rcur->type != rold->type)
		/* We must have at least as much range as the old ptr
		 * did, so that any accesses which were safe before are
		 * still safe. This is true even if old range < old off,
		 * since someone could have accessed through (ptr - k), or
		 * even done ptr -= k in a register, to get a safe access.
		 */
		if (rold->range > rcur->range)
		/* If the offsets don't match, we can't trust our alignment;
		 * nor can we be sure that we won't fall out of range.
		 */
		if (rold->off != rcur->off)
		/* id relations must be preserved */
		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
		/* new val must satisfy old val knowledge */
		return range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off);
	case CONST_PTR_TO_MAP:
	case PTR_TO_PACKET_END:
	case PTR_TO_FLOW_KEYS:
	case PTR_TO_SOCKET_OR_NULL:
	case PTR_TO_SOCK_COMMON:
	case PTR_TO_SOCK_COMMON_OR_NULL:
	case PTR_TO_TCP_SOCK:
	case PTR_TO_TCP_SOCK_OR_NULL:
	case PTR_TO_XDP_SOCK:
		/* Only valid matches are exact, which memcmp() above
		 * would have accepted
		 */
	/* Don't know what's going on, just say it's not safe */

	/* Shouldn't get here; if we do, say it's not safe */
static bool stacksafe(struct bpf_func_state *old,
		      struct bpf_func_state *cur,
		      struct idpair *idmap)
{
	/* walk slots of the explored stack and ignore any additional
	 * slots in the current stack, since explored(safe) state
	 */
	for (i = 0; i < old->allocated_stack; i++) {
		spi = i / BPF_REG_SIZE;

		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
			i += BPF_REG_SIZE - 1;
			/* explored state didn't use this */

		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)

		/* explored stack has more populated slots than current stack
		 * and these slots were used
		 */
		if (i >= cur->allocated_stack)

		/* if old state was safe with misc data in the stack
		 * it will be safe with zero-initialized stack.
		 * The opposite is not true
		 */
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
		if (i % BPF_REG_SIZE)
		if (old->stack[spi].slot_type[0] != STACK_SPILL)
		if (!regsafe(&old->stack[spi].spilled_ptr,
			     &cur->stack[spi].spilled_ptr,
			/* when explored and current stack slot are both storing
			 * spilled registers, check that stored pointers types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
			 * but current path has stored:
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
			 * such verifier states are not equivalent.
			 * return false to continue verification of this path
			 */
static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
{
	if (old->acquired_refs != cur->acquired_refs)

	return !memcmp(old->refs, cur->refs,
		       sizeof(*old->refs) * old->acquired_refs);
}
/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when the verifier explores different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state then this execution
 * branch doesn't need to be explored further, since verifier already
 * concluded that more strict state leads to valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
 *
 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
 *
 * In other words if current stack state (one being explored) has more
 * valid slots than old one that already passed validation, it means
 * the verifier can stop exploring and conclude that current state is valid too
 *
 * Similarly with registers. If explored state has register type as invalid
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
static bool func_states_equal(struct bpf_func_state *old,
			      struct bpf_func_state *cur)
{
	struct idpair *idmap;

	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
	/* If we failed to allocate the idmap, just say it's not safe */

	for (i = 0; i < MAX_BPF_REG; i++) {
		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
	}

	if (!stacksafe(old, cur, idmap))

	if (!refsafe(old, cur))
static bool states_equal(struct bpf_verifier_env *env,
			 struct bpf_verifier_state *old,
			 struct bpf_verifier_state *cur)
{
	if (old->curframe != cur->curframe)

	/* Verification state from speculative execution simulation
	 * must never prune a non-speculative execution one.
	 */
	if (old->speculative && !cur->speculative)

	if (old->active_spin_lock != cur->active_spin_lock)

	/* for states to be equal callsites have to be the same
	 * and all frame states need to be equivalent
	 */
	for (i = 0; i <= old->curframe; i++) {
		if (old->frame[i]->callsite != cur->frame[i]->callsite)
		if (!func_states_equal(old->frame[i], cur->frame[i]))
	}
/* Return 0 if no propagation happened. Return negative error code if error
 * happened. Otherwise, return the propagated bit.
 */
static int propagate_liveness_reg(struct bpf_verifier_env *env,
				  struct bpf_reg_state *reg,
				  struct bpf_reg_state *parent_reg)
{
	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
	u8 flag = reg->live & REG_LIVE_READ;

	/* When we get here, the read flags of PARENT_REG or REG could be any of
	 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
	 * for propagation if PARENT_REG has the strongest REG_LIVE_READ64.
	 */
	if (parent_flag == REG_LIVE_READ64 ||
	    /* Or if there is no read flag from REG. */
	    /* Or if the read flag from REG is the same as PARENT_REG. */
	    parent_flag == flag)

	err = mark_reg_read(env, reg, parent_reg, flag);
/* A write screens off any subsequent reads; but write marks come from the
 * straight-line code between a state and its parent. When we arrive at an
 * equivalent state (jump target or such) we didn't arrive by the straight-line
 * code, so read marks in the state must propagate to the parent regardless
 * of the state's write marks. That's what 'parent == state->parent' comparison
 * in mark_reg_read() is for.
 */
static int propagate_liveness(struct bpf_verifier_env *env,
			      const struct bpf_verifier_state *vstate,
			      struct bpf_verifier_state *vparent)
{
	struct bpf_reg_state *state_reg, *parent_reg;
	struct bpf_func_state *state, *parent;
	int i, frame, err = 0;

	if (vparent->curframe != vstate->curframe) {
		WARN(1, "propagate_live: parent frame %d current frame %d\n",
		     vparent->curframe, vstate->curframe);

	/* Propagate read liveness of registers... */
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
	for (frame = 0; frame <= vstate->curframe; frame++) {
		parent = vparent->frame[frame];
		state = vstate->frame[frame];
		parent_reg = parent->regs;
		state_reg = state->regs;
		/* We don't need to worry about FP liveness, it's read-only */
		for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
			err = propagate_liveness_reg(env, &state_reg[i],
			if (err == REG_LIVE_READ64)
				mark_insn_zext(env, &parent_reg[i]);
		}

		/* Propagate stack slots. */
		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
			parent_reg = &parent->stack[i].spilled_ptr;
			state_reg = &state->stack[i].spilled_ptr;
			err = propagate_liveness_reg(env, state_reg,
/* find precise scalars in the previous equivalent state and
 * propagate them into the current state
 */
static int propagate_precision(struct bpf_verifier_env *env,
			       const struct bpf_verifier_state *old)
{
	struct bpf_reg_state *state_reg;
	struct bpf_func_state *state;

	state = old->frame[old->curframe];
	state_reg = state->regs;
	for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
		if (state_reg->type != SCALAR_VALUE ||
		    !state_reg->precise)
		if (env->log.level & BPF_LOG_LEVEL2)
			verbose(env, "propagating r%d\n", i);
		err = mark_chain_precision(env, i);
	}

	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		if (state->stack[i].slot_type[0] != STACK_SPILL)
		state_reg = &state->stack[i].spilled_ptr;
		if (state_reg->type != SCALAR_VALUE ||
		    !state_reg->precise)
		if (env->log.level & BPF_LOG_LEVEL2)
			verbose(env, "propagating fp%d\n",
				(-i - 1) * BPF_REG_SIZE);
		err = mark_chain_precision_stack(env, i);
static bool states_maybe_looping(struct bpf_verifier_state *old,
				 struct bpf_verifier_state *cur)
{
	struct bpf_func_state *fold, *fcur;
	int i, fr = cur->curframe;

	if (old->curframe != fr)

	fold = old->frame[fr];
	fcur = cur->frame[fr];
	for (i = 0; i < MAX_BPF_REG; i++)
		if (memcmp(&fold->regs[i], &fcur->regs[i],
			   offsetof(struct bpf_reg_state, parent)))
static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_verifier_state_list *new_sl;
	struct bpf_verifier_state_list *sl, **pprev;
	struct bpf_verifier_state *cur = env->cur_state, *new;
	int i, j, err, states_cnt = 0;
	bool add_new_state = env->test_state_freq ? true : false;

	cur->last_insn_idx = env->prev_insn_idx;
	if (!env->insn_aux_data[insn_idx].prune_point)
		/* this 'insn_idx' instruction wasn't marked, so we will not
		 * be doing state search here
		 */

	/* bpf progs typically have pruning point every 4 instructions
	 * http://vger.kernel.org/bpfconf2019.html#session-1
	 * Do not add new state for future pruning if the verifier hasn't seen
	 * at least 2 jumps and at least 8 instructions.
	 * This heuristics helps decrease 'total_states' and 'peak_states' metric.
	 * In tests that amounts to up to 50% reduction into total verifier
	 * memory consumption and 20% verifier time speedup.
	 */
	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
	    env->insn_processed - env->prev_insn_processed >= 8)
		add_new_state = true;

	pprev = explored_state(env, insn_idx);

	clean_live_states(env, insn_idx, cur);

		if (sl->state.insn_idx != insn_idx)
		if (sl->state.branches) {
			if (states_maybe_looping(&sl->state, cur) &&
			    states_equal(env, &sl->state, cur)) {
				verbose_linfo(env, insn_idx, "; ");
				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
			/* if the verifier is processing a loop, avoid adding new state
			 * too often, since different loop iterations have distinct
			 * states and may not help future pruning.
			 * This threshold shouldn't be too low to make sure that
			 * a loop with large bound will be rejected quickly.
			 * The most abusive loop will be:
			 * if r1 < 1000000 goto pc-2
			 * 1M insn_processed limit / 100 == 10k peak states.
			 * This threshold shouldn't be too high either, since states
			 * at the end of the loop are likely to be useful in pruning.
			 */
			if (env->jmps_processed - env->prev_jmps_processed < 20 &&
			    env->insn_processed - env->prev_insn_processed < 100)
				add_new_state = false;

		if (states_equal(env, &sl->state, cur)) {
			/* reached equivalent register/stack state,
			 *
			 * Registers read by the continuation are read by us.
			 * If we have any write marks in env->cur_state, they
			 * will prevent corresponding reads in the continuation
			 * from reaching our parent (an explored_state). Our
			 * own state will get the read marks recorded, but
			 * they'll be immediately forgotten as we're pruning
			 * this state and will pop a new one.
			 */
			err = propagate_liveness(env, &sl->state, cur);

			/* if previous state reached the exit with precision and
			 * current state is equivalent to it (except precision marks)
			 * the precision needs to be propagated back in
			 * the current state.
			 */
			err = err ? : push_jmp_history(env, cur);
			err = err ? : propagate_precision(env, &sl->state);

		/* when new state is not going to be added do not increase miss count.
		 * Otherwise several loop iterations will remove the state
		 * recorded earlier. The goal of these heuristics is to have
		 * states from some iterations of the loop (some in the beginning
		 * and some at the end) to help pruning.
		 */

		/* heuristic to determine whether this state is beneficial
		 * to keep checking from state equivalence point of view.
		 * Higher numbers increase max_states_per_insn and verification time,
		 * but do not meaningfully decrease insn_processed.
		 */
		if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
			/* the state is unlikely to be useful. Remove it to
			 * speed up verification
			 */
			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
				u32 br = sl->state.branches;

				     "BUG live_done but branches_to_explore %d\n",
				free_verifier_state(&sl->state, false);

				/* cannot free this state, since parentage chain may
				 * walk it later. Add it for free_list instead to
				 * be freed at the end of verification
				 */
				sl->next = env->free_list;
				env->free_list = sl;

	if (env->max_states_per_insn < states_cnt)
		env->max_states_per_insn = states_cnt;

	if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
		return push_jmp_history(env, cur);

		return push_jmp_history(env, cur);

	/* There were no equivalent states, remember the current one.
	 * Technically the current state is not proven to be safe yet,
	 * but it will either reach outer most bpf_exit (which means it's safe)
	 * or it will be rejected. When there are no loops the verifier won't be
	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
	 * again on the way to bpf_exit.
	 * When looping the sl->state.branches will be > 0 and this state
	 * will not be considered for equivalence until branches == 0.
	 */
	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
	env->total_states++;

	env->prev_jmps_processed = env->jmps_processed;
	env->prev_insn_processed = env->insn_processed;

	/* add new state to the head of linked list */
	new = &new_sl->state;
	err = copy_verifier_state(new, cur);
		free_verifier_state(new, false);

	new->insn_idx = insn_idx;
	WARN_ONCE(new->branches != 1,
		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);

	cur->first_insn_idx = insn_idx;
	clear_jmp_history(cur);
	new_sl->next = *explored_state(env, insn_idx);
	*explored_state(env, insn_idx) = new_sl;
	/* connect new state to parentage chain. Current frame needs all
	 * registers connected. Only r6 - r9 of the callers are alive (pushed
	 * to the stack implicitly by JITs) so in callers' frames connect just
	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
	 * the state of the call instruction (with WRITTEN set), and r0 comes
	 * from callee with its full parentage chain, anyway.
	 */
	/* clear write marks in current state: the writes we did are not writes
	 * our child did, so they don't screen off its reads from us.
	 * (There are no read marks in current state, because reads always mark
	 * their parent and current state never has children yet. Only
	 * explored_states can get read marks.)
	 */
	for (j = 0; j <= cur->curframe; j++) {
		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
		for (i = 0; i < BPF_REG_FP; i++)
			cur->frame[j]->regs[i].live = REG_LIVE_NONE;
	}

	/* all stack frames are accessible from callee, clear them all */
	for (j = 0; j <= cur->curframe; j++) {
		struct bpf_func_state *frame = cur->frame[j];
		struct bpf_func_state *newframe = new->frame[j];

		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
			frame->stack[i].spilled_ptr.parent =
					&newframe->stack[i].spilled_ptr;
/* Return true if it's OK to have the same insn return a different type. */
static bool reg_type_mismatch_ok(enum bpf_reg_type type)
{
	case PTR_TO_SOCKET_OR_NULL:
	case PTR_TO_SOCK_COMMON:
	case PTR_TO_SOCK_COMMON_OR_NULL:
	case PTR_TO_TCP_SOCK:
	case PTR_TO_TCP_SOCK_OR_NULL:
	case PTR_TO_XDP_SOCK:

/* If an instruction was previously used with particular pointer types, then we
 * need to be careful to avoid cases such as the below, where it may be ok
 * for one branch accessing the pointer, but not ok for the other branch:
 *
 *         R1 = some_other_valid_ptr;
 *
 *     R2 = *(u32 *)(R1 + 0);
 */
static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
{
	return src != prev && (!reg_type_mismatch_ok(src) ||
			       !reg_type_mismatch_ok(prev));
}
static int do_check(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *state;
	struct bpf_insn *insns = env->prog->insnsi;
	struct bpf_reg_state *regs;
	int insn_cnt = env->prog->len;
	bool do_print_state = false;
	int prev_insn_idx = -1;

	env->prev_linfo = NULL;

	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
	state->curframe = 0;
	state->speculative = false;
	state->branches = 1;
	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
	if (!state->frame[0]) {

	env->cur_state = state;
	init_func_state(env, state->frame[0],
			BPF_MAIN_FUNC /* callsite */,
			0 /* subprogno, zero == main subprog */);

	if (btf_check_func_arg_match(env, 0))

		struct bpf_insn *insn;

		env->prev_insn_idx = prev_insn_idx;
		if (env->insn_idx >= insn_cnt) {
			verbose(env, "invalid insn idx %d insn_cnt %d\n",
				env->insn_idx, insn_cnt);

		insn = &insns[env->insn_idx];
		class = BPF_CLASS(insn->code);

		if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
				"BPF program is too large. Processed %d insn\n",
				env->insn_processed);

		err = is_state_visited(env, env->insn_idx);
			/* found equivalent state, can prune the search */
			if (env->log.level & BPF_LOG_LEVEL) {
					verbose(env, "\nfrom %d to %d%s: safe\n",
						env->prev_insn_idx, env->insn_idx,
						env->cur_state->speculative ?
						" (speculative execution)" : "");
					verbose(env, "%d: safe\n", env->insn_idx);
			goto process_bpf_exit;

		if (signal_pending(current))

		if (env->log.level & BPF_LOG_LEVEL2 ||
		    (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
			if (env->log.level & BPF_LOG_LEVEL2)
				verbose(env, "%d:", env->insn_idx);
				verbose(env, "\nfrom %d to %d%s:",
					env->prev_insn_idx, env->insn_idx,
					env->cur_state->speculative ?
					" (speculative execution)" : "");
			print_verifier_state(env, state->frame[state->curframe]);
			do_print_state = false;

		if (env->log.level & BPF_LOG_LEVEL) {
			const struct bpf_insn_cbs cbs = {
				.cb_print	= verbose,
				.private_data	= env,

			verbose_linfo(env, env->insn_idx, "; ");
			verbose(env, "%d: ", env->insn_idx);
			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);

		if (bpf_prog_is_dev_bound(env->prog->aux)) {
			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
							   env->prev_insn_idx);

		regs = cur_regs(env);
		env->insn_aux_data[env->insn_idx].seen = true;
		prev_insn_idx = env->insn_idx;

		if (class == BPF_ALU || class == BPF_ALU64) {
			err = check_alu_op(env, insn);

		} else if (class == BPF_LDX) {
			enum bpf_reg_type *prev_src_type, src_reg_type;

			/* check for reserved fields is already done */

			/* check src operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);

			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);

			src_reg_type = regs[insn->src_reg].type;

			/* check that memory (src_reg + off) is readable,
			 * the state of dst_reg will be updated by this func
			 */
			err = check_mem_access(env, env->insn_idx, insn->src_reg,
					       insn->off, BPF_SIZE(insn->code),
					       BPF_READ, insn->dst_reg, false);

			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;

			if (*prev_src_type == NOT_INIT) {
				/*
				 * dst_reg = *(u32 *)(src_reg + off)
				 * save type to validate intersecting paths
				 */
				*prev_src_type = src_reg_type;

			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
				/* Abuser program is trying to use the same insn
				 * dst_reg = *(u32*) (src_reg + off)
				 * with different pointer types:
				 * src_reg == ctx in one branch and
				 * src_reg == stack|map in some other branch.
				 */
				verbose(env, "same insn cannot be used with different pointers\n");

		} else if (class == BPF_STX) {
			enum bpf_reg_type *prev_dst_type, dst_reg_type;

			if (BPF_MODE(insn->code) == BPF_XADD) {
				err = check_xadd(env, env->insn_idx, insn);

			/* check src1 operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			/* check src2 operand */
			err = check_reg_arg(env, insn->dst_reg, SRC_OP);

			dst_reg_type = regs[insn->dst_reg].type;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
					       insn->off, BPF_SIZE(insn->code),
					       BPF_WRITE, insn->src_reg, false);

			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;

			if (*prev_dst_type == NOT_INIT) {
				*prev_dst_type = dst_reg_type;
			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
				verbose(env, "same insn cannot be used with different pointers\n");

		} else if (class == BPF_ST) {
			if (BPF_MODE(insn->code) != BPF_MEM ||
			    insn->src_reg != BPF_REG_0) {
				verbose(env, "BPF_ST uses reserved fields\n");

			/* check src operand */
			err = check_reg_arg(env, insn->dst_reg, SRC_OP);

			if (is_ctx_reg(env, insn->dst_reg)) {
				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
					reg_type_str[reg_state(env, insn->dst_reg)->type]);

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
					       insn->off, BPF_SIZE(insn->code),
					       BPF_WRITE, -1, false);

		} else if (class == BPF_JMP || class == BPF_JMP32) {
			u8 opcode = BPF_OP(insn->code);

			env->jmps_processed++;
			if (opcode == BPF_CALL) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    (insn->src_reg != BPF_REG_0 &&
				     insn->src_reg != BPF_PSEUDO_CALL) ||
				    insn->dst_reg != BPF_REG_0 ||
				    class == BPF_JMP32) {
					verbose(env, "BPF_CALL uses reserved fields\n");

				if (env->cur_state->active_spin_lock &&
				    (insn->src_reg == BPF_PSEUDO_CALL ||
				     insn->imm != BPF_FUNC_spin_unlock)) {
					verbose(env, "function calls are not allowed while holding a lock\n");

				if (insn->src_reg == BPF_PSEUDO_CALL)
					err = check_func_call(env, insn, &env->insn_idx);
					err = check_helper_call(env, insn->imm, env->insn_idx);

			} else if (opcode == BPF_JA) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0 ||
				    class == BPF_JMP32) {
					verbose(env, "BPF_JA uses reserved fields\n");

				env->insn_idx += insn->off + 1;

			} else if (opcode == BPF_EXIT) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0 ||
				    class == BPF_JMP32) {
					verbose(env, "BPF_EXIT uses reserved fields\n");

				if (env->cur_state->active_spin_lock) {
					verbose(env, "bpf_spin_unlock is missing\n");

				if (state->curframe) {
					/* exit from nested function */
					err = prepare_func_exit(env, &env->insn_idx);
					do_print_state = true;

				err = check_reference_leak(env);

				/* eBPF calling convention is such that R0 is used
				 * to return the value from eBPF program.
				 * Make sure that it's readable at this time
				 * of bpf_exit, which means that program wrote
				 * something into it earlier
				 */
				err = check_reg_arg(env, BPF_REG_0, SRC_OP);

				if (is_pointer_value(env, BPF_REG_0)) {
					verbose(env, "R0 leaks addr as return value\n");

				err = check_return_code(env);

				update_branch_counts(env, env->cur_state);
				err = pop_stack(env, &prev_insn_idx,
				do_print_state = true;

				err = check_cond_jmp_op(env, insn, &env->insn_idx);

		} else if (class == BPF_LD) {
			u8 mode = BPF_MODE(insn->code);

			if (mode == BPF_ABS || mode == BPF_IND) {
				err = check_ld_abs(env, insn);

			} else if (mode == BPF_IMM) {
				err = check_ld_imm(env, insn);

				env->insn_aux_data[env->insn_idx].seen = true;
				verbose(env, "invalid BPF_LD mode\n");
			verbose(env, "unknown insn class %d\n", class);

	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
static int check_map_prealloc(struct bpf_map *map)
{
	return (map->map_type != BPF_MAP_TYPE_HASH &&
		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
		!(map->map_flags & BPF_F_NO_PREALLOC);
}
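/* Illustrative example, not part of the original file: a BPF_MAP_TYPE_ARRAY
 * always counts as preallocated here, and a BPF_MAP_TYPE_HASH counts as
 * preallocated unless it was created with BPF_F_NO_PREALLOC, in which case
 * the perf_event compatibility check below will reject it.
 */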
static bool is_tracing_prog_type(enum bpf_prog_type type)
{
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
static int check_map_prog_compatibility(struct bpf_verifier_env *env,
					struct bpf_map *map,
					struct bpf_prog *prog)
{
	/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
	 * preallocated hash maps, since doing memory allocation
	 * in overflow_handler can crash depending on where nmi got
	 */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
		if (!check_map_prealloc(map)) {
			verbose(env, "perf_event programs can only use preallocated hash map\n");
		if (map->inner_map_meta &&
		    !check_map_prealloc(map->inner_map_meta)) {
			verbose(env, "perf_event programs can only use preallocated inner hash map\n");

	if ((is_tracing_prog_type(prog->type) ||
	     prog->type == BPF_PROG_TYPE_SOCKET_FILTER) &&
	    map_value_has_spin_lock(map)) {
		verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");

	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
	    !bpf_offload_prog_map_match(prog, map)) {
		verbose(env, "offload device mismatch between prog and map\n");

static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
{
	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
}
/* look for pseudo eBPF instructions that access map FDs and
 * replace them with actual map pointers
 */
static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i, j, err;

	err = bpf_prog_calc_tag(env->prog);
	if (err)
		return err;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (BPF_CLASS(insn->code) == BPF_LDX &&
		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
			verbose(env, "BPF_LDX uses reserved fields\n");
			return -EINVAL;
		}

		if (BPF_CLASS(insn->code) == BPF_STX &&
		    ((BPF_MODE(insn->code) != BPF_MEM &&
		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
			verbose(env, "BPF_STX uses reserved fields\n");
			return -EINVAL;
		}

		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
			struct bpf_insn_aux_data *aux;
			struct bpf_map *map;
			struct fd f;
			u64 addr;

			if (i == insn_cnt - 1 || insn[1].code != 0 ||
			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
			    insn[1].off != 0) {
				verbose(env, "invalid bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			if (insn[0].src_reg == 0)
				/* valid generic load 64-bit imm */
				goto next_insn;

			/* In final convert_pseudo_ld_imm64() step, this is
			 * converted into regular 64-bit imm load insn.
			 */
			if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD &&
			     insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) ||
			    (insn[0].src_reg == BPF_PSEUDO_MAP_FD &&
			     insn[1].imm != 0)) {
				verbose(env,
					"unrecognized bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			f = fdget(insn[0].imm);
			map = __bpf_map_get(f);
			if (IS_ERR(map)) {
				verbose(env, "fd %d is not pointing to valid bpf_map\n",
					insn[0].imm);
				return PTR_ERR(map);
			}

			err = check_map_prog_compatibility(env, map, env->prog);
			if (err) {
				fdput(f);
				return err;
			}

			aux = &env->insn_aux_data[i];
			if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
				addr = (unsigned long)map;
			} else {
				u32 off = insn[1].imm;

				if (off >= BPF_MAX_VAR_OFF) {
					verbose(env, "direct value offset of %u is not allowed\n", off);
					fdput(f);
					return -EINVAL;
				}

				if (!map->ops->map_direct_value_addr) {
					verbose(env, "no direct value access support for this map type\n");
					fdput(f);
					return -EINVAL;
				}

				err = map->ops->map_direct_value_addr(map, &addr, off);
				if (err) {
					verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
						map->value_size, off);
					fdput(f);
					return err;
				}

				addr += off;
			}

			insn[0].imm = (u32)addr;
			insn[1].imm = addr >> 32;

			/* check whether we recorded this map already */
			for (j = 0; j < env->used_map_cnt; j++) {
				if (env->used_maps[j] == map) {
					aux->map_index = j;
					fdput(f);
					goto next_insn;
				}
			}

			if (env->used_map_cnt >= MAX_USED_MAPS) {
				fdput(f);
				return -E2BIG;
			}

			/* hold the map. If the program is rejected by verifier,
			 * the map will be released by release_maps() or it
			 * will be used by the valid program until it's unloaded
			 * and all maps are released in free_used_maps()
			 */
			bpf_map_inc(map);

			aux->map_index = env->used_map_cnt;
			env->used_maps[env->used_map_cnt++] = map;

			if (bpf_map_is_cgroup_storage(map) &&
			    bpf_cgroup_storage_assign(env->prog, map)) {
				verbose(env, "only one cgroup storage of each type is allowed\n");
				fdput(f);
				return -EBUSY;
			}

			fdput(f);
next_insn:
			insn++;
			i++;
			continue;
		}

		/* Basic sanity check before we invest more work here. */
		if (!bpf_opcode_in_insntable(insn->code)) {
			verbose(env, "unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}
	}

	/* now all pseudo BPF_LD_IMM64 instructions load valid
	 * 'struct bpf_map *' into a register instead of user map_fd.
	 * These pointers will be used later by verifier to validate map access.
	 */
	return 0;
}
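/* Illustrative sketch (not part of the verifier itself): userspace typically
 * emits the pseudo instruction via the BPF_LD_MAP_FD() convenience macro,
 * which expands to a two-insn BPF_LD | BPF_IMM | BPF_DW pair with
 * src_reg == BPF_PSEUDO_MAP_FD:
 *
 *	BPF_LD_MAP_FD(BPF_REG_1, map_fd)
 *	  insn[0]: code = BPF_LD | BPF_IMM | BPF_DW, src_reg = BPF_PSEUDO_MAP_FD,
 *	           imm = map_fd
 *	  insn[1]: code = 0, imm = 0
 *
 * After replace_map_fd_with_map_ptr() the imm pair of the same two insns
 * carries the lower and upper 32 bits of the 'struct bpf_map *' (or of a
 * direct value address for BPF_PSEUDO_MAP_VALUE) instead of the user fd.
 */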
/* drop refcnt of maps used by the rejected program */
static void release_maps(struct bpf_verifier_env *env)
{
	enum bpf_cgroup_storage_type stype;
	int i;

	for_each_cgroup_storage_type(stype) {
		if (!env->prog->aux->cgroup_storage[stype])
			continue;
		bpf_cgroup_storage_release(env->prog,
			env->prog->aux->cgroup_storage[stype]);
	}

	for (i = 0; i < env->used_map_cnt; i++)
		bpf_map_put(env->used_maps[i]);
}
/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++)
		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
			insn->src_reg = 0;
}
/* single env->prog->insni[off] instruction was replaced with the range
 * insni[off, off + cnt).  Adjust corresponding insn_aux_data by copying
 * [0, off) and [off, end) to new locations, so the patched range stays zero
 */
static int adjust_insn_aux_data(struct bpf_verifier_env *env,
				struct bpf_prog *new_prog, u32 off, u32 cnt)
{
	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
	struct bpf_insn *insn = new_prog->insnsi;
	u32 prog_len;
	int i;

	/* aux info at OFF always needs adjustment, no matter fast path
	 * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the
	 * original insn at old prog.
	 */
	old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);

	if (cnt == 1)
		return 0;
	prog_len = new_prog->len;
	new_data = vzalloc(array_size(prog_len,
				      sizeof(struct bpf_insn_aux_data)));
	if (!new_data)
		return -ENOMEM;
	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
	memcpy(new_data + off + cnt - 1, old_data + off,
	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
	for (i = off; i < off + cnt - 1; i++) {
		new_data[i].seen = true;
		new_data[i].zext_dst = insn_has_def32(env, insn + i);
	}
	env->insn_aux_data = new_data;
	vfree(old_data);
	return 0;
}
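/* Worked example (illustrative only): patching insn 3 of a 10-insn program
 * with a 4-insn sequence (off == 3, cnt == 4, new prog_len == 13) copies old
 * aux[0..2] to new aux[0..2] and old aux[3..9] to new aux[6..12], while the
 * freshly inserted aux[3..5] start out zeroed, marked seen, with zext_dst
 * recomputed from the corresponding patched instructions.
 */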
static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
{
	int i;

	if (len == 1)
		return;
	/* NOTE: fake 'exit' subprog should be updated as well. */
	for (i = 0; i <= env->subprog_cnt; i++) {
		if (env->subprog_info[i].start <= off)
			continue;
		env->subprog_info[i].start += len - 1;
	}
}
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
					    const struct bpf_insn *patch, u32 len)
{
	struct bpf_prog *new_prog;

	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
	if (IS_ERR(new_prog)) {
		if (PTR_ERR(new_prog) == -ERANGE)
			verbose(env,
				"insn %d cannot be patched due to 16-bit range\n",
				env->insn_aux_data[off].orig_idx);
		return NULL;
	}
	if (adjust_insn_aux_data(env, new_prog, off, len))
		return NULL;
	adjust_subprog_starts(env, off, len);
	return new_prog;
}
static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
					      u32 off, u32 cnt)
{
	int i, j;

	/* find first prog starting at or after off (first to remove) */
	for (i = 0; i < env->subprog_cnt; i++)
		if (env->subprog_info[i].start >= off)
			break;
	/* find first prog starting at or after off + cnt (first to stay) */
	for (j = i; j < env->subprog_cnt; j++)
		if (env->subprog_info[j].start >= off + cnt)
			break;
	/* if j doesn't start exactly at off + cnt, we are just removing
	 * the front of previous prog
	 */
	if (env->subprog_info[j].start != off + cnt)
		j--;

	if (j > i) {
		struct bpf_prog_aux *aux = env->prog->aux;
		int move;

		/* move fake 'exit' subprog as well */
		move = env->subprog_cnt + 1 - j;

		memmove(env->subprog_info + i,
			env->subprog_info + j,
			sizeof(*env->subprog_info) * move);
		env->subprog_cnt -= j - i;

		/* remove func_info */
		if (aux->func_info) {
			move = aux->func_info_cnt - j;

			memmove(aux->func_info + i,
				aux->func_info + j,
				sizeof(*aux->func_info) * move);
			aux->func_info_cnt -= j - i;
			/* func_info->insn_off is set after all code rewrites,
			 * in adjust_btf_func() - no need to adjust
			 */
		}
	} else {
		/* convert i from "first prog to remove" to "first to adjust" */
		if (env->subprog_info[i].start == off)
			i++;
	}

	/* update fake 'exit' subprog as well */
	for (; i <= env->subprog_cnt; i++)
		env->subprog_info[i].start -= cnt;

	return 0;
}
static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
				      u32 cnt)
{
	struct bpf_prog *prog = env->prog;
	u32 i, l_off, l_cnt, nr_linfo;
	struct bpf_line_info *linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo)
		return 0;

	linfo = prog->aux->linfo;

	/* find first line info to remove, count lines to be removed */
	for (i = 0; i < nr_linfo; i++)
		if (linfo[i].insn_off >= off)
			break;

	l_off = i;
	l_cnt = 0;
	for (; i < nr_linfo; i++)
		if (linfo[i].insn_off < off + cnt)
			l_cnt++;
		else
			break;

	/* First live insn doesn't match first live linfo, it needs to "inherit"
	 * last removed linfo.  prog is already modified, so prog->len == off
	 * means no live instructions after (tail of the program was removed).
	 */
	if (prog->len != off && l_cnt &&
	    (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
		l_cnt--;
		linfo[--i].insn_off = off + cnt;
	}

	/* remove the line info which refer to the removed instructions */
	if (l_cnt) {
		memmove(linfo + l_off, linfo + i,
			sizeof(*linfo) * (nr_linfo - i));

		prog->aux->nr_linfo -= l_cnt;
		nr_linfo = prog->aux->nr_linfo;
	}

	/* pull all linfo[i].insn_off >= off + cnt in by cnt */
	for (i = l_off; i < nr_linfo; i++)
		linfo[i].insn_off -= cnt;

	/* fix up all subprogs (incl. 'exit') which start >= off */
	for (i = 0; i <= env->subprog_cnt; i++)
		if (env->subprog_info[i].linfo_idx > l_off) {
			/* program may have started in the removed region but
			 * may not be fully removed
			 */
			if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
				env->subprog_info[i].linfo_idx -= l_cnt;
			else
				env->subprog_info[i].linfo_idx = l_off;
		}

	return 0;
}
static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	unsigned int orig_prog_len = env->prog->len;
	int err;

	if (bpf_prog_is_dev_bound(env->prog->aux))
		bpf_prog_offload_remove_insns(env, off, cnt);

	err = bpf_remove_insns(env->prog, off, cnt);
	if (err)
		return err;

	err = adjust_subprog_starts_after_remove(env, off, cnt);
	if (err)
		return err;

	err = bpf_adj_linfo_after_remove(env, off, cnt);
	if (err)
		return err;

	memmove(aux_data + off, aux_data + off + cnt,
		sizeof(*aux_data) * (orig_prog_len - off - cnt));

	return 0;
}
/* The verifier does more data flow analysis than llvm and will not
 * explore branches that are dead at run time. Malicious programs can
 * have dead code too. Therefore replace all dead at-run-time code
 * with 'ja -1'.
 *
 * Just nops are not optimal, e.g. if they would sit at the end of the
 * program and through another bug we would manage to jump there, then
 * we'd execute beyond program memory otherwise. Returning exception
 * code also wouldn't work since we can have subprogs where the dead
 * code could be located.
 */
static void sanitize_dead_code(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
	struct bpf_insn *insn = env->prog->insnsi;
	const int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++) {
		if (aux_data[i].seen)
			continue;
		memcpy(insn + i, &trap, sizeof(trap));
	}
}
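/* Illustrative sketch: an instruction the verifier never reached, e.g. a
 * BPF_MOV64_IMM(BPF_REG_0, 0) sitting behind an always-taken branch, is
 * overwritten in place with the 'trap' above, BPF_JMP_IMM(BPF_JA, 0, 0, -1),
 * i.e. a jump back to itself. If a bug ever made execution land there, it
 * would spin on that one insn instead of running off the end of the program.
 */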
static bool insn_is_cond_jump(u8 code)
{
	u8 op;

	if (BPF_CLASS(code) == BPF_JMP32)
		return true;

	if (BPF_CLASS(code) != BPF_JMP)
		return false;

	op = BPF_OP(code);
	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
}
static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
	struct bpf_insn *insn = env->prog->insnsi;
	const int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (!insn_is_cond_jump(insn->code))
			continue;

		if (!aux_data[i + 1].seen)
			ja.off = insn->off;
		else if (!aux_data[i + 1 + insn->off].seen)
			ja.off = 0;
		else
			continue;

		if (bpf_prog_is_dev_bound(env->prog->aux))
			bpf_prog_offload_replace_insn(env, i, &ja);

		memcpy(insn, &ja, sizeof(ja));
	}
}
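/* Illustrative sketch: for a conditional jump whose fall-through path was
 * never reached, e.g.
 *
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5)	// branch always taken
 *
 * the insn is rewritten to BPF_JMP_IMM(BPF_JA, 0, 0, 5); if instead the
 * branch target was never reached, it becomes BPF_JMP_IMM(BPF_JA, 0, 0, 0),
 * a plain fall-through nop that opt_remove_nops() can delete later.
 */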
static int opt_remove_dead_code(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	int insn_cnt = env->prog->len;
	int i, err;

	for (i = 0; i < insn_cnt; i++) {
		int j;

		j = 0;
		while (i + j < insn_cnt && !aux_data[i + j].seen)
			j++;
		if (!j)
			continue;

		err = verifier_remove_insns(env, i, j);
		if (err)
			return err;
		insn_cnt = env->prog->len;
	}

	return 0;
}
static int opt_remove_nops(struct bpf_verifier_env *env)
{
	const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i, err;

	for (i = 0; i < insn_cnt; i++) {
		if (memcmp(&insn[i], &ja, sizeof(ja)))
			continue;

		err = verifier_remove_insns(env, i, 1);
		if (err)
			return err;
		insn_cnt--;
		i--;
	}

	return 0;
}
static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
					 const union bpf_attr *attr)
{
	struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
	struct bpf_insn_aux_data *aux = env->insn_aux_data;
	int i, patch_len, delta = 0, len = env->prog->len;
	struct bpf_insn *insns = env->prog->insnsi;
	struct bpf_prog *new_prog;
	bool rnd_hi32;

	rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
	zext_patch[1] = BPF_ZEXT_REG(0);
	rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
	rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
	rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
	for (i = 0; i < len; i++) {
		int adj_idx = i + delta;
		struct bpf_insn insn;

		insn = insns[adj_idx];
		if (!aux[adj_idx].zext_dst) {
			u8 code, class;
			u32 imm_rnd;

			if (!rnd_hi32)
				continue;

			code = insn.code;
			class = BPF_CLASS(code);
			if (insn_no_def(&insn))
				continue;

			/* NOTE: arg "reg" (the fourth one) is only used for
			 *       BPF_STX which has been ruled out in above
			 *       check, it is safe to pass NULL here.
			 */
			if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) {
				if (class == BPF_LD &&
				    BPF_MODE(code) == BPF_IMM)
					i++;
				continue;
			}

			/* ctx load could be transformed into wider load. */
			if (class == BPF_LDX &&
			    aux[adj_idx].ptr_type == PTR_TO_CTX)
				continue;

			imm_rnd = get_random_int();
			rnd_hi32_patch[0] = insn;
			rnd_hi32_patch[1].imm = imm_rnd;
			rnd_hi32_patch[3].dst_reg = insn.dst_reg;
			patch = rnd_hi32_patch;
			patch_len = 4;
			goto apply_patch_buffer;
		}

		if (!bpf_jit_needs_zext())
			continue;

		zext_patch[0] = insn;
		zext_patch[1].dst_reg = insn.dst_reg;
		zext_patch[1].src_reg = insn.dst_reg;
		patch = zext_patch;
		patch_len = 2;
apply_patch_buffer:
		new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
		if (!new_prog)
			return -ENOMEM;
		env->prog = new_prog;
		insns = new_prog->insnsi;
		aux = env->insn_aux_data;
		delta += patch_len - 1;
	}

	return 0;
}
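/* Illustrative sketch: for a 32-bit def such as BPF_MOV32_IMM(BPF_REG_2, 1)
 * whose destination needs an explicit zero extension, the two-insn zext
 * patch becomes
 *
 *	BPF_MOV32_IMM(BPF_REG_2, 1)
 *	BPF_ZEXT_REG(BPF_REG_2)
 *
 * while under BPF_F_TEST_RND_HI32 the four-insn patch instead poisons the
 * upper half of the destination with a random value via BPF_REG_AX, to
 * flush out programs that silently rely on the high 32 bits being zero.
 */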
/* convert load instructions that access fields of a context type into a
 * sequence of instructions that access fields of the underlying structure:
 *     struct __sk_buff    -> struct sk_buff
 *     struct bpf_sock_ops -> struct sock
 */
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
	const struct bpf_verifier_ops *ops = env->ops;
	int i, cnt, size, ctx_field_size, delta = 0;
	const int insn_cnt = env->prog->len;
	struct bpf_insn insn_buf[16], *insn;
	u32 target_size, size_default, off;
	struct bpf_prog *new_prog;
	enum bpf_access_type type;
	bool is_narrower_load;

	if (ops->gen_prologue || env->seen_direct_write) {
		if (!ops->gen_prologue) {
			verbose(env, "bpf verifier is misconfigured\n");
			return -EINVAL;
		}
		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
					env->prog);
		if (cnt >= ARRAY_SIZE(insn_buf)) {
			verbose(env, "bpf verifier is misconfigured\n");
			return -EINVAL;
		} else if (cnt) {
			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			env->prog = new_prog;
			delta += cnt - 1;
		}
	}

	if (bpf_prog_is_dev_bound(env->prog->aux))
		return 0;

	insn = env->prog->insnsi + delta;

	for (i = 0; i < insn_cnt; i++, insn++) {
		bpf_convert_ctx_access_t convert_ctx_access;

		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
			type = BPF_READ;
		else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
			 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
			 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
			 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
			type = BPF_WRITE;
		else
			continue;

		if (type == BPF_WRITE &&
		    env->insn_aux_data[i + delta].sanitize_stack_off) {
			struct bpf_insn patch[] = {
				/* Sanitize suspicious stack slot with zero.
				 * There are no memory dependencies for this store,
				 * since it's only using frame pointer and immediate
				 * constant of zero
				 */
				BPF_ST_MEM(BPF_DW, BPF_REG_FP,
					   env->insn_aux_data[i + delta].sanitize_stack_off,
					   0),
				/* the original STX instruction will immediately
				 * overwrite the same stack slot with appropriate value
				 */
				*insn,
			};

			cnt = ARRAY_SIZE(patch);
			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta    += cnt - 1;
			env->prog = new_prog;
			insn      = new_prog->insnsi + i + delta;
			continue;
		}

		switch (env->insn_aux_data[i + delta].ptr_type) {
		case PTR_TO_CTX:
			if (!ops->convert_ctx_access)
				continue;
			convert_ctx_access = ops->convert_ctx_access;
			break;
		case PTR_TO_SOCKET:
		case PTR_TO_SOCK_COMMON:
			convert_ctx_access = bpf_sock_convert_ctx_access;
			break;
		case PTR_TO_TCP_SOCK:
			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
			break;
		case PTR_TO_XDP_SOCK:
			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
			break;
		case PTR_TO_BTF_ID:
			if (type == BPF_WRITE) {
				verbose(env, "Writes through BTF pointers are not allowed\n");
				return -EINVAL;
			}
			insn->code = BPF_LDX | BPF_PROBE_MEM | BPF_SIZE((insn)->code);
			env->prog->aux->num_exentries++;
			continue;
		default:
			continue;
		}

		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
		size = BPF_LDST_BYTES(insn);

		/* If the read access is a narrower load of the field,
		 * convert to a 4/8-byte load, to minimum program type specific
		 * convert_ctx_access changes. If conversion is successful,
		 * we will apply proper mask to the result.
		 */
		is_narrower_load = size < ctx_field_size;
		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
		off = insn->off;
		if (is_narrower_load) {
			u8 size_code;

			if (type == BPF_WRITE) {
				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
				return -EINVAL;
			}

			size_code = BPF_H;
			if (ctx_field_size == 4)
				size_code = BPF_W;
			else if (ctx_field_size == 8)
				size_code = BPF_DW;

			insn->off = off & ~(size_default - 1);
			insn->code = BPF_LDX | BPF_MEM | size_code;
		}

		target_size = 0;
		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
					 &target_size);
		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
		    (ctx_field_size && !target_size)) {
			verbose(env, "bpf verifier is misconfigured\n");
			return -EINVAL;
		}

		if (is_narrower_load && size < target_size) {
			u8 shift = bpf_ctx_narrow_access_offset(
				off, size, size_default) * 8;
			if (ctx_field_size <= 4) {
				if (shift)
					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
									insn->dst_reg,
									shift);
				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
								(1 << size * 8) - 1);
			} else {
				if (shift)
					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
									insn->dst_reg,
									shift);
				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
								(1ULL << size * 8) - 1);
			}
		}

		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
		if (!new_prog)
			return -ENOMEM;

		delta += cnt - 1;

		/* keep walking new program and skip insns we just inserted */
		env->prog = new_prog;
		insn      = new_prog->insnsi + i + delta;
	}

	return 0;
}
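/* Illustrative sketch: a 1-byte read of a 4-byte context field, e.g.
 *
 *	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, len))
 *
 * is first widened to a BPF_W load at the field-aligned offset, converted by
 * the program type's convert_ctx_access() callback into a load from the
 * underlying struct sk_buff, and then followed by the shift/mask sequence
 * emitted above so that only the originally requested byte is left in the
 * destination register.
 */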
static int jit_subprogs(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog, **func, *tmp;
	int i, j, subprog_start, subprog_end = 0, len, subprog;
	struct bpf_insn *insn;
	void *old_bpf_func;
	int err;

	if (env->subprog_cnt <= 1)
		return 0;

	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (insn->code != (BPF_JMP | BPF_CALL) ||
		    insn->src_reg != BPF_PSEUDO_CALL)
			continue;
		/* Upon error here we cannot fall back to interpreter but
		 * need a hard reject of the program. Thus -EFAULT is
		 * propagated in any case.
		 */
		subprog = find_subprog(env, i + insn->imm + 1);
		if (subprog < 0) {
			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
				  i + insn->imm + 1);
			return -EFAULT;
		}
		/* temporarily remember subprog id inside insn instead of
		 * aux_data, since next loop will split up all insns into funcs
		 */
		insn->off = subprog;
		/* remember original imm in case JIT fails and fallback
		 * to interpreter will be needed
		 */
		env->insn_aux_data[i].call_imm = insn->imm;
		/* point imm to __bpf_call_base+1 from JITs point of view */
		insn->imm = 1;
	}

	err = bpf_prog_alloc_jited_linfo(prog);
	if (err)
		goto out_undo_insn;

	err = -ENOMEM;
	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
	if (!func)
		goto out_undo_insn;

	for (i = 0; i < env->subprog_cnt; i++) {
		subprog_start = subprog_end;
		subprog_end = env->subprog_info[i + 1].start;

		len = subprog_end - subprog_start;
		/* BPF_PROG_RUN doesn't call subprogs directly,
		 * hence main prog stats include the runtime of subprogs.
		 * subprogs don't have IDs and not reachable via prog_get_next_id
		 * func[i]->aux->stats will never be accessed and stays NULL
		 */
		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
		if (!func[i])
			goto out_free;
		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
		       len * sizeof(struct bpf_insn));
		func[i]->type = prog->type;
		func[i]->len = len;
		if (bpf_prog_calc_tag(func[i]))
			goto out_free;
		func[i]->is_func = 1;
		func[i]->aux->func_idx = i;
		/* the btf and func_info will be freed only at prog->aux */
		func[i]->aux->btf = prog->aux->btf;
		func[i]->aux->func_info = prog->aux->func_info;

		/* Use bpf_prog_F_tag to indicate functions in stack traces.
		 * Long term would need debug info to populate names
		 */
		func[i]->aux->name[0] = 'F';
		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
		func[i]->jit_requested = 1;
		func[i]->aux->linfo = prog->aux->linfo;
		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
		func[i] = bpf_int_jit_compile(func[i]);
		if (!func[i]->jited) {
			err = -ENOTSUPP;
			goto out_free;
		}
	}

	/* at this point all bpf functions were successfully JITed
	 * now populate all bpf_calls with correct addresses and
	 * run last pass of JIT
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		insn = func[i]->insnsi;
		for (j = 0; j < func[i]->len; j++, insn++) {
			if (insn->code != (BPF_JMP | BPF_CALL) ||
			    insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			subprog = insn->off;
			insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
				    __bpf_call_base;
		}

		/* we use the aux data to keep a list of the start addresses
		 * of the JITed images for each function in the program
		 *
		 * for some architectures, such as powerpc64, the imm field
		 * might not be large enough to hold the offset of the start
		 * address of the callee's JITed image from __bpf_call_base
		 *
		 * in such cases, we can lookup the start address of a callee
		 * by using its subprog id, available from the off field of
		 * the call instruction, as an index for this list
		 */
		func[i]->aux->func = func;
		func[i]->aux->func_cnt = env->subprog_cnt;
	}
	for (i = 0; i < env->subprog_cnt; i++) {
		old_bpf_func = func[i]->bpf_func;
		tmp = bpf_int_jit_compile(func[i]);
		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
			err = -ENOTSUPP;
			goto out_free;
		}
	}

	/* finally lock prog and jit images for all functions and
	 * populate kallsyms
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		bpf_prog_lock_ro(func[i]);
		bpf_prog_kallsyms_add(func[i]);
	}

	/* Last step: make now unused interpreter insns from main
	 * prog consistent for later dump requests, so they can
	 * later look the same as if they were interpreted only.
	 */
	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (insn->code != (BPF_JMP | BPF_CALL) ||
		    insn->src_reg != BPF_PSEUDO_CALL)
			continue;
		insn->off = env->insn_aux_data[i].call_imm;
		subprog = find_subprog(env, i + insn->off + 1);
		insn->imm = subprog;
	}

	prog->jited = 1;
	prog->bpf_func = func[0]->bpf_func;
	prog->aux->func = func;
	prog->aux->func_cnt = env->subprog_cnt;
	bpf_prog_free_unused_jited_linfo(prog);
	return 0;
out_free:
	for (i = 0; i < env->subprog_cnt; i++)
		if (func[i])
			bpf_jit_free(func[i]);
	kfree(func);
out_undo_insn:
	/* cleanup main prog to be interpreted */
	prog->jit_requested = 0;
	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (insn->code != (BPF_JMP | BPF_CALL) ||
		    insn->src_reg != BPF_PSEUDO_CALL)
			continue;
		insn->off = 0;
		insn->imm = env->insn_aux_data[i].call_imm;
	}
	bpf_prog_free_jited_linfo(prog);
	return err;
}
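/* Illustrative sketch: inside each per-subprog copy produced above, a
 * bpf-to-bpf call that the verifier originally saw as, e.g.,
 * BPF_CALL_REL(+7) with src_reg == BPF_PSEUDO_CALL ends up carrying the
 * callee's JITed address relative to __bpf_call_base in imm, while off still
 * holds the callee's subprog index; an arch JIT that cannot encode the full
 * displacement can therefore fetch func[insn->off]->bpf_func from the aux
 * table populated in the loop above instead.
 */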
static int fixup_call_args(struct bpf_verifier_env *env)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	struct bpf_prog *prog = env->prog;
	struct bpf_insn *insn = prog->insnsi;
	int i, depth;
#endif
	int err = 0;

	if (env->prog->jit_requested &&
	    !bpf_prog_is_dev_bound(env->prog->aux)) {
		err = jit_subprogs(env);
		if (err == 0)
			return 0;
		if (err == -EFAULT)
			return err;
	}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	for (i = 0; i < prog->len; i++, insn++) {
		if (insn->code != (BPF_JMP | BPF_CALL) ||
		    insn->src_reg != BPF_PSEUDO_CALL)
			continue;
		depth = get_callee_stack_depth(env, insn, i);
		if (depth < 0)
			return depth;
		bpf_patch_call_args(insn, depth);
	}
	err = 0;
#endif
	return err;
}
/* fixup insn->imm field of bpf_call instructions
 * and inline eligible helpers as explicit sequence of BPF instructions
 *
 * this function is called after eBPF program passed verification
 */
static int fixup_bpf_calls(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog;
	bool expect_blinding = bpf_jit_blinding_enabled(prog);
	struct bpf_insn *insn = prog->insnsi;
	const struct bpf_func_proto *fn;
	const int insn_cnt = prog->len;
	const struct bpf_map_ops *ops;
	struct bpf_insn_aux_data *aux;
	struct bpf_insn insn_buf[16];
	struct bpf_prog *new_prog;
	struct bpf_map *map_ptr;
	int i, ret, cnt, delta = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
			struct bpf_insn mask_and_div[] = {
				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
				/* Rx div 0 -> 0 */
				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
				*insn,
			};
			struct bpf_insn mask_and_mod[] = {
				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
				/* Rx mod 0 -> Rx */
				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
				*insn,
			};
			struct bpf_insn *patchlet;

			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
				patchlet = mask_and_div + (is64 ? 1 : 0);
				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
			} else {
				patchlet = mask_and_mod + (is64 ? 1 : 0);
				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
			}

			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta    += cnt - 1;
			env->prog = prog = new_prog;
			insn      = new_prog->insnsi + i + delta;
			continue;
		}

		if (BPF_CLASS(insn->code) == BPF_LD &&
		    (BPF_MODE(insn->code) == BPF_ABS ||
		     BPF_MODE(insn->code) == BPF_IND)) {
			cnt = env->ops->gen_ld_abs(insn, insn_buf);
			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
				verbose(env, "bpf verifier is misconfigured\n");
				return -EINVAL;
			}

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta    += cnt - 1;
			env->prog = prog = new_prog;
			insn      = new_prog->insnsi + i + delta;
			continue;
		}

		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
			struct bpf_insn insn_buf[16];
			struct bpf_insn *patch = &insn_buf[0];
			bool issrc, isneg;
			u32 off_reg;

			aux = &env->insn_aux_data[i + delta];
			if (!aux->alu_state ||
			    aux->alu_state == BPF_ALU_NON_POINTER)
				continue;

			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
				BPF_ALU_SANITIZE_SRC;

			off_reg = issrc ? insn->src_reg : insn->dst_reg;
			if (isneg)
				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
			*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
			*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
			*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
			*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
			if (issrc) {
				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
							 off_reg);
				insn->src_reg = BPF_REG_AX;
			} else {
				*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
							 BPF_REG_AX);
			}
			if (isneg)
				insn->code = insn->code == code_add ?
					     code_sub : code_add;
			*patch++ = *insn;
			if (issrc && isneg)
				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
			cnt = patch - insn_buf;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta    += cnt - 1;
			env->prog = prog = new_prog;
			insn      = new_prog->insnsi + i + delta;
			continue;
		}

		if (insn->code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn->src_reg == BPF_PSEUDO_CALL)
			continue;

		if (insn->imm == BPF_FUNC_get_route_realm)
			prog->dst_needed = 1;
		if (insn->imm == BPF_FUNC_get_prandom_u32)
			bpf_user_rnd_init_once();
		if (insn->imm == BPF_FUNC_override_return)
			prog->kprobe_override = 1;
		if (insn->imm == BPF_FUNC_tail_call) {
			/* If we tail call into other programs, we
			 * cannot make any assumptions since they can
			 * be replaced dynamically during runtime in
			 * the program array.
			 */
			prog->cb_access = 1;
			env->prog->aux->stack_depth = MAX_BPF_STACK;
			env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;

			/* mark bpf_tail_call as different opcode to avoid
			 * conditional branch in the interpeter for every normal
			 * call and to prevent accidental JITing by JIT compiler
			 * that doesn't support bpf_tail_call yet
			 */
			insn->imm = 0;
			insn->code = BPF_JMP | BPF_TAIL_CALL;

			aux = &env->insn_aux_data[i + delta];
			if (prog->jit_requested && !expect_blinding &&
			    !bpf_map_key_poisoned(aux) &&
			    !bpf_map_ptr_poisoned(aux) &&
			    !bpf_map_ptr_unpriv(aux)) {
				struct bpf_jit_poke_descriptor desc = {
					.reason = BPF_POKE_REASON_TAIL_CALL,
					.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
					.tail_call.key = bpf_map_key_immediate(aux),
				};

				ret = bpf_jit_add_poke_descriptor(prog, &desc);
				if (ret < 0) {
					verbose(env, "adding tail call poke descriptor failed\n");
					return ret;
				}

				insn->imm = ret + 1;
				continue;
			}

			if (!bpf_map_ptr_unpriv(aux))
				continue;

			/* instead of changing every JIT dealing with tail_call
			 * emit two extra insns:
			 * if (index >= max_entries) goto out;
			 * index &= array->index_mask;
			 * to avoid out-of-bounds cpu speculation
			 */
			if (bpf_map_ptr_poisoned(aux)) {
				verbose(env, "tail_call abusing map_ptr\n");
				return -EINVAL;
			}

			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
						  map_ptr->max_entries, 2);
			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
						    container_of(map_ptr,
								 struct bpf_array,
								 map)->index_mask);
			insn_buf[2] = *insn;
			cnt = 3;
			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta    += cnt - 1;
			env->prog = prog = new_prog;
			insn      = new_prog->insnsi + i + delta;
			continue;
		}

		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
		 * and other inlining handlers are currently limited to 64 bit
		 * only.
		 */
		if (prog->jit_requested && BITS_PER_LONG == 64 &&
		    (insn->imm == BPF_FUNC_map_lookup_elem ||
		     insn->imm == BPF_FUNC_map_update_elem ||
		     insn->imm == BPF_FUNC_map_delete_elem ||
		     insn->imm == BPF_FUNC_map_push_elem   ||
		     insn->imm == BPF_FUNC_map_pop_elem    ||
		     insn->imm == BPF_FUNC_map_peek_elem)) {
			aux = &env->insn_aux_data[i + delta];
			if (bpf_map_ptr_poisoned(aux))
				goto patch_call_imm;

			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
			ops = map_ptr->ops;
			if (insn->imm == BPF_FUNC_map_lookup_elem &&
			    ops->map_gen_lookup) {
				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
				if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
					verbose(env, "bpf verifier is misconfigured\n");
					return -EINVAL;
				}

				new_prog = bpf_patch_insn_data(env, i + delta,
							       insn_buf, cnt);
				if (!new_prog)
					return -ENOMEM;

				delta    += cnt - 1;
				env->prog = prog = new_prog;
				insn      = new_prog->insnsi + i + delta;
				continue;
			}

			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
				     (void *(*)(struct bpf_map *map, void *key))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
				     (int (*)(struct bpf_map *map, void *key))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
				     (int (*)(struct bpf_map *map, void *key, void *value,
					      u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
				     (int (*)(struct bpf_map *map, void *value,
					      u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
				     (int (*)(struct bpf_map *map, void *value))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
				     (int (*)(struct bpf_map *map, void *value))NULL));

			switch (insn->imm) {
			case BPF_FUNC_map_lookup_elem:
				insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_update_elem:
				insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_delete_elem:
				insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_push_elem:
				insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_pop_elem:
				insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_peek_elem:
				insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
					    __bpf_call_base;
				continue;
			}

			goto patch_call_imm;
		}

patch_call_imm:
		fn = env->ops->get_func_proto(insn->imm, env->prog);
		/* all functions that have prototype and verifier allowed
		 * programs to call them, must be real in-kernel functions
		 */
		if (!fn->func) {
			verbose(env,
				"kernel subsystem misconfigured func %s#%d\n",
				func_id_name(insn->imm), insn->imm);
			return -EFAULT;
		}
		insn->imm = fn->func - __bpf_call_base;
	}

	/* Since poke tab is now finalized, publish aux to tracker. */
	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		map_ptr = prog->aux->poke_tab[i].tail_call.map;
		if (!map_ptr->ops->map_poke_track ||
		    !map_ptr->ops->map_poke_untrack ||
		    !map_ptr->ops->map_poke_run) {
			verbose(env, "bpf verifier is misconfigured\n");
			return -EINVAL;
		}

		ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
		if (ret < 0) {
			verbose(env, "tracking tail call prog failed\n");
			return ret;
		}
	}

	return 0;
}
static void free_states(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state_list *sl, *sln;
	int i;

	sl = env->free_list;
	while (sl) {
		sln = sl->next;
		free_verifier_state(&sl->state, false);
		kfree(sl);
		sl = sln;
	}

	if (!env->explored_states)
		return;

	for (i = 0; i < state_htab_size(env); i++) {
		sl = env->explored_states[i];

		while (sl) {
			sln = sl->next;
			free_verifier_state(&sl->state, false);
			kfree(sl);
			sl = sln;
		}
	}

	kvfree(env->explored_states);
}
static void print_verification_stats(struct bpf_verifier_env *env)
{
	int i;

	if (env->log.level & BPF_LOG_STATS) {
		verbose(env, "verification time %lld usec\n",
			div_u64(env->verification_time, 1000));
		verbose(env, "stack depth ");
		for (i = 0; i < env->subprog_cnt; i++) {
			u32 depth = env->subprog_info[i].stack_depth;

			verbose(env, "%d", depth);
			if (i + 1 < env->subprog_cnt)
				verbose(env, "+");
		}
		verbose(env, "\n");
	}
	verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
		"total_states %d peak_states %d mark_read %d\n",
		env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
		env->max_states_per_insn, env->total_states,
		env->peak_states, env->longest_mark_read_walk);
}
static int check_attach_btf_id(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog;
	struct bpf_prog *tgt_prog = prog->aux->linked_prog;
	u32 btf_id = prog->aux->attach_btf_id;
	const char prefix[] = "btf_trace_";
	int ret = 0, subprog = -1, i;
	struct bpf_trampoline *tr;
	const struct btf_type *t;
	bool conservative = true;
	const char *tname;
	struct btf *btf;
	long addr;
	u64 key;

	if (prog->type != BPF_PROG_TYPE_TRACING)
		return 0;

	if (!btf_id) {
		verbose(env, "Tracing programs must provide btf_id\n");
		return -EINVAL;
	}
	btf = bpf_prog_get_target_btf(prog);
	if (!btf) {
		verbose(env,
			"FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
		return -EINVAL;
	}
	t = btf_type_by_id(btf, btf_id);
	if (!t) {
		verbose(env, "attach_btf_id %u is invalid\n", btf_id);
		return -EINVAL;
	}
	tname = btf_name_by_offset(btf, t->name_off);
	if (!tname) {
		verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id);
		return -EINVAL;
	}
	if (tgt_prog) {
		struct bpf_prog_aux *aux = tgt_prog->aux;

		for (i = 0; i < aux->func_info_cnt; i++)
			if (aux->func_info[i].type_id == btf_id) {
				subprog = i;
				break;
			}
		if (subprog == -1) {
			verbose(env, "Subprog %s doesn't exist\n", tname);
			return -EINVAL;
		}
		conservative = aux->func_info_aux[subprog].unreliable;
		key = ((u64)aux->id) << 32 | btf_id;
	} else {
		key = btf_id;
	}

	switch (prog->expected_attach_type) {
	case BPF_TRACE_RAW_TP:
		if (tgt_prog) {
			verbose(env,
				"Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
			return -EINVAL;
		}
		if (!btf_type_is_typedef(t)) {
			verbose(env, "attach_btf_id %u is not a typedef\n",
				btf_id);
			return -EINVAL;
		}
		if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
			verbose(env, "attach_btf_id %u points to wrong type name %s\n",
				btf_id, tname);
			return -EINVAL;
		}
		tname += sizeof(prefix) - 1;
		t = btf_type_by_id(btf, t->type);
		if (!btf_type_is_ptr(t))
			/* should never happen in valid vmlinux build */
			return -EINVAL;
		t = btf_type_by_id(btf, t->type);
		if (!btf_type_is_func_proto(t))
			/* should never happen in valid vmlinux build */
			return -EINVAL;

		/* remember two read only pointers that are valid for
		 * the life time of the kernel
		 */
		prog->aux->attach_func_name = tname;
		prog->aux->attach_func_proto = t;
		prog->aux->attach_btf_trace = true;
		return 0;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (!btf_type_is_func(t)) {
			verbose(env, "attach_btf_id %u is not a function\n",
				btf_id);
			return -EINVAL;
		}
		t = btf_type_by_id(btf, t->type);
		if (!btf_type_is_func_proto(t))
			return -EINVAL;
		tr = bpf_trampoline_lookup(key);
		if (!tr)
			return -ENOMEM;
		prog->aux->attach_func_name = tname;
		/* t is either vmlinux type or another program's type */
		prog->aux->attach_func_proto = t;
		mutex_lock(&tr->mutex);
		if (tr->func.addr) {
			prog->aux->trampoline = tr;
			goto out;
		}
		if (tgt_prog && conservative) {
			prog->aux->attach_func_proto = NULL;
			t = NULL;
		}
		ret = btf_distill_func_proto(&env->log, btf, t,
					     tname, &tr->func.model);
		if (ret < 0)
			goto out;
		if (tgt_prog) {
			if (!tgt_prog->jited) {
				verbose(env, "Can trace only JITed BPF progs\n");
				ret = -EINVAL;
				goto out;
			}
			if (tgt_prog->type == BPF_PROG_TYPE_TRACING) {
				/* prevent cycles */
				verbose(env, "Cannot recursively attach\n");
				ret = -EINVAL;
				goto out;
			}
			addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
		} else {
			addr = kallsyms_lookup_name(tname);
			if (!addr) {
				verbose(env,
					"The address of function %s cannot be found\n",
					tname);
				ret = -ENOENT;
				goto out;
			}
		}
		tr->func.addr = (void *)addr;
		prog->aux->trampoline = tr;
out:
		mutex_unlock(&tr->mutex);
		if (ret)
			bpf_trampoline_put(tr);
		return ret;
	default:
		return -EINVAL;
	}
}
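/* Illustrative sketch: for a BPF_TRACE_RAW_TP program attaching to the
 * sched_switch tracepoint, userspace passes the BTF id of the
 * 'btf_trace_sched_switch' typedef; the BPF_TRACE_RAW_TP case above strips
 * the "btf_trace_" prefix, follows the typedef to the underlying function
 * prototype and saves it in prog->aux->attach_func_proto for later argument
 * checking.
 */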
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
	      union bpf_attr __user *uattr)
{
	u64 start_time = ktime_get_ns();
	struct bpf_verifier_env *env;
	struct bpf_verifier_log *log;
	int i, len, ret = -EINVAL;
	bool is_priv;

	/* no program is valid */
	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
		return -EINVAL;

	/* 'struct bpf_verifier_env' can be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;
	log = &env->log;

	len = (*prog)->len;
	env->insn_aux_data =
		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
	ret = -ENOMEM;
	if (!env->insn_aux_data)
		goto err_free_env;
	for (i = 0; i < len; i++)
		env->insn_aux_data[i].orig_idx = i;
	env->prog = *prog;
	env->ops = bpf_verifier_ops[env->prog->type];
	is_priv = capable(CAP_SYS_ADMIN);

	if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
		mutex_lock(&bpf_verifier_lock);
		if (!btf_vmlinux)
			btf_vmlinux = btf_parse_vmlinux();
		mutex_unlock(&bpf_verifier_lock);
	}

	/* grab the mutex to protect few globals used by verifier */
	if (!is_priv)
		mutex_lock(&bpf_verifier_lock);

	if (attr->log_level || attr->log_buf || attr->log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log->level = attr->log_level;
		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
		log->len_total = attr->log_size;

		ret = -EINVAL;
		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
		    !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
			goto err_unlock;
	}

	if (IS_ERR(btf_vmlinux)) {
		/* Either gcc or pahole or kernel are broken. */
		verbose(env, "in-kernel BTF is malformed\n");
		ret = PTR_ERR(btf_vmlinux);
		goto skip_full_check;
	}

	ret = check_attach_btf_id(env);
	if (ret)
		goto skip_full_check;

	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		env->strict_alignment = true;
	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
		env->strict_alignment = false;

	env->allow_ptr_leaks = is_priv;

	if (is_priv)
		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;

	ret = replace_map_fd_with_map_ptr(env);
	if (ret < 0)
		goto skip_full_check;

	if (bpf_prog_is_dev_bound(env->prog->aux)) {
		ret = bpf_prog_offload_verifier_prep(env->prog);
		if (ret)
			goto skip_full_check;
	}

	env->explored_states = kvcalloc(state_htab_size(env),
				       sizeof(struct bpf_verifier_state_list *),
				       GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	ret = check_subprogs(env);
	if (ret < 0)
		goto skip_full_check;

	ret = check_btf_info(env, attr, uattr);
	if (ret < 0)
		goto skip_full_check;

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	ret = do_check(env);
	if (env->cur_state) {
		free_verifier_state(env->cur_state, true);
		env->cur_state = NULL;
	}

	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
		ret = bpf_prog_offload_finalize(env);

skip_full_check:
	while (!pop_stack(env, NULL, NULL));
	free_states(env);

	if (ret == 0)
		ret = check_max_stack_depth(env);

	/* instruction rewrites happen after this point */
	if (is_priv) {
		if (ret == 0)
			opt_hard_wire_dead_code_branches(env);
		if (ret == 0)
			ret = opt_remove_dead_code(env);
		if (ret == 0)
			ret = opt_remove_nops(env);
	} else {
		if (ret == 0)
			sanitize_dead_code(env);
	}

	if (ret == 0)
		/* program is valid, convert *(u32*)(ctx + off) accesses */
		ret = convert_ctx_accesses(env);

	if (ret == 0)
		ret = fixup_bpf_calls(env);

	/* do 32-bit optimization after insn patching has done so those patched
	 * insns could be handled correctly.
	 */
	if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
								     : false;
	}

	if (ret == 0)
		ret = fixup_call_args(env);

	env->verification_time = ktime_get_ns() - start_time;
	print_verification_stats(env);

	if (log->level && bpf_verifier_log_full(log))
		ret = -ENOSPC;
	if (log->level && !log->ubuf) {
		ret = -EFAULT;
		goto err_release_maps;
	}

	if (ret == 0 && env->used_map_cnt) {
		/* if program passed verifier, update used_maps in bpf_prog_info */
		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
							  sizeof(env->used_maps[0]),
							  GFP_KERNEL);

		if (!env->prog->aux->used_maps) {
			ret = -ENOMEM;
			goto err_release_maps;
		}

		memcpy(env->prog->aux->used_maps, env->used_maps,
		       sizeof(env->used_maps[0]) * env->used_map_cnt);
		env->prog->aux->used_map_cnt = env->used_map_cnt;

		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
		 * bpf_ld_imm64 instructions
		 */
		convert_pseudo_ld_imm64(env);
	}

	if (ret == 0)
		adjust_btf_func(env);

err_release_maps:
	if (!env->prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_info, release
		 * them now. Otherwise free_used_maps() will release them.
		 */
		release_maps(env);
	*prog = env->prog;
err_unlock:
	if (!is_priv)
		mutex_unlock(&bpf_verifier_lock);
	vfree(env->insn_aux_data);