[cor.git] / kernel / bpf / verifier.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016 Facebook
4 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
5 */
6 #include <uapi/linux/btf.h>
7 #include <linux/kernel.h>
8 #include <linux/types.h>
9 #include <linux/slab.h>
10 #include <linux/bpf.h>
11 #include <linux/btf.h>
12 #include <linux/bpf_verifier.h>
13 #include <linux/filter.h>
14 #include <net/netlink.h>
15 #include <linux/file.h>
16 #include <linux/vmalloc.h>
17 #include <linux/stringify.h>
18 #include <linux/bsearch.h>
19 #include <linux/sort.h>
20 #include <linux/perf_event.h>
21 #include <linux/ctype.h>
23 #include "disasm.h"
25 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
26 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
27 [_id] = & _name ## _verifier_ops,
28 #define BPF_MAP_TYPE(_id, _ops)
29 #include <linux/bpf_types.h>
30 #undef BPF_PROG_TYPE
31 #undef BPF_MAP_TYPE
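/* For illustration (a sketch, not in the original file): given the entries in
 * linux/bpf_types.h, each BPF_PROG_TYPE() line above expands to roughly
 *
 *	[BPF_PROG_TYPE_SOCKET_FILTER] = &sk_filter_verifier_ops,
 *
 * so the array maps every program type to its per-type verifier callbacks,
 * while the BPF_MAP_TYPE() entries are deliberately expanded to nothing here.
 */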
34 /* bpf_check() is a static code analyzer that walks eBPF program
35 * instruction by instruction and updates register/stack state.
36 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
38 * The first pass is depth-first-search to check that the program is a DAG.
39 * It rejects the following programs:
40 * - larger than BPF_MAXINSNS insns
41 * - if loop is present (detected via back-edge)
42 * - unreachable insns exist (shouldn't be a forest. program = one function)
43 * - out of bounds or malformed jumps
44 * The second pass is all possible path descent from the 1st insn.
45 * Since it's analyzing all paths through the program, the length of the
46 * analysis is limited to 64k insn, which may be hit even if the total number of
47 * insn is less than 4K, but there are too many branches that change stack/regs.
48 * Number of 'branches to be analyzed' is limited to 1k
50 * On entry to each instruction, each register has a type, and the instruction
51 * changes the types of the registers depending on instruction semantics.
52 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
53 * copied to R1.
55 * All registers are 64-bit.
56 * R0 - return register
57 * R1-R5 argument passing registers
58 * R6-R9 callee saved registers
59 * R10 - frame pointer read-only
61 * At the start of BPF program the register R1 contains a pointer to bpf_context
62 * and has type PTR_TO_CTX.
64 * Verifier tracks arithmetic operations on pointers in case:
65 * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
66 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
67 * 1st insn copies R10 (which has FRAME_PTR) type into R1
68 * and 2nd arithmetic instruction is pattern matched to recognize
69 * that it wants to construct a pointer to some element within stack.
70 * So after 2nd insn, the register R1 has type PTR_TO_STACK
71 * (and -20 constant is saved for further stack bounds checking).
72 * Meaning that this reg is a pointer to stack plus known immediate constant.
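 *
 * For example, a later store through that pointer, such as
 *   BPF_ST_MEM(BPF_W, BPF_REG_1, 0, 0),
 * writes 4 bytes at fp-20 and is checked against the saved -20 offset, so the
 * access must stay within the 512-byte (MAX_BPF_STACK) program stack.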
74 * Most of the time the registers have SCALAR_VALUE type, which
75 * means the register has some value, but it's not a valid pointer.
76 * (like pointer plus pointer becomes SCALAR_VALUE type)
78 * When verifier sees load or store instructions the type of base register
79 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
80 * four pointer types recognized by the check_mem_access() function.
82 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
83 * and the range of [ptr, ptr + map's value_size) is accessible.
85 * registers used to pass values to function calls are checked against
86 * function argument constraints.
88 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
89 * It means that the register type passed to this function must be
90 * PTR_TO_STACK and it will be used inside the function as
91 * 'pointer to map element key'
93 * For example the argument constraints for bpf_map_lookup_elem():
94 * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
95 * .arg1_type = ARG_CONST_MAP_PTR,
96 * .arg2_type = ARG_PTR_TO_MAP_KEY,
98 * ret_type says that this function returns 'pointer to map elem value or null'
99 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
100 * 2nd argument should be a pointer to stack, which will be used inside
101 * the helper function as a pointer to map element key.
103 * On the kernel side the helper function looks like:
104 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
106 * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
107 * void *key = (void *) (unsigned long) r2;
108 * void *value;
110 * here kernel can access 'key' and 'map' pointers safely, knowing that
111 * [key, key + map->key_size) bytes are valid and were initialized on
112 * the stack of eBPF program.
115 * Corresponding eBPF program may look like:
116 * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
117 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
118 * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
119 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
120 * here verifier looks at prototype of map_lookup_elem() and sees:
121 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
122 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
124 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
125 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
126 * and were initialized prior to this call.
127 * If it's ok, then verifier allows this BPF_CALL insn and looks at
128 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
129 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
130 * returns either a pointer to map value or NULL.
132 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
133 * insn, the register holding that pointer in the true branch changes state to
134 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
135 * branch. See check_cond_jmp_op().
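 *
 * A typical continuation of the example above (a sketch) would be:
 *   BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),   // if R0 == NULL, skip the access
 *   BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),    // here R0 type is PTR_TO_MAP_VALUE
 *   BPF_MOV64_IMM(BPF_REG_0, 0),
 *   BPF_EXIT_INSN(),
 * so the map value is only dereferenced on the non-NULL branch.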
137 * After the call R0 is set to return type of the function and registers R1-R5
138 * are set to NOT_INIT to indicate that they are no longer readable.
140 * The following reference types represent a potential reference to a kernel
141 * resource which, after first being allocated, must be checked and freed by
142 * the BPF program:
143 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
145 * When the verifier sees a helper call return a reference type, it allocates a
146 * pointer id for the reference and stores it in the current function state.
147 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
148 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
149 * passes through a NULL-check conditional. For the branch wherein the state is
150 * changed to CONST_IMM, the verifier releases the reference.
152 * For each helper function that allocates a reference, such as
153 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
154 * bpf_sk_release(). When a reference type passes into the release function,
155 * the verifier also releases the reference. If any unchecked or unreleased
156 * reference remains at the end of the program, the verifier rejects it.
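 *
 * As an illustrative sketch (in BPF C, e.g. inside a TC classifier, with
 * 'tuple' being a local struct bpf_sock_tuple), a program that satisfies
 * these rules follows the pattern:
 *
 *   struct bpf_sock *sk;
 *
 *   sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), BPF_F_CURRENT_NETNS, 0);
 *   if (!sk)                // NULL check: PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET
 *       return TC_ACT_OK;
 *   ...                     // fields of sk may be read here
 *   bpf_sk_release(sk);     // reference released, nothing leaks at exit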
159 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
160 struct bpf_verifier_stack_elem {
161 /* verifier state is 'st'
162 * before processing instruction 'insn_idx'
163 * and after processing instruction 'prev_insn_idx'
165 struct bpf_verifier_state st;
166 int insn_idx;
167 int prev_insn_idx;
168 struct bpf_verifier_stack_elem *next;
171 #define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192
172 #define BPF_COMPLEXITY_LIMIT_STATES 64
174 #define BPF_MAP_KEY_POISON (1ULL << 63)
175 #define BPF_MAP_KEY_SEEN (1ULL << 62)
177 #define BPF_MAP_PTR_UNPRIV 1UL
178 #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
179 POISON_POINTER_DELTA))
180 #define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
182 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
184 return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
187 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
189 return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
192 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
193 const struct bpf_map *map, bool unpriv)
195 BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
196 unpriv |= bpf_map_ptr_unpriv(aux);
197 aux->map_ptr_state = (unsigned long)map |
198 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
201 static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
203 return aux->map_key_state & BPF_MAP_KEY_POISON;
206 static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
208 return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
211 static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
213 return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
216 static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
218 bool poisoned = bpf_map_key_poisoned(aux);
220 aux->map_key_state = state | BPF_MAP_KEY_SEEN |
221 (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
224 struct bpf_call_arg_meta {
225 struct bpf_map *map_ptr;
226 bool raw_mode;
227 bool pkt_access;
228 int regno;
229 int access_size;
230 s64 msize_smax_value;
231 u64 msize_umax_value;
232 int ref_obj_id;
233 int func_id;
234 u32 btf_id;
237 struct btf *btf_vmlinux;
239 static DEFINE_MUTEX(bpf_verifier_lock);
241 static const struct bpf_line_info *
242 find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
244 const struct bpf_line_info *linfo;
245 const struct bpf_prog *prog;
246 u32 i, nr_linfo;
248 prog = env->prog;
249 nr_linfo = prog->aux->nr_linfo;
251 if (!nr_linfo || insn_off >= prog->len)
252 return NULL;
254 linfo = prog->aux->linfo;
255 for (i = 1; i < nr_linfo; i++)
256 if (insn_off < linfo[i].insn_off)
257 break;
259 return &linfo[i - 1];
262 void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
263 va_list args)
265 unsigned int n;
267 n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
269 WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
270 "verifier log line truncated - local buffer too short\n");
272 n = min(log->len_total - log->len_used - 1, n);
273 log->kbuf[n] = '\0';
275 if (log->level == BPF_LOG_KERNEL) {
276 pr_err("BPF:%s\n", log->kbuf);
277 return;
279 if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
280 log->len_used += n;
281 else
282 log->ubuf = NULL;
285 /* log_level controls verbosity level of eBPF verifier.
286 * bpf_verifier_log_write() is used to dump the verification trace to the log,
287 * so the user can figure out what's wrong with the program
289 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
290 const char *fmt, ...)
292 va_list args;
294 if (!bpf_verifier_log_needed(&env->log))
295 return;
297 va_start(args, fmt);
298 bpf_verifier_vlog(&env->log, fmt, args);
299 va_end(args);
301 EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
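/* For context (an illustrative sketch, not part of this file): user space
 * receives this log by pointing BPF_PROG_LOAD at a buffer, e.g. via the raw
 * syscall, where 'insns' and 'insn_cnt' stand for the program being loaded:
 *
 *	char vlog[64 * 1024];
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.insn_cnt  = insn_cnt,
 *		.license   = (__u64)(unsigned long)"GPL",
 *		.log_level = 2,
 *		.log_buf   = (__u64)(unsigned long)vlog,
 *		.log_size  = sizeof(vlog),
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * On a verification failure, vlog holds the messages emitted above.
 */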
303 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
305 struct bpf_verifier_env *env = private_data;
306 va_list args;
308 if (!bpf_verifier_log_needed(&env->log))
309 return;
311 va_start(args, fmt);
312 bpf_verifier_vlog(&env->log, fmt, args);
313 va_end(args);
316 __printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
317 const char *fmt, ...)
319 va_list args;
321 if (!bpf_verifier_log_needed(log))
322 return;
324 va_start(args, fmt);
325 bpf_verifier_vlog(log, fmt, args);
326 va_end(args);
329 static const char *ltrim(const char *s)
331 while (isspace(*s))
332 s++;
334 return s;
337 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
338 u32 insn_off,
339 const char *prefix_fmt, ...)
341 const struct bpf_line_info *linfo;
343 if (!bpf_verifier_log_needed(&env->log))
344 return;
346 linfo = find_linfo(env, insn_off);
347 if (!linfo || linfo == env->prev_linfo)
348 return;
350 if (prefix_fmt) {
351 va_list args;
353 va_start(args, prefix_fmt);
354 bpf_verifier_vlog(&env->log, prefix_fmt, args);
355 va_end(args);
358 verbose(env, "%s\n",
359 ltrim(btf_name_by_offset(env->prog->aux->btf,
360 linfo->line_off)));
362 env->prev_linfo = linfo;
365 static bool type_is_pkt_pointer(enum bpf_reg_type type)
367 return type == PTR_TO_PACKET ||
368 type == PTR_TO_PACKET_META;
371 static bool type_is_sk_pointer(enum bpf_reg_type type)
373 return type == PTR_TO_SOCKET ||
374 type == PTR_TO_SOCK_COMMON ||
375 type == PTR_TO_TCP_SOCK ||
376 type == PTR_TO_XDP_SOCK;
379 static bool reg_type_may_be_null(enum bpf_reg_type type)
381 return type == PTR_TO_MAP_VALUE_OR_NULL ||
382 type == PTR_TO_SOCKET_OR_NULL ||
383 type == PTR_TO_SOCK_COMMON_OR_NULL ||
384 type == PTR_TO_TCP_SOCK_OR_NULL;
387 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
389 return reg->type == PTR_TO_MAP_VALUE &&
390 map_value_has_spin_lock(reg->map_ptr);
393 static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
395 return type == PTR_TO_SOCKET ||
396 type == PTR_TO_SOCKET_OR_NULL ||
397 type == PTR_TO_TCP_SOCK ||
398 type == PTR_TO_TCP_SOCK_OR_NULL;
401 static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
403 return type == ARG_PTR_TO_SOCK_COMMON;
406 /* Determine whether the function releases some resources allocated by another
407 * function call. The first reference type argument will be assumed to be
408 * released by release_reference().
410 static bool is_release_function(enum bpf_func_id func_id)
412 return func_id == BPF_FUNC_sk_release;
415 static bool is_acquire_function(enum bpf_func_id func_id)
417 return func_id == BPF_FUNC_sk_lookup_tcp ||
418 func_id == BPF_FUNC_sk_lookup_udp ||
419 func_id == BPF_FUNC_skc_lookup_tcp;
422 static bool is_ptr_cast_function(enum bpf_func_id func_id)
424 return func_id == BPF_FUNC_tcp_sock ||
425 func_id == BPF_FUNC_sk_fullsock;
428 /* string representation of 'enum bpf_reg_type' */
429 static const char * const reg_type_str[] = {
430 [NOT_INIT] = "?",
431 [SCALAR_VALUE] = "inv",
432 [PTR_TO_CTX] = "ctx",
433 [CONST_PTR_TO_MAP] = "map_ptr",
434 [PTR_TO_MAP_VALUE] = "map_value",
435 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
436 [PTR_TO_STACK] = "fp",
437 [PTR_TO_PACKET] = "pkt",
438 [PTR_TO_PACKET_META] = "pkt_meta",
439 [PTR_TO_PACKET_END] = "pkt_end",
440 [PTR_TO_FLOW_KEYS] = "flow_keys",
441 [PTR_TO_SOCKET] = "sock",
442 [PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
443 [PTR_TO_SOCK_COMMON] = "sock_common",
444 [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
445 [PTR_TO_TCP_SOCK] = "tcp_sock",
446 [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
447 [PTR_TO_TP_BUFFER] = "tp_buffer",
448 [PTR_TO_XDP_SOCK] = "xdp_sock",
449 [PTR_TO_BTF_ID] = "ptr_",
452 static char slot_type_char[] = {
453 [STACK_INVALID] = '?',
454 [STACK_SPILL] = 'r',
455 [STACK_MISC] = 'm',
456 [STACK_ZERO] = '0',
459 static void print_liveness(struct bpf_verifier_env *env,
460 enum bpf_reg_liveness live)
462 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
463 verbose(env, "_");
464 if (live & REG_LIVE_READ)
465 verbose(env, "r");
466 if (live & REG_LIVE_WRITTEN)
467 verbose(env, "w");
468 if (live & REG_LIVE_DONE)
469 verbose(env, "D");
472 static struct bpf_func_state *func(struct bpf_verifier_env *env,
473 const struct bpf_reg_state *reg)
475 struct bpf_verifier_state *cur = env->cur_state;
477 return cur->frame[reg->frameno];
480 const char *kernel_type_name(u32 id)
482 return btf_name_by_offset(btf_vmlinux,
483 btf_type_by_id(btf_vmlinux, id)->name_off);
486 static void print_verifier_state(struct bpf_verifier_env *env,
487 const struct bpf_func_state *state)
489 const struct bpf_reg_state *reg;
490 enum bpf_reg_type t;
491 int i;
493 if (state->frameno)
494 verbose(env, " frame%d:", state->frameno);
495 for (i = 0; i < MAX_BPF_REG; i++) {
496 reg = &state->regs[i];
497 t = reg->type;
498 if (t == NOT_INIT)
499 continue;
500 verbose(env, " R%d", i);
501 print_liveness(env, reg->live);
502 verbose(env, "=%s", reg_type_str[t]);
503 if (t == SCALAR_VALUE && reg->precise)
504 verbose(env, "P");
505 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
506 tnum_is_const(reg->var_off)) {
507 /* reg->off should be 0 for SCALAR_VALUE */
508 verbose(env, "%lld", reg->var_off.value + reg->off);
509 } else {
510 if (t == PTR_TO_BTF_ID)
511 verbose(env, "%s", kernel_type_name(reg->btf_id));
512 verbose(env, "(id=%d", reg->id);
513 if (reg_type_may_be_refcounted_or_null(t))
514 verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
515 if (t != SCALAR_VALUE)
516 verbose(env, ",off=%d", reg->off);
517 if (type_is_pkt_pointer(t))
518 verbose(env, ",r=%d", reg->range);
519 else if (t == CONST_PTR_TO_MAP ||
520 t == PTR_TO_MAP_VALUE ||
521 t == PTR_TO_MAP_VALUE_OR_NULL)
522 verbose(env, ",ks=%d,vs=%d",
523 reg->map_ptr->key_size,
524 reg->map_ptr->value_size);
525 if (tnum_is_const(reg->var_off)) {
526 /* Typically an immediate SCALAR_VALUE, but
527 * could be a pointer whose offset is too big
528 * for reg->off
530 verbose(env, ",imm=%llx", reg->var_off.value);
531 } else {
532 if (reg->smin_value != reg->umin_value &&
533 reg->smin_value != S64_MIN)
534 verbose(env, ",smin_value=%lld",
535 (long long)reg->smin_value);
536 if (reg->smax_value != reg->umax_value &&
537 reg->smax_value != S64_MAX)
538 verbose(env, ",smax_value=%lld",
539 (long long)reg->smax_value);
540 if (reg->umin_value != 0)
541 verbose(env, ",umin_value=%llu",
542 (unsigned long long)reg->umin_value);
543 if (reg->umax_value != U64_MAX)
544 verbose(env, ",umax_value=%llu",
545 (unsigned long long)reg->umax_value);
546 if (!tnum_is_unknown(reg->var_off)) {
547 char tn_buf[48];
549 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
550 verbose(env, ",var_off=%s", tn_buf);
553 verbose(env, ")");
556 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
557 char types_buf[BPF_REG_SIZE + 1];
558 bool valid = false;
559 int j;
561 for (j = 0; j < BPF_REG_SIZE; j++) {
562 if (state->stack[i].slot_type[j] != STACK_INVALID)
563 valid = true;
564 types_buf[j] = slot_type_char[
565 state->stack[i].slot_type[j]];
567 types_buf[BPF_REG_SIZE] = 0;
568 if (!valid)
569 continue;
570 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
571 print_liveness(env, state->stack[i].spilled_ptr.live);
572 if (state->stack[i].slot_type[0] == STACK_SPILL) {
573 reg = &state->stack[i].spilled_ptr;
574 t = reg->type;
575 verbose(env, "=%s", reg_type_str[t]);
576 if (t == SCALAR_VALUE && reg->precise)
577 verbose(env, "P");
578 if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
579 verbose(env, "%lld", reg->var_off.value + reg->off);
580 } else {
581 verbose(env, "=%s", types_buf);
584 if (state->acquired_refs && state->refs[0].id) {
585 verbose(env, " refs=%d", state->refs[0].id);
586 for (i = 1; i < state->acquired_refs; i++)
587 if (state->refs[i].id)
588 verbose(env, ",%d", state->refs[i].id);
590 verbose(env, "\n");
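/* For reference, a single state line produced by print_verifier_state() looks
 * like this in the log (an illustrative sample; values depend on the program):
 *
 *	 R1=ctx(id=0,off=0,imm=0) R2_w=inv8 R6_w=inv(id=0) R10=fp0 fp-8_w=mmmmmmmm
 */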
593 #define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \
594 static int copy_##NAME##_state(struct bpf_func_state *dst, \
595 const struct bpf_func_state *src) \
597 if (!src->FIELD) \
598 return 0; \
599 if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \
600 /* internal bug, make state invalid to reject the program */ \
601 memset(dst, 0, sizeof(*dst)); \
602 return -EFAULT; \
604 memcpy(dst->FIELD, src->FIELD, \
605 sizeof(*src->FIELD) * (src->COUNT / SIZE)); \
606 return 0; \
608 /* copy_reference_state() */
609 COPY_STATE_FN(reference, acquired_refs, refs, 1)
610 /* copy_stack_state() */
611 COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
612 #undef COPY_STATE_FN
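/* For readability, copy_stack_state() above expands to roughly the following
 * (a hand-expanded sketch of the macro, not literal code from this file):
 *
 *	static int copy_stack_state(struct bpf_func_state *dst,
 *				    const struct bpf_func_state *src)
 *	{
 *		if (!src->stack)
 *			return 0;
 *		if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) {
 *			memset(dst, 0, sizeof(*dst));
 *			return -EFAULT;
 *		}
 *		memcpy(dst->stack, src->stack,
 *		       sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
 *		return 0;
 *	}
 */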
614 #define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \
615 static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
616 bool copy_old) \
618 u32 old_size = state->COUNT; \
619 struct bpf_##NAME##_state *new_##FIELD; \
620 int slot = size / SIZE; \
622 if (size <= old_size || !size) { \
623 if (copy_old) \
624 return 0; \
625 state->COUNT = slot * SIZE; \
626 if (!size && old_size) { \
627 kfree(state->FIELD); \
628 state->FIELD = NULL; \
630 return 0; \
632 new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
633 GFP_KERNEL); \
634 if (!new_##FIELD) \
635 return -ENOMEM; \
636 if (copy_old) { \
637 if (state->FIELD) \
638 memcpy(new_##FIELD, state->FIELD, \
639 sizeof(*new_##FIELD) * (old_size / SIZE)); \
640 memset(new_##FIELD + old_size / SIZE, 0, \
641 sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
643 state->COUNT = slot * SIZE; \
644 kfree(state->FIELD); \
645 state->FIELD = new_##FIELD; \
646 return 0; \
648 /* realloc_reference_state() */
649 REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
650 /* realloc_stack_state() */
651 REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
652 #undef REALLOC_STATE_FN
654 /* do_check() starts with a zero-sized stack in struct bpf_verifier_state to
655 * make it consume a minimal amount of memory. A check_stack_write() access from
656 * the program calls into realloc_func_state() to grow the stack size.
657 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
658 * which realloc_stack_state() copies over. It points to previous
659 * bpf_verifier_state which is never reallocated.
661 static int realloc_func_state(struct bpf_func_state *state, int stack_size,
662 int refs_size, bool copy_old)
664 int err = realloc_reference_state(state, refs_size, copy_old);
665 if (err)
666 return err;
667 return realloc_stack_state(state, stack_size, copy_old);
670 /* Acquire a pointer id from the env and update the state->refs to include
671 * this new pointer reference.
672 * On success, returns a valid pointer id to associate with the register
673 * On failure, returns a negative errno.
675 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
677 struct bpf_func_state *state = cur_func(env);
678 int new_ofs = state->acquired_refs;
679 int id, err;
681 err = realloc_reference_state(state, state->acquired_refs + 1, true);
682 if (err)
683 return err;
684 id = ++env->id_gen;
685 state->refs[new_ofs].id = id;
686 state->refs[new_ofs].insn_idx = insn_idx;
688 return id;
691 /* release function corresponding to acquire_reference_state(). Idempotent. */
692 static int release_reference_state(struct bpf_func_state *state, int ptr_id)
694 int i, last_idx;
696 last_idx = state->acquired_refs - 1;
697 for (i = 0; i < state->acquired_refs; i++) {
698 if (state->refs[i].id == ptr_id) {
699 if (last_idx && i != last_idx)
700 memcpy(&state->refs[i], &state->refs[last_idx],
701 sizeof(*state->refs));
702 memset(&state->refs[last_idx], 0, sizeof(*state->refs));
703 state->acquired_refs--;
704 return 0;
707 return -EINVAL;
710 static int transfer_reference_state(struct bpf_func_state *dst,
711 struct bpf_func_state *src)
713 int err = realloc_reference_state(dst, src->acquired_refs, false);
714 if (err)
715 return err;
716 err = copy_reference_state(dst, src);
717 if (err)
718 return err;
719 return 0;
722 static void free_func_state(struct bpf_func_state *state)
724 if (!state)
725 return;
726 kfree(state->refs);
727 kfree(state->stack);
728 kfree(state);
731 static void clear_jmp_history(struct bpf_verifier_state *state)
733 kfree(state->jmp_history);
734 state->jmp_history = NULL;
735 state->jmp_history_cnt = 0;
738 static void free_verifier_state(struct bpf_verifier_state *state,
739 bool free_self)
741 int i;
743 for (i = 0; i <= state->curframe; i++) {
744 free_func_state(state->frame[i]);
745 state->frame[i] = NULL;
747 clear_jmp_history(state);
748 if (free_self)
749 kfree(state);
752 /* copy verifier state from src to dst growing dst stack space
753 * when necessary to accommodate larger src stack
755 static int copy_func_state(struct bpf_func_state *dst,
756 const struct bpf_func_state *src)
758 int err;
760 err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
761 false);
762 if (err)
763 return err;
764 memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
765 err = copy_reference_state(dst, src);
766 if (err)
767 return err;
768 return copy_stack_state(dst, src);
771 static int copy_verifier_state(struct bpf_verifier_state *dst_state,
772 const struct bpf_verifier_state *src)
774 struct bpf_func_state *dst;
775 u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
776 int i, err;
778 if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
779 kfree(dst_state->jmp_history);
780 dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
781 if (!dst_state->jmp_history)
782 return -ENOMEM;
784 memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
785 dst_state->jmp_history_cnt = src->jmp_history_cnt;
787 /* if dst has more stack frames than src, free them */
788 for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
789 free_func_state(dst_state->frame[i]);
790 dst_state->frame[i] = NULL;
792 dst_state->speculative = src->speculative;
793 dst_state->curframe = src->curframe;
794 dst_state->active_spin_lock = src->active_spin_lock;
795 dst_state->branches = src->branches;
796 dst_state->parent = src->parent;
797 dst_state->first_insn_idx = src->first_insn_idx;
798 dst_state->last_insn_idx = src->last_insn_idx;
799 for (i = 0; i <= src->curframe; i++) {
800 dst = dst_state->frame[i];
801 if (!dst) {
802 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
803 if (!dst)
804 return -ENOMEM;
805 dst_state->frame[i] = dst;
807 err = copy_func_state(dst, src->frame[i]);
808 if (err)
809 return err;
811 return 0;
814 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
816 while (st) {
817 u32 br = --st->branches;
819 /* WARN_ON(br > 1) technically makes sense here,
820 * but see comment in push_stack(), hence:
822 WARN_ONCE((int)br < 0,
823 "BUG update_branch_counts:branches_to_explore=%d\n",
824 br);
825 if (br)
826 break;
827 st = st->parent;
831 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
832 int *insn_idx)
834 struct bpf_verifier_state *cur = env->cur_state;
835 struct bpf_verifier_stack_elem *elem, *head = env->head;
836 int err;
838 if (env->head == NULL)
839 return -ENOENT;
841 if (cur) {
842 err = copy_verifier_state(cur, &head->st);
843 if (err)
844 return err;
846 if (insn_idx)
847 *insn_idx = head->insn_idx;
848 if (prev_insn_idx)
849 *prev_insn_idx = head->prev_insn_idx;
850 elem = head->next;
851 free_verifier_state(&head->st, false);
852 kfree(head);
853 env->head = elem;
854 env->stack_size--;
855 return 0;
858 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
859 int insn_idx, int prev_insn_idx,
860 bool speculative)
862 struct bpf_verifier_state *cur = env->cur_state;
863 struct bpf_verifier_stack_elem *elem;
864 int err;
866 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
867 if (!elem)
868 goto err;
870 elem->insn_idx = insn_idx;
871 elem->prev_insn_idx = prev_insn_idx;
872 elem->next = env->head;
873 env->head = elem;
874 env->stack_size++;
875 err = copy_verifier_state(&elem->st, cur);
876 if (err)
877 goto err;
878 elem->st.speculative |= speculative;
879 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
880 verbose(env, "The sequence of %d jumps is too complex.\n",
881 env->stack_size);
882 goto err;
884 if (elem->st.parent) {
885 ++elem->st.parent->branches;
886 /* WARN_ON(branches > 2) technically makes sense here,
887 * but
888 * 1. speculative states will bump 'branches' for non-branch
889 * instructions
890 * 2. is_state_visited() heuristics may decide not to create
891 * a new state for a sequence of branches and all such current
892 * and cloned states will be pointing to a single parent state
893 * which might have large 'branches' count.
896 return &elem->st;
897 err:
898 free_verifier_state(env->cur_state, true);
899 env->cur_state = NULL;
900 /* pop all elements and return */
901 while (!pop_stack(env, NULL, NULL));
902 return NULL;
905 #define CALLER_SAVED_REGS 6
906 static const int caller_saved[CALLER_SAVED_REGS] = {
907 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
910 static void __mark_reg_not_init(struct bpf_reg_state *reg);
912 /* Mark the unknown part of a register (variable offset or scalar value) as
913 * known to have the value @imm.
915 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
917 /* Clear id, off, and union(map_ptr, range) */
918 memset(((u8 *)reg) + sizeof(reg->type), 0,
919 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
920 reg->var_off = tnum_const(imm);
921 reg->smin_value = (s64)imm;
922 reg->smax_value = (s64)imm;
923 reg->umin_value = imm;
924 reg->umax_value = imm;
927 /* Mark the 'variable offset' part of a register as zero. This should be
928 * used only on registers holding a pointer type.
930 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
932 __mark_reg_known(reg, 0);
935 static void __mark_reg_const_zero(struct bpf_reg_state *reg)
937 __mark_reg_known(reg, 0);
938 reg->type = SCALAR_VALUE;
941 static void mark_reg_known_zero(struct bpf_verifier_env *env,
942 struct bpf_reg_state *regs, u32 regno)
944 if (WARN_ON(regno >= MAX_BPF_REG)) {
945 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
946 /* Something bad happened, let's kill all regs */
947 for (regno = 0; regno < MAX_BPF_REG; regno++)
948 __mark_reg_not_init(regs + regno);
949 return;
951 __mark_reg_known_zero(regs + regno);
954 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
956 return type_is_pkt_pointer(reg->type);
959 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
961 return reg_is_pkt_pointer(reg) ||
962 reg->type == PTR_TO_PACKET_END;
965 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
966 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
967 enum bpf_reg_type which)
969 /* The register can already have a range from prior markings.
970 * This is fine as long as it hasn't been advanced from its
971 * origin.
973 return reg->type == which &&
974 reg->id == 0 &&
975 reg->off == 0 &&
976 tnum_equals_const(reg->var_off, 0);
979 /* Attempts to improve min/max values based on var_off information */
980 static void __update_reg_bounds(struct bpf_reg_state *reg)
982 /* min signed is max(sign bit) | min(other bits) */
983 reg->smin_value = max_t(s64, reg->smin_value,
984 reg->var_off.value | (reg->var_off.mask & S64_MIN));
985 /* max signed is min(sign bit) | max(other bits) */
986 reg->smax_value = min_t(s64, reg->smax_value,
987 reg->var_off.value | (reg->var_off.mask & S64_MAX));
988 reg->umin_value = max(reg->umin_value, reg->var_off.value);
989 reg->umax_value = min(reg->umax_value,
990 reg->var_off.value | reg->var_off.mask);
993 /* Uses signed min/max values to inform unsigned, and vice-versa */
994 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
996 /* Learn sign from signed bounds.
997 * If we cannot cross the sign boundary, then signed and unsigned bounds
998 * are the same, so combine. This works even in the negative case, e.g.
999 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1001 if (reg->smin_value >= 0 || reg->smax_value < 0) {
1002 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1003 reg->umin_value);
1004 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1005 reg->umax_value);
1006 return;
1008 /* Learn sign from unsigned bounds. Signed bounds cross the sign
1009 * boundary, so we must be careful.
1011 if ((s64)reg->umax_value >= 0) {
1012 /* Positive. We can't learn anything from the smin, but smax
1013 * is positive, hence safe.
1015 reg->smin_value = reg->umin_value;
1016 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1017 reg->umax_value);
1018 } else if ((s64)reg->umin_value < 0) {
1019 /* Negative. We can't learn anything from the smax, but smin
1020 * is negative, hence safe.
1022 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1023 reg->umin_value);
1024 reg->smax_value = reg->umax_value;
1028 /* Attempts to improve var_off based on unsigned min/max information */
1029 static void __reg_bound_offset(struct bpf_reg_state *reg)
1031 reg->var_off = tnum_intersect(reg->var_off,
1032 tnum_range(reg->umin_value,
1033 reg->umax_value));
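/* Worked example for the helpers above (illustrative numbers): if
 * var_off = (value 0x0; mask 0xff), i.e. only the low byte is unknown, then
 * __update_reg_bounds() can tighten umin_value to at least 0 and umax_value
 * to at most 0xff.  Conversely, if umin_value = 0x10 and umax_value = 0x1f,
 * __reg_bound_offset() intersects var_off with tnum_range(0x10, 0x1f) and
 * learns that bit 4 is one, bits 0-3 stay unknown and bits 5-63 are zero.
 */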
1036 static void __reg_bound_offset32(struct bpf_reg_state *reg)
1038 u64 mask = 0xffffFFFF;
1039 struct tnum range = tnum_range(reg->umin_value & mask,
1040 reg->umax_value & mask);
1041 struct tnum lo32 = tnum_cast(reg->var_off, 4);
1042 struct tnum hi32 = tnum_lshift(tnum_rshift(reg->var_off, 32), 32);
1044 reg->var_off = tnum_or(hi32, tnum_intersect(lo32, range));
1047 /* Reset the min/max bounds of a register */
1048 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
1050 reg->smin_value = S64_MIN;
1051 reg->smax_value = S64_MAX;
1052 reg->umin_value = 0;
1053 reg->umax_value = U64_MAX;
1056 /* Mark a register as having a completely unknown (scalar) value. */
1057 static void __mark_reg_unknown(struct bpf_reg_state *reg)
1060 * Clear type, id, off, and union(map_ptr, range) and
1061 * padding between 'type' and union
1063 memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
1064 reg->type = SCALAR_VALUE;
1065 reg->var_off = tnum_unknown;
1066 reg->frameno = 0;
1067 __mark_reg_unbounded(reg);
1070 static void mark_reg_unknown(struct bpf_verifier_env *env,
1071 struct bpf_reg_state *regs, u32 regno)
1073 if (WARN_ON(regno >= MAX_BPF_REG)) {
1074 verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
1075 /* Something bad happened, let's kill all regs except FP */
1076 for (regno = 0; regno < BPF_REG_FP; regno++)
1077 __mark_reg_not_init(regs + regno);
1078 return;
1080 regs += regno;
1081 __mark_reg_unknown(regs);
1082 /* constant backtracking is enabled for root without bpf2bpf calls */
1083 regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
1084 true : false;
1087 static void __mark_reg_not_init(struct bpf_reg_state *reg)
1089 __mark_reg_unknown(reg);
1090 reg->type = NOT_INIT;
1093 static void mark_reg_not_init(struct bpf_verifier_env *env,
1094 struct bpf_reg_state *regs, u32 regno)
1096 if (WARN_ON(regno >= MAX_BPF_REG)) {
1097 verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
1098 /* Something bad happened, let's kill all regs except FP */
1099 for (regno = 0; regno < BPF_REG_FP; regno++)
1100 __mark_reg_not_init(regs + regno);
1101 return;
1103 __mark_reg_not_init(regs + regno);
1106 #define DEF_NOT_SUBREG (0)
1107 static void init_reg_state(struct bpf_verifier_env *env,
1108 struct bpf_func_state *state)
1110 struct bpf_reg_state *regs = state->regs;
1111 int i;
1113 for (i = 0; i < MAX_BPF_REG; i++) {
1114 mark_reg_not_init(env, regs, i);
1115 regs[i].live = REG_LIVE_NONE;
1116 regs[i].parent = NULL;
1117 regs[i].subreg_def = DEF_NOT_SUBREG;
1120 /* frame pointer */
1121 regs[BPF_REG_FP].type = PTR_TO_STACK;
1122 mark_reg_known_zero(env, regs, BPF_REG_FP);
1123 regs[BPF_REG_FP].frameno = state->frameno;
1125 /* 1st arg to a function */
1126 regs[BPF_REG_1].type = PTR_TO_CTX;
1127 mark_reg_known_zero(env, regs, BPF_REG_1);
1130 #define BPF_MAIN_FUNC (-1)
1131 static void init_func_state(struct bpf_verifier_env *env,
1132 struct bpf_func_state *state,
1133 int callsite, int frameno, int subprogno)
1135 state->callsite = callsite;
1136 state->frameno = frameno;
1137 state->subprogno = subprogno;
1138 init_reg_state(env, state);
1141 enum reg_arg_type {
1142 SRC_OP, /* register is used as source operand */
1143 DST_OP, /* register is used as destination operand */
1144 DST_OP_NO_MARK /* same as above, check only, don't mark */
1147 static int cmp_subprogs(const void *a, const void *b)
1149 return ((struct bpf_subprog_info *)a)->start -
1150 ((struct bpf_subprog_info *)b)->start;
1153 static int find_subprog(struct bpf_verifier_env *env, int off)
1155 struct bpf_subprog_info *p;
1157 p = bsearch(&off, env->subprog_info, env->subprog_cnt,
1158 sizeof(env->subprog_info[0]), cmp_subprogs);
1159 if (!p)
1160 return -ENOENT;
1161 return p - env->subprog_info;
1165 static int add_subprog(struct bpf_verifier_env *env, int off)
1167 int insn_cnt = env->prog->len;
1168 int ret;
1170 if (off >= insn_cnt || off < 0) {
1171 verbose(env, "call to invalid destination\n");
1172 return -EINVAL;
1174 ret = find_subprog(env, off);
1175 if (ret >= 0)
1176 return 0;
1177 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
1178 verbose(env, "too many subprograms\n");
1179 return -E2BIG;
1181 env->subprog_info[env->subprog_cnt++].start = off;
1182 sort(env->subprog_info, env->subprog_cnt,
1183 sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
1184 return 0;
1187 static int check_subprogs(struct bpf_verifier_env *env)
1189 int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
1190 struct bpf_subprog_info *subprog = env->subprog_info;
1191 struct bpf_insn *insn = env->prog->insnsi;
1192 int insn_cnt = env->prog->len;
1194 /* Add entry function. */
1195 ret = add_subprog(env, 0);
1196 if (ret < 0)
1197 return ret;
1199 /* determine subprog starts. The end is one before the next starts */
1200 for (i = 0; i < insn_cnt; i++) {
1201 if (insn[i].code != (BPF_JMP | BPF_CALL))
1202 continue;
1203 if (insn[i].src_reg != BPF_PSEUDO_CALL)
1204 continue;
1205 if (!env->allow_ptr_leaks) {
1206 verbose(env, "function calls to other bpf functions are allowed for root only\n");
1207 return -EPERM;
1209 ret = add_subprog(env, i + insn[i].imm + 1);
1210 if (ret < 0)
1211 return ret;
1214 /* Add a fake 'exit' subprog which could simplify subprog iteration
1215 * logic. 'subprog_cnt' should not be increased.
1217 subprog[env->subprog_cnt].start = insn_cnt;
1219 if (env->log.level & BPF_LOG_LEVEL2)
1220 for (i = 0; i < env->subprog_cnt; i++)
1221 verbose(env, "func#%d @%d\n", i, subprog[i].start);
1223 /* now check that all jumps are within the same subprog */
1224 subprog_start = subprog[cur_subprog].start;
1225 subprog_end = subprog[cur_subprog + 1].start;
1226 for (i = 0; i < insn_cnt; i++) {
1227 u8 code = insn[i].code;
1229 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
1230 goto next;
1231 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
1232 goto next;
1233 off = i + insn[i].off + 1;
1234 if (off < subprog_start || off >= subprog_end) {
1235 verbose(env, "jump out of range from insn %d to %d\n", i, off);
1236 return -EINVAL;
1238 next:
1239 if (i == subprog_end - 1) {
1240 /* to avoid fall-through from one subprog into another
1241 * the last insn of the subprog should be either exit
1242 * or unconditional jump back
1244 if (code != (BPF_JMP | BPF_EXIT) &&
1245 code != (BPF_JMP | BPF_JA)) {
1246 verbose(env, "last insn is not an exit or jmp\n");
1247 return -EINVAL;
1249 subprog_start = subprog_end;
1250 cur_subprog++;
1251 if (cur_subprog < env->subprog_cnt)
1252 subprog_end = subprog[cur_subprog + 1].start;
1255 return 0;
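/* Illustrative sketch of the bookkeeping above: for a program such as
 *
 *	0: r1 = 1
 *	1: call pc+2        // BPF_PSEUDO_CALL, target is insn 4
 *	2: r0 = 0
 *	3: exit
 *	4: r0 = r1
 *	5: exit
 *
 * check_subprogs() records subprog starts {0, 4}, adds the fake 'exit'
 * subprog at insn 6, and then checks that no jump crosses the boundary
 * between insns 3 and 4 and that insns 3 and 5 properly end their subprogs.
 */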
1258 /* Parentage chain of this register (or stack slot) should take care of all
1259 * issues like callee-saved registers, stack slot allocation time, etc.
1261 static int mark_reg_read(struct bpf_verifier_env *env,
1262 const struct bpf_reg_state *state,
1263 struct bpf_reg_state *parent, u8 flag)
1265 bool writes = parent == state->parent; /* Observe write marks */
1266 int cnt = 0;
1268 while (parent) {
1269 /* if read wasn't screened by an earlier write ... */
1270 if (writes && state->live & REG_LIVE_WRITTEN)
1271 break;
1272 if (parent->live & REG_LIVE_DONE) {
1273 verbose(env, "verifier BUG type %s var_off %lld off %d\n",
1274 reg_type_str[parent->type],
1275 parent->var_off.value, parent->off);
1276 return -EFAULT;
1278 /* The first condition is more likely to be true than the
1279 * second, so check it first.
1281 if ((parent->live & REG_LIVE_READ) == flag ||
1282 parent->live & REG_LIVE_READ64)
1283 /* The parentage chain never changes and
1284 * this parent was already marked as LIVE_READ.
1285 * There is no need to keep walking the chain again and
1286 * keep re-marking all parents as LIVE_READ.
1287 * This case happens when the same register is read
1288 * multiple times without writes into it in-between.
1289 * Also, if parent has the stronger REG_LIVE_READ64 set,
1290 * then no need to set the weak REG_LIVE_READ32.
1292 break;
1293 /* ... then we depend on parent's value */
1294 parent->live |= flag;
1295 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
1296 if (flag == REG_LIVE_READ64)
1297 parent->live &= ~REG_LIVE_READ32;
1298 state = parent;
1299 parent = state->parent;
1300 writes = true;
1301 cnt++;
1304 if (env->longest_mark_read_walk < cnt)
1305 env->longest_mark_read_walk = cnt;
1306 return 0;
1309 /* This function is supposed to be used by the following 32-bit optimization
1310 * code only. It returns TRUE if the source or destination register operates
1311 * on 64-bit, otherwise it returns FALSE.
1313 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
1314 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
1316 u8 code, class, op;
1318 code = insn->code;
1319 class = BPF_CLASS(code);
1320 op = BPF_OP(code);
1321 if (class == BPF_JMP) {
1322 /* BPF_EXIT for "main" will reach here. Return TRUE
1323 * conservatively.
1325 if (op == BPF_EXIT)
1326 return true;
1327 if (op == BPF_CALL) {
1328 /* BPF to BPF call will reach here because of marking
1329 * caller saved clobber with DST_OP_NO_MARK, for which we
1330 * don't care about the register def because they are anyway
1331 * marked as NOT_INIT already.
1333 if (insn->src_reg == BPF_PSEUDO_CALL)
1334 return false;
1335 /* Helper call will reach here because of arg type
1336 * check, conservatively return TRUE.
1338 if (t == SRC_OP)
1339 return true;
1341 return false;
1345 if (class == BPF_ALU64 || class == BPF_JMP ||
1346 /* BPF_END always use BPF_ALU class. */
1347 (class == BPF_ALU && op == BPF_END && insn->imm == 64))
1348 return true;
1350 if (class == BPF_ALU || class == BPF_JMP32)
1351 return false;
1353 if (class == BPF_LDX) {
1354 if (t != SRC_OP)
1355 return BPF_SIZE(code) == BPF_DW;
1356 /* LDX source must be ptr. */
1357 return true;
1360 if (class == BPF_STX) {
1361 if (reg->type != SCALAR_VALUE)
1362 return true;
1363 return BPF_SIZE(code) == BPF_DW;
1366 if (class == BPF_LD) {
1367 u8 mode = BPF_MODE(code);
1369 /* LD_IMM64 */
1370 if (mode == BPF_IMM)
1371 return true;
1373 /* Both LD_IND and LD_ABS return 32-bit data. */
1374 if (t != SRC_OP)
1375 return false;
1377 /* Implicit ctx ptr. */
1378 if (regno == BPF_REG_6)
1379 return true;
1381 /* Explicit source could be any width. */
1382 return true;
1385 if (class == BPF_ST)
1386 /* The only source register for BPF_ST is a ptr. */
1387 return true;
1389 /* Conservatively return true at default. */
1390 return true;
1393 /* Return TRUE if INSN doesn't have an explicit value definition. */
1394 static bool insn_no_def(struct bpf_insn *insn)
1396 u8 class = BPF_CLASS(insn->code);
1398 return (class == BPF_JMP || class == BPF_JMP32 ||
1399 class == BPF_STX || class == BPF_ST);
1402 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
1403 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
1405 if (insn_no_def(insn))
1406 return false;
1408 return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
1411 static void mark_insn_zext(struct bpf_verifier_env *env,
1412 struct bpf_reg_state *reg)
1414 s32 def_idx = reg->subreg_def;
1416 if (def_idx == DEF_NOT_SUBREG)
1417 return;
1419 env->insn_aux_data[def_idx - 1].zext_dst = true;
1420 /* The dst will be zero extended, so won't be sub-register anymore. */
1421 reg->subreg_def = DEF_NOT_SUBREG;
1424 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
1425 enum reg_arg_type t)
1427 struct bpf_verifier_state *vstate = env->cur_state;
1428 struct bpf_func_state *state = vstate->frame[vstate->curframe];
1429 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
1430 struct bpf_reg_state *reg, *regs = state->regs;
1431 bool rw64;
1433 if (regno >= MAX_BPF_REG) {
1434 verbose(env, "R%d is invalid\n", regno);
1435 return -EINVAL;
1438 reg = &regs[regno];
1439 rw64 = is_reg64(env, insn, regno, reg, t);
1440 if (t == SRC_OP) {
1441 /* check whether register used as source operand can be read */
1442 if (reg->type == NOT_INIT) {
1443 verbose(env, "R%d !read_ok\n", regno);
1444 return -EACCES;
1446 /* We don't need to worry about FP liveness because it's read-only */
1447 if (regno == BPF_REG_FP)
1448 return 0;
1450 if (rw64)
1451 mark_insn_zext(env, reg);
1453 return mark_reg_read(env, reg, reg->parent,
1454 rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
1455 } else {
1456 /* check whether register used as dest operand can be written to */
1457 if (regno == BPF_REG_FP) {
1458 verbose(env, "frame pointer is read only\n");
1459 return -EACCES;
1461 reg->live |= REG_LIVE_WRITTEN;
1462 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
1463 if (t == DST_OP)
1464 mark_reg_unknown(env, regs, regno);
1466 return 0;
1469 /* for any branch, call, exit record the history of jmps in the given state */
1470 static int push_jmp_history(struct bpf_verifier_env *env,
1471 struct bpf_verifier_state *cur)
1473 u32 cnt = cur->jmp_history_cnt;
1474 struct bpf_idx_pair *p;
1476 cnt++;
1477 p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
1478 if (!p)
1479 return -ENOMEM;
1480 p[cnt - 1].idx = env->insn_idx;
1481 p[cnt - 1].prev_idx = env->prev_insn_idx;
1482 cur->jmp_history = p;
1483 cur->jmp_history_cnt = cnt;
1484 return 0;
1487 /* Backtrack one insn at a time. If idx is not at the top of recorded
1488 * history then previous instruction came from straight line execution.
1490 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
1491 u32 *history)
1493 u32 cnt = *history;
1495 if (cnt && st->jmp_history[cnt - 1].idx == i) {
1496 i = st->jmp_history[cnt - 1].prev_idx;
1497 (*history)--;
1498 } else {
1499 i--;
1501 return i;
1504 /* For given verifier state backtrack_insn() is called from the last insn to
1505 * the first insn. Its purpose is to compute a bitmask of registers and
1506 * stack slots that need precision in the parent verifier state.
1508 static int backtrack_insn(struct bpf_verifier_env *env, int idx,
1509 u32 *reg_mask, u64 *stack_mask)
1511 const struct bpf_insn_cbs cbs = {
1512 .cb_print = verbose,
1513 .private_data = env,
1515 struct bpf_insn *insn = env->prog->insnsi + idx;
1516 u8 class = BPF_CLASS(insn->code);
1517 u8 opcode = BPF_OP(insn->code);
1518 u8 mode = BPF_MODE(insn->code);
1519 u32 dreg = 1u << insn->dst_reg;
1520 u32 sreg = 1u << insn->src_reg;
1521 u32 spi;
1523 if (insn->code == 0)
1524 return 0;
1525 if (env->log.level & BPF_LOG_LEVEL) {
1526 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
1527 verbose(env, "%d: ", idx);
1528 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
1531 if (class == BPF_ALU || class == BPF_ALU64) {
1532 if (!(*reg_mask & dreg))
1533 return 0;
1534 if (opcode == BPF_MOV) {
1535 if (BPF_SRC(insn->code) == BPF_X) {
1536 /* dreg = sreg
1537 * dreg needs precision after this insn
1538 * sreg needs precision before this insn
1540 *reg_mask &= ~dreg;
1541 *reg_mask |= sreg;
1542 } else {
1543 /* dreg = K
1544 * dreg needs precision after this insn.
1545 * Corresponding register is already marked
1546 * as precise=true in this verifier state.
1547 * No further markings in parent are necessary
1549 *reg_mask &= ~dreg;
1551 } else {
1552 if (BPF_SRC(insn->code) == BPF_X) {
1553 /* dreg += sreg
1554 * both dreg and sreg need precision
1555 * before this insn
1557 *reg_mask |= sreg;
1558 } /* else dreg += K
1559 * dreg still needs precision before this insn
1562 } else if (class == BPF_LDX) {
1563 if (!(*reg_mask & dreg))
1564 return 0;
1565 *reg_mask &= ~dreg;
1567 /* scalars can only be spilled into stack w/o losing precision.
1568 * Load from any other memory can be zero extended.
1569 * The desire to keep that precision is already indicated
1570 * by 'precise' mark in corresponding register of this state.
1571 * No further tracking necessary.
1573 if (insn->src_reg != BPF_REG_FP)
1574 return 0;
1575 if (BPF_SIZE(insn->code) != BPF_DW)
1576 return 0;
1578 /* dreg = *(u64 *)[fp - off] was a fill from the stack.
1579 * that [fp - off] slot contains scalar that needs to be
1580 * tracked with precision
1582 spi = (-insn->off - 1) / BPF_REG_SIZE;
1583 if (spi >= 64) {
1584 verbose(env, "BUG spi %d\n", spi);
1585 WARN_ONCE(1, "verifier backtracking bug");
1586 return -EFAULT;
1588 *stack_mask |= 1ull << spi;
1589 } else if (class == BPF_STX || class == BPF_ST) {
1590 if (*reg_mask & dreg)
1591 /* stx & st shouldn't be using _scalar_ dst_reg
1592 * to access memory. It means backtracking
1593 * encountered a case of pointer subtraction.
1595 return -ENOTSUPP;
1596 /* scalars can only be spilled into stack */
1597 if (insn->dst_reg != BPF_REG_FP)
1598 return 0;
1599 if (BPF_SIZE(insn->code) != BPF_DW)
1600 return 0;
1601 spi = (-insn->off - 1) / BPF_REG_SIZE;
1602 if (spi >= 64) {
1603 verbose(env, "BUG spi %d\n", spi);
1604 WARN_ONCE(1, "verifier backtracking bug");
1605 return -EFAULT;
1607 if (!(*stack_mask & (1ull << spi)))
1608 return 0;
1609 *stack_mask &= ~(1ull << spi);
1610 if (class == BPF_STX)
1611 *reg_mask |= sreg;
1612 } else if (class == BPF_JMP || class == BPF_JMP32) {
1613 if (opcode == BPF_CALL) {
1614 if (insn->src_reg == BPF_PSEUDO_CALL)
1615 return -ENOTSUPP;
1616 /* regular helper call sets R0 */
1617 *reg_mask &= ~1;
1618 if (*reg_mask & 0x3f) {
1619 /* if backtracking was looking for registers R1-R5
1620 * they should have been found already.
1622 verbose(env, "BUG regs %x\n", *reg_mask);
1623 WARN_ONCE(1, "verifier backtracking bug");
1624 return -EFAULT;
1626 } else if (opcode == BPF_EXIT) {
1627 return -ENOTSUPP;
1629 } else if (class == BPF_LD) {
1630 if (!(*reg_mask & dreg))
1631 return 0;
1632 *reg_mask &= ~dreg;
1633 /* It's ld_imm64 or ld_abs or ld_ind.
1634 * For ld_imm64 no further tracking of precision
1635 * into parent is necessary
1637 if (mode == BPF_IND || mode == BPF_ABS)
1638 /* to be analyzed */
1639 return -ENOTSUPP;
1641 return 0;
1644 /* the scalar precision tracking algorithm:
1645 * . at the start all registers have precise=false.
1646 * . scalar ranges are tracked as normal through alu and jmp insns.
1647 * . once precise value of the scalar register is used in:
1648 * . ptr + scalar alu
1649 * . if (scalar cond K|scalar)
1650 * . helper_call(.., scalar, ...) where ARG_CONST is expected
1651 * backtrack through the verifier states and mark all registers and
1652 * stack slots with spilled constants that these scalar registers
1653 * should be precise.
1654 * . during state pruning two registers (or spilled stack slots)
1655 * are equivalent if both are not precise.
1657 * Note the verifier cannot simply walk the register parentage chain,
1658 * since many different registers and stack slots could have been
1659 * used to compute a single precise scalar.
1661 * The approach of starting with precise=true for all registers and then
1662 * backtracking to mark a register as not precise when the verifier detects
1663 * that the program doesn't care about the specific value (e.g., when a helper
1664 * takes the register as an ARG_ANYTHING parameter) is not safe.
1666 * It's ok to walk single parentage chain of the verifier states.
1667 * It's possible that this backtracking will go all the way till 1st insn.
1668 * All other branches will be explored for needing precision later.
1670 * The backtracking needs to deal with cases like:
1671 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
1672 * r9 -= r8
1673 * r5 = r9
1674 * if r5 > 0x79f goto pc+7
1675 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
1676 * r5 += 1
1677 * ...
1678 * call bpf_perf_event_output#25
1679 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO
1681 * and this case:
1682 * r6 = 1
1683 * call foo // uses callee's r6 inside to compute r0
1684 * r0 += r6
1685 * if r0 == 0 goto
1687 * to track above reg_mask/stack_mask needs to be independent for each frame.
1689 * Also if parent's curframe > frame where backtracking started,
1690 * the verifier needs to mark registers in both frames, otherwise callees
1691 * may incorrectly prune callers. This is similar to
1692 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
1694 * For now backtracking falls back into conservative marking.
1696 static void mark_all_scalars_precise(struct bpf_verifier_env *env,
1697 struct bpf_verifier_state *st)
1699 struct bpf_func_state *func;
1700 struct bpf_reg_state *reg;
1701 int i, j;
1703 /* big hammer: mark all scalars precise in this path.
1704 * pop_stack may still get !precise scalars.
1706 for (; st; st = st->parent)
1707 for (i = 0; i <= st->curframe; i++) {
1708 func = st->frame[i];
1709 for (j = 0; j < BPF_REG_FP; j++) {
1710 reg = &func->regs[j];
1711 if (reg->type != SCALAR_VALUE)
1712 continue;
1713 reg->precise = true;
1715 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
1716 if (func->stack[j].slot_type[0] != STACK_SPILL)
1717 continue;
1718 reg = &func->stack[j].spilled_ptr;
1719 if (reg->type != SCALAR_VALUE)
1720 continue;
1721 reg->precise = true;
1726 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
1727 int spi)
1729 struct bpf_verifier_state *st = env->cur_state;
1730 int first_idx = st->first_insn_idx;
1731 int last_idx = env->insn_idx;
1732 struct bpf_func_state *func;
1733 struct bpf_reg_state *reg;
1734 u32 reg_mask = regno >= 0 ? 1u << regno : 0;
1735 u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
1736 bool skip_first = true;
1737 bool new_marks = false;
1738 int i, err;
1740 if (!env->allow_ptr_leaks)
1741 /* backtracking is root only for now */
1742 return 0;
1744 func = st->frame[st->curframe];
1745 if (regno >= 0) {
1746 reg = &func->regs[regno];
1747 if (reg->type != SCALAR_VALUE) {
1748 WARN_ONCE(1, "backtracing misuse");
1749 return -EFAULT;
1751 if (!reg->precise)
1752 new_marks = true;
1753 else
1754 reg_mask = 0;
1755 reg->precise = true;
1758 while (spi >= 0) {
1759 if (func->stack[spi].slot_type[0] != STACK_SPILL) {
1760 stack_mask = 0;
1761 break;
1763 reg = &func->stack[spi].spilled_ptr;
1764 if (reg->type != SCALAR_VALUE) {
1765 stack_mask = 0;
1766 break;
1768 if (!reg->precise)
1769 new_marks = true;
1770 else
1771 stack_mask = 0;
1772 reg->precise = true;
1773 break;
1776 if (!new_marks)
1777 return 0;
1778 if (!reg_mask && !stack_mask)
1779 return 0;
1780 for (;;) {
1781 DECLARE_BITMAP(mask, 64);
1782 u32 history = st->jmp_history_cnt;
1784 if (env->log.level & BPF_LOG_LEVEL)
1785 verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
1786 for (i = last_idx;;) {
1787 if (skip_first) {
1788 err = 0;
1789 skip_first = false;
1790 } else {
1791 err = backtrack_insn(env, i, &reg_mask, &stack_mask);
1793 if (err == -ENOTSUPP) {
1794 mark_all_scalars_precise(env, st);
1795 return 0;
1796 } else if (err) {
1797 return err;
1799 if (!reg_mask && !stack_mask)
1800 /* Found assignment(s) into tracked register in this state.
1801 * Since this state is already marked, just return.
1802 * Nothing to be tracked further in the parent state.
1804 return 0;
1805 if (i == first_idx)
1806 break;
1807 i = get_prev_insn_idx(st, i, &history);
1808 if (i >= env->prog->len) {
1809 /* This can happen if backtracking reached insn 0
1810 * and there are still reg_mask or stack_mask
1811 * to backtrack.
1812 * It means the backtracking missed the spot where
1813 * particular register was initialized with a constant.
1815 verbose(env, "BUG backtracking idx %d\n", i);
1816 WARN_ONCE(1, "verifier backtracking bug");
1817 return -EFAULT;
1820 st = st->parent;
1821 if (!st)
1822 break;
1824 new_marks = false;
1825 func = st->frame[st->curframe];
1826 bitmap_from_u64(mask, reg_mask);
1827 for_each_set_bit(i, mask, 32) {
1828 reg = &func->regs[i];
1829 if (reg->type != SCALAR_VALUE) {
1830 reg_mask &= ~(1u << i);
1831 continue;
1833 if (!reg->precise)
1834 new_marks = true;
1835 reg->precise = true;
1838 bitmap_from_u64(mask, stack_mask);
1839 for_each_set_bit(i, mask, 64) {
1840 if (i >= func->allocated_stack / BPF_REG_SIZE) {
1841 /* the sequence of instructions:
1842 * 2: (bf) r3 = r10
1843 * 3: (7b) *(u64 *)(r3 -8) = r0
1844 * 4: (79) r4 = *(u64 *)(r10 -8)
1845 * doesn't contain jmps. It's backtracked
1846 * as a single block.
1847 * During backtracking insn 3 is not recognized as
1848 * stack access, so at the end of backtracking
1849 * stack slot fp-8 is still marked in stack_mask.
1850 * However the parent state may not have accessed
1851 * fp-8 and it's "unallocated" stack space.
1852 * In such a case, fall back to conservative marking.
1854 mark_all_scalars_precise(env, st);
1855 return 0;
1858 if (func->stack[i].slot_type[0] != STACK_SPILL) {
1859 stack_mask &= ~(1ull << i);
1860 continue;
1862 reg = &func->stack[i].spilled_ptr;
1863 if (reg->type != SCALAR_VALUE) {
1864 stack_mask &= ~(1ull << i);
1865 continue;
1867 if (!reg->precise)
1868 new_marks = true;
1869 reg->precise = true;
1871 if (env->log.level & BPF_LOG_LEVEL) {
1872 print_verifier_state(env, func);
1873 verbose(env, "parent %s regs=%x stack=%llx marks\n",
1874 new_marks ? "didn't have" : "already had",
1875 reg_mask, stack_mask);
1878 if (!reg_mask && !stack_mask)
1879 break;
1880 if (!new_marks)
1881 break;
1883 last_idx = st->last_insn_idx;
1884 first_idx = st->first_insn_idx;
1886 return 0;
1889 static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
1891 return __mark_chain_precision(env, regno, -1);
1894 static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
1896 return __mark_chain_precision(env, -1, spi);
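/* A sketch of how the precision marks above typically get requested; the
 * program fragment below is hypothetical:
 *   r6 = 8                     ; known scalar constant
 *   *(u64 *)(r10 - 16) = r6    ; constant spilled to fp-16
 *   ...
 *   r2 = *(u64 *)(r10 - 16)    ; filled back into r2
 *   call <helper>              ; r2 passed as an ARG_CONST_SIZE argument
 * check_func_arg() then calls mark_chain_precision(env, BPF_REG_2), which
 * walks the instruction history backwards through the parent states and
 * marks r2, the fp-16 slot and r6 as precise, so that state pruning later
 * compares their exact values instead of treating them as arbitrary scalars.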
1899 static bool is_spillable_regtype(enum bpf_reg_type type)
1901 switch (type) {
1902 case PTR_TO_MAP_VALUE:
1903 case PTR_TO_MAP_VALUE_OR_NULL:
1904 case PTR_TO_STACK:
1905 case PTR_TO_CTX:
1906 case PTR_TO_PACKET:
1907 case PTR_TO_PACKET_META:
1908 case PTR_TO_PACKET_END:
1909 case PTR_TO_FLOW_KEYS:
1910 case CONST_PTR_TO_MAP:
1911 case PTR_TO_SOCKET:
1912 case PTR_TO_SOCKET_OR_NULL:
1913 case PTR_TO_SOCK_COMMON:
1914 case PTR_TO_SOCK_COMMON_OR_NULL:
1915 case PTR_TO_TCP_SOCK:
1916 case PTR_TO_TCP_SOCK_OR_NULL:
1917 case PTR_TO_XDP_SOCK:
1918 return true;
1919 default:
1920 return false;
1924 /* Does this register contain a constant zero? */
1925 static bool register_is_null(struct bpf_reg_state *reg)
1927 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
1930 static bool register_is_const(struct bpf_reg_state *reg)
1932 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
1935 static void save_register_state(struct bpf_func_state *state,
1936 int spi, struct bpf_reg_state *reg)
1938 int i;
1940 state->stack[spi].spilled_ptr = *reg;
1941 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1943 for (i = 0; i < BPF_REG_SIZE; i++)
1944 state->stack[spi].slot_type[i] = STACK_SPILL;
1947 /* check_stack_read/write functions track spill/fill of registers,
1948 * stack boundary and alignment are checked in check_mem_access()
1950 static int check_stack_write(struct bpf_verifier_env *env,
1951 struct bpf_func_state *state, /* func where register points to */
1952 int off, int size, int value_regno, int insn_idx)
1954 struct bpf_func_state *cur; /* state of the current function */
1955 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
1956 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
1957 struct bpf_reg_state *reg = NULL;
1959 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
1960 state->acquired_refs, true);
1961 if (err)
1962 return err;
1963 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
1964 * so it's aligned access and [off, off + size) are within stack limits
1966 if (!env->allow_ptr_leaks &&
1967 state->stack[spi].slot_type[0] == STACK_SPILL &&
1968 size != BPF_REG_SIZE) {
1969 verbose(env, "attempt to corrupt spilled pointer on stack\n");
1970 return -EACCES;
1973 cur = env->cur_state->frame[env->cur_state->curframe];
1974 if (value_regno >= 0)
1975 reg = &cur->regs[value_regno];
1977 if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
1978 !register_is_null(reg) && env->allow_ptr_leaks) {
1979 if (dst_reg != BPF_REG_FP) {
1980 /* The backtracking logic can only recognize explicit
1981 * stack slot address like [fp - 8]. Other spill of
1982 * scalar via different register has to be conservative.
1983 * Backtrack from here and mark all registers as precise
1984 * that contributed into 'reg' being a constant.
1986 err = mark_chain_precision(env, value_regno);
1987 if (err)
1988 return err;
1990 save_register_state(state, spi, reg);
1991 } else if (reg && is_spillable_regtype(reg->type)) {
1992 /* register containing pointer is being spilled into stack */
1993 if (size != BPF_REG_SIZE) {
1994 verbose_linfo(env, insn_idx, "; ");
1995 verbose(env, "invalid size of register spill\n");
1996 return -EACCES;
1999 if (state != cur && reg->type == PTR_TO_STACK) {
2000 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
2001 return -EINVAL;
2004 if (!env->allow_ptr_leaks) {
2005 bool sanitize = false;
2007 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
2008 register_is_const(&state->stack[spi].spilled_ptr))
2009 sanitize = true;
2010 for (i = 0; i < BPF_REG_SIZE; i++)
2011 if (state->stack[spi].slot_type[i] == STACK_MISC) {
2012 sanitize = true;
2013 break;
2015 if (sanitize) {
2016 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
2017 int soff = (-spi - 1) * BPF_REG_SIZE;
2019 /* detected reuse of integer stack slot with a pointer
2020 * which means either llvm is reusing stack slot or
2021 * an attacker is trying to exploit CVE-2018-3639
2022 * (speculative store bypass)
2023 * Have to sanitize that slot with preemptive
2024 * store of zero.
2026 if (*poff && *poff != soff) {
2027 /* disallow programs where single insn stores
2028 * into two different stack slots, since verifier
2029 * cannot sanitize them
2031 verbose(env,
2032 "insn %d cannot access two stack slots fp%d and fp%d",
2033 insn_idx, *poff, soff);
2034 return -EINVAL;
2036 *poff = soff;
2039 save_register_state(state, spi, reg);
2040 } else {
2041 u8 type = STACK_MISC;
2043 /* regular write of data into stack destroys any spilled ptr */
2044 state->stack[spi].spilled_ptr.type = NOT_INIT;
2045 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */
2046 if (state->stack[spi].slot_type[0] == STACK_SPILL)
2047 for (i = 0; i < BPF_REG_SIZE; i++)
2048 state->stack[spi].slot_type[i] = STACK_MISC;
2050 /* only mark the slot as written if all 8 bytes were written
2051 * otherwise read propagation may incorrectly stop too soon
2052 * when stack slots are partially written.
2053 * This heuristic means that read propagation will be
2054 * conservative, since it will add reg_live_read marks
2055 * to stack slots all the way to the first state when a program
2056 * writes+reads less than 8 bytes
2058 if (size == BPF_REG_SIZE)
2059 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2061 /* when we zero initialize stack slots mark them as such */
2062 if (reg && register_is_null(reg)) {
2063 /* backtracking doesn't work for STACK_ZERO yet. */
2064 err = mark_chain_precision(env, value_regno);
2065 if (err)
2066 return err;
2067 type = STACK_ZERO;
2070 /* Mark slots affected by this stack write. */
2071 for (i = 0; i < size; i++)
2072 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
2073 type;
2075 return 0;
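/* A minimal spill/fill example (a sketch) of how the functions above and
 * below cooperate:
 *   *(u64 *)(r10 - 8) = r6   ; check_stack_write(): fp-8 becomes STACK_SPILL
 *                            ; and r6's full register state is saved in
 *                            ; stack[spi].spilled_ptr
 *   r2 = *(u64 *)(r10 - 8)   ; check_stack_read(): the saved state is copied
 *                            ; back into r2 and a REG_LIVE_READ64 mark is
 *                            ; propagated to the parent states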
2078 static int check_stack_read(struct bpf_verifier_env *env,
2079 struct bpf_func_state *reg_state /* func where register points to */,
2080 int off, int size, int value_regno)
2082 struct bpf_verifier_state *vstate = env->cur_state;
2083 struct bpf_func_state *state = vstate->frame[vstate->curframe];
2084 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
2085 struct bpf_reg_state *reg;
2086 u8 *stype;
2088 if (reg_state->allocated_stack <= slot) {
2089 verbose(env, "invalid read from stack off %d+0 size %d\n",
2090 off, size);
2091 return -EACCES;
2093 stype = reg_state->stack[spi].slot_type;
2094 reg = &reg_state->stack[spi].spilled_ptr;
2096 if (stype[0] == STACK_SPILL) {
2097 if (size != BPF_REG_SIZE) {
2098 if (reg->type != SCALAR_VALUE) {
2099 verbose_linfo(env, env->insn_idx, "; ");
2100 verbose(env, "invalid size of register fill\n");
2101 return -EACCES;
2103 if (value_regno >= 0) {
2104 mark_reg_unknown(env, state->regs, value_regno);
2105 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2107 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2108 return 0;
2110 for (i = 1; i < BPF_REG_SIZE; i++) {
2111 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
2112 verbose(env, "corrupted spill memory\n");
2113 return -EACCES;
2117 if (value_regno >= 0) {
2118 /* restore register state from stack */
2119 state->regs[value_regno] = *reg;
2120 /* mark reg as written since spilled pointer state likely
2121 * has its liveness marks cleared by is_state_visited()
2122 * which resets stack/reg liveness for state transitions
2124 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2126 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2127 } else {
2128 int zeros = 0;
2130 for (i = 0; i < size; i++) {
2131 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
2132 continue;
2133 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
2134 zeros++;
2135 continue;
2137 verbose(env, "invalid read from stack off %d+%d size %d\n",
2138 off, i, size);
2139 return -EACCES;
2141 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2142 if (value_regno >= 0) {
2143 if (zeros == size) {
2144 /* any size read into register is zero extended,
2145 * so the whole register == const_zero
2147 __mark_reg_const_zero(&state->regs[value_regno]);
2148 /* backtracking doesn't support STACK_ZERO yet,
2149 * so mark it precise here, so that later
2150 * backtracking can stop here.
2151 * Backtracking may not need this if this register
2152 * doesn't participate in pointer adjustment.
2153 * Forward propagation of precise flag is not
2154 * necessary either. This mark is only to stop
2155 * backtracking. Any register that contributed
2156 * to const 0 was marked precise before spill.
2158 state->regs[value_regno].precise = true;
2159 } else {
2160 /* have read misc data from the stack */
2161 mark_reg_unknown(env, state->regs, value_regno);
2163 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2166 return 0;
2169 static int check_stack_access(struct bpf_verifier_env *env,
2170 const struct bpf_reg_state *reg,
2171 int off, int size)
2173 /* Stack accesses must be at a fixed offset, so that we
2174 * can determine what type of data were returned. See
2175 * check_stack_read().
2177 if (!tnum_is_const(reg->var_off)) {
2178 char tn_buf[48];
2180 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2181 verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
2182 tn_buf, off, size);
2183 return -EACCES;
2186 if (off >= 0 || off < -MAX_BPF_STACK) {
2187 verbose(env, "invalid stack off=%d size=%d\n", off, size);
2188 return -EACCES;
2191 return 0;
2194 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
2195 int off, int size, enum bpf_access_type type)
2197 struct bpf_reg_state *regs = cur_regs(env);
2198 struct bpf_map *map = regs[regno].map_ptr;
2199 u32 cap = bpf_map_flags_to_cap(map);
2201 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
2202 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
2203 map->value_size, off, size);
2204 return -EACCES;
2207 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
2208 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
2209 map->value_size, off, size);
2210 return -EACCES;
2213 return 0;
2216 /* check read/write into map element returned by bpf_map_lookup_elem() */
2217 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
2218 int size, bool zero_size_allowed)
2220 struct bpf_reg_state *regs = cur_regs(env);
2221 struct bpf_map *map = regs[regno].map_ptr;
2223 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
2224 off + size > map->value_size) {
2225 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
2226 map->value_size, off, size);
2227 return -EACCES;
2229 return 0;
2232 /* check read/write into a map element with possible variable offset */
2233 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
2234 int off, int size, bool zero_size_allowed)
2236 struct bpf_verifier_state *vstate = env->cur_state;
2237 struct bpf_func_state *state = vstate->frame[vstate->curframe];
2238 struct bpf_reg_state *reg = &state->regs[regno];
2239 int err;
2241 /* We may have adjusted the register to this map value, so we
2242 * need to try adding each of min_value and max_value to off
2243 * to make sure our theoretical access will be safe.
2245 if (env->log.level & BPF_LOG_LEVEL)
2246 print_verifier_state(env, state);
2248 /* The minimum value is only important with signed
2249 * comparisons where we can't assume the floor of a
2250 * value is 0. If we are using signed variables for our
2251 * indexes we need to make sure that whatever we use
2252 * will have a set floor within our range.
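* E.g. (hypothetical numbers) with off == 4 and an index register whose
* smin_value is -8, smin_value + off == -4, so the access could land
* before the start of the map value and is rejected below.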
2254 if (reg->smin_value < 0 &&
2255 (reg->smin_value == S64_MIN ||
2256 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
2257 reg->smin_value + off < 0)) {
2258 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2259 regno);
2260 return -EACCES;
2262 err = __check_map_access(env, regno, reg->smin_value + off, size,
2263 zero_size_allowed);
2264 if (err) {
2265 verbose(env, "R%d min value is outside of the array range\n",
2266 regno);
2267 return err;
2270 /* If we haven't set a max value then we need to bail since we can't be
2271 * sure we won't do bad things.
2272 * If reg->umax_value + off could overflow, treat that as unbounded too.
2274 if (reg->umax_value >= BPF_MAX_VAR_OFF) {
2275 verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
2276 regno);
2277 return -EACCES;
2279 err = __check_map_access(env, regno, reg->umax_value + off, size,
2280 zero_size_allowed);
2281 if (err)
2282 verbose(env, "R%d max value is outside of the array range\n",
2283 regno);
2285 if (map_value_has_spin_lock(reg->map_ptr)) {
2286 u32 lock = reg->map_ptr->spin_lock_off;
2288 /* if any part of struct bpf_spin_lock can be touched by
2289 * load/store reject this program.
2290 * To check that [x1, x2) overlaps with [y1, y2)
2291 * it is sufficient to check x1 < y2 && y1 < x2.
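* E.g. an 8-byte access covering [8, 16) and a bpf_spin_lock spanning
* [12, 16): 8 < 16 && 12 < 16, so they overlap and the access is rejected.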
2293 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
2294 lock < reg->umax_value + off + size) {
2295 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
2296 return -EACCES;
2299 return err;
2302 #define MAX_PACKET_OFF 0xffff
2304 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
2305 const struct bpf_call_arg_meta *meta,
2306 enum bpf_access_type t)
2308 switch (env->prog->type) {
2309 /* Program types only with direct read access go here! */
2310 case BPF_PROG_TYPE_LWT_IN:
2311 case BPF_PROG_TYPE_LWT_OUT:
2312 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2313 case BPF_PROG_TYPE_SK_REUSEPORT:
2314 case BPF_PROG_TYPE_FLOW_DISSECTOR:
2315 case BPF_PROG_TYPE_CGROUP_SKB:
2316 if (t == BPF_WRITE)
2317 return false;
2318 /* fallthrough */
2320 /* Program types with direct read + write access go here! */
2321 case BPF_PROG_TYPE_SCHED_CLS:
2322 case BPF_PROG_TYPE_SCHED_ACT:
2323 case BPF_PROG_TYPE_XDP:
2324 case BPF_PROG_TYPE_LWT_XMIT:
2325 case BPF_PROG_TYPE_SK_SKB:
2326 case BPF_PROG_TYPE_SK_MSG:
2327 if (meta)
2328 return meta->pkt_access;
2330 env->seen_direct_write = true;
2331 return true;
2333 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2334 if (t == BPF_WRITE)
2335 env->seen_direct_write = true;
2337 return true;
2339 default:
2340 return false;
2344 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
2345 int off, int size, bool zero_size_allowed)
2347 struct bpf_reg_state *regs = cur_regs(env);
2348 struct bpf_reg_state *reg = &regs[regno];
2350 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
2351 (u64)off + size > reg->range) {
2352 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
2353 off, size, regno, reg->id, reg->off, reg->range);
2354 return -EACCES;
2356 return 0;
2359 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
2360 int size, bool zero_size_allowed)
2362 struct bpf_reg_state *regs = cur_regs(env);
2363 struct bpf_reg_state *reg = &regs[regno];
2364 int err;
2366 /* We may have added a variable offset to the packet pointer; but any
2367 * reg->range we have comes after that. We are only checking the fixed
2368 * offset.
2371 /* We don't allow negative numbers, because we aren't tracking enough
2372 * detail to prove they're safe.
2374 if (reg->smin_value < 0) {
2375 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2376 regno);
2377 return -EACCES;
2379 err = __check_packet_access(env, regno, off, size, zero_size_allowed);
2380 if (err) {
2381 verbose(env, "R%d offset is outside of the packet\n", regno);
2382 return err;
2385 /* __check_packet_access has made sure "off + size - 1" is within u16.
2386 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
2387 * otherwise find_good_pkt_pointers would have refused to set range info
2388 * and __check_packet_access would have rejected this pkt access.
2389 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
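* E.g. off == 10, size == 4 and reg->umax_value == MAX_PACKET_OFF gives
* 10 + 0xffff + 4 - 1 == 65548, well within u32.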
2391 env->prog->aux->max_pkt_offset =
2392 max_t(u32, env->prog->aux->max_pkt_offset,
2393 off + reg->umax_value + size - 1);
2395 return err;
2398 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
2399 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
2400 enum bpf_access_type t, enum bpf_reg_type *reg_type,
2401 u32 *btf_id)
2403 struct bpf_insn_access_aux info = {
2404 .reg_type = *reg_type,
2405 .log = &env->log,
2408 if (env->ops->is_valid_access &&
2409 env->ops->is_valid_access(off, size, t, env->prog, &info)) {
2410 /* A non zero info.ctx_field_size indicates that this field is a
2411 * candidate for later verifier transformation to load the whole
2412 * field and then apply a mask when accessed with a narrower
2413 * access than actual ctx access size. A zero info.ctx_field_size
2414 * will only allow for whole field access and rejects any other
2415 * type of narrower access.
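* E.g. a 1-byte read of a 4-byte context field may later be rewritten by
* the verifier into a load of the whole field followed by a shift/mask,
* which is why the field size is remembered in insn_aux_data below.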
2417 *reg_type = info.reg_type;
2419 if (*reg_type == PTR_TO_BTF_ID)
2420 *btf_id = info.btf_id;
2421 else
2422 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
2423 /* remember the offset of last byte accessed in ctx */
2424 if (env->prog->aux->max_ctx_offset < off + size)
2425 env->prog->aux->max_ctx_offset = off + size;
2426 return 0;
2429 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
2430 return -EACCES;
2433 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
2434 int size)
2436 if (size < 0 || off < 0 ||
2437 (u64)off + size > sizeof(struct bpf_flow_keys)) {
2438 verbose(env, "invalid access to flow keys off=%d size=%d\n",
2439 off, size);
2440 return -EACCES;
2442 return 0;
2445 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
2446 u32 regno, int off, int size,
2447 enum bpf_access_type t)
2449 struct bpf_reg_state *regs = cur_regs(env);
2450 struct bpf_reg_state *reg = &regs[regno];
2451 struct bpf_insn_access_aux info = {};
2452 bool valid;
2454 if (reg->smin_value < 0) {
2455 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2456 regno);
2457 return -EACCES;
2460 switch (reg->type) {
2461 case PTR_TO_SOCK_COMMON:
2462 valid = bpf_sock_common_is_valid_access(off, size, t, &info);
2463 break;
2464 case PTR_TO_SOCKET:
2465 valid = bpf_sock_is_valid_access(off, size, t, &info);
2466 break;
2467 case PTR_TO_TCP_SOCK:
2468 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
2469 break;
2470 case PTR_TO_XDP_SOCK:
2471 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
2472 break;
2473 default:
2474 valid = false;
2478 if (valid) {
2479 env->insn_aux_data[insn_idx].ctx_field_size =
2480 info.ctx_field_size;
2481 return 0;
2484 verbose(env, "R%d invalid %s access off=%d size=%d\n",
2485 regno, reg_type_str[reg->type], off, size);
2487 return -EACCES;
2490 static bool __is_pointer_value(bool allow_ptr_leaks,
2491 const struct bpf_reg_state *reg)
2493 if (allow_ptr_leaks)
2494 return false;
2496 return reg->type != SCALAR_VALUE;
2499 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
2501 return cur_regs(env) + regno;
2504 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
2506 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
2509 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
2511 const struct bpf_reg_state *reg = reg_state(env, regno);
2513 return reg->type == PTR_TO_CTX;
2516 static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
2518 const struct bpf_reg_state *reg = reg_state(env, regno);
2520 return type_is_sk_pointer(reg->type);
2523 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
2525 const struct bpf_reg_state *reg = reg_state(env, regno);
2527 return type_is_pkt_pointer(reg->type);
2530 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
2532 const struct bpf_reg_state *reg = reg_state(env, regno);
2534 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
2535 return reg->type == PTR_TO_FLOW_KEYS;
2538 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
2539 const struct bpf_reg_state *reg,
2540 int off, int size, bool strict)
2542 struct tnum reg_off;
2543 int ip_align;
2545 /* Byte size accesses are always allowed. */
2546 if (!strict || size == 1)
2547 return 0;
2549 /* For platforms that do not have a Kconfig enabling
2550 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
2551 * NET_IP_ALIGN is universally set to '2'. And on platforms
2552 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
2553 * to this code only in strict mode where we want to emulate
2554 * the NET_IP_ALIGN==2 checking. Therefore use an
2555 * unconditional IP align value of '2'.
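* E.g. a 4-byte load at pkt + 14 (the start of an IPv4 header behind a
* 14-byte Ethernet header) with reg->off == 0 and var_off == 0 yields
* 2 + 0 + 14 + 0 == 16, which is 4-byte aligned, so the access is allowed.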
2557 ip_align = 2;
2559 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
2560 if (!tnum_is_aligned(reg_off, size)) {
2561 char tn_buf[48];
2563 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2564 verbose(env,
2565 "misaligned packet access off %d+%s+%d+%d size %d\n",
2566 ip_align, tn_buf, reg->off, off, size);
2567 return -EACCES;
2570 return 0;
2573 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
2574 const struct bpf_reg_state *reg,
2575 const char *pointer_desc,
2576 int off, int size, bool strict)
2578 struct tnum reg_off;
2580 /* Byte size accesses are always allowed. */
2581 if (!strict || size == 1)
2582 return 0;
2584 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
2585 if (!tnum_is_aligned(reg_off, size)) {
2586 char tn_buf[48];
2588 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2589 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
2590 pointer_desc, tn_buf, reg->off, off, size);
2591 return -EACCES;
2594 return 0;
2597 static int check_ptr_alignment(struct bpf_verifier_env *env,
2598 const struct bpf_reg_state *reg, int off,
2599 int size, bool strict_alignment_once)
2601 bool strict = env->strict_alignment || strict_alignment_once;
2602 const char *pointer_desc = "";
2604 switch (reg->type) {
2605 case PTR_TO_PACKET:
2606 case PTR_TO_PACKET_META:
2607 /* Special case, because of NET_IP_ALIGN. Given metadata sits
2608 * right in front, treat it the very same way.
2610 return check_pkt_ptr_alignment(env, reg, off, size, strict);
2611 case PTR_TO_FLOW_KEYS:
2612 pointer_desc = "flow keys ";
2613 break;
2614 case PTR_TO_MAP_VALUE:
2615 pointer_desc = "value ";
2616 break;
2617 case PTR_TO_CTX:
2618 pointer_desc = "context ";
2619 break;
2620 case PTR_TO_STACK:
2621 pointer_desc = "stack ";
2622 /* The stack spill tracking logic in check_stack_write()
2623 * and check_stack_read() relies on stack accesses being
2624 * aligned.
2626 strict = true;
2627 break;
2628 case PTR_TO_SOCKET:
2629 pointer_desc = "sock ";
2630 break;
2631 case PTR_TO_SOCK_COMMON:
2632 pointer_desc = "sock_common ";
2633 break;
2634 case PTR_TO_TCP_SOCK:
2635 pointer_desc = "tcp_sock ";
2636 break;
2637 case PTR_TO_XDP_SOCK:
2638 pointer_desc = "xdp_sock ";
2639 break;
2640 default:
2641 break;
2643 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
2644 strict);
2647 static int update_stack_depth(struct bpf_verifier_env *env,
2648 const struct bpf_func_state *func,
2649 int off)
2651 u16 stack = env->subprog_info[func->subprogno].stack_depth;
2653 if (stack >= -off)
2654 return 0;
2656 /* update known max for given subprogram */
2657 env->subprog_info[func->subprogno].stack_depth = -off;
2658 return 0;
2661 /* starting from main bpf function walk all instructions of the function
2662 * and recursively walk all callees that given function can call.
2663 * Ignore jump and exit insns.
2664 * Since recursion is prevented by check_cfg() this algorithm
2665 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
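* E.g. (sketch) a main function using 40 bytes of stack that calls a
* subprog using 24 bytes gives depth = round_up(40, 32) + round_up(24, 32)
* = 64 + 32 = 96, which is checked against MAX_BPF_STACK on every push.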
2667 static int check_max_stack_depth(struct bpf_verifier_env *env)
2669 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
2670 struct bpf_subprog_info *subprog = env->subprog_info;
2671 struct bpf_insn *insn = env->prog->insnsi;
2672 int ret_insn[MAX_CALL_FRAMES];
2673 int ret_prog[MAX_CALL_FRAMES];
2675 process_func:
2676 /* round up to 32-bytes, since this is granularity
2677 * of interpreter stack size
2679 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
2680 if (depth > MAX_BPF_STACK) {
2681 verbose(env, "combined stack size of %d calls is %d. Too large\n",
2682 frame + 1, depth);
2683 return -EACCES;
2685 continue_func:
2686 subprog_end = subprog[idx + 1].start;
2687 for (; i < subprog_end; i++) {
2688 if (insn[i].code != (BPF_JMP | BPF_CALL))
2689 continue;
2690 if (insn[i].src_reg != BPF_PSEUDO_CALL)
2691 continue;
2692 /* remember insn and function to return to */
2693 ret_insn[frame] = i + 1;
2694 ret_prog[frame] = idx;
2696 /* find the callee */
2697 i = i + insn[i].imm + 1;
2698 idx = find_subprog(env, i);
2699 if (idx < 0) {
2700 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
2702 return -EFAULT;
2704 frame++;
2705 if (frame >= MAX_CALL_FRAMES) {
2706 verbose(env, "the call stack of %d frames is too deep!\n",
2707 frame);
2708 return -E2BIG;
2710 goto process_func;
2712 /* end of for() loop means the last insn of the 'subprog'
2713 * was reached. Doesn't matter whether it was JA or EXIT
2715 if (frame == 0)
2716 return 0;
2717 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
2718 frame--;
2719 i = ret_insn[frame];
2720 idx = ret_prog[frame];
2721 goto continue_func;
2724 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2725 static int get_callee_stack_depth(struct bpf_verifier_env *env,
2726 const struct bpf_insn *insn, int idx)
2728 int start = idx + insn->imm + 1, subprog;
2730 subprog = find_subprog(env, start);
2731 if (subprog < 0) {
2732 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
2733 start);
2734 return -EFAULT;
2736 return env->subprog_info[subprog].stack_depth;
2738 #endif
2740 static int check_ctx_reg(struct bpf_verifier_env *env,
2741 const struct bpf_reg_state *reg, int regno)
2743 /* Access to ctx or passing it to a helper is only allowed in
2744 * its original, unmodified form.
2747 if (reg->off) {
2748 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
2749 regno, reg->off);
2750 return -EACCES;
2753 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2754 char tn_buf[48];
2756 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2757 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
2758 return -EACCES;
2761 return 0;
2764 static int check_tp_buffer_access(struct bpf_verifier_env *env,
2765 const struct bpf_reg_state *reg,
2766 int regno, int off, int size)
2768 if (off < 0) {
2769 verbose(env,
2770 "R%d invalid tracepoint buffer access: off=%d, size=%d",
2771 regno, off, size);
2772 return -EACCES;
2774 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2775 char tn_buf[48];
2777 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2778 verbose(env,
2779 "R%d invalid variable buffer offset: off=%d, var_off=%s",
2780 regno, off, tn_buf);
2781 return -EACCES;
2783 if (off + size > env->prog->aux->max_tp_access)
2784 env->prog->aux->max_tp_access = off + size;
2786 return 0;
2790 /* truncate register to smaller size (in bytes)
2791 * must be called with size < BPF_REG_SIZE
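* E.g. coercing to size == 2 uses mask == 0xffff; a register with
* umin_value == 0x10005 and umax_value == 0x1ffff has identical high bits,
* so after truncation the bounds simply become [0x5, 0xffff].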
2793 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
2795 u64 mask;
2797 /* clear high bits in bit representation */
2798 reg->var_off = tnum_cast(reg->var_off, size);
2800 /* fix arithmetic bounds */
2801 mask = ((u64)1 << (size * 8)) - 1;
2802 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
2803 reg->umin_value &= mask;
2804 reg->umax_value &= mask;
2805 } else {
2806 reg->umin_value = 0;
2807 reg->umax_value = mask;
2809 reg->smin_value = reg->umin_value;
2810 reg->smax_value = reg->umax_value;
2813 static bool bpf_map_is_rdonly(const struct bpf_map *map)
2815 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
2818 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
2820 void *ptr;
2821 u64 addr;
2822 int err;
2824 err = map->ops->map_direct_value_addr(map, &addr, off);
2825 if (err)
2826 return err;
2827 ptr = (void *)(long)addr + off;
2829 switch (size) {
2830 case sizeof(u8):
2831 *val = (u64)*(u8 *)ptr;
2832 break;
2833 case sizeof(u16):
2834 *val = (u64)*(u16 *)ptr;
2835 break;
2836 case sizeof(u32):
2837 *val = (u64)*(u32 *)ptr;
2838 break;
2839 case sizeof(u64):
2840 *val = *(u64 *)ptr;
2841 break;
2842 default:
2843 return -EINVAL;
2845 return 0;
2848 static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
2849 struct bpf_reg_state *regs,
2850 int regno, int off, int size,
2851 enum bpf_access_type atype,
2852 int value_regno)
2854 struct bpf_reg_state *reg = regs + regno;
2855 const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id);
2856 const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off);
2857 u32 btf_id;
2858 int ret;
2860 if (atype != BPF_READ) {
2861 verbose(env, "only read is supported\n");
2862 return -EACCES;
2865 if (off < 0) {
2866 verbose(env,
2867 "R%d is ptr_%s invalid negative access: off=%d\n",
2868 regno, tname, off);
2869 return -EACCES;
2871 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2872 char tn_buf[48];
2874 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2875 verbose(env,
2876 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
2877 regno, tname, off, tn_buf);
2878 return -EACCES;
2881 ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id);
2882 if (ret < 0)
2883 return ret;
2885 if (ret == SCALAR_VALUE) {
2886 mark_reg_unknown(env, regs, value_regno);
2887 return 0;
2889 mark_reg_known_zero(env, regs, value_regno);
2890 regs[value_regno].type = PTR_TO_BTF_ID;
2891 regs[value_regno].btf_id = btf_id;
2892 return 0;
2895 /* check whether memory at (regno + off) is accessible for t = (read | write)
2896 * if t==write, value_regno is a register whose value is stored into memory
2897 * if t==read, value_regno is a register which will receive the value from memory
2898 * if t==write && value_regno==-1, some unknown value is stored into memory
2899 * if t==read && value_regno==-1, don't care what we read from memory
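* E.g. for the load insn r0 = *(u32 *)(r2 + 8) this is called with
* regno == 2, off == 8, BPF_SIZE == BPF_W, t == BPF_READ and
* value_regno == 0.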
2901 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
2902 int off, int bpf_size, enum bpf_access_type t,
2903 int value_regno, bool strict_alignment_once)
2905 struct bpf_reg_state *regs = cur_regs(env);
2906 struct bpf_reg_state *reg = regs + regno;
2907 struct bpf_func_state *state;
2908 int size, err = 0;
2910 size = bpf_size_to_bytes(bpf_size);
2911 if (size < 0)
2912 return size;
2914 /* alignment checks will add in reg->off themselves */
2915 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
2916 if (err)
2917 return err;
2919 /* for access checks, reg->off is just part of off */
2920 off += reg->off;
2922 if (reg->type == PTR_TO_MAP_VALUE) {
2923 if (t == BPF_WRITE && value_regno >= 0 &&
2924 is_pointer_value(env, value_regno)) {
2925 verbose(env, "R%d leaks addr into map\n", value_regno);
2926 return -EACCES;
2928 err = check_map_access_type(env, regno, off, size, t);
2929 if (err)
2930 return err;
2931 err = check_map_access(env, regno, off, size, false);
2932 if (!err && t == BPF_READ && value_regno >= 0) {
2933 struct bpf_map *map = reg->map_ptr;
2935 /* if map is read-only, track its contents as scalars */
2936 if (tnum_is_const(reg->var_off) &&
2937 bpf_map_is_rdonly(map) &&
2938 map->ops->map_direct_value_addr) {
2939 int map_off = off + reg->var_off.value;
2940 u64 val = 0;
2942 err = bpf_map_direct_read(map, map_off, size,
2943 &val);
2944 if (err)
2945 return err;
2947 regs[value_regno].type = SCALAR_VALUE;
2948 __mark_reg_known(&regs[value_regno], val);
2949 } else {
2950 mark_reg_unknown(env, regs, value_regno);
2953 } else if (reg->type == PTR_TO_CTX) {
2954 enum bpf_reg_type reg_type = SCALAR_VALUE;
2955 u32 btf_id = 0;
2957 if (t == BPF_WRITE && value_regno >= 0 &&
2958 is_pointer_value(env, value_regno)) {
2959 verbose(env, "R%d leaks addr into ctx\n", value_regno);
2960 return -EACCES;
2963 err = check_ctx_reg(env, reg, regno);
2964 if (err < 0)
2965 return err;
2967 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf_id);
2968 if (err)
2969 verbose_linfo(env, insn_idx, "; ");
2970 if (!err && t == BPF_READ && value_regno >= 0) {
2971 /* ctx access returns either a scalar, or a
2972 * PTR_TO_PACKET[_META,_END]. In the latter
2973 * case, we know the offset is zero.
2975 if (reg_type == SCALAR_VALUE) {
2976 mark_reg_unknown(env, regs, value_regno);
2977 } else {
2978 mark_reg_known_zero(env, regs,
2979 value_regno);
2980 if (reg_type_may_be_null(reg_type))
2981 regs[value_regno].id = ++env->id_gen;
2982 /* A load of ctx field could have different
2983 * actual load size with the one encoded in the
2984 * insn. When the dst is PTR, it is for sure not
2985 * a sub-register.
2987 regs[value_regno].subreg_def = DEF_NOT_SUBREG;
2988 if (reg_type == PTR_TO_BTF_ID)
2989 regs[value_regno].btf_id = btf_id;
2991 regs[value_regno].type = reg_type;
2994 } else if (reg->type == PTR_TO_STACK) {
2995 off += reg->var_off.value;
2996 err = check_stack_access(env, reg, off, size);
2997 if (err)
2998 return err;
3000 state = func(env, reg);
3001 err = update_stack_depth(env, state, off);
3002 if (err)
3003 return err;
3005 if (t == BPF_WRITE)
3006 err = check_stack_write(env, state, off, size,
3007 value_regno, insn_idx);
3008 else
3009 err = check_stack_read(env, state, off, size,
3010 value_regno);
3011 } else if (reg_is_pkt_pointer(reg)) {
3012 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
3013 verbose(env, "cannot write into packet\n");
3014 return -EACCES;
3016 if (t == BPF_WRITE && value_regno >= 0 &&
3017 is_pointer_value(env, value_regno)) {
3018 verbose(env, "R%d leaks addr into packet\n",
3019 value_regno);
3020 return -EACCES;
3022 err = check_packet_access(env, regno, off, size, false);
3023 if (!err && t == BPF_READ && value_regno >= 0)
3024 mark_reg_unknown(env, regs, value_regno);
3025 } else if (reg->type == PTR_TO_FLOW_KEYS) {
3026 if (t == BPF_WRITE && value_regno >= 0 &&
3027 is_pointer_value(env, value_regno)) {
3028 verbose(env, "R%d leaks addr into flow keys\n",
3029 value_regno);
3030 return -EACCES;
3033 err = check_flow_keys_access(env, off, size);
3034 if (!err && t == BPF_READ && value_regno >= 0)
3035 mark_reg_unknown(env, regs, value_regno);
3036 } else if (type_is_sk_pointer(reg->type)) {
3037 if (t == BPF_WRITE) {
3038 verbose(env, "R%d cannot write into %s\n",
3039 regno, reg_type_str[reg->type]);
3040 return -EACCES;
3042 err = check_sock_access(env, insn_idx, regno, off, size, t);
3043 if (!err && value_regno >= 0)
3044 mark_reg_unknown(env, regs, value_regno);
3045 } else if (reg->type == PTR_TO_TP_BUFFER) {
3046 err = check_tp_buffer_access(env, reg, regno, off, size);
3047 if (!err && t == BPF_READ && value_regno >= 0)
3048 mark_reg_unknown(env, regs, value_regno);
3049 } else if (reg->type == PTR_TO_BTF_ID) {
3050 err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
3051 value_regno);
3052 } else {
3053 verbose(env, "R%d invalid mem access '%s'\n", regno,
3054 reg_type_str[reg->type]);
3055 return -EACCES;
3058 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
3059 regs[value_regno].type == SCALAR_VALUE) {
3060 /* b/h/w load zero-extends, mark upper bits as known 0 */
3061 coerce_reg_to_size(&regs[value_regno], size);
3063 return err;
3066 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
3068 int err;
3070 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
3071 insn->imm != 0) {
3072 verbose(env, "BPF_XADD uses reserved fields\n");
3073 return -EINVAL;
3076 /* check src1 operand */
3077 err = check_reg_arg(env, insn->src_reg, SRC_OP);
3078 if (err)
3079 return err;
3081 /* check src2 operand */
3082 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3083 if (err)
3084 return err;
3086 if (is_pointer_value(env, insn->src_reg)) {
3087 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
3088 return -EACCES;
3091 if (is_ctx_reg(env, insn->dst_reg) ||
3092 is_pkt_reg(env, insn->dst_reg) ||
3093 is_flow_key_reg(env, insn->dst_reg) ||
3094 is_sk_reg(env, insn->dst_reg)) {
3095 verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
3096 insn->dst_reg,
3097 reg_type_str[reg_state(env, insn->dst_reg)->type]);
3098 return -EACCES;
3101 /* check whether atomic_add can read the memory */
3102 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3103 BPF_SIZE(insn->code), BPF_READ, -1, true);
3104 if (err)
3105 return err;
3107 /* check whether atomic_add can write into the same memory */
3108 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3109 BPF_SIZE(insn->code), BPF_WRITE, -1, true);
3112 static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
3113 int off, int access_size,
3114 bool zero_size_allowed)
3116 struct bpf_reg_state *reg = reg_state(env, regno);
3118 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
3119 access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
3120 if (tnum_is_const(reg->var_off)) {
3121 verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
3122 regno, off, access_size);
3123 } else {
3124 char tn_buf[48];
3126 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3127 verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
3128 regno, tn_buf, access_size);
3130 return -EACCES;
3132 return 0;
3135 /* when register 'regno' is passed into function that will read 'access_size'
3136 * bytes from that pointer, make sure that it's within stack boundary
3137 * and all elements of stack are initialized.
3138 * Unlike most pointer bounds-checking functions, this one doesn't take an
3139 * 'off' argument, so it has to add in reg->off itself.
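* E.g. a helper taking an ARG_PTR_TO_MEM argument that points to fp-16
* with a size of 16 requires fp-16..fp-1 to be within the allocated stack
* and, unless the helper runs in raw (uninitialized) mode, already
* initialized.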
3141 static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
3142 int access_size, bool zero_size_allowed,
3143 struct bpf_call_arg_meta *meta)
3145 struct bpf_reg_state *reg = reg_state(env, regno);
3146 struct bpf_func_state *state = func(env, reg);
3147 int err, min_off, max_off, i, j, slot, spi;
3149 if (reg->type != PTR_TO_STACK) {
3150 /* Allow zero-byte read from NULL, regardless of pointer type */
3151 if (zero_size_allowed && access_size == 0 &&
3152 register_is_null(reg))
3153 return 0;
3155 verbose(env, "R%d type=%s expected=%s\n", regno,
3156 reg_type_str[reg->type],
3157 reg_type_str[PTR_TO_STACK]);
3158 return -EACCES;
3161 if (tnum_is_const(reg->var_off)) {
3162 min_off = max_off = reg->var_off.value + reg->off;
3163 err = __check_stack_boundary(env, regno, min_off, access_size,
3164 zero_size_allowed);
3165 if (err)
3166 return err;
3167 } else {
3168 /* Variable offset is prohibited for unprivileged mode for
3169 * simplicity since it requires corresponding support in
3170 * Spectre masking for stack ALU.
3171 * See also retrieve_ptr_limit().
3173 if (!env->allow_ptr_leaks) {
3174 char tn_buf[48];
3176 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3177 verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
3178 regno, tn_buf);
3179 return -EACCES;
3181 /* Only initialized buffer on stack is allowed to be accessed
3182 * with variable offset. With uninitialized buffer it's hard to
3183 * guarantee that whole memory is marked as initialized on
3184 * helper return, since specific bounds are unknown, which may
3185 * cause uninitialized stack leaking.
3187 if (meta && meta->raw_mode)
3188 meta = NULL;
3190 if (reg->smax_value >= BPF_MAX_VAR_OFF ||
3191 reg->smax_value <= -BPF_MAX_VAR_OFF) {
3192 verbose(env, "R%d unbounded indirect variable offset stack access\n",
3193 regno);
3194 return -EACCES;
3196 min_off = reg->smin_value + reg->off;
3197 max_off = reg->smax_value + reg->off;
3198 err = __check_stack_boundary(env, regno, min_off, access_size,
3199 zero_size_allowed);
3200 if (err) {
3201 verbose(env, "R%d min value is outside of stack bound\n",
3202 regno);
3203 return err;
3205 err = __check_stack_boundary(env, regno, max_off, access_size,
3206 zero_size_allowed);
3207 if (err) {
3208 verbose(env, "R%d max value is outside of stack bound\n",
3209 regno);
3210 return err;
3214 if (meta && meta->raw_mode) {
3215 meta->access_size = access_size;
3216 meta->regno = regno;
3217 return 0;
3220 for (i = min_off; i < max_off + access_size; i++) {
3221 u8 *stype;
3223 slot = -i - 1;
3224 spi = slot / BPF_REG_SIZE;
3225 if (state->allocated_stack <= slot)
3226 goto err;
3227 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3228 if (*stype == STACK_MISC)
3229 goto mark;
3230 if (*stype == STACK_ZERO) {
3231 /* helper can write anything into the stack */
3232 *stype = STACK_MISC;
3233 goto mark;
3235 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
3236 state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
3237 __mark_reg_unknown(&state->stack[spi].spilled_ptr);
3238 for (j = 0; j < BPF_REG_SIZE; j++)
3239 state->stack[spi].slot_type[j] = STACK_MISC;
3240 goto mark;
3243 err:
3244 if (tnum_is_const(reg->var_off)) {
3245 verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
3246 min_off, i - min_off, access_size);
3247 } else {
3248 char tn_buf[48];
3250 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3251 verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
3252 tn_buf, i - min_off, access_size);
3254 return -EACCES;
3255 mark:
3256 /* reading any byte out of 8-byte 'spill_slot' will cause
3257 * the whole slot to be marked as 'read'
3259 mark_reg_read(env, &state->stack[spi].spilled_ptr,
3260 state->stack[spi].spilled_ptr.parent,
3261 REG_LIVE_READ64);
3263 return update_stack_depth(env, state, min_off);
3266 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
3267 int access_size, bool zero_size_allowed,
3268 struct bpf_call_arg_meta *meta)
3270 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3272 switch (reg->type) {
3273 case PTR_TO_PACKET:
3274 case PTR_TO_PACKET_META:
3275 return check_packet_access(env, regno, reg->off, access_size,
3276 zero_size_allowed);
3277 case PTR_TO_MAP_VALUE:
3278 if (check_map_access_type(env, regno, reg->off, access_size,
3279 meta && meta->raw_mode ? BPF_WRITE :
3280 BPF_READ))
3281 return -EACCES;
3282 return check_map_access(env, regno, reg->off, access_size,
3283 zero_size_allowed);
3284 default: /* scalar_value|ptr_to_stack or invalid ptr */
3285 return check_stack_boundary(env, regno, access_size,
3286 zero_size_allowed, meta);
3290 /* Implementation details:
3291 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
3292 * Two bpf_map_lookups (even with the same key) will have different reg->id.
3293 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
3294 * value_or_null->value transition, since the verifier only cares about
3295 * the range of access to valid map value pointer and doesn't care about actual
3296 * address of the map element.
3297 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
3298 * reg->id > 0 after value_or_null->value transition. By doing so
3299 * two bpf_map_lookups will be considered two different pointers that
3300 * point to different bpf_spin_locks.
3301 * The verifier allows taking only one bpf_spin_lock at a time to avoid
3302 * dead-locks.
3303 * Since only one bpf_spin_lock is allowed the checks are simpler than
3304 * reg_is_refcounted() logic. The verifier needs to remember only
3305 * one spin_lock instead of array of acquired_refs.
3306 * cur_state->active_spin_lock remembers which map value element got locked
3307 * and clears it after bpf_spin_unlock.
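* A typical (purely illustrative) usage from a BPF program, where 'map',
* 'key' and 'struct elem' are hypothetical names and the map's BTF must
* describe the bpf_spin_lock member:
*   struct elem { int cnt; struct bpf_spin_lock lock; };
*   struct elem *val = bpf_map_lookup_elem(&map, &key);
*   if (val) {
*           bpf_spin_lock(&val->lock);
*           val->cnt++;
*           bpf_spin_unlock(&val->lock);
*   }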
3309 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
3310 bool is_lock)
3312 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3313 struct bpf_verifier_state *cur = env->cur_state;
3314 bool is_const = tnum_is_const(reg->var_off);
3315 struct bpf_map *map = reg->map_ptr;
3316 u64 val = reg->var_off.value;
3318 if (reg->type != PTR_TO_MAP_VALUE) {
3319 verbose(env, "R%d is not a pointer to map_value\n", regno);
3320 return -EINVAL;
3322 if (!is_const) {
3323 verbose(env,
3324 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
3325 regno);
3326 return -EINVAL;
3328 if (!map->btf) {
3329 verbose(env,
3330 "map '%s' has to have BTF in order to use bpf_spin_lock\n",
3331 map->name);
3332 return -EINVAL;
3334 if (!map_value_has_spin_lock(map)) {
3335 if (map->spin_lock_off == -E2BIG)
3336 verbose(env,
3337 "map '%s' has more than one 'struct bpf_spin_lock'\n",
3338 map->name);
3339 else if (map->spin_lock_off == -ENOENT)
3340 verbose(env,
3341 "map '%s' doesn't have 'struct bpf_spin_lock'\n",
3342 map->name);
3343 else
3344 verbose(env,
3345 "map '%s' is not a struct type or bpf_spin_lock is mangled\n",
3346 map->name);
3347 return -EINVAL;
3349 if (map->spin_lock_off != val + reg->off) {
3350 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
3351 val + reg->off);
3352 return -EINVAL;
3354 if (is_lock) {
3355 if (cur->active_spin_lock) {
3356 verbose(env,
3357 "Locking two bpf_spin_locks are not allowed\n");
3358 return -EINVAL;
3360 cur->active_spin_lock = reg->id;
3361 } else {
3362 if (!cur->active_spin_lock) {
3363 verbose(env, "bpf_spin_unlock without taking a lock\n");
3364 return -EINVAL;
3366 if (cur->active_spin_lock != reg->id) {
3367 verbose(env, "bpf_spin_unlock of different lock\n");
3368 return -EINVAL;
3370 cur->active_spin_lock = 0;
3372 return 0;
3375 static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
3377 return type == ARG_PTR_TO_MEM ||
3378 type == ARG_PTR_TO_MEM_OR_NULL ||
3379 type == ARG_PTR_TO_UNINIT_MEM;
3382 static bool arg_type_is_mem_size(enum bpf_arg_type type)
3384 return type == ARG_CONST_SIZE ||
3385 type == ARG_CONST_SIZE_OR_ZERO;
3388 static bool arg_type_is_int_ptr(enum bpf_arg_type type)
3390 return type == ARG_PTR_TO_INT ||
3391 type == ARG_PTR_TO_LONG;
3394 static int int_ptr_type_to_size(enum bpf_arg_type type)
3396 if (type == ARG_PTR_TO_INT)
3397 return sizeof(u32);
3398 else if (type == ARG_PTR_TO_LONG)
3399 return sizeof(u64);
3401 return -EINVAL;
3404 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
3405 enum bpf_arg_type arg_type,
3406 struct bpf_call_arg_meta *meta)
3408 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3409 enum bpf_reg_type expected_type, type = reg->type;
3410 int err = 0;
3412 if (arg_type == ARG_DONTCARE)
3413 return 0;
3415 err = check_reg_arg(env, regno, SRC_OP);
3416 if (err)
3417 return err;
3419 if (arg_type == ARG_ANYTHING) {
3420 if (is_pointer_value(env, regno)) {
3421 verbose(env, "R%d leaks addr into helper function\n",
3422 regno);
3423 return -EACCES;
3425 return 0;
3428 if (type_is_pkt_pointer(type) &&
3429 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
3430 verbose(env, "helper access to the packet is not allowed\n");
3431 return -EACCES;
3434 if (arg_type == ARG_PTR_TO_MAP_KEY ||
3435 arg_type == ARG_PTR_TO_MAP_VALUE ||
3436 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
3437 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
3438 expected_type = PTR_TO_STACK;
3439 if (register_is_null(reg) &&
3440 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL)
3441 /* final test in check_stack_boundary() */;
3442 else if (!type_is_pkt_pointer(type) &&
3443 type != PTR_TO_MAP_VALUE &&
3444 type != expected_type)
3445 goto err_type;
3446 } else if (arg_type == ARG_CONST_SIZE ||
3447 arg_type == ARG_CONST_SIZE_OR_ZERO) {
3448 expected_type = SCALAR_VALUE;
3449 if (type != expected_type)
3450 goto err_type;
3451 } else if (arg_type == ARG_CONST_MAP_PTR) {
3452 expected_type = CONST_PTR_TO_MAP;
3453 if (type != expected_type)
3454 goto err_type;
3455 } else if (arg_type == ARG_PTR_TO_CTX) {
3456 expected_type = PTR_TO_CTX;
3457 if (type != expected_type)
3458 goto err_type;
3459 err = check_ctx_reg(env, reg, regno);
3460 if (err < 0)
3461 return err;
3462 } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) {
3463 expected_type = PTR_TO_SOCK_COMMON;
3464 /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
3465 if (!type_is_sk_pointer(type))
3466 goto err_type;
3467 if (reg->ref_obj_id) {
3468 if (meta->ref_obj_id) {
3469 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
3470 regno, reg->ref_obj_id,
3471 meta->ref_obj_id);
3472 return -EFAULT;
3474 meta->ref_obj_id = reg->ref_obj_id;
3476 } else if (arg_type == ARG_PTR_TO_SOCKET) {
3477 expected_type = PTR_TO_SOCKET;
3478 if (type != expected_type)
3479 goto err_type;
3480 } else if (arg_type == ARG_PTR_TO_BTF_ID) {
3481 expected_type = PTR_TO_BTF_ID;
3482 if (type != expected_type)
3483 goto err_type;
3484 if (reg->btf_id != meta->btf_id) {
3485 verbose(env, "Helper has type %s got %s in R%d\n",
3486 kernel_type_name(meta->btf_id),
3487 kernel_type_name(reg->btf_id), regno);
3489 return -EACCES;
3491 if (!tnum_is_const(reg->var_off) || reg->var_off.value || reg->off) {
3492 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
3493 regno);
3494 return -EACCES;
3496 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
3497 if (meta->func_id == BPF_FUNC_spin_lock) {
3498 if (process_spin_lock(env, regno, true))
3499 return -EACCES;
3500 } else if (meta->func_id == BPF_FUNC_spin_unlock) {
3501 if (process_spin_lock(env, regno, false))
3502 return -EACCES;
3503 } else {
3504 verbose(env, "verifier internal error\n");
3505 return -EFAULT;
3507 } else if (arg_type_is_mem_ptr(arg_type)) {
3508 expected_type = PTR_TO_STACK;
3509 /* One exception here. In case function allows for NULL to be
3510 * passed in as argument, it's a SCALAR_VALUE type. Final test
3511 * happens during stack boundary checking.
3513 if (register_is_null(reg) &&
3514 arg_type == ARG_PTR_TO_MEM_OR_NULL)
3515 /* final test in check_stack_boundary() */;
3516 else if (!type_is_pkt_pointer(type) &&
3517 type != PTR_TO_MAP_VALUE &&
3518 type != expected_type)
3519 goto err_type;
3520 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
3521 } else if (arg_type_is_int_ptr(arg_type)) {
3522 expected_type = PTR_TO_STACK;
3523 if (!type_is_pkt_pointer(type) &&
3524 type != PTR_TO_MAP_VALUE &&
3525 type != expected_type)
3526 goto err_type;
3527 } else {
3528 verbose(env, "unsupported arg_type %d\n", arg_type);
3529 return -EFAULT;
3532 if (arg_type == ARG_CONST_MAP_PTR) {
3533 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
3534 meta->map_ptr = reg->map_ptr;
3535 } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
3536 /* bpf_map_xxx(..., map_ptr, ..., key) call:
3537 * check that [key, key + map->key_size) are within
3538 * stack limits and initialized
3540 if (!meta->map_ptr) {
3541 /* in function declaration map_ptr must come before
3542 * map_key, so that it's verified and known before
3543 * we have to check map_key here. Otherwise it means
3544 * that the kernel subsystem misconfigured the verifier
3546 verbose(env, "invalid map_ptr to access map->key\n");
3547 return -EACCES;
3549 err = check_helper_mem_access(env, regno,
3550 meta->map_ptr->key_size, false,
3551 NULL);
3552 } else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
3553 (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL &&
3554 !register_is_null(reg)) ||
3555 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
3556 /* bpf_map_xxx(..., map_ptr, ..., value) call:
3557 * check [value, value + map->value_size) validity
3559 if (!meta->map_ptr) {
3560 /* kernel subsystem misconfigured verifier */
3561 verbose(env, "invalid map_ptr to access map->value\n");
3562 return -EACCES;
3564 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
3565 err = check_helper_mem_access(env, regno,
3566 meta->map_ptr->value_size, false,
3567 meta);
3568 } else if (arg_type_is_mem_size(arg_type)) {
3569 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
3571 /* remember the mem_size which may be used later
3572 * to refine return values.
3574 meta->msize_smax_value = reg->smax_value;
3575 meta->msize_umax_value = reg->umax_value;
3577 /* The register is SCALAR_VALUE; the access check
3578 * happens using its boundaries.
3580 if (!tnum_is_const(reg->var_off))
3581 /* For unprivileged variable accesses, disable raw
3582 * mode so that the program is required to
3583 * initialize all the memory that the helper could
3584 * just partially fill up.
3586 meta = NULL;
3588 if (reg->smin_value < 0) {
3589 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
3590 regno);
3591 return -EACCES;
3594 if (reg->umin_value == 0) {
3595 err = check_helper_mem_access(env, regno - 1, 0,
3596 zero_size_allowed,
3597 meta);
3598 if (err)
3599 return err;
3602 if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
3603 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
3604 regno);
3605 return -EACCES;
3607 err = check_helper_mem_access(env, regno - 1,
3608 reg->umax_value,
3609 zero_size_allowed, meta);
3610 if (!err)
3611 err = mark_chain_precision(env, regno);
3612 } else if (arg_type_is_int_ptr(arg_type)) {
3613 int size = int_ptr_type_to_size(arg_type);
3615 err = check_helper_mem_access(env, regno, size, false, meta);
3616 if (err)
3617 return err;
3618 err = check_ptr_alignment(env, reg, 0, size, true);
3621 return err;
3622 err_type:
3623 verbose(env, "R%d type=%s expected=%s\n", regno,
3624 reg_type_str[type], reg_type_str[expected_type]);
3625 return -EACCES;
3628 static int check_map_func_compatibility(struct bpf_verifier_env *env,
3629 struct bpf_map *map, int func_id)
3631 if (!map)
3632 return 0;
3634 /* We need a two way check, first is from map perspective ... */
3635 switch (map->map_type) {
3636 case BPF_MAP_TYPE_PROG_ARRAY:
3637 if (func_id != BPF_FUNC_tail_call)
3638 goto error;
3639 break;
3640 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
3641 if (func_id != BPF_FUNC_perf_event_read &&
3642 func_id != BPF_FUNC_perf_event_output &&
3643 func_id != BPF_FUNC_skb_output &&
3644 func_id != BPF_FUNC_perf_event_read_value)
3645 goto error;
3646 break;
3647 case BPF_MAP_TYPE_STACK_TRACE:
3648 if (func_id != BPF_FUNC_get_stackid)
3649 goto error;
3650 break;
3651 case BPF_MAP_TYPE_CGROUP_ARRAY:
3652 if (func_id != BPF_FUNC_skb_under_cgroup &&
3653 func_id != BPF_FUNC_current_task_under_cgroup)
3654 goto error;
3655 break;
3656 case BPF_MAP_TYPE_CGROUP_STORAGE:
3657 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
3658 if (func_id != BPF_FUNC_get_local_storage)
3659 goto error;
3660 break;
3661 case BPF_MAP_TYPE_DEVMAP:
3662 case BPF_MAP_TYPE_DEVMAP_HASH:
3663 if (func_id != BPF_FUNC_redirect_map &&
3664 func_id != BPF_FUNC_map_lookup_elem)
3665 goto error;
3666 break;
3667 /* Restrict bpf side of cpumap and xskmap, open when use-cases
3668 * appear.
3670 case BPF_MAP_TYPE_CPUMAP:
3671 if (func_id != BPF_FUNC_redirect_map)
3672 goto error;
3673 break;
3674 case BPF_MAP_TYPE_XSKMAP:
3675 if (func_id != BPF_FUNC_redirect_map &&
3676 func_id != BPF_FUNC_map_lookup_elem)
3677 goto error;
3678 break;
3679 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
3680 case BPF_MAP_TYPE_HASH_OF_MAPS:
3681 if (func_id != BPF_FUNC_map_lookup_elem)
3682 goto error;
3683 break;
3684 case BPF_MAP_TYPE_SOCKMAP:
3685 if (func_id != BPF_FUNC_sk_redirect_map &&
3686 func_id != BPF_FUNC_sock_map_update &&
3687 func_id != BPF_FUNC_map_delete_elem &&
3688 func_id != BPF_FUNC_msg_redirect_map)
3689 goto error;
3690 break;
3691 case BPF_MAP_TYPE_SOCKHASH:
3692 if (func_id != BPF_FUNC_sk_redirect_hash &&
3693 func_id != BPF_FUNC_sock_hash_update &&
3694 func_id != BPF_FUNC_map_delete_elem &&
3695 func_id != BPF_FUNC_msg_redirect_hash)
3696 goto error;
3697 break;
3698 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
3699 if (func_id != BPF_FUNC_sk_select_reuseport)
3700 goto error;
3701 break;
3702 case BPF_MAP_TYPE_QUEUE:
3703 case BPF_MAP_TYPE_STACK:
3704 if (func_id != BPF_FUNC_map_peek_elem &&
3705 func_id != BPF_FUNC_map_pop_elem &&
3706 func_id != BPF_FUNC_map_push_elem)
3707 goto error;
3708 break;
3709 case BPF_MAP_TYPE_SK_STORAGE:
3710 if (func_id != BPF_FUNC_sk_storage_get &&
3711 func_id != BPF_FUNC_sk_storage_delete)
3712 goto error;
3713 break;
3714 default:
3715 break;
3718 /* ... and second from the function itself. */
3719 switch (func_id) {
3720 case BPF_FUNC_tail_call:
3721 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
3722 goto error;
3723 if (env->subprog_cnt > 1) {
3724 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
3725 return -EINVAL;
3727 break;
3728 case BPF_FUNC_perf_event_read:
3729 case BPF_FUNC_perf_event_output:
3730 case BPF_FUNC_perf_event_read_value:
3731 case BPF_FUNC_skb_output:
3732 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
3733 goto error;
3734 break;
3735 case BPF_FUNC_get_stackid:
3736 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
3737 goto error;
3738 break;
3739 case BPF_FUNC_current_task_under_cgroup:
3740 case BPF_FUNC_skb_under_cgroup:
3741 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
3742 goto error;
3743 break;
3744 case BPF_FUNC_redirect_map:
3745 if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
3746 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
3747 map->map_type != BPF_MAP_TYPE_CPUMAP &&
3748 map->map_type != BPF_MAP_TYPE_XSKMAP)
3749 goto error;
3750 break;
3751 case BPF_FUNC_sk_redirect_map:
3752 case BPF_FUNC_msg_redirect_map:
3753 case BPF_FUNC_sock_map_update:
3754 if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
3755 goto error;
3756 break;
3757 case BPF_FUNC_sk_redirect_hash:
3758 case BPF_FUNC_msg_redirect_hash:
3759 case BPF_FUNC_sock_hash_update:
3760 if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
3761 goto error;
3762 break;
3763 case BPF_FUNC_get_local_storage:
3764 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
3765 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
3766 goto error;
3767 break;
3768 case BPF_FUNC_sk_select_reuseport:
3769 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
3770 goto error;
3771 break;
3772 case BPF_FUNC_map_peek_elem:
3773 case BPF_FUNC_map_pop_elem:
3774 case BPF_FUNC_map_push_elem:
3775 if (map->map_type != BPF_MAP_TYPE_QUEUE &&
3776 map->map_type != BPF_MAP_TYPE_STACK)
3777 goto error;
3778 break;
3779 case BPF_FUNC_sk_storage_get:
3780 case BPF_FUNC_sk_storage_delete:
3781 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
3782 goto error;
3783 break;
3784 default:
3785 break;
3788 return 0;
3789 error:
3790 verbose(env, "cannot pass map_type %d into func %s#%d\n",
3791 map->map_type, func_id_name(func_id), func_id);
3792 return -EINVAL;
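/* Illustrative sketch, not part of the original source: the two-way check
 * above means e.g. a BPF_MAP_TYPE_PROG_ARRAY may only be passed to
 * bpf_tail_call(), and conversely bpf_tail_call() only accepts a
 * BPF_MAP_TYPE_PROG_ARRAY. A hypothetical program doing
 *
 *	bpf_tail_call(ctx, &my_hash_map, 0);	// my_hash_map: BPF_MAP_TYPE_HASH
 *
 * would be rejected here with "cannot pass map_type ... into func
 * bpf_tail_call#...".
 */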
3795 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
3797 int count = 0;
3799 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
3800 count++;
3801 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
3802 count++;
3803 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
3804 count++;
3805 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
3806 count++;
3807 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
3808 count++;
3810 /* We only support one arg being in raw mode at the moment,
3811 * which is sufficient for the helper functions we have
3812 * right now.
3814 return count <= 1;
3817 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
3818 enum bpf_arg_type arg_next)
3820 return (arg_type_is_mem_ptr(arg_curr) &&
3821 !arg_type_is_mem_size(arg_next)) ||
3822 (!arg_type_is_mem_ptr(arg_curr) &&
3823 arg_type_is_mem_size(arg_next));
3826 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
3828 /* bpf_xxx(..., buf, len) call will access 'len'
3829 * bytes from memory 'buf'. Both arg types need
3830 * to be paired, so make sure there's no buggy
3831 * helper function specification.
3833 if (arg_type_is_mem_size(fn->arg1_type) ||
3834 arg_type_is_mem_ptr(fn->arg5_type) ||
3835 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
3836 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
3837 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
3838 check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
3839 return false;
3841 return true;
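/* Illustrative sketch (assumed helper proto, not from this file): a
 * correctly paired specification looks like
 *
 *	.arg2_type = ARG_PTR_TO_MEM,
 *	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
 *
 * i.e. every mem pointer arg is immediately followed by its size arg.
 * A size in arg1, a mem pointer in arg5, or a mem pointer without a
 * following size arg would all fail check_arg_pair_ok().
 */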
3844 static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
3846 int count = 0;
3848 if (arg_type_may_be_refcounted(fn->arg1_type))
3849 count++;
3850 if (arg_type_may_be_refcounted(fn->arg2_type))
3851 count++;
3852 if (arg_type_may_be_refcounted(fn->arg3_type))
3853 count++;
3854 if (arg_type_may_be_refcounted(fn->arg4_type))
3855 count++;
3856 if (arg_type_may_be_refcounted(fn->arg5_type))
3857 count++;
3859 /* A reference acquiring function cannot acquire
3860 * another refcounted ptr.
3862 if (is_acquire_function(func_id) && count)
3863 return false;
3865 /* We only support one arg being unreferenced at the moment,
3866 * which is sufficient for the helper functions we have right now.
3868 return count <= 1;
3871 static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
3873 return check_raw_mode_ok(fn) &&
3874 check_arg_pair_ok(fn) &&
3875 check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
3878 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
3879 * are now invalid, so turn them into unknown SCALAR_VALUE.
3881 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
3882 struct bpf_func_state *state)
3884 struct bpf_reg_state *regs = state->regs, *reg;
3885 int i;
3887 for (i = 0; i < MAX_BPF_REG; i++)
3888 if (reg_is_pkt_pointer_any(&regs[i]))
3889 mark_reg_unknown(env, regs, i);
3891 bpf_for_each_spilled_reg(i, state, reg) {
3892 if (!reg)
3893 continue;
3894 if (reg_is_pkt_pointer_any(reg))
3895 __mark_reg_unknown(reg);
3899 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
3901 struct bpf_verifier_state *vstate = env->cur_state;
3902 int i;
3904 for (i = 0; i <= vstate->curframe; i++)
3905 __clear_all_pkt_pointers(env, vstate->frame[i]);
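/* Illustrative sketch, not part of the original source: after a helper
 * that may move packet data (one for which bpf_helper_changes_pkt_data()
 * returns true, e.g. bpf_skb_pull_data()), a previously derived pointer
 *
 *	r2 = ctx->data;		// r2: PTR_TO_PACKET
 *	call helper;		// changes_data == true
 *	r3 = *(u8 *)(r2 + 0);	// rejected: r2 is now an unknown scalar
 *
 * must be re-derived by reloading ctx->data / ctx->data_end.
 */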
3908 static void release_reg_references(struct bpf_verifier_env *env,
3909 struct bpf_func_state *state,
3910 int ref_obj_id)
3912 struct bpf_reg_state *regs = state->regs, *reg;
3913 int i;
3915 for (i = 0; i < MAX_BPF_REG; i++)
3916 if (regs[i].ref_obj_id == ref_obj_id)
3917 mark_reg_unknown(env, regs, i);
3919 bpf_for_each_spilled_reg(i, state, reg) {
3920 if (!reg)
3921 continue;
3922 if (reg->ref_obj_id == ref_obj_id)
3923 __mark_reg_unknown(reg);
3927 /* The pointer with the specified id has released its reference to kernel
3928 * resources. Identify all copies of the same pointer and clear the reference.
3930 static int release_reference(struct bpf_verifier_env *env,
3931 int ref_obj_id)
3933 struct bpf_verifier_state *vstate = env->cur_state;
3934 int err;
3935 int i;
3937 err = release_reference_state(cur_func(env), ref_obj_id);
3938 if (err)
3939 return err;
3941 for (i = 0; i <= vstate->curframe; i++)
3942 release_reg_references(env, vstate->frame[i], ref_obj_id);
3944 return 0;
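/* Illustrative sketch, not part of the original source: if r6 and r7 both
 * hold copies of a socket returned by an acquiring helper (so they share
 * the same ref_obj_id), then after e.g. bpf_sk_release(r6) both r6 and r7,
 * along with any spilled copies, are marked unknown here, so a later
 * dereference of r7 cannot reach the released object.
 */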
3947 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
3948 int *insn_idx)
3950 struct bpf_verifier_state *state = env->cur_state;
3951 struct bpf_func_state *caller, *callee;
3952 int i, err, subprog, target_insn;
3954 if (state->curframe + 1 >= MAX_CALL_FRAMES) {
3955 verbose(env, "the call stack of %d frames is too deep\n",
3956 state->curframe + 2);
3957 return -E2BIG;
3960 target_insn = *insn_idx + insn->imm;
3961 subprog = find_subprog(env, target_insn + 1);
3962 if (subprog < 0) {
3963 verbose(env, "verifier bug. No program starts at insn %d\n",
3964 target_insn + 1);
3965 return -EFAULT;
3968 caller = state->frame[state->curframe];
3969 if (state->frame[state->curframe + 1]) {
3970 verbose(env, "verifier bug. Frame %d already allocated\n",
3971 state->curframe + 1);
3972 return -EFAULT;
3975 callee = kzalloc(sizeof(*callee), GFP_KERNEL);
3976 if (!callee)
3977 return -ENOMEM;
3978 state->frame[state->curframe + 1] = callee;
3980 /* callee cannot access r0, r6 - r9 for reading and has to write
3981 * into its own stack before reading from it.
3982 * callee can read/write into caller's stack
3984 init_func_state(env, callee,
3985 /* remember the callsite, it will be used by bpf_exit */
3986 *insn_idx /* callsite */,
3987 state->curframe + 1 /* frameno within this callchain */,
3988 subprog /* subprog number within this prog */);
3990 /* Transfer references to the callee */
3991 err = transfer_reference_state(callee, caller);
3992 if (err)
3993 return err;
3995 /* copy r1 - r5 args that callee can access. The copy includes parent
3996 * pointers, which connects us up to the liveness chain
3998 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
3999 callee->regs[i] = caller->regs[i];
4001 /* after the call registers r0 - r5 were scratched */
4002 for (i = 0; i < CALLER_SAVED_REGS; i++) {
4003 mark_reg_not_init(env, caller->regs, caller_saved[i]);
4004 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4007 /* only increment it after check_reg_arg() finished */
4008 state->curframe++;
4010 if (btf_check_func_arg_match(env, subprog))
4011 return -EINVAL;
4013 /* and go analyze first insn of the callee */
4014 *insn_idx = target_insn;
4016 if (env->log.level & BPF_LOG_LEVEL) {
4017 verbose(env, "caller:\n");
4018 print_verifier_state(env, caller);
4019 verbose(env, "callee:\n");
4020 print_verifier_state(env, callee);
4022 return 0;
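/* Illustrative sketch, not part of the original source: for a bpf-to-bpf
 * call such as
 *
 *	r1 = r6;
 *	call pc+3;	// static call into a subprog
 *	exit;
 *
 * a new frame is allocated for the callee, r1-r5 are copied from the
 * caller, the caller's r0-r5 are scratched, and verification continues at
 * the callee's first instruction; prepare_func_exit() below later copies
 * the callee's r0 back into the caller's frame.
 */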
4025 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
4027 struct bpf_verifier_state *state = env->cur_state;
4028 struct bpf_func_state *caller, *callee;
4029 struct bpf_reg_state *r0;
4030 int err;
4032 callee = state->frame[state->curframe];
4033 r0 = &callee->regs[BPF_REG_0];
4034 if (r0->type == PTR_TO_STACK) {
4035 /* technically it's ok to return caller's stack pointer
4036 * (or caller's caller's pointer) back to the caller,
4037 * since these pointers are valid. Only current stack
4038 * pointer will be invalid as soon as function exits,
4039 * but let's be conservative
4041 verbose(env, "cannot return stack pointer to the caller\n");
4042 return -EINVAL;
4045 state->curframe--;
4046 caller = state->frame[state->curframe];
4047 /* return to the caller whatever r0 had in the callee */
4048 caller->regs[BPF_REG_0] = *r0;
4050 /* Transfer references to the caller */
4051 err = transfer_reference_state(caller, callee);
4052 if (err)
4053 return err;
4055 *insn_idx = callee->callsite + 1;
4056 if (env->log.level & BPF_LOG_LEVEL) {
4057 verbose(env, "returning from callee:\n");
4058 print_verifier_state(env, callee);
4059 verbose(env, "to caller at %d:\n", *insn_idx);
4060 print_verifier_state(env, caller);
4062 /* clear everything in the callee */
4063 free_func_state(callee);
4064 state->frame[state->curframe + 1] = NULL;
4065 return 0;
4068 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
4069 int func_id,
4070 struct bpf_call_arg_meta *meta)
4072 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
4074 if (ret_type != RET_INTEGER ||
4075 (func_id != BPF_FUNC_get_stack &&
4076 func_id != BPF_FUNC_probe_read_str))
4077 return;
4079 ret_reg->smax_value = meta->msize_smax_value;
4080 ret_reg->umax_value = meta->msize_umax_value;
4081 __reg_deduce_bounds(ret_reg);
4082 __reg_bound_offset(ret_reg);
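/* Illustrative sketch, not part of the original source: if the size
 * argument passed to bpf_get_stack() was known to be at most 64, the
 * helper's RET_INTEGER value in r0 is clamped here to smax/umax 64
 * (msize_*max_value having been recorded while the size argument was
 * checked), so a later access of "r0 bytes" can be proven in bounds.
 */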
4085 static int
4086 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4087 int func_id, int insn_idx)
4089 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
4090 struct bpf_map *map = meta->map_ptr;
4092 if (func_id != BPF_FUNC_tail_call &&
4093 func_id != BPF_FUNC_map_lookup_elem &&
4094 func_id != BPF_FUNC_map_update_elem &&
4095 func_id != BPF_FUNC_map_delete_elem &&
4096 func_id != BPF_FUNC_map_push_elem &&
4097 func_id != BPF_FUNC_map_pop_elem &&
4098 func_id != BPF_FUNC_map_peek_elem)
4099 return 0;
4101 if (map == NULL) {
4102 verbose(env, "kernel subsystem misconfigured verifier\n");
4103 return -EINVAL;
4106 /* In case of read-only, some additional restrictions
4107 * need to be applied in order to prevent altering the
4108 * state of the map from program side.
4110 if ((map->map_flags & BPF_F_RDONLY_PROG) &&
4111 (func_id == BPF_FUNC_map_delete_elem ||
4112 func_id == BPF_FUNC_map_update_elem ||
4113 func_id == BPF_FUNC_map_push_elem ||
4114 func_id == BPF_FUNC_map_pop_elem)) {
4115 verbose(env, "write into map forbidden\n");
4116 return -EACCES;
4119 if (!BPF_MAP_PTR(aux->map_ptr_state))
4120 bpf_map_ptr_store(aux, meta->map_ptr,
4121 meta->map_ptr->unpriv_array);
4122 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
4123 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
4124 meta->map_ptr->unpriv_array);
4125 return 0;
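/* Illustrative sketch, not part of the original source: the first
 * map_lookup_elem seen at this instruction records its map pointer in
 * insn_aux_data; if another path reaches the same instruction with a
 * different map, the state is poisoned with BPF_MAP_PTR_POISON so later
 * rewriting of the call cannot assume a single known map.
 */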
4128 static int
4129 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4130 int func_id, int insn_idx)
4132 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
4133 struct bpf_reg_state *regs = cur_regs(env), *reg;
4134 struct bpf_map *map = meta->map_ptr;
4135 struct tnum range;
4136 u64 val;
4138 if (func_id != BPF_FUNC_tail_call)
4139 return 0;
4140 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
4141 verbose(env, "kernel subsystem misconfigured verifier\n");
4142 return -EINVAL;
4145 range = tnum_range(0, map->max_entries - 1);
4146 reg = &regs[BPF_REG_3];
4148 if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
4149 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4150 return 0;
4153 val = reg->var_off.value;
4154 if (bpf_map_key_unseen(aux))
4155 bpf_map_key_store(aux, val);
4156 else if (!bpf_map_key_poisoned(aux) &&
4157 bpf_map_key_immediate(aux) != val)
4158 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4159 return 0;
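/* Illustrative sketch, not part of the original source: for
 *
 *	r3 = 5;			// constant index within max_entries
 *	call bpf_tail_call;
 *
 * the key 5 is recorded in insn_aux_data; a non-constant r3, an index
 * outside [0, max_entries - 1], or different constants on different paths
 * poison the key with BPF_MAP_KEY_POISON.
 */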
4162 static int check_reference_leak(struct bpf_verifier_env *env)
4164 struct bpf_func_state *state = cur_func(env);
4165 int i;
4167 for (i = 0; i < state->acquired_refs; i++) {
4168 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
4169 state->refs[i].id, state->refs[i].insn_idx);
4171 return state->acquired_refs ? -EINVAL : 0;
4174 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
4176 const struct bpf_func_proto *fn = NULL;
4177 struct bpf_reg_state *regs;
4178 struct bpf_call_arg_meta meta;
4179 bool changes_data;
4180 int i, err;
4182 /* find function prototype */
4183 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
4184 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
4185 func_id);
4186 return -EINVAL;
4189 if (env->ops->get_func_proto)
4190 fn = env->ops->get_func_proto(func_id, env->prog);
4191 if (!fn) {
4192 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
4193 func_id);
4194 return -EINVAL;
4197 /* eBPF programs must be GPL compatible to use GPL-ed functions */
4198 if (!env->prog->gpl_compatible && fn->gpl_only) {
4199 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
4200 return -EINVAL;
4203 /* With LD_ABS/IND some JITs save/restore skb from r1. */
4204 changes_data = bpf_helper_changes_pkt_data(fn->func);
4205 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
4206 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
4207 func_id_name(func_id), func_id);
4208 return -EINVAL;
4211 memset(&meta, 0, sizeof(meta));
4212 meta.pkt_access = fn->pkt_access;
4214 err = check_func_proto(fn, func_id);
4215 if (err) {
4216 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
4217 func_id_name(func_id), func_id);
4218 return err;
4221 meta.func_id = func_id;
4222 /* check args */
4223 for (i = 0; i < 5; i++) {
4224 err = btf_resolve_helper_id(&env->log, fn, i);
4225 if (err > 0)
4226 meta.btf_id = err;
4227 err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta);
4228 if (err)
4229 return err;
4232 err = record_func_map(env, &meta, func_id, insn_idx);
4233 if (err)
4234 return err;
4236 err = record_func_key(env, &meta, func_id, insn_idx);
4237 if (err)
4238 return err;
4240 /* Mark slots with STACK_MISC in case of raw mode, stack offset
4241 * is inferred from register state.
4243 for (i = 0; i < meta.access_size; i++) {
4244 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
4245 BPF_WRITE, -1, false);
4246 if (err)
4247 return err;
4250 if (func_id == BPF_FUNC_tail_call) {
4251 err = check_reference_leak(env);
4252 if (err) {
4253 verbose(env, "tail_call would lead to reference leak\n");
4254 return err;
4256 } else if (is_release_function(func_id)) {
4257 err = release_reference(env, meta.ref_obj_id);
4258 if (err) {
4259 verbose(env, "func %s#%d reference has not been acquired before\n",
4260 func_id_name(func_id), func_id);
4261 return err;
4265 regs = cur_regs(env);
4267 /* check that flags argument in get_local_storage(map, flags) is 0,
4268 * this is required because get_local_storage() can't return an error.
4270 if (func_id == BPF_FUNC_get_local_storage &&
4271 !register_is_null(&regs[BPF_REG_2])) {
4272 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
4273 return -EINVAL;
4276 /* reset caller saved regs */
4277 for (i = 0; i < CALLER_SAVED_REGS; i++) {
4278 mark_reg_not_init(env, regs, caller_saved[i]);
4279 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4282 /* helper call returns 64-bit value. */
4283 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
4285 /* update return register (already marked as written above) */
4286 if (fn->ret_type == RET_INTEGER) {
4287 /* sets type to SCALAR_VALUE */
4288 mark_reg_unknown(env, regs, BPF_REG_0);
4289 } else if (fn->ret_type == RET_VOID) {
4290 regs[BPF_REG_0].type = NOT_INIT;
4291 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
4292 fn->ret_type == RET_PTR_TO_MAP_VALUE) {
4293 /* There is no offset yet applied, variable or fixed */
4294 mark_reg_known_zero(env, regs, BPF_REG_0);
4295 /* remember map_ptr, so that check_map_access()
4296 * can check 'value_size' boundary of memory access
4297 * to map element returned from bpf_map_lookup_elem()
4299 if (meta.map_ptr == NULL) {
4300 verbose(env,
4301 "kernel subsystem misconfigured verifier\n");
4302 return -EINVAL;
4304 regs[BPF_REG_0].map_ptr = meta.map_ptr;
4305 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
4306 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
4307 if (map_value_has_spin_lock(meta.map_ptr))
4308 regs[BPF_REG_0].id = ++env->id_gen;
4309 } else {
4310 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
4311 regs[BPF_REG_0].id = ++env->id_gen;
4313 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
4314 mark_reg_known_zero(env, regs, BPF_REG_0);
4315 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
4316 regs[BPF_REG_0].id = ++env->id_gen;
4317 } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
4318 mark_reg_known_zero(env, regs, BPF_REG_0);
4319 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
4320 regs[BPF_REG_0].id = ++env->id_gen;
4321 } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
4322 mark_reg_known_zero(env, regs, BPF_REG_0);
4323 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
4324 regs[BPF_REG_0].id = ++env->id_gen;
4325 } else {
4326 verbose(env, "unknown return type %d of func %s#%d\n",
4327 fn->ret_type, func_id_name(func_id), func_id);
4328 return -EINVAL;
4331 if (is_ptr_cast_function(func_id)) {
4332 /* For release_reference() */
4333 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
4334 } else if (is_acquire_function(func_id)) {
4335 int id = acquire_reference_state(env, insn_idx);
4337 if (id < 0)
4338 return id;
4339 /* For mark_ptr_or_null_reg() */
4340 regs[BPF_REG_0].id = id;
4341 /* For release_reference() */
4342 regs[BPF_REG_0].ref_obj_id = id;
4345 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
4347 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
4348 if (err)
4349 return err;
4351 if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
4352 const char *err_str;
4354 #ifdef CONFIG_PERF_EVENTS
4355 err = get_callchain_buffers(sysctl_perf_event_max_stack);
4356 err_str = "cannot get callchain buffer for func %s#%d\n";
4357 #else
4358 err = -ENOTSUPP;
4359 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
4360 #endif
4361 if (err) {
4362 verbose(env, err_str, func_id_name(func_id), func_id);
4363 return err;
4366 env->prog->has_callchain_buf = true;
4369 if (changes_data)
4370 clear_all_pkt_pointers(env);
4371 return 0;
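/* Illustrative sketch, not part of the original source: for
 *
 *	call bpf_map_lookup_elem;
 *
 * the sequence above checks each argument against the helper proto,
 * records the map, scratches the caller-saved r0-r5 and then sets
 * r0 = PTR_TO_MAP_VALUE_OR_NULL with a fresh id, so the program must
 * null-check r0 before dereferencing the returned element.
 */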
4374 static bool signed_add_overflows(s64 a, s64 b)
4376 /* Do the add in u64, where overflow is well-defined */
4377 s64 res = (s64)((u64)a + (u64)b);
4379 if (b < 0)
4380 return res > a;
4381 return res < a;
4384 static bool signed_sub_overflows(s64 a, s64 b)
4386 /* Do the sub in u64, where overflow is well-defined */
4387 s64 res = (s64)((u64)a - (u64)b);
4389 if (b < 0)
4390 return res < a;
4391 return res > a;
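/* Worked example (not part of the original source): with a = S64_MAX and
 * b = 1, the u64 addition wraps to S64_MIN, so res < a and
 * signed_add_overflows() reports overflow for a positive b; with
 * a = S64_MIN and b = 1, the u64 subtraction in signed_sub_overflows()
 * wraps to S64_MAX, so res > a and overflow is reported for a positive b.
 */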
4394 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
4395 const struct bpf_reg_state *reg,
4396 enum bpf_reg_type type)
4398 bool known = tnum_is_const(reg->var_off);
4399 s64 val = reg->var_off.value;
4400 s64 smin = reg->smin_value;
4402 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
4403 verbose(env, "math between %s pointer and %lld is not allowed\n",
4404 reg_type_str[type], val);
4405 return false;
4408 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
4409 verbose(env, "%s pointer offset %d is not allowed\n",
4410 reg_type_str[type], reg->off);
4411 return false;
4414 if (smin == S64_MIN) {
4415 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
4416 reg_type_str[type]);
4417 return false;
4420 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
4421 verbose(env, "value %lld makes %s pointer be out of bounds\n",
4422 smin, reg_type_str[type]);
4423 return false;
4426 return true;
4429 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
4431 return &env->insn_aux_data[env->insn_idx];
4434 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
4435 u32 *ptr_limit, u8 opcode, bool off_is_neg)
4437 bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
4438 (opcode == BPF_SUB && !off_is_neg);
4439 u32 off;
4441 switch (ptr_reg->type) {
4442 case PTR_TO_STACK:
4443 /* Indirect variable offset stack access is prohibited in
4444 * unprivileged mode so it's not handled here.
4446 off = ptr_reg->off + ptr_reg->var_off.value;
4447 if (mask_to_left)
4448 *ptr_limit = MAX_BPF_STACK + off;
4449 else
4450 *ptr_limit = -off;
4451 return 0;
4452 case PTR_TO_MAP_VALUE:
4453 if (mask_to_left) {
4454 *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
4455 } else {
4456 off = ptr_reg->smin_value + ptr_reg->off;
4457 *ptr_limit = ptr_reg->map_ptr->value_size - off;
4459 return 0;
4460 default:
4461 return -EINVAL;
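/* Worked example (not part of the original source, assuming
 * MAX_BPF_STACK == 512): for a PTR_TO_STACK with off == -16 and a constant
 * var_off of 0, masking to the left yields a limit of 512 - 16 == 496
 * bytes, while masking to the right yields 16 bytes, i.e. the distance to
 * the respective end of the object that a speculative offset may not
 * exceed.
 */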
4465 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
4466 const struct bpf_insn *insn)
4468 return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
4471 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
4472 u32 alu_state, u32 alu_limit)
4474 /* If we arrived here from different branches with different
4475 * state or limits to sanitize, then this won't work.
4477 if (aux->alu_state &&
4478 (aux->alu_state != alu_state ||
4479 aux->alu_limit != alu_limit))
4480 return -EACCES;
4482 /* Corresponding fixup done in fixup_bpf_calls(). */
4483 aux->alu_state = alu_state;
4484 aux->alu_limit = alu_limit;
4485 return 0;
4488 static int sanitize_val_alu(struct bpf_verifier_env *env,
4489 struct bpf_insn *insn)
4491 struct bpf_insn_aux_data *aux = cur_aux(env);
4493 if (can_skip_alu_sanitation(env, insn))
4494 return 0;
4496 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
4499 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
4500 struct bpf_insn *insn,
4501 const struct bpf_reg_state *ptr_reg,
4502 struct bpf_reg_state *dst_reg,
4503 bool off_is_neg)
4505 struct bpf_verifier_state *vstate = env->cur_state;
4506 struct bpf_insn_aux_data *aux = cur_aux(env);
4507 bool ptr_is_dst_reg = ptr_reg == dst_reg;
4508 u8 opcode = BPF_OP(insn->code);
4509 u32 alu_state, alu_limit;
4510 struct bpf_reg_state tmp;
4511 bool ret;
4513 if (can_skip_alu_sanitation(env, insn))
4514 return 0;
4516 /* We already marked aux for masking from non-speculative
4517 * paths, thus we got here in the first place. We only care
4518 * to explore bad access from here.
4520 if (vstate->speculative)
4521 goto do_sim;
4523 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
4524 alu_state |= ptr_is_dst_reg ?
4525 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
4527 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
4528 return 0;
4529 if (update_alu_sanitation_state(aux, alu_state, alu_limit))
4530 return -EACCES;
4531 do_sim:
4532 /* Simulate and find potential out-of-bounds access under
4533 * speculative execution from truncation as a result of
4534 * masking when off was not within expected range. If off
4535 * sits in dst, then we temporarily need to move ptr there
4536 * to simulate dst (== 0) +/-= ptr. Needed, for example,
4537 * for cases where we use K-based arithmetic in one direction
4538 * and truncated reg-based in the other in order to explore
4539 * bad access.
4541 if (!ptr_is_dst_reg) {
4542 tmp = *dst_reg;
4543 *dst_reg = *ptr_reg;
4545 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
4546 if (!ptr_is_dst_reg && ret)
4547 *dst_reg = tmp;
4548 return !ret ? -EFAULT : 0;
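/* Illustrative sketch, not part of the original source: the recorded
 * alu_state/alu_limit are consumed later by fixup_bpf_calls() to emit a
 * runtime mask that clamps the scalar offset, and a speculative copy of
 * the state is pushed here (push_stack(..., true)) so the out-of-bounds
 * path is still explored. If two paths reach this instruction needing
 * different limits, update_alu_sanitation_state() fails and the caller
 * reports "tried to add/sub from different maps or paths".
 */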
4551 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
4552 * Caller should also handle BPF_MOV case separately.
4553 * If we return -EACCES, caller may want to try again treating pointer as a
4554 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
4556 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
4557 struct bpf_insn *insn,
4558 const struct bpf_reg_state *ptr_reg,
4559 const struct bpf_reg_state *off_reg)
4561 struct bpf_verifier_state *vstate = env->cur_state;
4562 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4563 struct bpf_reg_state *regs = state->regs, *dst_reg;
4564 bool known = tnum_is_const(off_reg->var_off);
4565 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
4566 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
4567 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
4568 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
4569 u32 dst = insn->dst_reg, src = insn->src_reg;
4570 u8 opcode = BPF_OP(insn->code);
4571 int ret;
4573 dst_reg = &regs[dst];
4575 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
4576 smin_val > smax_val || umin_val > umax_val) {
4577 /* Taint dst register if offset had invalid bounds derived from
4578 * e.g. dead branches.
4580 __mark_reg_unknown(dst_reg);
4581 return 0;
4584 if (BPF_CLASS(insn->code) != BPF_ALU64) {
4585 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
4586 verbose(env,
4587 "R%d 32-bit pointer arithmetic prohibited\n",
4588 dst);
4589 return -EACCES;
4592 switch (ptr_reg->type) {
4593 case PTR_TO_MAP_VALUE_OR_NULL:
4594 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
4595 dst, reg_type_str[ptr_reg->type]);
4596 return -EACCES;
4597 case CONST_PTR_TO_MAP:
4598 case PTR_TO_PACKET_END:
4599 case PTR_TO_SOCKET:
4600 case PTR_TO_SOCKET_OR_NULL:
4601 case PTR_TO_SOCK_COMMON:
4602 case PTR_TO_SOCK_COMMON_OR_NULL:
4603 case PTR_TO_TCP_SOCK:
4604 case PTR_TO_TCP_SOCK_OR_NULL:
4605 case PTR_TO_XDP_SOCK:
4606 verbose(env, "R%d pointer arithmetic on %s prohibited\n",
4607 dst, reg_type_str[ptr_reg->type]);
4608 return -EACCES;
4609 case PTR_TO_MAP_VALUE:
4610 if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
4611 verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
4612 off_reg == dst_reg ? dst : src);
4613 return -EACCES;
4615 /* fall-through */
4616 default:
4617 break;
4620 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
4621 * The id may be overwritten later if we create a new variable offset.
4623 dst_reg->type = ptr_reg->type;
4624 dst_reg->id = ptr_reg->id;
4626 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
4627 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
4628 return -EINVAL;
4630 switch (opcode) {
4631 case BPF_ADD:
4632 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
4633 if (ret < 0) {
4634 verbose(env, "R%d tried to add from different maps or paths\n", dst);
4635 return ret;
4637 /* We can take a fixed offset as long as it doesn't overflow
4638 * the s32 'off' field
4640 if (known && (ptr_reg->off + smin_val ==
4641 (s64)(s32)(ptr_reg->off + smin_val))) {
4642 /* pointer += K. Accumulate it into fixed offset */
4643 dst_reg->smin_value = smin_ptr;
4644 dst_reg->smax_value = smax_ptr;
4645 dst_reg->umin_value = umin_ptr;
4646 dst_reg->umax_value = umax_ptr;
4647 dst_reg->var_off = ptr_reg->var_off;
4648 dst_reg->off = ptr_reg->off + smin_val;
4649 dst_reg->raw = ptr_reg->raw;
4650 break;
4652 /* A new variable offset is created. Note that off_reg->off
4653 * == 0, since it's a scalar.
4654 * dst_reg gets the pointer type and since some positive
4655 * integer value was added to the pointer, give it a new 'id'
4656 * if it's a PTR_TO_PACKET.
4657 * this creates a new 'base' pointer, off_reg (variable) gets
4658 * added into the variable offset, and we copy the fixed offset
4659 * from ptr_reg.
4661 if (signed_add_overflows(smin_ptr, smin_val) ||
4662 signed_add_overflows(smax_ptr, smax_val)) {
4663 dst_reg->smin_value = S64_MIN;
4664 dst_reg->smax_value = S64_MAX;
4665 } else {
4666 dst_reg->smin_value = smin_ptr + smin_val;
4667 dst_reg->smax_value = smax_ptr + smax_val;
4669 if (umin_ptr + umin_val < umin_ptr ||
4670 umax_ptr + umax_val < umax_ptr) {
4671 dst_reg->umin_value = 0;
4672 dst_reg->umax_value = U64_MAX;
4673 } else {
4674 dst_reg->umin_value = umin_ptr + umin_val;
4675 dst_reg->umax_value = umax_ptr + umax_val;
4677 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
4678 dst_reg->off = ptr_reg->off;
4679 dst_reg->raw = ptr_reg->raw;
4680 if (reg_is_pkt_pointer(ptr_reg)) {
4681 dst_reg->id = ++env->id_gen;
4682 /* something was added to pkt_ptr, set range to zero */
4683 dst_reg->raw = 0;
4685 break;
4686 case BPF_SUB:
4687 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
4688 if (ret < 0) {
4689 verbose(env, "R%d tried to sub from different maps or paths\n", dst);
4690 return ret;
4692 if (dst_reg == off_reg) {
4693 /* scalar -= pointer. Creates an unknown scalar */
4694 verbose(env, "R%d tried to subtract pointer from scalar\n",
4695 dst);
4696 return -EACCES;
4698 /* We don't allow subtraction from FP, because (according to
4699 * test_verifier.c test "invalid fp arithmetic", JITs might not
4700 * be able to deal with it.
4702 if (ptr_reg->type == PTR_TO_STACK) {
4703 verbose(env, "R%d subtraction from stack pointer prohibited\n",
4704 dst);
4705 return -EACCES;
4707 if (known && (ptr_reg->off - smin_val ==
4708 (s64)(s32)(ptr_reg->off - smin_val))) {
4709 /* pointer -= K. Subtract it from fixed offset */
4710 dst_reg->smin_value = smin_ptr;
4711 dst_reg->smax_value = smax_ptr;
4712 dst_reg->umin_value = umin_ptr;
4713 dst_reg->umax_value = umax_ptr;
4714 dst_reg->var_off = ptr_reg->var_off;
4715 dst_reg->id = ptr_reg->id;
4716 dst_reg->off = ptr_reg->off - smin_val;
4717 dst_reg->raw = ptr_reg->raw;
4718 break;
4720 /* A new variable offset is created. If the subtrahend is known
4721 * nonnegative, then any reg->range we had before is still good.
4723 if (signed_sub_overflows(smin_ptr, smax_val) ||
4724 signed_sub_overflows(smax_ptr, smin_val)) {
4725 /* Overflow possible, we know nothing */
4726 dst_reg->smin_value = S64_MIN;
4727 dst_reg->smax_value = S64_MAX;
4728 } else {
4729 dst_reg->smin_value = smin_ptr - smax_val;
4730 dst_reg->smax_value = smax_ptr - smin_val;
4732 if (umin_ptr < umax_val) {
4733 /* Overflow possible, we know nothing */
4734 dst_reg->umin_value = 0;
4735 dst_reg->umax_value = U64_MAX;
4736 } else {
4737 /* Cannot overflow (as long as bounds are consistent) */
4738 dst_reg->umin_value = umin_ptr - umax_val;
4739 dst_reg->umax_value = umax_ptr - umin_val;
4741 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
4742 dst_reg->off = ptr_reg->off;
4743 dst_reg->raw = ptr_reg->raw;
4744 if (reg_is_pkt_pointer(ptr_reg)) {
4745 dst_reg->id = ++env->id_gen;
4746 /* something was added to pkt_ptr, set range to zero */
4747 if (smin_val < 0)
4748 dst_reg->raw = 0;
4750 break;
4751 case BPF_AND:
4752 case BPF_OR:
4753 case BPF_XOR:
4754 /* bitwise ops on pointers are troublesome, prohibit. */
4755 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
4756 dst, bpf_alu_string[opcode >> 4]);
4757 return -EACCES;
4758 default:
4759 /* other operators (e.g. MUL, LSH) produce non-pointer results */
4760 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
4761 dst, bpf_alu_string[opcode >> 4]);
4762 return -EACCES;
4765 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
4766 return -EINVAL;
4768 __update_reg_bounds(dst_reg);
4769 __reg_deduce_bounds(dst_reg);
4770 __reg_bound_offset(dst_reg);
4772 /* For unprivileged we require that resulting offset must be in bounds
4773 * in order to be able to sanitize access later on.
4775 if (!env->allow_ptr_leaks) {
4776 if (dst_reg->type == PTR_TO_MAP_VALUE &&
4777 check_map_access(env, dst, dst_reg->off, 1, false)) {
4778 verbose(env, "R%d pointer arithmetic of map value goes out of range, "
4779 "prohibited for !root\n", dst);
4780 return -EACCES;
4781 } else if (dst_reg->type == PTR_TO_STACK &&
4782 check_stack_access(env, dst_reg, dst_reg->off +
4783 dst_reg->var_off.value, 1)) {
4784 verbose(env, "R%d stack pointer arithmetic goes out of range, "
4785 "prohibited for !root\n", dst);
4786 return -EACCES;
4790 return 0;
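/* Worked example (not part of the original source): with r1 a
 * PTR_TO_MAP_VALUE at off 0 and r2 a scalar known to lie in [0, 16],
 *
 *	r1 += r2;
 *
 * keeps the fixed off at 0, sets the variable bounds of r1 to smin/umin 0
 * and smax/umax 16 via the BPF_ADD case above, and var_off becomes
 * tnum_add(const 0, r2's var_off); a later check_map_access() must then
 * cover the whole [0, 16] window.
 */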
4793 /* WARNING: This function does calculations on 64-bit values, but the actual
4794 * execution may occur on 32-bit values. Therefore, things like bitshifts
4795 * need extra checks in the 32-bit case.
4797 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
4798 struct bpf_insn *insn,
4799 struct bpf_reg_state *dst_reg,
4800 struct bpf_reg_state src_reg)
4802 struct bpf_reg_state *regs = cur_regs(env);
4803 u8 opcode = BPF_OP(insn->code);
4804 bool src_known, dst_known;
4805 s64 smin_val, smax_val;
4806 u64 umin_val, umax_val;
4807 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
4808 u32 dst = insn->dst_reg;
4809 int ret;
4811 if (insn_bitness == 32) {
4812 /* Relevant for 32-bit RSH: Information can propagate towards
4813 * LSB, so it isn't sufficient to only truncate the output to
4814 * 32 bits.
4816 coerce_reg_to_size(dst_reg, 4);
4817 coerce_reg_to_size(&src_reg, 4);
4820 smin_val = src_reg.smin_value;
4821 smax_val = src_reg.smax_value;
4822 umin_val = src_reg.umin_value;
4823 umax_val = src_reg.umax_value;
4824 src_known = tnum_is_const(src_reg.var_off);
4825 dst_known = tnum_is_const(dst_reg->var_off);
4827 if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
4828 smin_val > smax_val || umin_val > umax_val) {
4829 /* Taint dst register if offset had invalid bounds derived from
4830 * e.g. dead branches.
4832 __mark_reg_unknown(dst_reg);
4833 return 0;
4836 if (!src_known &&
4837 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
4838 __mark_reg_unknown(dst_reg);
4839 return 0;
4842 switch (opcode) {
4843 case BPF_ADD:
4844 ret = sanitize_val_alu(env, insn);
4845 if (ret < 0) {
4846 verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
4847 return ret;
4849 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
4850 signed_add_overflows(dst_reg->smax_value, smax_val)) {
4851 dst_reg->smin_value = S64_MIN;
4852 dst_reg->smax_value = S64_MAX;
4853 } else {
4854 dst_reg->smin_value += smin_val;
4855 dst_reg->smax_value += smax_val;
4857 if (dst_reg->umin_value + umin_val < umin_val ||
4858 dst_reg->umax_value + umax_val < umax_val) {
4859 dst_reg->umin_value = 0;
4860 dst_reg->umax_value = U64_MAX;
4861 } else {
4862 dst_reg->umin_value += umin_val;
4863 dst_reg->umax_value += umax_val;
4865 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
4866 break;
4867 case BPF_SUB:
4868 ret = sanitize_val_alu(env, insn);
4869 if (ret < 0) {
4870 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
4871 return ret;
4873 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
4874 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
4875 /* Overflow possible, we know nothing */
4876 dst_reg->smin_value = S64_MIN;
4877 dst_reg->smax_value = S64_MAX;
4878 } else {
4879 dst_reg->smin_value -= smax_val;
4880 dst_reg->smax_value -= smin_val;
4882 if (dst_reg->umin_value < umax_val) {
4883 /* Overflow possible, we know nothing */
4884 dst_reg->umin_value = 0;
4885 dst_reg->umax_value = U64_MAX;
4886 } else {
4887 /* Cannot overflow (as long as bounds are consistent) */
4888 dst_reg->umin_value -= umax_val;
4889 dst_reg->umax_value -= umin_val;
4891 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
4892 break;
4893 case BPF_MUL:
4894 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
4895 if (smin_val < 0 || dst_reg->smin_value < 0) {
4896 /* Ain't nobody got time to multiply that sign */
4897 __mark_reg_unbounded(dst_reg);
4898 __update_reg_bounds(dst_reg);
4899 break;
4901 /* Both values are positive, so we can work with unsigned and
4902 * copy the result to signed (unless it exceeds S64_MAX).
4904 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
4905 /* Potential overflow, we know nothing */
4906 __mark_reg_unbounded(dst_reg);
4907 /* (except what we can learn from the var_off) */
4908 __update_reg_bounds(dst_reg);
4909 break;
4911 dst_reg->umin_value *= umin_val;
4912 dst_reg->umax_value *= umax_val;
4913 if (dst_reg->umax_value > S64_MAX) {
4914 /* Overflow possible, we know nothing */
4915 dst_reg->smin_value = S64_MIN;
4916 dst_reg->smax_value = S64_MAX;
4917 } else {
4918 dst_reg->smin_value = dst_reg->umin_value;
4919 dst_reg->smax_value = dst_reg->umax_value;
4921 break;
4922 case BPF_AND:
4923 if (src_known && dst_known) {
4924 __mark_reg_known(dst_reg, dst_reg->var_off.value &
4925 src_reg.var_off.value);
4926 break;
4928 /* We get our minimum from the var_off, since that's inherently
4929 * bitwise. Our maximum is the minimum of the operands' maxima.
4931 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
4932 dst_reg->umin_value = dst_reg->var_off.value;
4933 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
4934 if (dst_reg->smin_value < 0 || smin_val < 0) {
4935 /* Lose signed bounds when ANDing negative numbers,
4936 * ain't nobody got time for that.
4938 dst_reg->smin_value = S64_MIN;
4939 dst_reg->smax_value = S64_MAX;
4940 } else {
4941 /* ANDing two positives gives a positive, so safe to
4942 * cast result into s64.
4944 dst_reg->smin_value = dst_reg->umin_value;
4945 dst_reg->smax_value = dst_reg->umax_value;
4947 /* We may learn something more from the var_off */
4948 __update_reg_bounds(dst_reg);
4949 break;
4950 case BPF_OR:
4951 if (src_known && dst_known) {
4952 __mark_reg_known(dst_reg, dst_reg->var_off.value |
4953 src_reg.var_off.value);
4954 break;
4956 /* We get our maximum from the var_off, and our minimum is the
4957 * maximum of the operands' minima
4959 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
4960 dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
4961 dst_reg->umax_value = dst_reg->var_off.value |
4962 dst_reg->var_off.mask;
4963 if (dst_reg->smin_value < 0 || smin_val < 0) {
4964 /* Lose signed bounds when ORing negative numbers,
4965 * ain't nobody got time for that.
4967 dst_reg->smin_value = S64_MIN;
4968 dst_reg->smax_value = S64_MAX;
4969 } else {
4970 /* ORing two positives gives a positive, so safe to
4971 * cast result into s64.
4973 dst_reg->smin_value = dst_reg->umin_value;
4974 dst_reg->smax_value = dst_reg->umax_value;
4976 /* We may learn something more from the var_off */
4977 __update_reg_bounds(dst_reg);
4978 break;
4979 case BPF_LSH:
4980 if (umax_val >= insn_bitness) {
4981 /* Shifts greater than 31 or 63 are undefined.
4982 * This includes shifts by a negative number.
4984 mark_reg_unknown(env, regs, insn->dst_reg);
4985 break;
4987 /* We lose all sign bit information (except what we can pick
4988 * up from var_off)
4990 dst_reg->smin_value = S64_MIN;
4991 dst_reg->smax_value = S64_MAX;
4992 /* If we might shift our top bit out, then we know nothing */
4993 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
4994 dst_reg->umin_value = 0;
4995 dst_reg->umax_value = U64_MAX;
4996 } else {
4997 dst_reg->umin_value <<= umin_val;
4998 dst_reg->umax_value <<= umax_val;
5000 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
5001 /* We may learn something more from the var_off */
5002 __update_reg_bounds(dst_reg);
5003 break;
5004 case BPF_RSH:
5005 if (umax_val >= insn_bitness) {
5006 /* Shifts greater than 31 or 63 are undefined.
5007 * This includes shifts by a negative number.
5009 mark_reg_unknown(env, regs, insn->dst_reg);
5010 break;
5012 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
5013 * be negative, then either:
5014 * 1) src_reg might be zero, so the sign bit of the result is
5015 * unknown, so we lose our signed bounds
5016 * 2) it's known negative, thus the unsigned bounds capture the
5017 * signed bounds
5018 * 3) the signed bounds cross zero, so they tell us nothing
5019 * about the result
5020 * If the value in dst_reg is known nonnegative, then again the
5021 * unsigned bounds capture the signed bounds.
5022 * Thus, in all cases it suffices to blow away our signed bounds
5023 * and rely on inferring new ones from the unsigned bounds and
5024 * var_off of the result.
5026 dst_reg->smin_value = S64_MIN;
5027 dst_reg->smax_value = S64_MAX;
5028 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
5029 dst_reg->umin_value >>= umax_val;
5030 dst_reg->umax_value >>= umin_val;
5031 /* We may learn something more from the var_off */
5032 __update_reg_bounds(dst_reg);
5033 break;
5034 case BPF_ARSH:
5035 if (umax_val >= insn_bitness) {
5036 /* Shifts greater than 31 or 63 are undefined.
5037 * This includes shifts by a negative number.
5039 mark_reg_unknown(env, regs, insn->dst_reg);
5040 break;
5043 /* Upon reaching here, src_known is true and
5044 * umax_val is equal to umin_val.
5046 dst_reg->smin_value >>= umin_val;
5047 dst_reg->smax_value >>= umin_val;
5048 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
5050 /* blow away the dst_reg umin_value/umax_value and rely on
5051 * dst_reg var_off to refine the result.
5053 dst_reg->umin_value = 0;
5054 dst_reg->umax_value = U64_MAX;
5055 __update_reg_bounds(dst_reg);
5056 break;
5057 default:
5058 mark_reg_unknown(env, regs, insn->dst_reg);
5059 break;
5062 if (BPF_CLASS(insn->code) != BPF_ALU64) {
5063 /* 32-bit ALU ops are (32,32)->32 */
5064 coerce_reg_to_size(dst_reg, 4);
5067 __reg_deduce_bounds(dst_reg);
5068 __reg_bound_offset(dst_reg);
5069 return 0;
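/* Worked example (not part of the original source): for an otherwise
 * unbounded scalar r1,
 *
 *	r1 &= 0xf;
 *
 * the BPF_AND case derives var_off (value 0, mask 0xf), so the bounds
 * collapse to umin 0, umax 0xf, and after __update_reg_bounds() the
 * signed bounds are [0, 0xf] as well, which is enough to prove e.g. a
 * 16-byte table access in bounds.
 */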
5072 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
5073 * and var_off.
5075 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
5076 struct bpf_insn *insn)
5078 struct bpf_verifier_state *vstate = env->cur_state;
5079 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5080 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
5081 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
5082 u8 opcode = BPF_OP(insn->code);
5083 int err;
5085 dst_reg = &regs[insn->dst_reg];
5086 src_reg = NULL;
5087 if (dst_reg->type != SCALAR_VALUE)
5088 ptr_reg = dst_reg;
5089 if (BPF_SRC(insn->code) == BPF_X) {
5090 src_reg = &regs[insn->src_reg];
5091 if (src_reg->type != SCALAR_VALUE) {
5092 if (dst_reg->type != SCALAR_VALUE) {
5093 /* Combining two pointers by any ALU op yields
5094 * an arbitrary scalar. Disallow all math except
5095 * pointer subtraction
5097 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
5098 mark_reg_unknown(env, regs, insn->dst_reg);
5099 return 0;
5101 verbose(env, "R%d pointer %s pointer prohibited\n",
5102 insn->dst_reg,
5103 bpf_alu_string[opcode >> 4]);
5104 return -EACCES;
5105 } else {
5106 /* scalar += pointer
5107 * This is legal, but we have to reverse our
5108 * src/dest handling in computing the range
5110 err = mark_chain_precision(env, insn->dst_reg);
5111 if (err)
5112 return err;
5113 return adjust_ptr_min_max_vals(env, insn,
5114 src_reg, dst_reg);
5116 } else if (ptr_reg) {
5117 /* pointer += scalar */
5118 err = mark_chain_precision(env, insn->src_reg);
5119 if (err)
5120 return err;
5121 return adjust_ptr_min_max_vals(env, insn,
5122 dst_reg, src_reg);
5124 } else {
5125 /* Pretend the src is a reg with a known value, since we only
5126 * need to be able to read from this state.
5128 off_reg.type = SCALAR_VALUE;
5129 __mark_reg_known(&off_reg, insn->imm);
5130 src_reg = &off_reg;
5131 if (ptr_reg) /* pointer += K */
5132 return adjust_ptr_min_max_vals(env, insn,
5133 ptr_reg, src_reg);
5136 /* Got here implies adding two SCALAR_VALUEs */
5137 if (WARN_ON_ONCE(ptr_reg)) {
5138 print_verifier_state(env, state);
5139 verbose(env, "verifier internal error: unexpected ptr_reg\n");
5140 return -EINVAL;
5142 if (WARN_ON(!src_reg)) {
5143 print_verifier_state(env, state);
5144 verbose(env, "verifier internal error: no src_reg\n");
5145 return -EINVAL;
5147 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
5150 /* check validity of 32-bit and 64-bit arithmetic operations */
5151 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
5153 struct bpf_reg_state *regs = cur_regs(env);
5154 u8 opcode = BPF_OP(insn->code);
5155 int err;
5157 if (opcode == BPF_END || opcode == BPF_NEG) {
5158 if (opcode == BPF_NEG) {
5159 if (BPF_SRC(insn->code) != 0 ||
5160 insn->src_reg != BPF_REG_0 ||
5161 insn->off != 0 || insn->imm != 0) {
5162 verbose(env, "BPF_NEG uses reserved fields\n");
5163 return -EINVAL;
5165 } else {
5166 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
5167 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
5168 BPF_CLASS(insn->code) == BPF_ALU64) {
5169 verbose(env, "BPF_END uses reserved fields\n");
5170 return -EINVAL;
5174 /* check src operand */
5175 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
5176 if (err)
5177 return err;
5179 if (is_pointer_value(env, insn->dst_reg)) {
5180 verbose(env, "R%d pointer arithmetic prohibited\n",
5181 insn->dst_reg);
5182 return -EACCES;
5185 /* check dest operand */
5186 err = check_reg_arg(env, insn->dst_reg, DST_OP);
5187 if (err)
5188 return err;
5190 } else if (opcode == BPF_MOV) {
5192 if (BPF_SRC(insn->code) == BPF_X) {
5193 if (insn->imm != 0 || insn->off != 0) {
5194 verbose(env, "BPF_MOV uses reserved fields\n");
5195 return -EINVAL;
5198 /* check src operand */
5199 err = check_reg_arg(env, insn->src_reg, SRC_OP);
5200 if (err)
5201 return err;
5202 } else {
5203 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
5204 verbose(env, "BPF_MOV uses reserved fields\n");
5205 return -EINVAL;
5209 /* check dest operand, mark as required later */
5210 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
5211 if (err)
5212 return err;
5214 if (BPF_SRC(insn->code) == BPF_X) {
5215 struct bpf_reg_state *src_reg = regs + insn->src_reg;
5216 struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
5218 if (BPF_CLASS(insn->code) == BPF_ALU64) {
5219 /* case: R1 = R2
5220 * copy register state to dest reg
5222 *dst_reg = *src_reg;
5223 dst_reg->live |= REG_LIVE_WRITTEN;
5224 dst_reg->subreg_def = DEF_NOT_SUBREG;
5225 } else {
5226 /* R1 = (u32) R2 */
5227 if (is_pointer_value(env, insn->src_reg)) {
5228 verbose(env,
5229 "R%d partial copy of pointer\n",
5230 insn->src_reg);
5231 return -EACCES;
5232 } else if (src_reg->type == SCALAR_VALUE) {
5233 *dst_reg = *src_reg;
5234 dst_reg->live |= REG_LIVE_WRITTEN;
5235 dst_reg->subreg_def = env->insn_idx + 1;
5236 } else {
5237 mark_reg_unknown(env, regs,
5238 insn->dst_reg);
5240 coerce_reg_to_size(dst_reg, 4);
5242 } else {
5243 /* case: R = imm
5244 * remember the value we stored into this reg
5246 /* clear any state __mark_reg_known doesn't set */
5247 mark_reg_unknown(env, regs, insn->dst_reg);
5248 regs[insn->dst_reg].type = SCALAR_VALUE;
5249 if (BPF_CLASS(insn->code) == BPF_ALU64) {
5250 __mark_reg_known(regs + insn->dst_reg,
5251 insn->imm);
5252 } else {
5253 __mark_reg_known(regs + insn->dst_reg,
5254 (u32)insn->imm);
5258 } else if (opcode > BPF_END) {
5259 verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
5260 return -EINVAL;
5262 } else { /* all other ALU ops: and, sub, xor, add, ... */
5264 if (BPF_SRC(insn->code) == BPF_X) {
5265 if (insn->imm != 0 || insn->off != 0) {
5266 verbose(env, "BPF_ALU uses reserved fields\n");
5267 return -EINVAL;
5269 /* check src1 operand */
5270 err = check_reg_arg(env, insn->src_reg, SRC_OP);
5271 if (err)
5272 return err;
5273 } else {
5274 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
5275 verbose(env, "BPF_ALU uses reserved fields\n");
5276 return -EINVAL;
5280 /* check src2 operand */
5281 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
5282 if (err)
5283 return err;
5285 if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
5286 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
5287 verbose(env, "div by zero\n");
5288 return -EINVAL;
5291 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
5292 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
5293 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
5295 if (insn->imm < 0 || insn->imm >= size) {
5296 verbose(env, "invalid shift %d\n", insn->imm);
5297 return -EINVAL;
5301 /* check dest operand */
5302 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
5303 if (err)
5304 return err;
5306 return adjust_reg_min_max_vals(env, insn);
5309 return 0;
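/* Illustrative sketch, not part of the original source: besides the
 * bounds tracking above, this also rejects malformed or unsafe encodings,
 * e.g.
 *
 *	w1 = w2;	// 32-bit MOV of a pointer: "partial copy of pointer"
 *	r1 /= 0;	// BPF_K divide by zero: "div by zero"
 *	r1 <<= 64;	// out-of-range immediate shift: "invalid shift 64"
 *
 * while a plain 64-bit "r1 = r2" simply copies the full register state.
 */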
5312 static void __find_good_pkt_pointers(struct bpf_func_state *state,
5313 struct bpf_reg_state *dst_reg,
5314 enum bpf_reg_type type, u16 new_range)
5316 struct bpf_reg_state *reg;
5317 int i;
5319 for (i = 0; i < MAX_BPF_REG; i++) {
5320 reg = &state->regs[i];
5321 if (reg->type == type && reg->id == dst_reg->id)
5322 /* keep the maximum range already checked */
5323 reg->range = max(reg->range, new_range);
5326 bpf_for_each_spilled_reg(i, state, reg) {
5327 if (!reg)
5328 continue;
5329 if (reg->type == type && reg->id == dst_reg->id)
5330 reg->range = max(reg->range, new_range);
5334 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
5335 struct bpf_reg_state *dst_reg,
5336 enum bpf_reg_type type,
5337 bool range_right_open)
5339 u16 new_range;
5340 int i;
5342 if (dst_reg->off < 0 ||
5343 (dst_reg->off == 0 && range_right_open))
5344 /* This doesn't give us any range */
5345 return;
5347 if (dst_reg->umax_value > MAX_PACKET_OFF ||
5348 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
5349 /* Risk of overflow. For instance, ptr + (1<<63) may be less
5350 * than pkt_end, but that's because it's also less than pkt.
5352 return;
5354 new_range = dst_reg->off;
5355 if (range_right_open)
5356 new_range--;
5358 /* Examples for register markings:
5360 * pkt_data in dst register:
5362 * r2 = r3;
5363 * r2 += 8;
5364 * if (r2 > pkt_end) goto <handle exception>
5365 * <access okay>
5367 * r2 = r3;
5368 * r2 += 8;
5369 * if (r2 < pkt_end) goto <access okay>
5370 * <handle exception>
5372 * Where:
5373 * r2 == dst_reg, pkt_end == src_reg
5374 * r2=pkt(id=n,off=8,r=0)
5375 * r3=pkt(id=n,off=0,r=0)
5377 * pkt_data in src register:
5379 * r2 = r3;
5380 * r2 += 8;
5381 * if (pkt_end >= r2) goto <access okay>
5382 * <handle exception>
5384 * r2 = r3;
5385 * r2 += 8;
5386 * if (pkt_end <= r2) goto <handle exception>
5387 * <access okay>
5389 * Where:
5390 * pkt_end == dst_reg, r2 == src_reg
5391 * r2=pkt(id=n,off=8,r=0)
5392 * r3=pkt(id=n,off=0,r=0)
5394 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
5395 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
5396 * and [r3, r3 + 8-1) respectively is safe to access depending on
5397 * the check.
5400 /* If our ids match, then we must have the same max_value. And we
5401 * don't care about the other reg's fixed offset, since if it's too big
5402 * the range won't allow anything.
5403 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
5405 for (i = 0; i <= vstate->curframe; i++)
5406 __find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
5407 new_range);
5410 /* compute branch direction of the expression "if (reg opcode val) goto target;"
5411 * and return:
5412 * 1 - branch will be taken and "goto target" will be executed
5413 * 0 - branch will not be taken and fall-through to next insn
5414 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
5416 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
5417 bool is_jmp32)
5419 struct bpf_reg_state reg_lo;
5420 s64 sval;
5422 if (__is_pointer_value(false, reg))
5423 return -1;
5425 if (is_jmp32) {
5426 reg_lo = *reg;
5427 reg = &reg_lo;
5428 * For JMP32, only the low 32 bits are compared; coerce_reg_to_size()
5429 * truncates the high bits and updates umin/umax according to the
5430 * information in the low bits.
5432 coerce_reg_to_size(reg, 4);
5433 /* smin/smax need special handling. For example, after coerce,
5434 * if smin_value is 0x00000000ffffffffLL, the value is -1 when
5435 * used as operand to JMP32. It is a negative number from s32's
5436 * point of view, while it is a positive number when seen as
5437 * s64. The smin/smax are kept as s64, therefore, when used with
5438 * JMP32, they need to be transformed into s32, then sign
5439 * extended back to s64.
5441 * Also, smin/smax were copied from umin/umax. If umin/umax have
5442 * different sign bits, then the min/max relationship doesn't
5443 * hold after casting into s32; in this case, set smin/smax
5444 * to the safest range.
5446 if ((reg->umax_value ^ reg->umin_value) &
5447 (1ULL << 31)) {
5448 reg->smin_value = S32_MIN;
5449 reg->smax_value = S32_MAX;
5451 reg->smin_value = (s64)(s32)reg->smin_value;
5452 reg->smax_value = (s64)(s32)reg->smax_value;
5454 val = (u32)val;
5455 sval = (s64)(s32)val;
5456 } else {
5457 sval = (s64)val;
5460 switch (opcode) {
5461 case BPF_JEQ:
5462 if (tnum_is_const(reg->var_off))
5463 return !!tnum_equals_const(reg->var_off, val);
5464 break;
5465 case BPF_JNE:
5466 if (tnum_is_const(reg->var_off))
5467 return !tnum_equals_const(reg->var_off, val);
5468 break;
5469 case BPF_JSET:
5470 if ((~reg->var_off.mask & reg->var_off.value) & val)
5471 return 1;
5472 if (!((reg->var_off.mask | reg->var_off.value) & val))
5473 return 0;
5474 break;
5475 case BPF_JGT:
5476 if (reg->umin_value > val)
5477 return 1;
5478 else if (reg->umax_value <= val)
5479 return 0;
5480 break;
5481 case BPF_JSGT:
5482 if (reg->smin_value > sval)
5483 return 1;
5484 else if (reg->smax_value < sval)
5485 return 0;
5486 break;
5487 case BPF_JLT:
5488 if (reg->umax_value < val)
5489 return 1;
5490 else if (reg->umin_value >= val)
5491 return 0;
5492 break;
5493 case BPF_JSLT:
5494 if (reg->smax_value < sval)
5495 return 1;
5496 else if (reg->smin_value >= sval)
5497 return 0;
5498 break;
5499 case BPF_JGE:
5500 if (reg->umin_value >= val)
5501 return 1;
5502 else if (reg->umax_value < val)
5503 return 0;
5504 break;
5505 case BPF_JSGE:
5506 if (reg->smin_value >= sval)
5507 return 1;
5508 else if (reg->smax_value < sval)
5509 return 0;
5510 break;
5511 case BPF_JLE:
5512 if (reg->umax_value <= val)
5513 return 1;
5514 else if (reg->umin_value > val)
5515 return 0;
5516 break;
5517 case BPF_JSLE:
5518 if (reg->smax_value <= sval)
5519 return 1;
5520 else if (reg->smin_value > sval)
5521 return 0;
5522 break;
5525 return -1;
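/* Illustrative sketch, not part of the original source: if r1 is known to
 * have umin_value 10, then "if r1 > 5 goto ..." is always taken (1 is
 * returned) and the fall-through state need not be explored; with r1 in
 * [0, 10], "if r1 < 5" is undecidable and -1 is returned so both branches
 * are pushed.
 */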
5528 /* Generate the minimum value of the high 32 bits from tnum info. */
5529 static u64 gen_hi_min(struct tnum var)
5531 return var.value & ~0xffffffffULL;
5534 /* Generate the maximum value of the high 32 bits from tnum info. */
5535 static u64 gen_hi_max(struct tnum var)
5537 return (var.value | var.mask) & ~0xffffffffULL;
5540 /* Return true if VAL is compared with a s64 sign extended from s32, and they
5541 * have the same signedness.
5543 static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
5545 return ((s32)sval >= 0 &&
5546 reg->smin_value >= 0 && reg->smax_value <= S32_MAX) ||
5547 ((s32)sval < 0 &&
5548 reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
5551 /* Adjusts the register min/max values in the case that the dst_reg is the
5552 * variable register that we are working on, and src_reg is a constant or we're
5553 * simply doing a BPF_K check.
5554 * In JEQ/JNE cases we also adjust the var_off values.
5556 static void reg_set_min_max(struct bpf_reg_state *true_reg,
5557 struct bpf_reg_state *false_reg, u64 val,
5558 u8 opcode, bool is_jmp32)
5560 s64 sval;
5562 /* If the dst_reg is a pointer, we can't learn anything about its
5563 * variable offset from the compare (unless src_reg were a pointer into
5564 * the same object, but we don't bother with that).
5565 * Since false_reg and true_reg have the same type by construction, we
5566 * only need to check one of them for pointerness.
5568 if (__is_pointer_value(false, false_reg))
5569 return;
5571 val = is_jmp32 ? (u32)val : val;
5572 sval = is_jmp32 ? (s64)(s32)val : (s64)val;
5574 switch (opcode) {
5575 case BPF_JEQ:
5576 case BPF_JNE:
5578 struct bpf_reg_state *reg =
5579 opcode == BPF_JEQ ? true_reg : false_reg;
5581 /* For BPF_JEQ, if this is false we know nothing Jon Snow, but
5582 * if it is true we know the value for sure. Likewise for
5583 * BPF_JNE.
5585 if (is_jmp32) {
5586 u64 old_v = reg->var_off.value;
5587 u64 hi_mask = ~0xffffffffULL;
5589 reg->var_off.value = (old_v & hi_mask) | val;
5590 reg->var_off.mask &= hi_mask;
5591 } else {
5592 __mark_reg_known(reg, val);
5594 break;
5596 case BPF_JSET:
5597 false_reg->var_off = tnum_and(false_reg->var_off,
5598 tnum_const(~val));
5599 if (is_power_of_2(val))
5600 true_reg->var_off = tnum_or(true_reg->var_off,
5601 tnum_const(val));
5602 break;
5603 case BPF_JGE:
5604 case BPF_JGT:
5606 u64 false_umax = opcode == BPF_JGT ? val : val - 1;
5607 u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
5609 if (is_jmp32) {
5610 false_umax += gen_hi_max(false_reg->var_off);
5611 true_umin += gen_hi_min(true_reg->var_off);
5613 false_reg->umax_value = min(false_reg->umax_value, false_umax);
5614 true_reg->umin_value = max(true_reg->umin_value, true_umin);
5615 break;
5617 case BPF_JSGE:
5618 case BPF_JSGT:
5620 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
5621 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
5623 /* If the full s64 was not sign-extended from s32 then don't
5624 * deduce further info.
5626 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5627 break;
5628 false_reg->smax_value = min(false_reg->smax_value, false_smax);
5629 true_reg->smin_value = max(true_reg->smin_value, true_smin);
5630 break;
5632 case BPF_JLE:
5633 case BPF_JLT:
5635 u64 false_umin = opcode == BPF_JLT ? val : val + 1;
5636 u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
5638 if (is_jmp32) {
5639 false_umin += gen_hi_min(false_reg->var_off);
5640 true_umax += gen_hi_max(true_reg->var_off);
5642 false_reg->umin_value = max(false_reg->umin_value, false_umin);
5643 true_reg->umax_value = min(true_reg->umax_value, true_umax);
5644 break;
5646 case BPF_JSLE:
5647 case BPF_JSLT:
5649 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
5650 s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
5652 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5653 break;
5654 false_reg->smin_value = max(false_reg->smin_value, false_smin);
5655 true_reg->smax_value = min(true_reg->smax_value, true_smax);
5656 break;
5658 default:
5659 break;
5662 __reg_deduce_bounds(false_reg);
5663 __reg_deduce_bounds(true_reg);
5664 /* We might have learned some bits from the bounds. */
5665 __reg_bound_offset(false_reg);
5666 __reg_bound_offset(true_reg);
5667 if (is_jmp32) {
5668 __reg_bound_offset32(false_reg);
5669 __reg_bound_offset32(true_reg);
5671 /* Intersecting with the old var_off might have improved our bounds
5672 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5673 * then new var_off is (0; 0x7f...fc) which improves our umax.
5675 __update_reg_bounds(false_reg);
5676 __update_reg_bounds(true_reg);
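/* For instance, for "if r1 > 7 goto ..." (BPF_JGT with BPF_K) on an unknown
* scalar in [0, 255]: the branch-taken state (true_reg) gets umin_value = 8
* and the fall-through state (false_reg) gets umax_value = 7; the helpers
* above then tighten the signed bounds and var_off from those limits.
*/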
5679 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
5680 * the variable reg.
5682 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
5683 struct bpf_reg_state *false_reg, u64 val,
5684 u8 opcode, bool is_jmp32)
5686 s64 sval;
5688 if (__is_pointer_value(false, false_reg))
5689 return;
5691 val = is_jmp32 ? (u32)val : val;
5692 sval = is_jmp32 ? (s64)(s32)val : (s64)val;
5694 switch (opcode) {
5695 case BPF_JEQ:
5696 case BPF_JNE:
5698 struct bpf_reg_state *reg =
5699 opcode == BPF_JEQ ? true_reg : false_reg;
5701 if (is_jmp32) {
5702 u64 old_v = reg->var_off.value;
5703 u64 hi_mask = ~0xffffffffULL;
5705 reg->var_off.value = (old_v & hi_mask) | val;
5706 reg->var_off.mask &= hi_mask;
5707 } else {
5708 __mark_reg_known(reg, val);
5710 break;
5712 case BPF_JSET:
5713 false_reg->var_off = tnum_and(false_reg->var_off,
5714 tnum_const(~val));
5715 if (is_power_of_2(val))
5716 true_reg->var_off = tnum_or(true_reg->var_off,
5717 tnum_const(val));
5718 break;
5719 case BPF_JGE:
5720 case BPF_JGT:
5722 u64 false_umin = opcode == BPF_JGT ? val : val + 1;
5723 u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
5725 if (is_jmp32) {
5726 false_umin += gen_hi_min(false_reg->var_off);
5727 true_umax += gen_hi_max(true_reg->var_off);
5729 false_reg->umin_value = max(false_reg->umin_value, false_umin);
5730 true_reg->umax_value = min(true_reg->umax_value, true_umax);
5731 break;
5733 case BPF_JSGE:
5734 case BPF_JSGT:
5736 s64 false_smin = opcode == BPF_JSGT ? sval : sval + 1;
5737 s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval;
5739 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5740 break;
5741 false_reg->smin_value = max(false_reg->smin_value, false_smin);
5742 true_reg->smax_value = min(true_reg->smax_value, true_smax);
5743 break;
5745 case BPF_JLE:
5746 case BPF_JLT:
5748 u64 false_umax = opcode == BPF_JLT ? val : val - 1;
5749 u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
5751 if (is_jmp32) {
5752 false_umax += gen_hi_max(false_reg->var_off);
5753 true_umin += gen_hi_min(true_reg->var_off);
5755 false_reg->umax_value = min(false_reg->umax_value, false_umax);
5756 true_reg->umin_value = max(true_reg->umin_value, true_umin);
5757 break;
5759 case BPF_JSLE:
5760 case BPF_JSLT:
5762 s64 false_smax = opcode == BPF_JSLT ? sval : sval - 1;
5763 s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval;
5765 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5766 break;
5767 false_reg->smax_value = min(false_reg->smax_value, false_smax);
5768 true_reg->smin_value = max(true_reg->smin_value, true_smin);
5769 break;
5771 default:
5772 break;
5775 __reg_deduce_bounds(false_reg);
5776 __reg_deduce_bounds(true_reg);
5777 /* We might have learned some bits from the bounds. */
5778 __reg_bound_offset(false_reg);
5779 __reg_bound_offset(true_reg);
5780 if (is_jmp32) {
5781 __reg_bound_offset32(false_reg);
5782 __reg_bound_offset32(true_reg);
5784 /* Intersecting with the old var_off might have improved our bounds
5785 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5786 * then new var_off is (0; 0x7f...fc) which improves our umax.
5788 __update_reg_bounds(false_reg);
5789 __update_reg_bounds(true_reg);
5792 /* Regs are known to be equal, so intersect their min/max/var_off */
5793 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
5794 struct bpf_reg_state *dst_reg)
5796 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
5797 dst_reg->umin_value);
5798 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
5799 dst_reg->umax_value);
5800 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
5801 dst_reg->smin_value);
5802 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
5803 dst_reg->smax_value);
5804 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
5805 dst_reg->var_off);
5806 /* We might have learned new bounds from the var_off. */
5807 __update_reg_bounds(src_reg);
5808 __update_reg_bounds(dst_reg);
5809 /* We might have learned something about the sign bit. */
5810 __reg_deduce_bounds(src_reg);
5811 __reg_deduce_bounds(dst_reg);
5812 /* We might have learned some bits from the bounds. */
5813 __reg_bound_offset(src_reg);
5814 __reg_bound_offset(dst_reg);
5815 /* Intersecting with the old var_off might have improved our bounds
5816 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5817 * then new var_off is (0; 0x7f...fc) which improves our umax.
5819 __update_reg_bounds(src_reg);
5820 __update_reg_bounds(dst_reg);
5823 static void reg_combine_min_max(struct bpf_reg_state *true_src,
5824 struct bpf_reg_state *true_dst,
5825 struct bpf_reg_state *false_src,
5826 struct bpf_reg_state *false_dst,
5827 u8 opcode)
5829 switch (opcode) {
5830 case BPF_JEQ:
5831 __reg_combine_min_max(true_src, true_dst);
5832 break;
5833 case BPF_JNE:
5834 __reg_combine_min_max(false_src, false_dst);
5835 break;
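/* E.g. for "if r1 == r2 goto ..." with r1 known to be in [0, 100] and r2 in
* [50, 200]: in the branch-taken (JEQ) state both registers' bounds are
* intersected to [50, 100]; for JNE the same intersection applies to the
* fall-through state instead.
*/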
5839 static void mark_ptr_or_null_reg(struct bpf_func_state *state,
5840 struct bpf_reg_state *reg, u32 id,
5841 bool is_null)
5843 if (reg_type_may_be_null(reg->type) && reg->id == id) {
5844 /* Old offset (both fixed and variable parts) should
5845 * have been known-zero, because we don't allow pointer
5846 * arithmetic on pointers that might be NULL.
5848 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
5849 !tnum_equals_const(reg->var_off, 0) ||
5850 reg->off)) {
5851 __mark_reg_known_zero(reg);
5852 reg->off = 0;
5854 if (is_null) {
5855 reg->type = SCALAR_VALUE;
5856 } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
5857 if (reg->map_ptr->inner_map_meta) {
5858 reg->type = CONST_PTR_TO_MAP;
5859 reg->map_ptr = reg->map_ptr->inner_map_meta;
5860 } else if (reg->map_ptr->map_type ==
5861 BPF_MAP_TYPE_XSKMAP) {
5862 reg->type = PTR_TO_XDP_SOCK;
5863 } else {
5864 reg->type = PTR_TO_MAP_VALUE;
5866 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
5867 reg->type = PTR_TO_SOCKET;
5868 } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) {
5869 reg->type = PTR_TO_SOCK_COMMON;
5870 } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
5871 reg->type = PTR_TO_TCP_SOCK;
5873 if (is_null) {
5874 /* We don't need id and ref_obj_id from this point
5875 * onwards anymore, so reset them to give state
5876 * pruning a chance to take effect.
5878 reg->id = 0;
5879 reg->ref_obj_id = 0;
5880 } else if (!reg_may_point_to_spin_lock(reg)) {
5881 /* For not-NULL ptr, reg->ref_obj_id will be reset
5882 * in release_reg_references().
5884 * reg->id is still used by the spin_lock ptr; for any
5885 * other ptr type, reg->id can be reset.
5887 reg->id = 0;
5892 static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
5893 bool is_null)
5895 struct bpf_reg_state *reg;
5896 int i;
5898 for (i = 0; i < MAX_BPF_REG; i++)
5899 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
5901 bpf_for_each_spilled_reg(i, state, reg) {
5902 if (!reg)
5903 continue;
5904 mark_ptr_or_null_reg(state, reg, id, is_null);
5908 /* The logic is similar to find_good_pkt_pointers(), both could eventually
5909 * be folded together at some point.
5911 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
5912 bool is_null)
5914 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5915 struct bpf_reg_state *regs = state->regs;
5916 u32 ref_obj_id = regs[regno].ref_obj_id;
5917 u32 id = regs[regno].id;
5918 int i;
5920 if (ref_obj_id && ref_obj_id == id && is_null)
5921 /* regs[regno] is in the " == NULL" branch.
5922 * No one could have freed the reference state before
5923 * doing the NULL check.
5925 WARN_ON_ONCE(release_reference_state(state, id));
5927 for (i = 0; i <= vstate->curframe; i++)
5928 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
5931 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
5932 struct bpf_reg_state *dst_reg,
5933 struct bpf_reg_state *src_reg,
5934 struct bpf_verifier_state *this_branch,
5935 struct bpf_verifier_state *other_branch)
5937 if (BPF_SRC(insn->code) != BPF_X)
5938 return false;
5940 /* Pointers are always 64-bit. */
5941 if (BPF_CLASS(insn->code) == BPF_JMP32)
5942 return false;
5944 switch (BPF_OP(insn->code)) {
5945 case BPF_JGT:
5946 if ((dst_reg->type == PTR_TO_PACKET &&
5947 src_reg->type == PTR_TO_PACKET_END) ||
5948 (dst_reg->type == PTR_TO_PACKET_META &&
5949 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
5950 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
5951 find_good_pkt_pointers(this_branch, dst_reg,
5952 dst_reg->type, false);
5953 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
5954 src_reg->type == PTR_TO_PACKET) ||
5955 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
5956 src_reg->type == PTR_TO_PACKET_META)) {
5957 /* pkt_end > pkt_data', pkt_data > pkt_meta' */
5958 find_good_pkt_pointers(other_branch, src_reg,
5959 src_reg->type, true);
5960 } else {
5961 return false;
5963 break;
5964 case BPF_JLT:
5965 if ((dst_reg->type == PTR_TO_PACKET &&
5966 src_reg->type == PTR_TO_PACKET_END) ||
5967 (dst_reg->type == PTR_TO_PACKET_META &&
5968 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
5969 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
5970 find_good_pkt_pointers(other_branch, dst_reg,
5971 dst_reg->type, true);
5972 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
5973 src_reg->type == PTR_TO_PACKET) ||
5974 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
5975 src_reg->type == PTR_TO_PACKET_META)) {
5976 /* pkt_end < pkt_data', pkt_data < pkt_meta' */
5977 find_good_pkt_pointers(this_branch, src_reg,
5978 src_reg->type, false);
5979 } else {
5980 return false;
5982 break;
5983 case BPF_JGE:
5984 if ((dst_reg->type == PTR_TO_PACKET &&
5985 src_reg->type == PTR_TO_PACKET_END) ||
5986 (dst_reg->type == PTR_TO_PACKET_META &&
5987 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
5988 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
5989 find_good_pkt_pointers(this_branch, dst_reg,
5990 dst_reg->type, true);
5991 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
5992 src_reg->type == PTR_TO_PACKET) ||
5993 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
5994 src_reg->type == PTR_TO_PACKET_META)) {
5995 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
5996 find_good_pkt_pointers(other_branch, src_reg,
5997 src_reg->type, false);
5998 } else {
5999 return false;
6001 break;
6002 case BPF_JLE:
6003 if ((dst_reg->type == PTR_TO_PACKET &&
6004 src_reg->type == PTR_TO_PACKET_END) ||
6005 (dst_reg->type == PTR_TO_PACKET_META &&
6006 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
6007 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
6008 find_good_pkt_pointers(other_branch, dst_reg,
6009 dst_reg->type, false);
6010 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
6011 src_reg->type == PTR_TO_PACKET) ||
6012 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6013 src_reg->type == PTR_TO_PACKET_META)) {
6014 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
6015 find_good_pkt_pointers(this_branch, src_reg,
6016 src_reg->type, true);
6017 } else {
6018 return false;
6020 break;
6021 default:
6022 return false;
6025 return true;
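/* Typical pattern handled above: for "if (pkt_data + 14 > pkt_end) goto drop"
* the fall-through branch is the one where at least 14 bytes of packet data
* are available, so find_good_pkt_pointers() is invoked on this_branch to
* record that range on the related packet pointers in that state.
*/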
6028 static int check_cond_jmp_op(struct bpf_verifier_env *env,
6029 struct bpf_insn *insn, int *insn_idx)
6031 struct bpf_verifier_state *this_branch = env->cur_state;
6032 struct bpf_verifier_state *other_branch;
6033 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
6034 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
6035 u8 opcode = BPF_OP(insn->code);
6036 bool is_jmp32;
6037 int pred = -1;
6038 int err;
6040 /* Only conditional jumps are expected to reach here. */
6041 if (opcode == BPF_JA || opcode > BPF_JSLE) {
6042 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
6043 return -EINVAL;
6046 if (BPF_SRC(insn->code) == BPF_X) {
6047 if (insn->imm != 0) {
6048 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
6049 return -EINVAL;
6052 /* check src1 operand */
6053 err = check_reg_arg(env, insn->src_reg, SRC_OP);
6054 if (err)
6055 return err;
6057 if (is_pointer_value(env, insn->src_reg)) {
6058 verbose(env, "R%d pointer comparison prohibited\n",
6059 insn->src_reg);
6060 return -EACCES;
6062 src_reg = &regs[insn->src_reg];
6063 } else {
6064 if (insn->src_reg != BPF_REG_0) {
6065 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
6066 return -EINVAL;
6070 /* check src2 operand */
6071 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
6072 if (err)
6073 return err;
6075 dst_reg = &regs[insn->dst_reg];
6076 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
6078 if (BPF_SRC(insn->code) == BPF_K)
6079 pred = is_branch_taken(dst_reg, insn->imm,
6080 opcode, is_jmp32);
6081 else if (src_reg->type == SCALAR_VALUE &&
6082 tnum_is_const(src_reg->var_off))
6083 pred = is_branch_taken(dst_reg, src_reg->var_off.value,
6084 opcode, is_jmp32);
6085 if (pred >= 0) {
6086 err = mark_chain_precision(env, insn->dst_reg);
6087 if (BPF_SRC(insn->code) == BPF_X && !err)
6088 err = mark_chain_precision(env, insn->src_reg);
6089 if (err)
6090 return err;
6092 if (pred == 1) {
6093 /* only follow the goto, ignore fall-through */
6094 *insn_idx += insn->off;
6095 return 0;
6096 } else if (pred == 0) {
6097 /* only follow fall-through branch, since
6098 * that's where the program will go
6100 return 0;
6103 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
6104 false);
6105 if (!other_branch)
6106 return -EFAULT;
6107 other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
6109 /* detect if we are comparing against a constant value so we can adjust
6110 * our min/max values for our dst register.
6111 * this is only legit if both are scalars (or pointers to the same
6112 * object, I suppose, but we don't support that right now), because
6113 * otherwise the different base pointers mean the offsets aren't
6114 * comparable.
6116 if (BPF_SRC(insn->code) == BPF_X) {
6117 struct bpf_reg_state *src_reg = &regs[insn->src_reg];
6118 struct bpf_reg_state lo_reg0 = *dst_reg;
6119 struct bpf_reg_state lo_reg1 = *src_reg;
6120 struct bpf_reg_state *src_lo, *dst_lo;
6122 dst_lo = &lo_reg0;
6123 src_lo = &lo_reg1;
6124 coerce_reg_to_size(dst_lo, 4);
6125 coerce_reg_to_size(src_lo, 4);
6127 if (dst_reg->type == SCALAR_VALUE &&
6128 src_reg->type == SCALAR_VALUE) {
6129 if (tnum_is_const(src_reg->var_off) ||
6130 (is_jmp32 && tnum_is_const(src_lo->var_off)))
6131 reg_set_min_max(&other_branch_regs[insn->dst_reg],
6132 dst_reg,
6133 is_jmp32
6134 ? src_lo->var_off.value
6135 : src_reg->var_off.value,
6136 opcode, is_jmp32);
6137 else if (tnum_is_const(dst_reg->var_off) ||
6138 (is_jmp32 && tnum_is_const(dst_lo->var_off)))
6139 reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
6140 src_reg,
6141 is_jmp32
6142 ? dst_lo->var_off.value
6143 : dst_reg->var_off.value,
6144 opcode, is_jmp32);
6145 else if (!is_jmp32 &&
6146 (opcode == BPF_JEQ || opcode == BPF_JNE))
6147 /* Comparing for equality, we can combine knowledge */
6148 reg_combine_min_max(&other_branch_regs[insn->src_reg],
6149 &other_branch_regs[insn->dst_reg],
6150 src_reg, dst_reg, opcode);
6152 } else if (dst_reg->type == SCALAR_VALUE) {
6153 reg_set_min_max(&other_branch_regs[insn->dst_reg],
6154 dst_reg, insn->imm, opcode, is_jmp32);
6157 /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
6158 * NOTE: the optimizations below are related to pointer comparisons,
6159 * which will never be JMP32.
6161 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
6162 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
6163 reg_type_may_be_null(dst_reg->type)) {
6164 /* Mark all identical registers in each branch as either
6165 * safe or unknown depending on the R == 0 or R != 0 condition.
6167 mark_ptr_or_null_regs(this_branch, insn->dst_reg,
6168 opcode == BPF_JNE);
6169 mark_ptr_or_null_regs(other_branch, insn->dst_reg,
6170 opcode == BPF_JEQ);
6171 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
6172 this_branch, other_branch) &&
6173 is_pointer_value(env, insn->dst_reg)) {
6174 verbose(env, "R%d pointer comparison prohibited\n",
6175 insn->dst_reg);
6176 return -EACCES;
6178 if (env->log.level & BPF_LOG_LEVEL)
6179 print_verifier_state(env, this_branch->frame[this_branch->curframe]);
6180 return 0;
6183 /* verify BPF_LD_IMM64 instruction */
6184 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
6186 struct bpf_insn_aux_data *aux = cur_aux(env);
6187 struct bpf_reg_state *regs = cur_regs(env);
6188 struct bpf_map *map;
6189 int err;
6191 if (BPF_SIZE(insn->code) != BPF_DW) {
6192 verbose(env, "invalid BPF_LD_IMM insn\n");
6193 return -EINVAL;
6195 if (insn->off != 0) {
6196 verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
6197 return -EINVAL;
6200 err = check_reg_arg(env, insn->dst_reg, DST_OP);
6201 if (err)
6202 return err;
6204 if (insn->src_reg == 0) {
6205 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
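/* e.g. a 64-bit immediate 0x1122334455667788 is encoded with
* insn->imm = 0x55667788 (low 32 bits) and (insn + 1)->imm = 0x11223344
* (high 32 bits), and is reassembled here.
*/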
6207 regs[insn->dst_reg].type = SCALAR_VALUE;
6208 __mark_reg_known(&regs[insn->dst_reg], imm);
6209 return 0;
6212 map = env->used_maps[aux->map_index];
6213 mark_reg_known_zero(env, regs, insn->dst_reg);
6214 regs[insn->dst_reg].map_ptr = map;
6216 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
6217 regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
6218 regs[insn->dst_reg].off = aux->map_off;
6219 if (map_value_has_spin_lock(map))
6220 regs[insn->dst_reg].id = ++env->id_gen;
6221 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
6222 regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
6223 } else {
6224 verbose(env, "bpf verifier is misconfigured\n");
6225 return -EINVAL;
6228 return 0;
6231 static bool may_access_skb(enum bpf_prog_type type)
6233 switch (type) {
6234 case BPF_PROG_TYPE_SOCKET_FILTER:
6235 case BPF_PROG_TYPE_SCHED_CLS:
6236 case BPF_PROG_TYPE_SCHED_ACT:
6237 return true;
6238 default:
6239 return false;
6243 /* verify safety of LD_ABS|LD_IND instructions:
6244 * - they can only appear in the programs where ctx == skb
6245 * - since they are wrappers of function calls, they scratch R1-R5 registers,
6246 * preserve R6-R9, and store return value into R0
6248 * Implicit input:
6249 * ctx == skb == R6 == CTX
6251 * Explicit input:
6252 * SRC == any register
6253 * IMM == 32-bit immediate
6255 * Output:
6256 * R0 - 8/16/32-bit skb data converted to cpu endianness
6258 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
6260 struct bpf_reg_state *regs = cur_regs(env);
6261 u8 mode = BPF_MODE(insn->code);
6262 int i, err;
6264 if (!may_access_skb(env->prog->type)) {
6265 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
6266 return -EINVAL;
6269 if (!env->ops->gen_ld_abs) {
6270 verbose(env, "bpf verifier is misconfigured\n");
6271 return -EINVAL;
6274 if (env->subprog_cnt > 1) {
6275 /* when a program has an LD_ABS insn, JITs and the interpreter assume
6276 * that r1 == ctx == skb, which is not the case for callees
6277 * that can have arbitrary arguments. It's problematic
6278 * for the main prog as well, since JITs would need to analyze
6279 * all functions in order to make proper register save/restore
6280 * decisions in the main prog. Hence disallow LD_ABS with calls.
6282 verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
6283 return -EINVAL;
6286 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
6287 BPF_SIZE(insn->code) == BPF_DW ||
6288 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
6289 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
6290 return -EINVAL;
6293 /* check whether implicit source operand (register R6) is readable */
6294 err = check_reg_arg(env, BPF_REG_6, SRC_OP);
6295 if (err)
6296 return err;
6298 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
6299 * gen_ld_abs() may terminate the program at runtime, leading to
6300 * reference leak.
6302 err = check_reference_leak(env);
6303 if (err) {
6304 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
6305 return err;
6308 if (env->cur_state->active_spin_lock) {
6309 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
6310 return -EINVAL;
6313 if (regs[BPF_REG_6].type != PTR_TO_CTX) {
6314 verbose(env,
6315 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
6316 return -EINVAL;
6319 if (mode == BPF_IND) {
6320 /* check explicit source operand */
6321 err = check_reg_arg(env, insn->src_reg, SRC_OP);
6322 if (err)
6323 return err;
6326 /* reset caller saved regs to unreadable */
6327 for (i = 0; i < CALLER_SAVED_REGS; i++) {
6328 mark_reg_not_init(env, regs, caller_saved[i]);
6329 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6332 /* mark destination R0 register as readable, since it contains
6333 * the value fetched from the packet.
6334 * Already marked as written above.
6336 mark_reg_unknown(env, regs, BPF_REG_0);
6337 /* ld_abs loads at most 32 bits of skb data; record that this insn defines only the low 32 bits of R0. */
6338 regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
6339 return 0;
6342 static int check_return_code(struct bpf_verifier_env *env)
6344 struct tnum enforce_attach_type_range = tnum_unknown;
6345 struct bpf_reg_state *reg;
6346 struct tnum range = tnum_range(0, 1);
6348 switch (env->prog->type) {
6349 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
6350 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
6351 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG)
6352 range = tnum_range(1, 1);
6353 break;
6354 case BPF_PROG_TYPE_CGROUP_SKB:
6355 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
6356 range = tnum_range(0, 3);
6357 enforce_attach_type_range = tnum_range(2, 3);
6359 break;
6360 case BPF_PROG_TYPE_CGROUP_SOCK:
6361 case BPF_PROG_TYPE_SOCK_OPS:
6362 case BPF_PROG_TYPE_CGROUP_DEVICE:
6363 case BPF_PROG_TYPE_CGROUP_SYSCTL:
6364 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
6365 break;
6366 case BPF_PROG_TYPE_RAW_TRACEPOINT:
6367 if (!env->prog->aux->attach_btf_id)
6368 return 0;
6369 range = tnum_const(0);
6370 break;
6371 default:
6372 return 0;
6375 reg = cur_regs(env) + BPF_REG_0;
6376 if (reg->type != SCALAR_VALUE) {
6377 verbose(env, "At program exit the register R0 is not a known value (%s)\n",
6378 reg_type_str[reg->type]);
6379 return -EINVAL;
6382 if (!tnum_in(range, reg->var_off)) {
6383 char tn_buf[48];
6385 verbose(env, "At program exit the register R0 ");
6386 if (!tnum_is_unknown(reg->var_off)) {
6387 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6388 verbose(env, "has value %s", tn_buf);
6389 } else {
6390 verbose(env, "has unknown scalar value");
6392 tnum_strn(tn_buf, sizeof(tn_buf), range);
6393 verbose(env, " should have been in %s\n", tn_buf);
6394 return -EINVAL;
6397 if (!tnum_is_unknown(enforce_attach_type_range) &&
6398 tnum_in(enforce_attach_type_range, reg->var_off))
6399 env->prog->enforce_expected_attach_type = 1;
6400 return 0;
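/* Example of the check above: a BPF_PROG_TYPE_CGROUP_SOCK program whose R0
* might be 2 at exit falls outside the allowed tnum_range(0, 1) and is
* rejected, while a CGROUP_SKB egress program may return 0-3, with a return
* value provably in [2, 3] additionally setting enforce_expected_attach_type.
*/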
6403 /* non-recursive DFS pseudo code
6404 * 1 procedure DFS-iterative(G,v):
6405 * 2 label v as discovered
6406 * 3 let S be a stack
6407 * 4 S.push(v)
6408 * 5 while S is not empty
6409 * 6 t <- S.pop()
6410 * 7 if t is what we're looking for:
6411 * 8 return t
6412 * 9 for all edges e in G.adjacentEdges(t) do
6413 * 10 if edge e is already labelled
6414 * 11 continue with the next edge
6415 * 12 w <- G.adjacentVertex(t,e)
6416 * 13 if vertex w is not discovered and not explored
6417 * 14 label e as tree-edge
6418 * 15 label w as discovered
6419 * 16 S.push(w)
6420 * 17 continue at 5
6421 * 18 else if vertex w is discovered
6422 * 19 label e as back-edge
6423 * 20 else
6424 * 21 // vertex w is explored
6425 * 22 label e as forward- or cross-edge
6426 * 23 label t as explored
6427 * 24 S.pop()
6429 * convention:
6430 * 0x10 - discovered
6431 * 0x11 - discovered and fall-through edge labelled
6432 * 0x12 - discovered and fall-through and branch edges labelled
6433 * 0x20 - explored
6436 enum {
6437 DISCOVERED = 0x10,
6438 EXPLORED = 0x20,
6439 FALLTHROUGH = 1,
6440 BRANCH = 2,
6443 static u32 state_htab_size(struct bpf_verifier_env *env)
6445 return env->prog->len;
6448 static struct bpf_verifier_state_list **explored_state(
6449 struct bpf_verifier_env *env,
6450 int idx)
6452 struct bpf_verifier_state *cur = env->cur_state;
6453 struct bpf_func_state *state = cur->frame[cur->curframe];
6455 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
6458 static void init_explored_state(struct bpf_verifier_env *env, int idx)
6460 env->insn_aux_data[idx].prune_point = true;
6463 /* t, w, e - match pseudo-code above:
6464 * t - index of current instruction
6465 * w - next instruction
6466 * e - edge
6468 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
6469 bool loop_ok)
6471 int *insn_stack = env->cfg.insn_stack;
6472 int *insn_state = env->cfg.insn_state;
6474 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
6475 return 0;
6477 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
6478 return 0;
6480 if (w < 0 || w >= env->prog->len) {
6481 verbose_linfo(env, t, "%d: ", t);
6482 verbose(env, "jump out of range from insn %d to %d\n", t, w);
6483 return -EINVAL;
6486 if (e == BRANCH)
6487 /* mark branch target for state pruning */
6488 init_explored_state(env, w);
6490 if (insn_state[w] == 0) {
6491 /* tree-edge */
6492 insn_state[t] = DISCOVERED | e;
6493 insn_state[w] = DISCOVERED;
6494 if (env->cfg.cur_stack >= env->prog->len)
6495 return -E2BIG;
6496 insn_stack[env->cfg.cur_stack++] = w;
6497 return 1;
6498 } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
6499 if (loop_ok && env->allow_ptr_leaks)
6500 return 0;
6501 verbose_linfo(env, t, "%d: ", t);
6502 verbose_linfo(env, w, "%d: ", w);
6503 verbose(env, "back-edge from insn %d to %d\n", t, w);
6504 return -EINVAL;
6505 } else if (insn_state[w] == EXPLORED) {
6506 /* forward- or cross-edge */
6507 insn_state[t] = DISCOVERED | e;
6508 } else {
6509 verbose(env, "insn state internal bug\n");
6510 return -EFAULT;
6512 return 0;
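/* Note: back-edges reached through unconditional or conditional jumps are
* tolerated here (loop_ok) only for privileged loaders (allow_ptr_leaks);
* such loops are then bounded or rejected later during the path walk, see
* the infinite loop detection in is_state_visited().
*/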
6515 /* non-recursive depth-first-search to detect loops in BPF program
6516 * loop == back-edge in directed graph
6518 static int check_cfg(struct bpf_verifier_env *env)
6520 struct bpf_insn *insns = env->prog->insnsi;
6521 int insn_cnt = env->prog->len;
6522 int *insn_stack, *insn_state;
6523 int ret = 0;
6524 int i, t;
6526 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
6527 if (!insn_state)
6528 return -ENOMEM;
6530 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
6531 if (!insn_stack) {
6532 kvfree(insn_state);
6533 return -ENOMEM;
6536 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
6537 insn_stack[0] = 0; /* 0 is the first instruction */
6538 env->cfg.cur_stack = 1;
6540 peek_stack:
6541 if (env->cfg.cur_stack == 0)
6542 goto check_state;
6543 t = insn_stack[env->cfg.cur_stack - 1];
6545 if (BPF_CLASS(insns[t].code) == BPF_JMP ||
6546 BPF_CLASS(insns[t].code) == BPF_JMP32) {
6547 u8 opcode = BPF_OP(insns[t].code);
6549 if (opcode == BPF_EXIT) {
6550 goto mark_explored;
6551 } else if (opcode == BPF_CALL) {
6552 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
6553 if (ret == 1)
6554 goto peek_stack;
6555 else if (ret < 0)
6556 goto err_free;
6557 if (t + 1 < insn_cnt)
6558 init_explored_state(env, t + 1);
6559 if (insns[t].src_reg == BPF_PSEUDO_CALL) {
6560 init_explored_state(env, t);
6561 ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
6562 env, false);
6563 if (ret == 1)
6564 goto peek_stack;
6565 else if (ret < 0)
6566 goto err_free;
6568 } else if (opcode == BPF_JA) {
6569 if (BPF_SRC(insns[t].code) != BPF_K) {
6570 ret = -EINVAL;
6571 goto err_free;
6573 /* unconditional jump with single edge */
6574 ret = push_insn(t, t + insns[t].off + 1,
6575 FALLTHROUGH, env, true);
6576 if (ret == 1)
6577 goto peek_stack;
6578 else if (ret < 0)
6579 goto err_free;
6580 /* unconditional jmp is not a good pruning point,
6581 * but it's marked, since backtracking needs
6582 * to record jmp history in is_state_visited().
6584 init_explored_state(env, t + insns[t].off + 1);
6585 /* tell verifier to check for equivalent states
6586 * after every call and jump
6588 if (t + 1 < insn_cnt)
6589 init_explored_state(env, t + 1);
6590 } else {
6591 /* conditional jump with two edges */
6592 init_explored_state(env, t);
6593 ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
6594 if (ret == 1)
6595 goto peek_stack;
6596 else if (ret < 0)
6597 goto err_free;
6599 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
6600 if (ret == 1)
6601 goto peek_stack;
6602 else if (ret < 0)
6603 goto err_free;
6605 } else {
6606 /* all other non-branch instructions with single
6607 * fall-through edge
6609 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
6610 if (ret == 1)
6611 goto peek_stack;
6612 else if (ret < 0)
6613 goto err_free;
6616 mark_explored:
6617 insn_state[t] = EXPLORED;
6618 if (env->cfg.cur_stack-- <= 0) {
6619 verbose(env, "pop stack internal bug\n");
6620 ret = -EFAULT;
6621 goto err_free;
6623 goto peek_stack;
6625 check_state:
6626 for (i = 0; i < insn_cnt; i++) {
6627 if (insn_state[i] != EXPLORED) {
6628 verbose(env, "unreachable insn %d\n", i);
6629 ret = -EINVAL;
6630 goto err_free;
6633 ret = 0; /* cfg looks good */
6635 err_free:
6636 kvfree(insn_state);
6637 kvfree(insn_stack);
6638 env->cfg.insn_state = env->cfg.insn_stack = NULL;
6639 return ret;
6642 /* The minimum supported BTF func info size */
6643 #define MIN_BPF_FUNCINFO_SIZE 8
6644 #define MAX_FUNCINFO_REC_SIZE 252
6646 static int check_btf_func(struct bpf_verifier_env *env,
6647 const union bpf_attr *attr,
6648 union bpf_attr __user *uattr)
6650 u32 i, nfuncs, urec_size, min_size;
6651 u32 krec_size = sizeof(struct bpf_func_info);
6652 struct bpf_func_info *krecord;
6653 struct bpf_func_info_aux *info_aux = NULL;
6654 const struct btf_type *type;
6655 struct bpf_prog *prog;
6656 const struct btf *btf;
6657 void __user *urecord;
6658 u32 prev_offset = 0;
6659 int ret = 0;
6661 nfuncs = attr->func_info_cnt;
6662 if (!nfuncs)
6663 return 0;
6665 if (nfuncs != env->subprog_cnt) {
6666 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
6667 return -EINVAL;
6670 urec_size = attr->func_info_rec_size;
6671 if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
6672 urec_size > MAX_FUNCINFO_REC_SIZE ||
6673 urec_size % sizeof(u32)) {
6674 verbose(env, "invalid func info rec size %u\n", urec_size);
6675 return -EINVAL;
6678 prog = env->prog;
6679 btf = prog->aux->btf;
6681 urecord = u64_to_user_ptr(attr->func_info);
6682 min_size = min_t(u32, krec_size, urec_size);
6684 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
6685 if (!krecord)
6686 return -ENOMEM;
6687 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
6688 if (!info_aux)
6689 goto err_free;
6691 for (i = 0; i < nfuncs; i++) {
6692 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
6693 if (ret) {
6694 if (ret == -E2BIG) {
6695 verbose(env, "nonzero tailing record in func info");
6696 /* set the size kernel expects so loader can zero
6697 * out the rest of the record.
6699 if (put_user(min_size, &uattr->func_info_rec_size))
6700 ret = -EFAULT;
6702 goto err_free;
6705 if (copy_from_user(&krecord[i], urecord, min_size)) {
6706 ret = -EFAULT;
6707 goto err_free;
6710 /* check insn_off */
6711 if (i == 0) {
6712 if (krecord[i].insn_off) {
6713 verbose(env,
6714 "nonzero insn_off %u for the first func info record",
6715 krecord[i].insn_off);
6716 ret = -EINVAL;
6717 goto err_free;
6719 } else if (krecord[i].insn_off <= prev_offset) {
6720 verbose(env,
6721 "same or smaller insn offset (%u) than previous func info record (%u)",
6722 krecord[i].insn_off, prev_offset);
6723 ret = -EINVAL;
6724 goto err_free;
6727 if (env->subprog_info[i].start != krecord[i].insn_off) {
6728 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
6729 ret = -EINVAL;
6730 goto err_free;
6733 /* check type_id */
6734 type = btf_type_by_id(btf, krecord[i].type_id);
6735 if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) {
6736 verbose(env, "invalid type id %d in func info",
6737 krecord[i].type_id);
6738 ret = -EINVAL;
6739 goto err_free;
6741 prev_offset = krecord[i].insn_off;
6742 urecord += urec_size;
6745 prog->aux->func_info = krecord;
6746 prog->aux->func_info_cnt = nfuncs;
6747 prog->aux->func_info_aux = info_aux;
6748 return 0;
6750 err_free:
6751 kvfree(krecord);
6752 kfree(info_aux);
6753 return ret;
6756 static void adjust_btf_func(struct bpf_verifier_env *env)
6758 struct bpf_prog_aux *aux = env->prog->aux;
6759 int i;
6761 if (!aux->func_info)
6762 return;
6764 for (i = 0; i < env->subprog_cnt; i++)
6765 aux->func_info[i].insn_off = env->subprog_info[i].start;
6768 #define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \
6769 sizeof(((struct bpf_line_info *)(0))->line_col))
6770 #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
6772 static int check_btf_line(struct bpf_verifier_env *env,
6773 const union bpf_attr *attr,
6774 union bpf_attr __user *uattr)
6776 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
6777 struct bpf_subprog_info *sub;
6778 struct bpf_line_info *linfo;
6779 struct bpf_prog *prog;
6780 const struct btf *btf;
6781 void __user *ulinfo;
6782 int err;
6784 nr_linfo = attr->line_info_cnt;
6785 if (!nr_linfo)
6786 return 0;
6788 rec_size = attr->line_info_rec_size;
6789 if (rec_size < MIN_BPF_LINEINFO_SIZE ||
6790 rec_size > MAX_LINEINFO_REC_SIZE ||
6791 rec_size & (sizeof(u32) - 1))
6792 return -EINVAL;
6794 /* Need to zero it in case userspace passes in a
6795 * smaller bpf_line_info object.
6797 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
6798 GFP_KERNEL | __GFP_NOWARN);
6799 if (!linfo)
6800 return -ENOMEM;
6802 prog = env->prog;
6803 btf = prog->aux->btf;
6805 s = 0;
6806 sub = env->subprog_info;
6807 ulinfo = u64_to_user_ptr(attr->line_info);
6808 expected_size = sizeof(struct bpf_line_info);
6809 ncopy = min_t(u32, expected_size, rec_size);
6810 for (i = 0; i < nr_linfo; i++) {
6811 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
6812 if (err) {
6813 if (err == -E2BIG) {
6814 verbose(env, "nonzero tailing record in line_info");
6815 if (put_user(expected_size,
6816 &uattr->line_info_rec_size))
6817 err = -EFAULT;
6819 goto err_free;
6822 if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
6823 err = -EFAULT;
6824 goto err_free;
6828 * Check insn_off to ensure
6829 * 1) strictly increasing AND
6830 * 2) bounded by prog->len
6832 * The linfo[0].insn_off == 0 check logically falls into
6833 * the later "missing bpf_line_info for func..." case
6834 * because the first linfo[0].insn_off must also be the
6835 * start of the first subprog, and the first subprog must
6836 * have subprog_info[0].start == 0.
6838 if ((i && linfo[i].insn_off <= prev_offset) ||
6839 linfo[i].insn_off >= prog->len) {
6840 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
6841 i, linfo[i].insn_off, prev_offset,
6842 prog->len);
6843 err = -EINVAL;
6844 goto err_free;
6847 if (!prog->insnsi[linfo[i].insn_off].code) {
6848 verbose(env,
6849 "Invalid insn code at line_info[%u].insn_off\n",
6851 err = -EINVAL;
6852 goto err_free;
6855 if (!btf_name_by_offset(btf, linfo[i].line_off) ||
6856 !btf_name_by_offset(btf, linfo[i].file_name_off)) {
6857 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
6858 err = -EINVAL;
6859 goto err_free;
6862 if (s != env->subprog_cnt) {
6863 if (linfo[i].insn_off == sub[s].start) {
6864 sub[s].linfo_idx = i;
6865 s++;
6866 } else if (sub[s].start < linfo[i].insn_off) {
6867 verbose(env, "missing bpf_line_info for func#%u\n", s);
6868 err = -EINVAL;
6869 goto err_free;
6873 prev_offset = linfo[i].insn_off;
6874 ulinfo += rec_size;
6877 if (s != env->subprog_cnt) {
6878 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
6879 env->subprog_cnt - s, s);
6880 err = -EINVAL;
6881 goto err_free;
6884 prog->aux->linfo = linfo;
6885 prog->aux->nr_linfo = nr_linfo;
6887 return 0;
6889 err_free:
6890 kvfree(linfo);
6891 return err;
6894 static int check_btf_info(struct bpf_verifier_env *env,
6895 const union bpf_attr *attr,
6896 union bpf_attr __user *uattr)
6898 struct btf *btf;
6899 int err;
6901 if (!attr->func_info_cnt && !attr->line_info_cnt)
6902 return 0;
6904 btf = btf_get_by_fd(attr->prog_btf_fd);
6905 if (IS_ERR(btf))
6906 return PTR_ERR(btf);
6907 env->prog->aux->btf = btf;
6909 err = check_btf_func(env, attr, uattr);
6910 if (err)
6911 return err;
6913 err = check_btf_line(env, attr, uattr);
6914 if (err)
6915 return err;
6917 return 0;
6920 /* check %cur's range satisfies %old's */
6921 static bool range_within(struct bpf_reg_state *old,
6922 struct bpf_reg_state *cur)
6924 return old->umin_value <= cur->umin_value &&
6925 old->umax_value >= cur->umax_value &&
6926 old->smin_value <= cur->smin_value &&
6927 old->smax_value >= cur->smax_value;
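/* E.g. an already-verified (old) range [0, 10] is satisfied by a current
* range [2, 5] (return true) but not by [2, 15] (return false), since the
* current state could then reach values the old state never proved safe.
*/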
6930 /* Maximum number of register states that can exist at once */
6931 #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
6932 struct idpair {
6933 u32 old;
6934 u32 cur;
6937 /* If in the old state two registers had the same id, then they need to have
6938 * the same id in the new state as well. But that id could be different from
6939 * the old state, so we need to track the mapping from old to new ids.
6940 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
6941 * regs with old id 5 must also have new id 9 for the new state to be safe. But
6942 * regs with a different old id could still have new id 9, we don't care about
6943 * that.
6944 * So we look through our idmap to see if this old id has been seen before. If
6945 * so, we require the new id to match; otherwise, we add the id pair to the map.
6947 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
6949 unsigned int i;
6951 for (i = 0; i < ID_MAP_SIZE; i++) {
6952 if (!idmap[i].old) {
6953 /* Reached an empty slot; haven't seen this id before */
6954 idmap[i].old = old_id;
6955 idmap[i].cur = cur_id;
6956 return true;
6958 if (idmap[i].old == old_id)
6959 return idmap[i].cur == cur_id;
6961 /* We ran out of idmap slots, which should be impossible */
6962 WARN_ON_ONCE(1);
6963 return false;
6966 static void clean_func_state(struct bpf_verifier_env *env,
6967 struct bpf_func_state *st)
6969 enum bpf_reg_liveness live;
6970 int i, j;
6972 for (i = 0; i < BPF_REG_FP; i++) {
6973 live = st->regs[i].live;
6974 /* liveness must not touch this register anymore */
6975 st->regs[i].live |= REG_LIVE_DONE;
6976 if (!(live & REG_LIVE_READ))
6977 /* since the register is unused, clear its state
6978 * to make further comparison simpler
6980 __mark_reg_not_init(&st->regs[i]);
6983 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
6984 live = st->stack[i].spilled_ptr.live;
6985 /* liveness must not touch this stack slot anymore */
6986 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
6987 if (!(live & REG_LIVE_READ)) {
6988 __mark_reg_not_init(&st->stack[i].spilled_ptr);
6989 for (j = 0; j < BPF_REG_SIZE; j++)
6990 st->stack[i].slot_type[j] = STACK_INVALID;
6995 static void clean_verifier_state(struct bpf_verifier_env *env,
6996 struct bpf_verifier_state *st)
6998 int i;
7000 if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
7001 /* all regs in this state in all frames were already marked */
7002 return;
7004 for (i = 0; i <= st->curframe; i++)
7005 clean_func_state(env, st->frame[i]);
7008 /* the parentage chains form a tree.
7009 * the verifier states are added to state lists at given insn and
7010 * pushed into state stack for future exploration.
7011 * when the verifier reaches bpf_exit insn some of the verifier states
7012 * stored in the state lists have their final liveness state already,
7013 * but a lot of states will get revised from liveness point of view when
7014 * the verifier explores other branches.
7015 * Example:
7016 * 1: r0 = 1
7017 * 2: if r1 == 100 goto pc+1
7018 * 3: r0 = 2
7019 * 4: exit
7020 * when the verifier reaches exit insn the register r0 in the state list of
7021 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
7022 * of insn 2 and goes exploring further. At the insn 4 it will walk the
7023 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
7025 * Since the verifier pushes the branch states as it sees them while exploring
7026 * the program the condition of walking the branch instruction for the second
7027 * time means that all states below this branch were already explored and
7028 * their final liveness marks are already propagated.
7029 * Hence when the verifier completes the search of state list in is_state_visited()
7030 * we can call this clean_live_states() function to mark all liveness states
7031 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
7032 * will not be used.
7033 * This function also clears the registers and stack for states that !READ
7034 * to simplify state merging.
7036 * An important note here is that walking the same branch instruction in the
7037 * callee doesn't mean that the states are DONE. The verifier has to compare
7038 * the callsites
7040 static void clean_live_states(struct bpf_verifier_env *env, int insn,
7041 struct bpf_verifier_state *cur)
7043 struct bpf_verifier_state_list *sl;
7044 int i;
7046 sl = *explored_state(env, insn);
7047 while (sl) {
7048 if (sl->state.branches)
7049 goto next;
7050 if (sl->state.insn_idx != insn ||
7051 sl->state.curframe != cur->curframe)
7052 goto next;
7053 for (i = 0; i <= cur->curframe; i++)
7054 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
7055 goto next;
7056 clean_verifier_state(env, &sl->state);
7057 next:
7058 sl = sl->next;
7062 /* Returns true if (rold safe implies rcur safe) */
7063 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
7064 struct idpair *idmap)
7066 bool equal;
7068 if (!(rold->live & REG_LIVE_READ))
7069 /* explored state didn't use this */
7070 return true;
7072 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
7074 if (rold->type == PTR_TO_STACK)
7075 /* two stack pointers are equal only if they're pointing to
7076 * the same stack frame, since fp-8 in foo != fp-8 in bar
7078 return equal && rold->frameno == rcur->frameno;
7080 if (equal)
7081 return true;
7083 if (rold->type == NOT_INIT)
7084 /* explored state can't have used this */
7085 return true;
7086 if (rcur->type == NOT_INIT)
7087 return false;
7088 switch (rold->type) {
7089 case SCALAR_VALUE:
7090 if (rcur->type == SCALAR_VALUE) {
7091 if (!rold->precise && !rcur->precise)
7092 return true;
7093 /* new val must satisfy old val knowledge */
7094 return range_within(rold, rcur) &&
7095 tnum_in(rold->var_off, rcur->var_off);
7096 } else {
7097 /* We're trying to use a pointer in place of a scalar.
7098 * Even if the scalar was unbounded, this could lead to
7099 * pointer leaks because scalars are allowed to leak
7100 * while pointers are not. We could make this safe in
7101 * special cases if root is calling us, but it's
7102 * probably not worth the hassle.
7104 return false;
7106 case PTR_TO_MAP_VALUE:
7107 /* If the new min/max/var_off satisfy the old ones and
7108 * everything else matches, we are OK.
7109 * 'id' is not compared, since it's only used for maps with
7110 * bpf_spin_lock inside map element and in such cases if
7111 * the rest of the prog is valid for one map element then
7112 * it's valid for all map elements regardless of the key
7113 * used in bpf_map_lookup()
7115 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
7116 range_within(rold, rcur) &&
7117 tnum_in(rold->var_off, rcur->var_off);
7118 case PTR_TO_MAP_VALUE_OR_NULL:
7119 /* a PTR_TO_MAP_VALUE could be safe to use as a
7120 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
7121 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
7122 * checked, doing so could have affected others with the same
7123 * id, and we can't check for that because we lost the id when
7124 * we converted to a PTR_TO_MAP_VALUE.
7126 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
7127 return false;
7128 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
7129 return false;
7130 /* Check our ids match any regs they're supposed to */
7131 return check_ids(rold->id, rcur->id, idmap);
7132 case PTR_TO_PACKET_META:
7133 case PTR_TO_PACKET:
7134 if (rcur->type != rold->type)
7135 return false;
7136 /* We must have at least as much range as the old ptr
7137 * did, so that any accesses which were safe before are
7138 * still safe. This is true even if old range < old off,
7139 * since someone could have accessed through (ptr - k), or
7140 * even done ptr -= k in a register, to get a safe access.
7142 if (rold->range > rcur->range)
7143 return false;
7144 /* If the offsets don't match, we can't trust our alignment;
7145 * nor can we be sure that we won't fall out of range.
7147 if (rold->off != rcur->off)
7148 return false;
7149 /* id relations must be preserved */
7150 if (rold->id && !check_ids(rold->id, rcur->id, idmap))
7151 return false;
7152 /* new val must satisfy old val knowledge */
7153 return range_within(rold, rcur) &&
7154 tnum_in(rold->var_off, rcur->var_off);
7155 case PTR_TO_CTX:
7156 case CONST_PTR_TO_MAP:
7157 case PTR_TO_PACKET_END:
7158 case PTR_TO_FLOW_KEYS:
7159 case PTR_TO_SOCKET:
7160 case PTR_TO_SOCKET_OR_NULL:
7161 case PTR_TO_SOCK_COMMON:
7162 case PTR_TO_SOCK_COMMON_OR_NULL:
7163 case PTR_TO_TCP_SOCK:
7164 case PTR_TO_TCP_SOCK_OR_NULL:
7165 case PTR_TO_XDP_SOCK:
7166 /* Only valid matches are exact, which memcmp() above
7167 * would have accepted
7169 default:
7170 /* Don't know what's going on, just say it's not safe */
7171 return false;
7174 /* Shouldn't get here; if we do, say it's not safe */
7175 WARN_ON_ONCE(1);
7176 return false;
7179 static bool stacksafe(struct bpf_func_state *old,
7180 struct bpf_func_state *cur,
7181 struct idpair *idmap)
7183 int i, spi;
7185 /* walk slots of the explored stack and ignore any additional
7186 * slots in the current stack, since explored(safe) state
7187 * didn't use them
7189 for (i = 0; i < old->allocated_stack; i++) {
7190 spi = i / BPF_REG_SIZE;
7192 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
7193 i += BPF_REG_SIZE - 1;
7194 /* explored state didn't use this */
7195 continue;
7198 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
7199 continue;
7201 /* explored stack has more populated slots than current stack
7202 * and these slots were used
7204 if (i >= cur->allocated_stack)
7205 return false;
7207 /* if old state was safe with misc data in the stack
7208 * it will be safe with zero-initialized stack.
7209 * The opposite is not true
7211 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
7212 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
7213 continue;
7214 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
7215 cur->stack[spi].slot_type[i % BPF_REG_SIZE])
7216 /* Ex: old explored (safe) state has STACK_SPILL in
7217 * this stack slot, but current has STACK_MISC ->
7218 * these verifier states are not equivalent;
7219 * return false to continue verification of this path
7221 return false;
7222 if (i % BPF_REG_SIZE)
7223 continue;
7224 if (old->stack[spi].slot_type[0] != STACK_SPILL)
7225 continue;
7226 if (!regsafe(&old->stack[spi].spilled_ptr,
7227 &cur->stack[spi].spilled_ptr,
7228 idmap))
7229 /* when explored and current stack slot are both storing
7230 * spilled registers, check that the stored pointer types
7231 * are the same as well.
7232 * Ex: explored safe path could have stored
7233 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
7234 * but current path has stored:
7235 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
7236 * such verifier states are not equivalent.
7237 * return false to continue verification of this path
7239 return false;
7241 return true;
7244 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
7246 if (old->acquired_refs != cur->acquired_refs)
7247 return false;
7248 return !memcmp(old->refs, cur->refs,
7249 sizeof(*old->refs) * old->acquired_refs);
7252 /* compare two verifier states
7254 * all states stored in state_list are known to be valid, since
7255 * verifier reached 'bpf_exit' instruction through them
7257 * this function is called when verifier exploring different branches of
7258 * execution popped from the state stack. If it sees an old state that has
7259 * more strict register state and more strict stack state then this execution
7260 * branch doesn't need to be explored further, since verifier already
7261 * concluded that more strict state leads to valid finish.
7263 * Therefore two states are equivalent if register state is more conservative
7264 * and explored stack state is more conservative than the current one.
7265 * Example:
7266 * explored current
7267 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
7268 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
7270 * In other words if current stack state (one being explored) has more
7271 * valid slots than old one that already passed validation, it means
7272 * the verifier can stop exploring and conclude that current state is valid too
7274 * Similarly with registers. If explored state has register type as invalid
7275 * whereas register type in current state is meaningful, it means that
7276 * the current state will reach 'bpf_exit' instruction safely
7278 static bool func_states_equal(struct bpf_func_state *old,
7279 struct bpf_func_state *cur)
7281 struct idpair *idmap;
7282 bool ret = false;
7283 int i;
7285 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
7286 /* If we failed to allocate the idmap, just say it's not safe */
7287 if (!idmap)
7288 return false;
7290 for (i = 0; i < MAX_BPF_REG; i++) {
7291 if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
7292 goto out_free;
7295 if (!stacksafe(old, cur, idmap))
7296 goto out_free;
7298 if (!refsafe(old, cur))
7299 goto out_free;
7300 ret = true;
7301 out_free:
7302 kfree(idmap);
7303 return ret;
7306 static bool states_equal(struct bpf_verifier_env *env,
7307 struct bpf_verifier_state *old,
7308 struct bpf_verifier_state *cur)
7310 int i;
7312 if (old->curframe != cur->curframe)
7313 return false;
7315 /* Verification state from speculative execution simulation
7316 * must never prune a non-speculative execution one.
7318 if (old->speculative && !cur->speculative)
7319 return false;
7321 if (old->active_spin_lock != cur->active_spin_lock)
7322 return false;
7324 /* for states to be equal callsites have to be the same
7325 * and all frame states need to be equivalent
7327 for (i = 0; i <= old->curframe; i++) {
7328 if (old->frame[i]->callsite != cur->frame[i]->callsite)
7329 return false;
7330 if (!func_states_equal(old->frame[i], cur->frame[i]))
7331 return false;
7333 return true;
7336 /* Return 0 if no propagation happened. Return negative error code if error
7337 * happened. Otherwise, return the propagated bit.
7339 static int propagate_liveness_reg(struct bpf_verifier_env *env,
7340 struct bpf_reg_state *reg,
7341 struct bpf_reg_state *parent_reg)
7343 u8 parent_flag = parent_reg->live & REG_LIVE_READ;
7344 u8 flag = reg->live & REG_LIVE_READ;
7345 int err;
7347 /* When we get here, the read flags of PARENT_REG or REG could be any of
7348 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
7349 * for propagation if PARENT_REG already has the strongest REG_LIVE_READ64.
7351 if (parent_flag == REG_LIVE_READ64 ||
7352 /* Or if there is no read flag from REG. */
7353 !flag ||
7354 /* Or if the read flag from REG is the same as PARENT_REG. */
7355 parent_flag == flag)
7356 return 0;
7358 err = mark_reg_read(env, reg, parent_reg, flag);
7359 if (err)
7360 return err;
7362 return flag;
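/* E.g. if the parent register only carries REG_LIVE_READ32 but the child
* state read the register as a full 64-bit value (REG_LIVE_READ64), the
* stronger flag is propagated through mark_reg_read() and returned, so the
* caller can in turn call mark_insn_zext() on the parent register.
*/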
7365 /* A write screens off any subsequent reads; but write marks come from the
7366 * straight-line code between a state and its parent. When we arrive at an
7367 * equivalent state (jump target or such) we didn't arrive by the straight-line
7368 * code, so read marks in the state must propagate to the parent regardless
7369 * of the state's write marks. That's what 'parent == state->parent' comparison
7370 * in mark_reg_read() is for.
7372 static int propagate_liveness(struct bpf_verifier_env *env,
7373 const struct bpf_verifier_state *vstate,
7374 struct bpf_verifier_state *vparent)
7376 struct bpf_reg_state *state_reg, *parent_reg;
7377 struct bpf_func_state *state, *parent;
7378 int i, frame, err = 0;
7380 if (vparent->curframe != vstate->curframe) {
7381 WARN(1, "propagate_live: parent frame %d current frame %d\n",
7382 vparent->curframe, vstate->curframe);
7383 return -EFAULT;
7385 /* Propagate read liveness of registers... */
7386 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
7387 for (frame = 0; frame <= vstate->curframe; frame++) {
7388 parent = vparent->frame[frame];
7389 state = vstate->frame[frame];
7390 parent_reg = parent->regs;
7391 state_reg = state->regs;
7392 /* We don't need to worry about FP liveness, it's read-only */
7393 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
7394 err = propagate_liveness_reg(env, &state_reg[i],
7395 &parent_reg[i]);
7396 if (err < 0)
7397 return err;
7398 if (err == REG_LIVE_READ64)
7399 mark_insn_zext(env, &parent_reg[i]);
7402 /* Propagate stack slots. */
7403 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
7404 i < parent->allocated_stack / BPF_REG_SIZE; i++) {
7405 parent_reg = &parent->stack[i].spilled_ptr;
7406 state_reg = &state->stack[i].spilled_ptr;
7407 err = propagate_liveness_reg(env, state_reg,
7408 parent_reg);
7409 if (err < 0)
7410 return err;
7413 return 0;
7416 /* find precise scalars in the previous equivalent state and
7417 * propagate them into the current state
7419 static int propagate_precision(struct bpf_verifier_env *env,
7420 const struct bpf_verifier_state *old)
7422 struct bpf_reg_state *state_reg;
7423 struct bpf_func_state *state;
7424 int i, err = 0;
7426 state = old->frame[old->curframe];
7427 state_reg = state->regs;
7428 for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
7429 if (state_reg->type != SCALAR_VALUE ||
7430 !state_reg->precise)
7431 continue;
7432 if (env->log.level & BPF_LOG_LEVEL2)
7433 verbose(env, "propagating r%d\n", i);
7434 err = mark_chain_precision(env, i);
7435 if (err < 0)
7436 return err;
7439 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
7440 if (state->stack[i].slot_type[0] != STACK_SPILL)
7441 continue;
7442 state_reg = &state->stack[i].spilled_ptr;
7443 if (state_reg->type != SCALAR_VALUE ||
7444 !state_reg->precise)
7445 continue;
7446 if (env->log.level & BPF_LOG_LEVEL2)
7447 verbose(env, "propagating fp%d\n",
7448 (-i - 1) * BPF_REG_SIZE);
7449 err = mark_chain_precision_stack(env, i);
7450 if (err < 0)
7451 return err;
7453 return 0;
7456 static bool states_maybe_looping(struct bpf_verifier_state *old,
7457 struct bpf_verifier_state *cur)
7459 struct bpf_func_state *fold, *fcur;
7460 int i, fr = cur->curframe;
7462 if (old->curframe != fr)
7463 return false;
7465 fold = old->frame[fr];
7466 fcur = cur->frame[fr];
7467 for (i = 0; i < MAX_BPF_REG; i++)
7468 if (memcmp(&fold->regs[i], &fcur->regs[i],
7469 offsetof(struct bpf_reg_state, parent)))
7470 return false;
7471 return true;
7475 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
7477 struct bpf_verifier_state_list *new_sl;
7478 struct bpf_verifier_state_list *sl, **pprev;
7479 struct bpf_verifier_state *cur = env->cur_state, *new;
7480 int i, j, err, states_cnt = 0;
7481 bool add_new_state = env->test_state_freq ? true : false;
7483 cur->last_insn_idx = env->prev_insn_idx;
7484 if (!env->insn_aux_data[insn_idx].prune_point)
7485 /* this 'insn_idx' instruction wasn't marked, so we will not
7486 * be doing state search here
7488 return 0;
7490 /* bpf progs typically have a pruning point every 4 instructions
7491 * http://vger.kernel.org/bpfconf2019.html#session-1
7492 * Do not add a new state for future pruning if the verifier hasn't seen
7493 * at least 2 jumps and at least 8 instructions.
7494 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
7495 * In tests that amounts to up to a 50% reduction in total verifier
7496 * memory consumption and a 20% verifier time speedup.
7498 if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
7499 env->insn_processed - env->prev_insn_processed >= 8)
7500 add_new_state = true;
7502 pprev = explored_state(env, insn_idx);
7503 sl = *pprev;
7505 clean_live_states(env, insn_idx, cur);
7507 while (sl) {
7508 states_cnt++;
7509 if (sl->state.insn_idx != insn_idx)
7510 goto next;
7511 if (sl->state.branches) {
7512 if (states_maybe_looping(&sl->state, cur) &&
7513 states_equal(env, &sl->state, cur)) {
7514 verbose_linfo(env, insn_idx, "; ");
7515 verbose(env, "infinite loop detected at insn %d\n", insn_idx);
7516 return -EINVAL;
7518 /* if the verifier is processing a loop, avoid adding new state
7519 * too often, since different loop iterations have distinct
7520 * states and may not help future pruning.
7521 * This threshold shouldn't be too low, to make sure that
7522 * a loop with a large bound will still be rejected quickly.
7523 * The most abusive loop will be:
7524 * r1 += 1
7525 * if r1 < 1000000 goto pc-2
7526 * 1M insn_processed limit / 100 == 10k peak states.
7527 * This threshold shouldn't be too high either, since states
7528 * at the end of the loop are likely to be useful in pruning.
7530 if (env->jmps_processed - env->prev_jmps_processed < 20 &&
7531 env->insn_processed - env->prev_insn_processed < 100)
7532 add_new_state = false;
7533 goto miss;
7535 if (states_equal(env, &sl->state, cur)) {
7536 sl->hit_cnt++;
7537 /* reached equivalent register/stack state,
7538 * prune the search.
7539 * Registers read by the continuation are read by us.
7540 * If we have any write marks in env->cur_state, they
7541 * will prevent corresponding reads in the continuation
7542 * from reaching our parent (an explored_state). Our
7543 * own state will get the read marks recorded, but
7544 * they'll be immediately forgotten as we're pruning
7545 * this state and will pop a new one.
7547 err = propagate_liveness(env, &sl->state, cur);
7549 /* if previous state reached the exit with precision and
7550 * current state is equivalent to it (except precision marks)
7551 * the precision needs to be propagated back in
7552 * the current state.
7554 err = err ? : push_jmp_history(env, cur);
7555 err = err ? : propagate_precision(env, &sl->state);
7556 if (err)
7557 return err;
7558 return 1;
7560 miss:
7561 /* when a new state is not going to be added, do not increase the miss count.
7562 * Otherwise several loop iterations will remove the state
7563 * recorded earlier. The goal of these heuristics is to have
7564 * states from some iterations of the loop (some in the beginning
7565 * and some at the end) to help pruning.
7567 if (add_new_state)
7568 sl->miss_cnt++;
7569 /* heuristic to determine whether this state is beneficial
7570 * to keep checking from state equivalence point of view.
7571 * Higher numbers increase max_states_per_insn and verification time,
7572 * but do not meaningfully decrease insn_processed.
7574 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
7575 /* the state is unlikely to be useful. Remove it to
7576 * speed up verification
7578 *pprev = sl->next;
7579 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
7580 u32 br = sl->state.branches;
7582 WARN_ONCE(br,
7583 "BUG live_done but branches_to_explore %d\n",
7584 br);
7585 free_verifier_state(&sl->state, false);
7586 kfree(sl);
7587 env->peak_states--;
7588 } else {
7589 /* cannot free this state, since parentage chain may
7590 * walk it later. Add it to the free_list instead to
7591 * be freed at the end of verification
7593 sl->next = env->free_list;
7594 env->free_list = sl;
7596 sl = *pprev;
7597 continue;
7599 next:
7600 pprev = &sl->next;
7601 sl = *pprev;
7604 if (env->max_states_per_insn < states_cnt)
7605 env->max_states_per_insn = states_cnt;
7607 if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
7608 return push_jmp_history(env, cur);
7610 if (!add_new_state)
7611 return push_jmp_history(env, cur);
7613 /* There were no equivalent states, remember the current one.
7614 * Technically the current state is not proven to be safe yet,
7615 * but it will either reach outer most bpf_exit (which means it's safe)
7616 * or it will be rejected. When there are no loops the verifier won't be
7617 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
7618 * again on the way to bpf_exit.
7619 * When looping the sl->state.branches will be > 0 and this state
7620 * will not be considered for equivalence until branches == 0.
7622 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
7623 if (!new_sl)
7624 return -ENOMEM;
7625 env->total_states++;
7626 env->peak_states++;
7627 env->prev_jmps_processed = env->jmps_processed;
7628 env->prev_insn_processed = env->insn_processed;
7630 /* add new state to the head of linked list */
7631 new = &new_sl->state;
7632 err = copy_verifier_state(new, cur);
7633 if (err) {
7634 free_verifier_state(new, false);
7635 kfree(new_sl);
7636 return err;
7638 new->insn_idx = insn_idx;
7639 WARN_ONCE(new->branches != 1,
7640 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
7642 cur->parent = new;
7643 cur->first_insn_idx = insn_idx;
7644 clear_jmp_history(cur);
7645 new_sl->next = *explored_state(env, insn_idx);
7646 *explored_state(env, insn_idx) = new_sl;
7647 /* connect new state to parentage chain. Current frame needs all
7648 * registers connected. Only r6 - r9 of the callers are alive (pushed
7649 * to the stack implicitly by JITs) so in callers' frames connect just
7650 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
7651 * the state of the call instruction (with WRITTEN set), and r0 comes
7652 * from callee with its full parentage chain, anyway.
7654 /* clear write marks in current state: the writes we did are not writes
7655 * our child did, so they don't screen off its reads from us.
7656 * (There are no read marks in current state, because reads always mark
7657 * their parent and current state never has children yet. Only
7658 * explored_states can get read marks.)
7660 for (j = 0; j <= cur->curframe; j++) {
7661 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
7662 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
7663 for (i = 0; i < BPF_REG_FP; i++)
7664 cur->frame[j]->regs[i].live = REG_LIVE_NONE;
7667 /* all stack frames are accessible from callee, clear them all */
7668 for (j = 0; j <= cur->curframe; j++) {
7669 struct bpf_func_state *frame = cur->frame[j];
7670 struct bpf_func_state *newframe = new->frame[j];
7672 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
7673 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
7674 frame->stack[i].spilled_ptr.parent =
7675 &newframe->stack[i].spilled_ptr;
7678 return 0;
7681 /* Return true if it's OK to have the same insn return a different type. */
7682 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
7684 switch (type) {
7685 case PTR_TO_CTX:
7686 case PTR_TO_SOCKET:
7687 case PTR_TO_SOCKET_OR_NULL:
7688 case PTR_TO_SOCK_COMMON:
7689 case PTR_TO_SOCK_COMMON_OR_NULL:
7690 case PTR_TO_TCP_SOCK:
7691 case PTR_TO_TCP_SOCK_OR_NULL:
7692 case PTR_TO_XDP_SOCK:
7693 case PTR_TO_BTF_ID:
7694 return false;
7695 default:
7696 return true;
7700 /* If an instruction was previously used with particular pointer types, then we
7701 * need to be careful to avoid cases such as the one below, where it may be ok
7702 * for one branch to access the pointer, but not for the other branch:
7704 * R1 = sock_ptr
7705 * goto X;
7706 * ...
7707 * R1 = some_other_valid_ptr;
7708 * goto X;
7709 * ...
7710 * R2 = *(u32 *)(R1 + 0);
7712 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
7714 return src != prev && (!reg_type_mismatch_ok(src) ||
7715 !reg_type_mismatch_ok(prev));
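/* A minimal sketch of the rejected pattern, spelled with the insn macros from
 * linux/filter.h (registers and offsets are arbitrary, illustrative choices):
 *
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),		r1 stays == ctx here
 *	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),		else r1 = frame pointer
 *	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),	reached with both types
 *
 * The load is walked once with R1 == PTR_TO_CTX and once with a stack pointer,
 * so reg_type_mismatch() returns true and do_check() rejects the program.
 */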
7718 static int do_check(struct bpf_verifier_env *env)
7720 struct bpf_verifier_state *state;
7721 struct bpf_insn *insns = env->prog->insnsi;
7722 struct bpf_reg_state *regs;
7723 int insn_cnt = env->prog->len;
7724 bool do_print_state = false;
7725 int prev_insn_idx = -1;
7727 env->prev_linfo = NULL;
7729 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
7730 if (!state)
7731 return -ENOMEM;
7732 state->curframe = 0;
7733 state->speculative = false;
7734 state->branches = 1;
7735 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
7736 if (!state->frame[0]) {
7737 kfree(state);
7738 return -ENOMEM;
7740 env->cur_state = state;
7741 init_func_state(env, state->frame[0],
7742 BPF_MAIN_FUNC /* callsite */,
7743 0 /* frameno */,
7744 0 /* subprogno, zero == main subprog */);
7746 if (btf_check_func_arg_match(env, 0))
7747 return -EINVAL;
7749 for (;;) {
7750 struct bpf_insn *insn;
7751 u8 class;
7752 int err;
7754 env->prev_insn_idx = prev_insn_idx;
7755 if (env->insn_idx >= insn_cnt) {
7756 verbose(env, "invalid insn idx %d insn_cnt %d\n",
7757 env->insn_idx, insn_cnt);
7758 return -EFAULT;
7761 insn = &insns[env->insn_idx];
7762 class = BPF_CLASS(insn->code);
7764 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
7765 verbose(env,
7766 "BPF program is too large. Processed %d insn\n",
7767 env->insn_processed);
7768 return -E2BIG;
7771 err = is_state_visited(env, env->insn_idx);
7772 if (err < 0)
7773 return err;
7774 if (err == 1) {
7775 /* found equivalent state, can prune the search */
7776 if (env->log.level & BPF_LOG_LEVEL) {
7777 if (do_print_state)
7778 verbose(env, "\nfrom %d to %d%s: safe\n",
7779 env->prev_insn_idx, env->insn_idx,
7780 env->cur_state->speculative ?
7781 " (speculative execution)" : "");
7782 else
7783 verbose(env, "%d: safe\n", env->insn_idx);
7785 goto process_bpf_exit;
7788 if (signal_pending(current))
7789 return -EAGAIN;
7791 if (need_resched())
7792 cond_resched();
7794 if (env->log.level & BPF_LOG_LEVEL2 ||
7795 (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
7796 if (env->log.level & BPF_LOG_LEVEL2)
7797 verbose(env, "%d:", env->insn_idx);
7798 else
7799 verbose(env, "\nfrom %d to %d%s:",
7800 env->prev_insn_idx, env->insn_idx,
7801 env->cur_state->speculative ?
7802 " (speculative execution)" : "");
7803 print_verifier_state(env, state->frame[state->curframe]);
7804 do_print_state = false;
7807 if (env->log.level & BPF_LOG_LEVEL) {
7808 const struct bpf_insn_cbs cbs = {
7809 .cb_print = verbose,
7810 .private_data = env,
7813 verbose_linfo(env, env->insn_idx, "; ");
7814 verbose(env, "%d: ", env->insn_idx);
7815 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
7818 if (bpf_prog_is_dev_bound(env->prog->aux)) {
7819 err = bpf_prog_offload_verify_insn(env, env->insn_idx,
7820 env->prev_insn_idx);
7821 if (err)
7822 return err;
7825 regs = cur_regs(env);
7826 env->insn_aux_data[env->insn_idx].seen = true;
7827 prev_insn_idx = env->insn_idx;
7829 if (class == BPF_ALU || class == BPF_ALU64) {
7830 err = check_alu_op(env, insn);
7831 if (err)
7832 return err;
7834 } else if (class == BPF_LDX) {
7835 enum bpf_reg_type *prev_src_type, src_reg_type;
7837 /* check for reserved fields is already done */
7839 /* check src operand */
7840 err = check_reg_arg(env, insn->src_reg, SRC_OP);
7841 if (err)
7842 return err;
7844 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
7845 if (err)
7846 return err;
7848 src_reg_type = regs[insn->src_reg].type;
7850 /* check that memory (src_reg + off) is readable,
7851 * the state of dst_reg will be updated by this func
7853 err = check_mem_access(env, env->insn_idx, insn->src_reg,
7854 insn->off, BPF_SIZE(insn->code),
7855 BPF_READ, insn->dst_reg, false);
7856 if (err)
7857 return err;
7859 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
7861 if (*prev_src_type == NOT_INIT) {
7862 /* saw a valid insn
7863 * dst_reg = *(u32 *)(src_reg + off)
7864 * save type to validate intersecting paths
7866 *prev_src_type = src_reg_type;
7868 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
7869 /* An abusive program is trying to use the same insn
7870 * dst_reg = *(u32*) (src_reg + off)
7871 * with different pointer types:
7872 * src_reg == ctx in one branch and
7873 * src_reg == stack|map in some other branch.
7874 * Reject it.
7876 verbose(env, "same insn cannot be used with different pointers\n");
7877 return -EINVAL;
7880 } else if (class == BPF_STX) {
7881 enum bpf_reg_type *prev_dst_type, dst_reg_type;
7883 if (BPF_MODE(insn->code) == BPF_XADD) {
7884 err = check_xadd(env, env->insn_idx, insn);
7885 if (err)
7886 return err;
7887 env->insn_idx++;
7888 continue;
7891 /* check src1 operand */
7892 err = check_reg_arg(env, insn->src_reg, SRC_OP);
7893 if (err)
7894 return err;
7895 /* check src2 operand */
7896 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
7897 if (err)
7898 return err;
7900 dst_reg_type = regs[insn->dst_reg].type;
7902 /* check that memory (dst_reg + off) is writeable */
7903 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
7904 insn->off, BPF_SIZE(insn->code),
7905 BPF_WRITE, insn->src_reg, false);
7906 if (err)
7907 return err;
7909 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
7911 if (*prev_dst_type == NOT_INIT) {
7912 *prev_dst_type = dst_reg_type;
7913 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
7914 verbose(env, "same insn cannot be used with different pointers\n");
7915 return -EINVAL;
7918 } else if (class == BPF_ST) {
7919 if (BPF_MODE(insn->code) != BPF_MEM ||
7920 insn->src_reg != BPF_REG_0) {
7921 verbose(env, "BPF_ST uses reserved fields\n");
7922 return -EINVAL;
7924 /* check src operand */
7925 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
7926 if (err)
7927 return err;
7929 if (is_ctx_reg(env, insn->dst_reg)) {
7930 verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
7931 insn->dst_reg,
7932 reg_type_str[reg_state(env, insn->dst_reg)->type]);
7933 return -EACCES;
7936 /* check that memory (dst_reg + off) is writeable */
7937 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
7938 insn->off, BPF_SIZE(insn->code),
7939 BPF_WRITE, -1, false);
7940 if (err)
7941 return err;
7943 } else if (class == BPF_JMP || class == BPF_JMP32) {
7944 u8 opcode = BPF_OP(insn->code);
7946 env->jmps_processed++;
7947 if (opcode == BPF_CALL) {
7948 if (BPF_SRC(insn->code) != BPF_K ||
7949 insn->off != 0 ||
7950 (insn->src_reg != BPF_REG_0 &&
7951 insn->src_reg != BPF_PSEUDO_CALL) ||
7952 insn->dst_reg != BPF_REG_0 ||
7953 class == BPF_JMP32) {
7954 verbose(env, "BPF_CALL uses reserved fields\n");
7955 return -EINVAL;
7958 if (env->cur_state->active_spin_lock &&
7959 (insn->src_reg == BPF_PSEUDO_CALL ||
7960 insn->imm != BPF_FUNC_spin_unlock)) {
7961 verbose(env, "function calls are not allowed while holding a lock\n");
7962 return -EINVAL;
7964 if (insn->src_reg == BPF_PSEUDO_CALL)
7965 err = check_func_call(env, insn, &env->insn_idx);
7966 else
7967 err = check_helper_call(env, insn->imm, env->insn_idx);
7968 if (err)
7969 return err;
7971 } else if (opcode == BPF_JA) {
7972 if (BPF_SRC(insn->code) != BPF_K ||
7973 insn->imm != 0 ||
7974 insn->src_reg != BPF_REG_0 ||
7975 insn->dst_reg != BPF_REG_0 ||
7976 class == BPF_JMP32) {
7977 verbose(env, "BPF_JA uses reserved fields\n");
7978 return -EINVAL;
7981 env->insn_idx += insn->off + 1;
7982 continue;
7984 } else if (opcode == BPF_EXIT) {
7985 if (BPF_SRC(insn->code) != BPF_K ||
7986 insn->imm != 0 ||
7987 insn->src_reg != BPF_REG_0 ||
7988 insn->dst_reg != BPF_REG_0 ||
7989 class == BPF_JMP32) {
7990 verbose(env, "BPF_EXIT uses reserved fields\n");
7991 return -EINVAL;
7994 if (env->cur_state->active_spin_lock) {
7995 verbose(env, "bpf_spin_unlock is missing\n");
7996 return -EINVAL;
7999 if (state->curframe) {
8000 /* exit from nested function */
8001 err = prepare_func_exit(env, &env->insn_idx);
8002 if (err)
8003 return err;
8004 do_print_state = true;
8005 continue;
8008 err = check_reference_leak(env);
8009 if (err)
8010 return err;
8012 /* eBPF calling convention is such that R0 is used
8013 * to return the value from eBPF program.
8014 * Make sure that it's readable at this time
8015 * of bpf_exit, which means that program wrote
8016 * something into it earlier
8018 err = check_reg_arg(env, BPF_REG_0, SRC_OP);
8019 if (err)
8020 return err;
8022 if (is_pointer_value(env, BPF_REG_0)) {
8023 verbose(env, "R0 leaks addr as return value\n");
8024 return -EACCES;
8027 err = check_return_code(env);
8028 if (err)
8029 return err;
8030 process_bpf_exit:
8031 update_branch_counts(env, env->cur_state);
8032 err = pop_stack(env, &prev_insn_idx,
8033 &env->insn_idx);
8034 if (err < 0) {
8035 if (err != -ENOENT)
8036 return err;
8037 break;
8038 } else {
8039 do_print_state = true;
8040 continue;
8042 } else {
8043 err = check_cond_jmp_op(env, insn, &env->insn_idx);
8044 if (err)
8045 return err;
8047 } else if (class == BPF_LD) {
8048 u8 mode = BPF_MODE(insn->code);
8050 if (mode == BPF_ABS || mode == BPF_IND) {
8051 err = check_ld_abs(env, insn);
8052 if (err)
8053 return err;
8055 } else if (mode == BPF_IMM) {
8056 err = check_ld_imm(env, insn);
8057 if (err)
8058 return err;
8060 env->insn_idx++;
8061 env->insn_aux_data[env->insn_idx].seen = true;
8062 } else {
8063 verbose(env, "invalid BPF_LD mode\n");
8064 return -EINVAL;
8066 } else {
8067 verbose(env, "unknown insn class %d\n", class);
8068 return -EINVAL;
8071 env->insn_idx++;
8074 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
8075 return 0;
8078 static int check_map_prealloc(struct bpf_map *map)
8080 return (map->map_type != BPF_MAP_TYPE_HASH &&
8081 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
8082 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
8083 !(map->map_flags & BPF_F_NO_PREALLOC);
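/* E.g. an ARRAY map always counts as preallocated here, while a HASH map
 * created with the BPF_F_NO_PREALLOC flag does not.
 */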
8086 static bool is_tracing_prog_type(enum bpf_prog_type type)
8088 switch (type) {
8089 case BPF_PROG_TYPE_KPROBE:
8090 case BPF_PROG_TYPE_TRACEPOINT:
8091 case BPF_PROG_TYPE_PERF_EVENT:
8092 case BPF_PROG_TYPE_RAW_TRACEPOINT:
8093 return true;
8094 default:
8095 return false;
8099 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
8100 struct bpf_map *map,
8101 struct bpf_prog *prog)
8104 /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
8105 * preallocated hash maps, since doing memory allocation
8106 * in overflow_handler can crash depending on where the NMI
8107 * got triggered.
8109 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
8110 if (!check_map_prealloc(map)) {
8111 verbose(env, "perf_event programs can only use preallocated hash map\n");
8112 return -EINVAL;
8114 if (map->inner_map_meta &&
8115 !check_map_prealloc(map->inner_map_meta)) {
8116 verbose(env, "perf_event programs can only use preallocated inner hash map\n");
8117 return -EINVAL;
8121 if ((is_tracing_prog_type(prog->type) ||
8122 prog->type == BPF_PROG_TYPE_SOCKET_FILTER) &&
8123 map_value_has_spin_lock(map)) {
8124 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
8125 return -EINVAL;
8128 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
8129 !bpf_offload_prog_map_match(prog, map)) {
8130 verbose(env, "offload device mismatch between prog and map\n");
8131 return -EINVAL;
8134 return 0;
8137 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
8139 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
8140 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
8143 /* look for pseudo eBPF instructions that access map FDs and
8144 * replace them with actual map pointers
8146 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
8148 struct bpf_insn *insn = env->prog->insnsi;
8149 int insn_cnt = env->prog->len;
8150 int i, j, err;
8152 err = bpf_prog_calc_tag(env->prog);
8153 if (err)
8154 return err;
8156 for (i = 0; i < insn_cnt; i++, insn++) {
8157 if (BPF_CLASS(insn->code) == BPF_LDX &&
8158 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
8159 verbose(env, "BPF_LDX uses reserved fields\n");
8160 return -EINVAL;
8163 if (BPF_CLASS(insn->code) == BPF_STX &&
8164 ((BPF_MODE(insn->code) != BPF_MEM &&
8165 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
8166 verbose(env, "BPF_STX uses reserved fields\n");
8167 return -EINVAL;
8170 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
8171 struct bpf_insn_aux_data *aux;
8172 struct bpf_map *map;
8173 struct fd f;
8174 u64 addr;
8176 if (i == insn_cnt - 1 || insn[1].code != 0 ||
8177 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
8178 insn[1].off != 0) {
8179 verbose(env, "invalid bpf_ld_imm64 insn\n");
8180 return -EINVAL;
8183 if (insn[0].src_reg == 0)
8184 /* valid generic load 64-bit imm */
8185 goto next_insn;
8187 /* In final convert_pseudo_ld_imm64() step, this is
8188 * converted into regular 64-bit imm load insn.
8190 if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD &&
8191 insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) ||
8192 (insn[0].src_reg == BPF_PSEUDO_MAP_FD &&
8193 insn[1].imm != 0)) {
8194 verbose(env,
8195 "unrecognized bpf_ld_imm64 insn\n");
8196 return -EINVAL;
8199 f = fdget(insn[0].imm);
8200 map = __bpf_map_get(f);
8201 if (IS_ERR(map)) {
8202 verbose(env, "fd %d is not pointing to valid bpf_map\n",
8203 insn[0].imm);
8204 return PTR_ERR(map);
8207 err = check_map_prog_compatibility(env, map, env->prog);
8208 if (err) {
8209 fdput(f);
8210 return err;
8213 aux = &env->insn_aux_data[i];
8214 if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
8215 addr = (unsigned long)map;
8216 } else {
8217 u32 off = insn[1].imm;
8219 if (off >= BPF_MAX_VAR_OFF) {
8220 verbose(env, "direct value offset of %u is not allowed\n", off);
8221 fdput(f);
8222 return -EINVAL;
8225 if (!map->ops->map_direct_value_addr) {
8226 verbose(env, "no direct value access support for this map type\n");
8227 fdput(f);
8228 return -EINVAL;
8231 err = map->ops->map_direct_value_addr(map, &addr, off);
8232 if (err) {
8233 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
8234 map->value_size, off);
8235 fdput(f);
8236 return err;
8239 aux->map_off = off;
8240 addr += off;
8243 insn[0].imm = (u32)addr;
8244 insn[1].imm = addr >> 32;
8246 /* check whether we recorded this map already */
8247 for (j = 0; j < env->used_map_cnt; j++) {
8248 if (env->used_maps[j] == map) {
8249 aux->map_index = j;
8250 fdput(f);
8251 goto next_insn;
8255 if (env->used_map_cnt >= MAX_USED_MAPS) {
8256 fdput(f);
8257 return -E2BIG;
8260 /* hold the map. If the program is rejected by verifier,
8261 * the map will be released by release_maps() or it
8262 * will be used by the valid program until it's unloaded
8263 * and all maps are released in free_used_maps()
8265 bpf_map_inc(map);
8267 aux->map_index = env->used_map_cnt;
8268 env->used_maps[env->used_map_cnt++] = map;
8270 if (bpf_map_is_cgroup_storage(map) &&
8271 bpf_cgroup_storage_assign(env->prog, map)) {
8272 verbose(env, "only one cgroup storage of each type is allowed\n");
8273 fdput(f);
8274 return -EBUSY;
8277 fdput(f);
8278 next_insn:
8279 insn++;
8280 i++;
8281 continue;
8284 /* Basic sanity check before we invest more work here. */
8285 if (!bpf_opcode_in_insntable(insn->code)) {
8286 verbose(env, "unknown opcode %02x\n", insn->code);
8287 return -EINVAL;
8291 /* now all pseudo BPF_LD_IMM64 instructions load valid
8292 * 'struct bpf_map *' into a register instead of user map_fd.
8293 * These pointers will be used later by verifier to validate map access.
8295 return 0;
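/* For reference, a sketch of the loader-side pattern this pass resolves,
 * assuming the BPF_LD_MAP_FD() helper macro (as in linux/filter.h), which
 * emits the two-insn BPF_LD | BPF_IMM | BPF_DW pair with
 * src_reg == BPF_PSEUDO_MAP_FD:
 *
 *	int map_fd = ...;	fd returned by bpf(BPF_MAP_CREATE, ...)
 *	struct bpf_insn insns[] = {
 *		BPF_LD_MAP_FD(BPF_REG_1, map_fd),
 *		...
 *	};
 *
 * After this pass insn[0].imm and insn[1].imm hold the low and high 32 bits
 * of the 'struct bpf_map *' pointer instead of the user-visible fd.
 */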
8298 /* drop refcnt of maps used by the rejected program */
8299 static void release_maps(struct bpf_verifier_env *env)
8301 enum bpf_cgroup_storage_type stype;
8302 int i;
8304 for_each_cgroup_storage_type(stype) {
8305 if (!env->prog->aux->cgroup_storage[stype])
8306 continue;
8307 bpf_cgroup_storage_release(env->prog,
8308 env->prog->aux->cgroup_storage[stype]);
8311 for (i = 0; i < env->used_map_cnt; i++)
8312 bpf_map_put(env->used_maps[i]);
8315 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
8316 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
8318 struct bpf_insn *insn = env->prog->insnsi;
8319 int insn_cnt = env->prog->len;
8320 int i;
8322 for (i = 0; i < insn_cnt; i++, insn++)
8323 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
8324 insn->src_reg = 0;
8327 /* a single env->prog->insnsi[off] instruction was replaced with the range
8328 * insnsi[off, off + cnt). Adjust the corresponding insn_aux_data by copying
8329 * [0, off) and [off, end) to new locations, so the patched range stays zero
8331 static int adjust_insn_aux_data(struct bpf_verifier_env *env,
8332 struct bpf_prog *new_prog, u32 off, u32 cnt)
8334 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
8335 struct bpf_insn *insn = new_prog->insnsi;
8336 u32 prog_len;
8337 int i;
8339 /* aux info at OFF always needs adjustment, no matter whether the fast path
8340 * (cnt == 1) is taken or not. There is no guarantee that INSN at OFF is the
8341 * original insn of the old prog.
8343 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
8345 if (cnt == 1)
8346 return 0;
8347 prog_len = new_prog->len;
8348 new_data = vzalloc(array_size(prog_len,
8349 sizeof(struct bpf_insn_aux_data)));
8350 if (!new_data)
8351 return -ENOMEM;
8352 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
8353 memcpy(new_data + off + cnt - 1, old_data + off,
8354 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
8355 for (i = off; i < off + cnt - 1; i++) {
8356 new_data[i].seen = true;
8357 new_data[i].zext_dst = insn_has_def32(env, insn + i);
8359 env->insn_aux_data = new_data;
8360 vfree(old_data);
8361 return 0;
8364 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
8366 int i;
8368 if (len == 1)
8369 return;
8370 /* NOTE: fake 'exit' subprog should be updated as well. */
8371 for (i = 0; i <= env->subprog_cnt; i++) {
8372 if (env->subprog_info[i].start <= off)
8373 continue;
8374 env->subprog_info[i].start += len - 1;
8378 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
8379 const struct bpf_insn *patch, u32 len)
8381 struct bpf_prog *new_prog;
8383 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
8384 if (IS_ERR(new_prog)) {
8385 if (PTR_ERR(new_prog) == -ERANGE)
8386 verbose(env,
8387 "insn %d cannot be patched due to 16-bit range\n",
8388 env->insn_aux_data[off].orig_idx);
8389 return NULL;
8391 if (adjust_insn_aux_data(env, new_prog, off, len))
8392 return NULL;
8393 adjust_subprog_starts(env, off, len);
8394 return new_prog;
8397 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
8398 u32 off, u32 cnt)
8400 int i, j;
8402 /* find first prog starting at or after off (first to remove) */
8403 for (i = 0; i < env->subprog_cnt; i++)
8404 if (env->subprog_info[i].start >= off)
8405 break;
8406 /* find first prog starting at or after off + cnt (first to stay) */
8407 for (j = i; j < env->subprog_cnt; j++)
8408 if (env->subprog_info[j].start >= off + cnt)
8409 break;
8410 /* if j doesn't start exactly at off + cnt, we are just removing
8411 * the front of previous prog
8413 if (env->subprog_info[j].start != off + cnt)
8414 j--;
8416 if (j > i) {
8417 struct bpf_prog_aux *aux = env->prog->aux;
8418 int move;
8420 /* move fake 'exit' subprog as well */
8421 move = env->subprog_cnt + 1 - j;
8423 memmove(env->subprog_info + i,
8424 env->subprog_info + j,
8425 sizeof(*env->subprog_info) * move);
8426 env->subprog_cnt -= j - i;
8428 /* remove func_info */
8429 if (aux->func_info) {
8430 move = aux->func_info_cnt - j;
8432 memmove(aux->func_info + i,
8433 aux->func_info + j,
8434 sizeof(*aux->func_info) * move);
8435 aux->func_info_cnt -= j - i;
8436 /* func_info->insn_off is set after all code rewrites,
8437 * in adjust_btf_func() - no need to adjust
8440 } else {
8441 /* convert i from "first prog to remove" to "first to adjust" */
8442 if (env->subprog_info[i].start == off)
8443 i++;
8446 /* update fake 'exit' subprog as well */
8447 for (; i <= env->subprog_cnt; i++)
8448 env->subprog_info[i].start -= cnt;
8450 return 0;
8453 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
8454 u32 cnt)
8456 struct bpf_prog *prog = env->prog;
8457 u32 i, l_off, l_cnt, nr_linfo;
8458 struct bpf_line_info *linfo;
8460 nr_linfo = prog->aux->nr_linfo;
8461 if (!nr_linfo)
8462 return 0;
8464 linfo = prog->aux->linfo;
8466 /* find first line info to remove, count lines to be removed */
8467 for (i = 0; i < nr_linfo; i++)
8468 if (linfo[i].insn_off >= off)
8469 break;
8471 l_off = i;
8472 l_cnt = 0;
8473 for (; i < nr_linfo; i++)
8474 if (linfo[i].insn_off < off + cnt)
8475 l_cnt++;
8476 else
8477 break;
8479 /* If the first live insn doesn't match the first live linfo, it needs to
8480 * "inherit" the last removed linfo. prog is already modified, so prog->len == off
8481 * means there are no live instructions after it (the tail of the program was removed).
8483 if (prog->len != off && l_cnt &&
8484 (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
8485 l_cnt--;
8486 linfo[--i].insn_off = off + cnt;
8489 /* remove the line info which refer to the removed instructions */
8490 if (l_cnt) {
8491 memmove(linfo + l_off, linfo + i,
8492 sizeof(*linfo) * (nr_linfo - i));
8494 prog->aux->nr_linfo -= l_cnt;
8495 nr_linfo = prog->aux->nr_linfo;
8498 /* pull all linfo[i].insn_off >= off + cnt in by cnt */
8499 for (i = l_off; i < nr_linfo; i++)
8500 linfo[i].insn_off -= cnt;
8502 /* fix up all subprogs (incl. 'exit') which start >= off */
8503 for (i = 0; i <= env->subprog_cnt; i++)
8504 if (env->subprog_info[i].linfo_idx > l_off) {
8505 /* program may have started in the removed region but
8506 * may not be fully removed
8508 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
8509 env->subprog_info[i].linfo_idx -= l_cnt;
8510 else
8511 env->subprog_info[i].linfo_idx = l_off;
8514 return 0;
8517 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
8519 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8520 unsigned int orig_prog_len = env->prog->len;
8521 int err;
8523 if (bpf_prog_is_dev_bound(env->prog->aux))
8524 bpf_prog_offload_remove_insns(env, off, cnt);
8526 err = bpf_remove_insns(env->prog, off, cnt);
8527 if (err)
8528 return err;
8530 err = adjust_subprog_starts_after_remove(env, off, cnt);
8531 if (err)
8532 return err;
8534 err = bpf_adj_linfo_after_remove(env, off, cnt);
8535 if (err)
8536 return err;
8538 memmove(aux_data + off, aux_data + off + cnt,
8539 sizeof(*aux_data) * (orig_prog_len - off - cnt));
8541 return 0;
8544 /* The verifier does more data flow analysis than llvm and will not
8545 * explore branches that are dead at run time. Malicious programs can
8546 * have dead code too. Therefore replace all dead at-run-time code
8547 * with 'ja -1'.
8549 * Plain nops are not optimal: e.g. if they sat at the end of the
8550 * program and through another bug we managed to jump there, we would
8551 * execute beyond program memory. Returning exception
8552 * code also wouldn't work since we can have subprogs where the dead
8553 * code could be located.
8555 static void sanitize_dead_code(struct bpf_verifier_env *env)
8557 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8558 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
8559 struct bpf_insn *insn = env->prog->insnsi;
8560 const int insn_cnt = env->prog->len;
8561 int i;
8563 for (i = 0; i < insn_cnt; i++) {
8564 if (aux_data[i].seen)
8565 continue;
8566 memcpy(insn + i, &trap, sizeof(trap));
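/* The 'ja -1' trap branches to itself, so even if a dead insn were somehow
 * reached through another bug, execution stays pinned inside the program
 * image instead of running past its end.
 */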
8570 static bool insn_is_cond_jump(u8 code)
8572 u8 op;
8574 if (BPF_CLASS(code) == BPF_JMP32)
8575 return true;
8577 if (BPF_CLASS(code) != BPF_JMP)
8578 return false;
8580 op = BPF_OP(code);
8581 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
8584 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
8586 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8587 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
8588 struct bpf_insn *insn = env->prog->insnsi;
8589 const int insn_cnt = env->prog->len;
8590 int i;
8592 for (i = 0; i < insn_cnt; i++, insn++) {
8593 if (!insn_is_cond_jump(insn->code))
8594 continue;
8596 if (!aux_data[i + 1].seen)
8597 ja.off = insn->off;
8598 else if (!aux_data[i + 1 + insn->off].seen)
8599 ja.off = 0;
8600 else
8601 continue;
8603 if (bpf_prog_is_dev_bound(env->prog->aux))
8604 bpf_prog_offload_replace_insn(env, i, &ja);
8606 memcpy(insn, &ja, sizeof(ja));
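/* A small illustration of the rewrite above (hypothetical offsets): if the
 * fall-through insn was never seen, a conditional jump such as
 *
 *	if r1 == 0 goto +5
 *
 * is hard wired into an unconditional 'goto +5' (ja.off = insn->off); if
 * instead the jump target was never seen, it becomes a 'goto +0' no-op
 * (ja.off = 0), which opt_remove_nops() below can then delete.
 */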
8610 static int opt_remove_dead_code(struct bpf_verifier_env *env)
8612 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8613 int insn_cnt = env->prog->len;
8614 int i, err;
8616 for (i = 0; i < insn_cnt; i++) {
8617 int j;
8619 j = 0;
8620 while (i + j < insn_cnt && !aux_data[i + j].seen)
8621 j++;
8622 if (!j)
8623 continue;
8625 err = verifier_remove_insns(env, i, j);
8626 if (err)
8627 return err;
8628 insn_cnt = env->prog->len;
8631 return 0;
8634 static int opt_remove_nops(struct bpf_verifier_env *env)
8636 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
8637 struct bpf_insn *insn = env->prog->insnsi;
8638 int insn_cnt = env->prog->len;
8639 int i, err;
8641 for (i = 0; i < insn_cnt; i++) {
8642 if (memcmp(&insn[i], &ja, sizeof(ja)))
8643 continue;
8645 err = verifier_remove_insns(env, i, 1);
8646 if (err)
8647 return err;
8648 insn_cnt--;
8649 i--;
8652 return 0;
8655 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
8656 const union bpf_attr *attr)
8658 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
8659 struct bpf_insn_aux_data *aux = env->insn_aux_data;
8660 int i, patch_len, delta = 0, len = env->prog->len;
8661 struct bpf_insn *insns = env->prog->insnsi;
8662 struct bpf_prog *new_prog;
8663 bool rnd_hi32;
8665 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
8666 zext_patch[1] = BPF_ZEXT_REG(0);
8667 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
8668 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
8669 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
8670 for (i = 0; i < len; i++) {
8671 int adj_idx = i + delta;
8672 struct bpf_insn insn;
8674 insn = insns[adj_idx];
8675 if (!aux[adj_idx].zext_dst) {
8676 u8 code, class;
8677 u32 imm_rnd;
8679 if (!rnd_hi32)
8680 continue;
8682 code = insn.code;
8683 class = BPF_CLASS(code);
8684 if (insn_no_def(&insn))
8685 continue;
8687 /* NOTE: arg "reg" (the fourth one) is only used for
8688 * BPF_STX, which has been ruled out by the check
8689 * above, so it is safe to pass NULL here.
8691 if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) {
8692 if (class == BPF_LD &&
8693 BPF_MODE(code) == BPF_IMM)
8694 i++;
8695 continue;
8698 /* ctx load could be transformed into wider load. */
8699 if (class == BPF_LDX &&
8700 aux[adj_idx].ptr_type == PTR_TO_CTX)
8701 continue;
8703 imm_rnd = get_random_int();
8704 rnd_hi32_patch[0] = insn;
8705 rnd_hi32_patch[1].imm = imm_rnd;
8706 rnd_hi32_patch[3].dst_reg = insn.dst_reg;
8707 patch = rnd_hi32_patch;
8708 patch_len = 4;
8709 goto apply_patch_buffer;
8712 if (!bpf_jit_needs_zext())
8713 continue;
8715 zext_patch[0] = insn;
8716 zext_patch[1].dst_reg = insn.dst_reg;
8717 zext_patch[1].src_reg = insn.dst_reg;
8718 patch = zext_patch;
8719 patch_len = 2;
8720 apply_patch_buffer:
8721 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
8722 if (!new_prog)
8723 return -ENOMEM;
8724 env->prog = new_prog;
8725 insns = new_prog->insnsi;
8726 aux = env->insn_aux_data;
8727 delta += patch_len - 1;
8730 return 0;
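/* Roughly, for a 32-bit definition such as
 *
 *	w1 = w2
 *
 * whose full 64-bit value is read later (zext_dst set), and on a JIT that
 * does not zero extend 32-bit results itself, the insn is patched into the
 * pair { original insn, BPF_ZEXT_REG(r1) }. With the BPF_F_TEST_RND_HI32
 * flag, definitions whose upper half is believed to be dead instead get that
 * half filled with a random value, to flush out errors in the 32/64-bit
 * read mark tracking.
 */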
8733 /* convert load instructions that access fields of a context type into a
8734 * sequence of instructions that access fields of the underlying structure:
8735 * struct __sk_buff -> struct sk_buff
8736 * struct bpf_sock_ops -> struct sock
8738 static int convert_ctx_accesses(struct bpf_verifier_env *env)
8740 const struct bpf_verifier_ops *ops = env->ops;
8741 int i, cnt, size, ctx_field_size, delta = 0;
8742 const int insn_cnt = env->prog->len;
8743 struct bpf_insn insn_buf[16], *insn;
8744 u32 target_size, size_default, off;
8745 struct bpf_prog *new_prog;
8746 enum bpf_access_type type;
8747 bool is_narrower_load;
8749 if (ops->gen_prologue || env->seen_direct_write) {
8750 if (!ops->gen_prologue) {
8751 verbose(env, "bpf verifier is misconfigured\n");
8752 return -EINVAL;
8754 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
8755 env->prog);
8756 if (cnt >= ARRAY_SIZE(insn_buf)) {
8757 verbose(env, "bpf verifier is misconfigured\n");
8758 return -EINVAL;
8759 } else if (cnt) {
8760 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
8761 if (!new_prog)
8762 return -ENOMEM;
8764 env->prog = new_prog;
8765 delta += cnt - 1;
8769 if (bpf_prog_is_dev_bound(env->prog->aux))
8770 return 0;
8772 insn = env->prog->insnsi + delta;
8774 for (i = 0; i < insn_cnt; i++, insn++) {
8775 bpf_convert_ctx_access_t convert_ctx_access;
8777 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
8778 insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
8779 insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
8780 insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
8781 type = BPF_READ;
8782 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
8783 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
8784 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
8785 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
8786 type = BPF_WRITE;
8787 else
8788 continue;
8790 if (type == BPF_WRITE &&
8791 env->insn_aux_data[i + delta].sanitize_stack_off) {
8792 struct bpf_insn patch[] = {
8793 /* Sanitize suspicious stack slot with zero.
8794 * There are no memory dependencies for this store,
8795 * since it's only using frame pointer and immediate
8796 * constant of zero
8798 BPF_ST_MEM(BPF_DW, BPF_REG_FP,
8799 env->insn_aux_data[i + delta].sanitize_stack_off,
8801 /* the original STX instruction will immediately
8802 * overwrite the same stack slot with appropriate value
8804 *insn,
8807 cnt = ARRAY_SIZE(patch);
8808 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
8809 if (!new_prog)
8810 return -ENOMEM;
8812 delta += cnt - 1;
8813 env->prog = new_prog;
8814 insn = new_prog->insnsi + i + delta;
8815 continue;
8818 switch (env->insn_aux_data[i + delta].ptr_type) {
8819 case PTR_TO_CTX:
8820 if (!ops->convert_ctx_access)
8821 continue;
8822 convert_ctx_access = ops->convert_ctx_access;
8823 break;
8824 case PTR_TO_SOCKET:
8825 case PTR_TO_SOCK_COMMON:
8826 convert_ctx_access = bpf_sock_convert_ctx_access;
8827 break;
8828 case PTR_TO_TCP_SOCK:
8829 convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
8830 break;
8831 case PTR_TO_XDP_SOCK:
8832 convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
8833 break;
8834 case PTR_TO_BTF_ID:
8835 if (type == BPF_WRITE) {
8836 verbose(env, "Writes through BTF pointers are not allowed\n");
8837 return -EINVAL;
8839 insn->code = BPF_LDX | BPF_PROBE_MEM | BPF_SIZE((insn)->code);
8840 env->prog->aux->num_exentries++;
8841 continue;
8842 default:
8843 continue;
8846 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
8847 size = BPF_LDST_BYTES(insn);
8849 /* If the read access is a narrower load of the field,
8850 * convert it to a 4/8-byte load, to minimize program type specific
8851 * convert_ctx_access changes. If the conversion is successful,
8852 * we will apply the proper mask to the result.
8854 is_narrower_load = size < ctx_field_size;
8855 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
8856 off = insn->off;
8857 if (is_narrower_load) {
8858 u8 size_code;
8860 if (type == BPF_WRITE) {
8861 verbose(env, "bpf verifier narrow ctx access misconfigured\n");
8862 return -EINVAL;
8865 size_code = BPF_H;
8866 if (ctx_field_size == 4)
8867 size_code = BPF_W;
8868 else if (ctx_field_size == 8)
8869 size_code = BPF_DW;
8871 insn->off = off & ~(size_default - 1);
8872 insn->code = BPF_LDX | BPF_MEM | size_code;
8875 target_size = 0;
8876 cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
8877 &target_size);
8878 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
8879 (ctx_field_size && !target_size)) {
8880 verbose(env, "bpf verifier is misconfigured\n");
8881 return -EINVAL;
8884 if (is_narrower_load && size < target_size) {
8885 u8 shift = bpf_ctx_narrow_access_offset(
8886 off, size, size_default) * 8;
8887 if (ctx_field_size <= 4) {
8888 if (shift)
8889 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
8890 insn->dst_reg,
8891 shift);
8892 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
8893 (1 << size * 8) - 1);
8894 } else {
8895 if (shift)
8896 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
8897 insn->dst_reg,
8898 shift);
8899 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
8900 (1ULL << size * 8) - 1);
8904 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
8905 if (!new_prog)
8906 return -ENOMEM;
8908 delta += cnt - 1;
8910 /* keep walking new program and skip insns we just inserted */
8911 env->prog = new_prog;
8912 insn = new_prog->insnsi + i + delta;
8915 return 0;
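/* As a concrete (little-endian, hypothetical offset) example of the narrow
 * load handling: a 1-byte read at offset 1 of a 4-byte context field, e.g.
 *
 *	r2 = *(u8 *)(r1 + off)
 *
 * is first widened to a 4-byte load of the whole field, rewritten by the
 * program type's convert_ctx_access(), and then followed by
 *
 *	r2 >>= 8	shift from bpf_ctx_narrow_access_offset()
 *	r2 &= 0xff	mask back to the original 1-byte size
 *
 * so the program still observes exactly the byte it asked for.
 */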
8918 static int jit_subprogs(struct bpf_verifier_env *env)
8920 struct bpf_prog *prog = env->prog, **func, *tmp;
8921 int i, j, subprog_start, subprog_end = 0, len, subprog;
8922 struct bpf_insn *insn;
8923 void *old_bpf_func;
8924 int err;
8926 if (env->subprog_cnt <= 1)
8927 return 0;
8929 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
8930 if (insn->code != (BPF_JMP | BPF_CALL) ||
8931 insn->src_reg != BPF_PSEUDO_CALL)
8932 continue;
8933 /* Upon error here we cannot fall back to interpreter but
8934 * need a hard reject of the program. Thus -EFAULT is
8935 * propagated in any case.
8937 subprog = find_subprog(env, i + insn->imm + 1);
8938 if (subprog < 0) {
8939 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
8940 i + insn->imm + 1);
8941 return -EFAULT;
8943 /* temporarily remember subprog id inside insn instead of
8944 * aux_data, since next loop will split up all insns into funcs
8946 insn->off = subprog;
8947 /* remember original imm in case JIT fails and fallback
8948 * to interpreter will be needed
8950 env->insn_aux_data[i].call_imm = insn->imm;
8951 /* point imm to __bpf_call_base+1 from JITs point of view */
8952 insn->imm = 1;
8955 err = bpf_prog_alloc_jited_linfo(prog);
8956 if (err)
8957 goto out_undo_insn;
8959 err = -ENOMEM;
8960 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
8961 if (!func)
8962 goto out_undo_insn;
8964 for (i = 0; i < env->subprog_cnt; i++) {
8965 subprog_start = subprog_end;
8966 subprog_end = env->subprog_info[i + 1].start;
8968 len = subprog_end - subprog_start;
8969 /* BPF_PROG_RUN doesn't call subprogs directly,
8970 * hence main prog stats include the runtime of subprogs.
8971 * subprogs don't have IDs and are not reachable via prog_get_next_id;
8972 * func[i]->aux->stats will never be accessed and stays NULL
8974 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
8975 if (!func[i])
8976 goto out_free;
8977 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
8978 len * sizeof(struct bpf_insn));
8979 func[i]->type = prog->type;
8980 func[i]->len = len;
8981 if (bpf_prog_calc_tag(func[i]))
8982 goto out_free;
8983 func[i]->is_func = 1;
8984 func[i]->aux->func_idx = i;
8985 /* the btf and func_info will be freed only at prog->aux */
8986 func[i]->aux->btf = prog->aux->btf;
8987 func[i]->aux->func_info = prog->aux->func_info;
8989 /* Use bpf_prog_F_tag to indicate functions in stack traces.
8990 * Long term would need debug info to populate names
8992 func[i]->aux->name[0] = 'F';
8993 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
8994 func[i]->jit_requested = 1;
8995 func[i]->aux->linfo = prog->aux->linfo;
8996 func[i]->aux->nr_linfo = prog->aux->nr_linfo;
8997 func[i]->aux->jited_linfo = prog->aux->jited_linfo;
8998 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
8999 func[i] = bpf_int_jit_compile(func[i]);
9000 if (!func[i]->jited) {
9001 err = -ENOTSUPP;
9002 goto out_free;
9004 cond_resched();
9006 /* at this point all bpf functions were successfully JITed
9007 * now populate all bpf_calls with correct addresses and
9008 * run last pass of JIT
9010 for (i = 0; i < env->subprog_cnt; i++) {
9011 insn = func[i]->insnsi;
9012 for (j = 0; j < func[i]->len; j++, insn++) {
9013 if (insn->code != (BPF_JMP | BPF_CALL) ||
9014 insn->src_reg != BPF_PSEUDO_CALL)
9015 continue;
9016 subprog = insn->off;
9017 insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
9018 __bpf_call_base;
9021 /* we use the aux data to keep a list of the start addresses
9022 * of the JITed images for each function in the program
9024 * for some architectures, such as powerpc64, the imm field
9025 * might not be large enough to hold the offset of the start
9026 * address of the callee's JITed image from __bpf_call_base
9028 * in such cases, we can lookup the start address of a callee
9029 * by using its subprog id, available from the off field of
9030 * the call instruction, as an index for this list
9032 func[i]->aux->func = func;
9033 func[i]->aux->func_cnt = env->subprog_cnt;
9035 for (i = 0; i < env->subprog_cnt; i++) {
9036 old_bpf_func = func[i]->bpf_func;
9037 tmp = bpf_int_jit_compile(func[i]);
9038 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
9039 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
9040 err = -ENOTSUPP;
9041 goto out_free;
9043 cond_resched();
9046 /* finally lock prog and jit images for all functions and
9047 * populate kallsyms
9049 for (i = 0; i < env->subprog_cnt; i++) {
9050 bpf_prog_lock_ro(func[i]);
9051 bpf_prog_kallsyms_add(func[i]);
9054 /* Last step: make now unused interpreter insns from main
9055 * prog consistent for later dump requests, so they can
9056 * later look the same as if they were interpreted only.
9058 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
9059 if (insn->code != (BPF_JMP | BPF_CALL) ||
9060 insn->src_reg != BPF_PSEUDO_CALL)
9061 continue;
9062 insn->off = env->insn_aux_data[i].call_imm;
9063 subprog = find_subprog(env, i + insn->off + 1);
9064 insn->imm = subprog;
9067 prog->jited = 1;
9068 prog->bpf_func = func[0]->bpf_func;
9069 prog->aux->func = func;
9070 prog->aux->func_cnt = env->subprog_cnt;
9071 bpf_prog_free_unused_jited_linfo(prog);
9072 return 0;
9073 out_free:
9074 for (i = 0; i < env->subprog_cnt; i++)
9075 if (func[i])
9076 bpf_jit_free(func[i]);
9077 kfree(func);
9078 out_undo_insn:
9079 /* cleanup main prog to be interpreted */
9080 prog->jit_requested = 0;
9081 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
9082 if (insn->code != (BPF_JMP | BPF_CALL) ||
9083 insn->src_reg != BPF_PSEUDO_CALL)
9084 continue;
9085 insn->off = 0;
9086 insn->imm = env->insn_aux_data[i].call_imm;
9088 bpf_prog_free_jited_linfo(prog);
9089 return err;
9092 static int fixup_call_args(struct bpf_verifier_env *env)
9094 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
9095 struct bpf_prog *prog = env->prog;
9096 struct bpf_insn *insn = prog->insnsi;
9097 int i, depth;
9098 #endif
9099 int err = 0;
9101 if (env->prog->jit_requested &&
9102 !bpf_prog_is_dev_bound(env->prog->aux)) {
9103 err = jit_subprogs(env);
9104 if (err == 0)
9105 return 0;
9106 if (err == -EFAULT)
9107 return err;
9109 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
9110 for (i = 0; i < prog->len; i++, insn++) {
9111 if (insn->code != (BPF_JMP | BPF_CALL) ||
9112 insn->src_reg != BPF_PSEUDO_CALL)
9113 continue;
9114 depth = get_callee_stack_depth(env, insn, i);
9115 if (depth < 0)
9116 return depth;
9117 bpf_patch_call_args(insn, depth);
9119 err = 0;
9120 #endif
9121 return err;
9124 /* fixup insn->imm field of bpf_call instructions
9125 * and inline eligible helpers as explicit sequence of BPF instructions
9127 * this function is called after eBPF program passed verification
9129 static int fixup_bpf_calls(struct bpf_verifier_env *env)
9131 struct bpf_prog *prog = env->prog;
9132 bool expect_blinding = bpf_jit_blinding_enabled(prog);
9133 struct bpf_insn *insn = prog->insnsi;
9134 const struct bpf_func_proto *fn;
9135 const int insn_cnt = prog->len;
9136 const struct bpf_map_ops *ops;
9137 struct bpf_insn_aux_data *aux;
9138 struct bpf_insn insn_buf[16];
9139 struct bpf_prog *new_prog;
9140 struct bpf_map *map_ptr;
9141 int i, ret, cnt, delta = 0;
9143 for (i = 0; i < insn_cnt; i++, insn++) {
9144 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
9145 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
9146 insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
9147 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
9148 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
9149 struct bpf_insn mask_and_div[] = {
9150 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
9151 /* Rx div 0 -> 0 */
9152 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
9153 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
9154 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9155 *insn,
9157 struct bpf_insn mask_and_mod[] = {
9158 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
9159 /* Rx mod 0 -> Rx */
9160 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
9161 *insn,
9163 struct bpf_insn *patchlet;
9165 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
9166 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
9167 patchlet = mask_and_div + (is64 ? 1 : 0);
9168 cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
9169 } else {
9170 patchlet = mask_and_mod + (is64 ? 1 : 0);
9171 cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
9174 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
9175 if (!new_prog)
9176 return -ENOMEM;
9178 delta += cnt - 1;
9179 env->prog = prog = new_prog;
9180 insn = new_prog->insnsi + i + delta;
9181 continue;
9184 if (BPF_CLASS(insn->code) == BPF_LD &&
9185 (BPF_MODE(insn->code) == BPF_ABS ||
9186 BPF_MODE(insn->code) == BPF_IND)) {
9187 cnt = env->ops->gen_ld_abs(insn, insn_buf);
9188 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
9189 verbose(env, "bpf verifier is misconfigured\n");
9190 return -EINVAL;
9193 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9194 if (!new_prog)
9195 return -ENOMEM;
9197 delta += cnt - 1;
9198 env->prog = prog = new_prog;
9199 insn = new_prog->insnsi + i + delta;
9200 continue;
9203 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
9204 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
9205 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
9206 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
9207 struct bpf_insn insn_buf[16];
9208 struct bpf_insn *patch = &insn_buf[0];
9209 bool issrc, isneg;
9210 u32 off_reg;
9212 aux = &env->insn_aux_data[i + delta];
9213 if (!aux->alu_state ||
9214 aux->alu_state == BPF_ALU_NON_POINTER)
9215 continue;
9217 isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
9218 issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
9219 BPF_ALU_SANITIZE_SRC;
9221 off_reg = issrc ? insn->src_reg : insn->dst_reg;
9222 if (isneg)
9223 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
9224 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
9225 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
9226 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
9227 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
9228 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
9229 if (issrc) {
9230 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
9231 off_reg);
9232 insn->src_reg = BPF_REG_AX;
9233 } else {
9234 *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
9235 BPF_REG_AX);
9237 if (isneg)
9238 insn->code = insn->code == code_add ?
9239 code_sub : code_add;
9240 *patch++ = *insn;
9241 if (issrc && isneg)
9242 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
9243 cnt = patch - insn_buf;
9245 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9246 if (!new_prog)
9247 return -ENOMEM;
9249 delta += cnt - 1;
9250 env->prog = prog = new_prog;
9251 insn = new_prog->insnsi + i + delta;
9252 continue;
9255 if (insn->code != (BPF_JMP | BPF_CALL))
9256 continue;
9257 if (insn->src_reg == BPF_PSEUDO_CALL)
9258 continue;
9260 if (insn->imm == BPF_FUNC_get_route_realm)
9261 prog->dst_needed = 1;
9262 if (insn->imm == BPF_FUNC_get_prandom_u32)
9263 bpf_user_rnd_init_once();
9264 if (insn->imm == BPF_FUNC_override_return)
9265 prog->kprobe_override = 1;
9266 if (insn->imm == BPF_FUNC_tail_call) {
9267 /* If we tail call into other programs, we
9268 * cannot make any assumptions since they can
9269 * be replaced dynamically during runtime in
9270 * the program array.
9272 prog->cb_access = 1;
9273 env->prog->aux->stack_depth = MAX_BPF_STACK;
9274 env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
9276 /* mark bpf_tail_call as a different opcode to avoid a
9277 * conditional branch in the interpreter for every normal
9278 * call and to prevent accidental JITing by a JIT compiler
9279 * that doesn't support bpf_tail_call yet
9281 insn->imm = 0;
9282 insn->code = BPF_JMP | BPF_TAIL_CALL;
9284 aux = &env->insn_aux_data[i + delta];
9285 if (prog->jit_requested && !expect_blinding &&
9286 !bpf_map_key_poisoned(aux) &&
9287 !bpf_map_ptr_poisoned(aux) &&
9288 !bpf_map_ptr_unpriv(aux)) {
9289 struct bpf_jit_poke_descriptor desc = {
9290 .reason = BPF_POKE_REASON_TAIL_CALL,
9291 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
9292 .tail_call.key = bpf_map_key_immediate(aux),
9295 ret = bpf_jit_add_poke_descriptor(prog, &desc);
9296 if (ret < 0) {
9297 verbose(env, "adding tail call poke descriptor failed\n");
9298 return ret;
9301 insn->imm = ret + 1;
9302 continue;
9305 if (!bpf_map_ptr_unpriv(aux))
9306 continue;
9308 /* instead of changing every JIT dealing with tail_call
9309 * emit two extra insns:
9310 * if (index >= max_entries) goto out;
9311 * index &= array->index_mask;
9312 * to avoid out-of-bounds cpu speculation
9314 if (bpf_map_ptr_poisoned(aux)) {
9315 verbose(env, "tail_call abusing map_ptr\n");
9316 return -EINVAL;
9319 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
9320 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
9321 map_ptr->max_entries, 2);
9322 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
9323 container_of(map_ptr,
9324 struct bpf_array,
9325 map)->index_mask);
9326 insn_buf[2] = *insn;
9327 cnt = 3;
9328 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9329 if (!new_prog)
9330 return -ENOMEM;
9332 delta += cnt - 1;
9333 env->prog = prog = new_prog;
9334 insn = new_prog->insnsi + i + delta;
9335 continue;
9338 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
9339 * and other inlining handlers are currently limited to 64 bit
9340 * only.
9341 */
9342 if (prog->jit_requested && BITS_PER_LONG == 64 &&
9343 (insn->imm == BPF_FUNC_map_lookup_elem ||
9344 insn->imm == BPF_FUNC_map_update_elem ||
9345 insn->imm == BPF_FUNC_map_delete_elem ||
9346 insn->imm == BPF_FUNC_map_push_elem ||
9347 insn->imm == BPF_FUNC_map_pop_elem ||
9348 insn->imm == BPF_FUNC_map_peek_elem)) {
9349 aux = &env->insn_aux_data[i + delta];
9350 if (bpf_map_ptr_poisoned(aux))
9351 goto patch_call_imm;
9353 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
9354 ops = map_ptr->ops;
9355 if (insn->imm == BPF_FUNC_map_lookup_elem &&
9356 ops->map_gen_lookup) {
9357 cnt = ops->map_gen_lookup(map_ptr, insn_buf);
9358 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
9359 verbose(env, "bpf verifier is misconfigured\n");
9360 return -EINVAL;
9361 }
9363 new_prog = bpf_patch_insn_data(env, i + delta,
9364 insn_buf, cnt);
9365 if (!new_prog)
9366 return -ENOMEM;
9368 delta += cnt - 1;
9369 env->prog = prog = new_prog;
9370 insn = new_prog->insnsi + i + delta;
9371 continue;
9372 }
9374 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
9375 (void *(*)(struct bpf_map *map, void *key))NULL));
9376 BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
9377 (int (*)(struct bpf_map *map, void *key))NULL));
9378 BUILD_BUG_ON(!__same_type(ops->map_update_elem,
9379 (int (*)(struct bpf_map *map, void *key, void *value,
9380 u64 flags))NULL));
9381 BUILD_BUG_ON(!__same_type(ops->map_push_elem,
9382 (int (*)(struct bpf_map *map, void *value,
9383 u64 flags))NULL));
9384 BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
9385 (int (*)(struct bpf_map *map, void *value))NULL));
9386 BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
9387 (int (*)(struct bpf_map *map, void *value))NULL));
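/* The BUILD_BUG_ON()s above pin down the map ops signatures assumed here:
 * the generic helper call is rewritten into a direct call to the map's own
 * implementation by pointing insn->imm at that function, expressed relative
 * to __bpf_call_base.
 */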
9389 switch (insn->imm) {
9390 case BPF_FUNC_map_lookup_elem:
9391 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
9392 __bpf_call_base;
9393 continue;
9394 case BPF_FUNC_map_update_elem:
9395 insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
9396 __bpf_call_base;
9397 continue;
9398 case BPF_FUNC_map_delete_elem:
9399 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
9400 __bpf_call_base;
9401 continue;
9402 case BPF_FUNC_map_push_elem:
9403 insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
9404 __bpf_call_base;
9405 continue;
9406 case BPF_FUNC_map_pop_elem:
9407 insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
9408 __bpf_call_base;
9409 continue;
9410 case BPF_FUNC_map_peek_elem:
9411 insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
9412 __bpf_call_base;
9413 continue;
9414 }
9416 goto patch_call_imm;
9417 }
9419 patch_call_imm:
9420 fn = env->ops->get_func_proto(insn->imm, env->prog);
9421 /* all functions that have a prototype and that the verifier
9422 * allowed programs to call must be real in-kernel functions
9423 */
9424 if (!fn->func) {
9425 verbose(env,
9426 "kernel subsystem misconfigured func %s#%d\n",
9427 func_id_name(insn->imm), insn->imm);
9428 return -EFAULT;
9429 }
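/* For all remaining helpers, patch the call's imm from the helper id to the
 * helper function's offset from __bpf_call_base.
 */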
9430 insn->imm = fn->func - __bpf_call_base;
9431 }
9433 /* Since poke tab is now finalized, publish aux to tracker. */
9434 for (i = 0; i < prog->aux->size_poke_tab; i++) {
9435 map_ptr = prog->aux->poke_tab[i].tail_call.map;
9436 if (!map_ptr->ops->map_poke_track ||
9437 !map_ptr->ops->map_poke_untrack ||
9438 !map_ptr->ops->map_poke_run) {
9439 verbose(env, "bpf verifier is misconfigured\n");
9440 return -EINVAL;
9441 }
9443 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
9444 if (ret < 0) {
9445 verbose(env, "tracking tail call prog failed\n");
9446 return ret;
9447 }
9448 }
9450 return 0;
9451 }
9453 static void free_states(struct bpf_verifier_env *env)
9454 {
9455 struct bpf_verifier_state_list *sl, *sln;
9456 int i;
9458 sl = env->free_list;
9459 while (sl) {
9460 sln = sl->next;
9461 free_verifier_state(&sl->state, false);
9462 kfree(sl);
9463 sl = sln;
9464 }
9466 if (!env->explored_states)
9467 return;
9469 for (i = 0; i < state_htab_size(env); i++) {
9470 sl = env->explored_states[i];
9472 while (sl) {
9473 sln = sl->next;
9474 free_verifier_state(&sl->state, false);
9475 kfree(sl);
9476 sl = sln;
9477 }
9478 }
9480 kvfree(env->explored_states);
9481 }
9483 static void print_verification_stats(struct bpf_verifier_env *env)
9484 {
9485 int i;
9487 if (env->log.level & BPF_LOG_STATS) {
9488 verbose(env, "verification time %lld usec\n",
9489 div_u64(env->verification_time, 1000));
9490 verbose(env, "stack depth ");
9491 for (i = 0; i < env->subprog_cnt; i++) {
9492 u32 depth = env->subprog_info[i].stack_depth;
9494 verbose(env, "%d", depth);
9495 if (i + 1 < env->subprog_cnt)
9496 verbose(env, "+");
9497 }
9498 verbose(env, "\n");
9499 }
9500 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
9501 "total_states %d peak_states %d mark_read %d\n",
9502 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
9503 env->max_states_per_insn, env->total_states,
9504 env->peak_states, env->longest_mark_read_walk);
9505 }
9507 static int check_attach_btf_id(struct bpf_verifier_env *env)
9508 {
9509 struct bpf_prog *prog = env->prog;
9510 struct bpf_prog *tgt_prog = prog->aux->linked_prog;
9511 u32 btf_id = prog->aux->attach_btf_id;
9512 const char prefix[] = "btf_trace_";
9513 int ret = 0, subprog = -1, i;
9514 struct bpf_trampoline *tr;
9515 const struct btf_type *t;
9516 bool conservative = true;
9517 const char *tname;
9518 struct btf *btf;
9519 long addr;
9520 u64 key;
9522 if (prog->type != BPF_PROG_TYPE_TRACING)
9523 return 0;
9525 if (!btf_id) {
9526 verbose(env, "Tracing programs must provide btf_id\n");
9527 return -EINVAL;
9528 }
9529 btf = bpf_prog_get_target_btf(prog);
9530 if (!btf) {
9531 verbose(env,
9532 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
9533 return -EINVAL;
9534 }
9535 t = btf_type_by_id(btf, btf_id);
9536 if (!t) {
9537 verbose(env, "attach_btf_id %u is invalid\n", btf_id);
9538 return -EINVAL;
9539 }
9540 tname = btf_name_by_offset(btf, t->name_off);
9541 if (!tname) {
9542 verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id);
9543 return -EINVAL;
9544 }
9545 if (tgt_prog) {
9546 struct bpf_prog_aux *aux = tgt_prog->aux;
9548 for (i = 0; i < aux->func_info_cnt; i++)
9549 if (aux->func_info[i].type_id == btf_id) {
9550 subprog = i;
9551 break;
9552 }
9553 if (subprog == -1) {
9554 verbose(env, "Subprog %s doesn't exist\n", tname);
9555 return -EINVAL;
9556 }
9557 conservative = aux->func_info_aux[subprog].unreliable;
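/* Trampoline key: target prog id in the upper 32 bits, the subprog's BTF
 * type id in the lower 32; vmlinux targets use the BTF id alone (see the
 * else branch below).
 */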
9558 key = ((u64)aux->id) << 32 | btf_id;
9559 } else {
9560 key = btf_id;
9561 }
9563 switch (prog->expected_attach_type) {
9564 case BPF_TRACE_RAW_TP:
9565 if (tgt_prog) {
9566 verbose(env,
9567 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
9568 return -EINVAL;
9569 }
9570 if (!btf_type_is_typedef(t)) {
9571 verbose(env, "attach_btf_id %u is not a typedef\n",
9572 btf_id);
9573 return -EINVAL;
9574 }
9575 if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
9576 verbose(env, "attach_btf_id %u points to wrong type name %s\n",
9577 btf_id, tname);
9578 return -EINVAL;
9579 }
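/* Skip the "btf_trace_" prefix so the attach name recorded below is the
 * bare tracepoint name.
 */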
9580 tname += sizeof(prefix) - 1;
9581 t = btf_type_by_id(btf, t->type);
9582 if (!btf_type_is_ptr(t))
9583 /* should never happen in valid vmlinux build */
9584 return -EINVAL;
9585 t = btf_type_by_id(btf, t->type);
9586 if (!btf_type_is_func_proto(t))
9587 /* should never happen in valid vmlinux build */
9588 return -EINVAL;
9590 /* remember two read-only pointers that are valid for
9591 * the lifetime of the kernel
9592 */
9593 prog->aux->attach_func_name = tname;
9594 prog->aux->attach_func_proto = t;
9595 prog->aux->attach_btf_trace = true;
9596 return 0;
9597 case BPF_TRACE_FENTRY:
9598 case BPF_TRACE_FEXIT:
9599 if (!btf_type_is_func(t)) {
9600 verbose(env, "attach_btf_id %u is not a function\n",
9601 btf_id);
9602 return -EINVAL;
9603 }
9604 t = btf_type_by_id(btf, t->type);
9605 if (!btf_type_is_func_proto(t))
9606 return -EINVAL;
9607 tr = bpf_trampoline_lookup(key);
9608 if (!tr)
9609 return -ENOMEM;
9610 prog->aux->attach_func_name = tname;
9611 /* t is either vmlinux type or another program's type */
9612 prog->aux->attach_func_proto = t;
9613 mutex_lock(&tr->mutex);
9614 if (tr->func.addr) {
9615 prog->aux->trampoline = tr;
9616 goto out;
9617 }
9618 if (tgt_prog && conservative) {
9619 prog->aux->attach_func_proto = NULL;
9620 t = NULL;
9621 }
9622 ret = btf_distill_func_proto(&env->log, btf, t,
9623 tname, &tr->func.model);
9624 if (ret < 0)
9625 goto out;
9626 if (tgt_prog) {
9627 if (!tgt_prog->jited) {
9628 /* for now */
9629 verbose(env, "Can trace only JITed BPF progs\n");
9630 ret = -EINVAL;
9631 goto out;
9632 }
9633 if (tgt_prog->type == BPF_PROG_TYPE_TRACING) {
9634 /* prevent cycles */
9635 verbose(env, "Cannot recursively attach\n");
9636 ret = -EINVAL;
9637 goto out;
9638 }
9639 addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
9640 } else {
9641 addr = kallsyms_lookup_name(tname);
9642 if (!addr) {
9643 verbose(env,
9644 "The address of function %s cannot be found\n",
9645 tname);
9646 ret = -ENOENT;
9647 goto out;
9648 }
9649 }
9650 tr->func.addr = (void *)addr;
9651 prog->aux->trampoline = tr;
9652 out:
9653 mutex_unlock(&tr->mutex);
9654 if (ret)
9655 bpf_trampoline_put(tr);
9656 return ret;
9657 default:
9658 return -EINVAL;
9659 }
9660 }
9662 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
9663 union bpf_attr __user *uattr)
9664 {
9665 u64 start_time = ktime_get_ns();
9666 struct bpf_verifier_env *env;
9667 struct bpf_verifier_log *log;
9668 int i, len, ret = -EINVAL;
9669 bool is_priv;
9671 /* no program is valid */
9672 if (ARRAY_SIZE(bpf_verifier_ops) == 0)
9673 return -EINVAL;
9675 /* 'struct bpf_verifier_env' can be global, but since it's not small,
9676 * allocate/free it every time bpf_check() is called
9677 */
9678 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
9679 if (!env)
9680 return -ENOMEM;
9681 log = &env->log;
9683 len = (*prog)->len;
9684 env->insn_aux_data =
9685 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
9686 ret = -ENOMEM;
9687 if (!env->insn_aux_data)
9688 goto err_free_env;
9689 for (i = 0; i < len; i++)
9690 env->insn_aux_data[i].orig_idx = i;
9691 env->prog = *prog;
9692 env->ops = bpf_verifier_ops[env->prog->type];
9693 is_priv = capable(CAP_SYS_ADMIN);
9695 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
9696 mutex_lock(&bpf_verifier_lock);
9697 if (!btf_vmlinux)
9698 btf_vmlinux = btf_parse_vmlinux();
9699 mutex_unlock(&bpf_verifier_lock);
9700 }
9702 /* grab the mutex to protect a few globals used by the verifier */
9703 if (!is_priv)
9704 mutex_lock(&bpf_verifier_lock);
9706 if (attr->log_level || attr->log_buf || attr->log_size) {
9707 /* user requested verbose verifier output
9708 * and supplied buffer to store the verification trace
9709 */
9710 log->level = attr->log_level;
9711 log->ubuf = (char __user *) (unsigned long) attr->log_buf;
9712 log->len_total = attr->log_size;
9714 ret = -EINVAL;
9715 /* log attributes have to be sane */
9716 if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
9717 !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
9718 goto err_unlock;
9719 }
9721 if (IS_ERR(btf_vmlinux)) {
9722 /* Either gcc, pahole, or the kernel is broken. */
9723 verbose(env, "in-kernel BTF is malformed\n");
9724 ret = PTR_ERR(btf_vmlinux);
9725 goto skip_full_check;
9726 }
9728 ret = check_attach_btf_id(env);
9729 if (ret)
9730 goto skip_full_check;
9732 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
9733 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
9734 env->strict_alignment = true;
9735 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
9736 env->strict_alignment = false;
9738 env->allow_ptr_leaks = is_priv;
9740 if (is_priv)
9741 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
9743 ret = replace_map_fd_with_map_ptr(env);
9744 if (ret < 0)
9745 goto skip_full_check;
9747 if (bpf_prog_is_dev_bound(env->prog->aux)) {
9748 ret = bpf_prog_offload_verifier_prep(env->prog);
9749 if (ret)
9750 goto skip_full_check;
9751 }
9753 env->explored_states = kvcalloc(state_htab_size(env),
9754 sizeof(struct bpf_verifier_state_list *),
9755 GFP_USER);
9756 ret = -ENOMEM;
9757 if (!env->explored_states)
9758 goto skip_full_check;
9760 ret = check_subprogs(env);
9761 if (ret < 0)
9762 goto skip_full_check;
9764 ret = check_btf_info(env, attr, uattr);
9765 if (ret < 0)
9766 goto skip_full_check;
9768 ret = check_cfg(env);
9769 if (ret < 0)
9770 goto skip_full_check;
9772 ret = do_check(env);
9773 if (env->cur_state) {
9774 free_verifier_state(env->cur_state, true);
9775 env->cur_state = NULL;
9776 }
9778 if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
9779 ret = bpf_prog_offload_finalize(env);
9781 skip_full_check:
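/* Drain whatever states are still queued on the branch stack, then release
 * the free list and the explored-states hash table.
 */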
9782 while (!pop_stack(env, NULL, NULL));
9783 free_states(env);
9785 if (ret == 0)
9786 ret = check_max_stack_depth(env);
9788 /* instruction rewrites happen after this point */
9789 if (is_priv) {
9790 if (ret == 0)
9791 opt_hard_wire_dead_code_branches(env);
9792 if (ret == 0)
9793 ret = opt_remove_dead_code(env);
9794 if (ret == 0)
9795 ret = opt_remove_nops(env);
9796 } else {
9797 if (ret == 0)
9798 sanitize_dead_code(env);
9799 }
9801 if (ret == 0)
9802 /* program is valid, convert *(u32*)(ctx + off) accesses */
9803 ret = convert_ctx_accesses(env);
9805 if (ret == 0)
9806 ret = fixup_bpf_calls(env);
9808 /* do the 32-bit optimization after insn patching is done so the
9809 * patched insns are handled correctly.
9810 */
9811 if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
9812 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
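/* Only advertise verifier-inserted zero extension when the JIT actually
 * relies on it and the patching pass above succeeded (ret == 0).
 */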
9813 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
9814 : false;
9815 }
9817 if (ret == 0)
9818 ret = fixup_call_args(env);
9820 env->verification_time = ktime_get_ns() - start_time;
9821 print_verification_stats(env);
9823 if (log->level && bpf_verifier_log_full(log))
9824 ret = -ENOSPC;
9825 if (log->level && !log->ubuf) {
9826 ret = -EFAULT;
9827 goto err_release_maps;
9828 }
9830 if (ret == 0 && env->used_map_cnt) {
9831 /* if program passed verifier, update used_maps in bpf_prog_info */
9832 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
9833 sizeof(env->used_maps[0]),
9834 GFP_KERNEL);
9836 if (!env->prog->aux->used_maps) {
9837 ret = -ENOMEM;
9838 goto err_release_maps;
9839 }
9841 memcpy(env->prog->aux->used_maps, env->used_maps,
9842 sizeof(env->used_maps[0]) * env->used_map_cnt);
9843 env->prog->aux->used_map_cnt = env->used_map_cnt;
9845 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
9846 * bpf_ld_imm64 instructions
9847 */
9848 convert_pseudo_ld_imm64(env);
9849 }
9851 if (ret == 0)
9852 adjust_btf_func(env);
9854 err_release_maps:
9855 if (!env->prog->aux->used_maps)
9856 /* if we didn't copy map pointers into bpf_prog_info, release
9857 * them now. Otherwise free_used_maps() will release them.
9858 */
9859 release_maps(env);
9860 *prog = env->prog;
9861 err_unlock:
9862 if (!is_priv)
9863 mutex_unlock(&bpf_verifier_lock);
9864 vfree(env->insn_aux_data);
9865 err_free_env:
9866 kfree(env);
9867 return ret;
9868 }