kernel/bpf/core.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/moduleloader.h>
26 #include <linux/bpf.h>
27 #include <linux/btf.h>
28 #include <linux/frame.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <asm/unaligned.h>
37 /* Registers */
38 #define BPF_R0 regs[BPF_REG_0]
39 #define BPF_R1 regs[BPF_REG_1]
40 #define BPF_R2 regs[BPF_REG_2]
41 #define BPF_R3 regs[BPF_REG_3]
42 #define BPF_R4 regs[BPF_REG_4]
43 #define BPF_R5 regs[BPF_REG_5]
44 #define BPF_R6 regs[BPF_REG_6]
45 #define BPF_R7 regs[BPF_REG_7]
46 #define BPF_R8 regs[BPF_REG_8]
47 #define BPF_R9 regs[BPF_REG_9]
48 #define BPF_R10 regs[BPF_REG_10]
50 /* Named registers */
51 #define DST regs[insn->dst_reg]
52 #define SRC regs[insn->src_reg]
53 #define FP regs[BPF_REG_FP]
54 #define AX regs[BPF_REG_AX]
55 #define ARG1 regs[BPF_REG_ARG1]
56 #define CTX regs[BPF_REG_CTX]
57 #define IMM insn->imm
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}
105 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
107 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
108 struct bpf_prog *prog;
109 int cpu;
111 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
112 if (!prog)
113 return NULL;
115 prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
116 if (!prog->aux->stats) {
117 kfree(prog->aux);
118 vfree(prog);
119 return NULL;
122 for_each_possible_cpu(cpu) {
123 struct bpf_prog_stats *pstats;
125 pstats = per_cpu_ptr(prog->aux->stats, cpu);
126 u64_stats_init(&pstats->syncp);
128 return prog;
130 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
132 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
134 if (!prog->aux->nr_linfo || !prog->jit_requested)
135 return 0;
137 prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
138 sizeof(*prog->aux->jited_linfo),
139 GFP_KERNEL | __GFP_NOWARN);
140 if (!prog->aux->jited_linfo)
141 return -ENOMEM;
143 return 0;
146 void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
148 kfree(prog->aux->jited_linfo);
149 prog->aux->jited_linfo = NULL;
152 void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
154 if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
155 bpf_prog_free_jited_linfo(prog);
/* The jit engine is responsible to provide an array
 * for insn_off to the jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off.  Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the last byte of the jited insn.
 *
 * Hence, with
 * insn_start:
 *	The first bpf insn off of the prog.  The insn off
 *	here is relative to the main prog.
 *	e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *	The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
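/* Worked example (hypothetical numbers): for a subprog with insn_start = 10
 * and linfo insn_offs of 10, 12 and 15, the formula above yields
 *	jited_linfo[0] = bpf_func,
 *	jited_linfo[1] = bpf_func + insn_to_jit_off[1],
 *	jited_linfo[2] = bpf_func + insn_to_jit_off[4].
 */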
182 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
183 const u32 *insn_to_jit_off)
185 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
186 const struct bpf_line_info *linfo;
187 void **jited_linfo;
189 if (!prog->aux->jited_linfo)
190 /* Userspace did not provide linfo */
191 return;
193 linfo_idx = prog->aux->linfo_idx;
194 linfo = &prog->aux->linfo[linfo_idx];
195 insn_start = linfo[0].insn_off;
196 insn_end = insn_start + prog->len;
198 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
199 jited_linfo[0] = prog->bpf_func;
201 nr_linfo = prog->aux->nr_linfo - linfo_idx;
203 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
207 jited_linfo[i] = prog->bpf_func +
208 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
211 void bpf_prog_free_linfo(struct bpf_prog *prog)
213 bpf_prog_free_jited_linfo(prog);
214 kvfree(prog->aux->linfo);
217 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
218 gfp_t gfp_extra_flags)
220 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
221 struct bpf_prog *fp;
222 u32 pages, delta;
223 int ret;
225 size = round_up(size, PAGE_SIZE);
226 pages = size / PAGE_SIZE;
227 if (pages <= fp_old->pages)
228 return fp_old;
230 delta = pages - fp_old->pages;
231 ret = __bpf_prog_charge(fp_old->aux->user, delta);
232 if (ret)
233 return NULL;
235 fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
236 if (fp == NULL) {
237 __bpf_prog_uncharge(fp_old->aux->user, delta);
238 } else {
239 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
240 fp->pages = pages;
241 fp->aux->prog = fp;
		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
246 fp_old->aux = NULL;
247 __bpf_prog_free(fp_old);
250 return fp;
253 void __bpf_prog_free(struct bpf_prog *fp)
255 if (fp->aux) {
256 free_percpu(fp->aux->stats);
257 kfree(fp->aux->poke_tab);
258 kfree(fp->aux);
260 vfree(fp);
263 int bpf_prog_calc_tag(struct bpf_prog *fp)
265 const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
266 u32 raw_size = bpf_prog_tag_scratch_size(fp);
267 u32 digest[SHA_DIGEST_WORDS];
268 u32 ws[SHA_WORKSPACE_WORDS];
269 u32 i, bsize, psize, blocks;
270 struct bpf_insn *dst;
271 bool was_ld_map;
272 u8 *raw, *todo;
273 __be32 *result;
274 __be64 *bits;
276 raw = vmalloc(raw_size);
277 if (!raw)
278 return -ENOMEM;
280 sha_init(digest);
281 memset(ws, 0, sizeof(ws));
	/* We need to take out the map fd for the digest calculation
	 * since they are unstable from user space side.
	 */
286 dst = (void *)raw;
287 for (i = 0, was_ld_map = false; i < fp->len; i++) {
288 dst[i] = fp->insnsi[i];
289 if (!was_ld_map &&
290 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
291 (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
292 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
293 was_ld_map = true;
294 dst[i].imm = 0;
295 } else if (was_ld_map &&
296 dst[i].code == 0 &&
297 dst[i].dst_reg == 0 &&
298 dst[i].src_reg == 0 &&
299 dst[i].off == 0) {
300 was_ld_map = false;
301 dst[i].imm = 0;
302 } else {
303 was_ld_map = false;
307 psize = bpf_prog_insn_size(fp);
308 memset(&raw[psize], 0, raw_size - psize);
309 raw[psize++] = 0x80;
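	/* Standard SHA-1 message padding: the 0x80 byte above terminates the
	 * data, the buffer stays zero-filled, and the message length in bits
	 * is stored in the final 64 bits below before running sha_transform().
	 */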
311 bsize = round_up(psize, SHA_MESSAGE_BYTES);
312 blocks = bsize / SHA_MESSAGE_BYTES;
313 todo = raw;
314 if (bsize - psize >= sizeof(__be64)) {
315 bits = (__be64 *)(todo + bsize - sizeof(__be64));
316 } else {
317 bits = (__be64 *)(todo + bsize + bits_offset);
318 blocks++;
320 *bits = cpu_to_be64((psize - 1) << 3);
322 while (blocks--) {
323 sha_transform(digest, todo, ws);
324 todo += SHA_MESSAGE_BYTES;
327 result = (__force __be32 *)digest;
328 for (i = 0; i < SHA_DIGEST_WORDS; i++)
329 result[i] = cpu_to_be32(digest[i]);
330 memcpy(fp->tag, result, sizeof(fp->tag));
332 vfree(raw);
333 return 0;
336 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
337 s32 end_new, s32 curr, const bool probe_pass)
339 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
340 s32 delta = end_new - end_old;
341 s64 imm = insn->imm;
343 if (curr < pos && curr + imm + 1 >= end_old)
344 imm += delta;
345 else if (curr >= end_new && curr + imm + 1 < end_new)
346 imm -= delta;
347 if (imm < imm_min || imm > imm_max)
348 return -ERANGE;
349 if (!probe_pass)
350 insn->imm = imm;
351 return 0;
354 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
355 s32 end_new, s32 curr, const bool probe_pass)
357 const s32 off_min = S16_MIN, off_max = S16_MAX;
358 s32 delta = end_new - end_old;
359 s32 off = insn->off;
361 if (curr < pos && curr + off + 1 >= end_old)
362 off += delta;
363 else if (curr >= end_new && curr + off + 1 < end_new)
364 off -= delta;
365 if (off < off_min || off > off_max)
366 return -ERANGE;
367 if (!probe_pass)
368 insn->off = off;
369 return 0;
372 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
373 s32 end_new, const bool probe_pass)
375 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
376 struct bpf_insn *insn = prog->insnsi;
377 int ret = 0;
379 for (i = 0; i < insn_cnt; i++, insn++) {
380 u8 code;
		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
386 if (probe_pass && i == pos) {
387 i = end_new;
388 insn = prog->insnsi + end_old;
390 code = insn->code;
391 if ((BPF_CLASS(code) != BPF_JMP &&
392 BPF_CLASS(code) != BPF_JMP32) ||
393 BPF_OP(code) == BPF_EXIT)
394 continue;
395 /* Adjust offset of jmps if we cross patch boundaries. */
396 if (BPF_OP(code) == BPF_CALL) {
397 if (insn->src_reg != BPF_PSEUDO_CALL)
398 continue;
399 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
400 end_new, i, probe_pass);
401 } else {
402 ret = bpf_adj_delta_to_off(insn, pos, end_old,
403 end_new, i, probe_pass);
405 if (ret)
406 break;
409 return ret;
412 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
414 struct bpf_line_info *linfo;
415 u32 i, nr_linfo;
417 nr_linfo = prog->aux->nr_linfo;
418 if (!nr_linfo || !delta)
419 return;
421 linfo = prog->aux->linfo;
423 for (i = 0; i < nr_linfo; i++)
424 if (off < linfo[i].insn_off)
425 break;
427 /* Push all off < linfo[i].insn_off by delta */
428 for (; i < nr_linfo; i++)
429 linfo[i].insn_off += delta;
432 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
433 const struct bpf_insn *patch, u32 len)
435 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
436 const u32 cnt_max = S16_MAX;
437 struct bpf_prog *prog_adj;
438 int err;
440 /* Since our patchlet doesn't expand the image, we're done. */
441 if (insn_delta == 0) {
442 memcpy(prog->insnsi + off, patch, sizeof(*patch));
443 return prog;
446 insn_adj_cnt = prog->len + insn_delta;
	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
453 if (insn_adj_cnt > cnt_max &&
454 (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
455 return ERR_PTR(err);
	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
461 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
462 GFP_USER);
463 if (!prog_adj)
464 return ERR_PTR(-ENOMEM);
466 prog_adj->len = insn_adj_cnt;
	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
476 insn_rest = insn_adj_cnt - off - len;
478 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
479 sizeof(*patch) * insn_rest);
480 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
	/* We are guaranteed not to fail at this point, otherwise the ship
	 * has sailed and there is no way back to the original state. An
	 * overflow cannot happen at this point.
	 */
486 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
488 bpf_adj_linfo(prog_adj, off, insn_delta);
490 return prog_adj;
493 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
	/* Branch offsets can't overflow when program is shrinking, no need
	 * to call bpf_adj_branches(..., true) here
	 */
498 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
499 sizeof(struct bpf_insn) * (prog->len - off - cnt));
500 prog->len -= cnt;
502 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
505 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
507 int i;
509 for (i = 0; i < fp->aux->func_cnt; i++)
510 bpf_prog_kallsyms_del(fp->aux->func[i]);
513 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
515 bpf_prog_kallsyms_del_subprogs(fp);
516 bpf_prog_kallsyms_del(fp);
519 #ifdef CONFIG_BPF_JIT
520 /* All BPF JIT sysctl knobs here. */
521 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
522 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
523 int bpf_jit_harden __read_mostly;
524 long bpf_jit_limit __read_mostly;
526 static __always_inline void
527 bpf_get_prog_addr_region(const struct bpf_prog *prog,
528 unsigned long *symbol_start,
529 unsigned long *symbol_end)
531 const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
532 unsigned long addr = (unsigned long)hdr;
534 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
536 *symbol_start = addr;
537 *symbol_end = addr + hdr->pages * PAGE_SIZE;
540 void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
542 const char *end = sym + KSYM_NAME_LEN;
543 const struct btf_type *type;
544 const char *func_name;
	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We should need +1 for the '_' preceding
		      * the name.  However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);
557 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
558 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
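	/* The resulting symbol is "bpf_prog_<tag>", optionally followed by
	 * "_<name>" where <name> comes from BTF func info when available,
	 * otherwise from prog->aux->name (see below).
	 */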
560 /* prog->aux->name will be ignored if full btf name is available */
561 if (prog->aux->func_info_cnt) {
562 type = btf_type_by_id(prog->aux->btf,
563 prog->aux->func_info[prog->aux->func_idx].type_id);
564 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
565 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
566 return;
569 if (prog->aux->name[0])
570 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
571 else
572 *sym = 0;
575 static __always_inline unsigned long
576 bpf_get_prog_addr_start(struct latch_tree_node *n)
578 unsigned long symbol_start, symbol_end;
579 const struct bpf_prog_aux *aux;
581 aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
582 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
584 return symbol_start;
587 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
588 struct latch_tree_node *b)
590 return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
593 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
595 unsigned long val = (unsigned long)key;
596 unsigned long symbol_start, symbol_end;
597 const struct bpf_prog_aux *aux;
599 aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
600 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
602 if (val < symbol_start)
603 return -1;
604 if (val >= symbol_end)
605 return 1;
607 return 0;
610 static const struct latch_tree_ops bpf_tree_ops = {
611 .less = bpf_tree_less,
612 .comp = bpf_tree_comp,
615 static DEFINE_SPINLOCK(bpf_lock);
616 static LIST_HEAD(bpf_kallsyms);
617 static struct latch_tree_root bpf_tree __cacheline_aligned;
619 static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
621 WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
622 list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
623 latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
626 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
628 if (list_empty(&aux->ksym_lnode))
629 return;
631 latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
632 list_del_rcu(&aux->ksym_lnode);
635 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
637 return fp->jited && !bpf_prog_was_classic(fp);
640 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
642 return list_empty(&fp->aux->ksym_lnode) ||
643 fp->aux->ksym_lnode.prev == LIST_POISON2;
646 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
648 if (!bpf_prog_kallsyms_candidate(fp) ||
649 !capable(CAP_SYS_ADMIN))
650 return;
652 spin_lock_bh(&bpf_lock);
653 bpf_prog_ksym_node_add(fp->aux);
654 spin_unlock_bh(&bpf_lock);
657 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
659 if (!bpf_prog_kallsyms_candidate(fp))
660 return;
662 spin_lock_bh(&bpf_lock);
663 bpf_prog_ksym_node_del(fp->aux);
664 spin_unlock_bh(&bpf_lock);
667 static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
669 struct latch_tree_node *n;
671 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
672 return n ?
673 container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
674 NULL;
677 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
678 unsigned long *off, char *sym)
680 unsigned long symbol_start, symbol_end;
681 struct bpf_prog *prog;
682 char *ret = NULL;
684 rcu_read_lock();
685 prog = bpf_prog_kallsyms_find(addr);
686 if (prog) {
687 bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
688 bpf_get_prog_name(prog, sym);
690 ret = sym;
691 if (size)
692 *size = symbol_end - symbol_start;
693 if (off)
694 *off = addr - symbol_start;
696 rcu_read_unlock();
698 return ret;
701 bool is_bpf_text_address(unsigned long addr)
703 bool ret;
705 rcu_read_lock();
706 ret = bpf_prog_kallsyms_find(addr) != NULL;
707 rcu_read_unlock();
709 return ret;
712 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
714 const struct exception_table_entry *e = NULL;
715 struct bpf_prog *prog;
717 rcu_read_lock();
718 prog = bpf_prog_kallsyms_find(addr);
719 if (!prog)
720 goto out;
721 if (!prog->aux->num_exentries)
722 goto out;
724 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
725 out:
726 rcu_read_unlock();
727 return e;
730 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
731 char *sym)
733 struct bpf_prog_aux *aux;
734 unsigned int it = 0;
735 int ret = -ERANGE;
737 if (!bpf_jit_kallsyms_enabled())
738 return ret;
740 rcu_read_lock();
741 list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
742 if (it++ != symnum)
743 continue;
745 bpf_get_prog_name(aux->prog, sym);
747 *value = (unsigned long)aux->prog->bpf_func;
748 *type = BPF_SYM_ELF_TYPE;
750 ret = 0;
751 break;
753 rcu_read_unlock();
755 return ret;
758 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
759 struct bpf_jit_poke_descriptor *poke)
761 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
762 static const u32 poke_tab_max = 1024;
763 u32 slot = prog->aux->size_poke_tab;
764 u32 size = slot + 1;
766 if (size > poke_tab_max)
767 return -ENOSPC;
768 if (poke->ip || poke->ip_stable || poke->adj_off)
769 return -EINVAL;
771 switch (poke->reason) {
772 case BPF_POKE_REASON_TAIL_CALL:
773 if (!poke->tail_call.map)
774 return -EINVAL;
775 break;
776 default:
777 return -EINVAL;
780 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
781 if (!tab)
782 return -ENOMEM;
784 memcpy(&tab[slot], poke, sizeof(*poke));
785 prog->aux->size_poke_tab = size;
786 prog->aux->poke_tab = tab;
788 return slot;
791 static atomic_long_t bpf_jit_current;
/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
797 u64 __weak bpf_jit_alloc_exec_limit(void)
799 #if defined(MODULES_VADDR)
800 return MODULES_END - MODULES_VADDR;
801 #else
802 return VMALLOC_END - VMALLOC_START;
803 #endif
806 static int __init bpf_jit_charge_init(void)
808 /* Only used as heuristic here to derive limit. */
809 bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
810 PAGE_SIZE), LONG_MAX);
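	/* i.e. roughly a quarter of the arch's executable allocation space,
	 * rounded up to page size and clamped so it fits in a long.
	 */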
811 return 0;
813 pure_initcall(bpf_jit_charge_init);
815 static int bpf_jit_charge_modmem(u32 pages)
817 if (atomic_long_add_return(pages, &bpf_jit_current) >
818 (bpf_jit_limit >> PAGE_SHIFT)) {
819 if (!capable(CAP_SYS_ADMIN)) {
820 atomic_long_sub(pages, &bpf_jit_current);
821 return -EPERM;
825 return 0;
828 static void bpf_jit_uncharge_modmem(u32 pages)
830 atomic_long_sub(pages, &bpf_jit_current);
833 void *__weak bpf_jit_alloc_exec(unsigned long size)
835 return module_alloc(size);
838 void __weak bpf_jit_free_exec(void *addr)
840 module_memfree(addr);
843 struct bpf_binary_header *
844 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
845 unsigned int alignment,
846 bpf_jit_fill_hole_t bpf_fill_ill_insns)
848 struct bpf_binary_header *hdr;
849 u32 size, hole, start, pages;
851 WARN_ON_ONCE(!is_power_of_2(alignment) ||
852 alignment > BPF_IMAGE_ALIGNMENT);
	/* Most of BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
858 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
859 pages = size / PAGE_SIZE;
861 if (bpf_jit_charge_modmem(pages))
862 return NULL;
863 hdr = bpf_jit_alloc_exec(size);
864 if (!hdr) {
865 bpf_jit_uncharge_modmem(pages);
866 return NULL;
869 /* Fill space with illegal/arch-dep instructions. */
870 bpf_fill_ill_insns(hdr, size);
872 hdr->pages = pages;
873 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
874 PAGE_SIZE - sizeof(*hdr));
875 start = (get_random_int() % hole) & ~(alignment - 1);
877 /* Leave a random number of instructions before BPF code. */
878 *image_ptr = &hdr->image[start];
880 return hdr;
883 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
885 u32 pages = hdr->pages;
887 bpf_jit_free_exec(hdr);
888 bpf_jit_uncharge_modmem(pages);
/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
895 void __weak bpf_jit_free(struct bpf_prog *fp)
897 if (fp->jited) {
898 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
900 bpf_jit_binary_free(hdr);
902 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
905 bpf_prog_unlock_free(fp);
908 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
909 const struct bpf_insn *insn, bool extra_pass,
910 u64 *func_addr, bool *func_addr_fixed)
912 s16 off = insn->off;
913 s32 imm = insn->imm;
914 u8 *addr;
916 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
917 if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
922 if (!extra_pass)
923 addr = NULL;
924 else if (prog->aux->func &&
925 off >= 0 && off < prog->aux->func_cnt)
926 addr = (u8 *)prog->aux->func[off]->bpf_func;
927 else
928 return -EINVAL;
929 } else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
935 addr = (u8 *)__bpf_call_base + imm;
938 *func_addr = (unsigned long)addr;
939 return 0;
942 static int bpf_jit_blind_insn(const struct bpf_insn *from,
943 const struct bpf_insn *aux,
944 struct bpf_insn *to_buff,
945 bool emit_zext)
947 struct bpf_insn *to = to_buff;
948 u32 imm_rnd = get_random_int();
949 s16 off;
951 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
952 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * The interpreter uses AX in some occasions as a local temporary
	 * register e.g. in DIV or MOD instructions.
	 *
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
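	/* Illustrative example of the rewrites below: an instruction such as
	 * BPF_ALU64_IMM(BPF_ADD, R1, 42) is emitted as
	 *	AX  = imm_rnd ^ 42
	 *	AX ^= imm_rnd
	 *	R1 += AX
	 * so the user-supplied constant never appears verbatim in the JITed
	 * image.
	 */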
971 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
972 goto out;
974 if (from->imm == 0 &&
975 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
976 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
977 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
978 goto out;
981 switch (from->code) {
982 case BPF_ALU | BPF_ADD | BPF_K:
983 case BPF_ALU | BPF_SUB | BPF_K:
984 case BPF_ALU | BPF_AND | BPF_K:
985 case BPF_ALU | BPF_OR | BPF_K:
986 case BPF_ALU | BPF_XOR | BPF_K:
987 case BPF_ALU | BPF_MUL | BPF_K:
988 case BPF_ALU | BPF_MOV | BPF_K:
989 case BPF_ALU | BPF_DIV | BPF_K:
990 case BPF_ALU | BPF_MOD | BPF_K:
991 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
992 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
993 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
994 break;
996 case BPF_ALU64 | BPF_ADD | BPF_K:
997 case BPF_ALU64 | BPF_SUB | BPF_K:
998 case BPF_ALU64 | BPF_AND | BPF_K:
999 case BPF_ALU64 | BPF_OR | BPF_K:
1000 case BPF_ALU64 | BPF_XOR | BPF_K:
1001 case BPF_ALU64 | BPF_MUL | BPF_K:
1002 case BPF_ALU64 | BPF_MOV | BPF_K:
1003 case BPF_ALU64 | BPF_DIV | BPF_K:
1004 case BPF_ALU64 | BPF_MOD | BPF_K:
1005 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1006 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1007 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1008 break;
1010 case BPF_JMP | BPF_JEQ | BPF_K:
1011 case BPF_JMP | BPF_JNE | BPF_K:
1012 case BPF_JMP | BPF_JGT | BPF_K:
1013 case BPF_JMP | BPF_JLT | BPF_K:
1014 case BPF_JMP | BPF_JGE | BPF_K:
1015 case BPF_JMP | BPF_JLE | BPF_K:
1016 case BPF_JMP | BPF_JSGT | BPF_K:
1017 case BPF_JMP | BPF_JSLT | BPF_K:
1018 case BPF_JMP | BPF_JSGE | BPF_K:
1019 case BPF_JMP | BPF_JSLE | BPF_K:
1020 case BPF_JMP | BPF_JSET | BPF_K:
1021 /* Accommodate for extra offset in case of a backjump. */
1022 off = from->off;
1023 if (off < 0)
1024 off -= 2;
1025 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1026 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1027 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1028 break;
1030 case BPF_JMP32 | BPF_JEQ | BPF_K:
1031 case BPF_JMP32 | BPF_JNE | BPF_K:
1032 case BPF_JMP32 | BPF_JGT | BPF_K:
1033 case BPF_JMP32 | BPF_JLT | BPF_K:
1034 case BPF_JMP32 | BPF_JGE | BPF_K:
1035 case BPF_JMP32 | BPF_JLE | BPF_K:
1036 case BPF_JMP32 | BPF_JSGT | BPF_K:
1037 case BPF_JMP32 | BPF_JSLT | BPF_K:
1038 case BPF_JMP32 | BPF_JSGE | BPF_K:
1039 case BPF_JMP32 | BPF_JSLE | BPF_K:
1040 case BPF_JMP32 | BPF_JSET | BPF_K:
1041 /* Accommodate for extra offset in case of a backjump. */
1042 off = from->off;
1043 if (off < 0)
1044 off -= 2;
1045 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1046 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1047 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1048 off);
1049 break;
1051 case BPF_LD | BPF_IMM | BPF_DW:
1052 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1053 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1054 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1055 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1056 break;
1057 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1058 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1059 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1060 if (emit_zext)
1061 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1062 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1063 break;
1065 case BPF_ST | BPF_MEM | BPF_DW:
1066 case BPF_ST | BPF_MEM | BPF_W:
1067 case BPF_ST | BPF_MEM | BPF_H:
1068 case BPF_ST | BPF_MEM | BPF_B:
1069 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1070 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1071 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1072 break;
1074 out:
1075 return to - to_buff;
1078 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1079 gfp_t gfp_extra_flags)
1081 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1082 struct bpf_prog *fp;
1084 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
1085 if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
1090 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1093 return fp;
1096 static void bpf_prog_clone_free(struct bpf_prog *fp)
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
1105 fp->aux = NULL;
1106 __bpf_prog_free(fp);
1109 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
1114 fp->aux->prog = fp;
1115 bpf_prog_clone_free(fp_other);
1118 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1120 struct bpf_insn insn_buff[16], aux[2];
1121 struct bpf_prog *clone, *tmp;
1122 int insn_delta, insn_cnt;
1123 struct bpf_insn *insn;
1124 int i, rewritten;
1126 if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
1127 return prog;
1129 clone = bpf_prog_clone_create(prog, GFP_USER);
1130 if (!clone)
1131 return ERR_PTR(-ENOMEM);
1133 insn_cnt = clone->len;
1134 insn = clone->insnsi;
1136 for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
1141 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1142 insn[1].code == 0)
1143 memcpy(aux, insn, sizeof(aux));
1145 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1146 clone->aux->verifier_zext);
1147 if (!rewritten)
1148 continue;
1150 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1151 if (IS_ERR(tmp)) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
1156 bpf_jit_prog_release_other(prog, clone);
1157 return tmp;
1160 clone = tmp;
1161 insn_delta = rewritten - 1;
1163 /* Walk new program and skip insns we just inserted. */
1164 insn = clone->insnsi + i + insn_delta;
1165 insn_cnt += insn_delta;
1166 i += insn_delta;
1169 clone->blinded = 1;
1170 return clone;
1172 #endif /* CONFIG_BPF_JIT */
/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
1180 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1182 return 0;
1184 EXPORT_SYMBOL_GPL(__bpf_call_base);
1186 /* All UAPI available opcodes. */
1187 #define BPF_INSN_MAP(INSN_2, INSN_3) \
1188 /* 32 bit ALU operations. */ \
1189 /* Register based. */ \
1190 INSN_3(ALU, ADD, X), \
1191 INSN_3(ALU, SUB, X), \
1192 INSN_3(ALU, AND, X), \
1193 INSN_3(ALU, OR, X), \
1194 INSN_3(ALU, LSH, X), \
1195 INSN_3(ALU, RSH, X), \
1196 INSN_3(ALU, XOR, X), \
1197 INSN_3(ALU, MUL, X), \
1198 INSN_3(ALU, MOV, X), \
1199 INSN_3(ALU, ARSH, X), \
1200 INSN_3(ALU, DIV, X), \
1201 INSN_3(ALU, MOD, X), \
1202 INSN_2(ALU, NEG), \
1203 INSN_3(ALU, END, TO_BE), \
1204 INSN_3(ALU, END, TO_LE), \
1205 /* Immediate based. */ \
1206 INSN_3(ALU, ADD, K), \
1207 INSN_3(ALU, SUB, K), \
1208 INSN_3(ALU, AND, K), \
1209 INSN_3(ALU, OR, K), \
1210 INSN_3(ALU, LSH, K), \
1211 INSN_3(ALU, RSH, K), \
1212 INSN_3(ALU, XOR, K), \
1213 INSN_3(ALU, MUL, K), \
1214 INSN_3(ALU, MOV, K), \
1215 INSN_3(ALU, ARSH, K), \
1216 INSN_3(ALU, DIV, K), \
1217 INSN_3(ALU, MOD, K), \
1218 /* 64 bit ALU operations. */ \
1219 /* Register based. */ \
1220 INSN_3(ALU64, ADD, X), \
1221 INSN_3(ALU64, SUB, X), \
1222 INSN_3(ALU64, AND, X), \
1223 INSN_3(ALU64, OR, X), \
1224 INSN_3(ALU64, LSH, X), \
1225 INSN_3(ALU64, RSH, X), \
1226 INSN_3(ALU64, XOR, X), \
1227 INSN_3(ALU64, MUL, X), \
1228 INSN_3(ALU64, MOV, X), \
1229 INSN_3(ALU64, ARSH, X), \
1230 INSN_3(ALU64, DIV, X), \
1231 INSN_3(ALU64, MOD, X), \
1232 INSN_2(ALU64, NEG), \
1233 /* Immediate based. */ \
1234 INSN_3(ALU64, ADD, K), \
1235 INSN_3(ALU64, SUB, K), \
1236 INSN_3(ALU64, AND, K), \
1237 INSN_3(ALU64, OR, K), \
1238 INSN_3(ALU64, LSH, K), \
1239 INSN_3(ALU64, RSH, K), \
1240 INSN_3(ALU64, XOR, K), \
1241 INSN_3(ALU64, MUL, K), \
1242 INSN_3(ALU64, MOV, K), \
1243 INSN_3(ALU64, ARSH, K), \
1244 INSN_3(ALU64, DIV, K), \
1245 INSN_3(ALU64, MOD, K), \
1246 /* Call instruction. */ \
1247 INSN_2(JMP, CALL), \
1248 /* Exit instruction. */ \
1249 INSN_2(JMP, EXIT), \
1250 /* 32-bit Jump instructions. */ \
1251 /* Register based. */ \
1252 INSN_3(JMP32, JEQ, X), \
1253 INSN_3(JMP32, JNE, X), \
1254 INSN_3(JMP32, JGT, X), \
1255 INSN_3(JMP32, JLT, X), \
1256 INSN_3(JMP32, JGE, X), \
1257 INSN_3(JMP32, JLE, X), \
1258 INSN_3(JMP32, JSGT, X), \
1259 INSN_3(JMP32, JSLT, X), \
1260 INSN_3(JMP32, JSGE, X), \
1261 INSN_3(JMP32, JSLE, X), \
1262 INSN_3(JMP32, JSET, X), \
1263 /* Immediate based. */ \
1264 INSN_3(JMP32, JEQ, K), \
1265 INSN_3(JMP32, JNE, K), \
1266 INSN_3(JMP32, JGT, K), \
1267 INSN_3(JMP32, JLT, K), \
1268 INSN_3(JMP32, JGE, K), \
1269 INSN_3(JMP32, JLE, K), \
1270 INSN_3(JMP32, JSGT, K), \
1271 INSN_3(JMP32, JSLT, K), \
1272 INSN_3(JMP32, JSGE, K), \
1273 INSN_3(JMP32, JSLE, K), \
1274 INSN_3(JMP32, JSET, K), \
1275 /* Jump instructions. */ \
1276 /* Register based. */ \
1277 INSN_3(JMP, JEQ, X), \
1278 INSN_3(JMP, JNE, X), \
1279 INSN_3(JMP, JGT, X), \
1280 INSN_3(JMP, JLT, X), \
1281 INSN_3(JMP, JGE, X), \
1282 INSN_3(JMP, JLE, X), \
1283 INSN_3(JMP, JSGT, X), \
1284 INSN_3(JMP, JSLT, X), \
1285 INSN_3(JMP, JSGE, X), \
1286 INSN_3(JMP, JSLE, X), \
1287 INSN_3(JMP, JSET, X), \
1288 /* Immediate based. */ \
1289 INSN_3(JMP, JEQ, K), \
1290 INSN_3(JMP, JNE, K), \
1291 INSN_3(JMP, JGT, K), \
1292 INSN_3(JMP, JLT, K), \
1293 INSN_3(JMP, JGE, K), \
1294 INSN_3(JMP, JLE, K), \
1295 INSN_3(JMP, JSGT, K), \
1296 INSN_3(JMP, JSLT, K), \
1297 INSN_3(JMP, JSGE, K), \
1298 INSN_3(JMP, JSLE, K), \
1299 INSN_3(JMP, JSET, K), \
1300 INSN_2(JMP, JA), \
1301 /* Store instructions. */ \
1302 /* Register based. */ \
1303 INSN_3(STX, MEM, B), \
1304 INSN_3(STX, MEM, H), \
1305 INSN_3(STX, MEM, W), \
1306 INSN_3(STX, MEM, DW), \
1307 INSN_3(STX, XADD, W), \
1308 INSN_3(STX, XADD, DW), \
1309 /* Immediate based. */ \
1310 INSN_3(ST, MEM, B), \
1311 INSN_3(ST, MEM, H), \
1312 INSN_3(ST, MEM, W), \
1313 INSN_3(ST, MEM, DW), \
1314 /* Load instructions. */ \
1315 /* Register based. */ \
1316 INSN_3(LDX, MEM, B), \
1317 INSN_3(LDX, MEM, H), \
1318 INSN_3(LDX, MEM, W), \
1319 INSN_3(LDX, MEM, DW), \
1320 /* Immediate based. */ \
1321 INSN_3(LD, IMM, DW)
1323 bool bpf_opcode_in_insntable(u8 code)
1325 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1326 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1327 static const bool public_insntable[256] = {
1328 [0 ... 255] = false,
1329 /* Now overwrite non-defaults ... */
1330 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1331 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1332 [BPF_LD | BPF_ABS | BPF_B] = true,
1333 [BPF_LD | BPF_ABS | BPF_H] = true,
1334 [BPF_LD | BPF_ABS | BPF_W] = true,
1335 [BPF_LD | BPF_IND | BPF_B] = true,
1336 [BPF_LD | BPF_IND | BPF_H] = true,
1337 [BPF_LD | BPF_IND | BPF_W] = true,
1339 #undef BPF_INSN_3_TBL
1340 #undef BPF_INSN_2_TBL
1341 return public_insntable[code];
1344 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1345 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1347 memset(dst, 0, size);
1348 return -EFAULT;
/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 *	@insn: is the array of eBPF instructions
 *	@stack: is the eBPF storage stack
 *
 * Decode and execute eBPF instructions.
 */
1359 static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
1361 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1362 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1363 static const void * const jumptable[256] __annotate_jump_table = {
1364 [0 ... 255] = &&default_label,
1365 /* Now overwrite non-defaults ... */
1366 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1367 /* Non-UAPI available opcodes. */
1368 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1369 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1370 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1371 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1372 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1373 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1375 #undef BPF_INSN_3_LBL
1376 #undef BPF_INSN_2_LBL
1377 u32 tail_call_cnt = 0;
1379 #define CONT ({ insn++; goto select_insn; })
1380 #define CONT_JMP ({ insn++; goto select_insn; })
1382 select_insn:
1383 goto *jumptable[insn->code];
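	/* Computed-goto dispatch: each opcode byte indexes straight into the
	 * label table above, so the hot loop avoids a large switch statement.
	 */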
1385 /* ALU */
1386 #define ALU(OPCODE, OP) \
1387 ALU64_##OPCODE##_X: \
1388 DST = DST OP SRC; \
1389 CONT; \
1390 ALU_##OPCODE##_X: \
1391 DST = (u32) DST OP (u32) SRC; \
1392 CONT; \
1393 ALU64_##OPCODE##_K: \
1394 DST = DST OP IMM; \
1395 CONT; \
1396 ALU_##OPCODE##_K: \
1397 DST = (u32) DST OP (u32) IMM; \
1398 CONT;
1400 ALU(ADD, +)
1401 ALU(SUB, -)
1402 ALU(AND, &)
1403 ALU(OR, |)
1404 ALU(LSH, <<)
1405 ALU(RSH, >>)
1406 ALU(XOR, ^)
1407 ALU(MUL, *)
1408 #undef ALU
1409 ALU_NEG:
1410 DST = (u32) -DST;
1411 CONT;
1412 ALU64_NEG:
1413 DST = -DST;
1414 CONT;
1415 ALU_MOV_X:
1416 DST = (u32) SRC;
1417 CONT;
1418 ALU_MOV_K:
1419 DST = (u32) IMM;
1420 CONT;
1421 ALU64_MOV_X:
1422 DST = SRC;
1423 CONT;
1424 ALU64_MOV_K:
1425 DST = IMM;
1426 CONT;
1427 LD_IMM_DW:
1428 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1429 insn++;
1430 CONT;
1431 ALU_ARSH_X:
1432 DST = (u64) (u32) (((s32) DST) >> SRC);
1433 CONT;
1434 ALU_ARSH_K:
1435 DST = (u64) (u32) (((s32) DST) >> IMM);
1436 CONT;
1437 ALU64_ARSH_X:
1438 (*(s64 *) &DST) >>= SRC;
1439 CONT;
1440 ALU64_ARSH_K:
1441 (*(s64 *) &DST) >>= IMM;
1442 CONT;
1443 ALU64_MOD_X:
1444 div64_u64_rem(DST, SRC, &AX);
1445 DST = AX;
1446 CONT;
1447 ALU_MOD_X:
1448 AX = (u32) DST;
1449 DST = do_div(AX, (u32) SRC);
1450 CONT;
1451 ALU64_MOD_K:
1452 div64_u64_rem(DST, IMM, &AX);
1453 DST = AX;
1454 CONT;
1455 ALU_MOD_K:
1456 AX = (u32) DST;
1457 DST = do_div(AX, (u32) IMM);
1458 CONT;
1459 ALU64_DIV_X:
1460 DST = div64_u64(DST, SRC);
1461 CONT;
1462 ALU_DIV_X:
1463 AX = (u32) DST;
1464 do_div(AX, (u32) SRC);
1465 DST = (u32) AX;
1466 CONT;
1467 ALU64_DIV_K:
1468 DST = div64_u64(DST, IMM);
1469 CONT;
1470 ALU_DIV_K:
1471 AX = (u32) DST;
1472 do_div(AX, (u32) IMM);
1473 DST = (u32) AX;
1474 CONT;
1475 ALU_END_TO_BE:
1476 switch (IMM) {
1477 case 16:
1478 DST = (__force u16) cpu_to_be16(DST);
1479 break;
1480 case 32:
1481 DST = (__force u32) cpu_to_be32(DST);
1482 break;
1483 case 64:
1484 DST = (__force u64) cpu_to_be64(DST);
1485 break;
1487 CONT;
1488 ALU_END_TO_LE:
1489 switch (IMM) {
1490 case 16:
1491 DST = (__force u16) cpu_to_le16(DST);
1492 break;
1493 case 32:
1494 DST = (__force u32) cpu_to_le32(DST);
1495 break;
1496 case 64:
1497 DST = (__force u64) cpu_to_le64(DST);
1498 break;
1500 CONT;
1502 /* CALL */
1503 JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
1508 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1509 BPF_R4, BPF_R5);
1510 CONT;
1512 JMP_CALL_ARGS:
1513 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1514 BPF_R3, BPF_R4,
1515 BPF_R5,
1516 insn + insn->off + 1);
1517 CONT;
1519 JMP_TAIL_CALL: {
1520 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1521 struct bpf_array *array = container_of(map, struct bpf_array, map);
1522 struct bpf_prog *prog;
1523 u32 index = BPF_R3;
1525 if (unlikely(index >= array->map.max_entries))
1526 goto out;
1527 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1528 goto out;
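		/* MAX_TAIL_CALL_CNT bounds the depth of tail call chains so a
		 * program cannot keep re-entering itself indefinitely.
		 */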
1530 tail_call_cnt++;
1532 prog = READ_ONCE(array->ptrs[index]);
1533 if (!prog)
1534 goto out;
		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
1541 insn = prog->insnsi;
1542 goto select_insn;
1543 out:
1544 CONT;
1546 JMP_JA:
1547 insn += insn->off;
1548 CONT;
1549 JMP_EXIT:
1550 return BPF_R0;
1551 /* JMP */
1552 #define COND_JMP(SIGN, OPCODE, CMP_OP) \
1553 JMP_##OPCODE##_X: \
1554 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
1555 insn += insn->off; \
1556 CONT_JMP; \
1558 CONT; \
1559 JMP32_##OPCODE##_X: \
1560 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
1561 insn += insn->off; \
1562 CONT_JMP; \
1564 CONT; \
1565 JMP_##OPCODE##_K: \
1566 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
1567 insn += insn->off; \
1568 CONT_JMP; \
1570 CONT; \
1571 JMP32_##OPCODE##_K: \
1572 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
1573 insn += insn->off; \
1574 CONT_JMP; \
1576 CONT;
1577 COND_JMP(u, JEQ, ==)
1578 COND_JMP(u, JNE, !=)
1579 COND_JMP(u, JGT, >)
1580 COND_JMP(u, JLT, <)
1581 COND_JMP(u, JGE, >=)
1582 COND_JMP(u, JLE, <=)
1583 COND_JMP(u, JSET, &)
1584 COND_JMP(s, JSGT, >)
1585 COND_JMP(s, JSLT, <)
1586 COND_JMP(s, JSGE, >=)
1587 COND_JMP(s, JSLE, <=)
1588 #undef COND_JMP
1589 /* STX and ST and LDX*/
1590 #define LDST(SIZEOP, SIZE) \
1591 STX_MEM_##SIZEOP: \
1592 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1593 CONT; \
1594 ST_MEM_##SIZEOP: \
1595 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1596 CONT; \
1597 LDX_MEM_##SIZEOP: \
1598 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1599 CONT;
1601 LDST(B, u8)
1602 LDST(H, u16)
1603 LDST(W, u32)
1604 LDST(DW, u64)
1605 #undef LDST
1606 #define LDX_PROBE(SIZEOP, SIZE) \
1607 LDX_PROBE_MEM_##SIZEOP: \
1608 bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off)); \
1609 CONT;
1610 LDX_PROBE(B, 1)
1611 LDX_PROBE(H, 2)
1612 LDX_PROBE(W, 4)
1613 LDX_PROBE(DW, 8)
1614 #undef LDX_PROBE
1616 STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1617 atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1618 (DST + insn->off));
1619 CONT;
1620 STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1621 atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1622 (DST + insn->off));
1623 CONT;
1625 default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
1632 pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1633 BUG_ON(1);
1634 return 0;
1637 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1638 #define DEFINE_BPF_PROG_RUN(stack_size) \
1639 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1641 u64 stack[stack_size / sizeof(u64)]; \
1642 u64 regs[MAX_BPF_EXT_REG]; \
1644 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1645 ARG1 = (u64) (unsigned long) ctx; \
1646 return ___bpf_prog_run(regs, insn, stack); \
1649 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1650 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1651 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1652 const struct bpf_insn *insn) \
1654 u64 stack[stack_size / sizeof(u64)]; \
1655 u64 regs[MAX_BPF_EXT_REG]; \
1657 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1658 BPF_R1 = r1; \
1659 BPF_R2 = r2; \
1660 BPF_R3 = r3; \
1661 BPF_R4 = r4; \
1662 BPF_R5 = r5; \
1663 return ___bpf_prog_run(regs, insn, stack); \
1666 #define EVAL1(FN, X) FN(X)
1667 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1668 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1669 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1670 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1671 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1673 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1674 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1675 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1677 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1678 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1679 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
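/* The EVAL* expansions above instantiate one interpreter entry point per
 * 32-byte stack-size bucket from 32 up to 512 bytes; bpf_prog_select_func()
 * later picks the smallest variant that covers prog->aux->stack_depth.
 */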
1681 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1683 static unsigned int (*interpreters[])(const void *ctx,
1684 const struct bpf_insn *insn) = {
1685 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1686 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1687 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1689 #undef PROG_NAME_LIST
1690 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1691 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1692 const struct bpf_insn *insn) = {
1693 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1694 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1695 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1697 #undef PROG_NAME_LIST
1699 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1701 stack_depth = max_t(u32, stack_depth, 1);
1702 insn->off = (s16) insn->imm;
1703 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1704 __bpf_call_base_args;
1705 insn->code = BPF_JMP | BPF_CALL_ARGS;
1708 #else
1709 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1710 const struct bpf_insn *insn)
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, so warn about it!
	 */
1715 WARN_ON_ONCE(1);
1716 return 0;
1718 #endif
1720 bool bpf_prog_array_compatible(struct bpf_array *array,
1721 const struct bpf_prog *fp)
1723 if (fp->kprobe_override)
1724 return false;
1726 if (!array->aux->type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
1730 array->aux->type = fp->type;
1731 array->aux->jited = fp->jited;
1732 return true;
1735 return array->aux->type == fp->type &&
1736 array->aux->jited == fp->jited;
1739 static int bpf_check_tail_call(const struct bpf_prog *fp)
1741 struct bpf_prog_aux *aux = fp->aux;
1742 int i;
1744 for (i = 0; i < aux->used_map_cnt; i++) {
1745 struct bpf_map *map = aux->used_maps[i];
1746 struct bpf_array *array;
1748 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1749 continue;
1751 array = container_of(map, struct bpf_array, map);
1752 if (!bpf_prog_array_compatible(array, fp))
1753 return -EINVAL;
1756 return 0;
1759 static void bpf_prog_select_func(struct bpf_prog *fp)
1761 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1762 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1764 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1765 #else
1766 fp->bpf_func = __bpf_prog_ret0_warn;
1767 #endif
1771 * bpf_prog_select_runtime - select exec runtime for BPF program
1772 * @fp: bpf_prog populated with internal BPF program
1773 * @err: pointer to error variable
1775 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1776 * The BPF program will be executed via BPF_PROG_RUN() macro.
1778 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
	/* In case of BPF to BPF calls, verifier did all the prep
	 * work with regards to JITing, etc.
	 */
1783 if (fp->bpf_func)
1784 goto finalize;
1786 bpf_prog_select_func(fp);
	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
1794 if (!bpf_prog_is_dev_bound(fp->aux)) {
1795 *err = bpf_prog_alloc_jited_linfo(fp);
1796 if (*err)
1797 return fp;
1799 fp = bpf_int_jit_compile(fp);
1800 if (!fp->jited) {
1801 bpf_prog_free_jited_linfo(fp);
1802 #ifdef CONFIG_BPF_JIT_ALWAYS_ON
1803 *err = -ENOTSUPP;
1804 return fp;
1805 #endif
1806 } else {
1807 bpf_prog_free_unused_jited_linfo(fp);
1809 } else {
1810 *err = bpf_prog_offload_compile(fp);
1811 if (*err)
1812 return fp;
1815 finalize:
1816 bpf_prog_lock_ro(fp);
	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine, if we deal
	 * with JITed or non JITed program concatenations and not
	 * all eBPF JITs might immediately support all features.
	 */
1823 *err = bpf_check_tail_call(fp);
1825 return fp;
1827 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
1829 static unsigned int __bpf_prog_ret1(const void *ctx,
1830 const struct bpf_insn *insn)
1832 return 1;
1835 static struct bpf_prog_dummy {
1836 struct bpf_prog prog;
1837 } dummy_bpf_prog = {
1838 .prog = {
1839 .bpf_func = __bpf_prog_ret1,
/* To avoid allocating an empty bpf_prog_array for cgroups that don't have
 * a bpf program attached, use one global 'empty_prog_array'. It will not be
 * modified by the caller of bpf_prog_array_alloc() (since the caller
 * requested prog_cnt == 0); that pointer should still be 'freed' by
 * bpf_prog_array_free().
 */
1849 static struct {
1850 struct bpf_prog_array hdr;
1851 struct bpf_prog *null_prog;
1852 } empty_prog_array = {
1853 .null_prog = NULL,
1856 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1858 if (prog_cnt)
1859 return kzalloc(sizeof(struct bpf_prog_array) +
1860 sizeof(struct bpf_prog_array_item) *
1861 (prog_cnt + 1),
1862 flags);
1864 return &empty_prog_array.hdr;
1867 void bpf_prog_array_free(struct bpf_prog_array *progs)
1869 if (!progs || progs == &empty_prog_array.hdr)
1870 return;
1871 kfree_rcu(progs, rcu);
1874 int bpf_prog_array_length(struct bpf_prog_array *array)
1876 struct bpf_prog_array_item *item;
1877 u32 cnt = 0;
1879 for (item = array->items; item->prog; item++)
1880 if (item->prog != &dummy_bpf_prog.prog)
1881 cnt++;
1882 return cnt;
1885 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
1887 struct bpf_prog_array_item *item;
1889 for (item = array->items; item->prog; item++)
1890 if (item->prog != &dummy_bpf_prog.prog)
1891 return false;
1892 return true;
1895 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
1896 u32 *prog_ids,
1897 u32 request_cnt)
1899 struct bpf_prog_array_item *item;
1900 int i = 0;
1902 for (item = array->items; item->prog; item++) {
1903 if (item->prog == &dummy_bpf_prog.prog)
1904 continue;
1905 prog_ids[i] = item->prog->aux->id;
1906 if (++i == request_cnt) {
1907 item++;
1908 break;
1912 return !!(item->prog);
1915 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
1916 __u32 __user *prog_ids, u32 cnt)
1918 unsigned long err = 0;
1919 bool nospc;
1920 u32 *ids;
	/* users of this function are doing:
	 * cnt = bpf_prog_array_length();
	 * if (cnt > 0)
	 *	bpf_prog_array_copy_to_user(..., cnt);
	 * so below kcalloc doesn't need extra cnt > 0 check.
	 */
1928 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1929 if (!ids)
1930 return -ENOMEM;
1931 nospc = bpf_prog_array_copy_core(array, ids, cnt);
1932 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1933 kfree(ids);
1934 if (err)
1935 return -EFAULT;
1936 if (nospc)
1937 return -ENOSPC;
1938 return 0;
1941 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
1942 struct bpf_prog *old_prog)
1944 struct bpf_prog_array_item *item;
1946 for (item = array->items; item->prog; item++)
1947 if (item->prog == old_prog) {
1948 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
1949 break;
1953 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
1954 struct bpf_prog *exclude_prog,
1955 struct bpf_prog *include_prog,
1956 struct bpf_prog_array **new_array)
1958 int new_prog_cnt, carry_prog_cnt = 0;
1959 struct bpf_prog_array_item *existing;
1960 struct bpf_prog_array *array;
1961 bool found_exclude = false;
1962 int new_prog_idx = 0;
	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
1967 if (old_array) {
1968 existing = old_array->items;
1969 for (; existing->prog; existing++) {
1970 if (existing->prog == exclude_prog) {
1971 found_exclude = true;
1972 continue;
1974 if (existing->prog != &dummy_bpf_prog.prog)
1975 carry_prog_cnt++;
1976 if (existing->prog == include_prog)
1977 return -EEXIST;
1981 if (exclude_prog && !found_exclude)
1982 return -ENOENT;
1984 /* How many progs (not NULL) will be in the new array? */
1985 new_prog_cnt = carry_prog_cnt;
1986 if (include_prog)
1987 new_prog_cnt += 1;
1989 /* Do we have any prog (not NULL) in the new array? */
1990 if (!new_prog_cnt) {
1991 *new_array = NULL;
1992 return 0;
1995 /* +1 as the end of prog_array is marked with NULL */
1996 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1997 if (!array)
1998 return -ENOMEM;
2000 /* Fill in the new prog array */
2001 if (carry_prog_cnt) {
2002 existing = old_array->items;
2003 for (; existing->prog; existing++)
2004 if (existing->prog != exclude_prog &&
2005 existing->prog != &dummy_bpf_prog.prog) {
2006 array->items[new_prog_idx++].prog =
2007 existing->prog;
2010 if (include_prog)
2011 array->items[new_prog_idx++].prog = include_prog;
2012 array->items[new_prog_idx].prog = NULL;
2013 *new_array = array;
2014 return 0;
2017 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2018 u32 *prog_ids, u32 request_cnt,
2019 u32 *prog_cnt)
2021 u32 cnt = 0;
2023 if (array)
2024 cnt = bpf_prog_array_length(array);
2026 *prog_cnt = cnt;
2028 /* return early if user requested only program count or nothing to copy */
2029 if (!request_cnt || !cnt)
2030 return 0;
2032 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2033 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2034 : 0;
2037 static void bpf_free_cgroup_storage(struct bpf_prog_aux *aux)
2039 enum bpf_cgroup_storage_type stype;
2041 for_each_cgroup_storage_type(stype) {
2042 if (!aux->cgroup_storage[stype])
2043 continue;
2044 bpf_cgroup_storage_release(aux, aux->cgroup_storage[stype]);
2048 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2049 struct bpf_map **used_maps, u32 len)
2051 struct bpf_map *map;
2052 u32 i;
2054 bpf_free_cgroup_storage(aux);
2055 for (i = 0; i < len; i++) {
2056 map = used_maps[i];
2057 if (map->ops->map_poke_untrack)
2058 map->ops->map_poke_untrack(map, aux);
2059 bpf_map_put(map);
2063 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2065 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2066 kfree(aux->used_maps);
2069 static void bpf_prog_free_deferred(struct work_struct *work)
2071 struct bpf_prog_aux *aux;
2072 int i;
2074 aux = container_of(work, struct bpf_prog_aux, work);
2075 bpf_free_used_maps(aux);
2076 if (bpf_prog_is_dev_bound(aux))
2077 bpf_prog_offload_destroy(aux->prog);
2078 #ifdef CONFIG_PERF_EVENTS
2079 if (aux->prog->has_callchain_buf)
2080 put_callchain_buffers();
2081 #endif
2082 bpf_trampoline_put(aux->trampoline);
2083 for (i = 0; i < aux->func_cnt; i++)
2084 bpf_jit_free(aux->func[i]);
2085 if (aux->func_cnt) {
2086 kfree(aux->func);
2087 bpf_prog_unlock_free(aux->prog);
2088 } else {
2089 bpf_jit_free(aux->prog);
2093 /* Free internal BPF program */
2094 void bpf_prog_free(struct bpf_prog *fp)
2096 struct bpf_prog_aux *aux = fp->aux;
2098 if (aux->linked_prog)
2099 bpf_prog_put(aux->linked_prog);
2100 INIT_WORK(&aux->work, bpf_prog_free_deferred);
2101 schedule_work(&aux->work);
2103 EXPORT_SYMBOL_GPL(bpf_prog_free);
/* RNG for unprivileged user space with separated state from prandom_u32(). */
2106 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2108 void bpf_user_rnd_init_once(void)
2110 prandom_init_once(&bpf_user_rnd_state);
2113 BPF_CALL_0(bpf_user_rnd_u32)
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
2121 struct rnd_state *state;
2122 u32 res;
2124 state = &get_cpu_var(bpf_user_rnd_state);
2125 res = prandom_u32_state(state);
2126 put_cpu_var(bpf_user_rnd_state);
2128 return res;
2131 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2132 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2133 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2134 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2135 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2136 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2137 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2138 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2139 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2140 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2142 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2143 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2144 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2145 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2147 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2148 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2149 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2150 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2151 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2153 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2155 return NULL;
2158 u64 __weak
2159 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2160 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2162 return -ENOTSUPP;
2164 EXPORT_SYMBOL_GPL(bpf_event_output);
2166 /* Always built-in helper functions. */
2167 const struct bpf_func_proto bpf_tail_call_proto = {
2168 .func = NULL,
2169 .gpl_only = false,
2170 .ret_type = RET_VOID,
2171 .arg1_type = ARG_PTR_TO_CTX,
2172 .arg2_type = ARG_CONST_MAP_PTR,
2173 .arg3_type = ARG_ANYTHING,
/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
2180 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2182 return prog;
/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
2188 void __weak bpf_jit_compile(struct bpf_prog *prog)
2192 bool __weak bpf_helper_changes_pkt_data(void *func)
2194 return false;
/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
 * analysis code and wants explicit zero extension inserted by verifier.
 * Otherwise, return FALSE.
 */
2201 bool __weak bpf_jit_needs_zext(void)
2203 return false;
/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
2209 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2210 int len)
2212 return -EFAULT;
2215 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2216 void *addr1, void *addr2)
2218 return -ENOTSUPP;
2221 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2222 EXPORT_SYMBOL(bpf_stats_enabled_key);
2224 /* All definitions of tracepoints related to BPF. */
2225 #define CREATE_TRACE_POINTS
2226 #include <linux/bpf_trace.h>
2228 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2229 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);