1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Linux Socket Filter - Kernel level socket filtering
5 * Based on the design of the Berkeley Packet Filter. The new
6 * internal format has been designed by PLUMgrid:
8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
12 * Jay Schulist <jschlst@samba.org>
13 * Alexei Starovoitov <ast@plumgrid.com>
14 * Daniel Borkmann <dborkman@redhat.com>
16 * Andi Kleen - Fix a few bad bugs and races.
17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/prandom.h>
25 #include <linux/bpf.h>
26 #include <linux/btf.h>
27 #include <linux/objtool.h>
28 #include <linux/overflow.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <linux/bpf_verifier.h>
36 #include <linux/nodemask.h>
37 #include <linux/nospec.h>
38 #include <linux/bpf_mem_alloc.h>
39 #include <linux/memcontrol.h>
40 #include <linux/execmem.h>
42 #include <asm/barrier.h>
43 #include <linux/unaligned.h>
46 #define BPF_R0 regs[BPF_REG_0]
47 #define BPF_R1 regs[BPF_REG_1]
48 #define BPF_R2 regs[BPF_REG_2]
49 #define BPF_R3 regs[BPF_REG_3]
50 #define BPF_R4 regs[BPF_REG_4]
51 #define BPF_R5 regs[BPF_REG_5]
52 #define BPF_R6 regs[BPF_REG_6]
53 #define BPF_R7 regs[BPF_REG_7]
54 #define BPF_R8 regs[BPF_REG_8]
55 #define BPF_R9 regs[BPF_REG_9]
56 #define BPF_R10 regs[BPF_REG_10]
59 #define DST regs[insn->dst_reg]
60 #define SRC regs[insn->src_reg]
61 #define FP regs[BPF_REG_FP]
62 #define AX regs[BPF_REG_AX]
63 #define ARG1 regs[BPF_REG_ARG1]
64 #define CTX regs[BPF_REG_CTX]
68 struct bpf_mem_alloc bpf_global_ma
;
69 bool bpf_global_ma_set
;
71 /* No hurry in this branch
73 * Exported for the bpf jit load helper.
75 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff
*skb
, int k
, unsigned int size
)
79 if (k
>= SKF_NET_OFF
) {
80 ptr
= skb_network_header(skb
) + k
- SKF_NET_OFF
;
81 } else if (k
>= SKF_LL_OFF
) {
82 if (unlikely(!skb_mac_header_was_set(skb
)))
84 ptr
= skb_mac_header(skb
) + k
- SKF_LL_OFF
;
86 if (ptr
>= skb
->head
&& ptr
+ size
<= skb_tail_pointer(skb
))
92 /* tell bpf programs that include vmlinux.h kernel's PAGE_SIZE */
94 __PAGE_SIZE
= PAGE_SIZE
97 struct bpf_prog
*bpf_prog_alloc_no_stats(unsigned int size
, gfp_t gfp_extra_flags
)
99 gfp_t gfp_flags
= bpf_memcg_flags(GFP_KERNEL
| __GFP_ZERO
| gfp_extra_flags
);
100 struct bpf_prog_aux
*aux
;
103 size
= round_up(size
, __PAGE_SIZE
);
104 fp
= __vmalloc(size
, gfp_flags
);
108 aux
= kzalloc(sizeof(*aux
), bpf_memcg_flags(GFP_KERNEL
| gfp_extra_flags
));
113 fp
->active
= alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL
| gfp_extra_flags
));
120 fp
->pages
= size
/ PAGE_SIZE
;
123 fp
->jit_requested
= ebpf_jit_enabled();
124 fp
->blinding_requested
= bpf_jit_blinding_enabled(fp
);
125 #ifdef CONFIG_CGROUP_BPF
126 aux
->cgroup_atype
= CGROUP_BPF_ATTACH_TYPE_INVALID
;
129 INIT_LIST_HEAD_RCU(&fp
->aux
->ksym
.lnode
);
130 #ifdef CONFIG_FINEIBT
131 INIT_LIST_HEAD_RCU(&fp
->aux
->ksym_prefix
.lnode
);
133 mutex_init(&fp
->aux
->used_maps_mutex
);
134 mutex_init(&fp
->aux
->ext_mutex
);
135 mutex_init(&fp
->aux
->dst_mutex
);
140 struct bpf_prog
*bpf_prog_alloc(unsigned int size
, gfp_t gfp_extra_flags
)
142 gfp_t gfp_flags
= bpf_memcg_flags(GFP_KERNEL
| __GFP_ZERO
| gfp_extra_flags
);
143 struct bpf_prog
*prog
;
146 prog
= bpf_prog_alloc_no_stats(size
, gfp_extra_flags
);
150 prog
->stats
= alloc_percpu_gfp(struct bpf_prog_stats
, gfp_flags
);
152 free_percpu(prog
->active
);
158 for_each_possible_cpu(cpu
) {
159 struct bpf_prog_stats
*pstats
;
161 pstats
= per_cpu_ptr(prog
->stats
, cpu
);
162 u64_stats_init(&pstats
->syncp
);
166 EXPORT_SYMBOL_GPL(bpf_prog_alloc
);
168 int bpf_prog_alloc_jited_linfo(struct bpf_prog
*prog
)
170 if (!prog
->aux
->nr_linfo
|| !prog
->jit_requested
)
173 prog
->aux
->jited_linfo
= kvcalloc(prog
->aux
->nr_linfo
,
174 sizeof(*prog
->aux
->jited_linfo
),
175 bpf_memcg_flags(GFP_KERNEL
| __GFP_NOWARN
));
176 if (!prog
->aux
->jited_linfo
)
182 void bpf_prog_jit_attempt_done(struct bpf_prog
*prog
)
184 if (prog
->aux
->jited_linfo
&&
185 (!prog
->jited
|| !prog
->aux
->jited_linfo
[0])) {
186 kvfree(prog
->aux
->jited_linfo
);
187 prog
->aux
->jited_linfo
= NULL
;
190 kfree(prog
->aux
->kfunc_tab
);
191 prog
->aux
->kfunc_tab
= NULL
;
194 /* The jit engine is responsible to provide an array
195 * for insn_off to the jited_off mapping (insn_to_jit_off).
197 * The idx to this array is the insn_off. Hence, the insn_off
198 * here is relative to the prog itself instead of the main prog.
199 * This array has one entry for each xlated bpf insn.
201 * jited_off is the byte off to the end of the jited insn.
205 * The first bpf insn off of the prog. The insn off
206 * here is relative to the main prog.
207 * e.g. if prog is a subprog, insn_start > 0
209 * The prog's idx to prog->aux->linfo and jited_linfo
211 * jited_linfo[linfo_idx] = prog->bpf_func
215 * jited_linfo[i] = prog->bpf_func +
216 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
218 void bpf_prog_fill_jited_linfo(struct bpf_prog
*prog
,
219 const u32
*insn_to_jit_off
)
221 u32 linfo_idx
, insn_start
, insn_end
, nr_linfo
, i
;
222 const struct bpf_line_info
*linfo
;
225 if (!prog
->aux
->jited_linfo
|| prog
->aux
->func_idx
> prog
->aux
->func_cnt
)
226 /* Userspace did not provide linfo */
229 linfo_idx
= prog
->aux
->linfo_idx
;
230 linfo
= &prog
->aux
->linfo
[linfo_idx
];
231 insn_start
= linfo
[0].insn_off
;
232 insn_end
= insn_start
+ prog
->len
;
234 jited_linfo
= &prog
->aux
->jited_linfo
[linfo_idx
];
235 jited_linfo
[0] = prog
->bpf_func
;
237 nr_linfo
= prog
->aux
->nr_linfo
- linfo_idx
;
239 for (i
= 1; i
< nr_linfo
&& linfo
[i
].insn_off
< insn_end
; i
++)
240 /* The verifier ensures that linfo[i].insn_off is
241 * strictly increasing
243 jited_linfo
[i
] = prog
->bpf_func
+
244 insn_to_jit_off
[linfo
[i
].insn_off
- insn_start
- 1];
247 struct bpf_prog
*bpf_prog_realloc(struct bpf_prog
*fp_old
, unsigned int size
,
248 gfp_t gfp_extra_flags
)
250 gfp_t gfp_flags
= bpf_memcg_flags(GFP_KERNEL
| __GFP_ZERO
| gfp_extra_flags
);
254 size
= round_up(size
, PAGE_SIZE
);
255 pages
= size
/ PAGE_SIZE
;
256 if (pages
<= fp_old
->pages
)
259 fp
= __vmalloc(size
, gfp_flags
);
261 memcpy(fp
, fp_old
, fp_old
->pages
* PAGE_SIZE
);
265 /* We keep fp->aux from fp_old around in the new
266 * reallocated structure.
269 fp_old
->stats
= NULL
;
270 fp_old
->active
= NULL
;
271 __bpf_prog_free(fp_old
);
277 void __bpf_prog_free(struct bpf_prog
*fp
)
280 mutex_destroy(&fp
->aux
->used_maps_mutex
);
281 mutex_destroy(&fp
->aux
->dst_mutex
);
282 kfree(fp
->aux
->poke_tab
);
285 free_percpu(fp
->stats
);
286 free_percpu(fp
->active
);
290 int bpf_prog_calc_tag(struct bpf_prog
*fp
)
292 const u32 bits_offset
= SHA1_BLOCK_SIZE
- sizeof(__be64
);
293 u32 raw_size
= bpf_prog_tag_scratch_size(fp
);
294 u32 digest
[SHA1_DIGEST_WORDS
];
295 u32 ws
[SHA1_WORKSPACE_WORDS
];
296 u32 i
, bsize
, psize
, blocks
;
297 struct bpf_insn
*dst
;
303 raw
= vmalloc(raw_size
);
308 memset(ws
, 0, sizeof(ws
));
310 /* We need to take out the map fd for the digest calculation
311 * since they are unstable from user space side.
314 for (i
= 0, was_ld_map
= false; i
< fp
->len
; i
++) {
315 dst
[i
] = fp
->insnsi
[i
];
317 dst
[i
].code
== (BPF_LD
| BPF_IMM
| BPF_DW
) &&
318 (dst
[i
].src_reg
== BPF_PSEUDO_MAP_FD
||
319 dst
[i
].src_reg
== BPF_PSEUDO_MAP_VALUE
)) {
322 } else if (was_ld_map
&&
324 dst
[i
].dst_reg
== 0 &&
325 dst
[i
].src_reg
== 0 &&
334 psize
= bpf_prog_insn_size(fp
);
335 memset(&raw
[psize
], 0, raw_size
- psize
);
338 bsize
= round_up(psize
, SHA1_BLOCK_SIZE
);
339 blocks
= bsize
/ SHA1_BLOCK_SIZE
;
341 if (bsize
- psize
>= sizeof(__be64
)) {
342 bits
= (__be64
*)(todo
+ bsize
- sizeof(__be64
));
344 bits
= (__be64
*)(todo
+ bsize
+ bits_offset
);
347 *bits
= cpu_to_be64((psize
- 1) << 3);
350 sha1_transform(digest
, todo
, ws
);
351 todo
+= SHA1_BLOCK_SIZE
;
354 result
= (__force __be32
*)digest
;
355 for (i
= 0; i
< SHA1_DIGEST_WORDS
; i
++)
356 result
[i
] = cpu_to_be32(digest
[i
]);
357 memcpy(fp
->tag
, result
, sizeof(fp
->tag
));
363 static int bpf_adj_delta_to_imm(struct bpf_insn
*insn
, u32 pos
, s32 end_old
,
364 s32 end_new
, s32 curr
, const bool probe_pass
)
366 const s64 imm_min
= S32_MIN
, imm_max
= S32_MAX
;
367 s32 delta
= end_new
- end_old
;
370 if (curr
< pos
&& curr
+ imm
+ 1 >= end_old
)
372 else if (curr
>= end_new
&& curr
+ imm
+ 1 < end_new
)
374 if (imm
< imm_min
|| imm
> imm_max
)
381 static int bpf_adj_delta_to_off(struct bpf_insn
*insn
, u32 pos
, s32 end_old
,
382 s32 end_new
, s32 curr
, const bool probe_pass
)
384 s64 off_min
, off_max
, off
;
385 s32 delta
= end_new
- end_old
;
387 if (insn
->code
== (BPF_JMP32
| BPF_JA
)) {
397 if (curr
< pos
&& curr
+ off
+ 1 >= end_old
)
399 else if (curr
>= end_new
&& curr
+ off
+ 1 < end_new
)
401 if (off
< off_min
|| off
> off_max
)
404 if (insn
->code
== (BPF_JMP32
| BPF_JA
))
412 static int bpf_adj_branches(struct bpf_prog
*prog
, u32 pos
, s32 end_old
,
413 s32 end_new
, const bool probe_pass
)
415 u32 i
, insn_cnt
= prog
->len
+ (probe_pass
? end_new
- end_old
: 0);
416 struct bpf_insn
*insn
= prog
->insnsi
;
419 for (i
= 0; i
< insn_cnt
; i
++, insn
++) {
422 /* In the probing pass we still operate on the original,
423 * unpatched image in order to check overflows before we
424 * do any other adjustments. Therefore skip the patchlet.
426 if (probe_pass
&& i
== pos
) {
428 insn
= prog
->insnsi
+ end_old
;
430 if (bpf_pseudo_func(insn
)) {
431 ret
= bpf_adj_delta_to_imm(insn
, pos
, end_old
,
432 end_new
, i
, probe_pass
);
438 if ((BPF_CLASS(code
) != BPF_JMP
&&
439 BPF_CLASS(code
) != BPF_JMP32
) ||
440 BPF_OP(code
) == BPF_EXIT
)
442 /* Adjust offset of jmps if we cross patch boundaries. */
443 if (BPF_OP(code
) == BPF_CALL
) {
444 if (insn
->src_reg
!= BPF_PSEUDO_CALL
)
446 ret
= bpf_adj_delta_to_imm(insn
, pos
, end_old
,
447 end_new
, i
, probe_pass
);
449 ret
= bpf_adj_delta_to_off(insn
, pos
, end_old
,
450 end_new
, i
, probe_pass
);
459 static void bpf_adj_linfo(struct bpf_prog
*prog
, u32 off
, u32 delta
)
461 struct bpf_line_info
*linfo
;
464 nr_linfo
= prog
->aux
->nr_linfo
;
465 if (!nr_linfo
|| !delta
)
468 linfo
= prog
->aux
->linfo
;
470 for (i
= 0; i
< nr_linfo
; i
++)
471 if (off
< linfo
[i
].insn_off
)
474 /* Push all off < linfo[i].insn_off by delta */
475 for (; i
< nr_linfo
; i
++)
476 linfo
[i
].insn_off
+= delta
;
479 struct bpf_prog
*bpf_patch_insn_single(struct bpf_prog
*prog
, u32 off
,
480 const struct bpf_insn
*patch
, u32 len
)
482 u32 insn_adj_cnt
, insn_rest
, insn_delta
= len
- 1;
483 const u32 cnt_max
= S16_MAX
;
484 struct bpf_prog
*prog_adj
;
487 /* Since our patchlet doesn't expand the image, we're done. */
488 if (insn_delta
== 0) {
489 memcpy(prog
->insnsi
+ off
, patch
, sizeof(*patch
));
493 insn_adj_cnt
= prog
->len
+ insn_delta
;
495 /* Reject anything that would potentially let the insn->off
496 * target overflow when we have excessive program expansions.
497 * We need to probe here before we do any reallocation where
498 * we afterwards may not fail anymore.
500 if (insn_adj_cnt
> cnt_max
&&
501 (err
= bpf_adj_branches(prog
, off
, off
+ 1, off
+ len
, true)))
504 /* Several new instructions need to be inserted. Make room
505 * for them. Likely, there's no need for a new allocation as
506 * last page could have large enough tailroom.
508 prog_adj
= bpf_prog_realloc(prog
, bpf_prog_size(insn_adj_cnt
),
511 return ERR_PTR(-ENOMEM
);
513 prog_adj
->len
= insn_adj_cnt
;
515 /* Patching happens in 3 steps:
517 * 1) Move over tail of insnsi from next instruction onwards,
518 * so we can patch the single target insn with one or more
519 * new ones (patching is always from 1 to n insns, n > 0).
520 * 2) Inject new instructions at the target location.
521 * 3) Adjust branch offsets if necessary.
523 insn_rest
= insn_adj_cnt
- off
- len
;
525 memmove(prog_adj
->insnsi
+ off
+ len
, prog_adj
->insnsi
+ off
+ 1,
526 sizeof(*patch
) * insn_rest
);
527 memcpy(prog_adj
->insnsi
+ off
, patch
, sizeof(*patch
) * len
);
529 /* We are guaranteed to not fail at this point, otherwise
530 * the ship has sailed to reverse to the original state. An
531 * overflow cannot happen at this point.
533 BUG_ON(bpf_adj_branches(prog_adj
, off
, off
+ 1, off
+ len
, false));
535 bpf_adj_linfo(prog_adj
, off
, insn_delta
);
540 int bpf_remove_insns(struct bpf_prog
*prog
, u32 off
, u32 cnt
)
542 /* Branch offsets can't overflow when program is shrinking, no need
543 * to call bpf_adj_branches(..., true) here
545 memmove(prog
->insnsi
+ off
, prog
->insnsi
+ off
+ cnt
,
546 sizeof(struct bpf_insn
) * (prog
->len
- off
- cnt
));
549 return WARN_ON_ONCE(bpf_adj_branches(prog
, off
, off
+ cnt
, off
, false));
552 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog
*fp
)
556 for (i
= 0; i
< fp
->aux
->real_func_cnt
; i
++)
557 bpf_prog_kallsyms_del(fp
->aux
->func
[i
]);
560 void bpf_prog_kallsyms_del_all(struct bpf_prog
*fp
)
562 bpf_prog_kallsyms_del_subprogs(fp
);
563 bpf_prog_kallsyms_del(fp
);
566 #ifdef CONFIG_BPF_JIT
567 /* All BPF JIT sysctl knobs here. */
568 int bpf_jit_enable __read_mostly
= IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON
);
569 int bpf_jit_kallsyms __read_mostly
= IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON
);
570 int bpf_jit_harden __read_mostly
;
571 long bpf_jit_limit __read_mostly
;
572 long bpf_jit_limit_max __read_mostly
;
575 bpf_prog_ksym_set_addr(struct bpf_prog
*prog
)
577 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog
));
579 prog
->aux
->ksym
.start
= (unsigned long) prog
->bpf_func
;
580 prog
->aux
->ksym
.end
= prog
->aux
->ksym
.start
+ prog
->jited_len
;
584 bpf_prog_ksym_set_name(struct bpf_prog
*prog
)
586 char *sym
= prog
->aux
->ksym
.name
;
587 const char *end
= sym
+ KSYM_NAME_LEN
;
588 const struct btf_type
*type
;
589 const char *func_name
;
591 BUILD_BUG_ON(sizeof("bpf_prog_") +
592 sizeof(prog
->tag
) * 2 +
593 /* name has been null terminated.
594 * We should need +1 for the '_' preceding
595 * the name. However, the null character
596 * is double counted between the name and the
597 * sizeof("bpf_prog_") above, so we omit
600 sizeof(prog
->aux
->name
) > KSYM_NAME_LEN
);
602 sym
+= snprintf(sym
, KSYM_NAME_LEN
, "bpf_prog_");
603 sym
= bin2hex(sym
, prog
->tag
, sizeof(prog
->tag
));
605 /* prog->aux->name will be ignored if full btf name is available */
606 if (prog
->aux
->func_info_cnt
&& prog
->aux
->func_idx
< prog
->aux
->func_info_cnt
) {
607 type
= btf_type_by_id(prog
->aux
->btf
,
608 prog
->aux
->func_info
[prog
->aux
->func_idx
].type_id
);
609 func_name
= btf_name_by_offset(prog
->aux
->btf
, type
->name_off
);
610 snprintf(sym
, (size_t)(end
- sym
), "_%s", func_name
);
614 if (prog
->aux
->name
[0])
615 snprintf(sym
, (size_t)(end
- sym
), "_%s", prog
->aux
->name
);
620 static unsigned long bpf_get_ksym_start(struct latch_tree_node
*n
)
622 return container_of(n
, struct bpf_ksym
, tnode
)->start
;
625 static __always_inline
bool bpf_tree_less(struct latch_tree_node
*a
,
626 struct latch_tree_node
*b
)
628 return bpf_get_ksym_start(a
) < bpf_get_ksym_start(b
);
631 static __always_inline
int bpf_tree_comp(void *key
, struct latch_tree_node
*n
)
633 unsigned long val
= (unsigned long)key
;
634 const struct bpf_ksym
*ksym
;
636 ksym
= container_of(n
, struct bpf_ksym
, tnode
);
638 if (val
< ksym
->start
)
640 /* Ensure that we detect return addresses as part of the program, when
641 * the final instruction is a call for a program part of the stack
642 * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
650 static const struct latch_tree_ops bpf_tree_ops
= {
651 .less
= bpf_tree_less
,
652 .comp
= bpf_tree_comp
,
655 static DEFINE_SPINLOCK(bpf_lock
);
656 static LIST_HEAD(bpf_kallsyms
);
657 static struct latch_tree_root bpf_tree __cacheline_aligned
;
659 void bpf_ksym_add(struct bpf_ksym
*ksym
)
661 spin_lock_bh(&bpf_lock
);
662 WARN_ON_ONCE(!list_empty(&ksym
->lnode
));
663 list_add_tail_rcu(&ksym
->lnode
, &bpf_kallsyms
);
664 latch_tree_insert(&ksym
->tnode
, &bpf_tree
, &bpf_tree_ops
);
665 spin_unlock_bh(&bpf_lock
);
668 static void __bpf_ksym_del(struct bpf_ksym
*ksym
)
670 if (list_empty(&ksym
->lnode
))
673 latch_tree_erase(&ksym
->tnode
, &bpf_tree
, &bpf_tree_ops
);
674 list_del_rcu(&ksym
->lnode
);
677 void bpf_ksym_del(struct bpf_ksym
*ksym
)
679 spin_lock_bh(&bpf_lock
);
680 __bpf_ksym_del(ksym
);
681 spin_unlock_bh(&bpf_lock
);
684 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog
*fp
)
686 return fp
->jited
&& !bpf_prog_was_classic(fp
);
689 void bpf_prog_kallsyms_add(struct bpf_prog
*fp
)
691 if (!bpf_prog_kallsyms_candidate(fp
) ||
692 !bpf_token_capable(fp
->aux
->token
, CAP_BPF
))
695 bpf_prog_ksym_set_addr(fp
);
696 bpf_prog_ksym_set_name(fp
);
697 fp
->aux
->ksym
.prog
= true;
699 bpf_ksym_add(&fp
->aux
->ksym
);
701 #ifdef CONFIG_FINEIBT
703 * When FineIBT, code in the __cfi_foo() symbols can get executed
704 * and hence unwinder needs help.
706 if (cfi_mode
!= CFI_FINEIBT
)
709 snprintf(fp
->aux
->ksym_prefix
.name
, KSYM_NAME_LEN
,
710 "__cfi_%s", fp
->aux
->ksym
.name
);
712 fp
->aux
->ksym_prefix
.start
= (unsigned long) fp
->bpf_func
- 16;
713 fp
->aux
->ksym_prefix
.end
= (unsigned long) fp
->bpf_func
;
715 bpf_ksym_add(&fp
->aux
->ksym_prefix
);
719 void bpf_prog_kallsyms_del(struct bpf_prog
*fp
)
721 if (!bpf_prog_kallsyms_candidate(fp
))
724 bpf_ksym_del(&fp
->aux
->ksym
);
725 #ifdef CONFIG_FINEIBT
726 if (cfi_mode
!= CFI_FINEIBT
)
728 bpf_ksym_del(&fp
->aux
->ksym_prefix
);
732 static struct bpf_ksym
*bpf_ksym_find(unsigned long addr
)
734 struct latch_tree_node
*n
;
736 n
= latch_tree_find((void *)addr
, &bpf_tree
, &bpf_tree_ops
);
737 return n
? container_of(n
, struct bpf_ksym
, tnode
) : NULL
;
740 int __bpf_address_lookup(unsigned long addr
, unsigned long *size
,
741 unsigned long *off
, char *sym
)
743 struct bpf_ksym
*ksym
;
747 ksym
= bpf_ksym_find(addr
);
749 unsigned long symbol_start
= ksym
->start
;
750 unsigned long symbol_end
= ksym
->end
;
752 ret
= strscpy(sym
, ksym
->name
, KSYM_NAME_LEN
);
755 *size
= symbol_end
- symbol_start
;
757 *off
= addr
- symbol_start
;
764 bool is_bpf_text_address(unsigned long addr
)
769 ret
= bpf_ksym_find(addr
) != NULL
;
775 struct bpf_prog
*bpf_prog_ksym_find(unsigned long addr
)
777 struct bpf_ksym
*ksym
= bpf_ksym_find(addr
);
779 return ksym
&& ksym
->prog
?
780 container_of(ksym
, struct bpf_prog_aux
, ksym
)->prog
:
784 const struct exception_table_entry
*search_bpf_extables(unsigned long addr
)
786 const struct exception_table_entry
*e
= NULL
;
787 struct bpf_prog
*prog
;
790 prog
= bpf_prog_ksym_find(addr
);
793 if (!prog
->aux
->num_exentries
)
796 e
= search_extable(prog
->aux
->extable
, prog
->aux
->num_exentries
, addr
);
802 int bpf_get_kallsym(unsigned int symnum
, unsigned long *value
, char *type
,
805 struct bpf_ksym
*ksym
;
809 if (!bpf_jit_kallsyms_enabled())
813 list_for_each_entry_rcu(ksym
, &bpf_kallsyms
, lnode
) {
817 strscpy(sym
, ksym
->name
, KSYM_NAME_LEN
);
819 *value
= ksym
->start
;
820 *type
= BPF_SYM_ELF_TYPE
;
830 int bpf_jit_add_poke_descriptor(struct bpf_prog
*prog
,
831 struct bpf_jit_poke_descriptor
*poke
)
833 struct bpf_jit_poke_descriptor
*tab
= prog
->aux
->poke_tab
;
834 static const u32 poke_tab_max
= 1024;
835 u32 slot
= prog
->aux
->size_poke_tab
;
838 if (size
> poke_tab_max
)
840 if (poke
->tailcall_target
|| poke
->tailcall_target_stable
||
841 poke
->tailcall_bypass
|| poke
->adj_off
|| poke
->bypass_addr
)
844 switch (poke
->reason
) {
845 case BPF_POKE_REASON_TAIL_CALL
:
846 if (!poke
->tail_call
.map
)
853 tab
= krealloc_array(tab
, size
, sizeof(*poke
), GFP_KERNEL
);
857 memcpy(&tab
[slot
], poke
, sizeof(*poke
));
858 prog
->aux
->size_poke_tab
= size
;
859 prog
->aux
->poke_tab
= tab
;
865 * BPF program pack allocator.
867 * Most BPF programs are pretty small. Allocating a hole page for each
868 * program is sometime a waste. Many small bpf program also adds pressure
869 * to instruction TLB. To solve this issue, we introduce a BPF program pack
870 * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
871 * to host BPF programs.
873 #define BPF_PROG_CHUNK_SHIFT 6
874 #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
875 #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
877 struct bpf_prog_pack
{
878 struct list_head list
;
880 unsigned long bitmap
[];
883 void bpf_jit_fill_hole_with_zero(void *area
, unsigned int size
)
885 memset(area
, 0, size
);
888 #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
890 static DEFINE_MUTEX(pack_mutex
);
891 static LIST_HEAD(pack_list
);
893 /* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
894 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
897 /* PMD_SIZE is really big for some archs. It doesn't make sense to
898 * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
899 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
900 * greater than or equal to 2MB.
902 #define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes())
904 #define BPF_PROG_PACK_SIZE PAGE_SIZE
907 #define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
909 static struct bpf_prog_pack
*alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns
)
911 struct bpf_prog_pack
*pack
;
914 pack
= kzalloc(struct_size(pack
, bitmap
, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT
)),
918 pack
->ptr
= bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE
);
921 bpf_fill_ill_insns(pack
->ptr
, BPF_PROG_PACK_SIZE
);
922 bitmap_zero(pack
->bitmap
, BPF_PROG_PACK_SIZE
/ BPF_PROG_CHUNK_SIZE
);
924 set_vm_flush_reset_perms(pack
->ptr
);
925 err
= set_memory_rox((unsigned long)pack
->ptr
,
926 BPF_PROG_PACK_SIZE
/ PAGE_SIZE
);
929 list_add_tail(&pack
->list
, &pack_list
);
933 bpf_jit_free_exec(pack
->ptr
);
938 void *bpf_prog_pack_alloc(u32 size
, bpf_jit_fill_hole_t bpf_fill_ill_insns
)
940 unsigned int nbits
= BPF_PROG_SIZE_TO_NBITS(size
);
941 struct bpf_prog_pack
*pack
;
945 mutex_lock(&pack_mutex
);
946 if (size
> BPF_PROG_PACK_SIZE
) {
947 size
= round_up(size
, PAGE_SIZE
);
948 ptr
= bpf_jit_alloc_exec(size
);
952 bpf_fill_ill_insns(ptr
, size
);
953 set_vm_flush_reset_perms(ptr
);
954 err
= set_memory_rox((unsigned long)ptr
,
957 bpf_jit_free_exec(ptr
);
963 list_for_each_entry(pack
, &pack_list
, list
) {
964 pos
= bitmap_find_next_zero_area(pack
->bitmap
, BPF_PROG_CHUNK_COUNT
, 0,
966 if (pos
< BPF_PROG_CHUNK_COUNT
)
967 goto found_free_area
;
970 pack
= alloc_new_pack(bpf_fill_ill_insns
);
977 bitmap_set(pack
->bitmap
, pos
, nbits
);
978 ptr
= (void *)(pack
->ptr
) + (pos
<< BPF_PROG_CHUNK_SHIFT
);
981 mutex_unlock(&pack_mutex
);
985 void bpf_prog_pack_free(void *ptr
, u32 size
)
987 struct bpf_prog_pack
*pack
= NULL
, *tmp
;
991 mutex_lock(&pack_mutex
);
992 if (size
> BPF_PROG_PACK_SIZE
) {
993 bpf_jit_free_exec(ptr
);
997 list_for_each_entry(tmp
, &pack_list
, list
) {
998 if (ptr
>= tmp
->ptr
&& (tmp
->ptr
+ BPF_PROG_PACK_SIZE
) > ptr
) {
1004 if (WARN_ONCE(!pack
, "bpf_prog_pack bug\n"))
1007 nbits
= BPF_PROG_SIZE_TO_NBITS(size
);
1008 pos
= ((unsigned long)ptr
- (unsigned long)pack
->ptr
) >> BPF_PROG_CHUNK_SHIFT
;
1010 WARN_ONCE(bpf_arch_text_invalidate(ptr
, size
),
1011 "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
1013 bitmap_clear(pack
->bitmap
, pos
, nbits
);
1014 if (bitmap_find_next_zero_area(pack
->bitmap
, BPF_PROG_CHUNK_COUNT
, 0,
1015 BPF_PROG_CHUNK_COUNT
, 0) == 0) {
1016 list_del(&pack
->list
);
1017 bpf_jit_free_exec(pack
->ptr
);
1021 mutex_unlock(&pack_mutex
);
1024 static atomic_long_t bpf_jit_current
;
1026 /* Can be overridden by an arch's JIT compiler if it has a custom,
1027 * dedicated BPF backend memory area, or if neither of the two
1030 u64 __weak
bpf_jit_alloc_exec_limit(void)
1032 #if defined(MODULES_VADDR)
1033 return MODULES_END
- MODULES_VADDR
;
1035 return VMALLOC_END
- VMALLOC_START
;
1039 static int __init
bpf_jit_charge_init(void)
1041 /* Only used as heuristic here to derive limit. */
1042 bpf_jit_limit_max
= bpf_jit_alloc_exec_limit();
1043 bpf_jit_limit
= min_t(u64
, round_up(bpf_jit_limit_max
>> 1,
1044 PAGE_SIZE
), LONG_MAX
);
1047 pure_initcall(bpf_jit_charge_init
);
1049 int bpf_jit_charge_modmem(u32 size
)
1051 if (atomic_long_add_return(size
, &bpf_jit_current
) > READ_ONCE(bpf_jit_limit
)) {
1052 if (!bpf_capable()) {
1053 atomic_long_sub(size
, &bpf_jit_current
);
1061 void bpf_jit_uncharge_modmem(u32 size
)
1063 atomic_long_sub(size
, &bpf_jit_current
);
1066 void *__weak
bpf_jit_alloc_exec(unsigned long size
)
1068 return execmem_alloc(EXECMEM_BPF
, size
);
1071 void __weak
bpf_jit_free_exec(void *addr
)
1076 struct bpf_binary_header
*
1077 bpf_jit_binary_alloc(unsigned int proglen
, u8
**image_ptr
,
1078 unsigned int alignment
,
1079 bpf_jit_fill_hole_t bpf_fill_ill_insns
)
1081 struct bpf_binary_header
*hdr
;
1082 u32 size
, hole
, start
;
1084 WARN_ON_ONCE(!is_power_of_2(alignment
) ||
1085 alignment
> BPF_IMAGE_ALIGNMENT
);
1087 /* Most of BPF filters are really small, but if some of them
1088 * fill a page, allow at least 128 extra bytes to insert a
1089 * random section of illegal instructions.
1091 size
= round_up(proglen
+ sizeof(*hdr
) + 128, PAGE_SIZE
);
1093 if (bpf_jit_charge_modmem(size
))
1095 hdr
= bpf_jit_alloc_exec(size
);
1097 bpf_jit_uncharge_modmem(size
);
1101 /* Fill space with illegal/arch-dep instructions. */
1102 bpf_fill_ill_insns(hdr
, size
);
1105 hole
= min_t(unsigned int, size
- (proglen
+ sizeof(*hdr
)),
1106 PAGE_SIZE
- sizeof(*hdr
));
1107 start
= get_random_u32_below(hole
) & ~(alignment
- 1);
1109 /* Leave a random number of instructions before BPF code. */
1110 *image_ptr
= &hdr
->image
[start
];
1115 void bpf_jit_binary_free(struct bpf_binary_header
*hdr
)
1117 u32 size
= hdr
->size
;
1119 bpf_jit_free_exec(hdr
);
1120 bpf_jit_uncharge_modmem(size
);
1123 /* Allocate jit binary from bpf_prog_pack allocator.
1124 * Since the allocated memory is RO+X, the JIT engine cannot write directly
1125 * to the memory. To solve this problem, a RW buffer is also allocated at
1126 * as the same time. The JIT engine should calculate offsets based on the
1127 * RO memory address, but write JITed program to the RW buffer. Once the
1128 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
1129 * the JITed program to the RO memory.
1131 struct bpf_binary_header
*
1132 bpf_jit_binary_pack_alloc(unsigned int proglen
, u8
**image_ptr
,
1133 unsigned int alignment
,
1134 struct bpf_binary_header
**rw_header
,
1136 bpf_jit_fill_hole_t bpf_fill_ill_insns
)
1138 struct bpf_binary_header
*ro_header
;
1139 u32 size
, hole
, start
;
1141 WARN_ON_ONCE(!is_power_of_2(alignment
) ||
1142 alignment
> BPF_IMAGE_ALIGNMENT
);
1144 /* add 16 bytes for a random section of illegal instructions */
1145 size
= round_up(proglen
+ sizeof(*ro_header
) + 16, BPF_PROG_CHUNK_SIZE
);
1147 if (bpf_jit_charge_modmem(size
))
1149 ro_header
= bpf_prog_pack_alloc(size
, bpf_fill_ill_insns
);
1151 bpf_jit_uncharge_modmem(size
);
1155 *rw_header
= kvmalloc(size
, GFP_KERNEL
);
1157 bpf_prog_pack_free(ro_header
, size
);
1158 bpf_jit_uncharge_modmem(size
);
1162 /* Fill space with illegal/arch-dep instructions. */
1163 bpf_fill_ill_insns(*rw_header
, size
);
1164 (*rw_header
)->size
= size
;
1166 hole
= min_t(unsigned int, size
- (proglen
+ sizeof(*ro_header
)),
1167 BPF_PROG_CHUNK_SIZE
- sizeof(*ro_header
));
1168 start
= get_random_u32_below(hole
) & ~(alignment
- 1);
1170 *image_ptr
= &ro_header
->image
[start
];
1171 *rw_image
= &(*rw_header
)->image
[start
];
1176 /* Copy JITed text from rw_header to its final location, the ro_header. */
1177 int bpf_jit_binary_pack_finalize(struct bpf_binary_header
*ro_header
,
1178 struct bpf_binary_header
*rw_header
)
1182 ptr
= bpf_arch_text_copy(ro_header
, rw_header
, rw_header
->size
);
1187 bpf_prog_pack_free(ro_header
, ro_header
->size
);
1188 return PTR_ERR(ptr
);
1193 /* bpf_jit_binary_pack_free is called in two different scenarios:
1194 * 1) when the program is freed after;
1195 * 2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
1196 * For case 2), we need to free both the RO memory and the RW buffer.
1198 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1199 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1200 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1201 * bpf_arch_text_copy (when jit fails).
1203 void bpf_jit_binary_pack_free(struct bpf_binary_header
*ro_header
,
1204 struct bpf_binary_header
*rw_header
)
1206 u32 size
= ro_header
->size
;
1208 bpf_prog_pack_free(ro_header
, size
);
1210 bpf_jit_uncharge_modmem(size
);
1213 struct bpf_binary_header
*
1214 bpf_jit_binary_pack_hdr(const struct bpf_prog
*fp
)
1216 unsigned long real_start
= (unsigned long)fp
->bpf_func
;
1219 addr
= real_start
& BPF_PROG_CHUNK_MASK
;
1220 return (void *)addr
;
1223 static inline struct bpf_binary_header
*
1224 bpf_jit_binary_hdr(const struct bpf_prog
*fp
)
1226 unsigned long real_start
= (unsigned long)fp
->bpf_func
;
1229 addr
= real_start
& PAGE_MASK
;
1230 return (void *)addr
;
1233 /* This symbol is only overridden by archs that have different
1234 * requirements than the usual eBPF JITs, f.e. when they only
1235 * implement cBPF JIT, do not set images read-only, etc.
1237 void __weak
bpf_jit_free(struct bpf_prog
*fp
)
1240 struct bpf_binary_header
*hdr
= bpf_jit_binary_hdr(fp
);
1242 bpf_jit_binary_free(hdr
);
1243 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp
));
1246 bpf_prog_unlock_free(fp
);
1249 int bpf_jit_get_func_addr(const struct bpf_prog
*prog
,
1250 const struct bpf_insn
*insn
, bool extra_pass
,
1251 u64
*func_addr
, bool *func_addr_fixed
)
1253 s16 off
= insn
->off
;
1254 s32 imm
= insn
->imm
;
1258 *func_addr_fixed
= insn
->src_reg
!= BPF_PSEUDO_CALL
;
1259 if (!*func_addr_fixed
) {
1260 /* Place-holder address till the last pass has collected
1261 * all addresses for JITed subprograms in which case we
1262 * can pick them up from prog->aux.
1266 else if (prog
->aux
->func
&&
1267 off
>= 0 && off
< prog
->aux
->real_func_cnt
)
1268 addr
= (u8
*)prog
->aux
->func
[off
]->bpf_func
;
1271 } else if (insn
->src_reg
== BPF_PSEUDO_KFUNC_CALL
&&
1272 bpf_jit_supports_far_kfunc_call()) {
1273 err
= bpf_get_kfunc_addr(prog
, insn
->imm
, insn
->off
, &addr
);
1277 /* Address of a BPF helper call. Since part of the core
1278 * kernel, it's always at a fixed location. __bpf_call_base
1279 * and the helper with imm relative to it are both in core
1282 addr
= (u8
*)__bpf_call_base
+ imm
;
1285 *func_addr
= (unsigned long)addr
;
1289 static int bpf_jit_blind_insn(const struct bpf_insn
*from
,
1290 const struct bpf_insn
*aux
,
1291 struct bpf_insn
*to_buff
,
1294 struct bpf_insn
*to
= to_buff
;
1295 u32 imm_rnd
= get_random_u32();
1298 BUILD_BUG_ON(BPF_REG_AX
+ 1 != MAX_BPF_JIT_REG
);
1299 BUILD_BUG_ON(MAX_BPF_REG
+ 1 != MAX_BPF_JIT_REG
);
1301 /* Constraints on AX register:
1303 * AX register is inaccessible from user space. It is mapped in
1304 * all JITs, and used here for constant blinding rewrites. It is
1305 * typically "stateless" meaning its contents are only valid within
1306 * the executed instruction, but not across several instructions.
1307 * There are a few exceptions however which are further detailed
1310 * Constant blinding is only used by JITs, not in the interpreter.
1311 * The interpreter uses AX in some occasions as a local temporary
1312 * register e.g. in DIV or MOD instructions.
1314 * In restricted circumstances, the verifier can also use the AX
1315 * register for rewrites as long as they do not interfere with
1318 if (from
->dst_reg
== BPF_REG_AX
|| from
->src_reg
== BPF_REG_AX
)
1321 if (from
->imm
== 0 &&
1322 (from
->code
== (BPF_ALU
| BPF_MOV
| BPF_K
) ||
1323 from
->code
== (BPF_ALU64
| BPF_MOV
| BPF_K
))) {
1324 *to
++ = BPF_ALU64_REG(BPF_XOR
, from
->dst_reg
, from
->dst_reg
);
1328 switch (from
->code
) {
1329 case BPF_ALU
| BPF_ADD
| BPF_K
:
1330 case BPF_ALU
| BPF_SUB
| BPF_K
:
1331 case BPF_ALU
| BPF_AND
| BPF_K
:
1332 case BPF_ALU
| BPF_OR
| BPF_K
:
1333 case BPF_ALU
| BPF_XOR
| BPF_K
:
1334 case BPF_ALU
| BPF_MUL
| BPF_K
:
1335 case BPF_ALU
| BPF_MOV
| BPF_K
:
1336 case BPF_ALU
| BPF_DIV
| BPF_K
:
1337 case BPF_ALU
| BPF_MOD
| BPF_K
:
1338 *to
++ = BPF_ALU32_IMM(BPF_MOV
, BPF_REG_AX
, imm_rnd
^ from
->imm
);
1339 *to
++ = BPF_ALU32_IMM(BPF_XOR
, BPF_REG_AX
, imm_rnd
);
1340 *to
++ = BPF_ALU32_REG_OFF(from
->code
, from
->dst_reg
, BPF_REG_AX
, from
->off
);
1343 case BPF_ALU64
| BPF_ADD
| BPF_K
:
1344 case BPF_ALU64
| BPF_SUB
| BPF_K
:
1345 case BPF_ALU64
| BPF_AND
| BPF_K
:
1346 case BPF_ALU64
| BPF_OR
| BPF_K
:
1347 case BPF_ALU64
| BPF_XOR
| BPF_K
:
1348 case BPF_ALU64
| BPF_MUL
| BPF_K
:
1349 case BPF_ALU64
| BPF_MOV
| BPF_K
:
1350 case BPF_ALU64
| BPF_DIV
| BPF_K
:
1351 case BPF_ALU64
| BPF_MOD
| BPF_K
:
1352 *to
++ = BPF_ALU64_IMM(BPF_MOV
, BPF_REG_AX
, imm_rnd
^ from
->imm
);
1353 *to
++ = BPF_ALU64_IMM(BPF_XOR
, BPF_REG_AX
, imm_rnd
);
1354 *to
++ = BPF_ALU64_REG_OFF(from
->code
, from
->dst_reg
, BPF_REG_AX
, from
->off
);
1357 case BPF_JMP
| BPF_JEQ
| BPF_K
:
1358 case BPF_JMP
| BPF_JNE
| BPF_K
:
1359 case BPF_JMP
| BPF_JGT
| BPF_K
:
1360 case BPF_JMP
| BPF_JLT
| BPF_K
:
1361 case BPF_JMP
| BPF_JGE
| BPF_K
:
1362 case BPF_JMP
| BPF_JLE
| BPF_K
:
1363 case BPF_JMP
| BPF_JSGT
| BPF_K
:
1364 case BPF_JMP
| BPF_JSLT
| BPF_K
:
1365 case BPF_JMP
| BPF_JSGE
| BPF_K
:
1366 case BPF_JMP
| BPF_JSLE
| BPF_K
:
1367 case BPF_JMP
| BPF_JSET
| BPF_K
:
1368 /* Accommodate for extra offset in case of a backjump. */
1372 *to
++ = BPF_ALU64_IMM(BPF_MOV
, BPF_REG_AX
, imm_rnd
^ from
->imm
);
1373 *to
++ = BPF_ALU64_IMM(BPF_XOR
, BPF_REG_AX
, imm_rnd
);
1374 *to
++ = BPF_JMP_REG(from
->code
, from
->dst_reg
, BPF_REG_AX
, off
);
1377 case BPF_JMP32
| BPF_JEQ
| BPF_K
:
1378 case BPF_JMP32
| BPF_JNE
| BPF_K
:
1379 case BPF_JMP32
| BPF_JGT
| BPF_K
:
1380 case BPF_JMP32
| BPF_JLT
| BPF_K
:
1381 case BPF_JMP32
| BPF_JGE
| BPF_K
:
1382 case BPF_JMP32
| BPF_JLE
| BPF_K
:
1383 case BPF_JMP32
| BPF_JSGT
| BPF_K
:
1384 case BPF_JMP32
| BPF_JSLT
| BPF_K
:
1385 case BPF_JMP32
| BPF_JSGE
| BPF_K
:
1386 case BPF_JMP32
| BPF_JSLE
| BPF_K
:
1387 case BPF_JMP32
| BPF_JSET
| BPF_K
:
1388 /* Accommodate for extra offset in case of a backjump. */
1392 *to
++ = BPF_ALU32_IMM(BPF_MOV
, BPF_REG_AX
, imm_rnd
^ from
->imm
);
1393 *to
++ = BPF_ALU32_IMM(BPF_XOR
, BPF_REG_AX
, imm_rnd
);
1394 *to
++ = BPF_JMP32_REG(from
->code
, from
->dst_reg
, BPF_REG_AX
,
1398 case BPF_LD
| BPF_IMM
| BPF_DW
:
1399 *to
++ = BPF_ALU64_IMM(BPF_MOV
, BPF_REG_AX
, imm_rnd
^ aux
[1].imm
);
1400 *to
++ = BPF_ALU64_IMM(BPF_XOR
, BPF_REG_AX
, imm_rnd
);
1401 *to
++ = BPF_ALU64_IMM(BPF_LSH
, BPF_REG_AX
, 32);
1402 *to
++ = BPF_ALU64_REG(BPF_MOV
, aux
[0].dst_reg
, BPF_REG_AX
);
1404 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1405 *to
++ = BPF_ALU32_IMM(BPF_MOV
, BPF_REG_AX
, imm_rnd
^ aux
[0].imm
);
1406 *to
++ = BPF_ALU32_IMM(BPF_XOR
, BPF_REG_AX
, imm_rnd
);
1408 *to
++ = BPF_ZEXT_REG(BPF_REG_AX
);
1409 *to
++ = BPF_ALU64_REG(BPF_OR
, aux
[0].dst_reg
, BPF_REG_AX
);
1412 case BPF_ST
| BPF_MEM
| BPF_DW
:
1413 case BPF_ST
| BPF_MEM
| BPF_W
:
1414 case BPF_ST
| BPF_MEM
| BPF_H
:
1415 case BPF_ST
| BPF_MEM
| BPF_B
:
1416 *to
++ = BPF_ALU64_IMM(BPF_MOV
, BPF_REG_AX
, imm_rnd
^ from
->imm
);
1417 *to
++ = BPF_ALU64_IMM(BPF_XOR
, BPF_REG_AX
, imm_rnd
);
1418 *to
++ = BPF_STX_MEM(from
->code
, from
->dst_reg
, BPF_REG_AX
, from
->off
);
1422 return to
- to_buff
;
1425 static struct bpf_prog
*bpf_prog_clone_create(struct bpf_prog
*fp_other
,
1426 gfp_t gfp_extra_flags
)
1428 gfp_t gfp_flags
= GFP_KERNEL
| __GFP_ZERO
| gfp_extra_flags
;
1429 struct bpf_prog
*fp
;
1431 fp
= __vmalloc(fp_other
->pages
* PAGE_SIZE
, gfp_flags
);
1433 /* aux->prog still points to the fp_other one, so
1434 * when promoting the clone to the real program,
1435 * this still needs to be adapted.
1437 memcpy(fp
, fp_other
, fp_other
->pages
* PAGE_SIZE
);
1443 static void bpf_prog_clone_free(struct bpf_prog
*fp
)
1445 /* aux was stolen by the other clone, so we cannot free
1446 * it from this path! It will be freed eventually by the
1447 * other program on release.
1449 * At this point, we don't need a deferred release since
1450 * clone is guaranteed to not be locked.
1455 __bpf_prog_free(fp
);
1458 void bpf_jit_prog_release_other(struct bpf_prog
*fp
, struct bpf_prog
*fp_other
)
1460 /* We have to repoint aux->prog to self, as we don't
1461 * know whether fp here is the clone or the original.
1464 bpf_prog_clone_free(fp_other
);
1467 struct bpf_prog
*bpf_jit_blind_constants(struct bpf_prog
*prog
)
1469 struct bpf_insn insn_buff
[16], aux
[2];
1470 struct bpf_prog
*clone
, *tmp
;
1471 int insn_delta
, insn_cnt
;
1472 struct bpf_insn
*insn
;
1475 if (!prog
->blinding_requested
|| prog
->blinded
)
1478 clone
= bpf_prog_clone_create(prog
, GFP_USER
);
1480 return ERR_PTR(-ENOMEM
);
1482 insn_cnt
= clone
->len
;
1483 insn
= clone
->insnsi
;
1485 for (i
= 0; i
< insn_cnt
; i
++, insn
++) {
1486 if (bpf_pseudo_func(insn
)) {
1487 /* ld_imm64 with an address of bpf subprog is not
1488 * a user controlled constant. Don't randomize it,
1489 * since it will conflict with jit_subprogs() logic.
1496 /* We temporarily need to hold the original ld64 insn
1497 * so that we can still access the first part in the
1498 * second blinding run.
1500 if (insn
[0].code
== (BPF_LD
| BPF_IMM
| BPF_DW
) &&
1502 memcpy(aux
, insn
, sizeof(aux
));
1504 rewritten
= bpf_jit_blind_insn(insn
, aux
, insn_buff
,
1505 clone
->aux
->verifier_zext
);
1509 tmp
= bpf_patch_insn_single(clone
, i
, insn_buff
, rewritten
);
1511 /* Patching may have repointed aux->prog during
1512 * realloc from the original one, so we need to
1513 * fix it up here on error.
1515 bpf_jit_prog_release_other(prog
, clone
);
1520 insn_delta
= rewritten
- 1;
1522 /* Walk new program and skip insns we just inserted. */
1523 insn
= clone
->insnsi
+ i
+ insn_delta
;
1524 insn_cnt
+= insn_delta
;
1531 #endif /* CONFIG_BPF_JIT */
1533 /* Base function for offset calculation. Needs to go into .text section,
1534 * therefore keeping it non-static as well; will also be used by JITs
1535 * anyway later on, so do not let the compiler omit it. This also needs
1536 * to go into kallsyms for correlation from e.g. bpftool, so naming
1539 noinline u64
__bpf_call_base(u64 r1
, u64 r2
, u64 r3
, u64 r4
, u64 r5
)
1543 EXPORT_SYMBOL_GPL(__bpf_call_base
);
1545 /* All UAPI available opcodes. */
1546 #define BPF_INSN_MAP(INSN_2, INSN_3) \
1547 /* 32 bit ALU operations. */ \
1548 /* Register based. */ \
1549 INSN_3(ALU, ADD, X), \
1550 INSN_3(ALU, SUB, X), \
1551 INSN_3(ALU, AND, X), \
1552 INSN_3(ALU, OR, X), \
1553 INSN_3(ALU, LSH, X), \
1554 INSN_3(ALU, RSH, X), \
1555 INSN_3(ALU, XOR, X), \
1556 INSN_3(ALU, MUL, X), \
1557 INSN_3(ALU, MOV, X), \
1558 INSN_3(ALU, ARSH, X), \
1559 INSN_3(ALU, DIV, X), \
1560 INSN_3(ALU, MOD, X), \
1562 INSN_3(ALU, END, TO_BE), \
1563 INSN_3(ALU, END, TO_LE), \
1564 /* Immediate based. */ \
1565 INSN_3(ALU, ADD, K), \
1566 INSN_3(ALU, SUB, K), \
1567 INSN_3(ALU, AND, K), \
1568 INSN_3(ALU, OR, K), \
1569 INSN_3(ALU, LSH, K), \
1570 INSN_3(ALU, RSH, K), \
1571 INSN_3(ALU, XOR, K), \
1572 INSN_3(ALU, MUL, K), \
1573 INSN_3(ALU, MOV, K), \
1574 INSN_3(ALU, ARSH, K), \
1575 INSN_3(ALU, DIV, K), \
1576 INSN_3(ALU, MOD, K), \
1577 /* 64 bit ALU operations. */ \
1578 /* Register based. */ \
1579 INSN_3(ALU64, ADD, X), \
1580 INSN_3(ALU64, SUB, X), \
1581 INSN_3(ALU64, AND, X), \
1582 INSN_3(ALU64, OR, X), \
1583 INSN_3(ALU64, LSH, X), \
1584 INSN_3(ALU64, RSH, X), \
1585 INSN_3(ALU64, XOR, X), \
1586 INSN_3(ALU64, MUL, X), \
1587 INSN_3(ALU64, MOV, X), \
1588 INSN_3(ALU64, ARSH, X), \
1589 INSN_3(ALU64, DIV, X), \
1590 INSN_3(ALU64, MOD, X), \
1591 INSN_2(ALU64, NEG), \
1592 INSN_3(ALU64, END, TO_LE), \
1593 /* Immediate based. */ \
1594 INSN_3(ALU64, ADD, K), \
1595 INSN_3(ALU64, SUB, K), \
1596 INSN_3(ALU64, AND, K), \
1597 INSN_3(ALU64, OR, K), \
1598 INSN_3(ALU64, LSH, K), \
1599 INSN_3(ALU64, RSH, K), \
1600 INSN_3(ALU64, XOR, K), \
1601 INSN_3(ALU64, MUL, K), \
1602 INSN_3(ALU64, MOV, K), \
1603 INSN_3(ALU64, ARSH, K), \
1604 INSN_3(ALU64, DIV, K), \
1605 INSN_3(ALU64, MOD, K), \
1606 /* Call instruction. */ \
1607 INSN_2(JMP, CALL), \
1608 /* Exit instruction. */ \
1609 INSN_2(JMP, EXIT), \
1610 /* 32-bit Jump instructions. */ \
1611 /* Register based. */ \
1612 INSN_3(JMP32, JEQ, X), \
1613 INSN_3(JMP32, JNE, X), \
1614 INSN_3(JMP32, JGT, X), \
1615 INSN_3(JMP32, JLT, X), \
1616 INSN_3(JMP32, JGE, X), \
1617 INSN_3(JMP32, JLE, X), \
1618 INSN_3(JMP32, JSGT, X), \
1619 INSN_3(JMP32, JSLT, X), \
1620 INSN_3(JMP32, JSGE, X), \
1621 INSN_3(JMP32, JSLE, X), \
1622 INSN_3(JMP32, JSET, X), \
1623 /* Immediate based. */ \
1624 INSN_3(JMP32, JEQ, K), \
1625 INSN_3(JMP32, JNE, K), \
1626 INSN_3(JMP32, JGT, K), \
1627 INSN_3(JMP32, JLT, K), \
1628 INSN_3(JMP32, JGE, K), \
1629 INSN_3(JMP32, JLE, K), \
1630 INSN_3(JMP32, JSGT, K), \
1631 INSN_3(JMP32, JSLT, K), \
1632 INSN_3(JMP32, JSGE, K), \
1633 INSN_3(JMP32, JSLE, K), \
1634 INSN_3(JMP32, JSET, K), \
1635 /* Jump instructions. */ \
1636 /* Register based. */ \
1637 INSN_3(JMP, JEQ, X), \
1638 INSN_3(JMP, JNE, X), \
1639 INSN_3(JMP, JGT, X), \
1640 INSN_3(JMP, JLT, X), \
1641 INSN_3(JMP, JGE, X), \
1642 INSN_3(JMP, JLE, X), \
1643 INSN_3(JMP, JSGT, X), \
1644 INSN_3(JMP, JSLT, X), \
1645 INSN_3(JMP, JSGE, X), \
1646 INSN_3(JMP, JSLE, X), \
1647 INSN_3(JMP, JSET, X), \
1648 /* Immediate based. */ \
1649 INSN_3(JMP, JEQ, K), \
1650 INSN_3(JMP, JNE, K), \
1651 INSN_3(JMP, JGT, K), \
1652 INSN_3(JMP, JLT, K), \
1653 INSN_3(JMP, JGE, K), \
1654 INSN_3(JMP, JLE, K), \
1655 INSN_3(JMP, JSGT, K), \
1656 INSN_3(JMP, JSLT, K), \
1657 INSN_3(JMP, JSGE, K), \
1658 INSN_3(JMP, JSLE, K), \
1659 INSN_3(JMP, JSET, K), \
1661 INSN_2(JMP32, JA), \
1662 /* Store instructions. */ \
1663 /* Register based. */ \
1664 INSN_3(STX, MEM, B), \
1665 INSN_3(STX, MEM, H), \
1666 INSN_3(STX, MEM, W), \
1667 INSN_3(STX, MEM, DW), \
1668 INSN_3(STX, ATOMIC, W), \
1669 INSN_3(STX, ATOMIC, DW), \
1670 /* Immediate based. */ \
1671 INSN_3(ST, MEM, B), \
1672 INSN_3(ST, MEM, H), \
1673 INSN_3(ST, MEM, W), \
1674 INSN_3(ST, MEM, DW), \
1675 /* Load instructions. */ \
1676 /* Register based. */ \
1677 INSN_3(LDX, MEM, B), \
1678 INSN_3(LDX, MEM, H), \
1679 INSN_3(LDX, MEM, W), \
1680 INSN_3(LDX, MEM, DW), \
1681 INSN_3(LDX, MEMSX, B), \
1682 INSN_3(LDX, MEMSX, H), \
1683 INSN_3(LDX, MEMSX, W), \
1684 /* Immediate based. */ \
1687 bool bpf_opcode_in_insntable(u8 code
)
1689 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1690 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1691 static const bool public_insntable
[256] = {
1692 [0 ... 255] = false,
1693 /* Now overwrite non-defaults ... */
1694 BPF_INSN_MAP(BPF_INSN_2_TBL
, BPF_INSN_3_TBL
),
1695 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1696 [BPF_LD
| BPF_ABS
| BPF_B
] = true,
1697 [BPF_LD
| BPF_ABS
| BPF_H
] = true,
1698 [BPF_LD
| BPF_ABS
| BPF_W
] = true,
1699 [BPF_LD
| BPF_IND
| BPF_B
] = true,
1700 [BPF_LD
| BPF_IND
| BPF_H
] = true,
1701 [BPF_LD
| BPF_IND
| BPF_W
] = true,
1702 [BPF_JMP
| BPF_JCOND
] = true,
1704 #undef BPF_INSN_3_TBL
1705 #undef BPF_INSN_2_TBL
1706 return public_insntable
[code
];
1709 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1711 * ___bpf_prog_run - run eBPF program on a given context
1712 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1713 * @insn: is the array of eBPF instructions
1715 * Decode and execute eBPF instructions.
1717 * Return: whatever value is in %BPF_R0 at program exit
1719 static u64
___bpf_prog_run(u64
*regs
, const struct bpf_insn
*insn
)
1721 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1722 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1723 static const void * const jumptable
[256] __annotate_jump_table
= {
1724 [0 ... 255] = &&default_label
,
1725 /* Now overwrite non-defaults ... */
1726 BPF_INSN_MAP(BPF_INSN_2_LBL
, BPF_INSN_3_LBL
),
1727 /* Non-UAPI available opcodes. */
1728 [BPF_JMP
| BPF_CALL_ARGS
] = &&JMP_CALL_ARGS
,
1729 [BPF_JMP
| BPF_TAIL_CALL
] = &&JMP_TAIL_CALL
,
1730 [BPF_ST
| BPF_NOSPEC
] = &&ST_NOSPEC
,
1731 [BPF_LDX
| BPF_PROBE_MEM
| BPF_B
] = &&LDX_PROBE_MEM_B
,
1732 [BPF_LDX
| BPF_PROBE_MEM
| BPF_H
] = &&LDX_PROBE_MEM_H
,
1733 [BPF_LDX
| BPF_PROBE_MEM
| BPF_W
] = &&LDX_PROBE_MEM_W
,
1734 [BPF_LDX
| BPF_PROBE_MEM
| BPF_DW
] = &&LDX_PROBE_MEM_DW
,
1735 [BPF_LDX
| BPF_PROBE_MEMSX
| BPF_B
] = &&LDX_PROBE_MEMSX_B
,
1736 [BPF_LDX
| BPF_PROBE_MEMSX
| BPF_H
] = &&LDX_PROBE_MEMSX_H
,
1737 [BPF_LDX
| BPF_PROBE_MEMSX
| BPF_W
] = &&LDX_PROBE_MEMSX_W
,
1739 #undef BPF_INSN_3_LBL
1740 #undef BPF_INSN_2_LBL
1741 u32 tail_call_cnt
= 0;
1743 #define CONT ({ insn++; goto select_insn; })
1744 #define CONT_JMP ({ insn++; goto select_insn; })
1747 goto *jumptable
[insn
->code
];
1749 /* Explicitly mask the register-based shift amounts with 63 or 31
1750 * to avoid undefined behavior. Normally this won't affect the
1751 * generated code, for example, in case of native 64 bit archs such
1752 * as x86-64 or arm64, the compiler is optimizing the AND away for
1753 * the interpreter. In case of JITs, each of the JIT backends compiles
1754 * the BPF shift operations to machine instructions which produce
1755 * implementation-defined results in such a case; the resulting
1756 * contents of the register may be arbitrary, but program behaviour
1757 * as a whole remains defined. In other words, in case of JIT backends,
1758 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1761 #define SHT(OPCODE, OP) \
1762 ALU64_##OPCODE##_X: \
1763 DST = DST OP (SRC & 63); \
1766 DST = (u32) DST OP ((u32) SRC & 31); \
1768 ALU64_##OPCODE##_K: \
1772 DST = (u32) DST OP (u32) IMM; \
1775 #define ALU(OPCODE, OP) \
1776 ALU64_##OPCODE##_X: \
1780 DST = (u32) DST OP (u32) SRC; \
1782 ALU64_##OPCODE##_K: \
1786 DST = (u32) DST OP (u32) IMM; \
1810 DST
= (u32
)(s8
) SRC
;
1813 DST
= (u32
)(s16
) SRC
;
1840 DST
= (u64
) (u32
) insn
[0].imm
| ((u64
) (u32
) insn
[1].imm
) << 32;
1844 DST
= (u64
) (u32
) (((s32
) DST
) >> (SRC
& 31));
1847 DST
= (u64
) (u32
) (((s32
) DST
) >> IMM
);
1850 (*(s64
*) &DST
) >>= (SRC
& 63);
1853 (*(s64
*) &DST
) >>= IMM
;
1858 div64_u64_rem(DST
, SRC
, &AX
);
1862 AX
= div64_s64(DST
, SRC
);
1863 DST
= DST
- AX
* SRC
;
1871 DST
= do_div(AX
, (u32
) SRC
);
1875 AX
= do_div(AX
, abs((s32
)SRC
));
1886 div64_u64_rem(DST
, IMM
, &AX
);
1890 AX
= div64_s64(DST
, IMM
);
1891 DST
= DST
- AX
* IMM
;
1899 DST
= do_div(AX
, (u32
) IMM
);
1903 AX
= do_div(AX
, abs((s32
)IMM
));
1914 DST
= div64_u64(DST
, SRC
);
1917 DST
= div64_s64(DST
, SRC
);
1925 do_div(AX
, (u32
) SRC
);
1930 do_div(AX
, abs((s32
)SRC
));
1931 if (((s32
)DST
< 0) == ((s32
)SRC
< 0))
1941 DST
= div64_u64(DST
, IMM
);
1944 DST
= div64_s64(DST
, IMM
);
1952 do_div(AX
, (u32
) IMM
);
1957 do_div(AX
, abs((s32
)IMM
));
1958 if (((s32
)DST
< 0) == ((s32
)IMM
< 0))
1968 DST
= (__force u16
) cpu_to_be16(DST
);
1971 DST
= (__force u32
) cpu_to_be32(DST
);
1974 DST
= (__force u64
) cpu_to_be64(DST
);
1981 DST
= (__force u16
) cpu_to_le16(DST
);
1984 DST
= (__force u32
) cpu_to_le32(DST
);
1987 DST
= (__force u64
) cpu_to_le64(DST
);
1994 DST
= (__force u16
) __swab16(DST
);
1997 DST
= (__force u32
) __swab32(DST
);
2000 DST
= (__force u64
) __swab64(DST
);
2007 /* Function call scratches BPF_R1-BPF_R5 registers,
2008 * preserves BPF_R6-BPF_R9, and stores return value
2011 BPF_R0
= (__bpf_call_base
+ insn
->imm
)(BPF_R1
, BPF_R2
, BPF_R3
,
2016 BPF_R0
= (__bpf_call_base_args
+ insn
->imm
)(BPF_R1
, BPF_R2
,
2019 insn
+ insn
->off
+ 1);
2023 struct bpf_map
*map
= (struct bpf_map
*) (unsigned long) BPF_R2
;
2024 struct bpf_array
*array
= container_of(map
, struct bpf_array
, map
);
2025 struct bpf_prog
*prog
;
2028 if (unlikely(index
>= array
->map
.max_entries
))
2031 if (unlikely(tail_call_cnt
>= MAX_TAIL_CALL_CNT
))
2036 prog
= READ_ONCE(array
->ptrs
[index
]);
2040 /* ARG1 at this point is guaranteed to point to CTX from
2041 * the verifier side due to the fact that the tail call is
2042 * handled like a helper, that is, bpf_tail_call_proto,
2043 * where arg1_type is ARG_PTR_TO_CTX.
2045 insn
= prog
->insnsi
;
2059 #define COND_JMP(SIGN, OPCODE, CMP_OP) \
2061 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
2062 insn += insn->off; \
2066 JMP32_##OPCODE##_X: \
2067 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
2068 insn += insn->off; \
2073 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
2074 insn += insn->off; \
2078 JMP32_##OPCODE##_K: \
2079 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
2080 insn += insn->off; \
2084 COND_JMP(u
, JEQ
, ==)
2085 COND_JMP(u
, JNE
, !=)
2088 COND_JMP(u
, JGE
, >=)
2089 COND_JMP(u
, JLE
, <=)
2090 COND_JMP(u
, JSET
, &)
2091 COND_JMP(s
, JSGT
, >)
2092 COND_JMP(s
, JSLT
, <)
2093 COND_JMP(s
, JSGE
, >=)
2094 COND_JMP(s
, JSLE
, <=)
2096 /* ST, STX and LDX*/
2098 /* Speculation barrier for mitigating Speculative Store Bypass.
2099 * In case of arm64, we rely on the firmware mitigation as
2100 * controlled via the ssbd kernel parameter. Whenever the
2101 * mitigation is enabled, it works for all of the kernel code
2102 * with no need to provide any additional instructions here.
2103 * In case of x86, we use 'lfence' insn for mitigation. We
2104 * reuse preexisting logic from Spectre v1 mitigation that
2105 * happens to produce the required code on x86 for v4 as well.
2109 #define LDST(SIZEOP, SIZE) \
2111 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
2114 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
2117 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
2119 LDX_PROBE_MEM_##SIZEOP: \
2120 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \
2121 (const void *)(long) (SRC + insn->off)); \
2122 DST = *((SIZE *)&DST); \
2131 #define LDSX(SIZEOP, SIZE) \
2132 LDX_MEMSX_##SIZEOP: \
2133 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
2135 LDX_PROBE_MEMSX_##SIZEOP: \
2136 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \
2137 (const void *)(long) (SRC + insn->off)); \
2138 DST = *((SIZE *)&DST); \
2146 #define ATOMIC_ALU_OP(BOP, KOP) \
2148 if (BPF_SIZE(insn->code) == BPF_W) \
2149 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
2150 (DST + insn->off)); \
2152 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
2153 (DST + insn->off)); \
2155 case BOP | BPF_FETCH: \
2156 if (BPF_SIZE(insn->code) == BPF_W) \
2157 SRC = (u32) atomic_fetch_##KOP( \
2159 (atomic_t *)(unsigned long) (DST + insn->off)); \
2161 SRC = (u64) atomic64_fetch_##KOP( \
2163 (atomic64_t *)(unsigned long) (DST + insn->off)); \
2169 ATOMIC_ALU_OP(BPF_ADD
, add
)
2170 ATOMIC_ALU_OP(BPF_AND
, and)
2171 ATOMIC_ALU_OP(BPF_OR
, or)
2172 ATOMIC_ALU_OP(BPF_XOR
, xor)
2173 #undef ATOMIC_ALU_OP
2176 if (BPF_SIZE(insn
->code
) == BPF_W
)
2177 SRC
= (u32
) atomic_xchg(
2178 (atomic_t
*)(unsigned long) (DST
+ insn
->off
),
2181 SRC
= (u64
) atomic64_xchg(
2182 (atomic64_t
*)(unsigned long) (DST
+ insn
->off
),
2186 if (BPF_SIZE(insn
->code
) == BPF_W
)
2187 BPF_R0
= (u32
) atomic_cmpxchg(
2188 (atomic_t
*)(unsigned long) (DST
+ insn
->off
),
2189 (u32
) BPF_R0
, (u32
) SRC
);
2191 BPF_R0
= (u64
) atomic64_cmpxchg(
2192 (atomic64_t
*)(unsigned long) (DST
+ insn
->off
),
2193 (u64
) BPF_R0
, (u64
) SRC
);
2202 /* If we ever reach this, we have a bug somewhere. Die hard here
2203 * instead of just returning 0; we could be somewhere in a subprog,
2204 * so execution could continue otherwise which we do /not/ want.
2206 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
2208 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2209 insn
->code
, insn
->imm
);
2214 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2215 #define DEFINE_BPF_PROG_RUN(stack_size) \
2216 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2218 u64 stack[stack_size / sizeof(u64)]; \
2219 u64 regs[MAX_BPF_EXT_REG] = {}; \
2221 kmsan_unpoison_memory(stack, sizeof(stack)); \
2222 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2223 ARG1 = (u64) (unsigned long) ctx; \
2224 return ___bpf_prog_run(regs, insn); \
2227 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2228 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2229 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2230 const struct bpf_insn *insn) \
2232 u64 stack[stack_size / sizeof(u64)]; \
2233 u64 regs[MAX_BPF_EXT_REG]; \
2235 kmsan_unpoison_memory(stack, sizeof(stack)); \
2236 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2242 return ___bpf_prog_run(regs, insn); \
2245 #define EVAL1(FN, X) FN(X)
2246 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2247 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2248 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2249 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2250 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2252 EVAL6(DEFINE_BPF_PROG_RUN
, 32, 64, 96, 128, 160, 192);
2253 EVAL6(DEFINE_BPF_PROG_RUN
, 224, 256, 288, 320, 352, 384);
2254 EVAL4(DEFINE_BPF_PROG_RUN
, 416, 448, 480, 512);
2256 EVAL6(DEFINE_BPF_PROG_RUN_ARGS
, 32, 64, 96, 128, 160, 192);
2257 EVAL6(DEFINE_BPF_PROG_RUN_ARGS
, 224, 256, 288, 320, 352, 384);
2258 EVAL4(DEFINE_BPF_PROG_RUN_ARGS
, 416, 448, 480, 512);
2260 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2262 static unsigned int (*interpreters
[])(const void *ctx
,
2263 const struct bpf_insn
*insn
) = {
2264 EVAL6(PROG_NAME_LIST
, 32, 64, 96, 128, 160, 192)
2265 EVAL6(PROG_NAME_LIST
, 224, 256, 288, 320, 352, 384)
2266 EVAL4(PROG_NAME_LIST
, 416, 448, 480, 512)
2268 #undef PROG_NAME_LIST
2269 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2270 static __maybe_unused
2271 u64 (*interpreters_args
[])(u64 r1
, u64 r2
, u64 r3
, u64 r4
, u64 r5
,
2272 const struct bpf_insn
*insn
) = {
2273 EVAL6(PROG_NAME_LIST
, 32, 64, 96, 128, 160, 192)
2274 EVAL6(PROG_NAME_LIST
, 224, 256, 288, 320, 352, 384)
2275 EVAL4(PROG_NAME_LIST
, 416, 448, 480, 512)
2277 #undef PROG_NAME_LIST
2279 #ifdef CONFIG_BPF_SYSCALL
2280 void bpf_patch_call_args(struct bpf_insn
*insn
, u32 stack_depth
)
2282 stack_depth
= max_t(u32
, stack_depth
, 1);
2283 insn
->off
= (s16
) insn
->imm
;
2284 insn
->imm
= interpreters_args
[(round_up(stack_depth
, 32) / 32) - 1] -
2285 __bpf_call_base_args
;
2286 insn
->code
= BPF_JMP
| BPF_CALL_ARGS
;
2290 static unsigned int __bpf_prog_ret0_warn(const void *ctx
,
2291 const struct bpf_insn
*insn
)
2293 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2294 * is not working properly, so warn about it!
2301 bool bpf_prog_map_compatible(struct bpf_map
*map
,
2302 const struct bpf_prog
*fp
)
2304 enum bpf_prog_type prog_type
= resolve_prog_type(fp
);
2306 struct bpf_prog_aux
*aux
= fp
->aux
;
2308 if (fp
->kprobe_override
)
2311 /* XDP programs inserted into maps are not guaranteed to run on
2312 * a particular netdev (and can run outside driver context entirely
2313 * in the case of devmap and cpumap). Until device checks
2314 * are implemented, prohibit adding dev-bound programs to program maps.
2316 if (bpf_prog_is_dev_bound(aux
))
2319 spin_lock(&map
->owner
.lock
);
2320 if (!map
->owner
.type
) {
2321 /* There's no owner yet where we could check for
2324 map
->owner
.type
= prog_type
;
2325 map
->owner
.jited
= fp
->jited
;
2326 map
->owner
.xdp_has_frags
= aux
->xdp_has_frags
;
2327 map
->owner
.attach_func_proto
= aux
->attach_func_proto
;
2330 ret
= map
->owner
.type
== prog_type
&&
2331 map
->owner
.jited
== fp
->jited
&&
2332 map
->owner
.xdp_has_frags
== aux
->xdp_has_frags
;
2334 map
->owner
.attach_func_proto
!= aux
->attach_func_proto
) {
2335 switch (prog_type
) {
2336 case BPF_PROG_TYPE_TRACING
:
2337 case BPF_PROG_TYPE_LSM
:
2338 case BPF_PROG_TYPE_EXT
:
2339 case BPF_PROG_TYPE_STRUCT_OPS
:
2347 spin_unlock(&map
->owner
.lock
);
2352 static int bpf_check_tail_call(const struct bpf_prog
*fp
)
2354 struct bpf_prog_aux
*aux
= fp
->aux
;
2357 mutex_lock(&aux
->used_maps_mutex
);
2358 for (i
= 0; i
< aux
->used_map_cnt
; i
++) {
2359 struct bpf_map
*map
= aux
->used_maps
[i
];
2361 if (!map_type_contains_progs(map
))
2364 if (!bpf_prog_map_compatible(map
, fp
)) {
2371 mutex_unlock(&aux
->used_maps_mutex
);
static void bpf_prog_select_func(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif
}
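
/* Illustrative note (not in the original source): the interpreter table is
 * indexed by the stack depth rounded up to the next multiple of 32 bytes,
 * e.g.
 *
 *	stack_depth =   1 -> round_up(1, 32)   / 32 - 1 = slot 0  (32-byte frame)
 *	stack_depth =  96 -> round_up(96, 32)  / 32 - 1 = slot 2  (96-byte frame)
 *	stack_depth = 500 -> round_up(500, 32) / 32 - 1 = slot 15 (512-byte frame)
 *
 * which matches the 32..512 byte variants generated by the EVAL*() macros
 * above.
 */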
/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via bpf_prog_run() function.
 *
 * Return: the &fp argument along with &err set to 0 for success or
 * a negative errno code on failure
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	/* In case of BPF to BPF calls, verifier did all the prep
	 * work with regards to JITing, etc.
	 */
	bool jit_needed = false;

	if (fp->bpf_func)
		goto finalize;

	if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
	    bpf_prog_has_kfunc_call(fp))
		jit_needed = true;

	bpf_prog_select_func(fp);

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	if (!bpf_prog_is_offloaded(fp->aux)) {
		*err = bpf_prog_alloc_jited_linfo(fp);
		if (*err)
			return fp;

		fp = bpf_int_jit_compile(fp);
		bpf_prog_jit_attempt_done(fp);
		if (!fp->jited && jit_needed) {
			*err = -ENOTSUPP;
			return fp;
		}
	} else {
		*err = bpf_prog_offload_compile(fp);
		if (*err)
			return fp;
	}

finalize:
	*err = bpf_prog_lock_ro(fp);
	if (*err)
		return fp;

	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine, if we deal
	 * with JITed or non JITed program concatenations and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
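
/* Illustrative usage sketch (hypothetical caller, not kernel code): after the
 * verifier has accepted a program, the load path hands it over roughly like
 *
 *	int err;
 *
 *	prog = bpf_prog_select_runtime(prog, &err);
 *	if (err)
 *		goto free_prog;
 *
 * On success prog->bpf_func points either at the JIT image or at one of the
 * interpreters selected by bpf_prog_select_func().
 */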
static unsigned int __bpf_prog_ret1(const void *ctx,
				    const struct bpf_insn *insn)
{
	return 1;
}

static struct bpf_prog_dummy {
	struct bpf_prog prog;
} dummy_bpf_prog = {
	.prog = {
		.bpf_func = __bpf_prog_ret1,
	},
};

struct bpf_empty_prog_array bpf_empty_prog_array = {
	.null_prog = NULL,
};
EXPORT_SYMBOL(bpf_empty_prog_array);
struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
	struct bpf_prog_array *p;

	if (prog_cnt)
		p = kzalloc(struct_size(p, items, prog_cnt + 1), flags);
	else
		p = &bpf_empty_prog_array.hdr;

	return p;
}
void bpf_prog_array_free(struct bpf_prog_array *progs)
{
	if (!progs || progs == &bpf_empty_prog_array.hdr)
		return;
	kfree_rcu(progs, rcu);
}
static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
{
	struct bpf_prog_array *progs;

	/* If RCU Tasks Trace grace period implies RCU grace period, there is
	 * no need to call kfree_rcu(), just call kfree() directly.
	 */
	progs = container_of(rcu, struct bpf_prog_array, rcu);
	if (rcu_trace_implies_rcu_gp())
		kfree(progs);
	else
		kfree_rcu(progs, rcu);
}

void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
{
	if (!progs || progs == &bpf_empty_prog_array.hdr)
		return;
	call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
}
int bpf_prog_array_length(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;
	u32 cnt = 0;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			cnt++;
	return cnt;
}
bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			return false;
	return true;
}
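
/* Worked example (illustrative only): deleted slots are replaced with the
 * dummy program rather than compacted, so for an items[] layout of
 *
 *	{ progA, &dummy_bpf_prog.prog, progB, NULL }
 *
 * bpf_prog_array_length() returns 2 and bpf_prog_array_is_empty() returns
 * false; only the terminating NULL entry ends iteration.
 */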
static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
				     u32 *prog_ids,
				     u32 request_cnt)
{
	struct bpf_prog_array_item *item;
	int i = 0;

	for (item = array->items; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		prog_ids[i] = item->prog->aux->id;
		if (++i == request_cnt) {
			item++;
			break;
		}
	}

	return !!(item->prog);
}
int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
				__u32 __user *prog_ids, u32 cnt)
{
	unsigned long err = 0;
	bool nospc;
	u32 *ids;

	/* users of this function are doing:
	 * cnt = bpf_prog_array_length();
	 * if (cnt > 0)
	 *     bpf_prog_array_copy_to_user(..., cnt);
	 * so below kcalloc doesn't need extra cnt > 0 check.
	 */
	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	nospc = bpf_prog_array_copy_core(array, ids, cnt);
	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
	kfree(ids);
	if (err)
		return -EFAULT;
	if (nospc)
		return -ENOSPC;
	return 0;
}
void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
				struct bpf_prog *old_prog)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog == old_prog) {
			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
			break;
		}
}
/**
 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
 *                                   index into the program array with
 *                                   a dummy no-op program.
 * @array: a bpf_prog_array
 * @index: the index of the program to replace
 *
 * Skips over dummy programs, by not counting them, when calculating
 * the position of the program to replace.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- Index out of range
 */
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
{
	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
}
/**
 * bpf_prog_array_update_at() - Updates the program at the given index
 *                              into the program array.
 * @array: a bpf_prog_array
 * @index: the index of the program to update
 * @prog: the program to insert into the array
 *
 * Skips over dummy programs, by not counting them, when calculating
 * the position of the program to update.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- Index out of range
 */
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog)
{
	struct bpf_prog_array_item *item;

	if (unlikely(index < 0))
		return -EINVAL;

	for (item = array->items; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		if (!index) {
			WRITE_ONCE(item->prog, prog);
			return 0;
		}
		index--;
	}
	return -ENOENT;
}
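
/* Illustrative sketch (not part of the original source): because dummy
 * programs are skipped while counting, index 1 below refers to the second
 * live program regardless of how many dummy slots precede it:
 *
 *	err = bpf_prog_array_update_at(array, 1, new_prog);
 *	if (err == -ENOENT)
 *		// fewer than two live programs in the array
 *
 * bpf_prog_array_delete_safe_at() is the same operation with
 * &dummy_bpf_prog.prog as the replacement program.
 */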
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			u64 bpf_cookie,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog_array_item *existing, *new;
	struct bpf_prog_array *array;
	bool found_exclude = false;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog) {
				found_exclude = true;
				continue;
			}
			if (existing->prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (existing->prog == include_prog)
				return -EEXIST;
		}
	}

	if (exclude_prog && !found_exclude)
		return -ENOENT;

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	new = array->items;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog ||
			    existing->prog == &dummy_bpf_prog.prog)
				continue;

			new->prog = existing->prog;
			new->bpf_cookie = existing->bpf_cookie;
			new++;
		}
	}
	if (include_prog) {
		new->prog = include_prog;
		new->bpf_cookie = bpf_cookie;
		new++;
	}
	new->prog = NULL;
	*new_array = array;
	return 0;
}
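
/* Illustrative usage sketch (hypothetical attach path, not kernel code):
 * callers typically build a new array and publish it with an RCU-style swap;
 * ptr and attach_lock below are placeholders:
 *
 *	struct bpf_prog_array *old_array, *new_array;
 *
 *	old_array = rcu_dereference_protected(ptr, lockdep_is_held(&attach_lock));
 *	err = bpf_prog_array_copy(old_array, NULL, prog, cookie, &new_array);
 *	if (!err) {
 *		rcu_assign_pointer(ptr, new_array);
 *		bpf_prog_array_free(old_array);
 *	}
 */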
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
								     : 0;
}
void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len)
{
	struct bpf_map *map;
	bool sleepable;
	u32 i;

	sleepable = aux->prog->sleepable;
	for (i = 0; i < len; i++) {
		map = used_maps[i];
		if (map->ops->map_poke_untrack)
			map->ops->map_poke_untrack(map, aux);
		if (sleepable)
			atomic64_dec(&map->sleepable_refcnt);
		bpf_map_put(map);
	}
}

static void bpf_free_used_maps(struct bpf_prog_aux *aux)
{
	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
	kfree(aux->used_maps);
}
void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len)
{
#ifdef CONFIG_BPF_SYSCALL
	struct btf_mod_pair *btf_mod;
	u32 i;

	for (i = 0; i < len; i++) {
		btf_mod = &used_btfs[i];
		if (btf_mod->module)
			module_put(btf_mod->module);
		btf_put(btf_mod->btf);
	}
#endif
}

static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
{
	__bpf_free_used_btfs(aux->used_btfs, aux->used_btf_cnt);
	kfree(aux->used_btfs);
}
static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
#ifdef CONFIG_BPF_SYSCALL
	bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
#endif
#ifdef CONFIG_CGROUP_BPF
	if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
		bpf_cgroup_atype_put(aux->cgroup_atype);
#endif
	bpf_free_used_maps(aux);
	bpf_free_used_btfs(aux);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_dev_bound_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
	if (aux->prog->has_callchain_buf)
		put_callchain_buffers();
#endif
	if (aux->dst_trampoline)
		bpf_trampoline_put(aux->dst_trampoline);
	for (i = 0; i < aux->real_func_cnt; i++) {
		/* We can just unlink the subprog poke descriptor table as
		 * it was originally linked to the main program and is also
		 * released along with it.
		 */
		aux->func[i]->aux->poke_tab = NULL;
		bpf_jit_free(aux->func[i]);
	}
	if (aux->real_func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	if (aux->dst_prog)
		bpf_prog_put(aux->dst_prog);
	bpf_token_put(aux->token);
	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);
/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

BPF_CALL_0(bpf_get_raw_cpu_id)
{
	return raw_smp_processor_id();
}
/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
const struct bpf_func_proto bpf_jiffies64_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
const struct bpf_func_proto bpf_set_retval_proto __weak;
const struct bpf_func_proto bpf_get_retval_proto __weak;
const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);
/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}
/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
 * analysis code and wants explicit zero extension inserted by verifier.
 * Otherwise, return FALSE.
 *
 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
 * you don't override this. JITs that don't want these extra insns can detect
 * them using insn_is_zext.
 */
bool __weak bpf_jit_needs_zext(void)
{
	return false;
}

/* Return true if the JIT inlines the call to the helper corresponding to
 * the imm.
 *
 * The verifier will not patch the insn->imm for the call to the helper if
 * this returns true.
 */
bool __weak bpf_jit_inlines_helper_call(s32 imm)
{
	return false;
}

/* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
bool __weak bpf_jit_supports_subprog_tailcalls(void)
{
	return false;
}

bool __weak bpf_jit_supports_percpu_insn(void)
{
	return false;
}

bool __weak bpf_jit_supports_kfunc_call(void)
{
	return false;
}

bool __weak bpf_jit_supports_far_kfunc_call(void)
{
	return false;
}

bool __weak bpf_jit_supports_arena(void)
{
	return false;
}

bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
{
	return false;
}

u64 __weak bpf_arch_uaddress_limit(void)
{
#if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE)
	return 0;
#else
	return TASK_SIZE;
#endif
}
/* Return TRUE if the JIT backend satisfies the following two conditions:
 * 1) JIT backend supports atomic_xchg() on pointer-sized words.
 * 2) Under the specific arch, the implementation of xchg() is the same
 *    as atomic_xchg() on pointer-sized words.
 */
bool __weak bpf_jit_supports_ptr_xchg(void)
{
	return false;
}
/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
			      void *addr1, void *addr2)
{
	return -ENOTSUPP;
}

void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	return ERR_PTR(-ENOTSUPP);
}

int __weak bpf_arch_text_invalidate(void *dst, size_t len)
{
	return -ENOTSUPP;
}

bool __weak bpf_jit_supports_exceptions(void)
{
	return false;
}

bool __weak bpf_jit_supports_private_stack(void)
{
	return false;
}

void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
{
}
/* for configs without MMU or 32-bit */
__weak const struct bpf_map_ops arena_map_ops;
__weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
{
	return 0;
}
__weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
{
	return 0;
}
#ifdef CONFIG_BPF_SYSCALL
static int __init bpf_global_ma_init(void)
{
	int ret;

	ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
	bpf_global_ma_set = !ret;
	return ret;
}
late_initcall(bpf_global_ma_init);
#endif

DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);