/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <uapi/linux/bpf.h>
#include <uapi/linux/filter.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/stddef.h>
#include <linux/bpfptr.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/static_call.h>
#include <linux/memcontrol.h>
#include <linux/cfi.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct bpf_func_state;
struct user_namespace;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
extern bool bpf_global_ma_set;
typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
typedef unsigned int (*bpf_func_t)(const void *,
				   const struct bpf_insn *);

struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
};
/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
				const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	long (*map_delete_elem)(struct bpf_map *map, void *key);
	long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	long (*map_pop_elem)(struct bpf_map *map, void *value);
	long (*map_peek_elem)(struct bpf_map *map, void *value);
	void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	/* If need_defer is true, the implementation should guarantee that
	 * the to-be-put element is still alive before the bpf program, which
	 * may manipulate it, exits.
	 */
	void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);
	unsigned long (*map_get_unmapped_area)(struct file *filep, unsigned long addr,
					       unsigned long len, unsigned long pgoff,
					       unsigned long flags);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map.  It is a runtime check to ensure
	 * an inner map can be inserted to an outer map.
	 *
	 * Some properties of the inner map have been used during the
	 * verification time.  When inserting an inner map at the runtime,
	 * map_meta_equal has to ensure the inserting map has the same
	 * properties that the verifier has used earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);

	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	long (*map_for_each_callback)(struct bpf_map *map,
				      bpf_callback_t callback_fn,
				      void *callback_ctx, u64 flags);

	u64 (*map_mem_usage)(const struct bpf_map *map);

	/* BTF id of struct allocated by map_alloc */
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};
/* Support at most 11 fields in a BTF type */

enum btf_field_type {
	BPF_SPIN_LOCK   = (1 << 0),
	BPF_TIMER       = (1 << 1),
	BPF_KPTR_UNREF  = (1 << 2),
	BPF_KPTR_REF    = (1 << 3),
	BPF_KPTR_PERCPU = (1 << 4),
	BPF_KPTR        = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU,
	BPF_LIST_HEAD   = (1 << 5),
	BPF_LIST_NODE   = (1 << 6),
	BPF_RB_ROOT     = (1 << 7),
	BPF_RB_NODE     = (1 << 8),
	BPF_GRAPH_NODE  = BPF_RB_NODE | BPF_LIST_NODE,
	BPF_GRAPH_ROOT  = BPF_RB_ROOT | BPF_LIST_HEAD,
	BPF_REFCOUNT    = (1 << 9),
	BPF_WORKQUEUE   = (1 << 10),
	BPF_UPTR        = (1 << 11),
};
typedef void (*btf_dtor_kfunc_t)(void *);

struct btf_field_kptr {
	struct btf *btf;
	struct module *module;
	/* dtor used if btf_is_kernel(btf), otherwise the type is
	 * program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used
	 */
	btf_dtor_kfunc_t dtor;
};

struct btf_field_graph_root {
	struct btf_record *value_rec;
};

struct btf_field {
	u32 offset;
	u32 size;
	enum btf_field_type type;
	union {
		struct btf_field_kptr kptr;
		struct btf_field_graph_root graph_root;
	};
};

struct btf_record {
	u32 cnt;
	u32 field_mask;
	struct btf_field fields[];
};

/* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */
struct bpf_rb_node_kern {
	struct rb_node rb_node;
} __attribute__((aligned(8)));

/* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */
struct bpf_list_node_kern {
	struct list_head list_head;
} __attribute__((aligned(8)));
struct bpf_map {
	const struct bpf_map_ops *ops;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u64 map_extra; /* any per-map-type extra fields */
	struct btf_record *record;
	u32 btf_value_type_id;
	u32 btf_vmlinux_value_type_id;
	struct obj_cgroup *objcg;
	char name[BPF_OBJ_NAME_LEN];
	struct mutex freeze_mutex;
	/* rcu is used before freeing and work is only used during freeing */
	struct work_struct work;
	/* 'Ownership' of program-containing map is claimed by the first program
	 * that is going to use this map or by the first program which FD is
	 * stored in the map to make sure that all callers and callees have the
	 * same prog type, JITed flag and xdp_has_frags flag.
	 */
	const struct btf_type *attach_func_proto;
	enum bpf_prog_type type;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	bool free_after_mult_rcu_gp;
	bool free_after_rcu_gp;
	atomic64_t sleepable_refcnt;
	s64 __percpu *elem_count;
};
static inline const char *btf_field_type_name(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return "bpf_spin_lock";
	case BPF_KPTR_PERCPU:
		return "percpu_kptr";
	case BPF_LIST_HEAD:
		return "bpf_list_head";
	case BPF_LIST_NODE:
		return "bpf_list_node";
	case BPF_RB_ROOT:
		return "bpf_rb_root";
	case BPF_RB_NODE:
		return "bpf_rb_node";
	case BPF_REFCOUNT:
		return "bpf_refcount";
	default:
		WARN_ON_ONCE(1);
		return "unknown";
	}
}
static inline u32 btf_field_type_size(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return sizeof(struct bpf_spin_lock);
	case BPF_TIMER:
		return sizeof(struct bpf_timer);
	case BPF_WORKQUEUE:
		return sizeof(struct bpf_wq);
	case BPF_KPTR_PERCPU:
		return sizeof(u64);
	case BPF_LIST_HEAD:
		return sizeof(struct bpf_list_head);
	case BPF_LIST_NODE:
		return sizeof(struct bpf_list_node);
	case BPF_RB_ROOT:
		return sizeof(struct bpf_rb_root);
	case BPF_RB_NODE:
		return sizeof(struct bpf_rb_node);
	case BPF_REFCOUNT:
		return sizeof(struct bpf_refcount);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}
static inline u32 btf_field_type_align(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return __alignof__(struct bpf_spin_lock);
	case BPF_TIMER:
		return __alignof__(struct bpf_timer);
	case BPF_WORKQUEUE:
		return __alignof__(struct bpf_wq);
	case BPF_KPTR_PERCPU:
		return __alignof__(u64);
	case BPF_LIST_HEAD:
		return __alignof__(struct bpf_list_head);
	case BPF_LIST_NODE:
		return __alignof__(struct bpf_list_node);
	case BPF_RB_ROOT:
		return __alignof__(struct bpf_rb_root);
	case BPF_RB_NODE:
		return __alignof__(struct bpf_rb_node);
	case BPF_REFCOUNT:
		return __alignof__(struct bpf_refcount);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}
static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
{
	memset(addr, 0, field->size);

	switch (field->type) {
	case BPF_REFCOUNT:
		refcount_set((refcount_t *)addr, 1);
		break;
	case BPF_RB_NODE:
		RB_CLEAR_NODE((struct rb_node *)addr);
		break;
	case BPF_LIST_HEAD:
	case BPF_LIST_NODE:
		INIT_LIST_HEAD((struct list_head *)addr);
		break;
	case BPF_RB_ROOT:
		/* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
	case BPF_KPTR_PERCPU:
	default:
		break;
	}
}
static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_field_type type)
{
	if (IS_ERR_OR_NULL(rec))
		return false;
	return rec->field_mask & type;
}
static inline void bpf_obj_init(const struct btf_record *rec, void *obj)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++)
		bpf_obj_init_field(&rec->fields[i], obj + rec->fields[i].offset);
}
/* 'dst' must be a temporary buffer and should not point to memory that is being
 * used in parallel by a bpf program or bpf syscall, otherwise the access from
 * the bpf program or bpf syscall may be corrupted by the reinitialization,
 * leading to weird problems. Even if 'dst' is newly-allocated from the bpf memory
 * allocator, it is still possible for 'dst' to be used in parallel by a bpf
 * program or bpf syscall.
 */
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	bpf_obj_init(map->record, dst);
}
/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		data_race(*ldst++ = *lsrc++);
}
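
/*
 * Usage sketch (illustrative): counters exposed to the syscall path are
 * copied with bpf_long_memcpy() so each long-sized counter is read in a
 * single, best-effort-atomic access rather than byte by byte, e.g.:
 *
 *	struct pair { u64 packets; u64 bytes; } snapshot;
 *
 *	bpf_long_memcpy(&snapshot, value, sizeof(snapshot));
 *
 * where 'value' is an 8-byte aligned map value that BPF programs may be
 * updating concurrently.
 */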
/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
static inline void bpf_obj_memcpy(struct btf_record *rec,
				  void *dst, void *src, u32 size,
				  bool long_memcpy)
{
	u32 curr_off = 0;
	int i;

	if (IS_ERR_OR_NULL(rec)) {
		if (long_memcpy)
			bpf_long_memcpy(dst, src, round_up(size, 8));
		else
			memcpy(dst, src, size);
		return;
	}

	for (i = 0; i < rec->cnt; i++) {
		u32 next_off = rec->fields[i].offset;
		u32 sz = next_off - curr_off;

		memcpy(dst + curr_off, src + curr_off, sz);
		curr_off += rec->fields[i].size + sz;
	}
	memcpy(dst + curr_off, src + curr_off, size - curr_off);
}

static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	bpf_obj_memcpy(map->record, dst, src, map->value_size, false);
}

static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
{
	bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
}
static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src)
{
	unsigned long *src_uptr, *dst_uptr;
	const struct btf_field *field;
	int i;

	if (!btf_record_has_field(rec, BPF_UPTR))
		return;

	for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) {
		if (field->type != BPF_UPTR)
			continue;

		src_uptr = src + field->offset;
		dst_uptr = dst + field->offset;
		swap(*src_uptr, *dst_uptr);
	}
}
static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size)
{
	u32 curr_off = 0;
	int i;

	if (IS_ERR_OR_NULL(rec)) {
		memset(dst, 0, size);
		return;
	}

	for (i = 0; i < rec->cnt; i++) {
		u32 next_off = rec->fields[i].offset;
		u32 sz = next_off - curr_off;

		memset(dst + curr_off, 0, sz);
		curr_off += rec->fields[i].size + sz;
	}
	memset(dst + curr_off, 0, size - curr_off);
}

static inline void zero_map_value(struct bpf_map *map, void *dst)
{
	bpf_obj_memzero(map->record, dst, map->value_size);
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
void bpf_wq_cancel_and_free(void *timer);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
			struct bpf_spin_lock *spin_lock);
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
		      struct bpf_spin_lock *spin_lock);
u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena);
u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;
struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	struct list_head offloads;
};
static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;
/* bpf_type_flag contains a set of flags that are applicable to the values of
 * arg_type, ret_type and reg_type. For example, a pointer value may be null,
 * or a memory is read-only. We classify types into two categories: base types
 * and extended types. Extended types are base types combined with a type flag.
 *
 * Currently there are no more than 32 base types in arg_type, ret_type and
 * reg_type.
 */
#define BPF_BASE_TYPE_BITS	8

enum bpf_type_flag {
	/* PTR may be NULL. */
	PTR_MAYBE_NULL		= BIT(0 + BPF_BASE_TYPE_BITS),

	/* MEM is read-only. When applied on bpf_arg, it indicates the arg is
	 * compatible with both mutable and immutable memory.
	 */
	MEM_RDONLY		= BIT(1 + BPF_BASE_TYPE_BITS),

	/* MEM points to BPF ring buffer reservation. */
	MEM_RINGBUF		= BIT(2 + BPF_BASE_TYPE_BITS),

	/* MEM is in user address space. */
	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),

	/* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
	 * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
	 * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
	 * or bpf_this_cpu_ptr(), which will return the pointer corresponding
	 * to the specified cpu.
	 */
	MEM_PERCPU		= BIT(4 + BPF_BASE_TYPE_BITS),

	/* Indicates that the argument will be released. */
	OBJ_RELEASE		= BIT(5 + BPF_BASE_TYPE_BITS),

	/* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
	 * unreferenced and referenced kptr loaded from map value using a load
	 * instruction, so that they can only be dereferenced but not escape the
	 * BPF program into the kernel (i.e. cannot be passed as arguments to
	 * kfunc or bpf helpers).
	 */
	PTR_UNTRUSTED		= BIT(6 + BPF_BASE_TYPE_BITS),

	/* MEM can be uninitialized. */
	MEM_UNINIT		= BIT(7 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to memory local to the bpf program. */
	DYNPTR_TYPE_LOCAL	= BIT(8 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to a kernel-produced ringbuf record. */
	DYNPTR_TYPE_RINGBUF	= BIT(9 + BPF_BASE_TYPE_BITS),

	/* Size is known at compile time. */
	MEM_FIXED_SIZE		= BIT(10 + BPF_BASE_TYPE_BITS),

	/* MEM is of an allocated object of type in program BTF. This is used to
	 * tag PTR_TO_BTF_ID allocated using bpf_obj_new.
	 */
	MEM_ALLOC		= BIT(11 + BPF_BASE_TYPE_BITS),

	/* PTR was passed from the kernel in a trusted context, and may be
	 * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions.
	 * Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above.
	 * PTR_UNTRUSTED refers to a kptr that was read directly from a map
	 * without invoking bpf_kptr_xchg(). What we really need to know is
	 * whether a pointer is safe to pass to a kfunc or BPF helper function.
	 * While PTR_UNTRUSTED pointers are unsafe to pass to kfuncs and BPF
	 * helpers, they do not cover all possible instances of unsafe
	 * pointers. For example, a pointer that was obtained from walking a
	 * struct will _not_ get the PTR_UNTRUSTED type modifier, despite the
	 * fact that it may be NULL, invalid, etc. This is due to backwards
	 * compatibility requirements, as this was the behavior that was first
	 * introduced when kptrs were added. The behavior is now considered
	 * deprecated, and PTR_UNTRUSTED will eventually be removed.
	 *
	 * PTR_TRUSTED, on the other hand, is a pointer that the kernel
	 * guarantees to be valid and safe to pass to kfuncs and BPF helpers.
	 * For example, pointers passed to tracepoint arguments are considered
	 * PTR_TRUSTED, as are pointers that are passed to struct_ops
	 * callbacks. As alluded to above, pointers that are obtained from
	 * walking PTR_TRUSTED pointers are _not_ trusted. For example, if a
	 * struct task_struct *task is PTR_TRUSTED, then accessing
	 * task->last_wakee will lose the PTR_TRUSTED modifier when it's stored
	 * in a BPF register. Similarly, pointers passed to certain program
	 * types such as kretprobes are not guaranteed to be valid, as they may
	 * for example contain an object that was recently freed.
	 */
	PTR_TRUSTED		= BIT(12 + BPF_BASE_TYPE_BITS),

	/* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
	MEM_RCU			= BIT(13 + BPF_BASE_TYPE_BITS),

	/* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
	 * Currently only valid for linked-list and rbtree nodes. If the nodes
	 * have a bpf_refcount_field, they must be tagged MEM_RCU as well.
	 */
	NON_OWN_REF		= BIT(14 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to sk_buff */
	DYNPTR_TYPE_SKB		= BIT(15 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to xdp_buff */
	DYNPTR_TYPE_XDP		= BIT(16 + BPF_BASE_TYPE_BITS),

	/* Memory must be aligned on some architectures, used in combination with
	 * MEM_UNINIT.
	 */
	MEM_ALIGNED		= BIT(17 + BPF_BASE_TYPE_BITS),

	/* MEM is being written to, often combined with MEM_UNINIT. Non-presence
	 * of MEM_WRITE means that MEM is only being read. MEM_WRITE without the
	 * MEM_UNINIT means that memory needs to be initialized since it is also
	 * read.
	 */
	MEM_WRITE		= BIT(18 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_FLAG_MAX,
	__BPF_TYPE_LAST_FLAG	= __BPF_TYPE_FLAG_MAX - 1,
};

#define DYNPTR_TYPE_FLAG_MASK	(DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \
				 | DYNPTR_TYPE_XDP)

/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT	(1UL << BPF_BASE_TYPE_BITS)

/* Max number of all types. */
#define BPF_TYPE_LIMIT		(__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
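
/*
 * Worked example (illustrative): an extended type is a base type with flag
 * bits OR'ed in above BPF_BASE_TYPE_BITS, so the base part can be recovered
 * by masking with BPF_BASE_TYPE_LIMIT - 1:
 *
 *	u32 t = PTR_MAYBE_NULL | MEM_RDONLY | ARG_PTR_TO_MEM;
 *
 *	(t & (BPF_BASE_TYPE_LIMIT - 1)) == ARG_PTR_TO_MEM;	// base type
 *	(t & PTR_MAYBE_NULL) != 0;				// flag test
 */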
/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */

	/* Used to prototype bpf_memcmp() and other functions that access data
	 * on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_RINGBUF_MEM,	/* pointer to dynamically reserved ringbuf memory */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK,	/* pointer to stack */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	ARG_KPTR_XCHG_DEST,	/* pointer to destination that kptrs are bpf_kptr_xchg'd into */
	ARG_PTR_TO_DYNPTR,	/* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
	__BPF_ARG_TYPE_MAX,

	/* Extended arg_types. */
	ARG_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
	ARG_PTR_TO_MEM_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
	ARG_PTR_TO_CTX_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
	ARG_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
	ARG_PTR_TO_STACK_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
	ARG_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
	/* Pointer to memory does not need to be initialized, since helper function
	 * fills all bytes or clears them in error case.
	 */
	ARG_PTR_TO_UNINIT_MEM		= MEM_UNINIT | MEM_WRITE | ARG_PTR_TO_MEM,
	/* Pointer to valid memory of size known at compile time. */
	ARG_PTR_TO_FIXED_SIZE_MEM	= MEM_FIXED_SIZE | ARG_PTR_TO_MEM,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_ARG_TYPE_LIMIT		= BPF_TYPE_LIMIT,
};
static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_SOCKET,		/* returns a pointer to a socket */
	RET_PTR_TO_TCP_SOCK,		/* returns a pointer to a tcp_sock */
	RET_PTR_TO_SOCK_COMMON,		/* returns a pointer to a sock_common */
	RET_PTR_TO_MEM,			/* returns a pointer to memory */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
	__BPF_RET_TYPE_MAX,

	/* Extended ret_types. */
	RET_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
	RET_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
	RET_PTR_TO_TCP_SOCK_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
	RET_PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
	RET_PTR_TO_RINGBUF_MEM_OR_NULL	= PTR_MAYBE_NULL | MEM_RINGBUF | RET_PTR_TO_MEM,
	RET_PTR_TO_DYNPTR_MEM_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MEM,
	RET_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
	RET_PTR_TO_BTF_ID_TRUSTED	= PTR_TRUSTED	 | RET_PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_RET_TYPE_LIMIT		= BPF_TYPE_LIMIT,
};
static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	/* set to true if helper follows contract for llvm
	 * attribute bpf_fastcall:
	 * - void functions do not scratch r0
	 * - functions taking N arguments scratch only registers r1-rN
	 */
	bool allow_fastcall;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_BUF,		 /* reg points to a read/write buffer */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	CONST_PTR_TO_DYNPTR,	 /* reg points to a const struct bpf_dynptr */
	__BPF_REG_TYPE_MAX,

	/* Extended reg_types. */
	PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
	PTR_TO_SOCKET_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_SOCKET,
	PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
	PTR_TO_TCP_SOCK_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_BTF_ID_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_REG_TYPE_LIMIT		= BPF_TYPE_LIMIT,
};
static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	int ctx_field_size;
	struct bpf_verifier_log *log; /* for verbose logs */
	bool is_retval; /* is accessing function return value ? */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

static inline bool bpf_is_ldimm64(const struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
	return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}
struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_reg_state;

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_epilogue)(struct bpf_insn *insn, const struct bpf_prog *prog,
			    s16 ctx_stack_off);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct bpf_reg_state *reg,
				 int off, int size);
};
{
1027 /* verifier basic callbacks */
1028 int (*insn_hook
)(struct bpf_verifier_env
*env
,
1029 int insn_idx
, int prev_insn_idx
);
1030 int (*finalize
)(struct bpf_verifier_env
*env
);
1031 /* verifier optimization callbacks (called after .finalize) */
1032 int (*replace_insn
)(struct bpf_verifier_env
*env
, u32 off
,
1033 struct bpf_insn
*insn
);
1034 int (*remove_insns
)(struct bpf_verifier_env
*env
, u32 off
, u32 cnt
);
1035 /* program management callbacks */
1036 int (*prepare
)(struct bpf_prog
*prog
);
1037 int (*translate
)(struct bpf_prog
*prog
);
1038 void (*destroy
)(struct bpf_prog
*prog
);
1041 struct bpf_prog_offload
{
1042 struct bpf_prog
*prog
;
1043 struct net_device
*netdev
;
1044 struct bpf_offload_dev
*offdev
;
1046 struct list_head offloads
;
1053 enum bpf_cgroup_storage_type
{
1054 BPF_CGROUP_STORAGE_SHARED
,
1055 BPF_CGROUP_STORAGE_PERCPU
,
1056 __BPF_CGROUP_STORAGE_MAX
1059 #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

/* The argument is a structure. */
#define BTF_FMODEL_STRUCT_ARG		BIT(0)

/* The argument is signed. */
#define BTF_FMODEL_SIGNED_ARG		BIT(1)

struct btf_func_model {
	u8 arg_size[MAX_BPF_FUNC_ARGS];
	u8 arg_flags[MAX_BPF_FUNC_ARGS];
};
/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * other attached programs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)

/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)

/* Skip current frame and return to parent. Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)

/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)

/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

/* Get original function from stack instead of from provided direct address.
 * Makes sense for trampolines with fexit or fmod_ret programs.
 */
#define BPF_TRAMP_F_ORIG_STACK		BIT(5)

/* This trampoline is on a function with another ftrace_ops with IPMODIFY,
 * e.g., a live patch. This flag is set and cleared by ftrace call backs.
 */
#define BPF_TRAMP_F_SHARE_IPMODIFY	BIT(6)

/* Indicate that current trampoline is in a tail call context. Then, it has to
 * cache and restore tail_call_cnt to avoid infinite tail call loop.
 */
#define BPF_TRAMP_F_TAIL_CALL_CTX	BIT(7)

/*
 * Indicate the trampoline should be suitable to receive indirect calls;
 * without this indirectly calling the generated code can result in #UD/#CP,
 * depending on the CFI options.
 *
 * Used by bpf_struct_ops.
 *
 * Incompatible with FENTRY usage, overloads @func_addr argument.
 */
#define BPF_TRAMP_F_INDIRECT		BIT(8)
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.
 */
enum {
#if defined(__s390x__)
	BPF_MAX_TRAMP_LINKS = 27,
#else
	BPF_MAX_TRAMP_LINKS = 38,
#endif
};

struct bpf_tramp_links {
	struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
};

struct bpf_tramp_run_ctx;
/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *func_addr);
void *arch_alloc_bpf_trampoline(unsigned int size);
void arch_free_bpf_trampoline(void *image, unsigned int size);
int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size);
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
			     struct bpf_tramp_links *tlinks, void *func_addr);
u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
					     struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
					     struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
				      struct bpf_tramp_run_ctx *run_ctx);
typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
				      struct bpf_tramp_run_ctx *run_ctx);
bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);
struct bpf_ksym {
	unsigned long start;
	char name[KSYM_NAME_LEN];
	struct list_head lnode;
	struct latch_tree_node tnode;
};
enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};
struct bpf_tramp_image {
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	struct rcu_head rcu;
	struct work_struct work;
};
struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	struct ftrace_ops *fops;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	struct btf_func_model model;
	bool ftrace_managed;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
};
struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	struct module *tgt_mod;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};
#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	struct bpf_ksym ksym;
#ifdef CONFIG_HAVE_STATIC_CALL
	struct static_call_key *sc_key;
	void *sc_tramp;
#endif
};

#define __bpfcall __nocfi
static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	bpf_func_t bpf_func)
{
	return bpf_func(ctx, insnsi);
}
1294 struct bpf_dynptr_kern
{
1296 /* Size represents the number of usable bytes of dynptr data.
1297 * If for example the offset is at 4 for a local dynptr whose data is
1298 * of type u64, the number of usable bytes is 4.
1300 * The upper 8 bits are reserved. It is as follows:
1301 * Bits 0 - 23 = size
1302 * Bits 24 - 30 = dynptr type
1303 * Bit 31 = whether dynptr is read-only
1309 enum bpf_dynptr_type
{
1310 BPF_DYNPTR_TYPE_INVALID
,
1311 /* Points to memory that is local to the bpf program */
1312 BPF_DYNPTR_TYPE_LOCAL
,
1313 /* Underlying data is a ringbuf record */
1314 BPF_DYNPTR_TYPE_RINGBUF
,
1315 /* Underlying data is a sk_buff */
1316 BPF_DYNPTR_TYPE_SKB
,
1317 /* Underlying data is a xdp_buff */
1318 BPF_DYNPTR_TYPE_XDP
,
int bpf_dynptr_check_size(u32 size);
u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
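
/*
 * Worked example (illustrative): given the bit layout documented in
 * bpf_dynptr_kern above, the packed size word decomposes as:
 *
 *	size   = word & ((1U << 24) - 1);	// bits 0 - 23
 *	type   = (word >> 24) & 0x7f;		// bits 24 - 30
 *	rdonly = word >> 31;			// bit 31
 *
 * Helpers such as __bpf_dynptr_size() and __bpf_dynptr_is_rdonly() expose
 * this information so callers never have to open-code the encoding.
 */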
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
			     struct bpf_trampoline *tr,
			     struct bpf_prog *tgt_prog);
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
			       struct bpf_trampoline *tr,
			       struct bpf_prog *tgt_prog);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
/*
 * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn
 * indirection with a direct call to the bpf program. If the architecture does
 * not have STATIC_CALL, avoid a double-indirection.
 */
#ifdef CONFIG_HAVE_STATIC_CALL

#define __BPF_DISPATCHER_SC_INIT(_name)				\
	.sc_key = &STATIC_CALL_KEY(_name),			\
	.sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),

#define __BPF_DISPATCHER_SC(name)				\
	DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)

#define __BPF_DISPATCHER_CALL(name)				\
	static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)

#define __BPF_DISPATCHER_UPDATE(_d, _new)			\
	__static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))

#else

#define __BPF_DISPATCHER_SC_INIT(name)
#define __BPF_DISPATCHER_SC(name)
#define __BPF_DISPATCHER_CALL(name)	bpf_func(ctx, insnsi)
#define __BPF_DISPATCHER_UPDATE(_d, _new)

#endif
#define BPF_DISPATCHER_INIT(_name) {					\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),			\
	.func = &_name##_func,						\
	.ksym = {							\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),		\
	},								\
	__BPF_DISPATCHER_SC_INIT(_name##_call)				\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	__BPF_DISPATCHER_SC(name);					\
	noinline __bpfcall unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		bpf_func_t bpf_func)					\
	{								\
		return __BPF_DISPATCHER_CALL(name);			\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);

#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		bpf_func_t bpf_func);					\
	extern struct bpf_dispatcher bpf_dispatcher_##name;

#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
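
/*
 * Usage sketch (illustrative): a subsystem declares a dispatcher in a header
 * and defines it in exactly one translation unit; "example" is a hypothetical
 * name (the real in-tree user is the XDP dispatcher in net/core/filter.c).
 */
#if 0
/* header: */
DECLARE_BPF_DISPATCHER(example)

/* one .c file: */
DEFINE_BPF_DISPATCHER(example)

/* call site: run @prog through the dispatcher trampoline */
static __always_inline u32 example_run(const void *ctx, const struct bpf_prog *prog)
{
	return __bpf_prog_run(prog, ctx, BPF_DISPATCHER_FUNC(example));
}

/* when the attached program changes, patch the dispatcher image */
static void example_attach(struct bpf_prog *old_prog, struct bpf_prog *new_prog)
{
	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(example), old_prog, new_prog);
}
#endif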
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
/* Called only from JIT-enabled code, so there's no need for stubs. */
void bpf_image_ksym_init(void *data, unsigned int size, struct bpf_ksym *ksym);
void bpf_image_ksym_add(struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
					   struct bpf_trampoline *tr,
					   struct bpf_prog *tgt_prog)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
					     struct bpf_trampoline *tr,
					     struct bpf_prog *tgt_prog)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return NULL;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	return false;
}
#endif
struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	struct bpf_map *map;
	bool tailcall_target_stable;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	enum bpf_reg_type reg_type;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;
{
1502 u32 func_cnt
; /* used by non-func prog as the number of func progs */
1503 u32 real_func_cnt
; /* includes hidden progs, only used for JIT and freeing progs */
1504 u32 func_idx
; /* 0 for non-func prog, the index in func array for func prog */
1505 u32 attach_btf_id
; /* in-kernel BTF type id to attach to */
1506 u32 ctx_arg_info_size
;
1507 u32 max_rdonly_access
;
1508 u32 max_rdwr_access
;
1509 struct btf
*attach_btf
;
1510 const struct bpf_ctx_arg_aux
*ctx_arg_info
;
1511 void __percpu
*priv_stack_ptr
;
1512 struct mutex dst_mutex
; /* protects dst_* pointers below, *after* prog becomes visible */
1513 struct bpf_prog
*dst_prog
;
1514 struct bpf_trampoline
*dst_trampoline
;
1515 enum bpf_prog_type saved_dst_prog_type
;
1516 enum bpf_attach_type saved_dst_attach_type
;
1517 bool verifier_zext
; /* Zero extensions has been inserted by verifier. */
1518 bool dev_bound
; /* Program is bound to the netdev. */
1519 bool offload_requested
; /* Program is bound and offloaded to the netdev. */
1520 bool attach_btf_trace
; /* true if attaching to BTF-enabled raw tp */
1521 bool attach_tracing_prog
; /* true if tracing another tracing program */
1522 bool func_proto_unreliable
;
1523 bool tail_call_reachable
;
1526 bool exception_boundary
;
1527 bool is_extended
; /* true if extended by freplace program */
1528 bool jits_use_priv_stack
;
1529 bool priv_stack_requested
;
1530 u64 prog_array_member_cnt
; /* counts how many times as member of prog_array */
1531 struct mutex ext_mutex
; /* mutex for is_extended and prog_array_member_cnt */
1532 struct bpf_arena
*arena
;
1533 void (*recursion_detected
)(struct bpf_prog
*prog
); /* callback if recursion is detected */
1534 /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
1535 const struct btf_type
*attach_func_proto
;
1536 /* function name for valid attach_btf_id */
1537 const char *attach_func_name
;
1538 struct bpf_prog
**func
;
1539 void *jit_data
; /* JIT specific data. arch dependent */
1540 struct bpf_jit_poke_descriptor
*poke_tab
;
1541 struct bpf_kfunc_desc_tab
*kfunc_tab
;
1542 struct bpf_kfunc_btf_tab
*kfunc_btf_tab
;
1544 #ifdef CONFIG_FINEIBT
1545 struct bpf_ksym ksym_prefix
;
1547 struct bpf_ksym ksym
;
1548 const struct bpf_prog_ops
*ops
;
1549 struct bpf_map
**used_maps
;
1550 struct mutex used_maps_mutex
; /* mutex for used_maps and used_map_cnt */
1551 struct btf_mod_pair
*used_btfs
;
1552 struct bpf_prog
*prog
;
1553 struct user_struct
*user
;
1554 u64 load_time
; /* ns since boottime */
1556 int cgroup_atype
; /* enum cgroup_bpf_attach_type */
1557 struct bpf_map
*cgroup_storage
[MAX_BPF_CGROUP_STORAGE_TYPE
];
1558 char name
[BPF_OBJ_NAME_LEN
];
1559 u64 (*bpf_exception_cb
)(u64 cookie
, u64 sp
, u64 bp
, u64
, u64
);
1560 #ifdef CONFIG_SECURITY
1563 struct bpf_token
*token
;
1564 struct bpf_prog_offload
*offload
;
1566 struct bpf_func_info
*func_info
;
1567 struct bpf_func_info_aux
*func_info_aux
;
1568 /* bpf_line_info loaded from userspace. linfo->insn_off
1569 * has the xlated insn offset.
1570 * Both the main and sub prog share the same linfo.
1571 * The subprog can access its first linfo by
1572 * using the linfo_idx.
1574 struct bpf_line_info
*linfo
;
1575 /* jited_linfo is the jited addr of the linfo. It has a
1576 * one to one mapping to linfo:
1577 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
1578 * Both the main and sub prog share the same jited_linfo.
1579 * The subprog can access its first jited_linfo by
1580 * using the linfo_idx.
1585 /* subprog can use linfo_idx to access its first linfo and
1587 * main prog always has linfo_idx == 0
1592 struct exception_table_entry
*extable
;
1594 struct work_struct work
;
1595 struct rcu_head rcu
;
struct bpf_prog {
	u16 pages;		/* Number of allocated pages */
	u16 jited:1,		/* Is our filter JIT'ed? */
	    jit_requested:1,	/* archs need to JIT the prog */
	    gpl_compatible:1,	/* Is filter GPL compatible? */
	    cb_access:1,	/* Is control block accessed? */
	    dst_needed:1,	/* Do we need dst entry? */
	    blinding_requested:1, /* needs constant blinding */
	    blinded:1,		/* Was blinded */
	    is_func:1,		/* program is a bpf function */
	    kprobe_override:1,	/* Do we override a kprobe? */
	    has_callchain_buf:1, /* callchain buffer allocated? */
	    enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
	    call_get_stack:1,	/* Do we call bpf_get_stack() or bpf_get_stackid() */
	    call_get_func_ip:1,	/* Do we call get_func_ip() */
	    tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
	    sleepable:1;	/* BPF program is sleepable */
	enum bpf_prog_type	type;		/* Type of BPF program */
	enum bpf_attach_type	expected_attach_type; /* For some prog types */
	u32			len;		/* Number of filter blocks */
	u32			jited_len;	/* Size of jited insns in bytes */
	u8			tag[BPF_TAG_SIZE];
	struct bpf_prog_stats __percpu *stats;
	int __percpu		*active;
	unsigned int		(*bpf_func)(const void *ctx,
					    const struct bpf_insn *insn);
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	/* Instructions for interpreter */
	union {
		DECLARE_FLEX_ARRAY(struct sock_filter, insns);
		DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
	};
};
struct bpf_array_aux {
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	/* whether BPF link itself has "sleepable" semantics, which can differ
	 * from underlying BPF program having a "sleepable" semantics, as BPF
	 * link's semantics is determined by target attach hook
	 */
	bool sleepable;
	/* rcu is used before freeing, work can be used to schedule that
	 * RCU-based freeing before that, so they never overlap
	 */
	struct rcu_head rcu;
	struct work_struct work;
};
struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	/* deallocate link resources callback, called without RCU grace period
	 * waiting
	 */
	void (*dealloc)(struct bpf_link *link);
	/* deallocate link resources callback, called after RCU grace period;
	 * if either the underlying BPF program is sleepable or BPF link's
	 * target hook is sleepable, we'll go through tasks trace RCU GP and
	 * then "classic" RCU GP; this need for chaining tasks trace and
	 * classic RCU GPs is designated by setting bpf_link->sleepable flag
	 */
	void (*dealloc_deferred)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
	int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
			  struct bpf_map *old_map);
	__poll_t (*poll)(struct file *file, struct poll_table_struct *pts);
};
struct bpf_tramp_link {
	struct bpf_link link;
	struct hlist_node tramp_hlist;
};

struct bpf_shim_tramp_link {
	struct bpf_tramp_link link;
	struct bpf_trampoline *trampoline;
};

struct bpf_tracing_link {
	struct bpf_tramp_link link;
	enum bpf_attach_type attach_type;
	struct bpf_trampoline *trampoline;
	struct bpf_prog *tgt_prog;
};

struct bpf_raw_tp_link {
	struct bpf_link link;
	struct bpf_raw_event_map *btp;
};

struct bpf_link_primer {
	struct bpf_link *link;
};

struct bpf_mount_opts {
	/* BPF token-related delegation options */
	u64 delegate_attachs;
};

struct bpf_token {
	struct work_struct work;
	struct user_namespace *userns;
	u64 allowed_attachs;
#ifdef CONFIG_SECURITY
	void *security;
#endif
};
struct bpf_struct_ops_value;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
/**
 * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to
 *			   define a BPF_MAP_TYPE_STRUCT_OPS map type composed
 *			   of BPF_PROG_TYPE_STRUCT_OPS progs.
 * @verifier_ops: A structure of callbacks that are invoked by the verifier
 *		  when determining whether the struct_ops progs in the
 *		  struct_ops map are valid.
 * @init: A callback that is invoked a single time, and before any other
 *	  callback, to initialize the structure. A nonzero return value means
 *	  the subsystem could not be initialized.
 * @check_member: When defined, a callback invoked by the verifier to allow
 *		  the subsystem to determine if an entry in the struct_ops map
 *		  is valid. A nonzero return value means that the map is
 *		  invalid and should be rejected by the verifier.
 * @init_member: A callback that is invoked for each member of the struct_ops
 *		 map to allow the subsystem to initialize the member. A nonzero
 *		 value means the member could not be initialized. This callback
 *		 is exclusive with the @type, @type_id, @value_type, and
 *		 @value_id fields.
 * @reg: A callback that is invoked when the struct_ops map has been
 *	 initialized and is being attached to. Zero means the struct_ops map
 *	 has been successfully registered and is live. A nonzero return value
 *	 means the struct_ops map could not be registered.
 * @unreg: A callback that is invoked when the struct_ops map should be
 *	   unregistered.
 * @update: A callback that is invoked when the live struct_ops map is being
 *	    updated to contain new values. This callback is only invoked when
 *	    the struct_ops map is loaded with BPF_F_LINK. If not defined, it
 *	    is assumed that the struct_ops map cannot be updated.
 * @validate: A callback that is invoked after all of the members have been
 *	      initialized. This callback should perform static checks on the
 *	      map, meaning that it should either fail or succeed
 *	      deterministically. A struct_ops map that has been validated may
 *	      not necessarily succeed in being registered if the call to @reg
 *	      fails. For example, a valid struct_ops map may be loaded, but
 *	      then fail to be registered due to there being another active
 *	      struct_ops map on the system in the subsystem already. For this
 *	      reason, if this callback is not defined, the check is skipped as
 *	      the struct_ops map will have final verification performed in
 *	      @reg.
 * @value_type: Value type.
 * @name: The name of the struct bpf_struct_ops object.
 * @func_models: Func models
 * @type_id: BTF type id.
 * @value_id: BTF value id.
 */
{
1794 const struct bpf_verifier_ops
*verifier_ops
;
1795 int (*init
)(struct btf
*btf
);
1796 int (*check_member
)(const struct btf_type
*t
,
1797 const struct btf_member
*member
,
1798 const struct bpf_prog
*prog
);
1799 int (*init_member
)(const struct btf_type
*t
,
1800 const struct btf_member
*member
,
1801 void *kdata
, const void *udata
);
1802 int (*reg
)(void *kdata
, struct bpf_link
*link
);
1803 void (*unreg
)(void *kdata
, struct bpf_link
*link
);
1804 int (*update
)(void *kdata
, void *old_kdata
, struct bpf_link
*link
);
1805 int (*validate
)(void *kdata
);
1807 struct module
*owner
;
1809 struct btf_func_model func_models
[BPF_STRUCT_OPS_MAX_NR_MEMBERS
];
/* Every member of a struct_ops type has an instance even if a member is not
 * an operator (function pointer). The "info" field will be assigned to
 * prog->aux->ctx_arg_info of BPF struct_ops programs to provide the
 * argument information required by the verifier to verify the program.
 *
 * btf_ctx_access() will lookup prog->aux->ctx_arg_info to find the
 * corresponding entry for a given argument.
 */
struct bpf_struct_ops_arg_info {
	struct bpf_ctx_arg_aux *info;
	u32 cnt;
};
struct bpf_struct_ops_desc {
	struct bpf_struct_ops *st_ops;

	const struct btf_type *type;
	const struct btf_type *value_type;
	u32 type_id;
	u32 value_id;

	/* Collection of argument information for each member */
	struct bpf_struct_ops_arg_info *arg_info;
};
enum bpf_struct_ops_state {
	BPF_STRUCT_OPS_STATE_INIT,
	BPF_STRUCT_OPS_STATE_INUSE,
	BPF_STRUCT_OPS_STATE_TOBEFREE,
	BPF_STRUCT_OPS_STATE_READY,
};

struct bpf_struct_ops_common_value {
	refcount_t refcnt;
	enum bpf_struct_ops_state state;
};
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
/* This macro helps developers register a struct_ops type and generate
 * type information correctly. Developers should use this macro to register
 * a struct_ops type instead of calling __register_bpf_struct_ops() directly.
 */
#define register_bpf_struct_ops(st_ops, type)				\
	({								\
		struct bpf_struct_ops_##type {				\
			struct bpf_struct_ops_common_value common;	\
			struct type data ____cacheline_aligned_in_smp;	\
		};							\
		BTF_TYPE_EMIT(struct bpf_struct_ops_##type);		\
		__register_bpf_struct_ops(st_ops);			\
	})
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
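/* Usage sketch (illustrative only, not part of this header): a subsystem
 * exposing "struct my_ops" as a struct_ops type would typically fill in a
 * struct bpf_struct_ops and register it from its init path. The names
 * my_ops, bpf_my_ops and the my_ops_* callbacks are hypothetical.
 *
 *	static struct bpf_struct_ops bpf_my_ops = {
 *		.verifier_ops	= &my_ops_verifier_ops,
 *		.init		= my_ops_init,
 *		.init_member	= my_ops_init_member,
 *		.reg		= my_ops_reg,
 *		.unreg		= my_ops_unreg,
 *		.name		= "my_ops",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init my_ops_struct_ops_init(void)
 *	{
 *		return register_bpf_struct_ops(&bpf_my_ops, my_ops);
 *	}
 */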
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *stub_func,
				      void **image, u32 *image_off,
				      bool allow_alloc);
void bpf_struct_ops_image_free(void *image);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}

static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}
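/* Usage sketch (illustrative only): a subsystem holding on to struct_ops
 * kdata pins and releases its provider with the same pair of calls it uses
 * for a regular module; BPF_MODULE_OWNER routes the request to the
 * struct_ops map refcount instead of a module refcount.
 *
 *	if (!bpf_try_module_get(kdata, owner))
 *		return -ENODEV;
 *	...
 *	bpf_module_put(kdata, owner);
 */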
1890 int bpf_struct_ops_link_create(union bpf_attr
*attr
);
#ifdef CONFIG_NET
/* Define it here to avoid the use of forward declaration */
struct bpf_dummy_ops_state {
	int val;
};

struct bpf_dummy_ops {
	int (*test_1)(struct bpf_dummy_ops_state *cb);
	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
		      char a3, unsigned long a4);
	int (*test_sleepable)(struct bpf_dummy_ops_state *cb);
};

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr);
#endif
int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
			     struct btf *btf,
			     struct bpf_verifier_log *log);
void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map);
void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc);
#else
1914 #define register_bpf_struct_ops(st_ops, type) ({ (void *)(st_ops); 0; })
1915 static inline bool bpf_try_module_get(const void *data
, struct module
*owner
)
1917 return try_module_get(owner
);
1919 static inline void bpf_module_put(const void *data
, struct module
*owner
)
1923 static inline int bpf_struct_ops_supported(const struct bpf_struct_ops
*st_ops
, u32 moff
)
1927 static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map
*map
,
1933 static inline int bpf_struct_ops_link_create(union bpf_attr
*attr
)
1937 static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info
*info
, struct bpf_map
*map
)
1941 static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc
*st_ops_desc
)
1947 #if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
1948 int bpf_trampoline_link_cgroup_shim(struct bpf_prog
*prog
,
1950 void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog
*prog
);
1952 static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog
*prog
,
1957 static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog
*prog
)
1966 struct bpf_array_aux
*aux
;
1968 DECLARE_FLEX_ARRAY(char, value
) __aligned(8);
1969 DECLARE_FLEX_ARRAY(void *, ptrs
) __aligned(8);
1970 DECLARE_FLEX_ARRAY(void __percpu
*, pptrs
) __aligned(8);
#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 33

/* Maximum number of loops for bpf_loop and bpf_iter_num.
 * It's enum to expose it (and thus make it discoverable) through BTF.
 */
enum {
	BPF_MAX_LOOPS = 8 * 1024 * 1024,
};

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)

/* Maximum number of user-producer ring buffer samples that can be drained in
 * a call to bpf_user_ringbuf_drain().
 */
#define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024)
static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
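/* Example (illustrative only): a map created with BPF_F_RDONLY_PROG in its
 * map_flags is read-only from the program side, so bpf_map_flags_to_cap()
 * reports BPF_MAP_CAN_READ only, while a map created with neither flag gets
 * both capabilities. bpf_map_flags_access_ok() is what rejects the
 * contradictory combination of both flags at map creation time.
 *
 *	u32 cap = bpf_map_flags_to_cap(map);
 *
 *	if (!(cap & BPF_MAP_CAN_WRITE))
 *		return -EACCES;
 */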
2018 struct bpf_event_entry
{
2019 struct perf_event
*event
;
2020 struct file
*perf_file
;
2021 struct file
*map_file
;
2022 struct rcu_head rcu
;
2025 static inline bool map_type_contains_progs(struct bpf_map
*map
)
2027 return map
->map_type
== BPF_MAP_TYPE_PROG_ARRAY
||
2028 map
->map_type
== BPF_MAP_TYPE_DEVMAP
||
2029 map
->map_type
== BPF_MAP_TYPE_CPUMAP
;
2032 bool bpf_prog_map_compatible(struct bpf_map
*map
, const struct bpf_prog
*fp
);
2033 int bpf_prog_calc_tag(struct bpf_prog
*fp
);
2035 const struct bpf_func_proto
*bpf_get_trace_printk_proto(void);
2036 const struct bpf_func_proto
*bpf_get_trace_vprintk_proto(void);
2038 typedef unsigned long (*bpf_ctx_copy_t
)(void *dst
, const void *src
,
2039 unsigned long off
, unsigned long len
);
2040 typedef u32 (*bpf_convert_ctx_access_t
)(enum bpf_access_type type
,
2041 const struct bpf_insn
*src
,
2042 struct bpf_insn
*dst
,
2043 struct bpf_prog
*prog
,
2046 u64
bpf_event_output(struct bpf_map
*map
, u64 flags
, void *meta
, u64 meta_size
,
2047 void *ctx
, u64 ctx_size
, bpf_ctx_copy_t ctx_copy
);
/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
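/* A slightly fuller sketch of that lifecycle (illustrative only;
 * "attached_array" is a hypothetical RCU-managed pointer):
 *
 *	new = bpf_prog_array_alloc(cnt, GFP_KERNEL);
 *	(fill new->items[]; the entry after the last program stays NULL)
 *	old = xchg(&attached_array, new);
 *	bpf_prog_array_free(old);
 *	(the old array is freed after an RCU grace period)
 */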
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	union {
		struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
		u64 bpf_cookie;
	};
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_empty_prog_array {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
};
/* To avoid allocating an empty bpf_prog_array for cgroups that have no BPF
 * program attached, the single global 'bpf_empty_prog_array' is used instead.
 * It will not be modified by the caller of bpf_prog_array_alloc()
 * (since the caller requested prog_cnt == 0), and that pointer can still be
 * 'freed' with bpf_prog_array_free().
 */
extern struct bpf_empty_prog_array bpf_empty_prog_array;
2087 struct bpf_prog_array
*bpf_prog_array_alloc(u32 prog_cnt
, gfp_t flags
);
2088 void bpf_prog_array_free(struct bpf_prog_array
*progs
);
2089 /* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
2090 void bpf_prog_array_free_sleepable(struct bpf_prog_array
*progs
);
2091 int bpf_prog_array_length(struct bpf_prog_array
*progs
);
2092 bool bpf_prog_array_is_empty(struct bpf_prog_array
*array
);
2093 int bpf_prog_array_copy_to_user(struct bpf_prog_array
*progs
,
2094 __u32 __user
*prog_ids
, u32 cnt
);
2096 void bpf_prog_array_delete_safe(struct bpf_prog_array
*progs
,
2097 struct bpf_prog
*old_prog
);
2098 int bpf_prog_array_delete_safe_at(struct bpf_prog_array
*array
, int index
);
2099 int bpf_prog_array_update_at(struct bpf_prog_array
*array
, int index
,
2100 struct bpf_prog
*prog
);
2101 int bpf_prog_array_copy_info(struct bpf_prog_array
*array
,
2102 u32
*prog_ids
, u32 request_cnt
,
2104 int bpf_prog_array_copy(struct bpf_prog_array
*old_array
,
2105 struct bpf_prog
*exclude_prog
,
2106 struct bpf_prog
*include_prog
,
2108 struct bpf_prog_array
**new_array
);
2110 struct bpf_run_ctx
{};
2112 struct bpf_cg_run_ctx
{
2113 struct bpf_run_ctx run_ctx
;
2114 const struct bpf_prog_array_item
*prog_item
;
2118 struct bpf_trace_run_ctx
{
2119 struct bpf_run_ctx run_ctx
;
2124 struct bpf_tramp_run_ctx
{
2125 struct bpf_run_ctx run_ctx
;
2127 struct bpf_run_ctx
*saved_run_ctx
;
2130 static inline struct bpf_run_ctx
*bpf_set_run_ctx(struct bpf_run_ctx
*new_ctx
)
2132 struct bpf_run_ctx
*old_ctx
= NULL
;
2134 #ifdef CONFIG_BPF_SYSCALL
2135 old_ctx
= current
->bpf_ctx
;
2136 current
->bpf_ctx
= new_ctx
;
2141 static inline void bpf_reset_run_ctx(struct bpf_run_ctx
*old_ctx
)
2143 #ifdef CONFIG_BPF_SYSCALL
2144 current
->bpf_ctx
= old_ctx
;
2148 /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
2149 #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
2150 /* BPF program asks to set CN on the packet. */
2151 #define BPF_RET_SET_CN (1 << 0)
2153 typedef u32 (*bpf_prog_run_fn
)(const struct bpf_prog
*prog
, const void *ctx
);
static __always_inline u32
bpf_prog_run_array(const struct bpf_prog_array *array,
		   const void *ctx, bpf_prog_run_fn run_prog)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_trace_run_ctx run_ctx;
	u32 ret = 1;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");

	if (unlikely(!array))
		return ret;

	run_ctx.is_uprobe = false;

	migrate_disable();
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	item = &array->items[0];
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.bpf_cookie = item->bpf_cookie;
		ret &= run_prog(prog, ctx);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	migrate_enable();
	return ret;
}
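/* Usage sketch (illustrative only; "attached_array" is hypothetical):
 * callers hold rcu_read_lock() around the dereference and the run, and a
 * combined return value of 1 means every attached program returned 1.
 *
 *	rcu_read_lock();
 *	ret = bpf_prog_run_array(rcu_dereference(attached_array), ctx,
 *				 bpf_prog_run);
 *	rcu_read_unlock();
 */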
2185 /* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
2187 * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
2188 * overall. As a result, we must use the bpf_prog_array_free_sleepable
2189 * in order to use the tasks_trace rcu grace period.
2191 * When a non-sleepable program is inside the array, we take the rcu read
2192 * section and disable preemption for that program alone, so it can access
2193 * rcu-protected dynamically sized maps.
2195 static __always_inline u32
2196 bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu
*array_rcu
,
2197 const void *ctx
, bpf_prog_run_fn run_prog
)
2199 const struct bpf_prog_array_item
*item
;
2200 const struct bpf_prog
*prog
;
2201 const struct bpf_prog_array
*array
;
2202 struct bpf_run_ctx
*old_run_ctx
;
2203 struct bpf_trace_run_ctx run_ctx
;
2208 rcu_read_lock_trace();
2211 run_ctx
.is_uprobe
= true;
2213 array
= rcu_dereference_check(array_rcu
, rcu_read_lock_trace_held());
2214 if (unlikely(!array
))
2216 old_run_ctx
= bpf_set_run_ctx(&run_ctx
.run_ctx
);
2217 item
= &array
->items
[0];
2218 while ((prog
= READ_ONCE(item
->prog
))) {
2219 if (!prog
->sleepable
)
2222 run_ctx
.bpf_cookie
= item
->bpf_cookie
;
2223 ret
&= run_prog(prog
, ctx
);
2226 if (!prog
->sleepable
)
2229 bpf_reset_run_ctx(old_run_ctx
);
2232 rcu_read_unlock_trace();
2236 #ifdef CONFIG_BPF_SYSCALL
2237 DECLARE_PER_CPU(int, bpf_prog_active
);
2238 extern struct mutex bpf_stats_enabled_mutex
;
/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	this_cpu_dec(bpf_prog_active);
	migrate_enable();
}
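/* Usage sketch (illustrative only): this is roughly how the syscall-side map
 * update path brackets the operation so that an instrumentation-attached
 * program cannot run and re-take a bucket lock the update already holds:
 *
 *	bpf_disable_instrumentation();
 *	rcu_read_lock();
 *	err = map->ops->map_update_elem(map, key, value, flags);
 *	rcu_read_unlock();
 *	bpf_enable_instrumentation();
 */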
2258 extern const struct super_operations bpf_super_ops
;
2259 extern const struct file_operations bpf_map_fops
;
2260 extern const struct file_operations bpf_prog_fops
;
2261 extern const struct file_operations bpf_iter_fops
;
2263 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
2264 extern const struct bpf_prog_ops _name ## _prog_ops; \
2265 extern const struct bpf_verifier_ops _name ## _verifier_ops;
2266 #define BPF_MAP_TYPE(_id, _ops) \
2267 extern const struct bpf_map_ops _ops;
2268 #define BPF_LINK_TYPE(_id, _name)
2269 #include <linux/bpf_types.h>
2270 #undef BPF_PROG_TYPE
2272 #undef BPF_LINK_TYPE
2274 extern const struct bpf_prog_ops bpf_offload_prog_ops
;
2275 extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops
;
2276 extern const struct bpf_verifier_ops xdp_analyzer_ops
;
2278 struct bpf_prog
*bpf_prog_get(u32 ufd
);
2279 struct bpf_prog
*bpf_prog_get_type_dev(u32 ufd
, enum bpf_prog_type type
,
2281 void bpf_prog_add(struct bpf_prog
*prog
, int i
);
2282 void bpf_prog_sub(struct bpf_prog
*prog
, int i
);
2283 void bpf_prog_inc(struct bpf_prog
*prog
);
2284 struct bpf_prog
* __must_check
bpf_prog_inc_not_zero(struct bpf_prog
*prog
);
2285 void bpf_prog_put(struct bpf_prog
*prog
);
2287 void bpf_prog_free_id(struct bpf_prog
*prog
);
2288 void bpf_map_free_id(struct bpf_map
*map
);
2290 struct btf_field
*btf_record_find(const struct btf_record
*rec
,
2291 u32 offset
, u32 field_mask
);
2292 void btf_record_free(struct btf_record
*rec
);
2293 void bpf_map_free_record(struct bpf_map
*map
);
2294 struct btf_record
*btf_record_dup(const struct btf_record
*rec
);
2295 bool btf_record_equal(const struct btf_record
*rec_a
, const struct btf_record
*rec_b
);
2296 void bpf_obj_free_timer(const struct btf_record
*rec
, void *obj
);
2297 void bpf_obj_free_workqueue(const struct btf_record
*rec
, void *obj
);
2298 void bpf_obj_free_fields(const struct btf_record
*rec
, void *obj
);
2299 void __bpf_obj_drop_impl(void *p
, const struct btf_record
*rec
, bool percpu
);
2301 struct bpf_map
*bpf_map_get(u32 ufd
);
2302 struct bpf_map
*bpf_map_get_with_uref(u32 ufd
);
2304 static inline struct bpf_map
*__bpf_map_get(struct fd f
)
2307 return ERR_PTR(-EBADF
);
2308 if (unlikely(fd_file(f
)->f_op
!= &bpf_map_fops
))
2309 return ERR_PTR(-EINVAL
);
2310 return fd_file(f
)->private_data
;
2313 void bpf_map_inc(struct bpf_map
*map
);
2314 void bpf_map_inc_with_uref(struct bpf_map
*map
);
2315 struct bpf_map
*__bpf_map_inc_not_zero(struct bpf_map
*map
, bool uref
);
2316 struct bpf_map
* __must_check
bpf_map_inc_not_zero(struct bpf_map
*map
);
2317 void bpf_map_put_with_uref(struct bpf_map
*map
);
2318 void bpf_map_put(struct bpf_map
*map
);
2319 void *bpf_map_area_alloc(u64 size
, int numa_node
);
2320 void *bpf_map_area_mmapable_alloc(u64 size
, int numa_node
);
2321 void bpf_map_area_free(void *base
);
2322 bool bpf_map_write_active(const struct bpf_map
*map
);
2323 void bpf_map_init_from_attr(struct bpf_map
*map
, union bpf_attr
*attr
);
2324 int generic_map_lookup_batch(struct bpf_map
*map
,
2325 const union bpf_attr
*attr
,
2326 union bpf_attr __user
*uattr
);
2327 int generic_map_update_batch(struct bpf_map
*map
, struct file
*map_file
,
2328 const union bpf_attr
*attr
,
2329 union bpf_attr __user
*uattr
);
2330 int generic_map_delete_batch(struct bpf_map
*map
,
2331 const union bpf_attr
*attr
,
2332 union bpf_attr __user
*uattr
);
2333 struct bpf_map
*bpf_map_get_curr_or_next(u32
*id
);
2334 struct bpf_prog
*bpf_prog_get_curr_or_next(u32
*id
);
2336 int bpf_map_alloc_pages(const struct bpf_map
*map
, gfp_t gfp
, int nid
,
2337 unsigned long nr_pages
, struct page
**page_array
);
2339 void *bpf_map_kmalloc_node(const struct bpf_map
*map
, size_t size
, gfp_t flags
,
2341 void *bpf_map_kzalloc(const struct bpf_map
*map
, size_t size
, gfp_t flags
);
2342 void *bpf_map_kvcalloc(struct bpf_map
*map
, size_t n
, size_t size
,
2344 void __percpu
*bpf_map_alloc_percpu(const struct bpf_map
*map
, size_t size
,
2345 size_t align
, gfp_t flags
);
/*
 * These specialized allocators have to be macros for their allocations to be
 * accounted separately (to have separate alloc_tag).
 */
#define bpf_map_kmalloc_node(_map, _size, _flags, _node)	\
		kmalloc_node(_size, _flags, _node)
#define bpf_map_kzalloc(_map, _size, _flags)			\
		kzalloc(_size, _flags)
#define bpf_map_kvcalloc(_map, _n, _size, _flags)		\
		kvcalloc(_n, _size, _flags)
#define bpf_map_alloc_percpu(_map, _size, _align, _flags)	\
		__alloc_percpu_gfp(_size, _align, _flags)
#endif

static inline int
bpf_map_init_elem_count(struct bpf_map *map)
{
	size_t size = sizeof(*map->elem_count), align = size;
	gfp_t flags = GFP_USER | __GFP_NOWARN;

	map->elem_count = bpf_map_alloc_percpu(map, size, align, flags);
	if (!map->elem_count)
		return -ENOMEM;

	return 0;
}

static inline void
bpf_map_free_elem_count(struct bpf_map *map)
{
	free_percpu(map->elem_count);
}

static inline void bpf_map_inc_elem_count(struct bpf_map *map)
{
	this_cpu_inc(*map->elem_count);
}

static inline void bpf_map_dec_elem_count(struct bpf_map *map)
{
	this_cpu_dec(*map->elem_count);
}
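/* Usage sketch (illustrative only; "my_map_alloc" and "smap" are
 * hypothetical): a map implementation that wants per-map element accounting
 * sets the counter up in ->map_alloc(), adjusts it from its update/delete
 * paths with bpf_map_inc_elem_count()/bpf_map_dec_elem_count(), and tears it
 * down with bpf_map_free_elem_count() in ->map_free().
 *
 *	static struct bpf_map *my_map_alloc(union bpf_attr *attr)
 *	{
 *		...
 *		if (bpf_map_init_elem_count(&smap->map))
 *			goto free_smap;
 *		...
 *	}
 */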
2390 extern int sysctl_unprivileged_bpf_disabled
;
2392 bool bpf_token_capable(const struct bpf_token
*token
, int cap
);
2394 static inline bool bpf_allow_ptr_leaks(const struct bpf_token
*token
)
2396 return bpf_token_capable(token
, CAP_PERFMON
);
2399 static inline bool bpf_allow_uninit_stack(const struct bpf_token
*token
)
2401 return bpf_token_capable(token
, CAP_PERFMON
);
2404 static inline bool bpf_bypass_spec_v1(const struct bpf_token
*token
)
2406 return cpu_mitigations_off() || bpf_token_capable(token
, CAP_PERFMON
);
2409 static inline bool bpf_bypass_spec_v4(const struct bpf_token
*token
)
2411 return cpu_mitigations_off() || bpf_token_capable(token
, CAP_PERFMON
);
2414 int bpf_map_new_fd(struct bpf_map
*map
, int flags
);
2415 int bpf_prog_new_fd(struct bpf_prog
*prog
);
2417 void bpf_link_init(struct bpf_link
*link
, enum bpf_link_type type
,
2418 const struct bpf_link_ops
*ops
, struct bpf_prog
*prog
);
2419 void bpf_link_init_sleepable(struct bpf_link
*link
, enum bpf_link_type type
,
2420 const struct bpf_link_ops
*ops
, struct bpf_prog
*prog
,
2422 int bpf_link_prime(struct bpf_link
*link
, struct bpf_link_primer
*primer
);
2423 int bpf_link_settle(struct bpf_link_primer
*primer
);
2424 void bpf_link_cleanup(struct bpf_link_primer
*primer
);
2425 void bpf_link_inc(struct bpf_link
*link
);
2426 struct bpf_link
*bpf_link_inc_not_zero(struct bpf_link
*link
);
2427 void bpf_link_put(struct bpf_link
*link
);
2428 int bpf_link_new_fd(struct bpf_link
*link
);
2429 struct bpf_link
*bpf_link_get_from_fd(u32 ufd
);
2430 struct bpf_link
*bpf_link_get_curr_or_next(u32
*id
);
2432 void bpf_token_inc(struct bpf_token
*token
);
2433 void bpf_token_put(struct bpf_token
*token
);
2434 int bpf_token_create(union bpf_attr
*attr
);
2435 struct bpf_token
*bpf_token_get_from_fd(u32 ufd
);
2437 bool bpf_token_allow_cmd(const struct bpf_token
*token
, enum bpf_cmd cmd
);
2438 bool bpf_token_allow_map_type(const struct bpf_token
*token
, enum bpf_map_type type
);
2439 bool bpf_token_allow_prog_type(const struct bpf_token
*token
,
2440 enum bpf_prog_type prog_type
,
2441 enum bpf_attach_type attach_type
);
2443 int bpf_obj_pin_user(u32 ufd
, int path_fd
, const char __user
*pathname
);
2444 int bpf_obj_get_user(int path_fd
, const char __user
*pathname
, int flags
);
2445 struct inode
*bpf_get_inode(struct super_block
*sb
, const struct inode
*dir
,
2448 #define BPF_ITER_FUNC_PREFIX "bpf_iter_"
2449 #define DEFINE_BPF_ITER_FUNC(target, args...) \
2450 extern int bpf_iter_ ## target(args); \
2451 int __init bpf_iter_ ## target(args) { return 0; }
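/* Usage sketch: an iterator target named "task" emits the BTF-visible
 * signature of its bpf_iter program type like this (mirroring how in-tree
 * targets use the macro; the context argument list belongs to the target):
 *
 *	DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta,
 *			     struct task_struct *task)
 */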
/*
 * The task type of iterators.
 *
 * BPF task iterators can be parameterized to visit only a subset of tasks.
 *
 * BPF_TASK_ITER_ALL (default)
 *	Iterate over resources of every task.
 *
 * BPF_TASK_ITER_TID
 *	Iterate over resources of a task/tid.
 *
 * BPF_TASK_ITER_TGID
 *	Iterate over resources of every task of a process / task group.
 */
enum bpf_iter_task_type {
	BPF_TASK_ITER_ALL = 0,
	BPF_TASK_ITER_TID,
	BPF_TASK_ITER_TGID,
};

struct bpf_iter_aux_info {
	/* for map_elem iter */
	struct bpf_map *map;

	/* for cgroup iter */
	struct {
		struct cgroup *start; /* starting cgroup */
		enum bpf_cgroup_iter_order order;
	} cgroup;
	struct {
		enum bpf_iter_task_type	type;
		u32 pid;
	} task;
};
2489 typedef int (*bpf_iter_attach_target_t
)(struct bpf_prog
*prog
,
2490 union bpf_iter_link_info
*linfo
,
2491 struct bpf_iter_aux_info
*aux
);
2492 typedef void (*bpf_iter_detach_target_t
)(struct bpf_iter_aux_info
*aux
);
2493 typedef void (*bpf_iter_show_fdinfo_t
) (const struct bpf_iter_aux_info
*aux
,
2494 struct seq_file
*seq
);
2495 typedef int (*bpf_iter_fill_link_info_t
)(const struct bpf_iter_aux_info
*aux
,
2496 struct bpf_link_info
*info
);
2497 typedef const struct bpf_func_proto
*
2498 (*bpf_iter_get_func_proto_t
)(enum bpf_func_id func_id
,
2499 const struct bpf_prog
*prog
);
2501 enum bpf_iter_feature
{
2502 BPF_ITER_RESCHED
= BIT(0),
2505 #define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
	const char *target;
	bpf_iter_attach_target_t attach_target;
	bpf_iter_detach_target_t detach_target;
	bpf_iter_show_fdinfo_t show_fdinfo;
	bpf_iter_fill_link_info_t fill_link_info;
	bpf_iter_get_func_proto_t get_func_proto;
	u32 ctx_arg_info_size;
	u32 feature;
	struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
	const struct bpf_iter_seq_info *seq_info;
};
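/* Usage sketch (illustrative only; "foo" and its seq_info are hypothetical):
 * a target fills in a bpf_iter_reg and registers it once at init time.
 *
 *	static const struct bpf_iter_reg foo_reg_info = {
 *		.target			= "foo",
 *		.ctx_arg_info_size	= 1,
 *		.ctx_arg_info		= {
 *			{ offsetof(struct bpf_iter__foo, foo),
 *			  PTR_TO_BTF_ID_OR_NULL },
 *		},
 *		.seq_info		= &foo_seq_info,
 *	};
 *
 *	static int __init foo_iter_init(void)
 *	{
 *		return bpf_iter_reg_target(&foo_reg_info);
 *	}
 */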
2519 struct bpf_iter_meta
{
2520 __bpf_md_ptr(struct seq_file
*, seq
);
2525 struct bpf_iter__bpf_map_elem
{
2526 __bpf_md_ptr(struct bpf_iter_meta
*, meta
);
2527 __bpf_md_ptr(struct bpf_map
*, map
);
2528 __bpf_md_ptr(void *, key
);
2529 __bpf_md_ptr(void *, value
);
2532 int bpf_iter_reg_target(const struct bpf_iter_reg
*reg_info
);
2533 void bpf_iter_unreg_target(const struct bpf_iter_reg
*reg_info
);
2534 bool bpf_iter_prog_supported(struct bpf_prog
*prog
);
2535 const struct bpf_func_proto
*
2536 bpf_iter_get_func_proto(enum bpf_func_id func_id
, const struct bpf_prog
*prog
);
2537 int bpf_iter_link_attach(const union bpf_attr
*attr
, bpfptr_t uattr
, struct bpf_prog
*prog
);
2538 int bpf_iter_new_fd(struct bpf_link
*link
);
2539 bool bpf_link_is_iter(struct bpf_link
*link
);
2540 struct bpf_prog
*bpf_iter_get_info(struct bpf_iter_meta
*meta
, bool in_stop
);
2541 int bpf_iter_run_prog(struct bpf_prog
*prog
, void *ctx
);
2542 void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info
*aux
,
2543 struct seq_file
*seq
);
2544 int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info
*aux
,
2545 struct bpf_link_info
*info
);
2547 int map_set_for_each_callback_args(struct bpf_verifier_env
*env
,
2548 struct bpf_func_state
*caller
,
2549 struct bpf_func_state
*callee
);
2551 int bpf_percpu_hash_copy(struct bpf_map
*map
, void *key
, void *value
);
2552 int bpf_percpu_array_copy(struct bpf_map
*map
, void *key
, void *value
);
2553 int bpf_percpu_hash_update(struct bpf_map
*map
, void *key
, void *value
,
2555 int bpf_percpu_array_update(struct bpf_map
*map
, void *key
, void *value
,
2558 int bpf_stackmap_copy(struct bpf_map
*map
, void *key
, void *value
);
2560 int bpf_fd_array_map_update_elem(struct bpf_map
*map
, struct file
*map_file
,
2561 void *key
, void *value
, u64 map_flags
);
2562 int bpf_fd_array_map_lookup_elem(struct bpf_map
*map
, void *key
, u32
*value
);
2563 int bpf_fd_htab_map_update_elem(struct bpf_map
*map
, struct file
*map_file
,
2564 void *key
, void *value
, u64 map_flags
);
2565 int bpf_fd_htab_map_lookup_elem(struct bpf_map
*map
, void *key
, u32
*value
);
2567 int bpf_get_file_flag(int flags
);
2568 int bpf_check_uarg_tail_zero(bpfptr_t uaddr
, size_t expected_size
,
2569 size_t actual_size
);
2571 /* verify correctness of eBPF program */
2572 int bpf_check(struct bpf_prog
**fp
, union bpf_attr
*attr
, bpfptr_t uattr
, u32 uattr_size
);
2574 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2575 void bpf_patch_call_args(struct bpf_insn
*insn
, u32 stack_depth
);
2578 struct btf
*bpf_get_btf_vmlinux(void);
2583 struct bpf_dtab_netdev
;
2584 struct bpf_cpu_map_entry
;
2586 void __dev_flush(struct list_head
*flush_list
);
2587 int dev_xdp_enqueue(struct net_device
*dev
, struct xdp_frame
*xdpf
,
2588 struct net_device
*dev_rx
);
2589 int dev_map_enqueue(struct bpf_dtab_netdev
*dst
, struct xdp_frame
*xdpf
,
2590 struct net_device
*dev_rx
);
2591 int dev_map_enqueue_multi(struct xdp_frame
*xdpf
, struct net_device
*dev_rx
,
2592 struct bpf_map
*map
, bool exclude_ingress
);
2593 int dev_map_generic_redirect(struct bpf_dtab_netdev
*dst
, struct sk_buff
*skb
,
2594 struct bpf_prog
*xdp_prog
);
2595 int dev_map_redirect_multi(struct net_device
*dev
, struct sk_buff
*skb
,
2596 struct bpf_prog
*xdp_prog
, struct bpf_map
*map
,
2597 bool exclude_ingress
);
2599 void __cpu_map_flush(struct list_head
*flush_list
);
2600 int cpu_map_enqueue(struct bpf_cpu_map_entry
*rcpu
, struct xdp_frame
*xdpf
,
2601 struct net_device
*dev_rx
);
2602 int cpu_map_generic_redirect(struct bpf_cpu_map_entry
*rcpu
,
2603 struct sk_buff
*skb
);
/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}
2612 struct bpf_prog
*bpf_prog_get_type_path(const char *name
, enum bpf_prog_type type
);
2613 int array_map_alloc_check(union bpf_attr
*attr
);
2615 int bpf_prog_test_run_xdp(struct bpf_prog
*prog
, const union bpf_attr
*kattr
,
2616 union bpf_attr __user
*uattr
);
2617 int bpf_prog_test_run_skb(struct bpf_prog
*prog
, const union bpf_attr
*kattr
,
2618 union bpf_attr __user
*uattr
);
2619 int bpf_prog_test_run_tracing(struct bpf_prog
*prog
,
2620 const union bpf_attr
*kattr
,
2621 union bpf_attr __user
*uattr
);
2622 int bpf_prog_test_run_flow_dissector(struct bpf_prog
*prog
,
2623 const union bpf_attr
*kattr
,
2624 union bpf_attr __user
*uattr
);
2625 int bpf_prog_test_run_raw_tp(struct bpf_prog
*prog
,
2626 const union bpf_attr
*kattr
,
2627 union bpf_attr __user
*uattr
);
2628 int bpf_prog_test_run_sk_lookup(struct bpf_prog
*prog
,
2629 const union bpf_attr
*kattr
,
2630 union bpf_attr __user
*uattr
);
2631 int bpf_prog_test_run_nf(struct bpf_prog
*prog
,
2632 const union bpf_attr
*kattr
,
2633 union bpf_attr __user
*uattr
);
2634 bool btf_ctx_access(int off
, int size
, enum bpf_access_type type
,
2635 const struct bpf_prog
*prog
,
2636 struct bpf_insn_access_aux
*info
);
2638 static inline bool bpf_tracing_ctx_access(int off
, int size
,
2639 enum bpf_access_type type
)
2641 if (off
< 0 || off
>= sizeof(__u64
) * MAX_BPF_FUNC_ARGS
)
2643 if (type
!= BPF_READ
)
2645 if (off
% size
!= 0)
2650 static inline bool bpf_tracing_btf_ctx_access(int off
, int size
,
2651 enum bpf_access_type type
,
2652 const struct bpf_prog
*prog
,
2653 struct bpf_insn_access_aux
*info
)
2655 if (!bpf_tracing_ctx_access(off
, size
, type
))
2657 return btf_ctx_access(off
, size
, type
, prog
, info
);
2660 int btf_struct_access(struct bpf_verifier_log
*log
,
2661 const struct bpf_reg_state
*reg
,
2662 int off
, int size
, enum bpf_access_type atype
,
2663 u32
*next_btf_id
, enum bpf_type_flag
*flag
, const char **field_name
);
2664 bool btf_struct_ids_match(struct bpf_verifier_log
*log
,
2665 const struct btf
*btf
, u32 id
, int off
,
2666 const struct btf
*need_btf
, u32 need_type_id
,
2669 int btf_distill_func_proto(struct bpf_verifier_log
*log
,
2671 const struct btf_type
*func_proto
,
2672 const char *func_name
,
2673 struct btf_func_model
*m
);
2675 struct bpf_reg_state
;
2676 int btf_prepare_func_args(struct bpf_verifier_env
*env
, int subprog
);
2677 int btf_check_type_match(struct bpf_verifier_log
*log
, const struct bpf_prog
*prog
,
2678 struct btf
*btf
, const struct btf_type
*t
);
2679 const char *btf_find_decl_tag_value(const struct btf
*btf
, const struct btf_type
*pt
,
2680 int comp_idx
, const char *tag_key
);
2681 int btf_find_next_decl_tag(const struct btf
*btf
, const struct btf_type
*pt
,
2682 int comp_idx
, const char *tag_key
, int last_id
);
2684 struct bpf_prog
*bpf_prog_by_id(u32 id
);
2685 struct bpf_link
*bpf_link_by_id(u32 id
);
2687 const struct bpf_func_proto
*bpf_base_func_proto(enum bpf_func_id func_id
,
2688 const struct bpf_prog
*prog
);
2689 void bpf_task_storage_free(struct task_struct
*task
);
2690 void bpf_cgrp_storage_free(struct cgroup
*cgroup
);
2691 bool bpf_prog_has_kfunc_call(const struct bpf_prog
*prog
);
2692 const struct btf_func_model
*
2693 bpf_jit_find_kfunc_model(const struct bpf_prog
*prog
,
2694 const struct bpf_insn
*insn
);
2695 int bpf_get_kfunc_addr(const struct bpf_prog
*prog
, u32 func_id
,
2696 u16 btf_fd_idx
, u8
**func_addr
);
2698 struct bpf_core_ctx
{
2699 struct bpf_verifier_log
*log
;
2700 const struct btf
*btf
;
2703 bool btf_nested_type_is_trusted(struct bpf_verifier_log
*log
,
2704 const struct bpf_reg_state
*reg
,
2705 const char *field_name
, u32 btf_id
, const char *suffix
);
2707 bool btf_type_ids_nocast_alias(struct bpf_verifier_log
*log
,
2708 const struct btf
*reg_btf
, u32 reg_id
,
2709 const struct btf
*arg_btf
, u32 arg_id
);
2711 int bpf_core_apply(struct bpf_core_ctx
*ctx
, const struct bpf_core_relo
*relo
,
2712 int relo_idx
, void *insn
);
2714 static inline bool unprivileged_ebpf_enabled(void)
2716 return !sysctl_unprivileged_bpf_disabled
;
/* Not all bpf prog types have a bpf_ctx.
 * For prog types that do initialize the bpf_ctx,
 * this function can be used to decide whether a kernel function
 * is being called by a bpf program.
 */
static inline bool has_current_bpf_ctx(void)
{
	return !!current->bpf_ctx;
}
2729 void notrace
bpf_prog_inc_misses_counter(struct bpf_prog
*prog
);
2731 void bpf_dynptr_init(struct bpf_dynptr_kern
*ptr
, void *data
,
2732 enum bpf_dynptr_type type
, u32 offset
, u32 size
);
2733 void bpf_dynptr_set_null(struct bpf_dynptr_kern
*ptr
);
2734 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern
*ptr
);
2736 #else /* !CONFIG_BPF_SYSCALL */
2737 static inline struct bpf_prog
*bpf_prog_get(u32 ufd
)
2739 return ERR_PTR(-EOPNOTSUPP
);
2742 static inline struct bpf_prog
*bpf_prog_get_type_dev(u32 ufd
,
2743 enum bpf_prog_type type
,
2746 return ERR_PTR(-EOPNOTSUPP
);
2749 static inline void bpf_prog_add(struct bpf_prog
*prog
, int i
)
2753 static inline void bpf_prog_sub(struct bpf_prog
*prog
, int i
)
2757 static inline void bpf_prog_put(struct bpf_prog
*prog
)
2761 static inline void bpf_prog_inc(struct bpf_prog
*prog
)
2765 static inline struct bpf_prog
*__must_check
2766 bpf_prog_inc_not_zero(struct bpf_prog
*prog
)
2768 return ERR_PTR(-EOPNOTSUPP
);
2771 static inline void bpf_link_init(struct bpf_link
*link
, enum bpf_link_type type
,
2772 const struct bpf_link_ops
*ops
,
2773 struct bpf_prog
*prog
)
2777 static inline void bpf_link_init_sleepable(struct bpf_link
*link
, enum bpf_link_type type
,
2778 const struct bpf_link_ops
*ops
, struct bpf_prog
*prog
,
2783 static inline int bpf_link_prime(struct bpf_link
*link
,
2784 struct bpf_link_primer
*primer
)
2789 static inline int bpf_link_settle(struct bpf_link_primer
*primer
)
2794 static inline void bpf_link_cleanup(struct bpf_link_primer
*primer
)
2798 static inline void bpf_link_inc(struct bpf_link
*link
)
2802 static inline struct bpf_link
*bpf_link_inc_not_zero(struct bpf_link
*link
)
2807 static inline void bpf_link_put(struct bpf_link
*link
)
2811 static inline int bpf_obj_get_user(const char __user
*pathname
, int flags
)
2816 static inline bool bpf_token_capable(const struct bpf_token
*token
, int cap
)
2818 return capable(cap
) || (cap
!= CAP_SYS_ADMIN
&& capable(CAP_SYS_ADMIN
));
2821 static inline void bpf_token_inc(struct bpf_token
*token
)
2825 static inline void bpf_token_put(struct bpf_token
*token
)
2829 static inline struct bpf_token
*bpf_token_get_from_fd(u32 ufd
)
2831 return ERR_PTR(-EOPNOTSUPP
);
2834 static inline void __dev_flush(struct list_head
*flush_list
)
2839 struct bpf_dtab_netdev
;
2840 struct bpf_cpu_map_entry
;
2843 int dev_xdp_enqueue(struct net_device
*dev
, struct xdp_frame
*xdpf
,
2844 struct net_device
*dev_rx
)
2850 int dev_map_enqueue(struct bpf_dtab_netdev
*dst
, struct xdp_frame
*xdpf
,
2851 struct net_device
*dev_rx
)
2857 int dev_map_enqueue_multi(struct xdp_frame
*xdpf
, struct net_device
*dev_rx
,
2858 struct bpf_map
*map
, bool exclude_ingress
)
2865 static inline int dev_map_generic_redirect(struct bpf_dtab_netdev
*dst
,
2866 struct sk_buff
*skb
,
2867 struct bpf_prog
*xdp_prog
)
2873 int dev_map_redirect_multi(struct net_device
*dev
, struct sk_buff
*skb
,
2874 struct bpf_prog
*xdp_prog
, struct bpf_map
*map
,
2875 bool exclude_ingress
)
2880 static inline void __cpu_map_flush(struct list_head
*flush_list
)
2884 static inline int cpu_map_enqueue(struct bpf_cpu_map_entry
*rcpu
,
2885 struct xdp_frame
*xdpf
,
2886 struct net_device
*dev_rx
)
2891 static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry
*rcpu
,
2892 struct sk_buff
*skb
)
2897 static inline struct bpf_prog
*bpf_prog_get_type_path(const char *name
,
2898 enum bpf_prog_type type
)
2900 return ERR_PTR(-EOPNOTSUPP
);
2903 static inline int bpf_prog_test_run_xdp(struct bpf_prog
*prog
,
2904 const union bpf_attr
*kattr
,
2905 union bpf_attr __user
*uattr
)
2910 static inline int bpf_prog_test_run_skb(struct bpf_prog
*prog
,
2911 const union bpf_attr
*kattr
,
2912 union bpf_attr __user
*uattr
)
2917 static inline int bpf_prog_test_run_tracing(struct bpf_prog
*prog
,
2918 const union bpf_attr
*kattr
,
2919 union bpf_attr __user
*uattr
)
2924 static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog
*prog
,
2925 const union bpf_attr
*kattr
,
2926 union bpf_attr __user
*uattr
)
2931 static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog
*prog
,
2932 const union bpf_attr
*kattr
,
2933 union bpf_attr __user
*uattr
)
2938 static inline void bpf_map_put(struct bpf_map
*map
)
2942 static inline struct bpf_prog
*bpf_prog_by_id(u32 id
)
2944 return ERR_PTR(-ENOTSUPP
);
2947 static inline int btf_struct_access(struct bpf_verifier_log
*log
,
2948 const struct bpf_reg_state
*reg
,
2949 int off
, int size
, enum bpf_access_type atype
,
2950 u32
*next_btf_id
, enum bpf_type_flag
*flag
,
2951 const char **field_name
)
2956 static inline const struct bpf_func_proto
*
2957 bpf_base_func_proto(enum bpf_func_id func_id
, const struct bpf_prog
*prog
)
2962 static inline void bpf_task_storage_free(struct task_struct
*task
)
2966 static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog
*prog
)
2971 static inline const struct btf_func_model
*
2972 bpf_jit_find_kfunc_model(const struct bpf_prog
*prog
,
2973 const struct bpf_insn
*insn
)
2979 bpf_get_kfunc_addr(const struct bpf_prog
*prog
, u32 func_id
,
2980 u16 btf_fd_idx
, u8
**func_addr
)
2985 static inline bool unprivileged_ebpf_enabled(void)
2990 static inline bool has_current_bpf_ctx(void)
2995 static inline void bpf_prog_inc_misses_counter(struct bpf_prog
*prog
)
2999 static inline void bpf_cgrp_storage_free(struct cgroup
*cgroup
)
3003 static inline void bpf_dynptr_init(struct bpf_dynptr_kern
*ptr
, void *data
,
3004 enum bpf_dynptr_type type
, u32 offset
, u32 size
)
3008 static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern
*ptr
)
3012 static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern
*ptr
)
3015 #endif /* CONFIG_BPF_SYSCALL */
static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = -EFAULT;

	if (IS_ENABLED(CONFIG_BPF_EVENTS))
		ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}
3029 void __bpf_free_used_btfs(struct btf_mod_pair
*used_btfs
, u32 len
);
3031 static inline struct bpf_prog
*bpf_prog_get_type(u32 ufd
,
3032 enum bpf_prog_type type
)
3034 return bpf_prog_get_type_dev(ufd
, type
, false);
3037 void __bpf_free_used_maps(struct bpf_prog_aux
*aux
,
3038 struct bpf_map
**used_maps
, u32 len
);
3040 bool bpf_prog_get_ok(struct bpf_prog
*, enum bpf_prog_type
*, bool);
3042 int bpf_prog_offload_compile(struct bpf_prog
*prog
);
3043 void bpf_prog_dev_bound_destroy(struct bpf_prog
*prog
);
3044 int bpf_prog_offload_info_fill(struct bpf_prog_info
*info
,
3045 struct bpf_prog
*prog
);
3047 int bpf_map_offload_info_fill(struct bpf_map_info
*info
, struct bpf_map
*map
);
3049 int bpf_map_offload_lookup_elem(struct bpf_map
*map
, void *key
, void *value
);
3050 int bpf_map_offload_update_elem(struct bpf_map
*map
,
3051 void *key
, void *value
, u64 flags
);
3052 int bpf_map_offload_delete_elem(struct bpf_map
*map
, void *key
);
3053 int bpf_map_offload_get_next_key(struct bpf_map
*map
,
3054 void *key
, void *next_key
);
3056 bool bpf_offload_prog_map_match(struct bpf_prog
*prog
, struct bpf_map
*map
);
3058 struct bpf_offload_dev
*
3059 bpf_offload_dev_create(const struct bpf_prog_offload_ops
*ops
, void *priv
);
3060 void bpf_offload_dev_destroy(struct bpf_offload_dev
*offdev
);
3061 void *bpf_offload_dev_priv(struct bpf_offload_dev
*offdev
);
3062 int bpf_offload_dev_netdev_register(struct bpf_offload_dev
*offdev
,
3063 struct net_device
*netdev
);
3064 void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev
*offdev
,
3065 struct net_device
*netdev
);
3066 bool bpf_offload_dev_match(struct bpf_prog
*prog
, struct net_device
*netdev
);
3068 void unpriv_ebpf_notify(int new_state
);
3070 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
3071 int bpf_dev_bound_kfunc_check(struct bpf_verifier_log
*log
,
3072 struct bpf_prog_aux
*prog_aux
);
3073 void *bpf_dev_bound_resolve_kfunc(struct bpf_prog
*prog
, u32 func_id
);
3074 int bpf_prog_dev_bound_init(struct bpf_prog
*prog
, union bpf_attr
*attr
);
3075 int bpf_prog_dev_bound_inherit(struct bpf_prog
*new_prog
, struct bpf_prog
*old_prog
);
3076 void bpf_dev_bound_netdev_unregister(struct net_device
*dev
);
3078 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux
*aux
)
3080 return aux
->dev_bound
;
3083 static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux
*aux
)
3085 return aux
->offload_requested
;
3088 bool bpf_prog_dev_bound_match(const struct bpf_prog
*lhs
, const struct bpf_prog
*rhs
);
3090 static inline bool bpf_map_is_offloaded(struct bpf_map
*map
)
3092 return unlikely(map
->ops
== &bpf_map_offload_ops
);
3095 struct bpf_map
*bpf_map_offload_map_alloc(union bpf_attr
*attr
);
3096 void bpf_map_offload_map_free(struct bpf_map
*map
);
3097 u64
bpf_map_offload_map_mem_usage(const struct bpf_map
*map
);
3098 int bpf_prog_test_run_syscall(struct bpf_prog
*prog
,
3099 const union bpf_attr
*kattr
,
3100 union bpf_attr __user
*uattr
);
3102 int sock_map_get_from_fd(const union bpf_attr
*attr
, struct bpf_prog
*prog
);
3103 int sock_map_prog_detach(const union bpf_attr
*attr
, enum bpf_prog_type ptype
);
3104 int sock_map_update_elem_sys(struct bpf_map
*map
, void *key
, void *value
, u64 flags
);
3105 int sock_map_bpf_prog_query(const union bpf_attr
*attr
,
3106 union bpf_attr __user
*uattr
);
3107 int sock_map_link_create(const union bpf_attr
*attr
, struct bpf_prog
*prog
);
3109 void sock_map_unhash(struct sock
*sk
);
3110 void sock_map_destroy(struct sock
*sk
);
3111 void sock_map_close(struct sock
*sk
, long timeout
);
3113 static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log
*log
,
3114 struct bpf_prog_aux
*prog_aux
)
3119 static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog
*prog
,
3125 static inline int bpf_prog_dev_bound_init(struct bpf_prog
*prog
,
3126 union bpf_attr
*attr
)
3131 static inline int bpf_prog_dev_bound_inherit(struct bpf_prog
*new_prog
,
3132 struct bpf_prog
*old_prog
)
3137 static inline void bpf_dev_bound_netdev_unregister(struct net_device
*dev
)
3141 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux
*aux
)
3146 static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux
*aux
)
3151 static inline bool bpf_prog_dev_bound_match(const struct bpf_prog
*lhs
, const struct bpf_prog
*rhs
)
3156 static inline bool bpf_map_is_offloaded(struct bpf_map
*map
)
3161 static inline struct bpf_map
*bpf_map_offload_map_alloc(union bpf_attr
*attr
)
3163 return ERR_PTR(-EOPNOTSUPP
);
3166 static inline void bpf_map_offload_map_free(struct bpf_map
*map
)
3170 static inline u64
bpf_map_offload_map_mem_usage(const struct bpf_map
*map
)
3175 static inline int bpf_prog_test_run_syscall(struct bpf_prog
*prog
,
3176 const union bpf_attr
*kattr
,
3177 union bpf_attr __user
*uattr
)
3182 #ifdef CONFIG_BPF_SYSCALL
3183 static inline int sock_map_get_from_fd(const union bpf_attr
*attr
,
3184 struct bpf_prog
*prog
)
3189 static inline int sock_map_prog_detach(const union bpf_attr
*attr
,
3190 enum bpf_prog_type ptype
)
3195 static inline int sock_map_update_elem_sys(struct bpf_map
*map
, void *key
, void *value
,
3201 static inline int sock_map_bpf_prog_query(const union bpf_attr
*attr
,
3202 union bpf_attr __user
*uattr
)
3207 static inline int sock_map_link_create(const union bpf_attr
*attr
, struct bpf_prog
*prog
)
3211 #endif /* CONFIG_BPF_SYSCALL */
3212 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
static __always_inline void
bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
{
	const struct bpf_prog_array_item *item;
	struct bpf_prog *prog;

	if (unlikely(!array))
		return;

	item = &array->items[0];
	while ((prog = READ_ONCE(item->prog))) {
		bpf_prog_inc_misses_counter(prog);
		item++;
	}
}
3230 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
3231 void bpf_sk_reuseport_detach(struct sock
*sk
);
3232 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map
*map
, void *key
,
3234 int bpf_fd_reuseport_array_update_elem(struct bpf_map
*map
, void *key
,
3235 void *value
, u64 map_flags
);
3237 static inline void bpf_sk_reuseport_detach(struct sock
*sk
)
3241 #ifdef CONFIG_BPF_SYSCALL
3242 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map
*map
,
3243 void *key
, void *value
)
3248 static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map
*map
,
3249 void *key
, void *value
,
3254 #endif /* CONFIG_BPF_SYSCALL */
3255 #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
3257 /* verifier prototypes for helper functions called from eBPF programs */
3258 extern const struct bpf_func_proto bpf_map_lookup_elem_proto
;
3259 extern const struct bpf_func_proto bpf_map_update_elem_proto
;
3260 extern const struct bpf_func_proto bpf_map_delete_elem_proto
;
3261 extern const struct bpf_func_proto bpf_map_push_elem_proto
;
3262 extern const struct bpf_func_proto bpf_map_pop_elem_proto
;
3263 extern const struct bpf_func_proto bpf_map_peek_elem_proto
;
3264 extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto
;
3266 extern const struct bpf_func_proto bpf_get_prandom_u32_proto
;
3267 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto
;
3268 extern const struct bpf_func_proto bpf_get_numa_node_id_proto
;
3269 extern const struct bpf_func_proto bpf_tail_call_proto
;
3270 extern const struct bpf_func_proto bpf_ktime_get_ns_proto
;
3271 extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto
;
3272 extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto
;
3273 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto
;
3274 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto
;
3275 extern const struct bpf_func_proto bpf_get_current_comm_proto
;
3276 extern const struct bpf_func_proto bpf_get_stackid_proto
;
3277 extern const struct bpf_func_proto bpf_get_stack_proto
;
3278 extern const struct bpf_func_proto bpf_get_stack_sleepable_proto
;
3279 extern const struct bpf_func_proto bpf_get_task_stack_proto
;
3280 extern const struct bpf_func_proto bpf_get_task_stack_sleepable_proto
;
3281 extern const struct bpf_func_proto bpf_get_stackid_proto_pe
;
3282 extern const struct bpf_func_proto bpf_get_stack_proto_pe
;
3283 extern const struct bpf_func_proto bpf_sock_map_update_proto
;
3284 extern const struct bpf_func_proto bpf_sock_hash_update_proto
;
3285 extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto
;
3286 extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto
;
3287 extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto
;
3288 extern const struct bpf_func_proto bpf_current_task_under_cgroup_proto
;
3289 extern const struct bpf_func_proto bpf_msg_redirect_hash_proto
;
3290 extern const struct bpf_func_proto bpf_msg_redirect_map_proto
;
3291 extern const struct bpf_func_proto bpf_sk_redirect_hash_proto
;
3292 extern const struct bpf_func_proto bpf_sk_redirect_map_proto
;
3293 extern const struct bpf_func_proto bpf_spin_lock_proto
;
3294 extern const struct bpf_func_proto bpf_spin_unlock_proto
;
3295 extern const struct bpf_func_proto bpf_get_local_storage_proto
;
3296 extern const struct bpf_func_proto bpf_strtol_proto
;
3297 extern const struct bpf_func_proto bpf_strtoul_proto
;
3298 extern const struct bpf_func_proto bpf_tcp_sock_proto
;
3299 extern const struct bpf_func_proto bpf_jiffies64_proto
;
3300 extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto
;
3301 extern const struct bpf_func_proto bpf_event_output_data_proto
;
3302 extern const struct bpf_func_proto bpf_ringbuf_output_proto
;
3303 extern const struct bpf_func_proto bpf_ringbuf_reserve_proto
;
3304 extern const struct bpf_func_proto bpf_ringbuf_submit_proto
;
3305 extern const struct bpf_func_proto bpf_ringbuf_discard_proto
;
3306 extern const struct bpf_func_proto bpf_ringbuf_query_proto
;
3307 extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto
;
3308 extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto
;
3309 extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto
;
3310 extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto
;
3311 extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto
;
3312 extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto
;
3313 extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto
;
3314 extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto
;
3315 extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto
;
3316 extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto
;
3317 extern const struct bpf_func_proto bpf_copy_from_user_proto
;
3318 extern const struct bpf_func_proto bpf_snprintf_btf_proto
;
3319 extern const struct bpf_func_proto bpf_snprintf_proto
;
3320 extern const struct bpf_func_proto bpf_per_cpu_ptr_proto
;
3321 extern const struct bpf_func_proto bpf_this_cpu_ptr_proto
;
3322 extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto
;
3323 extern const struct bpf_func_proto bpf_sock_from_file_proto
;
3324 extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto
;
3325 extern const struct bpf_func_proto bpf_task_storage_get_recur_proto
;
3326 extern const struct bpf_func_proto bpf_task_storage_get_proto
;
3327 extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto
;
3328 extern const struct bpf_func_proto bpf_task_storage_delete_proto
;
3329 extern const struct bpf_func_proto bpf_for_each_map_elem_proto
;
3330 extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto
;
3331 extern const struct bpf_func_proto bpf_sk_setsockopt_proto
;
3332 extern const struct bpf_func_proto bpf_sk_getsockopt_proto
;
3333 extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto
;
3334 extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto
;
3335 extern const struct bpf_func_proto bpf_find_vma_proto
;
3336 extern const struct bpf_func_proto bpf_loop_proto
;
3337 extern const struct bpf_func_proto bpf_copy_from_user_task_proto
;
3338 extern const struct bpf_func_proto bpf_set_retval_proto
;
3339 extern const struct bpf_func_proto bpf_get_retval_proto
;
3340 extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto
;
3341 extern const struct bpf_func_proto bpf_cgrp_storage_get_proto
;
3342 extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto
;
3344 const struct bpf_func_proto
*tracing_prog_func_proto(
3345 enum bpf_func_id func_id
, const struct bpf_prog
*prog
);
3347 /* Shared helpers among cBPF and eBPF. */
3348 void bpf_user_rnd_init_once(void);
3349 u64
bpf_user_rnd_u32(u64 r1
, u64 r2
, u64 r3
, u64 r4
, u64 r5
);
3350 u64
bpf_get_raw_cpu_id(u64 r1
, u64 r2
, u64 r3
, u64 r4
, u64 r5
);
3352 #if defined(CONFIG_NET)
3353 bool bpf_sock_common_is_valid_access(int off
, int size
,
3354 enum bpf_access_type type
,
3355 struct bpf_insn_access_aux
*info
);
3356 bool bpf_sock_is_valid_access(int off
, int size
, enum bpf_access_type type
,
3357 struct bpf_insn_access_aux
*info
);
3358 u32
bpf_sock_convert_ctx_access(enum bpf_access_type type
,
3359 const struct bpf_insn
*si
,
3360 struct bpf_insn
*insn_buf
,
3361 struct bpf_prog
*prog
,
3363 int bpf_dynptr_from_skb_rdonly(struct __sk_buff
*skb
, u64 flags
,
3364 struct bpf_dynptr
*ptr
);
3366 static inline bool bpf_sock_common_is_valid_access(int off
, int size
,
3367 enum bpf_access_type type
,
3368 struct bpf_insn_access_aux
*info
)
3372 static inline bool bpf_sock_is_valid_access(int off
, int size
,
3373 enum bpf_access_type type
,
3374 struct bpf_insn_access_aux
*info
)
3378 static inline u32
bpf_sock_convert_ctx_access(enum bpf_access_type type
,
3379 const struct bpf_insn
*si
,
3380 struct bpf_insn
*insn_buf
,
3381 struct bpf_prog
*prog
,
3386 static inline int bpf_dynptr_from_skb_rdonly(struct __sk_buff
*skb
, u64 flags
,
3387 struct bpf_dynptr
*ptr
)
3394 struct sk_reuseport_kern
{
3395 struct sk_buff
*skb
;
3397 struct sock
*selected_sk
;
3398 struct sock
*migrating_sk
;
3404 bool bpf_tcp_sock_is_valid_access(int off
, int size
, enum bpf_access_type type
,
3405 struct bpf_insn_access_aux
*info
);
3407 u32
bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type
,
3408 const struct bpf_insn
*si
,
3409 struct bpf_insn
*insn_buf
,
3410 struct bpf_prog
*prog
,
3413 bool bpf_xdp_sock_is_valid_access(int off
, int size
, enum bpf_access_type type
,
3414 struct bpf_insn_access_aux
*info
);
3416 u32
bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type
,
3417 const struct bpf_insn
*si
,
3418 struct bpf_insn
*insn_buf
,
3419 struct bpf_prog
*prog
,
3422 static inline bool bpf_tcp_sock_is_valid_access(int off
, int size
,
3423 enum bpf_access_type type
,
3424 struct bpf_insn_access_aux
*info
)
3429 static inline u32
bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type
,
3430 const struct bpf_insn
*si
,
3431 struct bpf_insn
*insn_buf
,
3432 struct bpf_prog
*prog
,
3437 static inline bool bpf_xdp_sock_is_valid_access(int off
, int size
,
3438 enum bpf_access_type type
,
3439 struct bpf_insn_access_aux
*info
)
3444 static inline u32
bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type
,
3445 const struct bpf_insn
*si
,
3446 struct bpf_insn
*insn_buf
,
3447 struct bpf_prog
*prog
,
3452 #endif /* CONFIG_INET */
3454 enum bpf_text_poke_type
{
3459 int bpf_arch_text_poke(void *ip
, enum bpf_text_poke_type t
,
3460 void *addr1
, void *addr2
);
3462 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor
*poke
,
3463 struct bpf_prog
*new, struct bpf_prog
*old
);
3465 void *bpf_arch_text_copy(void *dst
, void *src
, size_t len
);
3466 int bpf_arch_text_invalidate(void *dst
, size_t len
);
3469 bool btf_id_set_contains(const struct btf_id_set
*set
, u32 id
);
3471 #define MAX_BPRINTF_VARARGS 12
3472 #define MAX_BPRINTF_BUF 1024
3474 struct bpf_bprintf_data
{
3481 int bpf_bprintf_prepare(char *fmt
, u32 fmt_size
, const u64
*raw_args
,
3482 u32 num_args
, struct bpf_bprintf_data
*data
);
3483 void bpf_bprintf_cleanup(struct bpf_bprintf_data
*data
);
3485 #ifdef CONFIG_BPF_LSM
3486 void bpf_cgroup_atype_get(u32 attach_btf_id
, int cgroup_atype
);
3487 void bpf_cgroup_atype_put(int cgroup_atype
);
3489 static inline void bpf_cgroup_atype_get(u32 attach_btf_id
, int cgroup_atype
) {}
3490 static inline void bpf_cgroup_atype_put(int cgroup_atype
) {}
3491 #endif /* CONFIG_BPF_LSM */
3500 #endif /* CONFIG_KEYS */
3502 static inline bool type_is_alloc(u32 type
)
3504 return type
& MEM_ALLOC
;
3507 static inline gfp_t
bpf_memcg_flags(gfp_t flags
)
3509 if (memcg_bpf_enabled())
3510 return flags
| __GFP_ACCOUNT
;
3514 static inline bool bpf_is_subprog(const struct bpf_prog
*prog
)
3516 return prog
->aux
->func_idx
!= 0;
3519 static inline bool bpf_prog_is_raw_tp(const struct bpf_prog
*prog
)
3521 return prog
->type
== BPF_PROG_TYPE_TRACING
&&
3522 prog
->expected_attach_type
== BPF_TRACE_RAW_TP
;
3525 #endif /* _LINUX_BPF_H */