#ifndef __BPF_EXPERIMENTAL__
#define __BPF_EXPERIMENTAL__

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))

/* Description
 *	Allocates an object of the type represented by 'local_type_id' in
 *	program BTF. User may use the bpf_core_type_id_local macro to pass the
 *	type ID of a struct in program BTF.
 *
 *	The 'local_type_id' parameter must be a known constant.
 *	The 'meta' parameter is rewritten by the verifier, no need for BPF
 *	programs to set it
 * Returns
 *	A pointer to an object of the type corresponding to the passed in
 *	'local_type_id', or NULL on failure.
 */
extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;

/* Convenience macro to wrap over bpf_obj_new_impl */
#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))

/* Description
 *	Free an allocated object. All fields of the object that require
 *	destruction will be destructed before the storage is freed.
 *
 *	The 'meta' parameter is rewritten by the verifier, no need for BPF
 *	programs to set it
 * Returns
 *	Void.
 */
extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_obj_drop_impl */
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)

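/* Example usage (an illustrative sketch, not part of this header; 'struct foo'
 * is a hypothetical program-local type):
 *
 *	struct foo {
 *		int data;
 *	};
 *
 *	void example(void)
 *	{
 *		struct foo *f = bpf_obj_new(struct foo);
 *
 *		if (!f)
 *			return;
 *		f->data = 42;
 *		bpf_obj_drop(f);
 *	}
 */
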
/* Description
 *	Increment the refcount on a refcounted local kptr, turning the
 *	non-owning reference input into an owning reference in the process.
 *
 *	The 'meta' parameter is rewritten by the verifier, no need for BPF
 *	programs to set it
 * Returns
 *	An owning reference to the object pointed to by 'kptr'
 */
extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_refcount_acquire_impl */
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)

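/* Example usage (a sketch; assumes a hypothetical program-local type with a
 * 'struct bpf_refcount' field, which is what makes it a refcounted local
 * kptr):
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_refcount ref;
 *		struct bpf_rb_node r;
 *	};
 *
 *	struct node_data *n = bpf_obj_new(struct node_data);
 *	struct node_data *m;
 *
 *	if (!n)
 *		return 0;
 *	m = bpf_refcount_acquire(n);
 *	// 'n' and 'm' are now both owning references, e.g. each may be
 *	// added to a separate collection
 */
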
/* Description
 *	Add a new entry to the beginning of the BPF linked list.
 *
 *	The 'meta' and 'off' parameters are rewritten by the verifier, no need
 *	for BPF programs to set them
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a list
 */
extern int bpf_list_push_front_impl(struct bpf_list_head *head,
				    struct bpf_list_node *node,
				    void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_list_push_front_impl */
#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)

/* Description
 *	Add a new entry to the end of the BPF linked list.
 *
 *	The 'meta' and 'off' parameters are rewritten by the verifier, no need
 *	for BPF programs to set them
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a list
 */
extern int bpf_list_push_back_impl(struct bpf_list_head *head,
				   struct bpf_list_node *node,
				   void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_list_push_back_impl */
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)

/* Description
 *	Remove the entry at the beginning of the BPF linked list.
 * Returns
 *	Pointer to bpf_list_node of deleted entry, or NULL if list is empty.
 */
extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;

/* Description
 *	Remove the entry at the end of the BPF linked list.
 * Returns
 *	Pointer to bpf_list_node of deleted entry, or NULL if list is empty.
 */
extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;

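/* Example usage of the list kfuncs above (a sketch; the list head and its
 * bpf_spin_lock must live in an allowed location such as global data, and all
 * list operations must be performed under that lock. 'elem', 'ghead', 'glock'
 * and the private() wrapper are hypothetical, following the pattern used in
 * the BPF selftests; container_of() is assumed to be provided by another
 * header):
 *
 *	#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
 *
 *	struct elem {
 *		long data;
 *		struct bpf_list_node node;
 *	};
 *
 *	private(A) struct bpf_spin_lock glock;
 *	private(A) struct bpf_list_head ghead __contains(elem, node);
 *
 *	int push_pop(void)
 *	{
 *		struct elem *e = bpf_obj_new(struct elem);
 *		struct bpf_list_node *n;
 *
 *		if (!e)
 *			return 0;
 *		bpf_spin_lock(&glock);
 *		bpf_list_push_front(&ghead, &e->node);
 *		n = bpf_list_pop_back(&ghead);
 *		bpf_spin_unlock(&glock);
 *		if (n)
 *			bpf_obj_drop(container_of(n, struct elem, node));
 *		return 0;
 *	}
 */
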
/* Description
 *	Remove 'node' from rbtree with root 'root'
 * Returns
 *	Pointer to the removed node, or NULL if 'root' didn't contain 'node'
 */
extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
					     struct bpf_rb_node *node) __ksym;

/* Description
 *	Add 'node' to rbtree with root 'root' using comparator 'less'
 *
 *	The 'meta' and 'off' parameters are rewritten by the verifier, no need
 *	for BPF programs to set them
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a tree
 */
extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
			       bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
			       void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_rbtree_add_impl */
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)

/* Description
 *	Return the first (leftmost) node in input tree
 * Returns
 *	Pointer to the node, which is _not_ removed from the tree. If the tree
 *	contains no nodes, returns NULL.
 */
extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;

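/* Example usage of the rbtree kfuncs above (a sketch; as with lists, the
 * bpf_rb_root and its lock must live in an allowed location and operations
 * must hold the lock. 'node_data', 'groot', 'glock' and container_of() are
 * assumptions, not part of this header):
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_rb_node r;
 *	};
 *
 *	private(B) struct bpf_spin_lock glock;
 *	private(B) struct bpf_rb_root groot __contains(node_data, r);
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, r);
 *		struct node_data *nb = container_of(b, struct node_data, r);
 *
 *		return na->key < nb->key;
 *	}
 *
 *	Then, with an owning reference 'n' of type struct node_data *:
 *
 *	bpf_spin_lock(&glock);
 *	bpf_rbtree_add(&groot, &n->r, less);
 *	res = bpf_rbtree_first(&groot);
 *	bpf_spin_unlock(&glock);
 */
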
/* Description
 *	Allocates a percpu object of the type represented by 'local_type_id' in
 *	program BTF. User may use the bpf_core_type_id_local macro to pass the
 *	type ID of a struct in program BTF.
 *
 *	The 'local_type_id' parameter must be a known constant.
 *	The 'meta' parameter is rewritten by the verifier, no need for BPF
 *	programs to set it
 * Returns
 *	A pointer to a percpu object of the type corresponding to the passed in
 *	'local_type_id', or NULL on failure.
 */
extern void *bpf_percpu_obj_new_impl(__u64 local_type_id, void *meta) __ksym;

/* Convenience macro to wrap over bpf_percpu_obj_new_impl */
#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new_impl(bpf_core_type_id_local(type), NULL))

/* Description
 *	Free an allocated percpu object. All fields of the object that require
 *	destruction will be destructed before the storage is freed.
 *
 *	The 'meta' parameter is rewritten by the verifier, no need for BPF
 *	programs to set it
 * Returns
 *	Void.
 */
extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym;

struct bpf_iter_task_vma;

extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
				 struct task_struct *task,
				 __u64 addr) __ksym;
extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym;
extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym;

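/* Example usage (a sketch; iterates the VMAs of 'task' starting at address 0.
 * The new/next/destroy triple shown here is the common pattern for all
 * open-coded iterators declared in this header):
 *
 *	struct bpf_iter_task_vma vma_it;
 *	struct vm_area_struct *vma;
 *
 *	bpf_iter_task_vma_new(&vma_it, task, 0);
 *	while ((vma = bpf_iter_task_vma_next(&vma_it)) != NULL) {
 *		... inspect vma ...
 *	}
 *	bpf_iter_task_vma_destroy(&vma_it);
 */
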
/* Convenience macro to wrap over bpf_percpu_obj_drop_impl */
#define bpf_percpu_obj_drop(kptr) bpf_percpu_obj_drop_impl(kptr, NULL)

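/* Example usage (a sketch; a percpu object is typically published into a map
 * value's kptr field, after which each CPU's copy is accessed via
 * bpf_per_cpu_ptr() or bpf_this_cpu_ptr(). 'struct counter' is hypothetical):
 *
 *	struct counter {
 *		u64 hits;
 *	};
 *
 *	struct counter __percpu_kptr *c = bpf_percpu_obj_new(struct counter);
 *
 *	if (!c)
 *		return 0;
 *	... publish 'c' into a map kptr field with bpf_kptr_xchg(), or ...
 *	bpf_percpu_obj_drop(c);
 */
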
/* Description
 *	Throw a BPF exception from the program, immediately terminating its
 *	execution and unwinding the stack. The supplied 'cookie' parameter
 *	will be the return value of the program when an exception is thrown,
 *	and the default exception callback is used. Otherwise, if an exception
 *	callback is set using the '__exception_cb(callback)' declaration tag
 *	on the main program, the 'cookie' parameter will be the callback's only
 *	input argument.
 *
 *	Thus, in case of default exception callback, 'cookie' is subjected to
 *	constraints on the program's return value (as with R0 on exit).
 *	Otherwise, the return value of the marked exception callback will be
 *	subjected to the same checks.
 *
 *	Note that throwing an exception with lingering resources (locks,
 *	references, etc.) will lead to a verification error.
 *
 *	Note that callbacks *cannot* call this helper.
 * Returns
 *	Never.
 * Throws
 *	An exception with the specified 'cookie' value.
 */
extern void bpf_throw(u64 cookie) __ksym;

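/* Example usage (a sketch; with the default exception callback, the program
 * below returns the cookie value 1 whenever the condition fires):
 *
 *	SEC("tc")
 *	int prog(struct __sk_buff *ctx)
 *	{
 *		if (ctx->len > 1024)
 *			bpf_throw(1);
 *		return 0;
 *	}
 */
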
/* Description
 *	Acquire a reference on the exe_file member field belonging to the
 *	mm_struct that is nested within the supplied task_struct. The supplied
 *	task_struct must be trusted/referenced.
 * Returns
 *	A referenced file pointer pointing to the exe_file member field of the
 *	mm_struct nested in the supplied task_struct, or NULL.
 */
extern struct file *bpf_get_task_exe_file(struct task_struct *task) __ksym;

/* Description
 *	Release a reference on the supplied file. The supplied file must be
 *	acquired.
 */
extern void bpf_put_file(struct file *file) __ksym;

/* Description
 *	Resolve a pathname for the supplied path and store it in the supplied
 *	buffer. The supplied path must be trusted/referenced.
 * Returns
 *	A positive integer corresponding to the length of the resolved pathname,
 *	including the NULL termination character, stored in the supplied
 *	buffer. On error, a negative integer is returned.
 */
extern int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz) __ksym;

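/* Example usage combining the three file kfuncs above (a sketch; 'task' is
 * assumed to be a trusted task_struct pointer, e.g. from an LSM hook argument
 * or bpf_get_current_task_btf()):
 *
 *	char buf[256];
 *	struct file *exe;
 *	int len;
 *
 *	exe = bpf_get_task_exe_file(task);
 *	if (!exe)
 *		return 0;
 *	len = bpf_path_d_path(&exe->f_path, buf, sizeof(buf));
 *	bpf_put_file(exe);
 *	if (len < 0)
 *		return 0;
 *	... 'buf' now holds the NUL-terminated executable path ...
 */
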
/* This macro must be used to mark the exception callback corresponding to the
 * main program. For example:
 *
 * int exception_cb(u64 cookie) {
 *	return cookie;
 * }
 *
 * SEC("tc")
 * __exception_cb(exception_cb)
 * int main_prog(struct __sk_buff *ctx) {
 *	...
 *	return TC_ACT_OK;
 * }
 *
 * Here, exception callback for the main program will be 'exception_cb'. Note
 * that this attribute can only be used once, and multiple exception callbacks
 * specified for the main program will lead to verification error.
 */
#define __exception_cb(name) __attribute__((btf_decl_tag("exception_callback:" #name)))

#define __bpf_assert_signed(x) _Generic((x),		\
    unsigned long: 0,					\
    unsigned long long: 0,				\
    signed long: 1,					\
    signed long long: 1					\
)

#define __bpf_assert_check(LHS, op, RHS)								\
	_Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression");			\
	_Static_assert(sizeof(LHS) == 8, "Only 8-byte integers are supported\n");			\
	_Static_assert(__builtin_constant_p(__bpf_assert_signed(LHS)), "internal static assert");	\
	_Static_assert(__builtin_constant_p((RHS)), "2nd argument must be a constant expression")

#define __bpf_assert(LHS, op, cons, RHS, VAL)							\
	({											\
		(void)bpf_throw;								\
		asm volatile ("if %[lhs] " op " %[rhs] goto +2; r1 = %[value]; call bpf_throw"	\
			      : : [lhs] "r"(LHS), [rhs] cons(RHS), [value] "ri"(VAL) : );	\
	})

#define __bpf_assert_op_sign(LHS, op, cons, RHS, VAL, supp_sign)			\
	({										\
		__bpf_assert_check(LHS, op, RHS);					\
		if (__bpf_assert_signed(LHS) && !(supp_sign))				\
			__bpf_assert(LHS, "s" #op, cons, RHS, VAL);			\
		else									\
			__bpf_assert(LHS, #op, cons, RHS, VAL);				\
	})

#define __bpf_assert_op(LHS, op, RHS, VAL, supp_sign)					\
	({										\
		if (sizeof(typeof(RHS)) == 8) {						\
			const typeof(RHS) rhs_var = (RHS);				\
			__bpf_assert_op_sign(LHS, op, "r", rhs_var, VAL, supp_sign);	\
		} else {								\
			__bpf_assert_op_sign(LHS, op, "i", RHS, VAL, supp_sign);	\
		}									\
	})

#define __cmp_cannot_be_signed(x) \
	__builtin_strcmp(#x, "==") == 0 || __builtin_strcmp(#x, "!=") == 0 || \
	__builtin_strcmp(#x, "&") == 0

#define __is_signed_type(type) (((type)(-1)) < (type)1)

#define __bpf_cmp(LHS, OP, PRED, RHS, DEFAULT)						\
	({										\
		__label__ l_true;							\
		bool ret = DEFAULT;							\
		asm volatile goto("if %[lhs] " OP " %[rhs] goto %l[l_true]"		\
				  :: [lhs] "r"((short)LHS), [rhs] PRED (RHS) :: l_true); \
		ret = !DEFAULT;								\
l_true:											\
		ret;									\
	})

/* C type conversions coupled with comparison operator are tricky.
 * Make sure BPF program is compiled with -Wsign-compare then
 * __lhs OP __rhs below will catch the mistake.
 * Be aware that we check only __lhs to figure out the sign of compare.
 */
#define _bpf_cmp(LHS, OP, RHS, UNLIKELY)						\
	({										\
		typeof(LHS) __lhs = (LHS);						\
		typeof(RHS) __rhs = (RHS);						\
		bool ret;								\
		_Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression"); \
		(void)(__lhs OP __rhs);							\
		if (__cmp_cannot_be_signed(OP) || !__is_signed_type(typeof(__lhs))) {	\
			if (sizeof(__rhs) == 8)						\
				/* "i" will truncate 64-bit constant into s32,		\
				 * so we have to use extra register via "r".		\
				 */							\
				ret = __bpf_cmp(__lhs, #OP, "r", __rhs, UNLIKELY);	\
			else								\
				ret = __bpf_cmp(__lhs, #OP, "ri", __rhs, UNLIKELY);	\
		} else {								\
			if (sizeof(__rhs) == 8)						\
				ret = __bpf_cmp(__lhs, "s"#OP, "r", __rhs, UNLIKELY);	\
			else								\
				ret = __bpf_cmp(__lhs, "s"#OP, "ri", __rhs, UNLIKELY);	\
		}									\
		ret;									\
	})

#ifndef bpf_cmp_unlikely
#define bpf_cmp_unlikely(LHS, OP, RHS) _bpf_cmp(LHS, OP, RHS, true)
#endif

#ifndef bpf_cmp_likely
#define bpf_cmp_likely(LHS, OP, RHS)						\
	({									\
		bool ret = 0;							\
		if (__builtin_strcmp(#OP, "==") == 0)				\
			ret = _bpf_cmp(LHS, !=, RHS, false);			\
		else if (__builtin_strcmp(#OP, "!=") == 0)			\
			ret = _bpf_cmp(LHS, ==, RHS, false);			\
		else if (__builtin_strcmp(#OP, "<=") == 0)			\
			ret = _bpf_cmp(LHS, >, RHS, false);			\
		else if (__builtin_strcmp(#OP, "<") == 0)			\
			ret = _bpf_cmp(LHS, >=, RHS, false);			\
		else if (__builtin_strcmp(#OP, ">") == 0)			\
			ret = _bpf_cmp(LHS, <=, RHS, false);			\
		else if (__builtin_strcmp(#OP, ">=") == 0)			\
			ret = _bpf_cmp(LHS, <, RHS, false);			\
		else								\
			asm volatile("r0 " #OP " invalid compare");		\
		ret;								\
	})
#endif

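/* Example usage (a sketch; both macros emit a single conditional jump instead
 * of letting the compiler materialize the comparison result in a register.
 * bpf_cmp_likely() inverts the comparison internally so the fall-through path
 * is the likely one):
 *
 *	int i;
 *
 *	for (i = 0; i < 1000; i++) {
 *		if (bpf_cmp_unlikely(i, ==, 42))
 *			break;
 *	}
 */
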
/*
 * Note that cond_break can only be portably used in the body of a breakable
 * construct, whereas can_loop can be used anywhere.
 */
#ifdef __BPF_FEATURE_MAY_GOTO
#define can_loop					\
	({ __label__ l_break, l_continue;		\
	bool ret = true;				\
	asm volatile goto("may_goto %l[l_break]"	\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: ret = false;				\
	l_continue:;					\
	ret;						\
	})

#define cond_break					\
	({ __label__ l_break, l_continue;		\
	asm volatile goto("may_goto %l[l_break]"	\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: break;					\
	l_continue:;					\
	})
#else
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define can_loop					\
	({ __label__ l_break, l_continue;		\
	bool ret = true;				\
	asm volatile goto("1:.byte 0xe5;		\
		      .byte 0;				\
		      .long ((%l[l_break] - 1b - 8) / 8) & 0xffff;	\
		      .short 0"				\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: ret = false;				\
	l_continue:;					\
	ret;						\
	})

#define cond_break					\
	({ __label__ l_break, l_continue;		\
	asm volatile goto("1:.byte 0xe5;		\
		      .byte 0;				\
		      .long ((%l[l_break] - 1b - 8) / 8) & 0xffff;	\
		      .short 0"				\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: break;					\
	l_continue:;					\
	})
#else
#define can_loop					\
	({ __label__ l_break, l_continue;		\
	bool ret = true;				\
	asm volatile goto("1:.byte 0xe5;		\
		      .byte 0;				\
		      .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16;	\
		      .short 0"				\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: ret = false;				\
	l_continue:;					\
	ret;						\
	})

#define cond_break					\
	({ __label__ l_break, l_continue;		\
	asm volatile goto("1:.byte 0xe5;		\
		      .byte 0;				\
		      .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16;	\
		      .short 0"				\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: break;					\
	l_continue:;					\
	})
#endif
#endif

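/* Example usage (a sketch; can_loop and cond_break let the verifier bound
 * otherwise unbounded loops via the may_goto instruction, or an inline
 * encoding of it on older toolchains):
 *
 *	while (can_loop) {
 *		... loop body verified without an explicit iteration bound ...
 *	}
 *
 *	for (;;) {
 *		...
 *		cond_break;	// breaks out once the verifier-managed
 *				// iteration budget is exhausted
 *	}
 */
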
#define bpf_nop_mov(var) \
	asm volatile("%[reg]=%[reg]"::[reg]"r"((short)var))

/* emit instruction:
 * rX = rX .off = BPF_ADDR_SPACE_CAST .imm32 = (dst_as << 16) | src_as
 */
#ifndef bpf_addr_space_cast
#define bpf_addr_space_cast(var, dst_as, src_as)\
	asm volatile(".byte 0xBF;		\
		     .ifc %[reg], r0;		\
		     .byte 0x00;		\
		     .endif;			\
		     .ifc %[reg], r1;		\
		     .byte 0x11;		\
		     .endif;			\
		     .ifc %[reg], r2;		\
		     .byte 0x22;		\
		     .endif;			\
		     .ifc %[reg], r3;		\
		     .byte 0x33;		\
		     .endif;			\
		     .ifc %[reg], r4;		\
		     .byte 0x44;		\
		     .endif;			\
		     .ifc %[reg], r5;		\
		     .byte 0x55;		\
		     .endif;			\
		     .ifc %[reg], r6;		\
		     .byte 0x66;		\
		     .endif;			\
		     .ifc %[reg], r7;		\
		     .byte 0x77;		\
		     .endif;			\
		     .ifc %[reg], r8;		\
		     .byte 0x88;		\
		     .endif;			\
		     .ifc %[reg], r9;		\
		     .byte 0x99;		\
		     .endif;			\
		     .short %[off];		\
		     .long %[as]"		\
		     : [reg]"+r"(var)		\
		     : [off]"i"(BPF_ADDR_SPACE_CAST) \
		     , [as]"i"((dst_as << 16) | src_as));
#endif

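/* Example usage (a sketch; converts an arena pointer from the user address
 * space (1) to the kernel address space (0). The __arena annotation is
 * assumed to come from the arena support headers, not from this file):
 *
 *	void __arena *p = ... pointer into a BPF_MAP_TYPE_ARENA mapping ...;
 *
 *	bpf_addr_space_cast(p, 0, 1);
 *	... 'p' is now usable as a kernel-side arena pointer ...
 */
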
void bpf_preempt_disable(void) __weak __ksym;
void bpf_preempt_enable(void) __weak __ksym;

typedef struct {
} __bpf_preempt_t;

static inline __bpf_preempt_t __bpf_preempt_constructor(void)
{
	__bpf_preempt_t ret = {};

	bpf_preempt_disable();
	return ret;
}
static inline void __bpf_preempt_destructor(__bpf_preempt_t *t)
{
	bpf_preempt_enable();
}
#define bpf_guard_preempt() \
	__bpf_preempt_t ___bpf_apply(preempt, __COUNTER__)	\
	__attribute__((__unused__, __cleanup__(__bpf_preempt_destructor))) = \
	__bpf_preempt_constructor()

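/* Example usage (a sketch; preemption is disabled from the
 * bpf_guard_preempt() statement until the enclosing scope is left, at which
 * point the cleanup attribute re-enables it automatically):
 *
 *	void example(void)
 *	{
 *		bpf_guard_preempt();
 *		... non-preemptible section ...
 *	}	// bpf_preempt_enable() runs here
 */
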
/* Description
 *	Assert that a conditional expression is true.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert(cond) if (!(cond)) bpf_throw(0);

/* Description
 *	Assert that a conditional expression is true.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_with(cond, value) if (!(cond)) bpf_throw(value);

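/* Example usage (a sketch; aborts the program with cookie value 10 if 'len'
 * is zero):
 *
 *	bpf_assert_with(len != 0, 10);
 */
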
/* Description
 *	Assert that LHS is in the range [BEG, END] (inclusive of both). This
 *	statement updates the known bounds of LHS during verification. Note
 *	that both BEG and END must be constant values, and must fit within the
 *	data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert_range(LHS, BEG, END)						\
	({									\
		_Static_assert(BEG <= END, "BEG must be <= END");		\
		barrier_var(LHS);						\
		__bpf_assert_op(LHS, >=, BEG, 0, false);			\
		__bpf_assert_op(LHS, <=, END, 0, false);			\
	})

/* Description
 *	Assert that LHS is in the range [BEG, END] (inclusive of both). This
 *	statement updates the known bounds of LHS during verification. Note
 *	that both BEG and END must be constant values, and must fit within the
 *	data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_range_with(LHS, BEG, END, value)				\
	({									\
		_Static_assert(BEG <= END, "BEG must be <= END");		\
		barrier_var(LHS);						\
		__bpf_assert_op(LHS, >=, BEG, value, false);			\
		__bpf_assert_op(LHS, <=, END, value, false);			\
	})

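/* Example usage (a sketch; after the assertion the verifier knows 'idx' lies
 * in [0, 63], so the array access below passes verification.
 * get_some_index() is a hypothetical helper):
 *
 *	u64 idx = get_some_index();
 *	u64 table[64];
 *
 *	bpf_assert_range(idx, 0, 63);
 *	return table[idx];
 */
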
struct bpf_iter_css_task;
struct cgroup_subsys_state;
extern int bpf_iter_css_task_new(struct bpf_iter_css_task *it,
				 struct cgroup_subsys_state *css, unsigned int flags) __weak __ksym;
extern struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it) __weak __ksym;
extern void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) __weak __ksym;

struct bpf_iter_task;
extern int bpf_iter_task_new(struct bpf_iter_task *it,
			     struct task_struct *task, unsigned int flags) __weak __ksym;
extern struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it) __weak __ksym;
extern void bpf_iter_task_destroy(struct bpf_iter_task *it) __weak __ksym;

struct bpf_iter_css;
extern int bpf_iter_css_new(struct bpf_iter_css *it,
			    struct cgroup_subsys_state *start, unsigned int flags) __weak __ksym;
extern struct cgroup_subsys_state *bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;

extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
				    int (callback_fn)(void *map, int *key, void *value),
				    unsigned int flags__k, void *aux__ign) __ksym;
#define bpf_wq_set_callback(timer, cb, flags) \
	bpf_wq_set_callback_impl(timer, cb, flags, NULL)

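/* Example usage (a sketch; a bpf_wq must be embedded in a map value, here in
 * a hypothetical 'struct elem' stored in a map 'array'):
 *
 *	struct elem {
 *		struct bpf_wq work;
 *	};
 *
 *	static int wq_cb(void *map, int *key, void *value)
 *	{
 *		... runs later from workqueue context ...
 *		return 0;
 *	}
 *
 *	struct elem *e = bpf_map_lookup_elem(&array, &key);
 *
 *	if (!e)
 *		return 0;
 *	if (bpf_wq_init(&e->work, &array, 0))
 *		return 0;
 *	if (bpf_wq_set_callback(&e->work, wq_cb, 0))
 *		return 0;
 *	bpf_wq_start(&e->work, 0);
 */
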
struct bpf_iter_kmem_cache;
extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym;
extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym;
extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym;

#endif /* __BPF_EXPERIMENTAL__ */