// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <linux/fprobe.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/key.h>
#include <linux/verification.h>
#include <linux/namei.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#define MAX_UPROBE_MULTI_CNT (1U << 20)
#define MAX_KPROBE_MULTI_CNT (1U << 20)
#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);

	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		rcu_read_lock();
		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
		rcu_read_unlock();
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the bpf_prog_array_valid() fetched prog_array was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL then, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * out on events when it was updated in between this and the
	 * rcu_dereference(), which is an accepted risk.
	 */
	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
				 ctx, bpf_prog_run);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif
static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
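
/*
 * Illustrative usage from the BPF side (not part of this file): a minimal
 * kprobe program that copies a user-space string argument with
 * bpf_probe_read_user_str(). This is a sketch that assumes libbpf,
 * vmlinux.h and an x86-64 kernel; the program and section names below are
 * made up for the example.
 *
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int BPF_KPROBE(sample_openat, int dfd, const char *filename)
 *	{
 *		char buf[256];
 *
 *		// On failure the helper zeroes 'buf' (see the memset above),
 *		// so nothing below ever sees stale stack bytes.
 *		if (bpf_probe_read_user_str(buf, sizeof(buf), filename) < 0)
 *			return 0;
 *		bpf_printk("openat: %s", buf);
 *		return 0;
 *	}
 *
 *	char LICENSE[] SEC("license") = "GPL";
 */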
static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If the user takes
	 * `dst` and keys a hash map with it, then semantically identical
	 * strings can occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in the error case, so that improper users ignoring the return
	 * code altogether don't copy garbage; otherwise the length of the
	 * string is returned and can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
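
/*
 * Illustrative BPF-side usage (not part of this file): bpf_probe_write_user()
 * is only handed out to CAP_SYS_ADMIN holders via bpf_get_probe_write_proto()
 * above, and it can only target the current task's user memory. A sketch,
 * assuming vmlinux.h/libbpf; the attach point and names are hypothetical:
 *
 *	SEC("kprobe/__x64_sys_getcwd")
 *	int BPF_KPROBE(poke_user_buf, char *ubuf)
 *	{
 *		const char msg[] = "bpf";
 *
 *		// Fails with -EPERM for kthreads, exiting tasks, or when
 *		// nmi_uaccess_okay() says the mm is in an interim state.
 *		bpf_probe_write_user(ubuf, msg, sizeof(msg));
 *		return 0;
 *	}
 */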
#define MAX_TRACE_PRINTK_VARARGS	3
#define BPF_TRACE_PRINTK_SIZE		1024

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
		.get_buf	= true,
	};
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
				  MAX_TRACE_PRINTK_VARARGS, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);

	trace_bpf_trace_printk(data.buf);

	bpf_bprintf_cleanup(&data);

	return ret;
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
};

static void __set_printk_clr_event(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events. By loading a program
	 * that calls bpf_trace_printk(), however, the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");
}

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_printk_proto;
}
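
/*
 * Illustrative usage (not part of this file): the three-argument limit and
 * the scratch buffer above are what a program hits when it calls
 * bpf_trace_printk() directly; most users go through libbpf's bpf_printk()
 * convenience macro, which routes to this helper (or to bpf_trace_vprintk()
 * below when more arguments are needed). Output lands in the
 * bpf_trace/bpf_trace_printk trace event and is visible via
 * /sys/kernel/tracing/trace_pipe once a program using it is loaded.
 * A BPF-side sketch, assuming vmlinux.h/libbpf:
 *
 *	char fmt[] = "pid %d comm %s\n";
 *	char comm[16];
 *	u32 pid = bpf_get_current_pid_tgid() >> 32;
 *
 *	bpf_get_current_comm(&comm, sizeof(comm));
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, (unsigned long)&comm, 0);
 */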
BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
	   u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
		.get_buf	= true,
	};
	int ret, num_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);

	trace_bpf_trace_printk(data.buf);

	bpf_bprintf_cleanup(&data);

	return ret;
}

static const struct bpf_func_proto bpf_trace_vprintk_proto = {
	.func		= bpf_trace_vprintk,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
};

const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_vprintk_proto;
}

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, args, u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
	};
	int err, num_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (err < 0)
		return err;

	seq_bprintf(m, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func		= bpf_seq_printf,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func		= bpf_seq_write,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func		= bpf_seq_printf_btf,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
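
/*
 * Illustrative usage (not part of this file): the seq_file helpers above are
 * only offered to BPF_TRACE_ITER programs (see tracing_prog_func_proto()
 * further below). A task-iterator sketch, assuming vmlinux.h/libbpf;
 * BPF_SEQ_PRINTF is libbpf's wrapper around bpf_seq_printf():
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		if (!task)
 *			return 0;
 *		BPF_SEQ_PRINTF(seq, "%8d %s\n", task->pid, task->comm);
 *		return 0;
 *	}
 */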
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}
/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds;
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int nest_level, err;

	sds = this_cpu_ptr(&bpf_trace_sds);
	nest_level = this_cpu_inc_return(bpf_trace_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	perf_sample_save_raw_data(sd, &raw);

	err = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
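
/*
 * Illustrative usage (not part of this file): pushing an event from a kprobe
 * program into a BPF_MAP_TYPE_PERF_EVENT_ARRAY, which user space drains with
 * libbpf's perf_buffer__poll(). A sketch assuming vmlinux.h/libbpf; the event
 * layout is made up for the example:
 *
 *	struct event { u32 pid; char comm[16]; };
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, sizeof(u32));
 *	} events SEC(".maps");
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int BPF_KPROBE(emit_event)
 *	{
 *		struct event e = {};
 *
 *		e.pid = bpf_get_current_pid_tgid() >> 32;
 *		bpf_get_current_comm(&e.comm, sizeof(e.comm));
 *		// BPF_F_CURRENT_CPU selects this CPU's ring, matching the
 *		// index handling in __bpf_perf_event_output() above.
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */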
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	int nest_level;
	u64 ret;

	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	perf_sample_save_raw_data(sd, &raw);

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func		= bpf_get_current_task_btf,
	.ret_type	= RET_PTR_TO_BTF_ID_TRUSTED,
	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
{
	return (unsigned long) task_pt_regs(task);
}

BTF_ID_LIST(bpf_task_pt_regs_ids)
BTF_ID(struct, pt_regs)

const struct bpf_func_proto bpf_task_pt_regs_proto = {
	.func		= bpf_task_pt_regs,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.ret_type	= RET_PTR_TO_BTF_ID,
	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
};
struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
	bool has_siginfo;
	struct kernel_siginfo info;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;
	struct kernel_siginfo *siginfo;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	siginfo = work->has_siginfo ? &work->info : SEND_SIG_PRIV;

	group_send_sig_info(work->sig, siginfo, work->task, work->type);
	put_task_struct(work->task);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struct *task, u64 value)
{
	struct send_signal_irq_work *work = NULL;
	struct kernel_siginfo info;
	struct kernel_siginfo *siginfo;

	if (!task) {
		task = current;
		siginfo = SEND_SIG_PRIV;
	} else {
		clear_siginfo(&info);
		info.si_signo = sig;
		info.si_errno = 0;
		info.si_code = SI_KERNEL;
		info.si_pid = 0;
		info.si_uid = 0;
		info.si_value.sival_ptr = (void *)(unsigned long)value;
		siginfo = &info;
	}

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(task->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;
	/* Task should not be pid=1 to avoid kernel panic. */
	if (unlikely(is_global_init(task)))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = get_task_struct(task);
		work->has_siginfo = siginfo == &info;
		if (work->has_siginfo)
			copy_siginfo(&work->info, &info);
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, siginfo, task, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID, NULL, 0);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID, NULL, 0);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
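
/*
 * Illustrative usage (not part of this file): sending SIGUSR1 to the current
 * process from a tracing program. A sketch assuming vmlinux.h/libbpf; the
 * attach point is arbitrary:
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int BPF_KPROBE(signal_on_open)
 *	{
 *		// 10 == SIGUSR1 on most architectures; delivery is refused
 *		// for kthreads, exiting tasks and init, and is deferred to
 *		// irq_work when IRQs are disabled (see above).
 *		bpf_send_signal(10);
 *		return 0;
 *	}
 */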
BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	struct path copy;
	long len;
	char *p;

	if (!sz)
		return 0;

	/*
	 * The path pointer is verified as trusted and safe to use,
	 * but let's double check it's valid anyway to workaround
	 * potentially broken verifier.
	 */
	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
	if (len < 0)
		return len;

	p = d_path(&copy, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING &&
	    prog->expected_attach_type == BPF_TRACE_ITER)
		return true;

	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func		= bpf_d_path,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.allowed	= bpf_d_path_allowed,
};
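
/*
 * Illustrative usage (not part of this file): bpf_d_path() is restricted by
 * bpf_d_path_allowed() above to BPF iterators, sleepable LSM hooks and the
 * allowlisted functions. An LSM sketch, assuming vmlinux.h/libbpf:
 *
 *	SEC("lsm/file_open")
 *	int BPF_PROG(log_open, struct file *file)
 *	{
 *		char path[256];
 *		long n;
 *
 *		n = bpf_d_path(&file->f_path, path, sizeof(path));
 *		if (n > 0)
 *			bpf_printk("open: %s", path);
 *		return 0;
 *	}
 */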
#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func		= bpf_snprintf_btf,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-2];
}

static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
	.func		= bpf_get_func_ip_tracing,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

#ifdef CONFIG_X86_KERNEL_IBT
static unsigned long get_entry_ip(unsigned long fentry_ip)
{
	u32 instr;

	/* We want to be extra safe in case entry ip is on the page edge,
	 * but otherwise we need to avoid get_kernel_nofault()'s overhead.
	 */
	if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
		if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
			return fentry_ip;
	} else {
		instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
	}
	if (is_endbr(instr))
		fentry_ip -= ENDBR_INSN_SIZE;
	return fentry_ip;
}
#else
#define get_entry_ip(fentry_ip) fentry_ip
#endif
BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
	struct kprobe *kp;

#ifdef CONFIG_UPROBES
	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	if (run_ctx->is_uprobe)
		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
#endif

	kp = kprobe_running();

	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
		return 0;

	return get_entry_ip((uintptr_t)kp->addr);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
	.func		= bpf_get_func_ip_kprobe,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
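
/*
 * Illustrative usage (not part of this file): in a kprobe program
 * bpf_get_func_ip() resolves to bpf_get_func_ip_kprobe() above, and with
 * CONFIG_X86_KERNEL_IBT get_entry_ip() strips the leading ENDBR so the
 * address matches the function symbol. A sketch assuming vmlinux.h/libbpf:
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int BPF_KPROBE(where_am_i)
 *	{
 *		u64 ip = bpf_get_func_ip(ctx);
 *
 *		bpf_printk("attached at %lx", ip);
 *		return 0;
 *	}
 */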
BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
	.func		= bpf_get_func_ip_kprobe_multi,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
	.func		= bpf_get_attach_cookie_kprobe_multi,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
{
	return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
	.func		= bpf_get_func_ip_uprobe_multi,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
{
	return bpf_uprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
	.func		= bpf_get_attach_cookie_uprobe_multi,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
	.func		= bpf_get_attach_cookie_trace,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
{
	return ctx->event->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
	.func		= bpf_get_attach_cookie_pe,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
	.func		= bpf_get_attach_cookie_tracing,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
{
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	u32 entry_cnt = size / br_entry_size;

	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);

	if (unlikely(flags))
		return -EINVAL;

	if (!entry_cnt)
		return -ENOENT;

	return entry_cnt * br_entry_size;
}

static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
	.func		= bpf_get_branch_snapshot,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	if ((u64) n >= nr_args)
		return -EINVAL;
	*value = ((u64 *)ctx)[n];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_arg_proto = {
	.func		= get_func_arg,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg3_size	= sizeof(u64),
};

BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	*value = ((u64 *)ctx)[nr_args];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_ret_proto = {
	.func		= get_func_ret,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg2_size	= sizeof(u64),
};

BPF_CALL_1(get_func_arg_cnt, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-1];
}

static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
	.func		= get_func_arg_cnt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
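
/*
 * Illustrative usage (not part of this file): the three helpers above are
 * only offered to trampoline-based programs (see tracing_prog_func_proto()
 * below). An fexit sketch, assuming vmlinux.h/libbpf:
 *
 *	SEC("fexit/do_sys_openat2")
 *	int BPF_PROG(openat_exit)
 *	{
 *		u64 nr_args = bpf_get_func_arg_cnt(ctx);
 *		u64 dfd = 0, ret = 0;
 *
 *		bpf_get_func_arg(ctx, 0, &dfd);   // first argument
 *		bpf_get_func_ret(ctx, &ret);      // return value
 *		bpf_printk("args=%llu dfd=%llu ret=%lld", nr_args, dfd, ret);
 *		return 0;
 *	}
 */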
#ifdef CONFIG_KEYS
__bpf_kfunc_start_defs();

/**
 * bpf_lookup_user_key - lookup a key by its serial
 * @serial: key handle serial number
 * @flags: lookup-specific flags
 *
 * Search a key with a given *serial* and the provided *flags*.
 * If found, increment the reference count of the key by one, and
 * return it in the bpf_key structure.
 *
 * The bpf_key structure must be passed to bpf_key_put() when done
 * with it, so that the key reference count is decremented and the
 * bpf_key structure is freed.
 *
 * Permission checks are deferred to the time the key is used by
 * one of the available key-specific kfuncs.
 *
 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
 * special keyring (e.g. session keyring), if it doesn't yet exist.
 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
 * for the key construction, and to retrieve uninstantiated keys (keys
 * without data attached to them).
 *
 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
 *         NULL pointer otherwise.
 */
__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
{
	key_ref_t key_ref;
	struct bpf_key *bkey;

	if (flags & ~KEY_LOOKUP_ALL)
		return NULL;

	/*
	 * Permission check is deferred until the key is used, as the
	 * intent of the caller is unknown here.
	 */
	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
	if (IS_ERR(key_ref))
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
	if (!bkey) {
		key_put(key_ref_to_ptr(key_ref));
		return NULL;
	}

	bkey->key = key_ref_to_ptr(key_ref);
	bkey->has_ref = true;

	return bkey;
}

/**
 * bpf_lookup_system_key - lookup a key by a system-defined ID
 * @id: key ID
 *
 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
 * The key pointer is marked as invalid, to prevent bpf_key_put() from
 * attempting to decrement the key reference count on that pointer. The key
 * pointer set in such way is currently understood only by
 * verify_pkcs7_signature().
 *
 * Set *id* to one of the values defined in include/linux/verification.h:
 * 0 for the primary keyring (immutable keyring of system keys);
 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
 * (where keys can be added only if they are vouched for by existing keys
 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
 * kernel image and, possibly, the initramfs signature).
 *
 * Return: a bpf_key pointer with an invalid key pointer set from the
 *         pre-determined ID on success, a NULL pointer otherwise
 */
__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
{
	struct bpf_key *bkey;

	if (system_keyring_id_check(id) < 0)
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
	if (!bkey)
		return NULL;

	bkey->key = (struct key *)(unsigned long)id;
	bkey->has_ref = false;

	return bkey;
}

/**
 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
 * @bkey: bpf_key structure
 *
 * Decrement the reference count of the key inside *bkey*, if the pointer
 * is valid, and free *bkey*.
 */
__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
{
	if (bkey->has_ref)
		key_put(bkey->key);

	kfree(bkey);
}

#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
/**
 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
 * @data_p: data to verify
 * @sig_p: signature of the data
 * @trusted_keyring: keyring with keys trusted for signature verification
 *
 * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
 * with keys in a keyring referenced by *trusted_keyring*.
 *
 * Return: 0 on success, a negative value on error.
 */
__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
			       struct bpf_dynptr *sig_p,
			       struct bpf_key *trusted_keyring)
{
	struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p;
	struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p;
	const void *data, *sig;
	u32 data_len, sig_len;
	int ret;

	if (trusted_keyring->has_ref) {
		/*
		 * Do the permission check deferred in bpf_lookup_user_key().
		 * See bpf_lookup_user_key() for more details.
		 *
		 * A call to key_task_permission() here would be redundant, as
		 * it is already done by keyring_search() called by
		 * find_asymmetric_key().
		 */
		ret = key_validate(trusted_keyring->key);
		if (ret < 0)
			return ret;
	}

	data_len = __bpf_dynptr_size(data_ptr);
	data = __bpf_dynptr_data(data_ptr, data_len);
	sig_len = __bpf_dynptr_size(sig_ptr);
	sig = __bpf_dynptr_data(sig_ptr, sig_len);

	return verify_pkcs7_signature(data, data_len, sig, sig_len,
				      trusted_keyring->key,
				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
				      NULL);
}
#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(key_sig_kfunc_set)
BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
#endif
BTF_KFUNCS_END(key_sig_kfunc_set)

static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &key_sig_kfunc_set,
};

static int __init bpf_key_sig_kfuncs_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					 &bpf_key_sig_kfunc_set);
}

late_initcall(bpf_key_sig_kfuncs_init);
#endif /* CONFIG_KEYS */
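
/*
 * Illustrative usage (not part of this file): a sleepable LSM program
 * calling the kfuncs registered above. This is a sketch assuming
 * vmlinux.h/libbpf; KEYRING_SERIAL is a made-up constant and the two
 * dynptrs are assumed to have been filled elsewhere (e.g. with
 * bpf_copy_from_user() into map-backed buffers):
 *
 *	extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
 *	extern void bpf_key_put(struct bpf_key *key) __ksym;
 *	extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr,
 *					      struct bpf_dynptr *sig_ptr,
 *					      struct bpf_key *trusted_keyring) __ksym;
 *
 *	SEC("lsm.s/bpf")
 *	int BPF_PROG(check_sig, int cmd, union bpf_attr *attr, unsigned int size)
 *	{
 *		struct bpf_key *trusted_keyring;
 *		int ret;
 *
 *		trusted_keyring = bpf_lookup_user_key(KEYRING_SERIAL, 0);
 *		if (!trusted_keyring)
 *			return 0;
 *		ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, trusted_keyring);
 *		bpf_key_put(trusted_keyring);
 *		return ret;
 *	}
 */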
static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_map_lookup_percpu_elem:
		return &bpf_map_lookup_percpu_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_task_pt_regs:
		return &bpf_task_pt_regs_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_write_user:
		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
		       NULL : bpf_get_probe_write_proto();
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_cgrp_storage_get:
		return &bpf_cgrp_storage_get_proto;
	case BPF_FUNC_cgrp_storage_delete:
		return &bpf_cgrp_storage_delete_proto;
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_get_task_stack:
		return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
				       : &bpf_get_task_stack_proto;
	case BPF_FUNC_copy_from_user:
		return &bpf_copy_from_user_proto;
	case BPF_FUNC_copy_from_user_task:
		return &bpf_copy_from_user_task_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_task_storage_get:
		if (bpf_prog_check_recur(prog))
			return &bpf_task_storage_get_recur_proto;
		return &bpf_task_storage_get_proto;
	case BPF_FUNC_task_storage_delete:
		if (bpf_prog_check_recur(prog))
			return &bpf_task_storage_delete_recur_proto;
		return &bpf_task_storage_delete_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	case BPF_FUNC_get_func_ip:
		return &bpf_get_func_ip_proto_tracing;
	case BPF_FUNC_get_branch_snapshot:
		return &bpf_get_branch_snapshot_proto;
	case BPF_FUNC_find_vma:
		return &bpf_find_vma_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	default:
		return bpf_base_func_proto(func_id, prog);
	}
}

static bool is_kprobe_multi(const struct bpf_prog *prog)
{
	return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ||
	       prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
}

static inline bool is_kprobe_session(const struct bpf_prog *prog)
{
	return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
}

static inline bool is_uprobe_multi(const struct bpf_prog *prog)
{
	return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI ||
	       prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
}

static inline bool is_uprobe_session(const struct bpf_prog *prog)
{
	return prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return prog->sleepable ? &bpf_get_stack_sleepable_proto : &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	case BPF_FUNC_get_func_ip:
		if (is_kprobe_multi(prog))
			return &bpf_get_func_ip_proto_kprobe_multi;
		if (is_uprobe_multi(prog))
			return &bpf_get_func_ip_proto_uprobe_multi;
		return &bpf_get_func_ip_proto_kprobe;
	case BPF_FUNC_get_attach_cookie:
		if (is_kprobe_multi(prog))
			return &bpf_get_attach_cookie_proto_kmulti;
		if (is_uprobe_multi(prog))
			return &bpf_get_attach_cookie_proto_umulti;
		return &bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};
BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
		return -ENOENT;

	if (unlikely(!br_stack))
		return -ENOENT;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func		= bpf_read_branch_records,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
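
/*
 * Illustrative usage (not part of this file): a perf_event program copying
 * branch (LBR) entries out of the sampled data. A sketch assuming
 * vmlinux.h/libbpf and a perf event opened with PERF_SAMPLE_BRANCH_STACK:
 *
 *	struct perf_branch_entry entries[16];
 *
 *	SEC("perf_event")
 *	int lbr_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		// With BPF_F_GET_BRANCH_RECORDS_SIZE only the size is returned.
 *		long total = bpf_read_branch_records(ctx, NULL, 0,
 *						     BPF_F_GET_BRANCH_RECORDS_SIZE);
 *		long copied = bpf_read_branch_records(ctx, entries,
 *						      sizeof(entries), 0);
 *
 *		if (copied > 0)
 *			bpf_printk("lbr: %ld of %ld bytes", copied, total);
 *		return 0;
 *	}
 */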
static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_pe;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_pe;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_pe;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;
extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_tracing;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}
1977 const struct bpf_func_proto
*
1978 tracing_prog_func_proto(enum bpf_func_id func_id
, const struct bpf_prog
*prog
)
1980 const struct bpf_func_proto
*fn
;
1984 case BPF_FUNC_skb_output
:
1985 return &bpf_skb_output_proto
;
1986 case BPF_FUNC_xdp_output
:
1987 return &bpf_xdp_output_proto
;
1988 case BPF_FUNC_skc_to_tcp6_sock
:
1989 return &bpf_skc_to_tcp6_sock_proto
;
1990 case BPF_FUNC_skc_to_tcp_sock
:
1991 return &bpf_skc_to_tcp_sock_proto
;
1992 case BPF_FUNC_skc_to_tcp_timewait_sock
:
1993 return &bpf_skc_to_tcp_timewait_sock_proto
;
1994 case BPF_FUNC_skc_to_tcp_request_sock
:
1995 return &bpf_skc_to_tcp_request_sock_proto
;
1996 case BPF_FUNC_skc_to_udp6_sock
:
1997 return &bpf_skc_to_udp6_sock_proto
;
1998 case BPF_FUNC_skc_to_unix_sock
:
1999 return &bpf_skc_to_unix_sock_proto
;
2000 case BPF_FUNC_skc_to_mptcp_sock
:
2001 return &bpf_skc_to_mptcp_sock_proto
;
2002 case BPF_FUNC_sk_storage_get
:
2003 return &bpf_sk_storage_get_tracing_proto
;
2004 case BPF_FUNC_sk_storage_delete
:
2005 return &bpf_sk_storage_delete_tracing_proto
;
2006 case BPF_FUNC_sock_from_file
:
2007 return &bpf_sock_from_file_proto
;
2008 case BPF_FUNC_get_socket_cookie
:
2009 return &bpf_get_socket_ptr_cookie_proto
;
2010 case BPF_FUNC_xdp_get_buff_len
:
2011 return &bpf_xdp_get_buff_len_trace_proto
;
2013 case BPF_FUNC_seq_printf
:
2014 return prog
->expected_attach_type
== BPF_TRACE_ITER
?
2015 &bpf_seq_printf_proto
:
2017 case BPF_FUNC_seq_write
:
2018 return prog
->expected_attach_type
== BPF_TRACE_ITER
?
2019 &bpf_seq_write_proto
:
2021 case BPF_FUNC_seq_printf_btf
:
2022 return prog
->expected_attach_type
== BPF_TRACE_ITER
?
2023 &bpf_seq_printf_btf_proto
:
2025 case BPF_FUNC_d_path
:
2026 return &bpf_d_path_proto
;
2027 case BPF_FUNC_get_func_arg
:
2028 return bpf_prog_has_trampoline(prog
) ? &bpf_get_func_arg_proto
: NULL
;
2029 case BPF_FUNC_get_func_ret
:
2030 return bpf_prog_has_trampoline(prog
) ? &bpf_get_func_ret_proto
: NULL
;
2031 case BPF_FUNC_get_func_arg_cnt
:
2032 return bpf_prog_has_trampoline(prog
) ? &bpf_get_func_arg_cnt_proto
: NULL
;
2033 case BPF_FUNC_get_attach_cookie
:
2034 if (prog
->type
== BPF_PROG_TYPE_TRACING
&&
2035 prog
->expected_attach_type
== BPF_TRACE_RAW_TP
)
2036 return &bpf_get_attach_cookie_proto_tracing
;
2037 return bpf_prog_has_trampoline(prog
) ? &bpf_get_attach_cookie_proto_tracing
: NULL
;
2039 fn
= raw_tp_prog_func_proto(func_id
, prog
);
2040 if (!fn
&& prog
->expected_attach_type
== BPF_TRACE_ITER
)
2041 fn
= bpf_iter_get_func_proto(func_id
, prog
);
2046 static bool raw_tp_prog_is_valid_access(int off
, int size
,
2047 enum bpf_access_type type
,
2048 const struct bpf_prog
*prog
,
2049 struct bpf_insn_access_aux
*info
)
2051 return bpf_tracing_ctx_access(off
, size
, type
);
2054 static bool tracing_prog_is_valid_access(int off
, int size
,
2055 enum bpf_access_type type
,
2056 const struct bpf_prog
*prog
,
2057 struct bpf_insn_access_aux
*info
)
2059 return bpf_tracing_btf_ctx_access(off
, size
, type
, prog
, info
);
int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_prog_test_run_raw_tp,
#endif
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto  = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};
static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_prog_test_run_raw_tp,
#endif
};
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
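
/*
 * Sketch of the rewrite above: a read of ctx->sample_period in a
 * perf_event program becomes, roughly,
 *
 *   dst = ((struct bpf_perf_event_data_kern *)ctx)->data;
 *   dst = ((struct perf_sample_data *)dst)->period;
 *
 * i.e. the UAPI bpf_perf_event_data layout is remapped onto
 * bpf_perf_event_data_kern at verification time.
 */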
const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};
static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64
int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog,
			       u64 bpf_cookie)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if the probe is on the function entry,
	 * and only if it is on the opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	event->bpf_cookie = bpf_cookie;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free_sleepable(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}
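
/*
 * Typically reached from the PERF_EVENT_IOC_SET_BPF ioctl (and the
 * perf-event based BPF link path) when user space attaches a program
 * fd to a tracepoint/kprobe/uprobe perf event.
 */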
void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free_sleepable(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}
int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when the user only wants to check uquery->prog_cnt.
	 * There is no need to check for it here since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}
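
/*
 * Typically reached from the PERF_EVENT_IOC_QUERY_BPF ioctl, which
 * returns the ids of all programs attached to the event's tracepoint
 * in a struct perf_event_query_bpf.
 */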
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)btp);
	module_put(mod);
	preempt_enable();
}
static __always_inline
void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
{
	struct bpf_prog *prog = link->link.prog;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_trace_run_ctx run_ctx;

	cant_sleep();
	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		bpf_prog_inc_misses_counter(prog);
		goto out;
	}

	run_ctx.bpf_cookie = link->cookie;
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);

	rcu_read_lock();
	(void) bpf_prog_run(prog, args);
	rcu_read_unlock();

	bpf_reset_run_ctx(old_run_ctx);
out:
	this_cpu_dec(*(prog->active));
}
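
/*
 * The per-CPU prog->active counter above keeps a raw tracepoint
 * program from recursing into itself; skipped invocations only bump
 * the program's miss counter.
 */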
#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_raw_tp_link *link,		\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(link, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
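
/*
 * Rough expansion sketch: BPF_TRACE_DEFN_x(2) generates approximately
 *
 *   void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg0, u64 arg1)
 *   {
 *           u64 args[2];
 *
 *           args[0] = arg0; args[1] = arg1;
 *           __bpf_trace_run(link, args);
 *   }
 *   EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * i.e. one trampoline per possible tracepoint argument count (1..12).
 */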
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
	struct tracepoint *tp = btp->tp;
	struct bpf_prog *prog = link->link.prog;

	/*
	 * check that the program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link);
}
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr,
			    unsigned long *missed)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		/* We allow NULL pointer for tracepoint */
		if (fd_type)
			*fd_type = BPF_FD_TYPE_TRACEPOINT;
		if (probe_offset)
			*probe_offset = 0x0;
		if (probe_addr)
			*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr, missed,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}
static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);
#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;
	int ret = 0;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		goto out;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		} else {
			ret = -ENOMEM;
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

out:
	return notifier_from_errno(ret);
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */
struct bpf_session_run_ctx {
	struct bpf_run_ctx run_ctx;
	bool is_return;
	void *data;
};

#ifdef CONFIG_FPROBE
struct bpf_kprobe_multi_link {
	struct bpf_link link;
	struct fprobe fp;
	unsigned long *addrs;
	u64 *cookies;
	u32 cnt;
	u32 mods_cnt;
	struct module **mods;
	u32 flags;
};

struct bpf_kprobe_multi_run_ctx {
	struct bpf_session_run_ctx session_ctx;
	struct bpf_kprobe_multi_link *link;
	unsigned long entry_ip;
};
struct user_syms {
	const char **syms;
	char *buf;
};

static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
{
	unsigned long __user usymbol;
	const char **syms = NULL;
	char *buf = NULL, *p;
	int err = -ENOMEM;
	unsigned int i;

	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
	if (!syms)
		goto error;

	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
	if (!buf)
		goto error;

	for (p = buf, i = 0; i < cnt; i++) {
		if (__get_user(usymbol, usyms + i)) {
			err = -EFAULT;
			goto error;
		}
		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
		if (err == KSYM_NAME_LEN)
			err = -E2BIG;
		if (err < 0)
			goto error;
		syms[i] = p;
		p += err + 1;
	}

	us->syms = syms;
	us->buf = buf;
	return 0;

error:
	kvfree(syms);
	kvfree(buf);
	return err;
}
static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
{
	u32 i;

	for (i = 0; i < cnt; i++)
		module_put(mods[i]);
}

static void free_user_syms(struct user_syms *us)
{
	kvfree(us->syms);
	kvfree(us->buf);
}
static void bpf_kprobe_multi_link_release(struct bpf_link *link)
{
	struct bpf_kprobe_multi_link *kmulti_link;

	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
	unregister_fprobe(&kmulti_link->fp);
	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
}

static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
{
	struct bpf_kprobe_multi_link *kmulti_link;

	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
	kvfree(kmulti_link->addrs);
	kvfree(kmulti_link->cookies);
	kfree(kmulti_link->mods);
	kfree(kmulti_link);
}
static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
						struct bpf_link_info *info)
{
	u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies);
	u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
	struct bpf_kprobe_multi_link *kmulti_link;
	u32 ucount = info->kprobe_multi.count;
	int err = 0, i;

	if (!uaddrs ^ !ucount)
		return -EINVAL;
	if (ucookies && !ucount)
		return -EINVAL;

	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
	info->kprobe_multi.count = kmulti_link->cnt;
	info->kprobe_multi.flags = kmulti_link->flags;
	info->kprobe_multi.missed = kmulti_link->fp.nmissed;

	if (!uaddrs)
		return 0;
	if (ucount < kmulti_link->cnt)
		err = -ENOSPC;
	else
		ucount = kmulti_link->cnt;

	if (ucookies) {
		if (kmulti_link->cookies) {
			if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64)))
				return -EFAULT;
		} else {
			for (i = 0; i < ucount; i++) {
				if (put_user(0, ucookies + i))
					return -EFAULT;
			}
		}
	}

	if (kallsyms_show_value(current_cred())) {
		if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
			return -EFAULT;
	} else {
		for (i = 0; i < ucount; i++) {
			if (put_user(0, uaddrs + i))
				return -EFAULT;
		}
	}

	return err;
}
static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
	.release = bpf_kprobe_multi_link_release,
	.dealloc_deferred = bpf_kprobe_multi_link_dealloc,
	.fill_link_info = bpf_kprobe_multi_link_fill_link_info,
};
static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
{
	const struct bpf_kprobe_multi_link *link = priv;
	unsigned long *addr_a = a, *addr_b = b;
	u64 *cookie_a, *cookie_b;

	cookie_a = link->cookies + (addr_a - link->addrs);
	cookie_b = link->cookies + (addr_b - link->addrs);

	/* swap addr_a/addr_b and cookie_a/cookie_b values */
	swap(*addr_a, *addr_b);
	swap(*cookie_a, *cookie_b);
}

static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
{
	const unsigned long *addr_a = a, *addr_b = b;

	if (*addr_a == *addr_b)
		return 0;
	return *addr_a < *addr_b ? -1 : 1;
}

static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
{
	return bpf_kprobe_multi_addrs_cmp(a, b);
}
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	struct bpf_kprobe_multi_run_ctx *run_ctx;
	struct bpf_kprobe_multi_link *link;
	u64 *cookie, entry_ip;
	unsigned long *addr;

	if (WARN_ON_ONCE(!ctx))
		return 0;
	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
			       session_ctx.run_ctx);
	link = run_ctx->link;
	if (!link->cookies)
		return 0;
	entry_ip = run_ctx->entry_ip;
	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
		       bpf_kprobe_multi_addrs_cmp);
	if (!addr)
		return 0;
	cookie = link->cookies + (addr - link->addrs);
	return *cookie;
}
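
/*
 * link->addrs is sorted at attach time with the cookies array permuted
 * in lock-step (see bpf_kprobe_multi_cookie_swap()), so the bsearch()
 * above maps the probed entry IP back to its cookie in O(log n).
 */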
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	struct bpf_kprobe_multi_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
			       session_ctx.run_ctx);
	return run_ctx->entry_ip;
}
static int
kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
			   unsigned long entry_ip, struct pt_regs *regs,
			   bool is_return, void *data)
{
	struct bpf_kprobe_multi_run_ctx run_ctx = {
		.session_ctx = {
			.is_return = is_return,
			.data = data,
		},
		.link = link,
		.entry_ip = entry_ip,
	};
	struct bpf_run_ctx *old_run_ctx;
	int err;

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		bpf_prog_inc_misses_counter(link->link.prog);
		err = 0;
		goto out;
	}

	migrate_disable();
	rcu_read_lock();
	old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
	err = bpf_prog_run(link->link.prog, regs);
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();

out:
	__this_cpu_dec(bpf_prog_active);
	return err;
}
static int
kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
			  unsigned long ret_ip, struct pt_regs *regs,
			  void *data)
{
	struct bpf_kprobe_multi_link *link;
	int err;

	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
	err = kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, false, data);
	return is_kprobe_session(link->link.prog) ? err : 0;
}

static void
kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
			       unsigned long ret_ip, struct pt_regs *regs,
			       void *data)
{
	struct bpf_kprobe_multi_link *link;

	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, true, data);
}
static int symbols_cmp_r(const void *a, const void *b, const void *priv)
{
	const char **str_a = (const char **) a;
	const char **str_b = (const char **) b;

	return strcmp(*str_a, *str_b);
}

struct multi_symbols_sort {
	const char **funcs;
	u64 *cookies;
};

static void symbols_swap_r(void *a, void *b, int size, const void *priv)
{
	const struct multi_symbols_sort *data = priv;
	const char **name_a = a, **name_b = b;

	swap(*name_a, *name_b);

	/* If defined, swap also related cookies. */
	if (data->cookies) {
		u64 *cookie_a, *cookie_b;

		cookie_a = data->cookies + (name_a - data->funcs);
		cookie_b = data->cookies + (name_b - data->funcs);
		swap(*cookie_a, *cookie_b);
	}
}
struct modules_array {
	struct module **mods;
	int mods_cnt;
	int mods_cap;
};

static int add_module(struct modules_array *arr, struct module *mod)
{
	struct module **mods;

	if (arr->mods_cnt == arr->mods_cap) {
		arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
		mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
		if (!mods)
			return -ENOMEM;
		arr->mods = mods;
	}

	arr->mods[arr->mods_cnt] = mod;
	arr->mods_cnt++;
	return 0;
}

static bool has_module(struct modules_array *arr, struct module *mod)
{
	int i;

	for (i = arr->mods_cnt - 1; i >= 0; i--) {
		if (arr->mods[i] == mod)
			return true;
	}
	return false;
}

static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
{
	struct modules_array arr = {};
	u32 i, err = 0;

	for (i = 0; i < addrs_cnt; i++) {
		struct module *mod;

		preempt_disable();
		mod = __module_address(addrs[i]);
		/* Either no module or it's already stored */
		if (!mod || has_module(&arr, mod)) {
			preempt_enable();
			continue;
		}
		if (!try_module_get(mod))
			err = -EINVAL;
		preempt_enable();
		if (err)
			break;
		err = add_module(&arr, mod);
		if (err) {
			module_put(mod);
			break;
		}
	}

	/* We return either err < 0 in case of error, ... */
	if (err) {
		kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
		kfree(arr.mods);
		return err;
	}

	/* or number of modules found if everything is ok. */
	*mods = arr.mods;
	return arr.mods_cnt;
}

static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		if (!within_error_injection_list(addrs[i]))
			return -EINVAL;
	}
	return 0;
}
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_kprobe_multi_link *link = NULL;
	struct bpf_link_primer link_primer;
	void __user *ucookies;
	unsigned long *addrs;
	u32 flags, cnt, size;
	void __user *uaddrs;
	u64 *cookies = NULL;
	void __user *usyms;
	int err;

	/* no support for 32bit archs yet */
	if (sizeof(u64) != sizeof(void *))
		return -EOPNOTSUPP;

	if (!is_kprobe_multi(prog))
		return -EINVAL;

	flags = attr->link_create.kprobe_multi.flags;
	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
		return -EINVAL;

	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
	if (!!uaddrs == !!usyms)
		return -EINVAL;

	cnt = attr->link_create.kprobe_multi.cnt;
	if (!cnt)
		return -EINVAL;
	if (cnt > MAX_KPROBE_MULTI_CNT)
		return -E2BIG;

	size = cnt * sizeof(*addrs);
	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return -ENOMEM;

	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
	if (ucookies) {
		cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
		if (!cookies) {
			err = -ENOMEM;
			goto error;
		}
		if (copy_from_user(cookies, ucookies, size)) {
			err = -EFAULT;
			goto error;
		}
	}

	if (uaddrs) {
		if (copy_from_user(addrs, uaddrs, size)) {
			err = -EFAULT;
			goto error;
		}
	} else {
		struct multi_symbols_sort data = {
			.cookies = cookies,
		};
		struct user_syms us;

		err = copy_user_syms(&us, usyms, cnt);
		if (err)
			goto error;

		if (cookies)
			data.funcs = us.syms;

		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
		       symbols_swap_r, &data);

		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
		free_user_syms(&us);
		if (err)
			goto error;
	}

	if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
		err = -EINVAL;
		goto error;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		err = -ENOMEM;
		goto error;
	}

	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
		      &bpf_kprobe_multi_link_lops, prog);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto error;

	if (!(flags & BPF_F_KPROBE_MULTI_RETURN))
		link->fp.entry_handler = kprobe_multi_link_handler;
	if ((flags & BPF_F_KPROBE_MULTI_RETURN) || is_kprobe_session(prog))
		link->fp.exit_handler = kprobe_multi_link_exit_handler;
	if (is_kprobe_session(prog))
		link->fp.entry_data_size = sizeof(u64);

	link->addrs = addrs;
	link->cookies = cookies;
	link->cnt = cnt;
	link->flags = flags;

	if (cookies) {
		/*
		 * Sorting addresses will trigger sorting cookies as well
		 * (check bpf_kprobe_multi_cookie_swap). This way we can
		 * find cookie based on the address in bpf_get_attach_cookie
		 * helper.
		 */
		sort_r(addrs, cnt, sizeof(*addrs),
		       bpf_kprobe_multi_cookie_cmp,
		       bpf_kprobe_multi_cookie_swap,
		       link);
	}

	err = get_modules_for_addrs(&link->mods, addrs, cnt);
	if (err < 0) {
		bpf_link_cleanup(&link_primer);
		return err;
	}
	link->mods_cnt = err;

	err = register_fprobe_ips(&link->fp, addrs, cnt);
	if (err) {
		kprobe_multi_put_modules(link->mods, link->mods_cnt);
		bpf_link_cleanup(&link_primer);
		return err;
	}

	return bpf_link_settle(&link_primer);

error:
	kfree(link);
	kvfree(addrs);
	kvfree(cookies);
	return err;
}
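
/*
 * User space typically reaches this attach path via BPF_LINK_CREATE
 * with attach_type BPF_TRACE_KPROBE_MULTI (wrapped by libbpf's
 * bpf_program__attach_kprobe_multi_opts()).
 */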
#else /* !CONFIG_FPROBE */
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	return 0;
}
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	return 0;
}
#endif
#ifdef CONFIG_UPROBES
struct bpf_uprobe_multi_link;

struct bpf_uprobe {
	struct bpf_uprobe_multi_link *link;
	loff_t offset;
	unsigned long ref_ctr_offset;
	u64 cookie;
	struct uprobe *uprobe;
	struct uprobe_consumer consumer;
	bool session;
};

struct bpf_uprobe_multi_link {
	struct path path;
	struct bpf_link link;
	u32 cnt;
	u32 flags;
	struct bpf_uprobe *uprobes;
	struct task_struct *task;
};

struct bpf_uprobe_multi_run_ctx {
	struct bpf_session_run_ctx session_ctx;
	unsigned long entry_ip;
	struct bpf_uprobe *uprobe;
};
static void bpf_uprobe_unregister(struct bpf_uprobe *uprobes, u32 cnt)
{
	u32 i;

	for (i = 0; i < cnt; i++)
		uprobe_unregister_nosync(uprobes[i].uprobe, &uprobes[i].consumer);

	if (cnt)
		uprobe_unregister_sync();
}

static void bpf_uprobe_multi_link_release(struct bpf_link *link)
{
	struct bpf_uprobe_multi_link *umulti_link;

	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
	bpf_uprobe_unregister(umulti_link->uprobes, umulti_link->cnt);
	if (umulti_link->task)
		put_task_struct(umulti_link->task);
	path_put(&umulti_link->path);
}

static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
{
	struct bpf_uprobe_multi_link *umulti_link;

	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
	kvfree(umulti_link->uprobes);
	kfree(umulti_link);
}
static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
						struct bpf_link_info *info)
{
	u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
	u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
	u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
	u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
	u32 upath_size = info->uprobe_multi.path_size;
	struct bpf_uprobe_multi_link *umulti_link;
	u32 ucount = info->uprobe_multi.count;
	int err = 0, i;
	char *p, *buf;
	long left = 0;

	if (!upath ^ !upath_size)
		return -EINVAL;

	if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount)
		return -EINVAL;

	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
	info->uprobe_multi.count = umulti_link->cnt;
	info->uprobe_multi.flags = umulti_link->flags;
	info->uprobe_multi.pid = umulti_link->task ?
				 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;

	upath_size = upath_size ? min_t(u32, upath_size, PATH_MAX) : PATH_MAX;
	buf = kmalloc(upath_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	p = d_path(&umulti_link->path, buf, upath_size);
	if (IS_ERR(p)) {
		kfree(buf);
		return PTR_ERR(p);
	}
	upath_size = buf + upath_size - p;

	if (upath)
		left = copy_to_user(upath, p, upath_size);
	kfree(buf);
	if (left)
		return -EFAULT;
	info->uprobe_multi.path_size = upath_size;

	if (!uoffsets && !ucookies && !uref_ctr_offsets)
		return 0;

	if (ucount < umulti_link->cnt)
		err = -ENOSPC;
	else
		ucount = umulti_link->cnt;

	for (i = 0; i < ucount; i++) {
		if (uoffsets &&
		    put_user(umulti_link->uprobes[i].offset, uoffsets + i))
			return -EFAULT;
		if (uref_ctr_offsets &&
		    put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
			return -EFAULT;
		if (ucookies &&
		    put_user(umulti_link->uprobes[i].cookie, ucookies + i))
			return -EFAULT;
	}

	return err;
}
static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
	.release = bpf_uprobe_multi_link_release,
	.dealloc_deferred = bpf_uprobe_multi_link_dealloc,
	.fill_link_info = bpf_uprobe_multi_link_fill_link_info,
};
static int uprobe_prog_run(struct bpf_uprobe *uprobe,
			   unsigned long entry_ip,
			   struct pt_regs *regs,
			   bool is_return, void *data)
{
	struct bpf_uprobe_multi_link *link = uprobe->link;
	struct bpf_uprobe_multi_run_ctx run_ctx = {
		.session_ctx = {
			.is_return = is_return,
			.data = data,
		},
		.entry_ip = entry_ip,
		.uprobe = uprobe,
	};
	struct bpf_prog *prog = link->link.prog;
	bool sleepable = prog->sleepable;
	struct bpf_run_ctx *old_run_ctx;
	int err;

	if (link->task && !same_thread_group(current, link->task))
		return 0;

	if (sleepable)
		rcu_read_lock_trace();
	else
		rcu_read_lock();

	migrate_disable();

	old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
	err = bpf_prog_run(link->link.prog, regs);
	bpf_reset_run_ctx(old_run_ctx);

	migrate_enable();

	if (sleepable)
		rcu_read_unlock_trace();
	else
		rcu_read_unlock();
	return err;
}
static bool
uprobe_multi_link_filter(struct uprobe_consumer *con, struct mm_struct *mm)
{
	struct bpf_uprobe *uprobe;

	uprobe = container_of(con, struct bpf_uprobe, consumer);
	return uprobe->link->task->mm == mm;
}

static int
uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
			  __u64 *data)
{
	struct bpf_uprobe *uprobe;
	int ret;

	uprobe = container_of(con, struct bpf_uprobe, consumer);
	ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs, false, data);
	if (uprobe->session)
		return ret ? UPROBE_HANDLER_IGNORE : 0;
	return 0;
}

static int
uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs,
			      __u64 *data)
{
	struct bpf_uprobe *uprobe;

	uprobe = container_of(con, struct bpf_uprobe, consumer);
	uprobe_prog_run(uprobe, func, regs, true, data);
	return 0;
}
static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	struct bpf_uprobe_multi_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
			       session_ctx.run_ctx);
	return run_ctx->entry_ip;
}

static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	struct bpf_uprobe_multi_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
			       session_ctx.run_ctx);
	return run_ctx->uprobe->cookie;
}
int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_uprobe_multi_link *link = NULL;
	unsigned long __user *uref_ctr_offsets;
	struct bpf_link_primer link_primer;
	struct bpf_uprobe *uprobes = NULL;
	struct task_struct *task = NULL;
	unsigned long __user *uoffsets;
	u64 __user *ucookies;
	void __user *upath;
	u32 flags, cnt, i;
	struct path path;
	char *name;
	pid_t pid;
	int err;

	/* no support for 32bit archs yet */
	if (sizeof(u64) != sizeof(void *))
		return -EOPNOTSUPP;

	if (!is_uprobe_multi(prog))
		return -EINVAL;

	flags = attr->link_create.uprobe_multi.flags;
	if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
		return -EINVAL;

	/*
	 * path, offsets and cnt are mandatory,
	 * ref_ctr_offsets and cookies are optional
	 */
	upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
	uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
	cnt = attr->link_create.uprobe_multi.cnt;
	pid = attr->link_create.uprobe_multi.pid;

	if (!upath || !uoffsets || !cnt || pid < 0)
		return -EINVAL;
	if (cnt > MAX_UPROBE_MULTI_CNT)
		return -E2BIG;

	uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
	ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);

	name = strndup_user(upath, PATH_MAX);
	if (IS_ERR(name)) {
		err = PTR_ERR(name);
		return err;
	}

	err = kern_path(name, LOOKUP_FOLLOW, &path);
	kfree(name);
	if (err)
		return err;

	if (!d_is_reg(path.dentry)) {
		err = -EBADF;
		goto error_path_put;
	}

	if (pid) {
		task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
		if (!task) {
			err = -ESRCH;
			goto error_path_put;
		}
	}

	err = -ENOMEM;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);

	if (!uprobes || !link)
		goto error_free;

	for (i = 0; i < cnt; i++) {
		if (__get_user(uprobes[i].offset, uoffsets + i)) {
			err = -EFAULT;
			goto error_free;
		}
		if (uprobes[i].offset < 0) {
			err = -EINVAL;
			goto error_free;
		}
		if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
			err = -EFAULT;
			goto error_free;
		}
		if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
			err = -EFAULT;
			goto error_free;
		}

		uprobes[i].link = link;

		if (!(flags & BPF_F_UPROBE_MULTI_RETURN))
			uprobes[i].consumer.handler = uprobe_multi_link_handler;
		if (flags & BPF_F_UPROBE_MULTI_RETURN || is_uprobe_session(prog))
			uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
		if (is_uprobe_session(prog))
			uprobes[i].session = true;
		if (pid)
			uprobes[i].consumer.filter = uprobe_multi_link_filter;
	}

	link->cnt = cnt;
	link->uprobes = uprobes;
	link->path = path;
	link->task = task;
	link->flags = flags;

	bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
		      &bpf_uprobe_multi_link_lops, prog);

	for (i = 0; i < cnt; i++) {
		uprobes[i].uprobe = uprobe_register(d_real_inode(link->path.dentry),
						    uprobes[i].offset,
						    uprobes[i].ref_ctr_offset,
						    &uprobes[i].consumer);
		if (IS_ERR(uprobes[i].uprobe)) {
			err = PTR_ERR(uprobes[i].uprobe);
			link->cnt = i;
			goto error_unregister;
		}
	}

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto error_unregister;

	return bpf_link_settle(&link_primer);

error_unregister:
	bpf_uprobe_unregister(uprobes, link->cnt);

error_free:
	kvfree(uprobes);
	kfree(link);
	if (task)
		put_task_struct(task);
error_path_put:
	path_put(&path);
	return err;
}
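
/*
 * Uprobe counterpart of the kprobe-multi attach above, typically
 * reached via BPF_LINK_CREATE with attach_type BPF_TRACE_UPROBE_MULTI
 * (wrapped by libbpf's bpf_program__attach_uprobe_multi()).
 */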
#else /* !CONFIG_UPROBES */
int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	return 0;
}
static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	return 0;
}
#endif /* CONFIG_UPROBES */
__bpf_kfunc_start_defs();

__bpf_kfunc bool bpf_session_is_return(void)
{
	struct bpf_session_run_ctx *session_ctx;

	session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
	return session_ctx->is_return;
}

__bpf_kfunc __u64 *bpf_session_cookie(void)
{
	struct bpf_session_run_ctx *session_ctx;

	session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
	return session_ctx->data;
}

__bpf_kfunc_end_defs();
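
/*
 * Usage sketch for the session kfuncs above; a kprobe.session program
 * might measure entry-to-return latency roughly like this (use_delta()
 * is a hypothetical consumer):
 *
 *   SEC("kprobe.session/do_sys_openat2")
 *   int handler(struct pt_regs *ctx)
 *   {
 *           __u64 *cookie = bpf_session_cookie();
 *
 *           if (!bpf_session_is_return())
 *                   *cookie = bpf_ktime_get_ns();
 *           else
 *                   use_delta(bpf_ktime_get_ns() - *cookie);
 *           return 0;
 *   }
 */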
BTF_KFUNCS_START(kprobe_multi_kfunc_set_ids)
BTF_ID_FLAGS(func, bpf_session_is_return)
BTF_ID_FLAGS(func, bpf_session_cookie)
BTF_KFUNCS_END(kprobe_multi_kfunc_set_ids)

static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
	if (!btf_id_set8_contains(&kprobe_multi_kfunc_set_ids, kfunc_id))
		return 0;

	if (!is_kprobe_session(prog) && !is_uprobe_session(prog))
		return -EACCES;

	return 0;
}

static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &kprobe_multi_kfunc_set_ids,
	.filter = bpf_kprobe_multi_filter,
};

static int __init bpf_kprobe_multi_kfuncs_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
}

late_initcall(bpf_kprobe_multi_kfuncs_init);
__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type,
				     u64 value)
{
	if (type != PIDTYPE_PID && type != PIDTYPE_TGID)
		return -EINVAL;

	return bpf_send_signal_common(sig, type, task, value);
}

__bpf_kfunc_end_defs();