// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>

#include "trace_probe.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 *  0 - return from kprobe (event is filtered out)
 *  1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If bpf_prog_array_valid() fetched prog_array was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL then, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * out on events when it was updated in between this and the
	 * rcu_dereference(), which is an accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);

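/*
 * Illustrative sketch (not part of this file): a caller such as the kprobe
 * perf handler is expected to consult the return value roughly like this,
 * where "call" and "regs" come from the probe site:
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
 *		return;		// 0: the event is filtered out
 *	// otherwise store the kprobe event into the ring buffer
 */
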
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	int ret = probe_user_read(dst, unsafe_ptr, size);

	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size);

	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr,
			     const bool compat)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto out;
	ret = compat ? probe_kernel_read(dst, unsafe_ptr, size) :
	      probe_kernel_read_strict(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
out:
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false);
}

static const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr,
				 const bool compat)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto out;
	/*
	 * The strncpy_from_unsafe_*() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = compat ? strncpy_from_unsafe(dst, unsafe_ptr, size) :
	      strncpy_from_unsafe_strict(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
out:
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false);
}

static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

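/*
 * Illustrative sketch (not part of this file): from the BPF program side the
 * string helpers above are typically used along these lines, where "src" is a
 * hypothetical kernel pointer taken from the probe context:
 *
 *	char buf[64];
 *	long len = bpf_probe_read_kernel_str(buf, sizeof(buf), src);
 *	// len < 0: error, buf was zeroed by the helper
 *	// len > 0: string length including the trailing NUL, usable as the
 *	//          size argument of bpf_perf_event_output() et al.
 */
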
BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */
	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return probe_user_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

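/*
 * Illustrative sketch (not part of this file): the helper above is only handed
 * out through bpf_get_probe_write_proto(), so every program load that requests
 * BPF_FUNC_probe_write_user triggers the rate-limited warning. A BPF program
 * would use it roughly as below, with "uptr" a hypothetical user-space pointer
 * obtained from the probed context:
 *
 *	u32 val = 1;
 *	long err = bpf_probe_write_user(uptr, &val, sizeof(val));
 *	// err < 0 when not in a safe user context (see the checks above)
 */
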
/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */

		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			/* allow only one '%s' per fmt string */

			strncpy_from_unsafe(buf,
					    (void *) (long) unsafe_addr,
					    sizeof(buf));
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	__trace_printk(0 /* Fake ip */,					\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

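/*
 * Illustrative note (not part of this file): mod[i], filled in while the
 * format string is validated, encodes the width of the i-th argument
 * (roughly 0 = int, 1 = long, 2 = long long). For example, with
 * mod[] = { 2, 0, 1 } on a 64-bit arch, __BPF_TP_EMIT() effectively
 * expands to something like:
 *
 *	__trace_printk(0, fmt, arg1, (u32)arg2, arg3);
 */
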
static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

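/*
 * Illustrative sketch (not part of this file): a BPF program typically wraps
 * this helper the way libbpf's bpf_printk() convenience macro does, e.g.
 *
 *	char fmt[] = "pid %d comm %s\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, comm);
 *
 * subject to the specifier whitelist and single-%s limit enforced in
 * bpf_trace_printk() above; the output lands in
 * /sys/kernel/debug/tracing/trace_pipe.
 */
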
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

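/*
 * Illustrative sketch (not part of this file): from BPF the helper above is
 * used against a hypothetical BPF_MAP_TYPE_PERF_EVENT_ARRAY map "counters"
 * that user space populated with perf event fds:
 *
 *	u64 cnt = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 *
 * The counter value and a negative errno share the same u64, which is the
 * uapi wart noted in the comment above; bpf_perf_event_read_value() below
 * avoids the ambiguity by reporting the error separately.
 */
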
BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (err)
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	event = ee->event;

	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);

BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

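/*
 * Illustrative sketch (not part of this file): a kprobe program would emit a
 * sample through a hypothetical BPF_MAP_TYPE_PERF_EVENT_ARRAY map "events"
 * like this, which ends up in __bpf_perf_event_output() above:
 *
 *	struct data_t data = {};	// data_t is a hypothetical record type
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 */
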
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy	= ctx_copy,
		.size	= ctx_size,
		.data	= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

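/*
 * Illustrative sketch (not part of this file): a tracing program can deliver
 * SIGUSR1 to the whole thread group of the current task with
 *
 *	bpf_send_signal(SIGUSR1);		// PIDTYPE_TGID path above
 *
 * or to just the current thread with bpf_send_signal_thread(SIGUSR1). When
 * the helper cannot send directly, delivery is deferred to the per-cpu
 * irq_work set up in send_signal_irq_work_init() below.
 */
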
static const struct bpf_func_proto *
tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_compat_str_proto;
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto		= kprobe_prog_func_proto,
	.is_valid_access	= kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

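/*
 * Illustrative note (not part of this file): for these tracepoint programs the
 * context handed to the program is the perf trace buffer, whose first (hidden)
 * 8 bytes hold the struct pt_regs pointer dereferenced above:
 *
 *	tp_buff:  [ struct pt_regs * ][ tracepoint record ... ]
 *	            ^ hidden from BPF: offsets below sizeof(void *)
 *	              are rejected by tp_prog_is_valid_access() below
 */
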
BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (err)
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

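/*
 * Illustrative sketch (not part of this file): every raw-tracepoint helper
 * below pairs the two functions above around its use of the per-cpu regs,
 * roughly:
 *
 *	struct pt_regs *regs = get_bpf_raw_tp_regs();
 *
 *	if (IS_ERR(regs))
 *		return PTR_ERR(regs);	// more than 3 nested users on this cpu
 *	// ... use regs ...
 *	put_bpf_raw_tp_regs();
 */
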
BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

static const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
	default:
		return raw_tp_prog_func_proto(func_id, prog);
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto		= raw_tp_prog_func_proto,
	.is_valid_access	= raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto		= tracing_prog_func_proto,
	.is_valid_access	= tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (size != sizeof(u64) || type != BPF_READ)
		return false;
	info->reg_type = PTR_TO_TP_BUFFER;

	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto		= raw_tp_prog_func_proto,
	.is_valid_access	= raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	/*
	 * Kprobe override only works if they are on the function entry,
	 * and only if they are on the opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}

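/*
 * Illustrative sketch (not part of this file): user space drives the function
 * above through the PERF_EVENT_IOC_QUERY_BPF ioctl on the perf event fd,
 * along these lines (error handling omitted):
 *
 *	struct perf_event_query_bpf *query;
 *
 *	query = calloc(1, sizeof(*query) + sizeof(__u32) * 64);
 *	query->ids_len = 64;
 *	ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query);
 *	// query->prog_cnt and query->ids[] now describe the attached programs
 */
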
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod = __module_address((unsigned long)btp);

	if (mod)
		module_put(mod);
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	(void) BPF_PROG_RUN(prog, args);
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
									\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)

BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);

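/*
 * Illustrative note (not part of this file): each line above stamps out one
 * exported entry point; e.g. BPF_TRACE_DEFN_x(2) expands to roughly
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * which is what the __bpf_trace_##call stubs generated for each raw
 * tracepoint end up calling.
 */
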
static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
	} else {
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}

static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		return 0;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

	return 0;
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */