// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>

#include "trace_probe.h"
#include "trace.h"

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If bpf_prog_array_valid() fetched prog_array was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL then, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * out on events when it was updated in between this and the
	 * rcu_dereference() which is accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
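/*
 * Illustrative sketch only, not part of this file: a kprobe dispatch path
 * is expected to consume the return value roughly as in
 * kernel/trace/trace_kprobe.c:
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
 *		return;		// 0: filter the event out
 *	// non-zero: fall through and record the event into the ring buffer
 */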
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}
/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	__trace_printk(0 /* Fake ip */,					\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}
static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};
const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}
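/*
 * Illustrative sketch of the specifier policy enforced in
 * bpf_trace_printk() above (comment only): at most three conversions and
 * a single '%s' are accepted, e.g.
 *
 *	"pid %d comm %s\n"	-> accepted (two conversions, one '%s')
 *	"%s %s\n"		-> rejected (second '%s')
 *	"%d %d %d %d\n"		-> rejected (more than three conversions)
 *	"%5d\n"			-> rejected (width modifiers are not handled)
 */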
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}
BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_event_output(event, sd, regs);
	return 0;
}
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	return __bpf_perf_event_output(regs, map, flags, sd);
}
static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
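/*
 * Illustrative sketch of the program-side call this helper backs (comment
 * only; the section name, map name and struct are hypothetical user code,
 * not part of this file):
 *
 *	struct event { u32 pid; };
 *
 *	SEC("kprobe/do_sys_open")
 *	int probe(struct pt_regs *ctx)
 *	{
 *		struct event e = { .pid = bpf_get_current_pid_tgid() >> 32 };
 *
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 *
 * where &events is a BPF_MAP_TYPE_PERF_EVENT_ARRAY; BPF_F_CURRENT_CPU
 * selects the current CPU's slot, as handled in __bpf_perf_event_output().
 */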
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	return __bpf_perf_event_output(regs, map, flags, sd);
}
BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};
BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_unsafe() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func		= bpf_probe_read_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *
tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	default:
		return NULL;
	}
}
static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return tracing_func_proto(func_id, prog);
	}
}
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}
const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};
BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}
const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};
BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	default:
		return tracing_func_proto(func_id, prog);
	}
}
/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack
 */
static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs);
BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);

	perf_fetch_caller_regs(regs);
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}
static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);

	perf_fetch_caller_regs(regs);
	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}
static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	/* largest tracepoint in the kernel has 12 args */
	if (off < 0 || off >= sizeof(__u64) * 12)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	return true;
}
const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
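/*
 * Illustrative sketch of the rewrite performed above (comment only): a
 * program-side load of ctx->sample_period, i.e.
 *
 *	rX = *(u64 *)(r1 + offsetof(struct bpf_perf_event_data, sample_period));
 *
 * is converted into a two-step walk through the kernel-side context:
 *
 *	rX = *(ptr *)(r1 + offsetof(struct bpf_perf_event_data_kern, data));
 *	rX = *(u64 *)(rX + offsetof(struct perf_sample_data, period));
 */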
const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};
static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64
int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array __rcu *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if they are on the function entry,
	 * and only if they are on the opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = event->tp_event->prog_array;
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}
void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array __rcu *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = event->tp_event->prog_array;
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}
int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
				       ids,
				       ids_len,
				       &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod = __module_address((unsigned long)btp);

	if (mod)
		module_put(mod);
}
static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	rcu_read_lock();
	preempt_disable();
	(void) BPF_PROG_RUN(prog, args);
	preempt_enable();
	rcu_read_unlock();
}
#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
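/*
 * Illustrative sketch (comment only) of what BPF_TRACE_DEFN_x(2) expands
 * to, modulo whitespace:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */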
static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}
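/*
 * Worked example of the bound enforced above (comment only): a tracepoint
 * exporting 3 arguments presents them to the program as 3 consecutive u64s,
 * so the verifier-computed max_ctx_offset may be at most 3 * 8 = 24 bytes;
 * a program that reads at ctx offset 24 or beyond is rejected with -EINVAL.
 */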
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	int err;

	mutex_lock(&bpf_event_mutex);
	err = __bpf_probe_register(btp, prog);
	mutex_unlock(&bpf_event_mutex);

	return err;
}
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	int err;

	mutex_lock(&bpf_event_mutex);
	err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
	mutex_unlock(&bpf_event_mutex);

	return err;
}
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}
#ifdef CONFIG_MODULES
int bpf_event_notify(struct notifier_block *nb, unsigned long op, void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		return 0;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

	return 0;
}
static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */