/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/error-injection.h>

#include "trace_probe.h"
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
        unsigned int ret;

        if (in_nmi()) /* not supported yet */
                return 1;

        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
                /*
                 * since some bpf program is already running on this cpu,
                 * don't call into another bpf program (same or different)
                 * and don't send kprobe event into ring-buffer,
                 * so return zero here
                 */
                ret = 0;
                goto out;
        }

        /*
         * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
         * to all call sites, we did a bpf_prog_array_valid() there to check
         * whether call->prog_array is empty or not, which is
         * a heuristic to speed up execution.
         *
         * If bpf_prog_array_valid() fetched prog_array was
         * non-NULL, we go into trace_call_bpf() and do the actual
         * proper rcu_dereference() under RCU lock.
         * If it turns out that prog_array is NULL then, we bail out.
         * For the opposite, if the bpf_prog_array_valid() fetched pointer
         * was NULL, you'll skip the prog_array with the risk of missing
         * out of events when it was updated in between this and the
         * rcu_dereference() which is accepted risk.
         */
        ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
        __this_cpu_dec(bpf_prog_active);

        return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
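
/*
 * Illustrative sketch (not part of this file): a minimal kprobe BPF program
 * whose return value is interpreted by trace_call_bpf() as documented above.
 * The attach point and program name are made up; SEC() and the helper
 * declarations are assumed to come from libbpf's bpf_helpers.h when such a
 * program is built.
 */
#if 0
SEC("kprobe/do_sys_open")
int kprobe__do_sys_open(struct pt_regs *ctx)
{
        /*
         * Returning 0 filters the event out; returning 1 stores the
         * kprobe event into the perf ring buffer.
         */
        return 1;
}
#endif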
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
        regs_set_return_value(regs, rc);
        override_function_with_return(regs);
        return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
        .func           = bpf_override_return,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
};
#endif
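
/*
 * Illustrative sketch (not part of this file): how a kprobe program might use
 * the bpf_override_return() helper above. The attach point is hypothetical;
 * this only works on functions marked for error injection and with
 * CONFIG_BPF_KPROBE_OVERRIDE enabled, and the usual libbpf helper
 * declarations are assumed.
 */
#if 0
SEC("kprobe/open_ctree")
int inject_enomem(struct pt_regs *ctx)
{
        /* make the probed function return -ENOMEM instead of running */
        bpf_override_return(ctx, (unsigned long)-12 /* -ENOMEM */);
        return 0;
}
#endif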
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
        int ret;

        ret = probe_kernel_read(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
                memset(dst, 0, size);

        return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
        .func           = bpf_probe_read,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg3_type      = ARG_ANYTHING,
};
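
/*
 * Illustrative sketch (not part of this file): a kprobe program copying a
 * value from an arbitrary kernel address with the bpf_probe_read() helper
 * wrapped above. PT_REGS_PARM1(), SEC() and the attach point are assumed
 * from the usual libbpf tracing headers and are hypothetical.
 */
#if 0
SEC("kprobe/kfree_skb")
int read_first_arg(struct pt_regs *ctx)
{
        const void *src = (const void *)PT_REGS_PARM1(ctx);
        u64 val = 0;

        /* on failure the helper zeroes the destination and returns < 0 */
        if (bpf_probe_read(&val, sizeof(val), src) < 0)
                return 0;
        return 0;
}
#endif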
BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
           u32, size)
{
        /*
         * Ensure we're in user context which is safe for the helper to
         * run. This helper has no business in a kthread.
         *
         * access_ok() should prevent writing to non-user memory, but in
         * some situations (nommu, temporary switch, etc) access_ok() does
         * not provide enough validation, hence the check on KERNEL_DS.
         */

        if (unlikely(in_interrupt() ||
                     current->flags & (PF_KTHREAD | PF_EXITING)))
                return -EPERM;
        if (unlikely(uaccess_kernel()))
                return -EPERM;
        if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
                return -EPERM;

        return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
        .func           = bpf_probe_write_user,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_ANYTHING,
        .arg2_type      = ARG_PTR_TO_MEM,
        .arg3_type      = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
        pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
                            current->comm, task_pid_nr(current));

        return &bpf_probe_write_user_proto;
}
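
/*
 * Illustrative sketch (not part of this file): a kprobe program that uses
 * bpf_probe_write_user() to overwrite a user-space buffer passed to a
 * syscall. The attach point and names are hypothetical; the helper fails
 * with -EPERM outside process context, matching the checks above.
 */
#if 0
SEC("kprobe/sys_read")
int scribble_user_buf(struct pt_regs *ctx)
{
        char msg[] = "bpf was here";
        void *ubuf = (void *)PT_REGS_PARM2(ctx);        /* user buffer */

        bpf_probe_write_user(ubuf, msg, sizeof(msg));
        return 0;
}
#endif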
/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
           u64, arg2, u64, arg3)
{
        bool str_seen = false;
        int mod[3] = {};
        int fmt_cnt = 0;
        u64 unsafe_addr;
        char buf[64];
        int i;

        /*
         * bpf_check()->check_func_arg()->check_stack_boundary()
         * guarantees that fmt points to bpf program stack,
         * fmt_size bytes of it were initialized and fmt_size > 0
         */
        if (fmt[--fmt_size] != 0)
                return -EINVAL;

        /* check format string for allowed specifiers */
        for (i = 0; i < fmt_size; i++) {
                if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
                        return -EINVAL;

                if (fmt[i] != '%')
                        continue;

                if (fmt_cnt >= 3)
                        return -EINVAL;

                /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
                i++;
                if (fmt[i] == 'l') {
                        mod[fmt_cnt]++;
                        i++;
                } else if (fmt[i] == 'p' || fmt[i] == 's') {
                        mod[fmt_cnt]++;
                        i++;
                        if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
                                return -EINVAL;
                        fmt_cnt++;
                        if (fmt[i - 1] == 's') {
                                if (str_seen)
                                        /* allow only one '%s' per fmt string */
                                        return -EINVAL;
                                str_seen = true;

                                switch (fmt_cnt) {
                                case 1:
                                        unsafe_addr = arg1;
                                        arg1 = (long) buf;
                                        break;
                                case 2:
                                        unsafe_addr = arg2;
                                        arg2 = (long) buf;
                                        break;
                                case 3:
                                        unsafe_addr = arg3;
                                        arg3 = (long) buf;
                                        break;
                                }
                                buf[0] = 0;
                                strncpy_from_unsafe(buf,
                                                    (void *) (long) unsafe_addr,
                                                    sizeof(buf));
                        }
                        continue;
                }

                if (fmt[i] == 'l') {
                        mod[fmt_cnt]++;
                        i++;
                }

                if (fmt[i] != 'i' && fmt[i] != 'd' &&
                    fmt[i] != 'u' && fmt[i] != 'x')
                        return -EINVAL;
                fmt_cnt++;
        }

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT() __BPF_ARG3_TP()
#define __BPF_TP(...)                                                   \
        __trace_printk(0 /* Fake ip */,                                 \
                       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)                                              \
        ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))        \
          ? __BPF_TP(arg1, ##__VA_ARGS__)                               \
          : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))    \
              ? __BPF_TP((long)arg1, ##__VA_ARGS__)                     \
              : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)                                              \
        ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))        \
          ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)                          \
          : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))    \
              ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)                \
              : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)                                              \
        ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))        \
          ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)                          \
          : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))    \
              ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)                \
              : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

        return __BPF_TP_EMIT();
}
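
/*
 * Illustrative sketch (not part of this file): how a BPF program calls this
 * helper within the restrictions above (limited specifiers, at most one %s
 * and at most three arguments). bpf_trace_printk(), bpf_get_current_comm()
 * and SEC() are assumed from libbpf's bpf_helpers.h; the attach point is
 * made up.
 */
#if 0
SEC("kprobe/do_sys_open")
int trace_open(struct pt_regs *ctx)
{
        char fmt[] = "open: comm %s pid %d\n";        /* one %s is allowed */
        char comm[16];

        bpf_get_current_comm(&comm, sizeof(comm));
        bpf_trace_printk(fmt, sizeof(fmt), comm,
                         bpf_get_current_pid_tgid() >> 32);
        return 0;
}
#endif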
static const struct bpf_func_proto bpf_trace_printk_proto = {
        .func           = bpf_trace_printk,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_MEM,
        .arg2_type      = ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
        /*
         * this program might be calling bpf_trace_printk,
         * so allocate per-cpu printk buffers
         */
        trace_printk_init_buffers();

        return &bpf_trace_printk_proto;
}
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
                     u64 *value, u64 *enabled, u64 *running)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        unsigned int cpu = smp_processor_id();
        u64 index = flags & BPF_F_INDEX_MASK;
        struct bpf_event_entry *ee;

        if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
                return -EINVAL;
        if (index == BPF_F_CURRENT_CPU)
                index = cpu;
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;

        ee = READ_ONCE(array->ptrs[index]);
        if (!ee)
                return -ENOENT;

        return perf_event_read_local(ee->event, value, enabled, running);
}
BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
        u64 value = 0;
        int err;

        err = get_map_perf_counter(map, flags, &value, NULL, NULL);
        /*
         * this api is ugly since we miss [-22..-2] range of valid
         * counter values, but that's uapi
         */
        if (err)
                return err;
        return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
        .func           = bpf_perf_event_read,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_ANYTHING,
};
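
/*
 * Illustrative sketch (not part of this file): because bpf_perf_event_read()
 * folds errors into the returned counter value (see the comment above), a
 * program that needs to tell them apart can use bpf_perf_event_read_value()
 * instead. The "counters" map, its sizing and the attach point are
 * hypothetical; the map-definition convention is the one used by the
 * samples/selftests headers of this era.
 */
#if 0
struct bpf_map_def SEC("maps") counters = {
        .type           = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
        .key_size       = sizeof(int),
        .value_size     = sizeof(u32),
        .max_entries    = 64,
};

SEC("kprobe/finish_task_switch")
int read_counter(struct pt_regs *ctx)
{
        struct bpf_perf_event_value v = {};
        int err;

        err = bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
                                        &v, sizeof(v));
        if (err)
                return 0;       /* the error is unambiguous here */
        /* v.counter, v.enabled and v.running are now valid */
        return 0;
}
#endif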
BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
           struct bpf_perf_event_value *, buf, u32, size)
{
        int err = -EINVAL;

        if (unlikely(size != sizeof(struct bpf_perf_event_value)))
                goto clear;
        err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
                                   &buf->running);
        if (err)
                goto clear;
        return 0;
clear:
        memset(buf, 0, size);
        return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
        .func           = bpf_perf_event_read_value,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg4_type      = ARG_CONST_SIZE,
};
static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
                        u64 flags, struct perf_sample_data *sd)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        unsigned int cpu = smp_processor_id();
        u64 index = flags & BPF_F_INDEX_MASK;
        struct bpf_event_entry *ee;
        struct perf_event *event;

        if (index == BPF_F_CURRENT_CPU)
                index = cpu;
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;

        ee = READ_ONCE(array->ptrs[index]);
        if (!ee)
                return -ENOENT;

        event = ee->event;
        if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
                     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
                return -EINVAL;

        if (unlikely(event->oncpu != cpu))
                return -EOPNOTSUPP;

        perf_event_output(event, sd, regs);
        return 0;
}
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
           u64, flags, void *, data, u64, size)
{
        struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
        struct perf_raw_record raw = {
                .frag = {
                        .size = size,
                        .data = data,
                },
        };

        if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
                return -EINVAL;

        perf_sample_data_init(sd, 0, 0);
        sd->raw = &raw;

        return __bpf_perf_event_output(regs, map, flags, sd);
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
        .func           = bpf_perf_event_output,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_PTR_TO_MEM,
        .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
};
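
/*
 * Illustrative sketch (not part of this file): emitting an event from a
 * kprobe program into a BPF_MAP_TYPE_PERF_EVENT_ARRAY map via the
 * bpf_perf_event_output() helper wrapped above. The "events" map, the event
 * layout and the attach point are hypothetical; map and helper declarations
 * follow the samples/selftests conventions of this era.
 */
#if 0
struct bpf_map_def SEC("maps") events = {
        .type           = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
        .key_size       = sizeof(int),
        .value_size     = sizeof(u32),
        .max_entries    = 64,
};

struct event {
        __u32 pid;
        __u64 ts;
};

SEC("kprobe/do_sys_open")
int emit_event(struct pt_regs *ctx)
{
        struct event e = {
                .pid = bpf_get_current_pid_tgid() >> 32,
                .ts  = bpf_ktime_get_ns(),
        };

        /* BPF_F_CURRENT_CPU selects the perf event of the current CPU */
        bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
        return 0;
}
#endif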
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
                     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
        struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
        struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
        struct perf_raw_frag frag = {
                .copy           = ctx_copy,
                .size           = ctx_size,
                .data           = ctx,
        };
        struct perf_raw_record raw = {
                .frag = {
                        {
                                .next   = ctx_size ? &frag : NULL,
                        },
                        .size   = meta_size,
                        .data   = meta,
                },
        };

        perf_fetch_caller_regs(regs);
        perf_sample_data_init(sd, 0, 0);
        sd->raw = &raw;

        return __bpf_perf_event_output(regs, map, flags, sd);
}
BPF_CALL_0(bpf_get_current_task)
{
        return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
        .func           = bpf_get_current_task,
        .ret_type       = RET_INTEGER,
};
BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct cgroup *cgrp;

        if (unlikely(in_interrupt()))
                return -EINVAL;
        if (unlikely(idx >= array->map.max_entries))
                return -E2BIG;

        cgrp = READ_ONCE(array->ptrs[idx]);
        if (unlikely(!cgrp))
                return -EAGAIN;

        return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
        .func           = bpf_current_task_under_cgroup,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_ANYTHING,
};
BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
           const void *, unsafe_ptr)
{
        int ret;

        /*
         * The strncpy_from_unsafe() call will likely not fill the entire
         * buffer, but that's okay in this circumstance as we're probing
         * arbitrary memory anyway similar to bpf_probe_read() and might
         * as well probe the stack. Thus, memory is explicitly cleared
         * only in error case, so that improper users ignoring return
         * code altogether don't copy garbage; otherwise length of string
         * is returned that can be used for bpf_perf_event_output() et al.
         */
        ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
                memset(dst, 0, size);

        return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
        .func           = bpf_probe_read_str,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg3_type      = ARG_ANYTHING,
};
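
/*
 * Illustrative sketch (not part of this file): using the length returned by
 * bpf_probe_read_str() to emit only the bytes that were actually copied, as
 * suggested in the comment above. It reuses the hypothetical "events" map
 * from the earlier sketch; attach point and PT_REGS_PARM2() usage are
 * assumptions on top of the usual libbpf headers.
 */
#if 0
SEC("kprobe/do_sys_open")
int emit_filename(struct pt_regs *ctx)
{
        char name[64];
        int len;

        len = bpf_probe_read_str(name, sizeof(name),
                                 (const void *)PT_REGS_PARM2(ctx));
        /* len includes the trailing NUL on success */
        if (len <= 0 || len > sizeof(name))
                return 0;
        bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, name, len);
        return 0;
}
#endif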
static const struct bpf_func_proto *
tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_map_lookup_elem:
                return &bpf_map_lookup_elem_proto;
        case BPF_FUNC_map_update_elem:
                return &bpf_map_update_elem_proto;
        case BPF_FUNC_map_delete_elem:
                return &bpf_map_delete_elem_proto;
        case BPF_FUNC_probe_read:
                return &bpf_probe_read_proto;
        case BPF_FUNC_ktime_get_ns:
                return &bpf_ktime_get_ns_proto;
        case BPF_FUNC_tail_call:
                return &bpf_tail_call_proto;
        case BPF_FUNC_get_current_pid_tgid:
                return &bpf_get_current_pid_tgid_proto;
        case BPF_FUNC_get_current_task:
                return &bpf_get_current_task_proto;
        case BPF_FUNC_get_current_uid_gid:
                return &bpf_get_current_uid_gid_proto;
        case BPF_FUNC_get_current_comm:
                return &bpf_get_current_comm_proto;
        case BPF_FUNC_trace_printk:
                return bpf_get_trace_printk_proto();
        case BPF_FUNC_get_smp_processor_id:
                return &bpf_get_smp_processor_id_proto;
        case BPF_FUNC_get_numa_node_id:
                return &bpf_get_numa_node_id_proto;
        case BPF_FUNC_perf_event_read:
                return &bpf_perf_event_read_proto;
        case BPF_FUNC_probe_write_user:
                return bpf_get_probe_write_proto();
        case BPF_FUNC_current_task_under_cgroup:
                return &bpf_current_task_under_cgroup_proto;
        case BPF_FUNC_get_prandom_u32:
                return &bpf_get_prandom_u32_proto;
        case BPF_FUNC_probe_read_str:
                return &bpf_probe_read_str_proto;
        default:
                return NULL;
        }
}
static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
                return &bpf_perf_event_output_proto;
        case BPF_FUNC_get_stackid:
                return &bpf_get_stackid_proto;
        case BPF_FUNC_perf_event_read_value:
                return &bpf_perf_event_read_value_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
        case BPF_FUNC_override_return:
                return &bpf_override_return_proto;
#endif
        default:
                return tracing_func_proto(func_id, prog);
        }
}
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
                                        const struct bpf_prog *prog,
                                        struct bpf_insn_access_aux *info)
{
        if (off < 0 || off >= sizeof(struct pt_regs))
                return false;
        if (type != BPF_READ)
                return false;
        /*
         * Assertion for 32 bit to make sure last 8 byte access
         * (BPF_DW) to the last 4 byte member is disallowed.
         */
        if (off + size > sizeof(struct pt_regs))
                return false;

        return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
        .get_func_proto         = kprobe_prog_func_proto,
        .is_valid_access        = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};
BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
           u64, flags, void *, data, u64, size)
{
        struct pt_regs *regs = *(struct pt_regs **)tp_buff;

        /*
         * r1 points to perf tracepoint buffer where first 8 bytes are hidden
         * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
         * from there and call the same bpf_perf_event_output() helper inline.
         */
        return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
        .func           = bpf_perf_event_output_tp,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_PTR_TO_MEM,
        .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
};
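
/*
 * Illustrative sketch (not part of this file): a tracepoint program whose
 * context is the perf tracepoint buffer handled above. The leading 8 bytes
 * (which carry the pt_regs pointer) are not accessible to the program, so
 * sample-style argument structs start with a pad field. The struct layout,
 * map name and attach point are hypothetical approximations of the
 * tracepoint's format file.
 */
#if 0
struct sched_wakeup_args {
        unsigned long long pad;         /* inaccessible first 8 bytes */
        char comm[16];
        int pid;
};

SEC("tracepoint/sched/sched_wakeup")
int on_wakeup(struct sched_wakeup_args *args)
{
        /* reuses the hypothetical "events" map from the earlier sketch */
        bpf_perf_event_output(args, &events, BPF_F_CURRENT_CPU,
                              &args->pid, sizeof(args->pid));
        return 0;
}
#endif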
BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
           u64, flags)
{
        struct pt_regs *regs = *(struct pt_regs **)tp_buff;

        /*
         * Same comment as in bpf_perf_event_output_tp(), only that this time
         * the other helper's function body cannot be inlined due to being
         * external, thus we need to call raw helper function.
         */
        return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
                               flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
        .func           = bpf_get_stackid_tp,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
};
static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
                return &bpf_perf_event_output_proto_tp;
        case BPF_FUNC_get_stackid:
                return &bpf_get_stackid_proto_tp;
        default:
                return tracing_func_proto(func_id, prog);
        }
}
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
                                    const struct bpf_prog *prog,
                                    struct bpf_insn_access_aux *info)
{
        if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
                return false;
        if (type != BPF_READ)
                return false;

        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
        return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
        .get_func_proto         = tp_prog_func_proto,
        .is_valid_access        = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};
BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
           struct bpf_perf_event_value *, buf, u32, size)
{
        int err = -EINVAL;

        if (unlikely(size != sizeof(struct bpf_perf_event_value)))
                goto clear;
        err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
                                    &buf->running);
        if (err)
                goto clear;
        return 0;
clear:
        memset(buf, 0, size);
        return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
        .func           = bpf_perf_prog_read_value,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg3_type      = ARG_CONST_SIZE,
};
static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
                return &bpf_perf_event_output_proto_tp;
        case BPF_FUNC_get_stackid:
                return &bpf_get_stackid_proto_tp;
        case BPF_FUNC_perf_prog_read_value:
                return &bpf_perf_prog_read_value_proto;
        default:
                return tracing_func_proto(func_id, prog);
        }
}
/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output and/or bpf_get_stack_id
 */
static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs);
BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
           struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
        struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);

        perf_fetch_caller_regs(regs);
        return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
        .func           = bpf_perf_event_output_raw_tp,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_PTR_TO_MEM,
        .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
           struct bpf_map *, map, u64, flags)
{
        struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);

        perf_fetch_caller_regs(regs);
        /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
        return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
                               flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
        .func           = bpf_get_stackid_raw_tp,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
};
static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
                return &bpf_perf_event_output_proto_raw_tp;
        case BPF_FUNC_get_stackid:
                return &bpf_get_stackid_proto_raw_tp;
        default:
                return tracing_func_proto(func_id, prog);
        }
}
static bool raw_tp_prog_is_valid_access(int off, int size,
                                        enum bpf_access_type type,
                                        const struct bpf_prog *prog,
                                        struct bpf_insn_access_aux *info)
{
        /* largest tracepoint in the kernel has 12 args */
        if (off < 0 || off >= sizeof(__u64) * 12)
                return false;
        if (type != BPF_READ)
                return false;

        return true;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
        .get_func_proto         = raw_tp_prog_func_proto,
        .is_valid_access        = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
                                    const struct bpf_prog *prog,
                                    struct bpf_insn_access_aux *info)
{
        const int size_u64 = sizeof(u64);

        if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
                return false;
        if (type != BPF_READ)
                return false;

        switch (off) {
        case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
                bpf_ctx_record_field_size(info, size_u64);
                if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
                        return false;
                break;
        case bpf_ctx_range(struct bpf_perf_event_data, addr):
                bpf_ctx_record_field_size(info, size_u64);
                if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
                        return false;
                break;
        default:
                if (size != sizeof(long))
                        return false;
        }

        return true;
}
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
                                      const struct bpf_insn *si,
                                      struct bpf_insn *insn_buf,
                                      struct bpf_prog *prog, u32 *target_size)
{
        struct bpf_insn *insn = insn_buf;

        switch (si->off) {
        case offsetof(struct bpf_perf_event_data, sample_period):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
                                                       data), si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_perf_event_data_kern, data));
                *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
                                      bpf_target_off(struct perf_sample_data, period, 8,
                                                     target_size));
                break;
        case offsetof(struct bpf_perf_event_data, addr):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
                                                       data), si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_perf_event_data_kern, data));
                *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
                                      bpf_target_off(struct perf_sample_data, addr, 8,
                                                     target_size));
                break;
        default:
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
                                                       regs), si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_perf_event_data_kern, regs));
                *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
                                      si->off);
                break;
        }

        return insn - insn_buf;
}
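
/*
 * Illustrative sketch (not part of this file): a perf_event program whose
 * loads of ctx->sample_period and ctx->regs are rewritten by
 * pe_prog_convert_ctx_access() above into loads from the kernel-side
 * bpf_perf_event_data_kern. The attach details are hypothetical; the
 * headers follow the usual libbpf conventions.
 */
#if 0
SEC("perf_event")
int on_sample(struct bpf_perf_event_data *ctx)
{
        char fmt[] = "period %llu ip %llx\n";

        bpf_trace_printk(fmt, sizeof(fmt), ctx->sample_period,
                         PT_REGS_IP(&ctx->regs));
        return 0;
}
#endif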
const struct bpf_verifier_ops perf_event_verifier_ops = {
        .get_func_proto         = pe_prog_func_proto,
        .is_valid_access        = pe_prog_is_valid_access,
        .convert_ctx_access     = pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);
#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
                               struct bpf_prog *prog)
{
        struct bpf_prog_array __rcu *old_array;
        struct bpf_prog_array *new_array;
        int ret = -EEXIST;

        /*
         * Kprobe override only works if they are on the function entry,
         * and only if they are on the opt-in list.
         */
        if (prog->kprobe_override &&
            (!trace_kprobe_on_func_entry(event->tp_event) ||
             !trace_kprobe_error_injectable(event->tp_event)))
                return -EINVAL;

        mutex_lock(&bpf_event_mutex);

        if (event->prog)
                goto unlock;

        old_array = event->tp_event->prog_array;
        if (old_array &&
            bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
                ret = -E2BIG;
                goto unlock;
        }

        ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
        if (ret < 0)
                goto unlock;

        /* set the new array to event->tp_event and set event->prog */
        event->prog = prog;
        rcu_assign_pointer(event->tp_event->prog_array, new_array);
        bpf_prog_array_free(old_array);

unlock:
        mutex_unlock(&bpf_event_mutex);
        return ret;
}
void perf_event_detach_bpf_prog(struct perf_event *event)
{
        struct bpf_prog_array __rcu *old_array;
        struct bpf_prog_array *new_array;
        int ret;

        mutex_lock(&bpf_event_mutex);

        if (!event->prog)
                goto unlock;

        old_array = event->tp_event->prog_array;
        ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
        if (ret < 0) {
                bpf_prog_array_delete_safe(old_array, event->prog);
        } else {
                rcu_assign_pointer(event->tp_event->prog_array, new_array);
                bpf_prog_array_free(old_array);
        }

        bpf_prog_put(event->prog);
        event->prog = NULL;

unlock:
        mutex_unlock(&bpf_event_mutex);
}
int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
        struct perf_event_query_bpf __user *uquery = info;
        struct perf_event_query_bpf query = {};
        u32 *ids, prog_cnt, ids_len;
        int ret;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (event->attr.type != PERF_TYPE_TRACEPOINT)
                return -EINVAL;
        if (copy_from_user(&query, uquery, sizeof(query)))
                return -EFAULT;

        ids_len = query.ids_len;
        if (ids_len > BPF_TRACE_MAX_PROGS)
                return -E2BIG;
        ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
        if (!ids)
                return -ENOMEM;
        /*
         * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
         * is required when user only wants to check for uquery->prog_cnt.
         * There is no need to check for it since the case is handled
         * gracefully in bpf_prog_array_copy_info.
         */

        mutex_lock(&bpf_event_mutex);
        ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
                                       ids, ids_len, &prog_cnt);
        mutex_unlock(&bpf_event_mutex);

        if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
            copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
                ret = -EFAULT;

        kfree(ids);
        return ret;
}
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
{
        struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

        for (; btp < __stop__bpf_raw_tp; btp++) {
                if (!strcmp(btp->tp->name, name))
                        return btp;
        }
        return NULL;
}
static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
        rcu_read_lock();
        preempt_disable();
        (void) BPF_PROG_RUN(prog, args);
        preempt_enable();
        rcu_read_unlock();
}
#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
									\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
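
/*
 * For reference, a rough sketch of what BPF_TRACE_DEFN_x(2) above expands to
 * (whitespace added):
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */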
static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
        struct tracepoint *tp = btp->tp;

        /*
         * check that program doesn't access arguments beyond what's
         * available in this tracepoint
         */
        if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
                return -EINVAL;

        return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
        int err;

        mutex_lock(&bpf_event_mutex);
        err = __bpf_probe_register(btp, prog);
        mutex_unlock(&bpf_event_mutex);
        return err;
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
        int err;

        mutex_lock(&bpf_event_mutex);
        err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
        mutex_unlock(&bpf_event_mutex);
        return err;
}