// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"
struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};
#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))
#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
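/*
 * Worked example (illustrative, not from the original source; assumes an
 * 8-byte struct trace_entry and 8-byte longs): SIZEOF_TRACE_ENTRY(false)
 * is 8 + 8*1 == 16 (header plus the probed IP), SIZEOF_TRACE_ENTRY(true)
 * is 8 + 8*2 == 24 (function address plus return IP), and
 * DATAOF_TRACE_ENTRY() then points just past those slots, where the
 * fetched argument data begins.
 */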
struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};
static int trace_uprobe_create(int argc, const char **argv);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			       struct dyn_event *ev);
static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};
/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};
static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}
static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}
/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
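/*
 * Usage sketch (illustrative, not part of the original file): counting
 * all registered uprobe events; callers would hold event_mutex while
 * walking the dyn_event list.
 *
 *	struct dyn_event *dpos;
 *	struct trace_uprobe *pos;
 *	int nr = 0;
 *
 *	for_each_trace_uprobe(pos, dpos)
 *		nr++;
 */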
#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);
struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);
#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif
static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
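/*
 * Illustrative note (not in the original source): this helper backs the
 * "$stackN" fetch arguments, e.g. get_user_stack_nth(regs, 2) reads the
 * third word relative to the user stack pointer, falling back to 0 if
 * the access faults.
 */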
/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}
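/*
 * Worked example (a sketch, assuming the make_data_loc()/get_loc_*()
 * encoding from trace_probe.h packs "length << 16 | offset"): for a
 * string stored 24 bytes into the entry with 7 bytes copied (including
 * the NUL), *(u32 *)dest ends up as (7 << 16) | 24, so readers can
 * locate and bound the string without a separate length field.
 */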
/* Return the length of the string -- including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}
static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}
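/*
 * Illustrative note (not in the original source): this supports fetch
 * args given as file offsets. With a breakpoint hit at bp_addr for a
 * probe placed at file offset tu->offset, base_addr is where the file
 * is mapped in the current task, so e.g. a global at file offset 0x2010
 * is read at base_addr + 0x2010 in that task's address space.
 */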
/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}
static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}
static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}
static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}
static bool trace_uprobe_match(const char *system, const char *event,
			       struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
	    (!system || strcmp(tu->tp.call.class->system, system) == 0);
}
/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !group)
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}
static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	if (!tu)
		return;

	path_put(&tu->path);

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}
static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}
/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	dyn_event_remove(&tu->devent);
	free_trace_uprobe(tu);
	return 0;
}
/*
 * A uprobe with multiple reference counters is not allowed, i.e. if the
 * inode and offset match, the reference counter offset *must* match as
 * well. There is one exception, though: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), the same uprobe may
 * carry a new reference counter, as long as the new one does not
 * conflict with any other existing ones.
 */
static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp, *old = NULL;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	old = find_probe_event(trace_event_name(&new->tp.call),
				new->tp.call.class->system);

	for_each_trace_uprobe(tmp, pos) {
		if ((old ? old != tmp : true) &&
		    new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return ERR_PTR(-EINVAL);
		}
	}
	return old;
}
/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	/* register as an event */
	old_tu = find_old_trace_uprobe(tu);
	if (IS_ERR(old_tu)) {
		ret = PTR_ERR(old_tu);
		goto end;
	}

	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent);

end:
	mutex_unlock(&event_mutex);

	return ret;
}
/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 */
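/*
 * Usage sketch via the tracefs uprobe_events file (illustrative; the
 * paths, offsets and event names below are made up):
 *
 *	# probe entry at offset 0x4245c0 in /bin/bash
 *	echo 'p:mybash /bin/bash:0x4245c0' >> uprobe_events
 *	# return probe with a reference counter offset and one fetch arg
 *	echo 'r:myret /lib/libc.so.6:0x9a130(0x21f0) $retval' >> uprobe_events
 */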
static int trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ret = 0;
	ref_ctr_offset = 0;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}

	if (argc < 2)
		return -ECANCELED;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}

	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	} else {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		tmp = kstrdup(argv[i], GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto error;
		}

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
					is_return ? TPARG_FL_RETURN : 0);
		kfree(tmp);
		if (ret)
			goto error;
	}

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}
static int create_or_delete_trace_uprobe(int argc, char **argv)
{
	int ret;

	if (argv[0][0] == '-')
		return dyn_event_release(argc, argv, &trace_uprobe_ops);

	ret = trace_uprobe_create(argc, (const char **)argv);
	return ret == -ECANCELED ? -EINVAL : ret;
}
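/*
 * Deletion goes through dyn_event_release(), e.g. (illustrative event
 * name):
 *
 *	echo '-:uprobes/mybash' >> uprobe_events
 */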
static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}
/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}
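/*
 * Example of the resulting listing line (illustrative values; the
 * "(0x...)" part appears only when a reference counter offset is set):
 *
 *	p:uprobes/mybash /bin/bash:0x00000000004245c0(0x21f0) arg1=%ax
 */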
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}
static const struct seq_operations probes_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show   = probes_seq_show
};
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
					create_or_delete_trace_uprobe);
}
static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}
static const struct seq_operations profile_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};
static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}
static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;
static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}
static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}
static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}
static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}
static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}
static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}
/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}
static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}
/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
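/*
 * Illustrative rendering (made-up addresses; argument formatting comes
 * from print_probe_args()): an entry probe prints something like
 * "mybash: (0x4245c0) arg1=0x2a", while a return probe prints
 * "myret: (0x401234 <- 0x9a130) arg1=0x2a", i.e. return address on the
 * left, probed function on the right.
 */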
typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);
static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);
	if (tu->ref_ctr_offset) {
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	} else {
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	}

	if (ret)
		goto err_buffer;

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}
static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_rcu();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->inode = NULL;
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}
static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}
#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}
static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}
static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}
/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}
static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = event->tp_event->data;
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */
static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
}
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};
static inline void init_trace_event_call(struct trace_uprobe *tu,
					 struct trace_event_call *call)
{
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
}
static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret = 0;

	init_trace_event_call(tu, call);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}
static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}
#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to dyn_event, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * a duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu, &tu->tp.call);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return &tu->tp.call;
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}
void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = container_of(event_call, struct trace_uprobe, tp.call);

	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;

	free_trace_uprobe(tu);
}
#endif	/* CONFIG_PERF_EVENTS */
/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);

	return 0;
}

fs_initcall(init_uprobe_trace);