/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"
struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void *)(entry) + SIZEOF_TRACE_ENTRY(is_return))
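
/*
 * Illustrative note (not in the original source), based on how the trace
 * and perf handlers below fill the entry: an entry probe records only the
 * probed instruction pointer, while a return probe records the function
 * address and the return address, so the fetched argument data starts at
 * a different offset:
 *
 *	entry probe:  vaddr[0] = ip                    data follows one long
 *	return probe: vaddr[0] = func, vaddr[1] = ip   data follows two longs
 */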
struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
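
/*
 * Illustrative note (not in the original source): SIZEOF_TRACE_UPROBE(n)
 * sizes a single allocation whose trailing tp.args[] array holds n
 * probe_arg slots; alloc_trace_uprobe() below uses it as
 * kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL).
 */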
static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);
struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);
#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
/*
 * Uprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)get_user_stack_nth(regs,			\
				((unsigned long)offset));		\
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL
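
/*
 * Illustrative expansion (assuming FETCH_FUNC_NAME() from trace_probe.h
 * pastes "fetch_<method>_<type>"): DEFINE_FETCH_stack(u32) would produce
 * roughly
 *
 *	static void fetch_stack_u32(struct pt_regs *regs,
 *				    void *offset, void *dest)
 *	{
 *		*(u32 *)dest = (u32)get_user_stack_nth(regs,
 *					((unsigned long)offset));
 *	}
 */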
#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	void __user *vaddr = (void __force __user *) addr;		\
									\
	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
		*(type *)dest = 0;					\
	else								\
		*(type *) dest = retval;				\
}
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;
	int maxlen = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *) addr;

	ret = strncpy_from_user(dst, src, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	if (len == 0 || len > MAX_STRING_SIZE)  /* Failed to check length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
static unsigned long translate_user_vaddr(void *file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + (unsigned long)file_offset;
}
#define DEFINE_FETCH_file_offset(type)					\
static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,	\
					       void *offset, void *dest)\
{									\
	void *vaddr = (void *)translate_user_vaddr(offset);		\
									\
	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
}
DEFINE_BASIC_FETCH_FUNCS(file_offset)
DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)
/* Fetch type information table */
static const struct fetch_type uprobes_fetch_type_table[] = {
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};
static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}
/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}
static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	path_put(&tu->path);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}
static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}
/* Unregister a trace_uprobe and probe_event: must be called with uprobe_lock held */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}
/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(trace_event_name(&tu->tp.call),
				  tu->tp.call.class->system);
	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}
/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
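
/*
 * Illustrative usage from tracefs (the binary path and offset below are
 * made-up examples, not taken from this file):
 *
 *	echo 'p:myprobe /bin/bash:0x4245c0' >> uprobe_events
 *	echo 'r:myretprobe /bin/bash:0x4245c0 $retval' >> uprobe_events
 *	echo '-:myprobe' >> uprobe_events
 */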
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct path path;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	unsigned long offset;
	bool is_delete, is_return;
	int i, ret;

	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');
		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);
		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(argv[1], ':');
	if (!arg) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	if (!d_is_reg(path.dentry)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->path = path;
	tu->filename = kstrdup(filename, GFP_KERNEL);
	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}
		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false,
						 uprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	pr_info("Failed to parse address or file.\n");

	return ret;
}
static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}
/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}
static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
}
static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}
static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};
static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}
static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;
static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}
static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}
static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}
static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}
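
/*
 * Illustrative usage (mirrors the dispatchers further below): the handler
 * path brackets its work on the per-cpu buffer with get/put:
 *
 *	ucb = uprobe_buffer_get();
 *	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
 *	...
 *	uprobe_buffer_put(ucb);
 */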
static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}
static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}
/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
			goto out;
	}

	trace_seq_putc(s, '\n');

out:
	return trace_handle_return(s);
}
typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);
static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		goto err_buffer;

	return 0;

err_buffer:
	uprobe_buffer_disable();

err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}
static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_sched();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->inode = NULL;
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}
static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}
#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}
static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}
static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
out:
	preempt_enable();
}
/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}
static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
#endif	/* CONFIG_PERF_EVENTS */
static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
}
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};
static inline void init_trace_event_call(struct trace_uprobe *tu,
					 struct trace_event_call *call)
{
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
}
static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret = 0;

	init_trace_event_call(tu, call);

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}
static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}
#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
{
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to uprobe_list, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu, &tu->tp.call);

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return &tu->tp.call;
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}
void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = container_of(event_call, struct trace_uprobe, tp.call);

	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;

	free_trace_uprobe(tu);
}
#endif /* CONFIG_PERF_EVENTS */
/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);

	return 0;
}

fs_initcall(init_uprobe_trace);