/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"
struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};
#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
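
/*
 * Resulting record layout (vaddr[] is filled in by the trace handlers
 * further down in this file):
 *
 *   probe:    | trace_entry | vaddr[0] = IP                      | arg data |
 *   retprobe: | trace_entry | vaddr[0] = FUNC, vaddr[1] = RET IP | arg data |
 */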

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif
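
/*
 * Example: with the usual downward-growing stack (CONFIG_STACK_GROWSUP
 * unset), the n-th slot is read from sp + n * sizeof(long); on a 64-bit
 * target, n = 2 reads the word at sp + 16.
 */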

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
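
/*
 * Note: a fault in copy_from_user() above makes the fetch return 0, so a
 * fetched value of 0 is indistinguishable from an unreadable stack slot.
 */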

/*
 * Uprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)get_user_stack_nth(regs,			\
					      ((unsigned long)offset)); \
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	void __user *vaddr = (void __force __user *) addr;		\
									\
	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
		*(type *)dest = 0;					\
	else								\
		*(type *) dest = retval;				\
}
DEFINE_BASIC_FETCH_FUNCS(memory)

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;
	int maxlen = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *) addr;

	if (!maxlen)
		return;

	ret = strncpy_from_user(dst, src, maxlen);
	if (ret == maxlen)
		dst[--ret] = '\0';

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}

static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	if (len == 0 || len > MAX_STRING_SIZE)  /* Failed to check length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}

static unsigned long translate_user_vaddr(void *file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + (unsigned long)file_offset;
}

#define DEFINE_FETCH_file_offset(type)					\
static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,	\
					       void *offset, void *dest)\
{									\
	void *vaddr = (void *)translate_user_vaddr(offset);		\
									\
	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
}
DEFINE_BASIC_FETCH_FUNCS(file_offset)
DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)
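
/*
 * These file_offset variants back the "@+OFFSET" fetch syntax:
 * translate_user_vaddr() rebases the file offset onto the mapping that
 * hit the breakpoint (base = bp_addr - tu->offset), and the plain memory
 * fetch then reads the resulting user virtual address.
 */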

/* Fetch type information table */
static const struct fetch_type uprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),

	ASSIGN_FETCH_TYPE_END
};

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	iput(tu->inode);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event: call with uprobe_lock held */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(trace_event_name(&tu->tp.call),
			tu->tp.call.class->system);
	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warning("Failed to register probe event (%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
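/*
 * For example, via the tracefs uprobe_events file (the binary path and
 * offset below are illustrative only):
 *
 *   echo 'p:zfree /bin/zsh:0x446420 %ip %ax' > uprobe_events   add a probe
 *   echo 'r:zfree_ret /bin/zsh:0x446420' >> uprobe_events      add a retprobe
 *   echo '-:zfree_ret' >> uprobe_events                        remove one
 */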
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete, is_return;
	int i, ret;

	inode = NULL;
	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		pr_info("probe point must have a filename.\n");
		return -EINVAL;
	}
	arg = strchr(argv[1], ':');
	if (!arg) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	inode = igrab(d_inode(path.dentry));
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->inode = inode;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false,
						 uprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	iput(inode);

	pr_info("Failed to parse address or file.\n");

	return ret;
}

static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call));
	seq_printf(m, " %s:", tu->filename);

	/* Don't print "0x  (null)" when offset is 0 */
	if (tu->offset) {
		seq_printf(m, "0x%p", (void *)tu->offset);
	} else {
		switch (sizeof(void *)) {
		case 4:
			seq_printf(m, "0x00000000");
			break;
		case 8:
		default:
			seq_printf(m, "0x0000000000000000");
			break;
		}
	}

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}
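
/*
 * Given the format strings above, a registered probe lists as, e.g.
 * (illustrative values):
 *
 *   p:uprobes/zfree /bin/zsh:0x0000000000446420 arg1=%ax
 */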

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
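
/*
 * Fetched argument data is staged in a per-cpu scratch page before being
 * copied into the ftrace ring buffer or the perf buffer.  The handlers
 * run in process context and may migrate between CPUs, so each per-cpu
 * buffer is protected by a mutex rather than by disabling preemption
 * (see uprobe_buffer_get() below).
 */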
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
			goto out;
	}

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		goto err_buffer;

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}

static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_rcu();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}

static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}

static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct bpf_prog *prog = call->prog;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
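	/*
	 * Worked example for the rounding above: the perf raw sample is a
	 * u32 size header followed by the record, and the pair must end up
	 * u64-aligned.  For size = 13, ALIGN(13 + 4, 8) - 4 = 20, and
	 * 20 + 4 = 24 is a multiple of 8.
	 */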
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
 out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
#endif	/* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret;

	/* Initialize trace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);