// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};
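/*
 * An entry probe records a single address in vaddr[] (the probed
 * instruction pointer); a return probe records two (function entry
 * address and return address). Fetched argument data is laid out
 * immediately after the vaddr[] slots, as computed by the macros below.
 */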
#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))

static int trace_uprobe_create(int argc, const char **argv);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}

/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif
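/* Read the n-th word of the user stack; this backs the $stackN fetch arg. */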
static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	return probe_mem_read(dest, src, size);
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}

static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	return fetch_store_string(addr, dest, base);
}

/* Return the length of the string, including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}

static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	return fetch_store_strlen(addr);
}
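/*
 * An @+FILE_OFFSET fetch arg is resolved at probe-hit time: the hit address
 * minus the probe's file offset gives the mapping base, to which the
 * requested file offset is added.
 */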
static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}

static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];
	int len;

	if (!argc)
		return true;

	len = strlen(tu->filename);
	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
		return false;

	if (tu->ref_ctr_offset == 0)
		snprintf(buf, sizeof(buf), "0x%0*lx",
				(int)(sizeof(void *) * 2), tu->offset);
	else
		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
				(int)(sizeof(void *) * 2), tu->offset,
				tu->ref_ctr_offset);
	if (strcmp(buf, &argv[0][len + 1]))
		return false;

	argc--; argv++;

	return trace_probe_match_command_args(&tu->tp, argc, argv);
}

static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
	   (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
	   trace_uprobe_match_command_head(tu, argc, argv);
}

static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_uprobe, tp);
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;
	int ret;

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	ret = trace_probe_init(&tu->tp, event, group, true);
	if (ret < 0)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(tu->tp.event->filter);
	return tu;

error:
	kfree(tu);

	return ERR_PTR(ret);
}
static void free_trace_uprobe(struct trace_uprobe *tu)
{
	if (!tu)
		return;

	path_put(&tu->path);
	trace_probe_cleanup(&tu->tp);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	if (trace_probe_has_sibling(&tu->tp))
		goto unreg;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

unreg:
	dyn_event_remove(&tu->devent);
	trace_probe_unlink(&tu->tp);
	free_trace_uprobe(tu);
	return 0;
}

static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
					 struct trace_uprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	struct trace_probe *pos;
	struct inode *comp_inode = d_real_inode(comp->path.dentry);
	int i;

	list_for_each_entry(pos, &tpe->probes, list) {
		orig = container_of(pos, struct trace_uprobe, tp);
		if (comp_inode != d_real_inode(orig->path.dentry) ||
		    comp->offset != orig->offset)
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}

static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
	if (ret) {
		/* Note that argument starts index = 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_uprobe_has_same_uprobe(to, tu)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tu->tp, &to->tp);
	if (!ret)
		dyn_event_add(&tu->devent);

	return ret;
}

/*
 * A uprobe with multiple reference counters is not allowed, i.e. if the
 * inode and offset match, the reference counter offset *must* match as
 * well. There is one exception, though: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), the same uprobe may take
 * a new reference counter as long as the new one does not conflict with
 * any other existing ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	for_each_trace_uprobe(tmp, pos) {
		if (new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return -EINVAL;
		}
	}
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	ret = validate_ref_ctr_offset(tu);
	if (ret)
		goto end;

	/* register as an event */
	old_tu = find_probe_event(trace_probe_name(&tu->tp),
				  trace_probe_group_name(&tu->tp));
	if (old_tu) {
		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_uprobe(tu, old_tu);
		}
		goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent);

end:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 */
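/*
 * For example (path and offset are illustrative only):
 *
 *   echo 'p:bash_probe /bin/bash:0x4245c0 %di'   >> uprobe_events
 *   echo 'r:bash_probe_ret /bin/bash:0x4245c0 $retval' >> uprobe_events
 *   echo '-:bash_probe'                          >> uprobe_events
 *
 * appends an entry probe, a return probe, and then removes the first one
 * again, all through the "uprobe_events" file created in
 * init_uprobe_trace() below.
 */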
static int trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ret = 0;
	ref_ctr_offset = 0;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}

	if (argc < 2)
		return -ECANCELED;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}

	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	} else {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';
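		/*
		 * No explicit event name was given: derive one from the
		 * basename and offset. Probing, say, /bin/bash at offset
		 * 0x4245c0 (hypothetical) yields the event name
		 * "p_bash_0x4245c0" in the default "uprobes" group.
		 */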
		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		tmp = kstrdup(argv[i], GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto error;
		}

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
					is_return ? TPARG_FL_RETURN : 0);
		kfree(tmp);
		if (ret)
			goto error;
	}

	ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
	if (ret < 0)
		goto error;

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}

static int create_or_delete_trace_uprobe(int argc, char **argv)
{
	int ret;

	if (argv[0][0] == '-')
		return dyn_event_release(argc, argv, &trace_uprobe_ops);

	ret = trace_uprobe_create(argc, (const char **)argv);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}

/* Probes listing interfaces */
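/*
 * Each probe is listed in the same syntax that creates it, with the offset
 * zero-padded to pointer width; for instance (offset illustrative only):
 *
 *   p:uprobes/p_bash_0x4245c0 /bin/bash:0x00000000004245c0
 */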
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
			trace_probe_name(&tu->tp), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
					create_or_delete_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_probe_name(&tu->tp), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
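/*
 * Per-CPU scratch buffer (one page each) used to stage fetched argument
 * data before it is copied into the ftrace ring buffer or the perf buffer.
 * The mutex serializes users that may have migrated off their original CPU.
 */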
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}
/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = trace_uprobe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (unlikely(!tu))
		goto out;

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
	int ret;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);

	if (tu->ref_ctr_offset)
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	else
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);

	if (ret)
		tu->inode = NULL;

	return ret;
}

static void __probe_event_disable(struct trace_probe *tp)
{
	struct trace_probe *pos;
	struct trace_uprobe *tu;

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		if (!tu->inode)
			continue;

		uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
		tu->inode = NULL;
	}
}

static int probe_event_enable(struct trace_event_call *call,
			struct trace_event_file *file, filter_func_t filter)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	bool enabled;
	int ret;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This may also change "enabled" state */
	if (file) {
		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
			return -EINTR;

		ret = trace_probe_add_file(tp, file);
		if (ret < 0)
			return ret;
	} else {
		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
			return -EINTR;

		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = trace_uprobe_enable(tu, filter);
		if (ret) {
			__probe_event_disable(tp);
			goto err_buffer;
		}
	}

	return 0;

err_buffer:
	uprobe_buffer_disable();

err_flags:
	if (file)
		trace_probe_remove_file(tp, file);
	else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	return ret;
}

static void probe_event_disable(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return;

	if (!trace_probe_is_enabled(tp))
		return;

	if (file) {
		if (trace_probe_remove_file(tp, file) < 0)
			return;

		if (trace_probe_is_enabled(tp))
			return;
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	__probe_event_disable(tp);
	uprobe_buffer_disable();
}
static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);
	if (unlikely(!tu))
		return -ENODEV;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
			  struct perf_event *event)
{
	return __uprobe_perf_filter(filter, event->hw.target->mm);
}

static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
				       struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = filter->nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			trace_uprobe_filter_event(filter, event);
	} else {
		filter->nr_systemwide--;
		done = filter->nr_systemwide;
	}
	write_unlock(&filter->rwlock);

	return done;
}

/* This returns true if the filter always covers target mm */
static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
				    struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = filter->nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			trace_uprobe_filter_event(filter, event);
		list_add(&event->hw.tp_list, &filter->perf_events);
	} else {
		done = filter->nr_systemwide;
		filter->nr_systemwide++;
	}
	write_unlock(&filter->rwlock);

	return done;
}
static int uprobe_perf_close(struct trace_event_call *call,
			     struct perf_event *event)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
		if (ret)
			break;
	}

	return ret;
}

static int uprobe_perf_open(struct trace_event_call *call,
			    struct perf_event *event)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	int err = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err) {
			uprobe_perf_close(call, event);
			break;
		}
	}

	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe_filter *filter;
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	filter = tu->tp.event->filter;

	read_lock(&filter->rwlock);
	ret = __uprobe_perf_filter(filter, mm);
	read_unlock(&filter->rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
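/*
 * Resolve a perf event back to the probed file and offset; used by the
 * bpf task-fd-query interface to report where a uprobe-backed event points.
 */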
int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = trace_uprobe_primary_from_call(event->tp_event);
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(event, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(event, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(event, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(event, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(event, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(event, data);

#endif
	default:
		return 0;
	}
	return 0;
}
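/*
 * Breakpoint-hit handler: hands the event off to the ftrace and/or perf
 * paths. Returning UPROBE_HANDLER_REMOVE (from the perf filter) asks the
 * uprobes core to remove the breakpoint from the current mm.
 */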
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static inline void init_trace_event_call(struct trace_uprobe *tu)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
	call->class->reg = trace_uprobe_register;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
	init_trace_event_call(tu);

	return trace_probe_register_event_call(&tu->tp);
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	return trace_probe_unregister_event_call(&tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to dyn_event, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * a duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return trace_probe_event_call(&tu->tp);
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);

	free_trace_uprobe(tu);
}
#endif	/* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);