kernel/trace/trace_fprobe.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Fprobe-based tracing events
 * Copyright (C) 2022 Google LLC.
 */
#define pr_fmt(fmt) "trace_fprobe: " fmt
#include <asm/ptrace.h>

#include <linux/fprobe.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/security.h>
#include <linux/tracepoint.h>
#include <linux/uaccess.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_kernel.h"
#include "trace_probe_tmpl.h"

#define FPROBE_EVENT_SYSTEM "fprobes"
#define TRACEPOINT_EVENT_SYSTEM "tracepoints"
#define RETHOOK_MAXACTIVE_MAX 4096
#define TRACEPOINT_STUB ERR_PTR(-ENOENT)

static int trace_fprobe_create(const char *raw_command);
static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_fprobe_release(struct dyn_event *ev);
static bool trace_fprobe_is_busy(struct dyn_event *ev);
static bool trace_fprobe_match(const char *system, const char *event,
                               int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_fprobe_ops = {
        .create = trace_fprobe_create,
        .show = trace_fprobe_show,
        .is_busy = trace_fprobe_is_busy,
        .free = trace_fprobe_release,
        .match = trace_fprobe_match,
};

/*
 * Fprobe event core functions
 */
struct trace_fprobe {
        struct dyn_event        devent;
        struct fprobe           fp;
        const char              *symbol;
        struct tracepoint       *tpoint;
        struct module           *mod;
        struct trace_probe      tp;
};

static bool is_trace_fprobe(struct dyn_event *ev)
{
        return ev->ops == &trace_fprobe_ops;
}

static struct trace_fprobe *to_trace_fprobe(struct dyn_event *ev)
{
        return container_of(ev, struct trace_fprobe, devent);
}

/**
 * for_each_trace_fprobe - iterate over the trace_fprobe list
 * @pos: the struct trace_fprobe * for each entry
 * @dpos: the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_fprobe(pos, dpos)        \
        for_each_dyn_event(dpos)                \
                if (is_trace_fprobe(dpos) && (pos = to_trace_fprobe(dpos)))
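
/*
 * Usage sketch (illustrative, mirroring find_trace_fprobe() below): walk
 * every registered fprobe event, with event_mutex held by the caller.
 *
 *      struct dyn_event *pos;
 *      struct trace_fprobe *tf;
 *
 *      for_each_trace_fprobe(tf, pos)
 *              pr_info("probe on %s\n", trace_fprobe_symbol(tf));
 */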

static bool trace_fprobe_is_return(struct trace_fprobe *tf)
{
        return tf->fp.exit_handler != NULL;
}

static bool trace_fprobe_is_tracepoint(struct trace_fprobe *tf)
{
        return tf->tpoint != NULL;
}

static const char *trace_fprobe_symbol(struct trace_fprobe *tf)
{
        return tf->symbol ? tf->symbol : "unknown";
}

static bool trace_fprobe_is_busy(struct dyn_event *ev)
{
        struct trace_fprobe *tf = to_trace_fprobe(ev);

        return trace_probe_is_enabled(&tf->tp);
}

static bool trace_fprobe_match_command_head(struct trace_fprobe *tf,
                                            int argc, const char **argv)
{
        char buf[MAX_ARGSTR_LEN + 1];

        if (!argc)
                return true;

        snprintf(buf, sizeof(buf), "%s", trace_fprobe_symbol(tf));
        if (strcmp(buf, argv[0]))
                return false;
        argc--; argv++;

        return trace_probe_match_command_args(&tf->tp, argc, argv);
}

static bool trace_fprobe_match(const char *system, const char *event,
                               int argc, const char **argv, struct dyn_event *ev)
{
        struct trace_fprobe *tf = to_trace_fprobe(ev);

        if (event[0] != '\0' && strcmp(trace_probe_name(&tf->tp), event))
                return false;

        if (system && strcmp(trace_probe_group_name(&tf->tp), system))
                return false;

        return trace_fprobe_match_command_head(tf, argc, argv);
}

static bool trace_fprobe_is_registered(struct trace_fprobe *tf)
{
        return fprobe_is_registered(&tf->fp);
}

/*
 * Note that we don't verify the fetch_insn code, since it does not come
 * from user space.
 */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
                   void *dest, void *base)
{
        struct pt_regs *regs = rec;
        unsigned long val;
        int ret;

retry:
        /* 1st stage: get value from context */
        switch (code->op) {
        case FETCH_OP_STACK:
                val = regs_get_kernel_stack_nth(regs, code->param);
                break;
        case FETCH_OP_STACKP:
                val = kernel_stack_pointer(regs);
                break;
        case FETCH_OP_RETVAL:
                val = regs_return_value(regs);
                break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
        case FETCH_OP_ARG:
                val = regs_get_kernel_argument(regs, code->param);
                break;
        case FETCH_OP_EDATA:
                val = *(unsigned long *)((unsigned long)edata + code->offset);
                break;
#endif
        case FETCH_NOP_SYMBOL:  /* Ignore a placeholder */
                code++;
                goto retry;
        default:
                ret = process_common_fetch_insn(code, &val);
                if (ret < 0)
                        return ret;
        }
        code++;

        return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn);
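
/*
 * Illustration (not from the original source): fetching is two-staged. For
 * an argument spec such as "count=$arg3:u32", the switch above resolves the
 * raw value (FETCH_OP_ARG with code->param == 2, assuming the usual 1-based
 * $argN to 0-based register-argument mapping), and
 * process_fetch_insn_bottom() then applies any dereference and type
 * conversion before storing the result at 'dest'.
 */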

/* function entry handler */
static nokprobe_inline void
__fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
                    struct pt_regs *regs,
                    struct trace_event_file *trace_file)
{
        struct fentry_trace_entry_head *entry;
        struct trace_event_call *call = trace_probe_event_call(&tf->tp);
        struct trace_event_buffer fbuffer;
        int dsize;

        if (WARN_ON_ONCE(call != trace_file->event_call))
                return;

        if (trace_trigger_soft_disabled(trace_file))
                return;

        dsize = __get_data_size(&tf->tp, regs, NULL);

        entry = trace_event_buffer_reserve(&fbuffer, trace_file,
                                           sizeof(*entry) + tf->tp.size + dsize);
        if (!entry)
                return;

        fbuffer.regs = regs;
        entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
        entry->ip = entry_ip;
        store_trace_args(&entry[1], &tf->tp, regs, NULL, sizeof(*entry), dsize);

        trace_event_buffer_commit(&fbuffer);
}

static void
fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
                  struct pt_regs *regs)
{
        struct event_file_link *link;

        trace_probe_for_each_link_rcu(link, &tf->tp)
                __fentry_trace_func(tf, entry_ip, regs, link->file);
}
NOKPROBE_SYMBOL(fentry_trace_func);

/* function exit handler */
static int trace_fprobe_entry_handler(struct fprobe *fp, unsigned long entry_ip,
                                      unsigned long ret_ip, struct pt_regs *regs,
                                      void *entry_data)
{
        struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);

        if (tf->tp.entry_arg)
                store_trace_entry_data(entry_data, &tf->tp, regs);

        return 0;
}
NOKPROBE_SYMBOL(trace_fprobe_entry_handler);

static nokprobe_inline void
__fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
                   unsigned long ret_ip, struct pt_regs *regs,
                   void *entry_data, struct trace_event_file *trace_file)
{
        struct fexit_trace_entry_head *entry;
        struct trace_event_buffer fbuffer;
        struct trace_event_call *call = trace_probe_event_call(&tf->tp);
        int dsize;

        if (WARN_ON_ONCE(call != trace_file->event_call))
                return;

        if (trace_trigger_soft_disabled(trace_file))
                return;

        dsize = __get_data_size(&tf->tp, regs, entry_data);

        entry = trace_event_buffer_reserve(&fbuffer, trace_file,
                                           sizeof(*entry) + tf->tp.size + dsize);
        if (!entry)
                return;

        fbuffer.regs = regs;
        entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
        entry->func = entry_ip;
        entry->ret_ip = ret_ip;
        store_trace_args(&entry[1], &tf->tp, regs, entry_data, sizeof(*entry), dsize);

        trace_event_buffer_commit(&fbuffer);
}

static void
fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
                 unsigned long ret_ip, struct pt_regs *regs, void *entry_data)
{
        struct event_file_link *link;

        trace_probe_for_each_link_rcu(link, &tf->tp)
                __fexit_trace_func(tf, entry_ip, ret_ip, regs, entry_data, link->file);
}
NOKPROBE_SYMBOL(fexit_trace_func);

#ifdef CONFIG_PERF_EVENTS

static int fentry_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
                            struct pt_regs *regs)
{
        struct trace_event_call *call = trace_probe_event_call(&tf->tp);
        struct fentry_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
        int rctx;

        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return 0;

        dsize = __get_data_size(&tf->tp, regs, NULL);
        __size = sizeof(*entry) + tf->tp.size + dsize;
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return 0;

        entry->ip = entry_ip;
        memset(&entry[1], 0, dsize);
        store_trace_args(&entry[1], &tf->tp, regs, NULL, sizeof(*entry), dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
        return 0;
}
NOKPROBE_SYMBOL(fentry_perf_func);

static void
fexit_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
                unsigned long ret_ip, struct pt_regs *regs,
                void *entry_data)
{
        struct trace_event_call *call = trace_probe_event_call(&tf->tp);
        struct fexit_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
        int rctx;

        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return;

        dsize = __get_data_size(&tf->tp, regs, entry_data);
        __size = sizeof(*entry) + tf->tp.size + dsize;
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return;

        entry->func = entry_ip;
        entry->ret_ip = ret_ip;
        store_trace_args(&entry[1], &tf->tp, regs, entry_data, sizeof(*entry), dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
}
NOKPROBE_SYMBOL(fexit_perf_func);
#endif  /* CONFIG_PERF_EVENTS */

static int fentry_dispatcher(struct fprobe *fp, unsigned long entry_ip,
                             unsigned long ret_ip, struct pt_regs *regs,
                             void *entry_data)
{
        struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
        int ret = 0;

        if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
                fentry_trace_func(tf, entry_ip, regs);
#ifdef CONFIG_PERF_EVENTS
        if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
                ret = fentry_perf_func(tf, entry_ip, regs);
#endif
        return ret;
}
NOKPROBE_SYMBOL(fentry_dispatcher);

static void fexit_dispatcher(struct fprobe *fp, unsigned long entry_ip,
                             unsigned long ret_ip, struct pt_regs *regs,
                             void *entry_data)
{
        struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);

        if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
                fexit_trace_func(tf, entry_ip, ret_ip, regs, entry_data);
#ifdef CONFIG_PERF_EVENTS
        if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
                fexit_perf_func(tf, entry_ip, ret_ip, regs, entry_data);
#endif
}
NOKPROBE_SYMBOL(fexit_dispatcher);

static void free_trace_fprobe(struct trace_fprobe *tf)
{
        if (tf) {
                trace_probe_cleanup(&tf->tp);
                kfree(tf->symbol);
                kfree(tf);
        }
}

/*
 * Allocate a new trace_fprobe and initialize it (including the fprobe).
 */
static struct trace_fprobe *alloc_trace_fprobe(const char *group,
                                               const char *event,
                                               const char *symbol,
                                               struct tracepoint *tpoint,
                                               struct module *mod,
                                               int maxactive,
                                               int nargs, bool is_return)
{
        struct trace_fprobe *tf;
        int ret = -ENOMEM;

        tf = kzalloc(struct_size(tf, tp.args, nargs), GFP_KERNEL);
        if (!tf)
                return ERR_PTR(ret);

        tf->symbol = kstrdup(symbol, GFP_KERNEL);
        if (!tf->symbol)
                goto error;

        if (is_return)
                tf->fp.exit_handler = fexit_dispatcher;
        else
                tf->fp.entry_handler = fentry_dispatcher;

        tf->tpoint = tpoint;
        tf->mod = mod;
        tf->fp.nr_maxactive = maxactive;

        ret = trace_probe_init(&tf->tp, event, group, false, nargs);
        if (ret < 0)
                goto error;

        dyn_event_init(&tf->devent, &trace_fprobe_ops);
        return tf;
error:
        free_trace_fprobe(tf);
        return ERR_PTR(ret);
}

static struct trace_fprobe *find_trace_fprobe(const char *event,
                                              const char *group)
{
        struct dyn_event *pos;
        struct trace_fprobe *tf;

        for_each_trace_fprobe(tf, pos)
                if (strcmp(trace_probe_name(&tf->tp), event) == 0 &&
                    strcmp(trace_probe_group_name(&tf->tp), group) == 0)
                        return tf;
        return NULL;
}

static inline int __enable_trace_fprobe(struct trace_fprobe *tf)
{
        if (trace_fprobe_is_registered(tf))
                enable_fprobe(&tf->fp);

        return 0;
}

static void __disable_trace_fprobe(struct trace_probe *tp)
{
        struct trace_fprobe *tf;

        list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
                if (!trace_fprobe_is_registered(tf))
                        continue;
                disable_fprobe(&tf->fp);
        }
}

/*
 * Enable trace_fprobe:
 * if @file is NULL, enable the "perf" handler; otherwise enable the "trace"
 * handler.
 */
static int enable_trace_fprobe(struct trace_event_call *call,
                               struct trace_event_file *file)
{
        struct trace_probe *tp;
        struct trace_fprobe *tf;
        bool enabled;
        int ret = 0;

        tp = trace_probe_primary_from_call(call);
        if (WARN_ON_ONCE(!tp))
                return -ENODEV;
        enabled = trace_probe_is_enabled(tp);

        /* This also changes the "enabled" state */
        if (file) {
                ret = trace_probe_add_file(tp, file);
                if (ret)
                        return ret;
        } else
                trace_probe_set_flag(tp, TP_FLAG_PROFILE);

        if (!enabled) {
                list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
                        /* TODO: check the fprobe is gone */
                        __enable_trace_fprobe(tf);
                }
        }

        return 0;
}

/*
 * Disable trace_fprobe:
 * if @file is NULL, disable the "perf" handler; otherwise disable the "trace"
 * handler.
 */
static int disable_trace_fprobe(struct trace_event_call *call,
                                struct trace_event_file *file)
{
        struct trace_probe *tp;

        tp = trace_probe_primary_from_call(call);
        if (WARN_ON_ONCE(!tp))
                return -ENODEV;

        if (file) {
                if (!trace_probe_get_file_link(tp, file))
                        return -ENOENT;
                if (!trace_probe_has_single_file(tp))
                        goto out;
                trace_probe_clear_flag(tp, TP_FLAG_TRACE);
        } else
                trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

        if (!trace_probe_is_enabled(tp))
                __disable_trace_fprobe(tp);

out:
        if (file)
                /*
                 * Synchronization is done in the function below. For perf
                 * events, file == NULL and perf_trace_event_unreg() calls
                 * tracepoint_synchronize_unregister() to synchronize the
                 * event, so we don't need to care about it here.
                 */
                trace_probe_remove_file(tp, file);

        return 0;
}

/* Event entry printers */
static enum print_line_t
print_fentry_event(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
{
        struct fentry_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
        struct trace_probe *tp;

        field = (struct fentry_trace_entry_head *)iter->ent;
        tp = trace_probe_primary_from_call(
                container_of(event, struct trace_event_call, event));
        if (WARN_ON_ONCE(!tp))
                goto out;

        trace_seq_printf(s, "%s: (", trace_probe_name(tp));

        if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_putc(s, ')');

        if (trace_probe_print_args(s, tp->args, tp->nr_args,
                                   (u8 *)&field[1], field) < 0)
                goto out;

        trace_seq_putc(s, '\n');
out:
        return trace_handle_return(s);
}

static enum print_line_t
print_fexit_event(struct trace_iterator *iter, int flags,
                  struct trace_event *event)
{
        struct fexit_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
        struct trace_probe *tp;

        field = (struct fexit_trace_entry_head *)iter->ent;
        tp = trace_probe_primary_from_call(
                container_of(event, struct trace_event_call, event));
        if (WARN_ON_ONCE(!tp))
                goto out;

        trace_seq_printf(s, "%s: (", trace_probe_name(tp));

        if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_puts(s, " <- ");

        if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_putc(s, ')');

        if (trace_probe_print_args(s, tp->args, tp->nr_args,
                                   (u8 *)&field[1], field) < 0)
                goto out;

        trace_seq_putc(s, '\n');
out:
        return trace_handle_return(s);
}

static int fentry_event_define_fields(struct trace_event_call *event_call)
{
        int ret;
        struct fentry_trace_entry_head field;
        struct trace_probe *tp;

        tp = trace_probe_primary_from_call(event_call);
        if (WARN_ON_ONCE(!tp))
                return -ENOENT;

        DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

        return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

static int fexit_event_define_fields(struct trace_event_call *event_call)
{
        int ret;
        struct fexit_trace_entry_head field;
        struct trace_probe *tp;

        tp = trace_probe_primary_from_call(event_call);
        if (WARN_ON_ONCE(!tp))
                return -ENOENT;

        DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
        DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

        return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

static struct trace_event_functions fentry_funcs = {
        .trace = print_fentry_event
};

static struct trace_event_functions fexit_funcs = {
        .trace = print_fexit_event
};

static struct trace_event_fields fentry_fields_array[] = {
        { .type = TRACE_FUNCTION_TYPE,
          .define_fields = fentry_event_define_fields },
        {}
};

static struct trace_event_fields fexit_fields_array[] = {
        { .type = TRACE_FUNCTION_TYPE,
          .define_fields = fexit_event_define_fields },
        {}
};

static int fprobe_register(struct trace_event_call *event,
                           enum trace_reg type, void *data);

static inline void init_trace_event_call(struct trace_fprobe *tf)
{
        struct trace_event_call *call = trace_probe_event_call(&tf->tp);

        if (trace_fprobe_is_return(tf)) {
                call->event.funcs = &fexit_funcs;
                call->class->fields_array = fexit_fields_array;
        } else {
                call->event.funcs = &fentry_funcs;
                call->class->fields_array = fentry_fields_array;
        }

        call->flags = TRACE_EVENT_FL_FPROBE;
        call->class->reg = fprobe_register;
}

static int register_fprobe_event(struct trace_fprobe *tf)
{
        init_trace_event_call(tf);

        return trace_probe_register_event_call(&tf->tp);
}

static int unregister_fprobe_event(struct trace_fprobe *tf)
{
        return trace_probe_unregister_event_call(&tf->tp);
}

static int __register_tracepoint_fprobe(struct trace_fprobe *tf)
{
        struct tracepoint *tpoint = tf->tpoint;
        unsigned long ip = (unsigned long)tpoint->probestub;
        int ret;

        /*
         * We need two steps to enable an fprobe on a tracepoint: first, put
         * the __probestub_##TP function on the tracepoint, then put an
         * fprobe on that stub function.
         */
        ret = tracepoint_probe_register_prio_may_exist(tpoint,
                                tpoint->probestub, NULL, 0);
        if (ret < 0)
                return ret;
        return register_fprobe_ips(&tf->fp, &ip, 1);
}

/* Internal register function - just handle fprobe and flags */
static int __register_trace_fprobe(struct trace_fprobe *tf)
{
        int i, ret;

        /* Do we need a new LOCKDOWN flag for fprobe? */
        ret = security_locked_down(LOCKDOWN_KPROBES);
        if (ret)
                return ret;

        if (trace_fprobe_is_registered(tf))
                return -EINVAL;

        for (i = 0; i < tf->tp.nr_args; i++) {
                ret = traceprobe_update_arg(&tf->tp.args[i]);
                if (ret)
                        return ret;
        }

        /* Set/clear the disabled flag according to tp->flag */
        if (trace_probe_is_enabled(&tf->tp))
                tf->fp.flags &= ~FPROBE_FL_DISABLED;
        else
                tf->fp.flags |= FPROBE_FL_DISABLED;

        if (trace_fprobe_is_tracepoint(tf)) {

                /* This tracepoint is not loaded yet */
                if (tf->tpoint == TRACEPOINT_STUB)
                        return 0;

                return __register_tracepoint_fprobe(tf);
        }

        /* TODO: handle filter, nofilter or symbol list */
        return register_fprobe(&tf->fp, tf->symbol, NULL);
}
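
/*
 * Note (illustrative): the second argument of register_fprobe() is a
 * wildcard filter, so a pattern such as "vfs_*" could match several symbols;
 * the code above deliberately passes the exact symbol name and leaves the
 * notfilter NULL.
 */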

/* Internal unregister function - just handle fprobe and flags */
static void __unregister_trace_fprobe(struct trace_fprobe *tf)
{
        if (trace_fprobe_is_registered(tf)) {
                unregister_fprobe(&tf->fp);
                memset(&tf->fp, 0, sizeof(tf->fp));
                if (trace_fprobe_is_tracepoint(tf)) {
                        tracepoint_probe_unregister(tf->tpoint,
                                        tf->tpoint->probestub, NULL);
                        tf->tpoint = NULL;
                        tf->mod = NULL;
                }
        }
}

/* TODO: make this a common trace_*probe function */
/* Unregister a trace_fprobe and its probe_event */
static int unregister_trace_fprobe(struct trace_fprobe *tf)
{
        /* If other probes are on the event, just unregister the fprobe */
        if (trace_probe_has_sibling(&tf->tp))
                goto unreg;

        /* An enabled event can not be unregistered */
        if (trace_probe_is_enabled(&tf->tp))
                return -EBUSY;

        /* If there's a reference to the dynamic event */
        if (trace_event_dyn_busy(trace_probe_event_call(&tf->tp)))
                return -EBUSY;

        /* Will fail if the probe is being used by ftrace or perf */
        if (unregister_fprobe_event(tf))
                return -EBUSY;

unreg:
        __unregister_trace_fprobe(tf);
        dyn_event_remove(&tf->devent);
        trace_probe_unlink(&tf->tp);

        return 0;
}

static bool trace_fprobe_has_same_fprobe(struct trace_fprobe *orig,
                                         struct trace_fprobe *comp)
{
        struct trace_probe_event *tpe = orig->tp.event;
        int i;

        list_for_each_entry(orig, &tpe->probes, tp.list) {
                if (strcmp(trace_fprobe_symbol(orig),
                           trace_fprobe_symbol(comp)))
                        continue;

                /*
                 * trace_probe_compare_arg_type() ensured that nr_args and
                 * each argument name and type are the same. Let's compare
                 * comm.
                 */
                for (i = 0; i < orig->tp.nr_args; i++) {
                        if (strcmp(orig->tp.args[i].comm,
                                   comp->tp.args[i].comm))
                                break;
                }

                if (i == orig->tp.nr_args)
                        return true;
        }

        return false;
}

static int append_trace_fprobe(struct trace_fprobe *tf, struct trace_fprobe *to)
{
        int ret;

        if (trace_fprobe_is_return(tf) != trace_fprobe_is_return(to) ||
            trace_fprobe_is_tracepoint(tf) != trace_fprobe_is_tracepoint(to)) {
                trace_probe_log_set_index(0);
                trace_probe_log_err(0, DIFF_PROBE_TYPE);
                return -EEXIST;
        }
        ret = trace_probe_compare_arg_type(&tf->tp, &to->tp);
        if (ret) {
                /* Note that the arguments start at index 2 */
                trace_probe_log_set_index(ret + 1);
                trace_probe_log_err(0, DIFF_ARG_TYPE);
                return -EEXIST;
        }
        if (trace_fprobe_has_same_fprobe(to, tf)) {
                trace_probe_log_set_index(0);
                trace_probe_log_err(0, SAME_PROBE);
                return -EEXIST;
        }

        /* Append to the existing event */
        ret = trace_probe_append(&tf->tp, &to->tp);
        if (ret)
                return ret;

        ret = __register_trace_fprobe(tf);
        if (ret)
                trace_probe_unlink(&tf->tp);
        else
                dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));

        return ret;
}

/* Register a trace_fprobe and its probe_event */
static int register_trace_fprobe(struct trace_fprobe *tf)
{
        struct trace_fprobe *old_tf;
        int ret;

        mutex_lock(&event_mutex);

        old_tf = find_trace_fprobe(trace_probe_name(&tf->tp),
                                   trace_probe_group_name(&tf->tp));
        if (old_tf) {
                ret = append_trace_fprobe(tf, old_tf);
                goto end;
        }

        /* Register new event */
        ret = register_fprobe_event(tf);
        if (ret) {
                if (ret == -EEXIST) {
                        trace_probe_log_set_index(0);
                        trace_probe_log_err(0, EVENT_EXIST);
                } else
                        pr_warn("Failed to register probe event(%d)\n", ret);
                goto end;
        }

        /* Register fprobe */
        ret = __register_trace_fprobe(tf);
        if (ret < 0)
                unregister_fprobe_event(tf);
        else
                dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));

end:
        mutex_unlock(&event_mutex);
        return ret;
}

struct __find_tracepoint_cb_data {
        const char *tp_name;
        struct tracepoint *tpoint;
        struct module *mod;
};

static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mod, void *priv)
{
        struct __find_tracepoint_cb_data *data = priv;

        if (!data->tpoint && !strcmp(data->tp_name, tp->name)) {
                data->tpoint = tp;
                if (!data->mod) {
                        data->mod = mod;
                        if (!try_module_get(data->mod)) {
                                data->tpoint = NULL;
                                data->mod = NULL;
                        }
                }
        }
}

static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
{
        struct __find_tracepoint_cb_data *data = priv;

        if (!data->tpoint && !strcmp(data->tp_name, tp->name))
                data->tpoint = tp;
}

/*
 * Find a tracepoint in the kernel or in a module. If the tracepoint is in a
 * module, this increments the module refcount to prevent unloading until the
 * trace_fprobe is registered to the list. After registering the trace_fprobe
 * on the trace_fprobe list, the module refcount is decremented because
 * tracepoint_probe_module_cb will handle it.
 */
static struct tracepoint *find_tracepoint(const char *tp_name,
                                          struct module **tp_mod)
{
        struct __find_tracepoint_cb_data data = {
                .tp_name = tp_name,
                .mod = NULL,
        };

        for_each_kernel_tracepoint(__find_tracepoint_cb, &data);

        if (!data.tpoint && IS_ENABLED(CONFIG_MODULES)) {
                for_each_module_tracepoint(__find_tracepoint_module_cb, &data);
                *tp_mod = data.mod;
        }

        return data.tpoint;
}

#ifdef CONFIG_MODULES
static void reenable_trace_fprobe(struct trace_fprobe *tf)
{
        struct trace_probe *tp = &tf->tp;

        list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
                __enable_trace_fprobe(tf);
        }
}

static struct tracepoint *find_tracepoint_in_module(struct module *mod,
                                                    const char *tp_name)
{
        struct __find_tracepoint_cb_data data = {
                .tp_name = tp_name,
                .mod = mod,
        };

        for_each_tracepoint_in_module(mod, __find_tracepoint_module_cb, &data);
        return data.tpoint;
}

static int __tracepoint_probe_module_cb(struct notifier_block *self,
                                        unsigned long val, void *data)
{
        struct tp_module *tp_mod = data;
        struct tracepoint *tpoint;
        struct trace_fprobe *tf;
        struct dyn_event *pos;

        if (val != MODULE_STATE_GOING && val != MODULE_STATE_COMING)
                return NOTIFY_DONE;

        mutex_lock(&event_mutex);
        for_each_trace_fprobe(tf, pos) {
                if (val == MODULE_STATE_COMING && tf->tpoint == TRACEPOINT_STUB) {
                        tpoint = find_tracepoint_in_module(tp_mod->mod, tf->symbol);
                        if (tpoint) {
                                tf->tpoint = tpoint;
                                tf->mod = tp_mod->mod;
                                if (!WARN_ON_ONCE(__register_tracepoint_fprobe(tf)) &&
                                    trace_probe_is_enabled(&tf->tp))
                                        reenable_trace_fprobe(tf);
                        }
                } else if (val == MODULE_STATE_GOING && tp_mod->mod == tf->mod) {
                        tracepoint_probe_unregister(tf->tpoint,
                                        tf->tpoint->probestub, NULL);
                        tf->tpoint = NULL;
                        tf->mod = NULL;
                }
        }
        mutex_unlock(&event_mutex);

        return NOTIFY_DONE;
}

static struct notifier_block tracepoint_module_nb = {
        .notifier_call = __tracepoint_probe_module_cb,
};
#endif /* CONFIG_MODULES */

static int parse_symbol_and_return(int argc, const char *argv[],
                                   char **symbol, bool *is_return,
                                   bool is_tracepoint)
{
        char *tmp = strchr(argv[1], '%');
        int i;

        if (tmp) {
                int len = tmp - argv[1];

                if (!is_tracepoint && !strcmp(tmp, "%return")) {
                        *is_return = true;
                } else {
                        trace_probe_log_err(len, BAD_ADDR_SUFFIX);
                        return -EINVAL;
                }
                *symbol = kmemdup_nul(argv[1], len, GFP_KERNEL);
        } else
                *symbol = kstrdup(argv[1], GFP_KERNEL);
        if (!*symbol)
                return -ENOMEM;

        if (*is_return)
                return 0;

        /* If there is $retval, this should be a return fprobe. */
        for (i = 2; i < argc; i++) {
                tmp = strstr(argv[i], "$retval");
                if (tmp && !isalnum(tmp[7]) && tmp[7] != '_') {
                        if (is_tracepoint) {
                                trace_probe_log_set_index(i);
                                trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE);
                                return -EINVAL;
                        }
                        *is_return = true;
                        break;
                }
        }

        return 0;
}
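
/*
 * Example (illustrative): for "f:myexit vfs_read%return $retval" this yields
 * symbol = "vfs_read" and *is_return = true. Plain "f vfs_read $retval" also
 * turns into a return probe because of the "$retval" fetch argument.
 */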

static int __trace_fprobe_create(int argc, const char *argv[])
{
        /*
         * Argument syntax:
         *  - Add fentry probe:
         *      f[:[GRP/][EVENT]] [MOD:]KSYM [FETCHARGS]
         *  - Add fexit probe:
         *      f[N][:[GRP/][EVENT]] [MOD:]KSYM%return [FETCHARGS]
         *  - Add tracepoint probe:
         *      t[:[GRP/][EVENT]] TRACEPOINT [FETCHARGS]
         *
         * Fetch args:
         *  $retval       : fetch return value
         *  $stack        : fetch stack address
         *  $stackN       : fetch Nth entry of stack (N:0-)
         *  $argN         : fetch Nth argument (N:1-)
         *  $comm         : fetch current task comm
         *  @ADDR         : fetch memory at ADDR (ADDR should be in kernel)
         *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
         * Dereferencing memory fetch:
         *  +|-offs(ARG)  : fetch memory at ARG +|- offs address.
         * Alias name of args:
         *  NAME=FETCHARG : set NAME as alias of FETCHARG.
         * Type of args:
         *  FETCHARG:TYPE : use TYPE instead of unsigned long.
         */
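        /*
         * For example (illustrative, assuming tracefs is mounted at
         * /sys/kernel/tracing; event and argument names are made up):
         *
         *  # echo 'f:fprobes/myentry vfs_read count=$arg3' >> dynamic_events
         *  # echo 'f:fprobes/myexit vfs_read%return ret=$retval' >> dynamic_events
         *  # echo 't sched_switch' >> dynamic_events
         */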
        struct trace_fprobe *tf = NULL;
        int i, len, new_argc = 0, ret = 0;
        bool is_return = false;
        char *symbol = NULL;
        const char *event = NULL, *group = FPROBE_EVENT_SYSTEM;
        const char **new_argv = NULL;
        int maxactive = 0;
        char buf[MAX_EVENT_NAME_LEN];
        char gbuf[MAX_EVENT_NAME_LEN];
        char sbuf[KSYM_NAME_LEN];
        char abuf[MAX_BTF_ARGS_LEN];
        char *dbuf = NULL;
        bool is_tracepoint = false;
        struct module *tp_mod = NULL;
        struct tracepoint *tpoint = NULL;
        struct traceprobe_parse_context ctx = {
                .flags = TPARG_FL_KERNEL | TPARG_FL_FPROBE,
        };

        if ((argv[0][0] != 'f' && argv[0][0] != 't') || argc < 2)
                return -ECANCELED;

        if (argv[0][0] == 't') {
                is_tracepoint = true;
                group = TRACEPOINT_EVENT_SYSTEM;
        }

        trace_probe_log_init("trace_fprobe", argc, argv);

        event = strchr(&argv[0][1], ':');
        if (event)
                event++;

        if (isdigit(argv[0][1])) {
                if (event)
                        len = event - &argv[0][1] - 1;
                else
                        len = strlen(&argv[0][1]);
                if (len > MAX_EVENT_NAME_LEN - 1) {
                        trace_probe_log_err(1, BAD_MAXACT);
                        goto parse_error;
                }
                memcpy(buf, &argv[0][1], len);
                buf[len] = '\0';
                ret = kstrtouint(buf, 0, &maxactive);
                if (ret || !maxactive) {
                        trace_probe_log_err(1, BAD_MAXACT);
                        goto parse_error;
                }
                /*
                 * fprobe rethook instances are iterated over via a list. The
                 * maximum should stay reasonable.
                 */
                if (maxactive > RETHOOK_MAXACTIVE_MAX) {
                        trace_probe_log_err(1, MAXACT_TOO_BIG);
                        goto parse_error;
                }
        }

        trace_probe_log_set_index(1);

        /* a symbol (or tracepoint) must be specified */
        ret = parse_symbol_and_return(argc, argv, &symbol, &is_return, is_tracepoint);
        if (ret < 0)
                goto parse_error;

        if (!is_return && maxactive) {
                trace_probe_log_set_index(0);
                trace_probe_log_err(1, BAD_MAXACT_TYPE);
                goto parse_error;
        }

        trace_probe_log_set_index(0);
        if (event) {
                ret = traceprobe_parse_event_name(&event, &group, gbuf,
                                                  event - argv[0]);
                if (ret)
                        goto parse_error;
        }

        if (!event) {
                /* Make a new event name */
                if (is_tracepoint)
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%s%s",
                                 isdigit(*symbol) ? "_" : "", symbol);
                else
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%s__%s", symbol,
                                 is_return ? "exit" : "entry");
                sanitize_event_name(buf);
                event = buf;
        }

        if (is_return)
                ctx.flags |= TPARG_FL_RETURN;
        else
                ctx.flags |= TPARG_FL_FENTRY;

        if (is_tracepoint) {
                ctx.flags |= TPARG_FL_TPOINT;
                tpoint = find_tracepoint(symbol, &tp_mod);
                if (tpoint) {
                        ctx.funcname = kallsyms_lookup(
                                        (unsigned long)tpoint->probestub,
                                        NULL, NULL, NULL, sbuf);
                } else if (IS_ENABLED(CONFIG_MODULES)) {
                        /* This *may* be loaded afterwards */
                        tpoint = TRACEPOINT_STUB;
                        ctx.funcname = symbol;
                } else {
                        trace_probe_log_set_index(1);
                        trace_probe_log_err(0, NO_TRACEPOINT);
                        goto parse_error;
                }
        } else
                ctx.funcname = symbol;

        argc -= 2; argv += 2;
        new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
                                               abuf, MAX_BTF_ARGS_LEN, &ctx);
        if (IS_ERR(new_argv)) {
                ret = PTR_ERR(new_argv);
                new_argv = NULL;
                goto out;
        }
        if (new_argv) {
                argc = new_argc;
                argv = new_argv;
        }
        if (argc > MAX_TRACE_ARGS) {
                ret = -E2BIG;
                goto out;
        }

        ret = traceprobe_expand_dentry_args(argc, argv, &dbuf);
        if (ret)
                goto out;

        /* setup a probe */
        tf = alloc_trace_fprobe(group, event, symbol, tpoint, tp_mod,
                                maxactive, argc, is_return);
        if (IS_ERR(tf)) {
                ret = PTR_ERR(tf);
                /* This must return -ENOMEM, else there is a bug */
                WARN_ON_ONCE(ret != -ENOMEM);
                goto out;       /* We know tf is not allocated */
        }

        /* parse arguments */
        for (i = 0; i < argc; i++) {
                trace_probe_log_set_index(i + 2);
                ctx.offset = 0;
                ret = traceprobe_parse_probe_arg(&tf->tp, i, argv[i], &ctx);
                if (ret)
                        goto error;     /* This can be -ENOMEM */
        }

        if (is_return && tf->tp.entry_arg) {
                tf->fp.entry_handler = trace_fprobe_entry_handler;
                tf->fp.entry_data_size = traceprobe_get_entry_data_size(&tf->tp);
        }

        ret = traceprobe_set_print_fmt(&tf->tp,
                        is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL);
        if (ret < 0)
                goto error;

        ret = register_trace_fprobe(tf);
        if (ret) {
                trace_probe_log_set_index(1);
                if (ret == -EILSEQ)
                        trace_probe_log_err(0, BAD_INSN_BNDRY);
                else if (ret == -ENOENT)
                        trace_probe_log_err(0, BAD_PROBE_ADDR);
                else if (ret != -ENOMEM && ret != -EEXIST)
                        trace_probe_log_err(0, FAIL_REG_PROBE);
                goto error;
        }

out:
        if (tp_mod)
                module_put(tp_mod);
        traceprobe_finish_parse(&ctx);
        trace_probe_log_clear();
        kfree(new_argv);
        kfree(symbol);
        kfree(dbuf);
        return ret;

parse_error:
        ret = -EINVAL;
error:
        free_trace_fprobe(tf);
        goto out;
}

static int trace_fprobe_create(const char *raw_command)
{
        return trace_probe_create(raw_command, __trace_fprobe_create);
}

static int trace_fprobe_release(struct dyn_event *ev)
{
        struct trace_fprobe *tf = to_trace_fprobe(ev);
        int ret = unregister_trace_fprobe(tf);

        if (!ret)
                free_trace_fprobe(tf);
        return ret;
}

static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev)
{
        struct trace_fprobe *tf = to_trace_fprobe(ev);
        int i;

        if (trace_fprobe_is_tracepoint(tf))
                seq_putc(m, 't');
        else
                seq_putc(m, 'f');
        if (trace_fprobe_is_return(tf) && tf->fp.nr_maxactive)
                seq_printf(m, "%d", tf->fp.nr_maxactive);
        seq_printf(m, ":%s/%s", trace_probe_group_name(&tf->tp),
                   trace_probe_name(&tf->tp));

        seq_printf(m, " %s%s", trace_fprobe_symbol(tf),
                   trace_fprobe_is_return(tf) ? "%return" : "");

        for (i = 0; i < tf->tp.nr_args; i++)
                seq_printf(m, " %s=%s", tf->tp.args[i].name, tf->tp.args[i].comm);
        seq_putc(m, '\n');

        return 0;
}
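
/*
 * Example output (illustrative): a probe created with
 * "f:fprobes/myexit vfs_read%return ret=$retval" is rendered by the function
 * above as:
 *
 *      f:fprobes/myexit vfs_read%return ret=$retval
 */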

/*
 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 */
static int fprobe_register(struct trace_event_call *event,
                           enum trace_reg type, void *data)
{
        struct trace_event_file *file = data;

        switch (type) {
        case TRACE_REG_REGISTER:
                return enable_trace_fprobe(event, file);
        case TRACE_REG_UNREGISTER:
                return disable_trace_fprobe(event, file);

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return enable_trace_fprobe(event, NULL);
        case TRACE_REG_PERF_UNREGISTER:
                return disable_trace_fprobe(event, NULL);
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}

/*
 * Register dynevent at core_initcall. This allows the kernel to set up
 * fprobe events in postcore_initcall without tracefs.
 */
static __init int init_fprobe_trace_early(void)
{
        int ret;

        ret = dyn_event_register(&trace_fprobe_ops);
        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = register_tracepoint_module_notifier(&tracepoint_module_nb);
        if (ret)
                return ret;
#endif

        return 0;
}
core_initcall(init_fprobe_trace_early);
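
/*
 * Usage sketch (illustrative, not part of the original file): an end-to-end
 * session, assuming tracefs is mounted at /sys/kernel/tracing and using a
 * made-up event name:
 *
 *  # cd /sys/kernel/tracing
 *  # echo 'f:fprobes/myexit vfs_read%return ret=$retval' >> dynamic_events
 *  # echo 1 > events/fprobes/myexit/enable
 *  # head trace
 *  # echo 0 > events/fprobes/myexit/enable
 *  # echo '-:fprobes/myexit' >> dynamic_events
 */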