// SPDX-License-Identifier: GPL-2.0
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>

#include "trace_kprobe_selftest.h"
#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096
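
/*
 * For reference: events created here are grouped under KPROBE_EVENT_SYSTEM
 * ("kprobes"), so a defined probe typically shows up in tracefs as
 * events/kprobes/<EVENT>/ with the usual enable/format files.
 */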

/**
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
							struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_kprobe_symbol(tk);

	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}
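
/*
 * For example, a probe whose symbol was given as "btrfs:btrfs_sync_file"
 * is within the "btrfs" module: mod->name ("btrfs") is a prefix of the
 * stored symbol string and is immediately followed by ':'.
 */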

static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return !!strchr(trace_kprobe_symbol(tk), ':');
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}

/* Return 0 if it fails to find the symbol address */
static nokprobe_inline
unsigned long trace_kprobe_address(struct trace_kprobe *tk)
{
	unsigned long addr;

	if (tk->symbol) {
		addr = (unsigned long)
			kallsyms_lookup_name(trace_kprobe_symbol(tk));
		if (addr)
			addr += tk->rp.kp.offset;
	} else {
		addr = (unsigned long)tk->rp.kp.addr;
	}
	return addr;
}

bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

	return kprobe_on_func_entry(tk->rp.kp.addr,
			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
			tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
}

bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

	return within_error_injection_list(trace_kprobe_address(tk));
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/* Memory fetching by symbol */
struct symbol_cache {
	char		*symbol;
	long		offset;
	unsigned long	addr;
};

unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

	if (sc->addr)
		sc->addr += sc->offset;

	return sc->addr;
}

void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;

	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;
	update_symbol_cache(sc);

	return sc;
}

/*
 * Kprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					  void *offset, void *dest)	\
{									\
	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
				(unsigned int)((unsigned long)offset));	\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));

DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	if (probe_kernel_address(addr, retval))				\
		*(type *)dest = 0;					\
	else								\
		*(type *)dest = retval;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));

DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
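/*
 * (The "data location" word packs both values: per the helpers in
 * trace_probe.h, the upper 16 bits carry the string length and the lower
 * 16 bits carry the offset of the string data relative to the entry.)
 */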
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	long ret;

	if (!maxlen)
		return;

	/*
	 * Try to get the string again, since the string can be changed while
	 * probing.
	 */
	ret = strncpy_from_unsafe(dst, addr, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		dst[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
	}
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));

/* Return the length of the string -- including the terminating null byte */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	mm_segment_t old_fs;
	int ret, len = 0;
	u8 c;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0)	/* Failed to check the length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));

#define DEFINE_FETCH_symbol(type)					\
void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
{									\
	struct symbol_cache *sc = data;					\
	if (sc->addr)							\
		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
	else								\
		*(type *)dest = 0;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));

DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)

/* kprobes don't support file_offset fetch methods */
#define fetch_file_offset_u8		NULL
#define fetch_file_offset_u16		NULL
#define fetch_file_offset_u32		NULL
#define fetch_file_offset_u64		NULL
#define fetch_file_offset_string	NULL
#define fetch_file_offset_string_size	NULL

/* Fetch type information table */
static const struct fetch_type kprobes_fetch_type_table[] = {
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};
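
/*
 * For example, an argument written as "+0(%di):string" in a probe
 * definition resolves to the FETCH_TYPE_STRING entry above, while a
 * ":x64" suffix selects the hexadecimal alias of u64.
 */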

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					       const char *event,
					       void *addr,
					       const char *symbol,
					       unsigned long offs,
					       int maxactive,
					       int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
	return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
	return NULL;
}

static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
{
	int ret = 0;

	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}

	return ret;
}

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link;
	int ret = 0;

	if (file) {
		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
		ret = __enable_trace_kprobe(tk);
		if (ret) {
			list_del_rcu(&link->list);
			kfree(link);
			tk->tp.flags &= ~TP_FLAG_TRACE;
		}
	} else {
		tk->tp.flags |= TP_FLAG_PROFILE;
		ret = __enable_trace_kprobe(tk);
		if (ret)
			tk->tp.flags &= ~TP_FLAG_PROFILE;
	}
out:
	return ret;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}

	/*
	 * If tk is not added to any list, it must be a local trace_kprobe
	 * created with perf_event_open. We don't need to wait for these
	 * trace_kprobes.
	 */
	if (list_empty(&tk->list))
		wait = 0;
out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure disabled (all running handlers are finished).
		 * This is not only for kfree(), but also the caller,
		 * trace_remove_event_call() supposes it for releasing
		 * event_call related objects, which will be accessed in
		 * the kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

#if defined(CONFIG_KPROBES_ON_FTRACE) && \
	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool within_notrace_func(struct trace_kprobe *tk)
{
	unsigned long offset, size, addr;

	addr = trace_kprobe_address(tk);
	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
		return false;

	/* Get the entry address of the target function */
	addr -= offset;

	/*
	 * Since ftrace_location_range() does an inclusive range check, we need
	 * to subtract 1 byte from the end address.
	 */
	return !ftrace_location_range(addr, addr + size - 1);
}
#else
#define within_notrace_func(tk)	(false)
#endif

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	if (trace_probe_is_registered(&tk->tp))
		return -EINVAL;

	if (within_notrace_func(tk)) {
		pr_warn("Could not probe notrace function %s\n",
			trace_kprobe_symbol(tk));
		return -EINVAL;
	}

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_update_arg(&tk->tp.args[i]);

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	if (ret == 0)
		tk->tp.flags |= TP_FLAG_REGISTERED;
	else {
		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
			pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
				tk->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event: call with probe_lock held */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* An enabled event cannot be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if the probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete the old (same name) event if it exists */
	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
				   tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Module notifier callback, checking events on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on the coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_event_name(&tk->tp.call),
					mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

/* Convert certain expected symbols into '_' when generating event names */
static inline void sanitize_event_name(char *name)
{
	while (*name++ != '\0')
		if (*name == ':' || *name == '.')
			*name = '_';
}
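
/*
 * For example, an auto-generated name such as "p_btrfs:btrfs_sync_file_0"
 * becomes "p_btrfs_btrfs_sync_file_0".
 */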

static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth entry of stack (N:0-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
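	/*
	 * Illustrative usage (written through the tracefs kprobe_events
	 * file; register names here are x86 examples only, mirroring
	 * Documentation/trace/kprobetrace):
	 *   p:myprobe do_sys_open dfd=%ax filename=+0(%si):string
	 *   r:myretprobe do_sys_open $retval
	 *   -:myprobe
	 */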
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	int maxactive = 0;
	char *arg;
	long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	event = strchr(&argv[0][1], ':');
	if (event) {
		event[0] = '\0';
		event++;
	}
	if (is_return && isdigit(argv[0][1])) {
		ret = kstrtouint(&argv[0][1], 0, &maxactive);
		if (ret) {
			pr_info("Failed to parse maxactive.\n");
			return ret;
		}
		/* kretprobes instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			pr_info("Maxactive is too big (%d > %d).\n",
				maxactive, KRETPROBE_MAXACTIVE_MAX);
			return -E2BIG;
		}
	}

	if (event) {
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tk = find_trace_kprobe(event, group);
		if (!tk) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_kprobe(tk);
		if (ret == 0)
			free_trace_kprobe(tk);
		mutex_unlock(&probe_lock);
		return ret;
	}
790 pr_info("Probe point is not specified.\n");
794 /* try to parse an address. if that fails, try to read the
795 * input as a symbol. */
796 if (kstrtoul(argv
[1], 0, (unsigned long *)&addr
)) {
797 /* a symbol specified */
799 /* TODO: support .init module functions */
800 ret
= traceprobe_split_symbol_offset(symbol
, &offset
);
801 if (ret
|| offset
< 0 || offset
> UINT_MAX
) {
802 pr_info("Failed to parse either an address or a symbol.\n");
805 if (offset
&& is_return
&&
806 !kprobe_on_func_entry(NULL
, symbol
, offset
)) {
807 pr_info("Given offset is not valid for return probe.\n");
811 argc
-= 2; argv
+= 2;

	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		sanitize_event_name(buf);
		event = buf;
	}

	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
				argc, is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
						   tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						 is_return, true,
						 kprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return ret;

error:
	free_trace_kprobe(tk);
	return ret;
}

static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
		   trace_event_name(&tk->tp.call));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_kprobes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;

	seq_printf(m, "  %-44s %15lu %15lu\n",
		   trace_event_name(&tk->tp.call),
		   trace_kprobe_nhit(tk),
		   tk->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');
out:
	return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');
out:
	return trace_handle_return(s);
}

static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call)) {
		unsigned long orig_ip = instruction_pointer(regs);
		int ret;

		ret = trace_call_bpf(call, regs);

		/*
		 * We need to check and see if we modified the pc of the
		 * pt_regs, and if so return 1 so that we don't do the
		 * single stepping.
		 */
		if (orig_ip != instruction_pointer(regs))
			return 1;
		if (!ret)
			return 0;
	}

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return 0;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);

int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **symbol, u64 *probe_offset,
			u64 *probe_addr, bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_kprobe *tk;

	if (perf_type_tracepoint)
		tk = find_trace_kprobe(pevent, group);
	else
		tk = event->tp_event->data;
	if (!tk)
		return -EINVAL;

	*fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
					      : BPF_FD_TYPE_KPROBE;
	if (tk->symbol) {
		*symbol = tk->symbol;
		*probe_offset = tk->rp.kp.offset;
		*probe_addr = 0;
	} else {
		*symbol = NULL;
		*probe_offset = 0;
		*probe_addr = (unsigned long)tk->rp.kp.addr;
	}
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */

/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(tk, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
	int ret = 0;

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		ret = kprobe_perf_func(tk, regs);
#endif
	return ret;
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static inline void init_trace_event_call(struct trace_kprobe *tk,
					 struct trace_event_call *call)
{
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_kprobe_is_return(tk)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}

	call->flags = TRACE_EVENT_FL_KPROBE;
	call->class->reg = kprobe_register;
	call->data = tk;
}

static int register_kprobe_event(struct trace_kprobe *tk)
{
	struct trace_event_call *call = &tk->tp.call;
	int ret = 0;

	init_trace_event_call(tk, call);

	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
		return -ENOMEM;
	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}
	return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
	int ret;

	/* tp->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tk->tp.call);
	if (!ret)
		kfree(tk->tp.call.print_fmt);
	return ret;
}

#ifdef CONFIG_PERF_EVENTS
/* create a trace_kprobe, but don't add it to global lists */
struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
			  bool is_return)
{
	struct trace_kprobe *tk;
	int ret;
	char *event;

	/*
	 * local trace_kprobes are not added to probe_list, so they are never
	 * searched in find_trace_kprobe(). Therefore, there is no concern of
	 * duplicated name here.
	 */
	event = func ? func : "DUMMY_EVENT";

	tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
				offs, 0 /* maxactive */, 0 /* nargs */,
				is_return);

	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return ERR_CAST(tk);
	}

	init_trace_event_call(tk, &tk->tp.call);

	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	ret = __register_trace_kprobe(tk);
	if (ret < 0) {
		kfree(tk->tp.call.print_fmt);
		goto error;
	}

	return &tk->tp.call;
error:
	free_trace_kprobe(tk);
	return ERR_PTR(ret);
}

void destroy_local_trace_kprobe(struct trace_event_call *event_call)
{
	struct trace_kprobe *tk;

	tk = container_of(event_call, struct trace_kprobe, tp.call);

	if (trace_probe_is_enabled(&tk->tp)) {
		WARN_ON(1);
		return;
	}

	__unregister_trace_kprobe(tk);

	kfree(tk->tp.call.print_fmt);
	free_trace_kprobe(tk);
}
#endif /* CONFIG_PERF_EVENTS */

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_kprobe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_events' entry\n");

	/* Profile interface */
	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);

#ifdef CONFIG_FTRACE_STARTUP_TEST
static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tk->tp.call)
			return file;

	return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this lockless.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_kprobe *tk;
	struct trace_event_file *file;

	if (tracing_is_disabled())
		return -ENODEV;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = trace_run_command("p:testprobe kprobe_trace_selftest_target "
				"$stack $stack0 +0($stack)",
				create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target "
				"$retval", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/*
	 * Not expecting an error here, the check is only to prevent the
	 * optimizer from removing the call to target() as otherwise there
	 * are no side-effects and the call is never performed.
	 */
	if (ret)
		warn++;

	/* Disable trace points before removing them */
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe2 hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	ret = trace_run_command("-:testprobe", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = trace_run_command("-:testprobe2", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_kprobes();
	/*
	 * Wait for the optimizer work to finish. Otherwise it might fiddle
	 * with probes in already freed __init text.
	 */
	wait_for_kprobe_optimizer();
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif