// SPDX-License-Identifier: GPL-2.0-only

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/ftrace.h>
#include <linux/ktime.h>
#include <linux/module.h>

#include <asm/barrier.h>
/*
 * Arbitrary large value chosen to be sufficiently large to minimize noise but
 * sufficiently small to complete quickly.
 */
static unsigned int nr_function_calls = 100000;
module_param(nr_function_calls, uint, 0);
MODULE_PARM_DESC(nr_function_calls, "How many times to call the relevant tracee");
/*
 * The number of ops associated with a call site affects whether a tracer can
 * be called directly or whether it's necessary to go via the list func, which
 * can be significantly more expensive.
 */
static unsigned int nr_ops_relevant = 1;
module_param(nr_ops_relevant, uint, 0);
MODULE_PARM_DESC(nr_ops_relevant, "How many ftrace_ops to associate with the relevant tracee");
/*
 * On architectures where all call sites share the same trampoline, having
 * tracers enabled for distinct functions can force the use of the list func
 * and incur overhead for all call sites.
 */
static unsigned int nr_ops_irrelevant;
module_param(nr_ops_irrelevant, uint, 0);
MODULE_PARM_DESC(nr_ops_irrelevant, "How many ftrace_ops to associate with the irrelevant tracee");
/*
 * On architectures with DYNAMIC_FTRACE_WITH_REGS, saving the full pt_regs can
 * be more expensive than only saving the minimal necessary regs.
 */
static bool save_regs;
module_param(save_regs, bool, 0);
MODULE_PARM_DESC(save_regs, "Register ops with FTRACE_OPS_FL_SAVE_REGS (save all registers in the trampoline)");
static bool assist_recursion;
module_param(assist_recursion, bool, 0);
MODULE_PARM_DESC(assist_recursion, "Register ops with FTRACE_OPS_FL_RECURSION");
static bool assist_rcu;
module_param(assist_rcu, bool, 0);
MODULE_PARM_DESC(assist_rcu, "Register ops with FTRACE_OPS_FL_RCU");
/*
 * By default, a trivial tracer is used which immediately returns to minimize
 * overhead. Sometimes a consistency check using a more expensive tracer is
 * desirable.
 */
static bool check_count;
module_param(check_count, bool, 0);
MODULE_PARM_DESC(check_count, "Check that tracers are called the expected number of times\n");
/*
 * Usually it's not interesting to leave the ops registered after the test
 * runs, but sometimes it can be useful to leave them registered so that they
 * can be inspected through the tracefs 'enabled_functions' file.
 */
static bool persist;
module_param(persist, bool, 0);
MODULE_PARM_DESC(persist, "Successfully load module and leave ftrace ops registered after test completes\n");
/*
 * Marked as noinline to ensure that an out-of-line traceable copy is
 * generated by the compiler.
 *
 * The barrier() ensures the compiler won't elide calls by determining there
 * are no side-effects.
 */
static noinline void tracee_relevant(void)
{
	barrier();
}
/*
 * Marked as noinline to ensure that an out-of-line traceable copy is
 * generated by the compiler.
 *
 * The barrier() ensures the compiler won't elide calls by determining there
 * are no side-effects.
 */
static noinline void tracee_irrelevant(void)
{
	barrier();
}
struct sample_ops {
	struct ftrace_ops ops;
	unsigned int count;
};
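/*
 * The ftrace_ops is embedded in a sample_ops so that the counting tracer can
 * recover its per-ops state with container_of(). ops_func_nop is the default
 * do-nothing tracer; ops_func_count is used instead when check_count is set,
 * and simply increments the counter later verified by ops_check().
 */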
static void ops_func_nop(unsigned long ip, unsigned long parent_ip,
			 struct ftrace_ops *op,
			 struct ftrace_regs *fregs)
{
	/* do nothing */
}
static void ops_func_count(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *op,
			   struct ftrace_regs *fregs)
{
	struct sample_ops *self;

	self = container_of(op, struct sample_ops, ops);
	self->count++;
}
static struct sample_ops *ops_relevant;
static struct sample_ops *ops_irrelevant;
static struct sample_ops *ops_alloc_init(void *tracee, ftrace_func_t func,
					 unsigned long flags, int nr)
{
	struct sample_ops *ops;

	ops = kcalloc(nr, sizeof(*ops), GFP_KERNEL);
	if (WARN_ON_ONCE(!ops))
		return NULL;

	for (unsigned int i = 0; i < nr; i++) {
		ops[i].ops.func = func;
		ops[i].ops.flags = flags;
		WARN_ON_ONCE(ftrace_set_filter_ip(&ops[i].ops, (unsigned long)tracee, 0, 0));
		WARN_ON_ONCE(register_ftrace_function(&ops[i].ops));
	}

	return ops;
}
static void ops_destroy(struct sample_ops *ops, int nr)
{
	if (!ops)
		return;

	for (unsigned int i = 0; i < nr; i++) {
		WARN_ON_ONCE(unregister_ftrace_function(&ops[i].ops));
		ftrace_free_filter(&ops[i].ops);
	}

	kfree(ops);
}
static void ops_check(struct sample_ops *ops, int nr,
		      unsigned int expected_count)
{
	if (!ops || !check_count)
		return;

	for (unsigned int i = 0; i < nr; i++) {
		if (ops[i].count == expected_count)
			continue;
		pr_warn("Counter called %u times (expected %u)\n",
			ops[i].count, expected_count);
	}
}
static ftrace_func_t tracer_relevant = ops_func_nop;
static ftrace_func_t tracer_irrelevant = ops_func_nop;
static int __init ftrace_ops_sample_init(void)
{
	unsigned long flags = 0;
	ktime_t start, end;
	u64 period;

	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && save_regs) {
		pr_info("this kernel does not support saving registers\n");
		save_regs = false;
	} else if (save_regs) {
		flags |= FTRACE_OPS_FL_SAVE_REGS;
	}

	if (assist_recursion)
		flags |= FTRACE_OPS_FL_RECURSION;

	if (assist_rcu)
		flags |= FTRACE_OPS_FL_RCU;

	if (check_count) {
		tracer_relevant = ops_func_count;
		tracer_irrelevant = ops_func_count;
	}
193 pr_info("registering:\n"
194 " relevant ops: %u\n"
197 " irrelevant ops: %u\n"
200 " saving registers: %s\n"
201 " assist recursion: %s\n"
203 nr_ops_relevant
, tracee_relevant
, tracer_relevant
,
204 nr_ops_irrelevant
, tracee_irrelevant
, tracer_irrelevant
,
205 save_regs
? "YES" : "NO",
206 assist_recursion
? "YES" : "NO",
207 assist_rcu
? "YES" : "NO");
	ops_relevant = ops_alloc_init(tracee_relevant, tracer_relevant,
				      flags, nr_ops_relevant);
	ops_irrelevant = ops_alloc_init(tracee_irrelevant, tracer_irrelevant,
					flags, nr_ops_irrelevant);
	start = ktime_get();
	for (unsigned int i = 0; i < nr_function_calls; i++)
		tracee_relevant();
	end = ktime_get();
	ops_check(ops_relevant, nr_ops_relevant, nr_function_calls);
	ops_check(ops_irrelevant, nr_ops_irrelevant, 0);
	period = ktime_to_ns(ktime_sub(end, start));

	pr_info("Attempted %u calls to %ps in %lluns (%lluns / call)\n",
		nr_function_calls, tracee_relevant,
		period, div_u64(period, nr_function_calls));
	if (persist)
		return 0;

	ops_destroy(ops_relevant, nr_ops_relevant);
	ops_destroy(ops_irrelevant, nr_ops_irrelevant);

	/*
	 * The benchmark completed successfully, but there's no reason to keep
	 * the module around. Return an error so the user doesn't have to
	 * manually unload the module.
	 */
	return -EINVAL;
}
module_init(ftrace_ops_sample_init);
static void __exit ftrace_ops_sample_exit(void)
{
	ops_destroy(ops_relevant, nr_ops_relevant);
	ops_destroy(ops_irrelevant, nr_ops_irrelevant);
}
module_exit(ftrace_ops_sample_exit);
MODULE_AUTHOR("Mark Rutland");
MODULE_DESCRIPTION("Example of using custom ftrace_ops");
MODULE_LICENSE("GPL");