// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"
#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#endif
enum {
	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
};

struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;
static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL || tr->function_no_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);
/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

DEFINE_MUTEX(ftrace_lock);

struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}
#define FTRACE_PID_IGNORE	-1
#define FTRACE_PID_TRACE	-2

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	struct trace_array *tr = op->private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		if (pid == FTRACE_PID_IGNORE)
			return;
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return;
	}

	op->saved_func(ip, parent_ip, op, regs);
}
static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}
static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}
static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	synchronize_rcu_tasks_rude();
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}
static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}
static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);
int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
		return -EBUSY;

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}
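
/*
 * Illustrative sketch (not part of this file): how a client typically hooks
 * into the registration API above. The callback body and the "schedule"
 * filter are only examples; any traceable symbol works.
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		trace_printk("hit %pS from %pS\n", (void *)ip, (void *)parent_ip);
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
 *	register_ftrace_function(&my_ops);
 */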
int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}
static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}
#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE						\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
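
/*
 * Rough sizing example (illustrative; the exact numbers depend on the
 * architecture and config): with a 4K PAGE_SIZE and a struct ftrace_profile
 * of roughly 40 bytes, PROFILES_PER_PAGE works out to about 100 records per
 * page, so the ~20000 functions assumed further below need on the order of
 * 200 pages per CPU.
 */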
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}
static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	return 0;
}
#else
/* not function graph: compare against hits */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	return 0;
}
#endif
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		 "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		 "  --------                               ---\n");
#endif
	return 0;
}
static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = div64_ul(rec->time, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		stddev = div64_ul(stddev,
				  rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
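
/*
 * Worked example for the s^2 computation above (illustrative): three
 * samples of 2, 4 and 6 time units give n = 3, sum x = 12, sum x^2 = 56,
 * so s^2 = (3 * 56 - 12 * 12) / (3 * 2) = 24 / 6 = 4, i.e. a standard
 * deviation of 2.
 */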
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}
int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. From past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}
static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}
/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}
/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}
static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static bool fgraph_graph_time = true;

void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	struct ftrace_ret_stack *ret_stack;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	ret_stack = ftrace_graph_get_ret_stack(current, 0);
	if (ret_stack)
		ret_stack->subtime = 0;

	return 1;
}
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {

		/* Append this call time to the parent time to subtract */
		ret_stack = ftrace_graph_get_ret_stack(current, 1);
		if (ret_stack)
			ret_stack->subtime += calltime;

		ret_stack = ftrace_graph_get_ret_stack(current, 0);
		if (ret_stack && ret_stack->subtime < calltime)
			calltime -= ret_stack->subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static struct fgraph_ops fprofiler_ops = {
	.entryfunc	= &profile_graph_entry,
	.retfunc	= &profile_graph_return,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&fprofiler_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph(&fprofiler_ops);
}
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_rcu.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}
static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};
/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent; if something goes
			 * wrong we still do not free the memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = tracefs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
}
#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */
#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif
struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};
/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}
struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;
static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}
/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}
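
/*
 * Illustrative use (hypothetical caller): membership tests against a
 * filter hash reduce to a NULL check on the returned entry.
 *
 *	if (ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
 *		;	// rec->ip is selected by this ops
 */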
static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}
static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}
static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	/* stack tracer isn't supported yet */
	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}
static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}
static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}
static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

 out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}
static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);
static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_head *hhd;
	struct hlist_node *tn;
	int bits = 0;
	int i;

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}
	return new_hash;
}

static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	int size = src->count;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	return dup_hash(src, size);
}
static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}
static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		!__ftrace_lookup_ip(hash->notrace_hash, ip));
}
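
/*
 * Quick reference for hash_contains_ip() (illustrative):
 *
 *	filter_hash empty  and notrace_hash empty      -> match
 *	ip in filter_hash  and ip not in notrace_hash  -> match
 *	ip not in a non-empty filter_hash              -> no match
 *	ip in notrace_hash                             -> no match
 */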
/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}
/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
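
/*
 * Illustrative use of the iteration helpers above (hypothetical):
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_DISABLED)
 *			continue;
 *		// act on rec->ip; use a goto, not break, to leave early
 *	} while_for_each_ftrace_rec();
 */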
static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}
static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec = NULL;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			break;
	}
	return rec;
}
/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;

	rec = lookup_rec(start, end);
	if (rec)
		return rec->ip;

	return 0;
}

/**
 * ftrace_location - return true if the ip giving is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}
/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted before we modify the record.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = true;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags |= FTRACE_FL_DIRECT;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * Only the internal direct_ops should have the
			 * DIRECT flag set. Thus, if it is removing a
			 * function, then that function should no longer
			 * be direct.
			 */
			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags &= ~FTRACE_FL_DIRECT;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * The TRAMP needs to be set only if rec count
			 * is decremented to one, and the ops that is
			 * left has a trampoline. As TRAMP can only be
			 * enabled if there is only a single ops attached
			 * to it.
			 */
			if (ftrace_rec_count(rec) == 1 &&
			    ftrace_find_tramp_ops_any(rec))
				rec->flags |= FTRACE_FL_TRAMP;
			else
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}
static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}
/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or there is no need to update, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the following meanings:
 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 * - If the hash is EMPTY_HASH, it hits nothing
 * - Anything else hits the recs which match the hash entries.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	/*
	 * Since the IPMODIFY is a very address sensitive action, we do not
	 * allow ftrace_ops to set all functions to new hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			/* New entries must ensure no others are using it */
			if (rec->flags & FTRACE_FL_IPMODIFY)
				goto rollback;
			rec->flags |= FTRACE_FL_IPMODIFY;
		} else /* Removed entry */
			rec->flags &= ~FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}
static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}
static void print_ip_ins(const char *fmt, const unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

enum ftrace_bug_type ftrace_bug_type;
const void *ftrace_expected;

static void print_bug_type(void)
{
	switch (ftrace_bug_type) {
	case FTRACE_BUG_UNKNOWN:
		break;
	case FTRACE_BUG_INIT:
		pr_info("Initializing ftrace call sites\n");
		break;
	case FTRACE_BUG_NOP:
		pr_info("Setting ftrace call site to NOP\n");
		break;
	case FTRACE_BUG_CALL:
		pr_info("Setting ftrace call site to call ftrace function\n");
		break;
	case FTRACE_BUG_UPDATE:
		pr_info("Updating ftrace call site to call a different ftrace function\n");
		break;
	}
}
/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @rec: The record that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
	unsigned long ip = rec ? rec->ip : 0;

	pr_info("------------[ ftrace bug ]------------\n");

	switch (failed) {
	case -EFAULT:
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(KERN_INFO, ip);
		break;
	case -EINVAL:
		pr_info("ftrace failed to modify ");
		print_ip_sym(KERN_INFO, ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		pr_cont("\n");
		if (ftrace_expected) {
			print_ip_ins(" expected: ", ftrace_expected);
			pr_cont("\n");
		}
		break;
	case -EPERM:
		pr_info("ftrace faulted on writing ");
		print_ip_sym(KERN_INFO, ip);
		break;
	default:
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(KERN_INFO, ip);
	}
	print_bug_type();
	if (rec) {
		struct ftrace_ops *ops = NULL;

		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : " ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops) {
				do {
					pr_cont("\ttramp: %pS (%pS)",
						(void *)ops->trampoline,
						(void *)ops->func);
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
				pr_cont("\ttramp: ERROR!");
		}
		ip = ftrace_get_addr_curr(rec);
		pr_cont("\n expected tramp: %lx\n", ip);
	}

	FTRACE_WARN_ON_ONCE(1);
}
static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
{
	unsigned long flag = 0UL;

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	if (rec->flags & FTRACE_FL_DISABLED)
		return FTRACE_UPDATE_IGNORE;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure its disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && ftrace_rec_count(rec))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
	 * Same for direct calls.
	 */
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) !=
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) !=
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;

		/*
		 * Direct calls are special, as count matters.
		 * We must test the record for direct, if the
		 * DIRECT and DIRECT_EN do not match, but only
		 * if the count is 1. That's because, if the
		 * count is something other than one, we do not
		 * want the direct enabled (it will be done via the
		 * direct helper). But if DIRECT_EN is set, and
		 * the count is not one, we need to clear it.
		 */
		if (ftrace_rec_count(rec) == 1) {
			if (!(rec->flags & FTRACE_FL_DIRECT) !=
			    !(rec->flags & FTRACE_FL_DIRECT_EN))
				flag |= FTRACE_FL_DIRECT;
		} else if (rec->flags & FTRACE_FL_DIRECT_EN) {
			flag |= FTRACE_FL_DIRECT;
		}
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED;
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}
			if (flag & FTRACE_FL_DIRECT) {
				/*
				 * If there's only one user (direct_ops helper)
				 * then we can call the direct function
				 * directly (no ftrace trampoline).
				 */
				if (ftrace_rec_count(rec) == 1) {
					if (rec->flags & FTRACE_FL_DIRECT)
						rec->flags |= FTRACE_FL_DIRECT_EN;
					else
						rec->flags &= ~FTRACE_FL_DIRECT_EN;
				} else {
					/*
					 * Can only call directly if there's
					 * only one callback to the function.
					 */
					rec->flags &= ~FTRACE_FL_DIRECT_EN;
				}
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
		 *   from the save regs, to a non-save regs function or
		 *   vice versa, or from a trampoline call.
		 */
		if (flag & FTRACE_FL_ENABLED) {
			ftrace_bug_type = FTRACE_BUG_CALL;
			return FTRACE_UPDATE_MAKE_CALL;
		}

		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there's no more users, clear all flags */
		if (!ftrace_rec_count(rec))
			rec->flags = 0;
		else
			/*
			 * Just disable the record, but keep the ops TRAMP
			 * and REGS states. The _EN flags must be disabled though.
			 */
			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
					FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
	}

	ftrace_bug_type = FTRACE_BUG_NOP;
	return FTRACE_UPDATE_MAKE_NOP;
}
/**
 * ftrace_update_record, set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to true if the record is tracing, false to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
{
	return ftrace_check_record(rec, enable, true);
}

/**
 * ftrace_test_record, check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to true to check if enabled, false if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
{
	return ftrace_check_record(rec, enable, false);
}
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
			   struct ftrace_ops *op)
{
	unsigned long ip = rec->ip;

	while_for_each_ftrace_op(op) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	}

	return NULL;
}
static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	/*
	 * Need to check removed ops first.
	 * If they are being removed, and this rec has a tramp,
	 * and this rec is in the ops list, then it would be the
	 * one with the tramp.
	 */
	if (removed_ops) {
		if (hash_contains_ip(ip, &removed_ops->old_hash))
			return removed_ops;
	}

	/*
	 * Need to find the current trampoline for a rec.
	 * Now, a trampoline is only attached to a rec if there
	 * was a single 'ops' attached to it. But this can be called
	 * when we are adding another op to the rec or removing the
	 * current one. Thus, if the op is being added, we can
	 * ignore it because it hasn't attached itself to the rec
	 * yet.
	 *
	 * If an ops is being modified (hooking to different functions)
	 * then we don't care about the new functions that are being
	 * added, just the old ones (that are probably being removed).
	 *
	 * If we are adding an ops to a function that already is using
	 * a trampoline, it needs to be removed (trampolines are only
	 * for single ops connected), then an ops that is not being
	 * modified also needs to be checked.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		/*
		 * If the ops is being added, it hasn't gotten to
		 * the point to be removed from this tree yet.
		 */
		if (op->flags & FTRACE_OPS_FL_ADDING)
			continue;

		/*
		 * If the ops is being modified and is in the old
		 * hash, then it is probably being removed from this
		 * function.
		 */
		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, &op->old_hash))
			return op;
		/*
		 * If the ops is not being added or modified, and it's
		 * in its normal filter hash, then this must be the one
		 * we want!
		 */
		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, op->func_hash))
			return op;

	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* pass rec in as regs to have non-NULL val */
		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/* Protected by rcu_tasks for reading, and direct_mutex for writing */
static struct ftrace_hash *direct_functions = EMPTY_HASH;
static DEFINE_MUTEX(direct_mutex);
int ftrace_direct_func_count;

/*
 * Search the direct_functions hash to see if the given instruction pointer
 * has a direct caller attached to it.
 */
unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = __ftrace_lookup_ip(direct_functions, ip);
	if (!entry)
		return 0;

	return entry->direct;
}

static void call_direct_funcs(unsigned long ip, unsigned long pip,
			      struct ftrace_ops *ops, struct pt_regs *regs)
{
	unsigned long addr;

	addr = ftrace_find_rec_direct(ip);
	if (!addr)
		return;

	arch_ftrace_set_direct_caller(regs, addr);
}

struct ftrace_ops direct_ops = {
	.func		= call_direct_funcs,
	.flags		= FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_RECURSION_SAFE
			  | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
			  | FTRACE_OPS_FL_PERMANENT,
};
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
/**
 * ftrace_get_addr_new - Get the call address to set to
 * @rec:  The ftrace record descriptor
 *
 * If the record has the FTRACE_FL_REGS set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 *
 * Returns the address of the trampoline to set to
 */
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	unsigned long addr;

	if ((rec->flags & FTRACE_FL_DIRECT) &&
	    (ftrace_rec_count(rec) == 1)) {
		addr = ftrace_find_rec_direct(rec->ip);
		if (addr)
			return addr;
		WARN_ON_ONCE(1);
	}

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP) {
		ops = ftrace_find_tramp_ops_new(rec);
		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
				(void *)rec->ip, (void *)rec->ip, rec->flags);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}
2436 * ftrace_get_addr_curr - Get the call address that is already there
2437 * @rec: The ftrace record descriptor
2439 * The FTRACE_FL_REGS_EN is set when the record already points to
2440 * a function that saves all the regs. Basically the '_EN' version
2441 * represents the current state of the function.
2443 * Returns the address of the trampoline that is currently being called
2445 unsigned long ftrace_get_addr_curr(struct dyn_ftrace
*rec
)
2447 struct ftrace_ops
*ops
;
2450 /* Direct calls take precedence over trampolines */
2451 if (rec
->flags
& FTRACE_FL_DIRECT_EN
) {
2452 addr
= ftrace_find_rec_direct(rec
->ip
);
2458 /* Trampolines take precedence over regs */
2459 if (rec
->flags
& FTRACE_FL_TRAMP_EN
) {
2460 ops
= ftrace_find_tramp_ops_curr(rec
);
2461 if (FTRACE_WARN_ON(!ops
)) {
2462 pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2463 (void *)rec
->ip
, (void *)rec
->ip
);
2464 /* Ftrace is shutting down, return anything */
2465 return (unsigned long)FTRACE_ADDR
;
2467 return ops
->trampoline
;
2470 if (rec
->flags
& FTRACE_FL_REGS_EN
)
2471 return (unsigned long)FTRACE_REGS_ADDR
;
2473 return (unsigned long)FTRACE_ADDR
;
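/*
 * Worked example (illustrative, not from the original source): assume a
 * regs-saving callback (FTRACE_OPS_FL_SAVE_REGS) is added to a function
 * that is already traced through the default trampoline and no ops with a
 * custom trampoline is involved. The record then has FTRACE_FL_REGS set
 * while FTRACE_FL_REGS_EN is still clear, so:
 *
 *	ftrace_get_addr_curr(rec)  returns FTRACE_ADDR       (what the site calls now)
 *	ftrace_get_addr_new(rec)   returns FTRACE_REGS_ADDR  (what it should call)
 *
 * ftrace_update_record() then reports FTRACE_UPDATE_MODIFY_CALL and
 * __ftrace_replace_code() below patches the site with
 * ftrace_modify_call(rec, old, new).
 */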
static int
__ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
{
	unsigned long ftrace_old_addr;
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = ftrace_get_addr_new(rec);

	/* This needs to be done before we call ftrace_update_record */
	ftrace_old_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_update_record(rec, enable);

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		ftrace_bug_type = FTRACE_BUG_CALL;
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		ftrace_bug_type = FTRACE_BUG_NOP;
		return ftrace_make_nop(NULL, rec, ftrace_old_addr);

	case FTRACE_UPDATE_MODIFY_CALL:
		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}
void __weak ftrace_replace_code(int mod_flags)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
	int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
	int failed;

	if (unlikely(ftrace_disabled))
		return;

	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			ftrace_bug(failed, rec);
			/* Stop processing */
			return;
		}
		if (schedulable)
			cond_resched();
	} while_for_each_ftrace_rec();
}
struct ftrace_rec_iter {
	struct ftrace_page	*pg;
	int			index;
};

/**
 * ftrace_rec_iter_start, start up iterating over traced functions
 *
 * Returns an iterator handle that is used to iterate over all
 * the records that represent address locations where functions
 * are traced.
 *
 * May return NULL if no records are available.
 */
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
{
	/*
	 * We only use a single iterator.
	 * Protected by the ftrace_lock mutex.
	 */
	static struct ftrace_rec_iter ftrace_rec_iter;
	struct ftrace_rec_iter *iter = &ftrace_rec_iter;

	iter->pg = ftrace_pages_start;
	iter->index = 0;

	/* Could have empty pages */
	while (iter->pg && !iter->pg->index)
		iter->pg = iter->pg->next;

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_next, get the next record to process.
 * @iter: The handle to the iterator.
 *
 * Returns the next iterator after the given iterator @iter.
 */
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
{
	iter->index++;

	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* Could have empty pages */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_record, get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}
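/*
 * Illustrative sketch (not part of the original source): architectures that
 * patch the call sites themselves typically walk every record with this
 * iterator while holding ftrace_lock, e.g.:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		patch_site(rec->ip);	(patch_site() is a hypothetical arch helper)
 *	}
 */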
static int
ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return 0;

	ret = ftrace_init_nop(mod, rec);
	if (ret) {
		ftrace_bug_type = FTRACE_BUG_INIT;
		ftrace_bug(ret, rec);
		return 0;
	}
	return 1;
}
/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

void ftrace_modify_all_code(int command)
{
	int update = command & FTRACE_UPDATE_TRACE_FUNC;
	int mod_flags = 0;
	int err = 0;

	if (command & FTRACE_MAY_SLEEP)
		mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;

	/*
	 * If the ftrace_caller calls a ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls are set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops are having the right functions
	 * traced.
	 */
	if (update) {
		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(mod_flags);

	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
		err = ftrace_update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_START_FUNC_RET)
		err = ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
}
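/*
 * Example of how the command bits are typically combined (illustrative):
 * enabling the first callback while ftrace_enabled is set ends up calling
 * this function with roughly
 *
 *	FTRACE_UPDATE_CALLS | FTRACE_UPDATE_TRACE_FUNC
 *
 * which first points ftrace_caller at ftrace_ops_list_func, then converts
 * the selected nops into calls, and finally installs the real
 * ftrace_trace_function.
 */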
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

/**
 * ftrace_run_stop_machine, go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, then
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

/**
 * arch_ftrace_update_code, modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if it does not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do what ever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
				   struct ftrace_ops_hash *old_hash)
{
	ops->flags |= FTRACE_OPS_FL_MODIFYING;
	ops->old_hash.filter_hash = old_hash->filter_hash;
	ops->old_hash.notrace_hash = old_hash->notrace_hash;
	ftrace_run_update_code(command);
	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;
	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}

/* List of trace_ops that have allocated trampolines */
static LIST_HEAD(ftrace_ops_trampoline_list);

static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
{
	lockdep_assert_held(&ftrace_lock);
	list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
}

static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
{
	lockdep_assert_held(&ftrace_lock);
	list_del_rcu(&ops->list);
}

/*
 * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
 * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
 * not a module.
 */
#define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
#define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"

static void ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
	    ops->trampoline) {
		/*
		 * Record the text poke event before the ksymbol unregister
		 * event.
		 */
		perf_event_text_poke((void *)ops->trampoline,
				     (void *)ops->trampoline,
				     ops->trampoline_size, NULL, 0);
		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
				   ops->trampoline, ops->trampoline_size,
				   true, FTRACE_TRAMPOLINE_SYM);
		/* Remove from kallsyms after the perf events */
		ftrace_remove_trampoline_from_kallsyms(ops);
	}

	arch_ftrace_trampoline_free(ops);
}
static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_all(int command)
{
	update_all_ops = true;
	ftrace_startup_enable(command);
	update_all_ops = false;
}

int ftrace_startup(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up++;

	/*
	 * Note that ftrace probes use this to start up
	 * and modify the functions they will probe. But we still
	 * set the ADDING flag for modification, as probes
	 * do not have trampolines. If they add them in the
	 * future, then the probes will need to distinguish
	 * between adding and updating probes.
	 */
	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;

	ret = ftrace_hash_ipmodify_enable(ops);
	if (ret < 0) {
		/* Rollback registration process */
		__unregister_ftrace_function(ops);
		ftrace_start_up--;
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
		return ret;
	}

	if (ftrace_hash_rec_enable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;

	ftrace_startup_enable(command);

	ops->flags &= ~FTRACE_OPS_FL_ADDING;

	return 0;
}
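/*
 * Illustrative sketch (not from the original source): the normal way to
 * reach ftrace_startup()/ftrace_shutdown() is register_ftrace_function()
 * and unregister_ftrace_function() with a private ftrace_ops:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		(called for every function that matches the ops filter)
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *
 * my_callback/my_ops are hypothetical names used only for illustration.
 */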
2873 int ftrace_shutdown(struct ftrace_ops
*ops
, int command
)
2877 if (unlikely(ftrace_disabled
))
2880 ret
= __unregister_ftrace_function(ops
);
2886 * Just warn in case of unbalance, no need to kill ftrace, it's not
2887 * critical but the ftrace_call callers may be never nopped again after
2888 * further ftrace uses.
2890 WARN_ON_ONCE(ftrace_start_up
< 0);
2892 /* Disabling ipmodify never fails */
2893 ftrace_hash_ipmodify_disable(ops
);
2895 if (ftrace_hash_rec_disable(ops
, 1))
2896 command
|= FTRACE_UPDATE_CALLS
;
2898 ops
->flags
&= ~FTRACE_OPS_FL_ENABLED
;
2900 if (saved_ftrace_func
!= ftrace_trace_function
) {
2901 saved_ftrace_func
= ftrace_trace_function
;
2902 command
|= FTRACE_UPDATE_TRACE_FUNC
;
2905 if (!command
|| !ftrace_enabled
) {
2907 * If these are dynamic or per_cpu ops, they still
2908 * need their data freed. Since, function tracing is
2909 * not currently active, we can just free them
2910 * without synchronizing all CPUs.
2912 if (ops
->flags
& FTRACE_OPS_FL_DYNAMIC
)
2919 * If the ops uses a trampoline, then it needs to be
2920 * tested first on update.
2922 ops
->flags
|= FTRACE_OPS_FL_REMOVING
;
2925 /* The trampoline logic checks the old hashes */
2926 ops
->old_hash
.filter_hash
= ops
->func_hash
->filter_hash
;
2927 ops
->old_hash
.notrace_hash
= ops
->func_hash
->notrace_hash
;
2929 ftrace_run_update_code(command
);
2932 * If there's no more ops registered with ftrace, run a
2933 * sanity check to make sure all rec flags are cleared.
2935 if (rcu_dereference_protected(ftrace_ops_list
,
2936 lockdep_is_held(&ftrace_lock
)) == &ftrace_list_end
) {
2937 struct ftrace_page
*pg
;
2938 struct dyn_ftrace
*rec
;
2940 do_for_each_ftrace_rec(pg
, rec
) {
2941 if (FTRACE_WARN_ON_ONCE(rec
->flags
& ~FTRACE_FL_DISABLED
))
2942 pr_warn(" %pS flags:%lx\n",
2943 (void *)rec
->ip
, rec
->flags
);
2944 } while_for_each_ftrace_rec();
2947 ops
->old_hash
.filter_hash
= NULL
;
2948 ops
->old_hash
.notrace_hash
= NULL
;
2951 ops
->flags
&= ~FTRACE_OPS_FL_REMOVING
;
2954 * Dynamic ops may be freed, we must make sure that all
2955 * callers are done before leaving this function.
2956 * The same goes for freeing the per_cpu data of the per_cpu
2959 if (ops
->flags
& FTRACE_OPS_FL_DYNAMIC
) {
2961 * We need to do a hard force of sched synchronization.
2962 * This is because we use preempt_disable() to do RCU, but
2963 * the function tracers can be called where RCU is not watching
2964 * (like before user_exit()). We can not rely on the RCU
2965 * infrastructure to do the synchronization, thus we must do it
2968 synchronize_rcu_tasks_rude();
 * When the kernel is preemptive, tasks can be preempted
2972 * while on a ftrace trampoline. Just scheduling a task on
2973 * a CPU is not good enough to flush them. Calling
 * synchronize_rcu_tasks() will wait for those tasks to
2975 * execute and either schedule voluntarily or enter user space.
2977 if (IS_ENABLED(CONFIG_PREEMPTION
))
2978 synchronize_rcu_tasks();
2981 ftrace_trampoline_free(ops
);
static void ftrace_startup_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up) {
		command = FTRACE_UPDATE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_START_FUNC_RET;
		ftrace_startup_enable(command);
	}
}

static void ftrace_shutdown_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up) {
		command = FTRACE_DISABLE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_STOP_FUNC_RET;
		ftrace_run_update_code(command);
	}
}

static u64		ftrace_update_time;
unsigned long		ftrace_update_tot_cnt;
unsigned long		ftrace_number_of_pages;
unsigned long		ftrace_number_of_groups;
3026 static inline int ops_traces_mod(struct ftrace_ops
*ops
)
3029 * Filter_hash being empty will default to trace module.
3030 * But notrace hash requires a test of individual module functions.
3032 return ftrace_hash_empty(ops
->func_hash
->filter_hash
) &&
3033 ftrace_hash_empty(ops
->func_hash
->notrace_hash
);
3037 * Check if the current ops references the record.
3039 * If the ops traces all functions, then it was already accounted for.
3040 * If the ops does not trace the current record function, skip it.
3041 * If the ops ignores the function via notrace filter, skip it.
3044 ops_references_rec(struct ftrace_ops
*ops
, struct dyn_ftrace
*rec
)
3046 /* If ops isn't enabled, ignore it */
3047 if (!(ops
->flags
& FTRACE_OPS_FL_ENABLED
))
3050 /* If ops traces all then it includes this function */
3051 if (ops_traces_mod(ops
))
3054 /* The function must be in the filter */
3055 if (!ftrace_hash_empty(ops
->func_hash
->filter_hash
) &&
3056 !__ftrace_lookup_ip(ops
->func_hash
->filter_hash
, rec
->ip
))
3059 /* If in notrace hash, we ignore it too */
3060 if (ftrace_lookup_ip(ops
->func_hash
->notrace_hash
, rec
->ip
))
3066 static int ftrace_update_code(struct module
*mod
, struct ftrace_page
*new_pgs
)
3068 struct ftrace_page
*pg
;
3069 struct dyn_ftrace
*p
;
3071 unsigned long update_cnt
= 0;
3072 unsigned long rec_flags
= 0;
3075 start
= ftrace_now(raw_smp_processor_id());
3078 * When a module is loaded, this function is called to convert
3079 * the calls to mcount in its text to nops, and also to create
3080 * an entry in the ftrace data. Now, if ftrace is activated
3081 * after this call, but before the module sets its text to
3082 * read-only, the modification of enabling ftrace can fail if
3083 * the read-only is done while ftrace is converting the calls.
3084 * To prevent this, the module's records are set as disabled
3085 * and will be enabled after the call to set the module's text
3089 rec_flags
|= FTRACE_FL_DISABLED
;
3091 for (pg
= new_pgs
; pg
; pg
= pg
->next
) {
3093 for (i
= 0; i
< pg
->index
; i
++) {
3095 /* If something went wrong, bail without enabling anything */
3096 if (unlikely(ftrace_disabled
))
3099 p
= &pg
->records
[i
];
3100 p
->flags
= rec_flags
;
3103 * Do the initial record conversion from mcount jump
3104 * to the NOP instructions.
3106 if (!__is_defined(CC_USING_NOP_MCOUNT
) &&
3107 !ftrace_nop_initialize(mod
, p
))
3114 stop
= ftrace_now(raw_smp_processor_id());
3115 ftrace_update_time
= stop
- start
;
3116 ftrace_update_tot_cnt
+= update_cnt
;
3121 static int ftrace_allocate_records(struct ftrace_page
*pg
, int count
)
3126 if (WARN_ON(!count
))
3129 order
= get_count_order(DIV_ROUND_UP(count
, ENTRIES_PER_PAGE
));
3132 * We want to fill as much as possible. No more than a page
3135 while ((PAGE_SIZE
<< order
) / ENTRY_SIZE
>= count
+ ENTRIES_PER_PAGE
)
3139 pg
->records
= (void *)__get_free_pages(GFP_KERNEL
| __GFP_ZERO
, order
);
3142 /* if we can't allocate this size, try something smaller */
3149 ftrace_number_of_pages
+= 1 << order
;
3150 ftrace_number_of_groups
++;
3152 cnt
= (PAGE_SIZE
<< order
) / ENTRY_SIZE
;
3161 static struct ftrace_page
*
3162 ftrace_allocate_pages(unsigned long num_to_init
)
3164 struct ftrace_page
*start_pg
;
3165 struct ftrace_page
*pg
;
3172 start_pg
= pg
= kzalloc(sizeof(*pg
), GFP_KERNEL
);
 * Try to allocate as much as possible in one contiguous
3178 * location that fills in all of the space. We want to
3179 * waste as little space as possible.
3182 cnt
= ftrace_allocate_records(pg
, num_to_init
);
3190 pg
->next
= kzalloc(sizeof(*pg
), GFP_KERNEL
);
3202 order
= get_count_order(pg
->size
/ ENTRIES_PER_PAGE
);
3203 free_pages((unsigned long)pg
->records
, order
);
3204 start_pg
= pg
->next
;
3207 ftrace_number_of_pages
-= 1 << order
;
3208 ftrace_number_of_groups
--;
3210 pr_info("ftrace: FAILED to allocate memory for functions\n");
3214 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3216 struct ftrace_iterator
{
3220 struct ftrace_page
*pg
;
3221 struct dyn_ftrace
*func
;
3222 struct ftrace_func_probe
*probe
;
3223 struct ftrace_func_entry
*probe_entry
;
3224 struct trace_parser parser
;
3225 struct ftrace_hash
*hash
;
3226 struct ftrace_ops
*ops
;
3227 struct trace_array
*tr
;
3228 struct list_head
*mod_list
;
3235 t_probe_next(struct seq_file
*m
, loff_t
*pos
)
3237 struct ftrace_iterator
*iter
= m
->private;
3238 struct trace_array
*tr
= iter
->ops
->private;
3239 struct list_head
*func_probes
;
3240 struct ftrace_hash
*hash
;
3241 struct list_head
*next
;
3242 struct hlist_node
*hnd
= NULL
;
3243 struct hlist_head
*hhd
;
3252 func_probes
= &tr
->func_probes
;
3253 if (list_empty(func_probes
))
3257 next
= func_probes
->next
;
3258 iter
->probe
= list_entry(next
, struct ftrace_func_probe
, list
);
3261 if (iter
->probe_entry
)
3262 hnd
= &iter
->probe_entry
->hlist
;
3264 hash
= iter
->probe
->ops
.func_hash
->filter_hash
;
3267 * A probe being registered may temporarily have an empty hash
3268 * and it's at the end of the func_probes list.
3270 if (!hash
|| hash
== EMPTY_HASH
)
3273 size
= 1 << hash
->size_bits
;
3276 if (iter
->pidx
>= size
) {
3277 if (iter
->probe
->list
.next
== func_probes
)
3279 next
= iter
->probe
->list
.next
;
3280 iter
->probe
= list_entry(next
, struct ftrace_func_probe
, list
);
3281 hash
= iter
->probe
->ops
.func_hash
->filter_hash
;
3282 size
= 1 << hash
->size_bits
;
3286 hhd
= &hash
->buckets
[iter
->pidx
];
3288 if (hlist_empty(hhd
)) {
3304 if (WARN_ON_ONCE(!hnd
))
3307 iter
->probe_entry
= hlist_entry(hnd
, struct ftrace_func_entry
, hlist
);
3312 static void *t_probe_start(struct seq_file
*m
, loff_t
*pos
)
3314 struct ftrace_iterator
*iter
= m
->private;
3318 if (!(iter
->flags
& FTRACE_ITER_DO_PROBES
))
3321 if (iter
->mod_pos
> *pos
)
3325 iter
->probe_entry
= NULL
;
3327 for (l
= 0; l
<= (*pos
- iter
->mod_pos
); ) {
3328 p
= t_probe_next(m
, &l
);
3335 /* Only set this if we have an item */
3336 iter
->flags
|= FTRACE_ITER_PROBE
;
3342 t_probe_show(struct seq_file
*m
, struct ftrace_iterator
*iter
)
3344 struct ftrace_func_entry
*probe_entry
;
3345 struct ftrace_probe_ops
*probe_ops
;
3346 struct ftrace_func_probe
*probe
;
3348 probe
= iter
->probe
;
3349 probe_entry
= iter
->probe_entry
;
3351 if (WARN_ON_ONCE(!probe
|| !probe_entry
))
3354 probe_ops
= probe
->probe_ops
;
3356 if (probe_ops
->print
)
3357 return probe_ops
->print(m
, probe_entry
->ip
, probe_ops
, probe
->data
);
3359 seq_printf(m
, "%ps:%ps\n", (void *)probe_entry
->ip
,
3360 (void *)probe_ops
->func
);
3366 t_mod_next(struct seq_file
*m
, loff_t
*pos
)
3368 struct ftrace_iterator
*iter
= m
->private;
3369 struct trace_array
*tr
= iter
->tr
;
3374 iter
->mod_list
= iter
->mod_list
->next
;
3376 if (iter
->mod_list
== &tr
->mod_trace
||
3377 iter
->mod_list
== &tr
->mod_notrace
) {
3378 iter
->flags
&= ~FTRACE_ITER_MOD
;
3382 iter
->mod_pos
= *pos
;
3387 static void *t_mod_start(struct seq_file
*m
, loff_t
*pos
)
3389 struct ftrace_iterator
*iter
= m
->private;
3393 if (iter
->func_pos
> *pos
)
3396 iter
->mod_pos
= iter
->func_pos
;
3398 /* probes are only available if tr is set */
3402 for (l
= 0; l
<= (*pos
- iter
->func_pos
); ) {
3403 p
= t_mod_next(m
, &l
);
3408 iter
->flags
&= ~FTRACE_ITER_MOD
;
3409 return t_probe_start(m
, pos
);
3412 /* Only set this if we have an item */
3413 iter
->flags
|= FTRACE_ITER_MOD
;
3419 t_mod_show(struct seq_file
*m
, struct ftrace_iterator
*iter
)
3421 struct ftrace_mod_load
*ftrace_mod
;
3422 struct trace_array
*tr
= iter
->tr
;
3424 if (WARN_ON_ONCE(!iter
->mod_list
) ||
3425 iter
->mod_list
== &tr
->mod_trace
||
3426 iter
->mod_list
== &tr
->mod_notrace
)
3429 ftrace_mod
= list_entry(iter
->mod_list
, struct ftrace_mod_load
, list
);
3431 if (ftrace_mod
->func
)
3432 seq_printf(m
, "%s", ftrace_mod
->func
);
3436 seq_printf(m
, ":mod:%s\n", ftrace_mod
->module
);
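/*
 * Example (illustrative): a cached module filter is created when the
 * ":mod:" command names a module that is not loaded yet, e.g.
 *
 *	# echo '*:mod:ext4' > /sys/kernel/tracing/set_ftrace_filter
 *
 * t_mod_show() above prints such an entry back as "*:mod:ext4", and the
 * cached filter is applied once the module is loaded (see
 * process_mod_list() further down).
 */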
3442 t_func_next(struct seq_file
*m
, loff_t
*pos
)
3444 struct ftrace_iterator
*iter
= m
->private;
3445 struct dyn_ftrace
*rec
= NULL
;
3450 if (iter
->idx
>= iter
->pg
->index
) {
3451 if (iter
->pg
->next
) {
3452 iter
->pg
= iter
->pg
->next
;
3457 rec
= &iter
->pg
->records
[iter
->idx
++];
3458 if (((iter
->flags
& (FTRACE_ITER_FILTER
| FTRACE_ITER_NOTRACE
)) &&
3459 !ftrace_lookup_ip(iter
->hash
, rec
->ip
)) ||
3461 ((iter
->flags
& FTRACE_ITER_ENABLED
) &&
3462 !(rec
->flags
& FTRACE_FL_ENABLED
))) {
3472 iter
->pos
= iter
->func_pos
= *pos
;
3479 t_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
3481 struct ftrace_iterator
*iter
= m
->private;
3482 loff_t l
= *pos
; /* t_probe_start() must use original pos */
3485 if (unlikely(ftrace_disabled
))
3488 if (iter
->flags
& FTRACE_ITER_PROBE
)
3489 return t_probe_next(m
, pos
);
3491 if (iter
->flags
& FTRACE_ITER_MOD
)
3492 return t_mod_next(m
, pos
);
3494 if (iter
->flags
& FTRACE_ITER_PRINTALL
) {
3495 /* next must increment pos, and t_probe_start does not */
3497 return t_mod_start(m
, &l
);
3500 ret
= t_func_next(m
, pos
);
3503 return t_mod_start(m
, &l
);
3508 static void reset_iter_read(struct ftrace_iterator
*iter
)
3512 iter
->flags
&= ~(FTRACE_ITER_PRINTALL
| FTRACE_ITER_PROBE
| FTRACE_ITER_MOD
);
3515 static void *t_start(struct seq_file
*m
, loff_t
*pos
)
3517 struct ftrace_iterator
*iter
= m
->private;
3521 mutex_lock(&ftrace_lock
);
3523 if (unlikely(ftrace_disabled
))
3527 * If an lseek was done, then reset and start from beginning.
3529 if (*pos
< iter
->pos
)
3530 reset_iter_read(iter
);
3533 * For set_ftrace_filter reading, if we have the filter
3534 * off, we can short cut and just print out that all
3535 * functions are enabled.
3537 if ((iter
->flags
& (FTRACE_ITER_FILTER
| FTRACE_ITER_NOTRACE
)) &&
3538 ftrace_hash_empty(iter
->hash
)) {
3539 iter
->func_pos
= 1; /* Account for the message */
3541 return t_mod_start(m
, pos
);
3542 iter
->flags
|= FTRACE_ITER_PRINTALL
;
3543 /* reset in case of seek/pread */
3544 iter
->flags
&= ~FTRACE_ITER_PROBE
;
3548 if (iter
->flags
& FTRACE_ITER_MOD
)
3549 return t_mod_start(m
, pos
);
3552 * Unfortunately, we need to restart at ftrace_pages_start
3553 * every time we let go of the ftrace_mutex. This is because
3554 * those pointers can change without the lock.
3556 iter
->pg
= ftrace_pages_start
;
3558 for (l
= 0; l
<= *pos
; ) {
3559 p
= t_func_next(m
, &l
);
3565 return t_mod_start(m
, pos
);
3570 static void t_stop(struct seq_file
*m
, void *p
)
3572 mutex_unlock(&ftrace_lock
);
3576 arch_ftrace_trampoline_func(struct ftrace_ops
*ops
, struct dyn_ftrace
*rec
)
3581 static void add_trampoline_func(struct seq_file
*m
, struct ftrace_ops
*ops
,
3582 struct dyn_ftrace
*rec
)
3586 ptr
= arch_ftrace_trampoline_func(ops
, rec
);
3588 seq_printf(m
, " ->%pS", ptr
);
3591 static int t_show(struct seq_file
*m
, void *v
)
3593 struct ftrace_iterator
*iter
= m
->private;
3594 struct dyn_ftrace
*rec
;
3596 if (iter
->flags
& FTRACE_ITER_PROBE
)
3597 return t_probe_show(m
, iter
);
3599 if (iter
->flags
& FTRACE_ITER_MOD
)
3600 return t_mod_show(m
, iter
);
3602 if (iter
->flags
& FTRACE_ITER_PRINTALL
) {
3603 if (iter
->flags
& FTRACE_ITER_NOTRACE
)
3604 seq_puts(m
, "#### no functions disabled ####\n");
3606 seq_puts(m
, "#### all functions enabled ####\n");
3615 seq_printf(m
, "%ps", (void *)rec
->ip
);
3616 if (iter
->flags
& FTRACE_ITER_ENABLED
) {
3617 struct ftrace_ops
*ops
;
3619 seq_printf(m
, " (%ld)%s%s%s",
3620 ftrace_rec_count(rec
),
3621 rec
->flags
& FTRACE_FL_REGS
? " R" : " ",
3622 rec
->flags
& FTRACE_FL_IPMODIFY
? " I" : " ",
3623 rec
->flags
& FTRACE_FL_DIRECT
? " D" : " ");
3624 if (rec
->flags
& FTRACE_FL_TRAMP_EN
) {
3625 ops
= ftrace_find_tramp_ops_any(rec
);
3628 seq_printf(m
, "\ttramp: %pS (%pS)",
3629 (void *)ops
->trampoline
,
3631 add_trampoline_func(m
, ops
, rec
);
3632 ops
= ftrace_find_tramp_ops_next(rec
, ops
);
3635 seq_puts(m
, "\ttramp: ERROR!");
3637 add_trampoline_func(m
, NULL
, rec
);
3639 if (rec
->flags
& FTRACE_FL_DIRECT
) {
3640 unsigned long direct
;
3642 direct
= ftrace_find_rec_direct(rec
->ip
);
3644 seq_printf(m
, "\n\tdirect-->%pS", (void *)direct
);
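/*
 * Roughly, a line of the "enabled_functions" file produced by t_show()
 * above looks like (hypothetical values, spacing approximate):
 *
 *	wake_up_process (1) R 	tramp: 0xffffffffc0280000 (my_callback) ->my_callback
 *
 * "(1)" is the number of ops attached to the record, the optional "R",
 * "I" and "D" mark regs-saving, ipmodify and direct users, and the
 * trailing annotations show which trampoline the call site currently uses.
 */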
3653 static const struct seq_operations show_ftrace_seq_ops
= {
3661 ftrace_avail_open(struct inode
*inode
, struct file
*file
)
3663 struct ftrace_iterator
*iter
;
3666 ret
= security_locked_down(LOCKDOWN_TRACEFS
);
3670 if (unlikely(ftrace_disabled
))
3673 iter
= __seq_open_private(file
, &show_ftrace_seq_ops
, sizeof(*iter
));
3677 iter
->pg
= ftrace_pages_start
;
3678 iter
->ops
= &global_ops
;
3684 ftrace_enabled_open(struct inode
*inode
, struct file
*file
)
3686 struct ftrace_iterator
*iter
;
3689 * This shows us what functions are currently being
3690 * traced and by what. Not sure if we want lockdown
3691 * to hide such critical information for an admin.
3692 * Although, perhaps it can show information we don't
3693 * want people to see, but if something is tracing
3694 * something, we probably want to know about it.
3697 iter
= __seq_open_private(file
, &show_ftrace_seq_ops
, sizeof(*iter
));
3701 iter
->pg
= ftrace_pages_start
;
3702 iter
->flags
= FTRACE_ITER_ENABLED
;
3703 iter
->ops
= &global_ops
;
3709 * ftrace_regex_open - initialize function tracer filter files
3710 * @ops: The ftrace_ops that hold the hash filters
3711 * @flag: The type of filter to process
3712 * @inode: The inode, usually passed in to your open routine
3713 * @file: The file, usually passed in to your open routine
3715 * ftrace_regex_open() initializes the filter files for the
3716 * @ops. Depending on @flag it may process the filter hash or
3717 * the notrace hash of @ops. With this called from the open
3718 * routine, you can use ftrace_filter_write() for the write
3719 * routine if @flag has FTRACE_ITER_FILTER set, or
3720 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3721 * tracing_lseek() should be used as the lseek routine, and
3722 * release must call ftrace_regex_release().
3725 ftrace_regex_open(struct ftrace_ops
*ops
, int flag
,
3726 struct inode
*inode
, struct file
*file
)
3728 struct ftrace_iterator
*iter
;
3729 struct ftrace_hash
*hash
;
3730 struct list_head
*mod_head
;
3731 struct trace_array
*tr
= ops
->private;
3734 ftrace_ops_init(ops
);
3736 if (unlikely(ftrace_disabled
))
3739 if (tracing_check_open_get_tr(tr
))
3742 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
3746 if (trace_parser_get_init(&iter
->parser
, FTRACE_BUFF_MAX
))
3753 mutex_lock(&ops
->func_hash
->regex_lock
);
3755 if (flag
& FTRACE_ITER_NOTRACE
) {
3756 hash
= ops
->func_hash
->notrace_hash
;
3757 mod_head
= tr
? &tr
->mod_notrace
: NULL
;
3759 hash
= ops
->func_hash
->filter_hash
;
3760 mod_head
= tr
? &tr
->mod_trace
: NULL
;
3763 iter
->mod_list
= mod_head
;
3765 if (file
->f_mode
& FMODE_WRITE
) {
3766 const int size_bits
= FTRACE_HASH_DEFAULT_BITS
;
3768 if (file
->f_flags
& O_TRUNC
) {
3769 iter
->hash
= alloc_ftrace_hash(size_bits
);
3770 clear_ftrace_mod_list(mod_head
);
3772 iter
->hash
= alloc_and_copy_ftrace_hash(size_bits
, hash
);
3776 trace_parser_put(&iter
->parser
);
3784 if (file
->f_mode
& FMODE_READ
) {
3785 iter
->pg
= ftrace_pages_start
;
3787 ret
= seq_open(file
, &show_ftrace_seq_ops
);
3789 struct seq_file
*m
= file
->private_data
;
3793 free_ftrace_hash(iter
->hash
);
3794 trace_parser_put(&iter
->parser
);
3797 file
->private_data
= iter
;
3800 mutex_unlock(&ops
->func_hash
->regex_lock
);
3806 trace_array_put(tr
);
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	/* Checks for tracefs lockdown */
	return ftrace_regex_open(ops,
			FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
			inode, file);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	/* Checks for tracefs lockdown */
	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
				 inode, file);
}
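/*
 * Sketch (for illustration only) of how the helpers above are meant to be
 * wired together, as described in the ftrace_regex_open() comment; this
 * mirrors the ftrace_filter_fops definition used elsewhere in this file:
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open    = ftrace_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = tracing_lseek,
 *		.release = ftrace_regex_release,
 *	};
 */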
/* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
struct ftrace_glob {
	char *search;
	unsigned len;
	int type;
};

/*
 * If symbols in an architecture don't correspond exactly to the user-visible
 * name of what they represent, it is possible to define this function to
 * perform the necessary adjustments.
 */
char * __weak arch_ftrace_match_adjust(char *str, const char *search)
{
	return str;
}

static int ftrace_match(char *str, struct ftrace_glob *g)
{
	int matched = 0;
	int slen;

	str = arch_ftrace_match_adjust(str, g->search);

	switch (g->type) {
	case MATCH_FULL:
		if (strcmp(str, g->search) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, g->search, g->len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, g->search))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		slen = strlen(str);
		if (slen >= g->len &&
		    memcmp(str + slen - g->len, g->search, g->len) == 0)
			matched = 1;
		break;
	case MATCH_GLOB:
		if (glob_match(g->search, str))
			matched = 1;
		break;
	}

	return matched;
}
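/*
 * Examples (illustrative) of how filter_parse_regex() reduces a user
 * pattern to the glob types handled by ftrace_match() above:
 *
 *	"schedule"     -> MATCH_FULL,        search = "schedule"
 *	"sched_*"      -> MATCH_FRONT_ONLY,  search = "sched_"
 *	"*_lock"       -> MATCH_END_ONLY,    search = "_lock"
 *	"*spin*"       -> MATCH_MIDDLE_ONLY, search = "spin"
 *	"mutex_[lu]*"  -> MATCH_GLOB,        search = "mutex_[lu]*"
 */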
3886 enter_record(struct ftrace_hash
*hash
, struct dyn_ftrace
*rec
, int clear_filter
)
3888 struct ftrace_func_entry
*entry
;
3891 entry
= ftrace_lookup_ip(hash
, rec
->ip
);
3893 /* Do nothing if it doesn't exist */
3897 free_hash_entry(hash
, entry
);
3899 /* Do nothing if it exists */
3903 ret
= add_hash_entry(hash
, rec
->ip
);
3909 add_rec_by_index(struct ftrace_hash
*hash
, struct ftrace_glob
*func_g
,
3912 long index
= simple_strtoul(func_g
->search
, NULL
, 0);
3913 struct ftrace_page
*pg
;
3914 struct dyn_ftrace
*rec
;
3916 /* The index starts at 1 */
3920 do_for_each_ftrace_rec(pg
, rec
) {
3921 if (pg
->index
<= index
) {
3923 /* this is a double loop, break goes to the next page */
3926 rec
= &pg
->records
[index
];
3927 enter_record(hash
, rec
, clear_filter
);
3929 } while_for_each_ftrace_rec();
3934 ftrace_match_record(struct dyn_ftrace
*rec
, struct ftrace_glob
*func_g
,
3935 struct ftrace_glob
*mod_g
, int exclude_mod
)
3937 char str
[KSYM_SYMBOL_LEN
];
3940 kallsyms_lookup(rec
->ip
, NULL
, NULL
, &modname
, str
);
3943 int mod_matches
= (modname
) ? ftrace_match(modname
, mod_g
) : 0;
3945 /* blank module name to match all modules */
3947 /* blank module globbing: modname xor exclude_mod */
3948 if (!exclude_mod
!= !modname
)
3954 * exclude_mod is set to trace everything but the given
3955 * module. If it is set and the module matches, then
3956 * return 0. If it is not set, and the module doesn't match
3957 * also return 0. Otherwise, check the function to see if
3960 if (!mod_matches
== !exclude_mod
)
3963 /* blank search means to match all funcs in the mod */
3968 return ftrace_match(str
, func_g
);
3972 match_records(struct ftrace_hash
*hash
, char *func
, int len
, char *mod
)
3974 struct ftrace_page
*pg
;
3975 struct dyn_ftrace
*rec
;
3976 struct ftrace_glob func_g
= { .type
= MATCH_FULL
};
3977 struct ftrace_glob mod_g
= { .type
= MATCH_FULL
};
3978 struct ftrace_glob
*mod_match
= (mod
) ? &mod_g
: NULL
;
3979 int exclude_mod
= 0;
3982 int clear_filter
= 0;
3985 func_g
.type
= filter_parse_regex(func
, len
, &func_g
.search
,
3987 func_g
.len
= strlen(func_g
.search
);
3991 mod_g
.type
= filter_parse_regex(mod
, strlen(mod
),
3992 &mod_g
.search
, &exclude_mod
);
3993 mod_g
.len
= strlen(mod_g
.search
);
3996 mutex_lock(&ftrace_lock
);
3998 if (unlikely(ftrace_disabled
))
4001 if (func_g
.type
== MATCH_INDEX
) {
4002 found
= add_rec_by_index(hash
, &func_g
, clear_filter
);
4006 do_for_each_ftrace_rec(pg
, rec
) {
4008 if (rec
->flags
& FTRACE_FL_DISABLED
)
4011 if (ftrace_match_record(rec
, &func_g
, mod_match
, exclude_mod
)) {
4012 ret
= enter_record(hash
, rec
, clear_filter
);
4019 } while_for_each_ftrace_rec();
4021 mutex_unlock(&ftrace_lock
);
4027 ftrace_match_records(struct ftrace_hash
*hash
, char *buff
, int len
)
4029 return match_records(hash
, buff
, len
, NULL
);
4032 static void ftrace_ops_update_code(struct ftrace_ops
*ops
,
4033 struct ftrace_ops_hash
*old_hash
)
4035 struct ftrace_ops
*op
;
4037 if (!ftrace_enabled
)
4040 if (ops
->flags
& FTRACE_OPS_FL_ENABLED
) {
4041 ftrace_run_modify_code(ops
, FTRACE_UPDATE_CALLS
, old_hash
);
4046 * If this is the shared global_ops filter, then we need to
 * check if there is another ops that shares it and is enabled.
4048 * If so, we still need to run the modify code.
4050 if (ops
->func_hash
!= &global_ops
.local_hash
)
4053 do_for_each_ftrace_op(op
, ftrace_ops_list
) {
4054 if (op
->func_hash
== &global_ops
.local_hash
&&
4055 op
->flags
& FTRACE_OPS_FL_ENABLED
) {
4056 ftrace_run_modify_code(op
, FTRACE_UPDATE_CALLS
, old_hash
);
4057 /* Only need to do this once */
4060 } while_for_each_ftrace_op(op
);
4063 static int ftrace_hash_move_and_update_ops(struct ftrace_ops
*ops
,
4064 struct ftrace_hash
**orig_hash
,
4065 struct ftrace_hash
*hash
,
4068 struct ftrace_ops_hash old_hash_ops
;
4069 struct ftrace_hash
*old_hash
;
4072 old_hash
= *orig_hash
;
4073 old_hash_ops
.filter_hash
= ops
->func_hash
->filter_hash
;
4074 old_hash_ops
.notrace_hash
= ops
->func_hash
->notrace_hash
;
4075 ret
= ftrace_hash_move(ops
, enable
, orig_hash
, hash
);
4077 ftrace_ops_update_code(ops
, &old_hash_ops
);
4078 free_ftrace_hash_rcu(old_hash
);
4083 static bool module_exists(const char *module
)
4085 /* All modules have the symbol __this_module */
4086 static const char this_mod
[] = "__this_module";
4087 char modname
[MAX_PARAM_PREFIX_LEN
+ sizeof(this_mod
) + 2];
4091 n
= snprintf(modname
, sizeof(modname
), "%s:%s", module
, this_mod
);
4093 if (n
> sizeof(modname
) - 1)
4096 val
= module_kallsyms_lookup_name(modname
);
4100 static int cache_mod(struct trace_array
*tr
,
4101 const char *func
, char *module
, int enable
)
4103 struct ftrace_mod_load
*ftrace_mod
, *n
;
4104 struct list_head
*head
= enable
? &tr
->mod_trace
: &tr
->mod_notrace
;
4107 mutex_lock(&ftrace_lock
);
4109 /* We do not cache inverse filters */
4110 if (func
[0] == '!') {
4114 /* Look to remove this hash */
4115 list_for_each_entry_safe(ftrace_mod
, n
, head
, list
) {
4116 if (strcmp(ftrace_mod
->module
, module
) != 0)
4119 /* no func matches all */
4120 if (strcmp(func
, "*") == 0 ||
4121 (ftrace_mod
->func
&&
4122 strcmp(ftrace_mod
->func
, func
) == 0)) {
4124 free_ftrace_mod(ftrace_mod
);
4132 /* We only care about modules that have not been loaded yet */
4133 if (module_exists(module
))
4136 /* Save this string off, and execute it when the module is loaded */
4137 ret
= ftrace_add_mod(tr
, func
, module
, enable
);
4139 mutex_unlock(&ftrace_lock
);
4145 ftrace_set_regex(struct ftrace_ops
*ops
, unsigned char *buf
, int len
,
4146 int reset
, int enable
);
4148 #ifdef CONFIG_MODULES
4149 static void process_mod_list(struct list_head
*head
, struct ftrace_ops
*ops
,
4150 char *mod
, bool enable
)
4152 struct ftrace_mod_load
*ftrace_mod
, *n
;
4153 struct ftrace_hash
**orig_hash
, *new_hash
;
4154 LIST_HEAD(process_mods
);
4158 mutex_lock(&ops
->func_hash
->regex_lock
);
4161 orig_hash
= &ops
->func_hash
->filter_hash
;
4163 orig_hash
= &ops
->func_hash
->notrace_hash
;
4165 new_hash
= alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS
,
4168 goto out
; /* warn? */
4170 mutex_lock(&ftrace_lock
);
4172 list_for_each_entry_safe(ftrace_mod
, n
, head
, list
) {
4174 if (strcmp(ftrace_mod
->module
, mod
) != 0)
4177 if (ftrace_mod
->func
)
4178 func
= kstrdup(ftrace_mod
->func
, GFP_KERNEL
);
4180 func
= kstrdup("*", GFP_KERNEL
);
4182 if (!func
) /* warn? */
4185 list_del(&ftrace_mod
->list
);
4186 list_add(&ftrace_mod
->list
, &process_mods
);
4188 /* Use the newly allocated func, as it may be "*" */
4189 kfree(ftrace_mod
->func
);
4190 ftrace_mod
->func
= func
;
4193 mutex_unlock(&ftrace_lock
);
4195 list_for_each_entry_safe(ftrace_mod
, n
, &process_mods
, list
) {
4197 func
= ftrace_mod
->func
;
4199 /* Grabs ftrace_lock, which is why we have this extra step */
4200 match_records(new_hash
, func
, strlen(func
), mod
);
4201 free_ftrace_mod(ftrace_mod
);
4204 if (enable
&& list_empty(head
))
4205 new_hash
->flags
&= ~FTRACE_HASH_FL_MOD
;
4207 mutex_lock(&ftrace_lock
);
4209 ret
= ftrace_hash_move_and_update_ops(ops
, orig_hash
,
4211 mutex_unlock(&ftrace_lock
);
4214 mutex_unlock(&ops
->func_hash
->regex_lock
);
4216 free_ftrace_hash(new_hash
);
4219 static void process_cached_mods(const char *mod_name
)
4221 struct trace_array
*tr
;
4224 mod
= kstrdup(mod_name
, GFP_KERNEL
);
4228 mutex_lock(&trace_types_lock
);
4229 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
4230 if (!list_empty(&tr
->mod_trace
))
4231 process_mod_list(&tr
->mod_trace
, tr
->ops
, mod
, true);
4232 if (!list_empty(&tr
->mod_notrace
))
4233 process_mod_list(&tr
->mod_notrace
, tr
->ops
, mod
, false);
4235 mutex_unlock(&trace_types_lock
);
4242 * We register the module command as a template to show others how
 * to register a command as well.
4247 ftrace_mod_callback(struct trace_array
*tr
, struct ftrace_hash
*hash
,
4248 char *func_orig
, char *cmd
, char *module
, int enable
)
4253 /* match_records() modifies func, and we need the original */
4254 func
= kstrdup(func_orig
, GFP_KERNEL
);
4259 * cmd == 'mod' because we only registered this func
4260 * for the 'mod' ftrace_func_command.
4261 * But if you register one func with multiple commands,
4262 * you can tell which command was used by the cmd
4265 ret
= match_records(hash
, func
, strlen(func
), module
);
4269 return cache_mod(tr
, func_orig
, module
, enable
);
static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
core_initcall(ftrace_mod_cmd_init);
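/*
 * Sketch of how another command could be registered, following the "mod"
 * template above (all names here are hypothetical):
 *
 *	static int my_cmd_callback(struct trace_array *tr,
 *				   struct ftrace_hash *hash,
 *				   char *func, char *cmd,
 *				   char *param, int enable)
 *	{
 *		(act on "func:my_cmd:param" written to set_ftrace_filter)
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "my_cmd",
 *		.func	= my_cmd_callback,
 *	};
 *
 *	static int __init my_cmd_init(void)
 *	{
 *		return register_ftrace_command(&my_cmd);
 *	}
 *	core_initcall(my_cmd_init);
 */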
4286 static void function_trace_probe_call(unsigned long ip
, unsigned long parent_ip
,
4287 struct ftrace_ops
*op
, struct pt_regs
*pt_regs
)
4289 struct ftrace_probe_ops
*probe_ops
;
4290 struct ftrace_func_probe
*probe
;
4292 probe
= container_of(op
, struct ftrace_func_probe
, ops
);
4293 probe_ops
= probe
->probe_ops
;
4296 * Disable preemption for these calls to prevent a RCU grace
4297 * period. This syncs the hash iteration and freeing of items
4298 * on the hash. rcu_read_lock is too dangerous here.
4300 preempt_disable_notrace();
4301 probe_ops
->func(ip
, parent_ip
, probe
->tr
, probe_ops
, probe
->data
);
4302 preempt_enable_notrace();
struct ftrace_func_map {
	struct ftrace_func_entry	entry;
	void				*data;
};

struct ftrace_func_mapper {
	struct ftrace_hash		hash;
};

/**
 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
 *
 * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
 */
struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
{
	struct ftrace_hash *hash;

	/*
	 * The mapper is simply a ftrace_hash, but since the entries
	 * in the hash are not ftrace_func_entry type, we define it
	 * as a separate structure.
	 */
	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
	return (struct ftrace_func_mapper *)hash;
}
4333 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4334 * @mapper: The mapper that has the ip maps
4335 * @ip: the instruction pointer to find the data for
4337 * Returns the data mapped to @ip if found otherwise NULL. The return
4338 * is actually the address of the mapper data pointer. The address is
4339 * returned for use cases where the data is no bigger than a long, and
4340 * the user can use the data pointer as its data instead of having to
4341 * allocate more memory for the reference.
4343 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper
*mapper
,
4346 struct ftrace_func_entry
*entry
;
4347 struct ftrace_func_map
*map
;
4349 entry
= ftrace_lookup_ip(&mapper
->hash
, ip
);
4353 map
= (struct ftrace_func_map
*)entry
;
4358 * ftrace_func_mapper_add_ip - Map some data to an ip
4359 * @mapper: The mapper that has the ip maps
4360 * @ip: The instruction pointer address to map @data to
4361 * @data: The data to map to @ip
 * Returns 0 on success, otherwise an error.
4365 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper
*mapper
,
4366 unsigned long ip
, void *data
)
4368 struct ftrace_func_entry
*entry
;
4369 struct ftrace_func_map
*map
;
4371 entry
= ftrace_lookup_ip(&mapper
->hash
, ip
);
4375 map
= kmalloc(sizeof(*map
), GFP_KERNEL
);
4382 __add_hash_entry(&mapper
->hash
, &map
->entry
);
4388 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4389 * @mapper: The mapper that has the ip maps
4390 * @ip: The instruction pointer address to remove the data from
4392 * Returns the data if it is found, otherwise NULL.
4393 * Note, if the data pointer is used as the data itself, (see
4394 * ftrace_func_mapper_find_ip(), then the return value may be meaningless,
4395 * if the data pointer was set to zero.
4397 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper
*mapper
,
4400 struct ftrace_func_entry
*entry
;
4401 struct ftrace_func_map
*map
;
4404 entry
= ftrace_lookup_ip(&mapper
->hash
, ip
);
4408 map
= (struct ftrace_func_map
*)entry
;
4411 remove_hash_entry(&mapper
->hash
, entry
);
4418 * free_ftrace_func_mapper - free a mapping of ips and data
4419 * @mapper: The mapper that has the ip maps
4420 * @free_func: A function to be called on each data item.
4422 * This is used to free the function mapper. The @free_func is optional
4423 * and can be used if the data needs to be freed as well.
4425 void free_ftrace_func_mapper(struct ftrace_func_mapper
*mapper
,
4426 ftrace_mapper_func free_func
)
4428 struct ftrace_func_entry
*entry
;
4429 struct ftrace_func_map
*map
;
4430 struct hlist_head
*hhd
;
4436 if (free_func
&& mapper
->hash
.count
) {
4437 size
= 1 << mapper
->hash
.size_bits
;
4438 for (i
= 0; i
< size
; i
++) {
4439 hhd
= &mapper
->hash
.buckets
[i
];
4440 hlist_for_each_entry(entry
, hhd
, hlist
) {
4441 map
= (struct ftrace_func_map
*)entry
;
4446 free_ftrace_hash(&mapper
->hash
);
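/*
 * Typical (illustrative) use of the mapper by a probe implementation;
 * my_data, use_data() and my_free_func() are hypothetical:
 *
 *	struct ftrace_func_mapper *mapper = allocate_ftrace_func_mapper();
 *	void **data;
 *
 *	ftrace_func_mapper_add_ip(mapper, ip, my_data);
 *	...
 *	data = ftrace_func_mapper_find_ip(mapper, ip);
 *	if (data)
 *		use_data(*data);
 *	...
 *	free_ftrace_func_mapper(mapper, my_free_func);
 *
 * The function probes in the tracing code use this pattern to keep, for
 * instance, a per-function count keyed by the traced ip.
 */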
4449 static void release_probe(struct ftrace_func_probe
*probe
)
4451 struct ftrace_probe_ops
*probe_ops
;
4453 mutex_lock(&ftrace_lock
);
4455 WARN_ON(probe
->ref
<= 0);
4457 /* Subtract the ref that was used to protect this instance */
4461 probe_ops
= probe
->probe_ops
;
4463 * Sending zero as ip tells probe_ops to free
4464 * the probe->data itself
4466 if (probe_ops
->free
)
4467 probe_ops
->free(probe_ops
, probe
->tr
, 0, probe
->data
);
4468 list_del(&probe
->list
);
4471 mutex_unlock(&ftrace_lock
);
4474 static void acquire_probe_locked(struct ftrace_func_probe
*probe
)
4477 * Add one ref to keep it from being freed when releasing the
4478 * ftrace_lock mutex.
4484 register_ftrace_function_probe(char *glob
, struct trace_array
*tr
,
4485 struct ftrace_probe_ops
*probe_ops
,
4488 struct ftrace_func_entry
*entry
;
4489 struct ftrace_func_probe
*probe
;
4490 struct ftrace_hash
**orig_hash
;
4491 struct ftrace_hash
*old_hash
;
4492 struct ftrace_hash
*hash
;
4501 /* We do not support '!' for function probes */
4502 if (WARN_ON(glob
[0] == '!'))
4506 mutex_lock(&ftrace_lock
);
4507 /* Check if the probe_ops is already registered */
4508 list_for_each_entry(probe
, &tr
->func_probes
, list
) {
4509 if (probe
->probe_ops
== probe_ops
)
4512 if (&probe
->list
== &tr
->func_probes
) {
4513 probe
= kzalloc(sizeof(*probe
), GFP_KERNEL
);
4515 mutex_unlock(&ftrace_lock
);
4518 probe
->probe_ops
= probe_ops
;
4519 probe
->ops
.func
= function_trace_probe_call
;
4521 ftrace_ops_init(&probe
->ops
);
4522 list_add(&probe
->list
, &tr
->func_probes
);
4525 acquire_probe_locked(probe
);
4527 mutex_unlock(&ftrace_lock
);
4530 * Note, there's a small window here that the func_hash->filter_hash
 * may be NULL or empty. Need to be careful when reading the loop.
4533 mutex_lock(&probe
->ops
.func_hash
->regex_lock
);
4535 orig_hash
= &probe
->ops
.func_hash
->filter_hash
;
4536 old_hash
= *orig_hash
;
4537 hash
= alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS
, old_hash
);
4544 ret
= ftrace_match_records(hash
, glob
, strlen(glob
));
4546 /* Nothing found? */
4553 size
= 1 << hash
->size_bits
;
4554 for (i
= 0; i
< size
; i
++) {
4555 hlist_for_each_entry(entry
, &hash
->buckets
[i
], hlist
) {
4556 if (ftrace_lookup_ip(old_hash
, entry
->ip
))
4559 * The caller might want to do something special
4560 * for each function we find. We call the callback
4561 * to give the caller an opportunity to do so.
4563 if (probe_ops
->init
) {
4564 ret
= probe_ops
->init(probe_ops
, tr
,
4568 if (probe_ops
->free
&& count
)
4569 probe_ops
->free(probe_ops
, tr
,
4579 mutex_lock(&ftrace_lock
);
4582 /* Nothing was added? */
4587 ret
= ftrace_hash_move_and_update_ops(&probe
->ops
, orig_hash
,
4592 /* One ref for each new function traced */
4593 probe
->ref
+= count
;
4595 if (!(probe
->ops
.flags
& FTRACE_OPS_FL_ENABLED
))
4596 ret
= ftrace_startup(&probe
->ops
, 0);
4599 mutex_unlock(&ftrace_lock
);
4604 mutex_unlock(&probe
->ops
.func_hash
->regex_lock
);
4605 free_ftrace_hash(hash
);
4607 release_probe(probe
);
4612 if (!probe_ops
->free
|| !count
)
4615 /* Failed to do the move, need to call the free functions */
4616 for (i
= 0; i
< size
; i
++) {
4617 hlist_for_each_entry(entry
, &hash
->buckets
[i
], hlist
) {
4618 if (ftrace_lookup_ip(old_hash
, entry
->ip
))
4620 probe_ops
->free(probe_ops
, tr
, entry
->ip
, probe
->data
);
4627 unregister_ftrace_function_probe_func(char *glob
, struct trace_array
*tr
,
4628 struct ftrace_probe_ops
*probe_ops
)
4630 struct ftrace_ops_hash old_hash_ops
;
4631 struct ftrace_func_entry
*entry
;
4632 struct ftrace_func_probe
*probe
;
4633 struct ftrace_glob func_g
;
4634 struct ftrace_hash
**orig_hash
;
4635 struct ftrace_hash
*old_hash
;
4636 struct ftrace_hash
*hash
= NULL
;
4637 struct hlist_node
*tmp
;
4638 struct hlist_head hhd
;
4639 char str
[KSYM_SYMBOL_LEN
];
4641 int i
, ret
= -ENODEV
;
4644 if (!glob
|| !strlen(glob
) || !strcmp(glob
, "*"))
4645 func_g
.search
= NULL
;
4649 func_g
.type
= filter_parse_regex(glob
, strlen(glob
),
4650 &func_g
.search
, ¬);
4651 func_g
.len
= strlen(func_g
.search
);
4653 /* we do not support '!' for function probes */
4658 mutex_lock(&ftrace_lock
);
4659 /* Check if the probe_ops is already registered */
4660 list_for_each_entry(probe
, &tr
->func_probes
, list
) {
4661 if (probe
->probe_ops
== probe_ops
)
4664 if (&probe
->list
== &tr
->func_probes
)
4665 goto err_unlock_ftrace
;
4668 if (!(probe
->ops
.flags
& FTRACE_OPS_FL_INITIALIZED
))
4669 goto err_unlock_ftrace
;
4671 acquire_probe_locked(probe
);
4673 mutex_unlock(&ftrace_lock
);
4675 mutex_lock(&probe
->ops
.func_hash
->regex_lock
);
4677 orig_hash
= &probe
->ops
.func_hash
->filter_hash
;
4678 old_hash
= *orig_hash
;
4680 if (ftrace_hash_empty(old_hash
))
4683 old_hash_ops
.filter_hash
= old_hash
;
4684 /* Probes only have filters */
4685 old_hash_ops
.notrace_hash
= NULL
;
4688 hash
= alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS
, old_hash
);
4692 INIT_HLIST_HEAD(&hhd
);
4694 size
= 1 << hash
->size_bits
;
4695 for (i
= 0; i
< size
; i
++) {
4696 hlist_for_each_entry_safe(entry
, tmp
, &hash
->buckets
[i
], hlist
) {
4698 if (func_g
.search
) {
4699 kallsyms_lookup(entry
->ip
, NULL
, NULL
,
4701 if (!ftrace_match(str
, &func_g
))
4705 remove_hash_entry(hash
, entry
);
4706 hlist_add_head(&entry
->hlist
, &hhd
);
4710 /* Nothing found? */
4716 mutex_lock(&ftrace_lock
);
4718 WARN_ON(probe
->ref
< count
);
4720 probe
->ref
-= count
;
4722 if (ftrace_hash_empty(hash
))
4723 ftrace_shutdown(&probe
->ops
, 0);
4725 ret
= ftrace_hash_move_and_update_ops(&probe
->ops
, orig_hash
,
4728 /* still need to update the function call sites */
4729 if (ftrace_enabled
&& !ftrace_hash_empty(hash
))
4730 ftrace_run_modify_code(&probe
->ops
, FTRACE_UPDATE_CALLS
,
4734 hlist_for_each_entry_safe(entry
, tmp
, &hhd
, hlist
) {
4735 hlist_del(&entry
->hlist
);
4736 if (probe_ops
->free
)
4737 probe_ops
->free(probe_ops
, tr
, entry
->ip
, probe
->data
);
4740 mutex_unlock(&ftrace_lock
);
4743 mutex_unlock(&probe
->ops
.func_hash
->regex_lock
);
4744 free_ftrace_hash(hash
);
4746 release_probe(probe
);
4751 mutex_unlock(&ftrace_lock
);
4755 void clear_ftrace_function_probes(struct trace_array
*tr
)
4757 struct ftrace_func_probe
*probe
, *n
;
4759 list_for_each_entry_safe(probe
, n
, &tr
->func_probes
, list
)
4760 unregister_ftrace_function_probe_func(NULL
, tr
, probe
->probe_ops
);
4763 static LIST_HEAD(ftrace_commands
);
4764 static DEFINE_MUTEX(ftrace_cmd_mutex
);
4767 * Currently we only register ftrace commands from __init, so mark this
4770 __init
int register_ftrace_command(struct ftrace_func_command
*cmd
)
4772 struct ftrace_func_command
*p
;
4775 mutex_lock(&ftrace_cmd_mutex
);
4776 list_for_each_entry(p
, &ftrace_commands
, list
) {
4777 if (strcmp(cmd
->name
, p
->name
) == 0) {
4782 list_add(&cmd
->list
, &ftrace_commands
);
4784 mutex_unlock(&ftrace_cmd_mutex
);
4790 * Currently we only unregister ftrace commands from __init, so mark
4793 __init
int unregister_ftrace_command(struct ftrace_func_command
*cmd
)
4795 struct ftrace_func_command
*p
, *n
;
4798 mutex_lock(&ftrace_cmd_mutex
);
4799 list_for_each_entry_safe(p
, n
, &ftrace_commands
, list
) {
4800 if (strcmp(cmd
->name
, p
->name
) == 0) {
4802 list_del_init(&p
->list
);
4807 mutex_unlock(&ftrace_cmd_mutex
);
4812 static int ftrace_process_regex(struct ftrace_iterator
*iter
,
4813 char *buff
, int len
, int enable
)
4815 struct ftrace_hash
*hash
= iter
->hash
;
4816 struct trace_array
*tr
= iter
->ops
->private;
4817 char *func
, *command
, *next
= buff
;
4818 struct ftrace_func_command
*p
;
4821 func
= strsep(&next
, ":");
4824 ret
= ftrace_match_records(hash
, func
, len
);
4834 command
= strsep(&next
, ":");
4836 mutex_lock(&ftrace_cmd_mutex
);
4837 list_for_each_entry(p
, &ftrace_commands
, list
) {
4838 if (strcmp(p
->name
, command
) == 0) {
4839 ret
= p
->func(tr
, hash
, func
, command
, next
, enable
);
4844 mutex_unlock(&ftrace_cmd_mutex
);
4850 ftrace_regex_write(struct file
*file
, const char __user
*ubuf
,
4851 size_t cnt
, loff_t
*ppos
, int enable
)
4853 struct ftrace_iterator
*iter
;
4854 struct trace_parser
*parser
;
4860 if (file
->f_mode
& FMODE_READ
) {
4861 struct seq_file
*m
= file
->private_data
;
4864 iter
= file
->private_data
;
4866 if (unlikely(ftrace_disabled
))
4869 /* iter->hash is a local copy, so we don't need regex_lock */
4871 parser
= &iter
->parser
;
4872 read
= trace_get_user(parser
, ubuf
, cnt
, ppos
);
4874 if (read
>= 0 && trace_parser_loaded(parser
) &&
4875 !trace_parser_cont(parser
)) {
4876 ret
= ftrace_process_regex(iter
, parser
->buffer
,
4877 parser
->idx
, enable
);
4878 trace_parser_clear(parser
);
4889 ftrace_filter_write(struct file
*file
, const char __user
*ubuf
,
4890 size_t cnt
, loff_t
*ppos
)
4892 return ftrace_regex_write(file
, ubuf
, cnt
, ppos
, 1);
4896 ftrace_notrace_write(struct file
*file
, const char __user
*ubuf
,
4897 size_t cnt
, loff_t
*ppos
)
4899 return ftrace_regex_write(file
, ubuf
, cnt
, ppos
, 0);
4903 ftrace_match_addr(struct ftrace_hash
*hash
, unsigned long ip
, int remove
)
4905 struct ftrace_func_entry
*entry
;
4907 if (!ftrace_location(ip
))
4911 entry
= ftrace_lookup_ip(hash
, ip
);
4914 free_hash_entry(hash
, entry
);
4918 return add_hash_entry(hash
, ip
);
4922 ftrace_set_hash(struct ftrace_ops
*ops
, unsigned char *buf
, int len
,
4923 unsigned long ip
, int remove
, int reset
, int enable
)
4925 struct ftrace_hash
**orig_hash
;
4926 struct ftrace_hash
*hash
;
4929 if (unlikely(ftrace_disabled
))
4932 mutex_lock(&ops
->func_hash
->regex_lock
);
4935 orig_hash
= &ops
->func_hash
->filter_hash
;
4937 orig_hash
= &ops
->func_hash
->notrace_hash
;
4940 hash
= alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS
);
4942 hash
= alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS
, *orig_hash
);
4946 goto out_regex_unlock
;
4949 if (buf
&& !ftrace_match_records(hash
, buf
, len
)) {
4951 goto out_regex_unlock
;
4954 ret
= ftrace_match_addr(hash
, ip
, remove
);
4956 goto out_regex_unlock
;
4959 mutex_lock(&ftrace_lock
);
4960 ret
= ftrace_hash_move_and_update_ops(ops
, orig_hash
, hash
, enable
);
4961 mutex_unlock(&ftrace_lock
);
4964 mutex_unlock(&ops
->func_hash
->regex_lock
);
4966 free_ftrace_hash(hash
);
static int
ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
		int reset, int enable)
{
	return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
}
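/*
 * Illustrative example of the address based filtering built on
 * ftrace_set_hash()/ftrace_set_addr() above, by way of the exported
 * ftrace_set_filter_ip() helper (my_ops and my_func are hypothetical):
 *
 *	ret = ftrace_set_filter_ip(&my_ops, (unsigned long)my_func, 0, 0);
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 *
 * The third argument removes the address from the filter when non-zero,
 * and the fourth resets the existing filter before adding the address.
 */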
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

struct ftrace_direct_func {
	struct list_head	next;
	unsigned long		addr;
	int			count;
};

static LIST_HEAD(ftrace_direct_funcs);
4988 * ftrace_find_direct_func - test an address if it is a registered direct caller
4989 * @addr: The address of a registered direct caller
4991 * This searches to see if a ftrace direct caller has been registered
4992 * at a specific address, and if so, it returns a descriptor for it.
4994 * This can be used by architecture code to see if an address is
4995 * a direct caller (trampoline) attached to a fentry/mcount location.
4996 * This is useful for the function_graph tracer, as it may need to
4997 * do adjustments if it traced a location that also has a direct
4998 * trampoline attached to it.
5000 struct ftrace_direct_func
*ftrace_find_direct_func(unsigned long addr
)
5002 struct ftrace_direct_func
*entry
;
5005 /* May be called by fgraph trampoline (protected by rcu tasks) */
5006 list_for_each_entry_rcu(entry
, &ftrace_direct_funcs
, next
) {
5007 if (entry
->addr
== addr
) {
5019 * register_ftrace_direct - Call a custom trampoline directly
5020 * @ip: The address of the nop at the beginning of a function
5021 * @addr: The address of the trampoline to call at @ip
5023 * This is used to connect a direct call from the nop location (@ip)
5024 * at the start of ftrace traced functions. The location that it calls
5025 * (@addr) must be able to handle a direct call, and save the parameters
5026 * of the function being traced, and restore them (or inject new ones
5027 * if needed), before returning.
5031 * -EBUSY - Another direct function is already attached (there can be only one)
5032 * -ENODEV - @ip does not point to a ftrace nop location (or not supported)
5033 * -ENOMEM - There was an allocation failure.
5035 int register_ftrace_direct(unsigned long ip
, unsigned long addr
)
5037 struct ftrace_direct_func
*direct
;
5038 struct ftrace_func_entry
*entry
;
5039 struct ftrace_hash
*free_hash
= NULL
;
5040 struct dyn_ftrace
*rec
;
5043 mutex_lock(&direct_mutex
);
5045 /* See if there's a direct function at @ip already */
5046 if (ftrace_find_rec_direct(ip
))
5050 rec
= lookup_rec(ip
, ip
);
5055 * Check if the rec says it has a direct call but we didn't
5058 if (WARN_ON(rec
->flags
& FTRACE_FL_DIRECT
))
5061 /* Make sure the ip points to the exact record */
5062 if (ip
!= rec
->ip
) {
5064 /* Need to check this ip for a direct. */
5065 if (ftrace_find_rec_direct(ip
))
5070 if (ftrace_hash_empty(direct_functions
) ||
5071 direct_functions
->count
> 2 * (1 << direct_functions
->size_bits
)) {
5072 struct ftrace_hash
*new_hash
;
5073 int size
= ftrace_hash_empty(direct_functions
) ? 0 :
5074 direct_functions
->count
+ 1;
5079 new_hash
= dup_hash(direct_functions
, size
);
5083 free_hash
= direct_functions
;
5084 direct_functions
= new_hash
;
5087 entry
= kmalloc(sizeof(*entry
), GFP_KERNEL
);
5091 direct
= ftrace_find_direct_func(addr
);
5093 direct
= kmalloc(sizeof(*direct
), GFP_KERNEL
);
5098 direct
->addr
= addr
;
5100 list_add_rcu(&direct
->next
, &ftrace_direct_funcs
);
5101 ftrace_direct_func_count
++;
5105 entry
->direct
= addr
;
5106 __add_hash_entry(direct_functions
, entry
);
5108 ret
= ftrace_set_filter_ip(&direct_ops
, ip
, 0, 0);
5110 remove_hash_entry(direct_functions
, entry
);
5112 if (!ret
&& !(direct_ops
.flags
& FTRACE_OPS_FL_ENABLED
)) {
5113 ret
= register_ftrace_function(&direct_ops
);
5115 ftrace_set_filter_ip(&direct_ops
, ip
, 1, 0);
5120 if (!direct
->count
) {
5121 list_del_rcu(&direct
->next
);
5122 synchronize_rcu_tasks();
5125 free_ftrace_hash(free_hash
);
5127 ftrace_direct_func_count
--;
5133 mutex_unlock(&direct_mutex
);
5136 synchronize_rcu_tasks();
5137 free_ftrace_hash(free_hash
);
5142 EXPORT_SYMBOL_GPL(register_ftrace_direct
);
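/*
 * Illustrative sketch (not part of this file): how a module might attach a
 * direct trampoline to an exported, traced function, modelled on the in-tree
 * samples/ftrace/ examples. "my_tramp" is assumed to be an architecture
 * specific assembly stub that saves and restores the traced function's
 * registers around its own work; it and the module names are hypothetical.
 * Requires CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS.
 */
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sched.h>

extern void my_tramp(void);	/* hypothetical asm trampoline */

static int __init example_direct_init(void)
{
	/* Attach the trampoline to the fentry/mcount nop of wake_up_process() */
	return register_ftrace_direct((unsigned long)wake_up_process,
				      (unsigned long)my_tramp);
}

static void __exit example_direct_exit(void)
{
	/* Detach with the same ip/addr pair that was registered */
	unregister_ftrace_direct((unsigned long)wake_up_process,
				 (unsigned long)my_tramp);
}

module_init(example_direct_init);
module_exit(example_direct_exit);
MODULE_LICENSE("GPL");
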
static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
						   struct dyn_ftrace **recp)
{
	struct ftrace_func_entry *entry;
	struct dyn_ftrace *rec;

	rec = lookup_rec(*ip, *ip);
	if (!rec)
		return NULL;

	entry = __ftrace_lookup_ip(direct_functions, rec->ip);
	if (!entry) {
		WARN_ON(rec->flags & FTRACE_FL_DIRECT);
		return NULL;
	}

	WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));

	/* Passed in ip just needs to be on the call site */
	*ip = rec->ip;

	if (recp)
		*recp = rec;

	return entry;
}

int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
	struct ftrace_direct_func *direct;
	struct ftrace_func_entry *entry;
	int ret = -ENODEV;

	mutex_lock(&direct_mutex);

	entry = find_direct_entry(&ip, NULL);
	if (!entry)
		goto out_unlock;

	if (direct_functions->count == 1)
		unregister_ftrace_function(&direct_ops);

	ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);

	WARN_ON(ret);

	remove_hash_entry(direct_functions, entry);

	direct = ftrace_find_direct_func(addr);
	if (!WARN_ON(!direct)) {
		/* This is the good path (see the ! before WARN) */
		direct->count--;
		WARN_ON(direct->count < 0);
		if (!direct->count) {
			list_del_rcu(&direct->next);
			synchronize_rcu_tasks();
			kfree(direct);
			kfree(entry);
			ftrace_direct_func_count--;
		}
	}
 out_unlock:
	mutex_unlock(&direct_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_direct);

static struct ftrace_ops stub_ops = {
	.func		= ftrace_stub,
};

/**
 * ftrace_modify_direct_caller - modify ftrace nop directly
 * @entry: The ftrace hash entry of the direct helper for @rec
 * @rec: The record representing the function site to patch
 * @old_addr: The location that the site at @rec->ip currently calls
 * @new_addr: The location that the site at @rec->ip should call
 *
 * An architecture may overwrite this function to optimize the
 * changing of the direct callback on an ftrace nop location.
 * This is called with the ftrace_lock mutex held, and no other
 * ftrace callbacks are on the associated record (@rec). Thus,
 * it is safe to modify the ftrace record, where it should be
 * currently calling @old_addr directly, to call @new_addr.
 *
 * Safety checks should be made to make sure that the code at
 * @rec->ip is currently calling @old_addr. And this must
 * also update entry->direct to @new_addr.
 */
int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
				       struct dyn_ftrace *rec,
				       unsigned long old_addr,
				       unsigned long new_addr)
{
	unsigned long ip = rec->ip;
	int ret;

	/*
	 * The ftrace_lock was used to determine if the record
	 * had more than one registered user to it. If it did,
	 * we needed to prevent that from changing to do the quick
	 * switch. But if it did not (only a direct caller was attached)
	 * then this function is called. But this function can deal
	 * with attached callers to the rec that we care about, and
	 * since this function uses standard ftrace calls that take
	 * the ftrace_lock mutex, we need to release it.
	 */
	mutex_unlock(&ftrace_lock);

	/*
	 * By setting a stub function at the same address, we force
	 * the code to call the iterator and the direct_ops helper.
	 * This means that @ip does not call the direct call, and
	 * we can simply modify it.
	 */
	ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
	if (ret)
		goto out_lock;

	ret = register_ftrace_function(&stub_ops);
	if (ret) {
		ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
		goto out_lock;
	}

	entry->direct = new_addr;

	/*
	 * By removing the stub, we put back the direct call, calling
	 * the @new_addr.
	 */
	unregister_ftrace_function(&stub_ops);
	ftrace_set_filter_ip(&stub_ops, ip, 1, 0);

 out_lock:
	mutex_lock(&ftrace_lock);

	return ret;
}

/**
 * modify_ftrace_direct - Modify an existing direct call to call something else
 * @ip: The instruction pointer to modify
 * @old_addr: The address that the current @ip calls directly
 * @new_addr: The address that the @ip should call
 *
 * This modifies a ftrace direct caller at an instruction pointer without
 * having to disable it first. The direct call will switch over to the
 * @new_addr without missing anything.
 *
 * Returns: zero on success. Non zero on error, which includes:
 *  -ENODEV : the @ip given has no direct caller attached
 *  -EINVAL : the @old_addr does not match the current direct caller
 */
int modify_ftrace_direct(unsigned long ip,
			 unsigned long old_addr, unsigned long new_addr)
{
	struct ftrace_func_entry *entry;
	struct dyn_ftrace *rec;
	int ret = -ENODEV;

	mutex_lock(&direct_mutex);

	mutex_lock(&ftrace_lock);
	entry = find_direct_entry(&ip, &rec);
	if (!entry)
		goto out_unlock;

	ret = -EINVAL;
	if (entry->direct != old_addr)
		goto out_unlock;

	/*
	 * If there's no other ftrace callback on the rec->ip location,
	 * then it can be changed directly by the architecture.
	 * If there is another caller, then we just need to change the
	 * direct caller helper to point to @new_addr.
	 */
	if (ftrace_rec_count(rec) == 1) {
		ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
	} else {
		entry->direct = new_addr;
		ret = 0;
	}

 out_unlock:
	mutex_unlock(&ftrace_lock);
	mutex_unlock(&direct_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(modify_ftrace_direct);

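/*
 * Illustrative sketch (not part of this file): switching a live direct call
 * from one trampoline to another without a window where the call is lost.
 * "my_tramp_v1" and "my_tramp_v2" are hypothetical assembly stubs that were
 * previously attached with register_ftrace_direct().
 */
#include <linux/ftrace.h>
#include <linux/sched.h>

extern void my_tramp_v1(void);	/* hypothetical */
extern void my_tramp_v2(void);	/* hypothetical */

static int example_switch_tramp(void)
{
	/* @old_addr must match what was passed to register_ftrace_direct() */
	return modify_ftrace_direct((unsigned long)wake_up_process,
				    (unsigned long)my_tramp_v1,
				    (unsigned long)my_tramp_v2);
}
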
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

/**
 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
 * @ops - the ops to set the filter with
 * @ip - the address to add to or remove from the filter.
 * @remove - non zero to remove the ip from the filter
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @ip is NULL, it fails to update the filter.
 */
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_addr(ops, ip, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);

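/*
 * Illustrative sketch (not part of this file): tracing a single function by
 * address with a private ftrace_ops. The ops, the callback and the helper
 * below are hypothetical; the callback must be notrace so that it is not
 * itself traced.
 */
static void notrace example_ip_callback(unsigned long ip, unsigned long parent_ip,
					struct ftrace_ops *op, struct pt_regs *regs)
{
	trace_printk("hit %ps from %ps\n", (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops example_ip_ops = {
	.func	= example_ip_callback,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int example_trace_one_ip(unsigned long ip)
{
	int ret;

	/* remove = 0, reset = 1: filter on exactly this address */
	ret = ftrace_set_filter_ip(&example_ip_ops, ip, 0, 1);
	if (ret)
		return ret;

	return register_ftrace_function(&example_ip_ops);
}
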
/**
 * ftrace_ops_set_global_filter - setup ops to use global filters
 * @ops - the ops which will use the global filters
 *
 * ftrace users who need global function trace filtering should call this.
 * It can set the global filter only if ops were not initialized before.
 */
void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
		return;

	ftrace_ops_init(ops);
	ops->func_hash = &global_ops.local_hash;
}
EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);

static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
		 int reset, int enable)
{
	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops - the ops to set the filter with
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops - the ops to set the notrace filter with
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);

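/*
 * Illustrative sketch (not part of this file): a private ftrace_ops that
 * traces every function matching "vfs_*" except vfs_read(), combining the
 * filter and notrace hashes set up by the two helpers above. All names here
 * (callback, ops, helper) are hypothetical.
 */
static void notrace example_vfs_callback(unsigned long ip, unsigned long parent_ip,
					 struct ftrace_ops *op, struct pt_regs *regs)
{
	/* intentionally minimal */
}

static struct ftrace_ops example_vfs_ops = {
	.func	= example_vfs_callback,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int example_setup_vfs_tracing(void)
{
	char *filter = "vfs_*";
	char *skip = "vfs_read";
	int ret;

	/* reset = 1 on the first call wipes any previous filter on this ops */
	ret = ftrace_set_filter(&example_vfs_ops, filter, strlen(filter), 1);
	if (ret)
		return ret;

	/* functions in the notrace hash are skipped even if they match the filter */
	ret = ftrace_set_notrace(&example_vfs_ops, skip, strlen(skip), 1);
	if (ret)
		return ret;

	return register_ftrace_function(&example_vfs_ops);
}
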
/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_filter);

/**
 * ftrace_set_global_notrace - set a function to not trace with global tracers
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);

/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;

/* Used by function selftest to not test if filter is set */
bool ftrace_filter_param __initdata;

static int __init set_ftrace_notrace(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);

static int __init set_ftrace_filter(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);

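/*
 * Example (illustrative, given on the boot command line rather than in code):
 * these parameters seed the filters above before tracefs is available, e.g.
 *
 *	ftrace=function ftrace_filter=vfs_*,ksys_read ftrace_notrace=*spin_lock*
 *
 * The graph equivalents (ftrace_graph_filter=, ftrace_graph_notrace=,
 * ftrace_graph_max_depth=) are handled just below when the function graph
 * tracer is configured in.
 */
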
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);

static int __init set_graph_function(char *str)
{
	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);

static int __init set_graph_notrace_function(char *str)
{
	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_notrace=", set_graph_notrace_function);

static int __init set_graph_max_depth_function(char *str)
{
	if (!str)
		return 0;
	fgraph_max_depth = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);

static void __init set_ftrace_early_graph(char *buf, int enable)
{
	int ret;
	char *func;
	struct ftrace_hash *hash;

	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
	if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
		return;

	while (buf) {
		func = strsep(&buf, ",");
		/* we allow only one expression at a time */
		ret = ftrace_graph_set_hash(hash, func);
		if (ret)
			printk(KERN_DEBUG "ftrace: function %s not traceable\n",
			       func);
	}

	if (enable)
		ftrace_graph_hash = hash;
	else
		ftrace_graph_notrace_hash = hash;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
{
	char *func;

	ftrace_ops_init(ops);

	while (buf) {
		func = strsep(&buf, ",");
		ftrace_set_regex(ops, func, strlen(func), 0, enable);
	}
}

static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf, 1);
	if (ftrace_graph_notrace_buf[0])
		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}

int ftrace_regex_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;
	struct ftrace_hash **orig_hash;
	struct trace_parser *parser;
	int filter_hash;
	int ret;

	if (file->f_mode & FMODE_READ) {
		iter = m->private;
		seq_release(inode, file);
	} else
		iter = file->private_data;

	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
	}

	trace_parser_put(parser);

	mutex_lock(&iter->ops->func_hash->regex_lock);

	if (file->f_mode & FMODE_WRITE) {
		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);

		if (filter_hash) {
			orig_hash = &iter->ops->func_hash->filter_hash;
			if (iter->tr && !list_empty(&iter->tr->mod_trace))
				iter->hash->flags |= FTRACE_HASH_FL_MOD;
		} else
			orig_hash = &iter->ops->func_hash->notrace_hash;

		mutex_lock(&ftrace_lock);
		ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
						      iter->hash, filter_hash);
		mutex_unlock(&ftrace_lock);
	} else {
		/* For read only, the hash is the ops hash */
		iter->hash = NULL;
	}

	mutex_unlock(&iter->ops->func_hash->regex_lock);
	free_ftrace_hash(iter->hash);
	if (iter->tr)
		trace_array_put(iter->tr);
	kfree(iter);

	return 0;
}

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_enabled_fops = {
	.open = ftrace_enabled_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;

enum graph_filter_type {
	GRAPH_FILTER_NOTRACE	= 0,
	GRAPH_FILTER_FUNCTION,
};

#define FTRACE_GRAPH_EMPTY	((void *)1)

struct ftrace_graph_data {
	struct ftrace_hash		*hash;
	struct ftrace_func_entry	*entry;
	int				idx;   /* for hash table iteration */
	enum graph_filter_type		type;
	struct ftrace_hash		*new_hash;
	const struct seq_operations	*seq_ops;
	struct trace_parser		parser;
};

static void *
__g_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_graph_data *fgd = m->private;
	struct ftrace_func_entry *entry = fgd->entry;
	struct hlist_head *head;
	int i, idx = fgd->idx;

	if (*pos >= fgd->hash->count)
		return NULL;

	if (entry) {
		hlist_for_each_entry_continue(entry, hlist) {
			fgd->entry = entry;
			return entry;
		}

		idx++;
	}

	for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
		head = &fgd->hash->buckets[i];
		hlist_for_each_entry(entry, head, hlist) {
			fgd->entry = entry;
			fgd->idx = i;
			return entry;
		}
	}
	return NULL;
}

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __g_next(m, pos);
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_graph_data *fgd = m->private;

	mutex_lock(&graph_lock);

	if (fgd->type == GRAPH_FILTER_FUNCTION)
		fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
					lockdep_is_held(&graph_lock));
	else
		fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
					lockdep_is_held(&graph_lock));

	/* Nothing, tell g_show to print all functions are enabled */
	if (ftrace_hash_empty(fgd->hash) && !*pos)
		return FTRACE_GRAPH_EMPTY;

	fgd->idx = 0;
	fgd->entry = NULL;

	return __g_next(m, pos);
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	struct ftrace_func_entry *entry = v;

	if (!entry)
		return 0;

	if (entry == FTRACE_GRAPH_EMPTY) {
		struct ftrace_graph_data *fgd = m->private;

		if (fgd->type == GRAPH_FILTER_FUNCTION)
			seq_puts(m, "#### all functions enabled ####\n");
		else
			seq_puts(m, "#### no functions disabled ####\n");
		return 0;
	}

	seq_printf(m, "%ps\n", (void *)entry->ip);

	return 0;
}

static const struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
__ftrace_graph_open(struct inode *inode, struct file *file,
		    struct ftrace_graph_data *fgd)
{
	int ret;
	struct ftrace_hash *new_hash = NULL;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (file->f_mode & FMODE_WRITE) {
		const int size_bits = FTRACE_HASH_DEFAULT_BITS;

		if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
			return -ENOMEM;

		if (file->f_flags & O_TRUNC)
			new_hash = alloc_ftrace_hash(size_bits);
		else
			new_hash = alloc_and_copy_ftrace_hash(size_bits,
							      fgd->hash);
		if (!new_hash) {
			ret = -ENOMEM;
			goto out;
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = fgd;
		} else {
			/* Failed */
			free_ftrace_hash(new_hash);
			new_hash = NULL;
		}
	} else
		file->private_data = fgd;

out:
	if (ret < 0 && file->f_mode & FMODE_WRITE)
		trace_parser_put(&fgd->parser);

	fgd->new_hash = new_hash;

	/*
	 * All uses of fgd->hash must be taken with the graph_lock
	 * held. The graph_lock is going to be released, so force
	 * fgd->hash to be reinitialized when it is taken again.
	 */
	fgd->hash = NULL;

	return ret;
}

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

	mutex_lock(&graph_lock);

	fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
					lockdep_is_held(&graph_lock));
	fgd->type = GRAPH_FILTER_FUNCTION;
	fgd->seq_ops = &ftrace_graph_seq_ops;

	ret = __ftrace_graph_open(inode, file, fgd);
	if (ret < 0)
		kfree(fgd);

	mutex_unlock(&graph_lock);
	return ret;
}

static int
ftrace_graph_notrace_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

	mutex_lock(&graph_lock);

	fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
					lockdep_is_held(&graph_lock));
	fgd->type = GRAPH_FILTER_NOTRACE;
	fgd->seq_ops = &ftrace_graph_seq_ops;

	ret = __ftrace_graph_open(inode, file, fgd);
	if (ret < 0)
		kfree(fgd);

	mutex_unlock(&graph_lock);
	return ret;
}

static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;
	struct ftrace_hash *old_hash, *new_hash;
	struct trace_parser *parser;
	int ret = 0;

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;

		fgd = m->private;
		seq_release(inode, file);
	} else {
		fgd = file->private_data;
	}


	if (file->f_mode & FMODE_WRITE) {

		parser = &fgd->parser;

		if (trace_parser_loaded((parser))) {
			ret = ftrace_graph_set_hash(fgd->new_hash,
						    parser->buffer);
		}

		trace_parser_put(parser);

		new_hash = __ftrace_hash_move(fgd->new_hash);
		if (!new_hash) {
			ret = -ENOMEM;
			goto out;
		}

		mutex_lock(&graph_lock);

		if (fgd->type == GRAPH_FILTER_FUNCTION) {
			old_hash = rcu_dereference_protected(ftrace_graph_hash,
					lockdep_is_held(&graph_lock));
			rcu_assign_pointer(ftrace_graph_hash, new_hash);
		} else {
			old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
					lockdep_is_held(&graph_lock));
			rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
		}

		mutex_unlock(&graph_lock);

		/*
		 * We need to do a hard force of sched synchronization.
		 * This is because we use preempt_disable() to do RCU, but
		 * the function tracers can be called where RCU is not watching
		 * (like before user_exit()). We can not rely on the RCU
		 * infrastructure to do the synchronization, thus we must do it
		 * ourselves.
		 */
		synchronize_rcu_tasks_rude();

		free_ftrace_hash(old_hash);
	}

 out:
	free_ftrace_hash(fgd->new_hash);
	kfree(fgd);

	return ret;
}

static int
ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
{
	struct ftrace_glob func_g;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	struct ftrace_func_entry *entry;
	int fail = 1;
	int not;

	/* decode regex */
	func_g.type = filter_parse_regex(buffer, strlen(buffer),
					 &func_g.search, &not);

	func_g.len = strlen(func_g.search);

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled)) {
		mutex_unlock(&ftrace_lock);
		return -ENODEV;
	}

	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
			entry = ftrace_lookup_ip(hash, rec->ip);

			if (!not) {
				fail = 0;

				if (entry)
					continue;
				if (add_hash_entry(hash, rec->ip) < 0)
					goto out;
			} else {
				if (entry) {
					free_hash_entry(hash, entry);
					fail = 0;
				}
			}
		}
	} while_for_each_ftrace_rec();
out:
	mutex_unlock(&ftrace_lock);

	if (fail)
		return -EINVAL;

	return 0;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	ssize_t read, ret = 0;
	struct ftrace_graph_data *fgd = file->private_data;
	struct trace_parser *parser;

	if (!cnt)
		return 0;

	/* Read mode uses seq functions */
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		fgd = m->private;
	}

	parser = &fgd->parser;

	read = trace_get_user(parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {

		ret = ftrace_graph_set_hash(fgd->new_hash,
					    parser->buffer);
		trace_parser_clear(parser);
	}

	if (!ret)
		ret = read;

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open		= ftrace_graph_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_graph_release,
};

static const struct file_operations ftrace_graph_notrace_fops = {
	.open		= ftrace_graph_notrace_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_graph_release,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent)
{

	trace_create_file("set_ftrace_filter", 0644, parent,
			  ops, &ftrace_filter_fops);

	trace_create_file("set_ftrace_notrace", 0644, parent,
			  ops, &ftrace_notrace_fops);
}

/*
 * The name "destroy_filter_files" is really a misnomer. Although
 * it may actually delete the files in the future, this is really
 * intended to make sure the ops passed in are disabled and that
 * when this function returns, the caller is free to free the ops.
 *
 * The "destroy" name is only to match the "create" name that this
 * should be paired with.
 */
void ftrace_destroy_filter_files(struct ftrace_ops *ops)
{
	mutex_lock(&ftrace_lock);
	if (ops->flags & FTRACE_OPS_FL_ENABLED)
		ftrace_shutdown(ops, 0);
	ops->flags |= FTRACE_OPS_FL_DELETED;
	ftrace_free_filter(ops);
	mutex_unlock(&ftrace_lock);
}

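/*
 * Illustrative sketch (not part of this file): a tracer that owns a private
 * ftrace_ops and a tracefs directory would pair the two helpers above. The
 * names below are hypothetical.
 */
static struct ftrace_ops example_tracer_ops = {
	.func	= ftrace_stub,
};

static void example_tracer_init_tracefs(struct dentry *d_tracer)
{
	/* creates set_ftrace_filter/set_ftrace_notrace bound to this ops */
	ftrace_create_filter_files(&example_tracer_ops, d_tracer);
}

static void example_tracer_teardown(void)
{
	/* disables the ops and frees its filter hashes */
	ftrace_destroy_filter_files(&example_tracer_ops);
}
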
static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
{

	trace_create_file("available_filter_functions", 0444,
			  d_tracer, NULL, &ftrace_avail_fops);

	trace_create_file("enabled_functions", 0444,
			  d_tracer, NULL, &ftrace_enabled_fops);

	ftrace_create_filter_files(&global_ops, d_tracer);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	trace_create_file("set_graph_function", 0644, d_tracer,
			  NULL,
			  &ftrace_graph_fops);
	trace_create_file("set_graph_notrace", 0644, d_tracer,
			  NULL,
			  &ftrace_graph_notrace_fops);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}

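/*
 * Example (illustrative, driven from user space rather than code): once the
 * files above exist under the tracefs mount (usually /sys/kernel/tracing),
 * the global filters can be set from a shell, e.g.
 *
 *	echo 'vfs_*'  > /sys/kernel/tracing/set_ftrace_filter
 *	echo vfs_read > /sys/kernel/tracing/set_ftrace_notrace
 *	cat /sys/kernel/tracing/enabled_functions
 */
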
static int ftrace_cmp_ips(const void *a, const void *b)
{
	const unsigned long *ipa = a;
	const unsigned long *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

static int ftrace_process_locs(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long count;
	unsigned long *p;
	unsigned long addr;
	unsigned long flags = 0; /* Shut up gcc */
	int ret = -ENOMEM;

	count = end - start;

	if (!count)
		return 0;

	sort(start, count, sizeof(*start),
	     ftrace_cmp_ips, NULL);

	start_pg = ftrace_allocate_pages(count);
	if (!start_pg)
		return -ENOMEM;

	mutex_lock(&ftrace_lock);

	/*
	 * Core and each module needs their own pages, as
	 * modules will free them when they are removed.
	 * Force a new page to be allocated for modules.
	 */
	if (!mod) {
		WARN_ON(ftrace_pages || ftrace_pages_start);
		/* First initialization */
		ftrace_pages = ftrace_pages_start = start_pg;
	} else {
		if (!ftrace_pages)
			goto out;

		if (WARN_ON(ftrace_pages->next)) {
			/* Hmm, we have free pages? */
			while (ftrace_pages->next)
				ftrace_pages = ftrace_pages->next;
		}

		ftrace_pages->next = start_pg;
	}

	p = start;
	pg = start_pg;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;

		if (pg->index == pg->size) {
			/* We should have allocated enough */
			if (WARN_ON(!pg->next))
				break;
			pg = pg->next;
		}

		rec = &pg->records[pg->index++];
		rec->ip = addr;
	}

	/* We should have used all pages */
	WARN_ON(pg->next);

	/* Assign the last page to ftrace_pages */
	ftrace_pages = pg;

	/*
	 * We only need to disable interrupts on start up
	 * because we are modifying code that an interrupt
	 * may execute, and the modification is not atomic.
	 * But for modules, nothing runs the code we modify
	 * until we are finished with it, and there's no
	 * reason to cause large interrupt latencies while we do it.
	 */
	if (!mod)
		local_irq_save(flags);
	ftrace_update_code(mod, start_pg);
	if (!mod)
		local_irq_restore(flags);
	ret = 0;
 out:
	mutex_unlock(&ftrace_lock);

	return ret;
}

struct ftrace_mod_func {
	struct list_head	list;
	char			*name;
	unsigned long		ip;
	unsigned int		size;
};

struct ftrace_mod_map {
	struct rcu_head		rcu;
	struct list_head	list;
	struct module		*mod;
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct list_head	funcs;
	unsigned int		num_funcs;
};

static int ftrace_get_trampoline_kallsym(unsigned int symnum,
					 unsigned long *value, char *type,
					 char *name, char *module_name,
					 int *exported)
{
	struct ftrace_ops *op;

	list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
		if (!op->trampoline || symnum--)
			continue;
		*value = op->trampoline;
		*type = 't';
		strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
		strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
		*exported = 0;
		return 0;
	}

	return -ERANGE;
}

#ifdef CONFIG_MODULES

#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)

static LIST_HEAD(ftrace_mod_maps);

static int referenced_filters(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
		if (ops_references_rec(ops, rec))
			cnt++;
	}

	return cnt;
}

static void
clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct dyn_ftrace *rec;
	int i;

	if (ftrace_hash_empty(hash))
		return;

	for (i = 0; i < pg->index; i++) {
		rec = &pg->records[i];
		entry = __ftrace_lookup_ip(hash, rec->ip);
		/*
		 * Do not allow this rec to match again.
		 * Yeah, it may waste some memory, but will be removed
		 * if/when the hash is modified again.
		 */
		if (entry)
			entry->ip = 0;
	}
}

/* Clear any records from hashes */
static void clear_mod_from_hashes(struct ftrace_page *pg)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->ops || !tr->ops->func_hash)
			continue;
		mutex_lock(&tr->ops->func_hash->regex_lock);
		clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
		clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
		mutex_unlock(&tr->ops->func_hash->regex_lock);
	}
	mutex_unlock(&trace_types_lock);
}

static void ftrace_free_mod_map(struct rcu_head *rcu)
{
	struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
	struct ftrace_mod_func *mod_func;
	struct ftrace_mod_func *n;

	/* All the contents of mod_map are now not visible to readers */
	list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
		kfree(mod_func->name);
		list_del(&mod_func->list);
		kfree(mod_func);
	}

	kfree(mod_map);
}

void ftrace_release_mod(struct module *mod)
{
	struct ftrace_mod_map *mod_map;
	struct ftrace_mod_map *n;
	struct dyn_ftrace *rec;
	struct ftrace_page **last_pg;
	struct ftrace_page *tmp_page = NULL;
	struct ftrace_page *pg;
	int order;

	mutex_lock(&ftrace_lock);

	if (ftrace_disabled)
		goto out_unlock;

	list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
		if (mod_map->mod == mod) {
			list_del_rcu(&mod_map->list);
			call_rcu(&mod_map->rcu, ftrace_free_mod_map);
			break;
		}
	}

	/*
	 * Each module has its own ftrace_pages, remove
	 * them from the list.
	 */
	last_pg = &ftrace_pages_start;
	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
		rec = &pg->records[0];
		if (within_module_core(rec->ip, mod) ||
		    within_module_init(rec->ip, mod)) {
			/*
			 * As core pages are first, the first
			 * page should never be a module page.
			 */
			if (WARN_ON(pg == ftrace_pages_start))
				goto out_unlock;

			/* Check if we are deleting the last page */
			if (pg == ftrace_pages)
				ftrace_pages = next_to_ftrace_page(last_pg);

			ftrace_update_tot_cnt -= pg->index;
			*last_pg = pg->next;

			pg->next = tmp_page;
			tmp_page = pg;
		} else
			last_pg = &pg->next;
	}
 out_unlock:
	mutex_unlock(&ftrace_lock);

	for (pg = tmp_page; pg; pg = tmp_page) {

		/* Needs to be called outside of ftrace_lock */
		clear_mod_from_hashes(pg);

		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
		free_pages((unsigned long)pg->records, order);
		tmp_page = pg->next;
		kfree(pg);
		ftrace_number_of_pages -= 1 << order;
		ftrace_number_of_groups--;
	}
}

void ftrace_module_enable(struct module *mod)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	mutex_lock(&ftrace_lock);

	if (ftrace_disabled)
		goto out_unlock;

	/*
	 * If the tracing is enabled, go ahead and enable the record.
	 *
	 * The reason not to enable the record immediately is the
	 * inherent check of ftrace_make_nop/ftrace_make_call for
	 * correct previous instructions. Making first the NOP
	 * conversion puts the module to the correct state, thus
	 * passing the ftrace_make_call check.
	 *
	 * We also delay this to after the module code already set the
	 * text to read-only, as we now need to set it back to read-write
	 * so that we can modify the text.
	 */
	if (ftrace_start_up)
		ftrace_arch_code_modify_prepare();

	do_for_each_ftrace_rec(pg, rec) {
		int cnt;
		/*
		 * do_for_each_ftrace_rec() is a double loop.
		 * module text shares the pg. If a record is
		 * not part of this module, then skip this pg,
		 * which the "break" will do.
		 */
		if (!within_module_core(rec->ip, mod) &&
		    !within_module_init(rec->ip, mod))
			break;

		cnt = 0;

		/*
		 * When adding a module, we need to check if tracers are
		 * currently enabled and if they are, and can trace this record,
		 * we need to enable the module functions as well as update the
		 * reference counts for those function records.
		 */
		if (ftrace_start_up)
			cnt += referenced_filters(rec);

		/* This clears FTRACE_FL_DISABLED */
		rec->flags = cnt;

		if (ftrace_start_up && cnt) {
			int failed = __ftrace_replace_code(rec, 1);
			if (failed) {
				ftrace_bug(failed, rec);
				goto out_loop;
			}
		}

	} while_for_each_ftrace_rec();

 out_loop:
	if (ftrace_start_up)
		ftrace_arch_code_modify_post_process();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	process_cached_mods(mod->name);
}

void ftrace_module_init(struct module *mod)
{
	if (ftrace_disabled || !mod->num_ftrace_callsites)
		return;

	ftrace_process_locs(mod, mod->ftrace_callsites,
			    mod->ftrace_callsites + mod->num_ftrace_callsites);
}

static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
				struct dyn_ftrace *rec)
{
	struct ftrace_mod_func *mod_func;
	unsigned long symsize;
	unsigned long offset;
	char str[KSYM_SYMBOL_LEN];
	char *modname;
	const char *ret;

	ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
	if (!ret)
		return;

	mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
	if (!mod_func)
		return;

	mod_func->name = kstrdup(str, GFP_KERNEL);
	if (!mod_func->name) {
		kfree(mod_func);
		return;
	}

	mod_func->ip = rec->ip - offset;
	mod_func->size = symsize;

	mod_map->num_funcs++;

	list_add_rcu(&mod_func->list, &mod_map->funcs);
}

static struct ftrace_mod_map *
allocate_ftrace_mod_map(struct module *mod,
			unsigned long start, unsigned long end)
{
	struct ftrace_mod_map *mod_map;

	mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
	if (!mod_map)
		return NULL;

	mod_map->mod = mod;
	mod_map->start_addr = start;
	mod_map->end_addr = end;
	mod_map->num_funcs = 0;

	INIT_LIST_HEAD_RCU(&mod_map->funcs);

	list_add_rcu(&mod_map->list, &ftrace_mod_maps);

	return mod_map;
}

static const char *
ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
			   unsigned long addr, unsigned long *size,
			   unsigned long *off, char *sym)
{
	struct ftrace_mod_func *found_func =  NULL;
	struct ftrace_mod_func *mod_func;

	list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
		if (addr >= mod_func->ip &&
		    addr < mod_func->ip + mod_func->size) {
			found_func = mod_func;
			break;
		}
	}

	if (found_func) {
		if (size)
			*size = found_func->size;
		if (off)
			*off = addr - found_func->ip;
		if (sym)
			strlcpy(sym, found_func->name, KSYM_NAME_LEN);

		return found_func->name;
	}

	return NULL;
}

const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym)
{
	struct ftrace_mod_map *mod_map;
	const char *ret = NULL;

	/* mod_map is freed via call_rcu() */
	preempt_disable();
	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
		ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
		if (ret) {
			if (modname)
				*modname = mod_map->mod->name;
			break;
		}
	}
	preempt_enable();

	return ret;
}

int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported)
{
	struct ftrace_mod_map *mod_map;
	struct ftrace_mod_func *mod_func;
	int ret;

	preempt_disable();
	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {

		if (symnum >= mod_map->num_funcs) {
			symnum -= mod_map->num_funcs;
			continue;
		}

		list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
			if (symnum > 1) {
				symnum--;
				continue;
			}

			*value = mod_func->ip;
			*type = 'T';
			strlcpy(name, mod_func->name, KSYM_NAME_LEN);
			strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
			*exported = 1;
			preempt_enable();
			return 0;
		}
		WARN_ON(1);
		break;
	}
	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
					    module_name, exported);
	preempt_enable();
	return ret;
}

#else
static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
				struct dyn_ftrace *rec) { }
static inline struct ftrace_mod_map *
allocate_ftrace_mod_map(struct module *mod,
			unsigned long start, unsigned long end)
{
	return NULL;
}
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name, char *module_name,
			   int *exported)
{
	int ret;

	preempt_disable();
	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
					    module_name, exported);
	preempt_enable();
	return ret;
}
#endif /* CONFIG_MODULES */

struct ftrace_init_func {
	struct list_head list;
	unsigned long ip;
};

/* Clear any init ips from hashes */
static void
clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;

	entry = ftrace_lookup_ip(hash, func->ip);
	/*
	 * Do not allow this rec to match again.
	 * Yeah, it may waste some memory, but will be removed
	 * if/when the hash is modified again.
	 */
	if (entry)
		entry->ip = 0;
}

static void
clear_func_from_hashes(struct ftrace_init_func *func)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->ops || !tr->ops->func_hash)
			continue;
		mutex_lock(&tr->ops->func_hash->regex_lock);
		clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
		clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
		mutex_unlock(&tr->ops->func_hash->regex_lock);
	}
	mutex_unlock(&trace_types_lock);
}

static void add_to_clear_hash_list(struct list_head *clear_list,
				   struct dyn_ftrace *rec)
{
	struct ftrace_init_func *func;

	func = kmalloc(sizeof(*func), GFP_KERNEL);
	if (!func) {
		MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
		return;
	}

	func->ip = rec->ip;
	list_add(&func->list, clear_list);
}

void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
{
	unsigned long start = (unsigned long)(start_ptr);
	unsigned long end = (unsigned long)(end_ptr);
	struct ftrace_page **last_pg = &ftrace_pages_start;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;
	struct ftrace_mod_map *mod_map = NULL;
	struct ftrace_init_func *func, *func_next;
	struct list_head clear_hash;
	int order;

	INIT_LIST_HEAD(&clear_hash);

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	mutex_lock(&ftrace_lock);

	/*
	 * If we are freeing module init memory, then check if
	 * any tracer is active. If so, we need to save a mapping of
	 * the module functions being freed with the address.
	 */
	if (mod && ftrace_ops_list != &ftrace_list_end)
		mod_map = allocate_ftrace_mod_map(mod, start, end);

	for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
 again:
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (!rec)
			continue;

		/* rec will be cleared from hashes after ftrace_lock unlock */
		add_to_clear_hash_list(&clear_hash, rec);

		if (mod_map)
			save_ftrace_mod_rec(mod_map, rec);

		pg->index--;
		ftrace_update_tot_cnt--;
		if (!pg->index) {
			*last_pg = pg->next;
			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
			free_pages((unsigned long)pg->records, order);
			ftrace_number_of_pages -= 1 << order;
			ftrace_number_of_groups--;
			kfree(pg);
			pg = container_of(last_pg, struct ftrace_page, next);
			if (!(*last_pg))
				ftrace_pages = pg;
			continue;
		}
		memmove(rec, rec + 1,
			(pg->index - (rec - pg->records)) * sizeof(*rec));
		/* More than one function may be in this block */
		goto again;
	}
	mutex_unlock(&ftrace_lock);

	list_for_each_entry_safe(func, func_next, &clear_hash, list) {
		clear_func_from_hashes(func);
		kfree(func);
	}
}

void __init ftrace_free_init_mem(void)
{
	void *start = (void *)(&__init_begin);
	void *end = (void *)(&__init_end);

	ftrace_free_mem(NULL, start, end);
}

void __init ftrace_init(void)
{
	extern unsigned long __start_mcount_loc[];
	extern unsigned long __stop_mcount_loc[];
	unsigned long count, flags;
	int ret;

	local_irq_save(flags);
	ret = ftrace_dyn_arch_init();
	local_irq_restore(flags);
	if (ret)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;
	if (!count) {
		pr_info("ftrace: No functions to be traced?\n");
		goto failed;
	}

	pr_info("ftrace: allocating %ld entries in %ld pages\n",
		count, count / ENTRIES_PER_PAGE + 1);

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_process_locs(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	pr_info("ftrace: allocated %ld pages with %ld groups\n",
		ftrace_number_of_pages, ftrace_number_of_groups);

	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}

/* Do nothing if arch does not support this */
void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
}

static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
	unsigned long trampoline = ops->trampoline;

	arch_ftrace_update_trampoline(ops);
	if (ops->trampoline && ops->trampoline != trampoline &&
	    (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
		/* Add to kallsyms before the perf events */
		ftrace_add_trampoline_to_kallsyms(ops);
		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
				   ops->trampoline, ops->trampoline_size, false,
				   FTRACE_TRAMPOLINE_SYM);
		/*
		 * Record the perf text poke event after the ksymbol register
		 * event.
		 */
		perf_event_text_poke((void *)ops->trampoline, NULL, 0,
				     (void *)ops->trampoline,
				     ops->trampoline_size);
	}
}

void ftrace_init_trace_array(struct trace_array *tr)
{
	INIT_LIST_HEAD(&tr->func_probes);
	INIT_LIST_HEAD(&tr->mod_trace);
	INIT_LIST_HEAD(&tr->mod_notrace);
}

#else

struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				  FTRACE_OPS_FL_INITIALIZED |
				  FTRACE_OPS_FL_PID,
};

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
core_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
static inline void ftrace_startup_all(int command) { }

# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)

static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
}

#endif /* CONFIG_DYNAMIC_FTRACE */

__init void ftrace_init_global_array_ops(struct trace_array *tr)
{
	tr->ops = &global_ops;
	tr->ops->private = tr;
	ftrace_init_trace_array(tr);
}

void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
{
	/* If we filter on pids, update to use the pid function */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		if (WARN_ON(tr->ops->func != ftrace_stub))
			printk("ftrace ops had %pS for function\n",
			       tr->ops->func);
	}
	tr->ops->func = func;
	tr->ops->private = tr;
}

void ftrace_reset_array_ops(struct trace_array *tr)
{
	tr->ops->func = ftrace_stub;
}

static nokprobe_inline void
__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *ignored, struct pt_regs *regs)
{
	struct ftrace_ops *op;
	int bit;

	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Stub functions don't need to be called nor tested */
		if (op->flags & FTRACE_OPS_FL_STUB)
			continue;
		/*
		 * Check the following for each ops before calling their func:
		 *  if RCU flag is set, then rcu_is_watching() must be true
		 *  if PER_CPU is set, then ftrace_function_local_disable()
		 *                          must be false
		 *  Otherwise test if the ip matches the ops filter
		 *
		 * If any of the above fails then the op->func() is not executed.
		 */
		if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
		    ftrace_ops_test(op, ip, regs)) {
			if (FTRACE_WARN_ON(!op->func)) {
				pr_warn("op=%p %pS\n", op, op);
				goto out;
			}
			op->func(ip, parent_ip, op, regs);
		}
	} while_for_each_ftrace_op(op);
out:
	preempt_enable_notrace();
	trace_clear_recursion(bit);
}

/*
 * Some archs only support passing ip and parent_ip. Even though
 * the list function ignores the op parameter, we do not want any
 * C side effects, where a function is called without the caller
 * sending a third parameter.
 * Archs are to support both the regs and ftrace_ops at the same time.
 * If they support ftrace_ops, it is assumed they support regs.
 * If call backs want to use regs, they must either check for regs
 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
 * An architecture can pass partial regs with ftrace_ops and still
 * set the ARCH_SUPPORTS_FTRACE_OPS.
 */
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs)
{
	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
}
NOKPROBE_SYMBOL(ftrace_ops_list_func);
#else
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
{
	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
NOKPROBE_SYMBOL(ftrace_ops_no_ops);
#endif

/*
 * If there's only one function registered but it does not support
 * recursion, needs RCU protection and/or requires per cpu handling, then
 * this function will be called by the mcount trampoline.
 */
static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

	if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
		return;

	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;

	preempt_disable_notrace();

	op->func(ip, parent_ip, op, regs);

	preempt_enable_notrace();
	trace_clear_recursion(bit);
}
NOKPROBE_SYMBOL(ftrace_ops_assist_func);

/**
 * ftrace_ops_get_func - get the function a trampoline should call
 * @ops: the ops to get the function for
 *
 * Normally the mcount trampoline will call the ops->func, but there
 * are times that it should not. For example, if the ops does not
 * have its own recursion protection, then it should call the
 * ftrace_ops_assist_func() instead.
 *
 * Returns the function that the trampoline should call for @ops.
 */
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
{
	/*
	 * If the function does not handle recursion, needs to be RCU safe,
	 * or does per cpu logic, then we need to call the assist handler.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
	    ops->flags & FTRACE_OPS_FL_RCU)
		return ftrace_ops_assist_func;

	return ops->func;
}

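/*
 * Illustrative sketch (not part of this file): an ops that does not set
 * FTRACE_OPS_FL_RECURSION_SAFE is wrapped by ftrace_ops_assist_func(), while
 * one that does may be called directly by the trampoline. Both ops and the
 * helper are hypothetical and rely on the declarations already in scope here.
 */
static void notrace example_plain_cb(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op, struct pt_regs *regs)
{
}

static struct ftrace_ops example_unprotected_ops = {
	.func	= example_plain_cb,
	/* no FTRACE_OPS_FL_RECURSION_SAFE: needs the assist wrapper */
};

static struct ftrace_ops example_protected_ops = {
	.func	= example_plain_cb,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,	/* may be called directly */
};

static void example_show_trampoline_targets(void)
{
	/* expected: the assist wrapper for the first, the raw callback for the second */
	WARN_ON(ftrace_ops_get_func(&example_unprotected_ops) != ftrace_ops_assist_func);
	WARN_ON(ftrace_ops_get_func(&example_protected_ops) != example_plain_cb);
}
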
static void
ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
				     struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;

	pid_list = rcu_dereference_sched(tr->function_pids);
	no_pid_list = rcu_dereference_sched(tr->function_no_pids);

	if (trace_ignore_this_task(pid_list, no_pid_list, next))
		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
			       FTRACE_PID_IGNORE);
	else
		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
			       next->pid);
}

static void
ftrace_pid_follow_sched_process_fork(void *data,
				     struct task_struct *self,
				     struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->function_pids);
	trace_filter_add_remove_task(pid_list, self, task);

	pid_list = rcu_dereference_sched(tr->function_no_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

static void
ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->function_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);

	pid_list = rcu_dereference_sched(tr->function_no_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
						  tr);
		register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
						  tr);
	} else {
		unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
						    tr);
		unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
						    tr);
	}
}

static void clear_ftrace_pids(struct trace_array *tr, int type)
{
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;
	int cpu;

	pid_list = rcu_dereference_protected(tr->function_pids,
					     lockdep_is_held(&ftrace_lock));
	no_pid_list = rcu_dereference_protected(tr->function_no_pids,
					     lockdep_is_held(&ftrace_lock));

	/* Make sure there's something to do */
	if (!pid_type_enabled(type, pid_list, no_pid_list))
		return;

	/* See if the pids still need to be checked after this */
	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
		unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
		for_each_possible_cpu(cpu)
			per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
	}

	if (type & TRACE_PIDS)
		rcu_assign_pointer(tr->function_pids, NULL);

	if (type & TRACE_NO_PIDS)
		rcu_assign_pointer(tr->function_no_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	synchronize_rcu();

	if ((type & TRACE_PIDS) && pid_list)
		trace_free_pid_list(pid_list);

	if ((type & TRACE_NO_PIDS) && no_pid_list)
		trace_free_pid_list(no_pid_list);
}

void ftrace_clear_pids(struct trace_array *tr)
{
	mutex_lock(&ftrace_lock);

	clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);

	mutex_unlock(&ftrace_lock);
}

static void ftrace_pid_reset(struct trace_array *tr, int type)
{
	mutex_lock(&ftrace_lock);
	clear_ftrace_pids(tr, type);

	ftrace_update_pid_func();
	ftrace_startup_all(0);

	mutex_unlock(&ftrace_lock);
}

/* Greater than any max PID */
#define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)

static void *fpid_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	mutex_lock(&ftrace_lock);
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->function_pids);

	if (!pid_list)
		return !(*pos) ? FTRACE_NO_PIDS : NULL;

	return trace_pid_start(pid_list, pos);
}

static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);

	if (v == FTRACE_NO_PIDS) {
		(*pos)++;
		return NULL;
	}
	return trace_pid_next(pid_list, v, pos);
}

static void fpid_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&ftrace_lock);
}

static int fpid_show(struct seq_file *m, void *v)
{
	if (v == FTRACE_NO_PIDS) {
		seq_puts(m, "no pid\n");
		return 0;
	}

	return trace_pid_show(m, v);
}

static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};

static void *fnpid_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	mutex_lock(&ftrace_lock);
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->function_no_pids);

	if (!pid_list)
		return !(*pos) ? FTRACE_NO_PIDS : NULL;

	return trace_pid_start(pid_list, pos);
}

static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);

	if (v == FTRACE_NO_PIDS) {
		(*pos)++;
		return NULL;
	}
	return trace_pid_next(pid_list, v, pos);
}

static const struct seq_operations ftrace_no_pid_sops = {
	.start = fnpid_start,
	.next = fnpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};

static int pid_open(struct inode *inode, struct file *file, int type)
{
	const struct seq_operations *seq_ops;
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret = 0;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_pid_reset(tr, type);

	switch (type) {
	case TRACE_PIDS:
		seq_ops = &ftrace_pid_sops;
		break;
	case TRACE_NO_PIDS:
		seq_ops = &ftrace_no_pid_sops;
		break;
	default:
		trace_array_put(tr);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	ret = seq_open(file, seq_ops);
	if (ret < 0) {
		trace_array_put(tr);
	} else {
		m = file->private_data;
		/* copy tr over to seq ops */
		m->private = tr;
	}

	return ret;
}

static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
	return pid_open(inode, file, TRACE_PIDS);
}

static int
ftrace_no_pid_open(struct inode *inode, struct file *file)
{
	return pid_open(inode, file, TRACE_NO_PIDS);
}

static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * event_mutex is held.
	 */
	pid_list = rcu_dereference_protected(tr->function_pids,
					     mutex_is_locked(&ftrace_lock));
	no_pid_list = rcu_dereference_protected(tr->function_no_pids,
					     mutex_is_locked(&ftrace_lock));

	if (trace_ignore_this_task(pid_list, no_pid_list, current))
		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
			       FTRACE_PID_IGNORE);
	else
		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
			       current->pid);
}

static ssize_t
pid_write(struct file *filp, const char __user *ubuf,
	  size_t cnt, loff_t *ppos, int type)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids;
	struct trace_pid_list *other_pids;
	struct trace_pid_list *pid_list;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_lock);

	switch (type) {
	case TRACE_PIDS:
		filtered_pids = rcu_dereference_protected(tr->function_pids,
					     lockdep_is_held(&ftrace_lock));
		other_pids = rcu_dereference_protected(tr->function_no_pids,
					     lockdep_is_held(&ftrace_lock));
		break;
	case TRACE_NO_PIDS:
		filtered_pids = rcu_dereference_protected(tr->function_no_pids,
					     lockdep_is_held(&ftrace_lock));
		other_pids = rcu_dereference_protected(tr->function_pids,
					     lockdep_is_held(&ftrace_lock));
		break;
	default:
		ret = -EINVAL;
		WARN_ON_ONCE(1);
		goto out;
	}

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	switch (type) {
	case TRACE_PIDS:
		rcu_assign_pointer(tr->function_pids, pid_list);
		break;
	case TRACE_NO_PIDS:
		rcu_assign_pointer(tr->function_no_pids, pid_list);
		break;
	}


	if (filtered_pids) {
		synchronize_rcu();
		trace_free_pid_list(filtered_pids);
	} else if (pid_list && !other_pids) {
		/* Register a probe to set whether to ignore the tracing of a task */
		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
	}

	/*
	 * Ignoring of pids is done at task switch. But we have to
	 * check for those tasks that are currently running.
	 * Always do this in case a pid was appended or removed.
	 */
	on_each_cpu(ignore_task_cpu, tr, 1);

	ftrace_update_pid_func();
	ftrace_startup_all(0);
 out:
	mutex_unlock(&ftrace_lock);

	if (ret > 0)
		*ppos += ret;

	return ret;
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
}

static ssize_t
ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
}

static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}

static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= tracing_lseek,
	.release	= ftrace_pid_release,
};

static const struct file_operations ftrace_no_pid_fops = {
	.open		= ftrace_no_pid_open,
	.write		= ftrace_no_pid_write,
	.read		= seq_read,
	.llseek		= tracing_lseek,
	.release	= ftrace_pid_release,
};

void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			    tr, &ftrace_pid_fops);
	trace_create_file("set_ftrace_notrace_pid", 0644, d_tracer,
			    tr, &ftrace_no_pid_fops);
}

void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
					 struct dentry *d_tracer)
{
	/* Only the top level directory has the dyn_tracefs and profile */
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));

	ftrace_init_dyn_tracefs(d_tracer);
	ftrace_profile_tracefs(d_tracer);
}

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	ftrace_trace_function = ftrace_stub;
}

/**
 * Test if ftrace is dead or not.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret = -1;

	ftrace_ops_init(ops);

	mutex_lock(&ftrace_lock);

	ret = ftrace_startup(ops, 0);

	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);

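/*
 * Illustrative sketch (not part of this file): the smallest useful user of
 * register_ftrace_function()/unregister_ftrace_function(). With no filter
 * set on the ops, the callback runs for every traced function, so it (and
 * everything it calls) must be notrace. All names below are hypothetical.
 */
#include <linux/ftrace.h>
#include <linux/atomic.h>

static atomic_long_t example_hits = ATOMIC_LONG_INIT(0);

static void notrace example_count_cb(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op, struct pt_regs *regs)
{
	atomic_long_inc(&example_hits);	/* notrace-safe work only */
}

static struct ftrace_ops example_count_ops = {
	.func	= example_count_cb,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int example_start_counting(void)
{
	return register_ftrace_function(&example_count_ops);
}

static void example_stop_counting(void)
{
	unregister_ftrace_function(&example_count_ops);
}
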
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);

static bool is_permanent_ops_registered(void)
{
	struct ftrace_ops *op;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PERMANENT)
			return true;
	} while_for_each_ftrace_op(op);

	return false;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	if (ftrace_enabled) {

		/* we are starting ftrace again */
		if (rcu_dereference_protected(ftrace_ops_list,
			lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
			update_ftrace_function();

		ftrace_startup_sysctl();

	} else {
		if (is_permanent_ops_registered()) {
			ftrace_enabled = true;
			ret = -EBUSY;
			goto out;
		}

		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

	last_ftrace_enabled = !!ftrace_enabled;
 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}

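/*
 * Example (illustrative, driven from user space rather than code): this
 * handler backs the kernel.ftrace_enabled sysctl, so function tracing can be
 * switched off and on system wide with
 *
 *	sysctl kernel.ftrace_enabled=0
 *	echo 1 > /proc/sys/kernel/ftrace_enabled
 *
 * Turning it off fails with -EBUSY while an ops with FTRACE_OPS_FL_PERMANENT
 * (e.g. one used by a live patch) is registered, as checked above.
 */
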