/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"
#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = !!(cond);		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = !!(cond);		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})
/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12
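/*
 * Rough sizing note (illustrative only): with the values above, the
 * per-function probe hash has 1 << 7 = 128 buckets, a default filter
 * hash gets 1 << 10 = 1024 buckets, and a filter hash is never grown
 * past 1 << 12 = 4096 buckets.
 */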
#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif
static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};
static bool ftrace_pids_enabled(void)
{
	return !list_empty(&ftrace_pids);
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs);
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif
/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);			\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
	       unlikely((op) != &ftrace_list_end))
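/*
 * Illustrative only: the two macros above pair up into a do/while loop
 * over the RCU-protected ops list, e.g.
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		pr_info("registered func: %pS\n", op->func);
 *	} while_for_each_ftrace_op(op);
 */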
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}
/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next)
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	if (!test_tsk_trace_trace(current))
		return;

	op->saved_func(ip, parent_ip, op, regs);
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the
 * moment the tracer actually stops being called.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static void control_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	control_ops_disable_all(ops);
	return 0;
}
static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);
#else
static inline void update_function_graph_func(void) { }
#endif
static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic ops or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}
static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = ftrace_ops_list;

	/* If there's no ftrace_ops registered, just call the stub function */
	if (ftrace_ops_list == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (ftrace_ops_list->next == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}
int using_ftrace_ops_list_func(void)
{
	return ftrace_trace_function == ftrace_ops_list_func;
}
static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}
static void add_ftrace_list_ops(struct ftrace_ops **list,
				struct ftrace_ops *main_ops,
				struct ftrace_ops *ops)
{
	int first = *list == &ftrace_list_end;
	add_ftrace_ops(list, ops);
	if (first)
		add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
				  struct ftrace_ops *main_ops,
				  struct ftrace_ops *ops)
{
	int ret = remove_ftrace_ops(list, ops);
	if (!ret && *list == &ftrace_list_end)
		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
	return ret;
}
static void ftrace_update_trampoline(struct ftrace_ops *ops);
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		if (control_ops_alloc(ops))
			return -ENOMEM;
		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
		/* The control_ops needs the trampoline update */
		ops = &control_ops;
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		ret = remove_ftrace_list_ops(&ftrace_control_list,
					     &control_ops, ops);
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}
static void ftrace_update_pid_func(void)
{
	bool enabled = ftrace_pids_enabled();
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = enabled ? ftrace_pid_func :
				op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}
#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};
#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
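/*
 * Back-of-the-envelope sketch (assumes 4 KiB pages, a 64-bit build and
 * CONFIG_FUNCTION_GRAPH_TRACER): the page header is one pointer plus an
 * index, and each record is roughly 48 bytes (hlist_node + ip + counter +
 * time + time_squared), so PROFILES_PER_PAGE comes out to about 85.
 */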
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph: compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}
static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only by 1000 for the ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide by 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
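/*
 * Worked example of the Welford form above (hypothetical numbers, unit
 * scaling ignored): for n = 2 samples of 3 and 5, time = 8 and
 * time_squared = 34, so s^2 = (2 * 34 - 8 * 8) / (2 * 1) = 4 / 2 = 2,
 * matching the sample variance of {3, 5} computed the long way.
 */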
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}
int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}
static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}
/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}
/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}
static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0, NULL, NULL);
	return 1;
}
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}
static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}
static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = tracefs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create tracefs "
			   "'function_profile_enabled' entry\n");
}
#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
#else
# define ftrace_graph_active 0
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct list_head	free_list;
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};
/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
static struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};
/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	struct ftrace_ops *op;
	bool ret = false;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_sched().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				ret = true;
				goto out;
			}
	} while_for_each_ftrace_op(op);

 out:
	preempt_enable_notrace();

	return ret;
}
struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;
static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !hash->count;
}
static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}
static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	if (hash->size_bits)
		key = hash_long(entry->ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}
static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}
static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}
static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}
void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}
static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}
static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}
static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);
static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int ret;
	int i;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	/*
	 * If the new source is empty, just free dst and assign it
	 * the empty_hash.
	 */
	if (!src->count) {
		new_hash = EMPTY_HASH;
		goto update;
	}

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return -ENOMEM;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

update:
	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}
static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !ftrace_lookup_ip(hash->notrace_hash, ip));
}
/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}
/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}
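/*
 * Note on the comparison above: the search key overloads a dyn_ftrace,
 * with key->ip holding the start of the range and key->flags holding the
 * end (see ftrace_location_range() below). A record "matches" when the
 * [start, end] range overlaps [rec->ip, rec->ip + MCOUNT_INSN_SIZE).
 */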
static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}
/**
 * ftrace_location - return true if the ip given is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}
/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}
static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inversed.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = 1;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return;
			rec->flags--;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * If the rec had TRAMP enabled, then it needs to
			 * be cleared. As TRAMP can only be enabled iff
			 * there is only a single ops attached to it.
			 * In other words, always disable it on decrementing.
			 * In the future, we may set it if rec count is
			 * decremented to one, and the ops that is left
			 * has a trampoline.
			 */
			rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;
		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return;
	} while_for_each_ftrace_rec();
}
static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}
/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no update is needed, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the meanings below:
 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 *  - If the hash is EMPTY_HASH, it hits nothing
 *  - Anything else hits the recs which match the hash entries.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	/*
	 * Since the IPMODIFY is a very address sensitive action, we do not
	 * allow ftrace_ops to set all functions to new hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {
		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			/* New entries must ensure no others are using it */
			if (rec->flags & FTRACE_FL_IPMODIFY)
				goto rollback;
			rec->flags |= FTRACE_FL_IPMODIFY;
		} else /* Removed entry */
			rec->flags &= ~FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {
		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}
static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @rec: The record that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
	unsigned long ip = rec ? rec->ip : 0;

	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		pr_cont("\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}

	if (rec) {
		struct ftrace_ops *ops = NULL;

		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops)
				pr_cont("\ttramp: %pS",
					(void *)ops->trampoline);
			else
				pr_cont("\ttramp: ERROR!");
		}
		ip = ftrace_get_addr_curr(rec);
		pr_cont(" expected tramp: %lx\n", ip);
	}
}
static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
	unsigned long flag = 0UL;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure its disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && ftrace_rec_count(rec))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
	 */
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) !=
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) !=
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED;
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
		 *   from the save regs, to a non-save regs function or
		 *   vice versa, or from a trampoline call.
		 */
		if (flag & FTRACE_FL_ENABLED)
			return FTRACE_UPDATE_MAKE_CALL;

		return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there's no more users, clear all flags */
		if (!ftrace_rec_count(rec))
			rec->flags = 0;
		else
			/*
			 * Just disable the record, but keep the ops TRAMP
			 * and REGS states. The _EN flags must be disabled though.
			 */
			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
					FTRACE_FL_REGS_EN);
	}

	return FTRACE_UPDATE_MAKE_NOP;
}
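/*
 * Summary of the return values above (derived from the code, for
 * reference): IGNORE when nothing changed, MAKE_CALL when going from a
 * nop to a call, MAKE_NOP when the last user went away, and MODIFY_CALL
 * when the call target changes (regs <-> non-regs, or trampolines).
 */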
/**
 * ftrace_update_record, set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 1);
}

/**
 * ftrace_test_record, check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 0);
}
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}
static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	/*
	 * Need to check removed ops first.
	 * If they are being removed, and this rec has a tramp,
	 * and this rec is in the ops list, then it would be the
	 * one with the tramp.
	 */
	if (removed_ops) {
		if (hash_contains_ip(ip, &removed_ops->old_hash))
			return removed_ops;
	}

	/*
	 * Need to find the current trampoline for a rec.
	 * Now, a trampoline is only attached to a rec if there
	 * was a single 'ops' attached to it. But this can be called
	 * when we are adding another op to the rec or removing the
	 * current one. Thus, if the op is being added, we can
	 * ignore it because it hasn't attached itself to the rec
	 * yet.
	 *
	 * If an ops is being modified (hooking to different functions)
	 * then we don't care about the new functions that are being
	 * added, just the old ones (that are probably being removed).
	 *
	 * If we are adding an ops to a function that already is using
	 * a trampoline, it needs to be removed (trampolines are only
	 * for single ops connected), then an ops that is not being
	 * modified also needs to be checked.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		/*
		 * If the ops is being added, it hasn't gotten to
		 * the point to be removed from this tree yet.
		 */
		if (op->flags & FTRACE_OPS_FL_ADDING)
			continue;

		/*
		 * If the ops is being modified and is in the old
		 * hash, then it is probably being removed from this
		 * function.
		 */
		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, &op->old_hash))
			return op;
		/*
		 * If the ops is not being added or modified, and it's
		 * in its normal filter hash, then this must be the one
		 * we want!
		 */
		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, op->func_hash))
			return op;

	} while_for_each_ftrace_op(op);

	return NULL;
}
static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* pass rec in as regs to have non-NULL val */
		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}
/**
 * ftrace_get_addr_new - Get the call address to set to
 * @rec:  The ftrace record descriptor
 *
 * If the record has the FTRACE_FL_REGS set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 *
 * Returns the address of the trampoline to set to
 */
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP) {
		ops = ftrace_find_tramp_ops_new(rec);
		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
				(void *)rec->ip, (void *)rec->ip, rec->flags);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}
/**
 * ftrace_get_addr_curr - Get the call address that is already there
 * @rec:  The ftrace record descriptor
 *
 * The FTRACE_FL_REGS_EN is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
 *
 * Returns the address of the trampoline that is currently being called
 */
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP_EN) {
		ops = ftrace_find_tramp_ops_curr(rec);
		if (FTRACE_WARN_ON(!ops)) {
			pr_warning("Bad trampoline accounting at: %p (%pS)\n",
				   (void *)rec->ip, (void *)rec->ip);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS_EN)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_old_addr;
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = ftrace_get_addr_new(rec);

	/* This needs to be done before we call ftrace_update_record */
	ftrace_old_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_old_addr);

	case FTRACE_UPDATE_MODIFY_CALL:
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}
void __weak ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	if (unlikely(ftrace_disabled))
		return;

	do_for_each_ftrace_rec(pg, rec) {
		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			ftrace_bug(failed, rec);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}
struct ftrace_rec_iter {
	struct ftrace_page	*pg;
	int			index;
};
/**
 * ftrace_rec_iter_start, start up iterating over traced functions
 *
 * Returns an iterator handle that is used to iterate over all
 * the records that represent address locations where functions
 * are traced.
 *
 * May return NULL if no records are available.
 */
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
{
	/*
	 * We only use a single iterator.
	 * Protected by the ftrace_lock mutex.
	 */
	static struct ftrace_rec_iter ftrace_rec_iter;
	struct ftrace_rec_iter *iter = &ftrace_rec_iter;

	iter->pg = ftrace_pages_start;
	iter->index = 0;

	/* Could have empty pages */
	while (iter->pg && !iter->pg->index)
		iter->pg = iter->pg->next;

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_next, get the next record to process.
 * @iter: The handle to the iterator.
 *
 * Returns the next iterator after the given iterator @iter.
 */
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
{
	iter->index++;

	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* Could have empty pages */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}

	if (!iter->pg)
		return NULL;

	return iter;
}
/**
 * ftrace_rec_iter_record, get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}
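/*
 * Sketch of how arch code typically walks all records with the iterator
 * API above (illustrative only; the patching step is elided):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... patch the call site at rec->ip here ...
 *	}
 */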
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return 0;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, rec);
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}
void ftrace_modify_all_code(int command)
{
	int update = command & FTRACE_UPDATE_TRACE_FUNC;
	int err = 0;

	/*
	 * If the ftrace_caller calls a ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls are set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops are having the right functions
	 * traced.
	 */
	if (update) {
		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
		err = ftrace_update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_START_FUNC_RET)
		err = ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
}
2486 static int __ftrace_modify_code(void *data
)
2488 int *command
= data
;
2490 ftrace_modify_all_code(*command
);
/**
 * ftrace_run_stop_machine, go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, then
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

/**
 * arch_ftrace_update_code, modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if they do not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do whatever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
				   struct ftrace_ops_hash *old_hash)
{
	ops->flags |= FTRACE_OPS_FL_MODIFYING;
	ops->old_hash.filter_hash = old_hash->filter_hash;
	ops->old_hash.notrace_hash = old_hash->notrace_hash;
	ftrace_run_update_code(command);
	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;
	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}

static void control_ops_free(struct ftrace_ops *ops)
{
	free_percpu(ops->disabled);
}

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_all(int command)
{
	update_all_ops = true;
	ftrace_startup_enable(command);
	update_all_ops = false;
}
static int ftrace_startup(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up++;
	command |= FTRACE_UPDATE_CALLS;

	/*
	 * Note that ftrace probes use this to start up
	 * and modify functions they will probe. But we still
	 * set the ADDING flag for modification, as probes
	 * do not have trampolines. If they add them in the
	 * future, then the probes will need to distinguish
	 * between adding and updating probes.
	 */
	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;

	ret = ftrace_hash_ipmodify_enable(ops);
	if (ret < 0) {
		/* Rollback registration process */
		__unregister_ftrace_function(ops);
		ftrace_start_up--;
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
		return ret;
	}

	ftrace_hash_rec_enable(ops, 1);

	ftrace_startup_enable(command);

	ops->flags &= ~FTRACE_OPS_FL_ADDING;

	return 0;
}
static int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance; no need to kill ftrace, it's not
	 * critical, but the ftrace_call callers may never be nopped again
	 * after further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	/* Disabling ipmodify never fails */
	ftrace_hash_ipmodify_disable(ops);
	ftrace_hash_rec_disable(ops, 1);

	ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	command |= FTRACE_UPDATE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled) {
		/*
		 * If these are control ops, they still need their
		 * per_cpu field freed. Since function tracing is
		 * not currently active, we can just free them
		 * without synchronizing all CPUs.
		 */
		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
		return 0;
	}

	/*
	 * If the ops uses a trampoline, then it needs to be
	 * tested first on update.
	 */
	ops->flags |= FTRACE_OPS_FL_REMOVING;

	/* The trampoline logic checks the old hashes */
	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;

	ftrace_run_update_code(command);

	/*
	 * If there's no more ops registered with ftrace, run a
	 * sanity check to make sure all rec flags are cleared.
	 */
	if (ftrace_ops_list == &ftrace_list_end) {
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;

		do_for_each_ftrace_rec(pg, rec) {
			if (FTRACE_WARN_ON_ONCE(rec->flags))
				pr_warn("  %pS flags:%lx\n",
					(void *)rec->ip, rec->flags);
		} while_for_each_ftrace_rec();
	}

	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;

	ops->flags &= ~FTRACE_OPS_FL_REMOVING;

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 * The same goes for freeing the per_cpu data of the control
	 * ops.
	 *
	 * Again, normal synchronize_sched() is not good enough.
	 * We need to do a hard force of sched synchronization.
	 * This is because we use preempt_disable() to do RCU, but
	 * the function tracers can be called where RCU is not watching
	 * (like before user_exit()). We can not rely on the RCU
	 * infrastructure to do the synchronization, thus we must do it
	 * ourselves.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
		schedule_on_each_cpu(ftrace_sync);

		arch_ftrace_trampoline_free(ops);

		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
	}

	return 0;
}
static void ftrace_startup_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up) {
		command = FTRACE_UPDATE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_START_FUNC_RET;
		ftrace_startup_enable(command);
	}
}

static void ftrace_shutdown_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up) {
		command = FTRACE_DISABLE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_STOP_FUNC_RET;
		ftrace_run_update_code(command);
	}
}

static cycle_t		ftrace_update_time;
unsigned long		ftrace_update_tot_cnt;
static inline int ops_traces_mod(struct ftrace_ops *ops)
{
	/*
	 * Filter_hash being empty will default to trace module.
	 * But notrace hash requires a test of individual module functions.
	 */
	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
		ftrace_hash_empty(ops->func_hash->notrace_hash);
}

/*
 * Check if the current ops references the record.
 *
 * If the ops traces all functions, then it was already accounted for.
 * If the ops does not trace the current record function, skip it.
 * If the ops ignores the function via notrace filter, skip it.
 */
static inline int
ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	/* If ops isn't enabled, ignore it */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	/* If ops traces all mods, we already accounted for it */
	if (ops_traces_mod(ops))
		return 0;

	/* The function must be in the filter */
	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
	    !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
		return 0;

	/* If in notrace hash, we ignore it too */
	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
		return 0;

	return 1;
}

static int referenced_filters(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
		if (ops_references_rec(ops, rec))
			cnt++;
	}

	return cnt;
}
static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *p;
	cycle_t start, stop;
	unsigned long update_cnt = 0;
	unsigned long ref = 0;
	bool test = false;
	int i;

	/*
	 * When adding a module, we need to check if tracers are
	 * currently enabled and if they are set to trace all functions.
	 * If they are, we need to enable the module functions as well
	 * as update the reference counts for those function records.
	 */
	if (mod) {
		struct ftrace_ops *ops;

		for (ops = ftrace_ops_list;
		     ops != &ftrace_list_end; ops = ops->next) {
			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
				if (ops_traces_mod(ops))
					ref++;
				else
					test = true;
			}
		}
	}

	start = ftrace_now(raw_smp_processor_id());

	for (pg = new_pgs; pg; pg = pg->next) {

		for (i = 0; i < pg->index; i++) {
			int cnt = ref;

			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;

			p = &pg->records[i];
			if (test)
				cnt += referenced_filters(p);
			p->flags = cnt;

			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (!ftrace_code_disable(mod, p))
				break;

			update_cnt++;

			/*
			 * If the tracing is enabled, go ahead and enable the record.
			 *
			 * The reason not to enable the record immediately is the
			 * inherent check of ftrace_make_nop/ftrace_make_call for
			 * correct previous instructions. Making first the NOP
			 * conversion puts the module to the correct state, thus
			 * passing the ftrace_make_call check.
			 */
			if (ftrace_start_up && cnt) {
				int failed = __ftrace_replace_code(p, 1);
				if (failed)
					ftrace_bug(failed, p);
			}
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += update_cnt;

	return 0;
}
static int ftrace_allocate_records(struct ftrace_page *pg, int count)
{
	int order;
	int cnt;

	if (WARN_ON(!count))
		return -EINVAL;

	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));

	/*
	 * We want to fill as much as possible. No more than a page
	 * may be empty.
	 */
	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
		order--;

 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order >>= 1;
		goto again;
	}

	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->size = cnt;

	if (cnt > count)
		cnt = count;

	return cnt;
}
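/*
 * Worked example (illustrative, not in the original file; assumes 4K
 * pages and a 16-byte struct dyn_ftrace, i.e. ENTRIES_PER_PAGE == 256):
 * for count == 600, DIV_ROUND_UP(600, 256) == 3, so get_count_order()
 * starts at order 2 (4 pages, 1024 entries). Since 1024 >= 600 + 256,
 * the loop drops to order 1 (512 entries), which no longer wastes more
 * than a page. The function then returns cnt == 512 and the caller
 * links another ftrace_page for the remaining 88 records.
 */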
static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	int order;
	int cnt;

	if (!num_to_init)
		return 0;

	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	/*
	 * Try to allocate as much as possible in one contiguous
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
	 */
	for (;;) {
		cnt = ftrace_allocate_records(pg, num_to_init);
		if (cnt < 0)
			goto free_pages;

		num_to_init -= cnt;
		if (!num_to_init)
			break;

		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
		if (!pg->next)
			goto free_pages;

		pg = pg->next;
	}

	return start_pg;

 free_pages:
	pg = start_pg;
	while (pg) {
		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
		free_pages((unsigned long)pg->records, order);
		start_pg = pg->next;
		kfree(pg);
		pg = start_pg;
	}
	pr_info("ftrace: FAILED to allocate memory for functions\n");
	return NULL;
}
#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t				pos;
	loff_t				func_pos;
	struct ftrace_page		*pg;
	struct dyn_ftrace		*func;
	struct ftrace_func_probe	*probe;
	struct trace_parser		parser;
	struct ftrace_hash		*hash;
	struct ftrace_ops		*ops;
	int				hidx;
	int				idx;
	unsigned			flags;
};

static void *
t_hash_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = NULL;
	struct hlist_head *hhd;

	(*pos)++;
	iter->pos = *pos;

	if (iter->probe)
		hnd = &iter->probe->node;
 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	if (WARN_ON_ONCE(!hnd))
		return NULL;

	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);

	return iter;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (!(iter->flags & FTRACE_ITER_DO_HASH))
		return NULL;

	if (iter->func_pos > *pos)
		return NULL;

	iter->hidx = 0;
	for (l = 0; l <= (*pos - iter->func_pos); ) {
		p = t_hash_next(m, &l);
		if (!p)
			break;
	}
	if (!p)
		return NULL;

	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_HASH;

	return iter;
}

static int
t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
{
	struct ftrace_func_probe *rec;

	rec = iter->probe;
	if (WARN_ON_ONCE(!rec))
		return -EIO;

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = iter->ops;
	struct dyn_ftrace *rec = NULL;

	if (unlikely(ftrace_disabled))
		return NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, pos);

	(*pos)++;
	iter->pos = iter->func_pos = *pos;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return t_hash_start(m, pos);

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if (((iter->flags & FTRACE_ITER_FILTER) &&
		     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||

		    ((iter->flags & FTRACE_ITER_ENABLED) &&
		     !(rec->flags & FTRACE_FL_ENABLED))) {

			rec = NULL;
			goto retry;
		}
	}

	if (!rec)
		return t_hash_start(m, pos);

	iter->func = rec;

	return iter;
}

static void reset_iter_read(struct ftrace_iterator *iter)
{
	iter->pos = 0;
	iter->func_pos = 0;
	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = iter->ops;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		return NULL;

	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if ((iter->flags & FTRACE_ITER_FILTER &&
	     ftrace_hash_empty(ops->func_hash->filter_hash)) ||
	    (iter->flags & FTRACE_ITER_NOTRACE &&
	     ftrace_hash_empty(ops->func_hash->notrace_hash))) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_HASH;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_lock. This is because
	 * those pointers can change without the lock.
	 */
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p)
		return t_hash_start(m, pos);

	return iter;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
void * __weak
arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	return NULL;
}

static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
				struct dyn_ftrace *rec)
{
	void *ptr;

	ptr = arch_ftrace_trampoline_func(ops, rec);
	if (ptr)
		seq_printf(m, " ->%pS", ptr);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, iter);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		if (iter->flags & FTRACE_ITER_NOTRACE)
			seq_puts(m, "#### no functions disabled ####\n");
		else
			seq_puts(m, "#### all functions enabled ####\n");
		return 0;
	}

	rec = iter->func;

	if (!rec)
		return 0;

	seq_printf(m, "%ps", (void *)rec->ip);
	if (iter->flags & FTRACE_ITER_ENABLED) {
		struct ftrace_ops *ops = NULL;

		seq_printf(m, " (%ld)%s%s",
			   ftrace_rec_count(rec),
			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops)
				seq_printf(m, "\ttramp: %pS",
					   (void *)ops->trampoline);
			else
				seq_puts(m, "\ttramp: ERROR!");
		}
		add_trampoline_func(m, ops, rec);
	}

	seq_putc(m, '\n');

	return 0;
}
static const struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (iter) {
		iter->pg = ftrace_pages_start;
		iter->ops = &global_ops;
	}

	return iter ? 0 : -ENOMEM;
}

static int
ftrace_enabled_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;

	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (iter) {
		iter->pg = ftrace_pages_start;
		iter->flags = FTRACE_ITER_ENABLED;
		iter->ops = &global_ops;
	}

	return iter ? 0 : -ENOMEM;
}
/**
 * ftrace_regex_open - initialize function tracer filter files
 * @ops: The ftrace_ops that hold the hash filters
 * @flag: The type of filter to process
 * @inode: The inode, usually passed in to your open routine
 * @file: The file, usually passed in to your open routine
 *
 * ftrace_regex_open() initializes the filter files for the
 * @ops. Depending on @flag it may process the filter hash or
 * the notrace hash of @ops. With this called from the open
 * routine, you can use ftrace_filter_write() for the write
 * routine if @flag has FTRACE_ITER_FILTER set, or
 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
 * tracing_lseek() should be used as the lseek routine, and
 * release must call ftrace_regex_release().
 */
int
ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	struct ftrace_hash *hash;
	int ret = 0;

	ftrace_ops_init(ops);

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

	iter->ops = ops;
	iter->flags = flag;

	mutex_lock(&ops->func_hash->regex_lock);

	if (flag & FTRACE_ITER_NOTRACE)
		hash = ops->func_hash->notrace_hash;
	else
		hash = ops->func_hash->filter_hash;

	if (file->f_mode & FMODE_WRITE) {
		const int size_bits = FTRACE_HASH_DEFAULT_BITS;

		if (file->f_flags & O_TRUNC)
			iter->hash = alloc_ftrace_hash(size_bits);
		else
			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);

		if (!iter->hash) {
			trace_parser_put(&iter->parser);
			kfree(iter);
			ret = -ENOMEM;
			goto out_unlock;
		}
	}

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			/* Failed */
			free_ftrace_hash(iter->hash);
			trace_parser_put(&iter->parser);
			kfree(iter);
		}
	} else
		file->private_data = iter;

 out_unlock:
	mutex_unlock(&ops->func_hash->regex_lock);

	return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops,
			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
			inode, file);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
			inode, file);
}
static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	int slen;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		slen = strlen(str);
		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
			matched = 1;
		break;
	}

	return matched;
}
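/*
 * Illustrative mapping (not in the original file) of the glob forms
 * that filter_parse_regex() turns into the match types handled above,
 * using the usual set_ftrace_filter syntax:
 *
 *	"foo"	-> MATCH_FULL		(exact match)
 *	"foo*"	-> MATCH_FRONT_ONLY	(prefix match)
 *	"*foo*"	-> MATCH_MIDDLE_ONLY	(substring match)
 *	"*foo"	-> MATCH_END_ONLY	(suffix match)
 */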
static int
enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
{
	struct ftrace_func_entry *entry;
	int ret = 0;

	entry = ftrace_lookup_ip(hash, rec->ip);
	if (not) {
		/* Do nothing if it doesn't exist */
		if (!entry)
			return 0;

		free_hash_entry(hash, entry);
	} else {
		/* Do nothing if it exists */
		if (entry)
			return 0;

		ret = add_hash_entry(hash, rec->ip);
	}
	return ret;
}

static int
ftrace_match_record(struct dyn_ftrace *rec, char *mod,
		    char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (mod) {
		/* module lookup requires matching the module */
		if (!modname || strcmp(modname, mod))
			return 0;

		/* blank search means to match all funcs in the mod */
		if (!len)
			return 1;
	}

	return ftrace_match(str, regex, len, type);
}
static int
match_records(struct ftrace_hash *hash, char *buff,
	      int len, char *mod, int not)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	int found = 0;
	int ret;

	if (len) {
		type = filter_parse_regex(buff, len, &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {
		if (ftrace_match_record(rec, mod, search, search_len, type)) {
			ret = enter_record(hash, rec, not);
			if (ret < 0) {
				found = ret;
				goto out_unlock;
			}
			found = 1;
		}
	} while_for_each_ftrace_rec();
 out_unlock:
	mutex_unlock(&ftrace_lock);

	return found;
}

static int
ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
{
	return match_records(hash, buff, len, NULL, 0);
}

static int
ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
{
	int not = 0;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'don't filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	return match_records(hash, buff, strlen(buff), mod, not);
}
/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(struct ftrace_hash *hash,
		    char *func, char *cmd, char *param, int enable)
{
	char *mod;
	int ret = -EINVAL;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return ret;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return ret;

	ret = ftrace_match_module_records(hash, func, mod);
	if (!ret)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
core_initcall(ftrace_mod_cmd_init);
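/*
 * Illustrative sketch (not in the original file): registering a custom
 * command by following the template above. The "mycmd" name and
 * my_cmd_callback() are hypothetical. Once registered, writing
 * "<func-glob>:mycmd:<param>" to set_ftrace_filter would invoke the
 * callback, just as "*:mod:ext4" invokes ftrace_mod_callback().
 *
 *	static int my_cmd_callback(struct ftrace_hash *hash, char *func,
 *				   char *cmd, char *param, int enable)
 *	{
 *		// act on the records matching @func here
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name = "mycmd",
 *		.func = my_cmd_callback,
 *	};
 *
 *	static int __init my_cmd_init(void)
 *	{
 *		return register_ftrace_command(&my_cmd);
 *	}
 *	core_initcall(my_cmd_init);
 */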
static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent an RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	preempt_disable_notrace();
	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	preempt_enable_notrace();
}

static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func		= function_trace_probe_call,
	.flags		= FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(trace_probe_ops)
};
static int ftrace_probe_registered;

static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
{
	int ret;
	int i;

	if (ftrace_probe_registered) {
		/* still need to update the function call sites */
		if (ftrace_enabled)
			ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
					       old_hash);
		return;
	}

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	ret = ftrace_startup(&trace_probe_ops, 0);

	ftrace_probe_registered = 1;
}

static void __disable_ftrace_function_probe(void)
{
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	ftrace_shutdown(&trace_probe_ops, 0);

	ftrace_probe_registered = 0;
}

static void ftrace_free_entry(struct ftrace_func_probe *entry)
{
	if (entry->ops->free)
		entry->ops->free(entry->ops, entry->ip, &entry->data);
	kfree(entry);
}
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data)
{
	struct ftrace_ops_hash old_hash_ops;
	struct ftrace_func_probe *entry;
	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
	struct ftrace_hash *old_hash = *orig_hash;
	struct ftrace_hash *hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;
	int ret;

	type = filter_parse_regex(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&trace_probe_ops.func_hash->regex_lock);

	old_hash_ops.filter_hash = old_hash;
	/* Probes only have filters */
	old_hash_ops.notrace_hash = NULL;

	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
	if (!hash) {
		count = -ENOMEM;
		goto out;
	}

	if (unlikely(ftrace_disabled)) {
		count = -ENODEV;
		goto out;
	}

	mutex_lock(&ftrace_lock);

	do_for_each_ftrace_rec(pg, rec) {

		if (!ftrace_match_record(rec, NULL, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->init) {
			if (ops->init(ops, rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		ret = enter_record(hash, rec, 0);
		if (ret < 0) {
			kfree(entry);
			count = ret;
			goto out_unlock;
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();

	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);

	__enable_ftrace_function_probe(&old_hash_ops);

	if (!ret)
		free_ftrace_hash_rcu(old_hash);
	else
		count = ret;

 out_unlock:
	mutex_unlock(&ftrace_lock);
 out:
	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
	free_ftrace_hash(hash);

	return count;
}
enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
};

static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				   void *data, int flags)
{
	struct ftrace_func_entry *rec_entry;
	struct ftrace_func_probe *entry;
	struct ftrace_func_probe *p;
	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
	struct ftrace_hash *old_hash = *orig_hash;
	struct list_head free_list;
	struct ftrace_hash *hash;
	struct hlist_node *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;
	int ret;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else if (glob) {
		int not;

		type = filter_parse_regex(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&trace_probe_ops.func_hash->regex_lock);

	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
	if (!hash)
		/* Hmm, should report this somehow */
		goto out_unlock;

	INIT_LIST_HEAD(&free_list);

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			rec_entry = ftrace_lookup_ip(hash, entry->ip);
			/* It is possible more than one entry had this ip */
			if (rec_entry)
				free_hash_entry(hash, rec_entry);

			hlist_del_rcu(&entry->node);
			list_add(&entry->free_list, &free_list);
		}
	}
	mutex_lock(&ftrace_lock);
	__disable_ftrace_function_probe();
	/*
	 * Remove after the disable is called. Otherwise, if the last
	 * probe is removed, a null hash means *all enabled*.
	 */
	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
	synchronize_sched();
	if (!ret)
		free_ftrace_hash_rcu(old_hash);

	list_for_each_entry_safe(entry, p, &free_list, free_list) {
		list_del(&entry->free_list);
		ftrace_free_entry(entry);
	}
	mutex_unlock(&ftrace_lock);

 out_unlock:
	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
	free_ftrace_hash(hash);
}

void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					   PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

/*
 * Currently we only register ftrace commands from __init, so mark this
 * __init too.
 */
__init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister ftrace commands from __init, so mark
 * this __init too.
 */
__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

static int ftrace_process_regex(struct ftrace_hash *hash,
				char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ret = ftrace_match_records(hash, func, len);
		if (!ret)
			ret = -EINVAL;
		if (ret < 0)
			return ret;
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(hash, func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	struct trace_parser *parser;
	ssize_t ret, read;

	if (!cnt)
		return 0;

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	/* iter->hash is a local copy, so we don't need regex_lock */

	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {
		ret = ftrace_process_regex(iter->hash, parser->buffer,
					   parser->idx, enable);
		trace_parser_clear(parser);
		if (ret < 0)
			goto out;
	}

	ret = read;
 out:
	return ret;
}

ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static int
ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
{
	struct ftrace_func_entry *entry;

	if (!ftrace_location(ip))
		return -EINVAL;

	if (remove) {
		entry = ftrace_lookup_ip(hash, ip);
		if (!entry)
			return -ENOENT;
		free_hash_entry(hash, entry);
		return 0;
	}

	return add_hash_entry(hash, ip);
}
static void ftrace_ops_update_code(struct ftrace_ops *ops,
				   struct ftrace_ops_hash *old_hash)
{
	struct ftrace_ops *op;

	if (!ftrace_enabled)
		return;

	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
		return;
	}

	/*
	 * If this is the shared global_ops filter, then we need to
	 * check if another ops that shares it is enabled.
	 * If so, we still need to run the modify code.
	 */
	if (ops->func_hash != &global_ops.local_hash)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->func_hash == &global_ops.local_hash &&
		    op->flags & FTRACE_OPS_FL_ENABLED) {
			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
			/* Only need to do this once */
			return;
		}
	} while_for_each_ftrace_op(op);
}
static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
		unsigned long ip, int remove, int reset, int enable)
{
	struct ftrace_hash **orig_hash;
	struct ftrace_ops_hash old_hash_ops;
	struct ftrace_hash *old_hash;
	struct ftrace_hash *hash;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ops->func_hash->regex_lock);

	if (enable)
		orig_hash = &ops->func_hash->filter_hash;
	else
		orig_hash = &ops->func_hash->notrace_hash;

	if (reset)
		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
	else
		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);

	if (!hash) {
		ret = -ENOMEM;
		goto out_regex_unlock;
	}

	if (buf && !ftrace_match_records(hash, buf, len)) {
		ret = -EINVAL;
		goto out_regex_unlock;
	}
	if (ip) {
		ret = ftrace_match_addr(hash, ip, remove);
		if (ret < 0)
			goto out_regex_unlock;
	}

	mutex_lock(&ftrace_lock);
	old_hash = *orig_hash;
	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
	if (!ret) {
		ftrace_ops_update_code(ops, &old_hash_ops);
		free_ftrace_hash_rcu(old_hash);
	}
	mutex_unlock(&ftrace_lock);

 out_regex_unlock:
	mutex_unlock(&ops->func_hash->regex_lock);

	free_ftrace_hash(hash);
	return ret;
}
static int
ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
		int reset, int enable)
{
	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
}

/**
 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
 * @ops - the ops to set the filter with
 * @ip - the address to add to or remove from the filter.
 * @remove - non zero to remove the ip from the filter
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @ip is NULL, it fails to update the filter.
 */
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_addr(ops, ip, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
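/*
 * Illustrative sketch (not in the original file): how a caller might
 * use ftrace_set_filter_ip() to trace one known call site. The
 * callback, the ops name, and how the address is obtained are all
 * hypothetical; @addr must be the ftrace location of a traced function.
 *
 *	static void my_func_tracer(unsigned long ip, unsigned long parent_ip,
 *				   struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// called on entry to the filtered function
 *	}
 *
 *	static struct ftrace_ops my_tracer_ops = {
 *		.func  = my_func_tracer,
 *		.flags = FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	// addr obtained elsewhere, e.g. via kallsyms
 *	ftrace_set_filter_ip(&my_tracer_ops, addr, 0, 0);
 *	register_ftrace_function(&my_tracer_ops);
 */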
static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
		 int reset, int enable)
{
	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops - the ops to set the filter with
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);
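/*
 * Illustrative sketch (not in the original file): filtering by glob
 * rather than by address, continuing the hypothetical my_tracer_ops
 * above. Globs follow the set_ftrace_filter syntax handled by
 * ftrace_match() ("foo", "foo*", "*foo", "*foo*").
 *
 *	// trace every function whose name starts with "vfs_",
 *	// resetting any previously set filter first
 *	ftrace_set_filter(&my_tracer_ops, "vfs_*", strlen("vfs_*"), 1);
 *
 *	// but never trace vfs_read, even though it matches above
 *	// (ftrace_set_notrace() is defined just below)
 *	ftrace_set_notrace(&my_tracer_ops, "vfs_read", strlen("vfs_read"), 0);
 */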
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops - the ops to set the notrace filter with
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);

/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_filter);

/**
 * ftrace_set_global_notrace - set a function to not trace with global tracers
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;

/* Used by function selftest to not test if filter is set */
bool ftrace_filter_param __initdata;

static int __init set_ftrace_notrace(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);

static int __init set_ftrace_filter(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);
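/*
 * Example (illustrative, not in the original file): the parameters
 * parsed above take comma-separated globs on the kernel command line,
 * which set_ftrace_early_filters() applies during ftrace_init(), e.g.:
 *
 *	ftrace_filter=kmalloc,kfree ftrace_notrace=*spin_lock*
 */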
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);

static unsigned long save_global_trampoline;
static unsigned long save_global_flags;

static int __init set_graph_function(char *str)
{
	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);

static int __init set_graph_notrace_function(char *str)
{
	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_notrace=", set_graph_notrace_function);

static void __init set_ftrace_early_graph(char *buf, int enable)
{
	int ret;
	char *func;
	unsigned long *table = ftrace_graph_funcs;
	int *count = &ftrace_graph_count;

	if (!enable) {
		table = ftrace_graph_notrace_funcs;
		count = &ftrace_graph_notrace_count;
	}

	while (buf) {
		func = strsep(&buf, ",");
		/* we allow only one expression at a time */
		ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
		if (ret)
			printk(KERN_DEBUG "ftrace: function %s not "
					  "traceable\n", func);
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
{
	char *func;

	ftrace_ops_init(ops);

	while (buf) {
		func = strsep(&buf, ",");
		ftrace_set_regex(ops, func, strlen(func), 0, enable);
	}
}

static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf, 1);
	if (ftrace_graph_notrace_buf[0])
		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}
int ftrace_regex_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_ops_hash old_hash_ops;
	struct ftrace_iterator *iter;
	struct ftrace_hash **orig_hash;
	struct ftrace_hash *old_hash;
	struct trace_parser *parser;
	int filter_hash;
	int ret;

	if (file->f_mode & FMODE_READ) {
		iter = m->private;
		seq_release(inode, file);
	} else
		iter = file->private_data;

	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		parser->buffer[parser->idx] = 0;
		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
	}

	trace_parser_put(parser);

	mutex_lock(&iter->ops->func_hash->regex_lock);

	if (file->f_mode & FMODE_WRITE) {
		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);

		if (filter_hash)
			orig_hash = &iter->ops->func_hash->filter_hash;
		else
			orig_hash = &iter->ops->func_hash->notrace_hash;

		mutex_lock(&ftrace_lock);
		old_hash = *orig_hash;
		old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
		old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
		ret = ftrace_hash_move(iter->ops, filter_hash,
				       orig_hash, iter->hash);
		if (!ret) {
			ftrace_ops_update_code(iter->ops, &old_hash_ops);
			free_ftrace_hash_rcu(old_hash);
		}
		mutex_unlock(&ftrace_lock);
	}

	mutex_unlock(&iter->ops->func_hash->regex_lock);
	free_ftrace_hash(iter->hash);
	kfree(iter);

	return 0;
}
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_enabled_fops = {
	.open = ftrace_enabled_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
int ftrace_graph_notrace_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

struct ftrace_graph_data {
	unsigned long *table;
	size_t size;
	int *count;
	const struct seq_operations *seq_ops;
};

static void *
__g_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_graph_data *fgd = m->private;

	if (*pos >= *fgd->count)
		return NULL;
	return &fgd->table[*pos];
}

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __g_next(m, pos);
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_graph_data *fgd = m->private;

	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
	if (!*fgd->count && !*pos)
		return (void *)1;

	return __g_next(m, pos);
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;

	if (!ptr)
		return 0;

	if (ptr == (unsigned long *)1) {
		struct ftrace_graph_data *fgd = m->private;

		if (fgd->table == ftrace_graph_funcs)
			seq_puts(m, "#### all functions enabled ####\n");
		else
			seq_puts(m, "#### no functions disabled ####\n");
		return 0;
	}

	seq_printf(m, "%ps\n", (void *)*ptr);

	return 0;
}
static const struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
__ftrace_graph_open(struct inode *inode, struct file *file,
		    struct ftrace_graph_data *fgd)
{
	int ret = 0;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		*fgd->count = 0;
		memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
	}
	mutex_unlock(&graph_lock);

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, fgd->seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = fgd;
		}
	} else
		file->private_data = fgd;

	return ret;
}

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

	fgd->table = ftrace_graph_funcs;
	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
	fgd->count = &ftrace_graph_count;
	fgd->seq_ops = &ftrace_graph_seq_ops;

	return __ftrace_graph_open(inode, file, fgd);
}

static int
ftrace_graph_notrace_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

	fgd->table = ftrace_graph_notrace_funcs;
	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
	fgd->count = &ftrace_graph_notrace_count;
	fgd->seq_ops = &ftrace_graph_seq_ops;

	return __ftrace_graph_open(inode, file, fgd);
}
static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;

		kfree(m->private);
		seq_release(inode, file);
	} else {
		kfree(file->private_data);
	}

	return 0;
}

static int
ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int fail = 1;
	int type, not;
	char *search;
	bool exists;
	int i;

	/* decode regex */
	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
	if (!not && *idx >= size)
		return -EBUSY;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled)) {
		mutex_unlock(&ftrace_lock);
		return -ENODEV;
	}

	do_for_each_ftrace_rec(pg, rec) {

		if (ftrace_match_record(rec, NULL, search, search_len, type)) {
			/* if it is in the array */
			exists = false;
			for (i = 0; i < *idx; i++) {
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			}

			if (!not) {
				fail = 0;
				if (!exists) {
					array[(*idx)++] = rec->ip;
					if (*idx >= size)
						goto out;
				}
			} else {
				if (exists) {
					array[i] = array[--(*idx)];
					array[*idx] = 0;
					fail = 0;
				}
			}
		}
	} while_for_each_ftrace_rec();
 out:
	mutex_unlock(&ftrace_lock);

	if (fail)
		return -EINVAL;

	return 0;
}
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret = 0;
	struct ftrace_graph_data *fgd = file->private_data;

	if (!cnt)
		return 0;

	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		parser.buffer[parser.idx] = 0;

		mutex_lock(&graph_lock);

		/* we allow only one expression at a time */
		ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
				      parser.buffer);

		mutex_unlock(&graph_lock);
	}

	if (!ret)
		ret = read;

	trace_parser_put(&parser);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open		= ftrace_graph_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_graph_release,
};

static const struct file_operations ftrace_graph_notrace_fops = {
	.open		= ftrace_graph_notrace_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_graph_release,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent)
{

	trace_create_file("set_ftrace_filter", 0644, parent,
			  ops, &ftrace_filter_fops);

	trace_create_file("set_ftrace_notrace", 0644, parent,
			  ops, &ftrace_notrace_fops);
}
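/*
 * Illustrative sketch (not in the original file): a tracer creating
 * per-ops filter files under its own tracefs directory, paired with
 * the destroy call below on teardown. The my_instance_dir dentry is
 * hypothetical and would come from the caller's tracefs setup.
 *
 *	ftrace_create_filter_files(&my_tracer_ops, my_instance_dir);
 *	...
 *	ftrace_destroy_filter_files(&my_tracer_ops);
 */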
/*
 * The name "destroy_filter_files" is really a misnomer. Although
 * in the future it may actually delete the files, for now it is
 * really intended to make sure the ops passed in are disabled
 * and that when this function returns, the caller is free to
 * free the ops.
 *
 * The "destroy" name is only to match the "create" name that this
 * should be paired with.
 */
void ftrace_destroy_filter_files(struct ftrace_ops *ops)
{
	mutex_lock(&ftrace_lock);
	if (ops->flags & FTRACE_OPS_FL_ENABLED)
		ftrace_shutdown(ops, 0);
	ops->flags |= FTRACE_OPS_FL_DELETED;
	mutex_unlock(&ftrace_lock);
}
static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
{

	trace_create_file("available_filter_functions", 0444,
			  d_tracer, NULL, &ftrace_avail_fops);

	trace_create_file("enabled_functions", 0444,
			  d_tracer, NULL, &ftrace_enabled_fops);

	ftrace_create_filter_files(&global_ops, d_tracer);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	trace_create_file("set_graph_function", 0444, d_tracer,
			  NULL,
			  &ftrace_graph_fops);
	trace_create_file("set_graph_notrace", 0444, d_tracer,
			  NULL,
			  &ftrace_graph_notrace_fops);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
static int ftrace_cmp_ips(const void *a, const void *b)
{
	const unsigned long *ipa = a;
	const unsigned long *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

static void ftrace_swap_ips(void *a, void *b, int size)
{
	unsigned long *ipa = a;
	unsigned long *ipb = b;
	unsigned long t;

	t = *ipa;
	*ipa = *ipb;
	*ipb = t;
}
static int ftrace_process_locs(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long count;
	unsigned long *p;
	unsigned long addr;
	unsigned long flags = 0; /* Shut up gcc */
	int ret = -ENOMEM;

	count = end - start;

	if (!count)
		return 0;

	sort(start, count, sizeof(*start),
	     ftrace_cmp_ips, ftrace_swap_ips);

	start_pg = ftrace_allocate_pages(count);
	if (!start_pg)
		return -ENOMEM;

	mutex_lock(&ftrace_lock);

	/*
	 * Core and each module needs their own pages, as
	 * modules will free them when they are removed.
	 * Force a new page to be allocated for modules.
	 */
	if (!mod) {
		WARN_ON(ftrace_pages || ftrace_pages_start);
		/* First initialization */
		ftrace_pages = ftrace_pages_start = start_pg;
	} else {
		if (!ftrace_pages)
			goto out;

		if (WARN_ON(ftrace_pages->next)) {
			/* Hmm, we have free pages? */
			while (ftrace_pages->next)
				ftrace_pages = ftrace_pages->next;
		}

		ftrace_pages->next = start_pg;
	}

	p = start;
	pg = start_pg;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;

		if (pg->index == pg->size) {
			/* We should have allocated enough */
			if (WARN_ON(!pg->next))
				break;
			pg = pg->next;
		}

		rec = &pg->records[pg->index++];
		rec->ip = addr;
	}

	/* We should have used all pages */
	WARN_ON(pg->next);

	/* Assign the last page to ftrace_pages */
	ftrace_pages = pg;

	/*
	 * We only need to disable interrupts on start up
	 * because we are modifying code that an interrupt
	 * may execute, and the modification is not atomic.
	 * But for modules, nothing runs the code we modify
	 * until we are finished with it, and there's no
	 * reason to cause large interrupt latencies while we do it.
	 */
	if (!mod)
		local_irq_save(flags);
	ftrace_update_code(mod, start_pg);
	if (!mod)
		local_irq_restore(flags);
	ret = 0;
 out:
	mutex_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_MODULES

#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)

void ftrace_release_mod(struct module *mod)
{
	struct dyn_ftrace *rec;
	struct ftrace_page **last_pg;
	struct ftrace_page *pg;
	int order;

	mutex_lock(&ftrace_lock);

	if (ftrace_disabled)
		goto out_unlock;

	/*
	 * Each module has its own ftrace_pages, remove
	 * them from the list.
	 */
	last_pg = &ftrace_pages_start;
	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
		rec = &pg->records[0];
		if (within_module_core(rec->ip, mod)) {
			/*
			 * As core pages are first, the first
			 * page should never be a module page.
			 */
			if (WARN_ON(pg == ftrace_pages_start))
				goto out_unlock;

			/* Check if we are deleting the last page */
			if (pg == ftrace_pages)
				ftrace_pages = next_to_ftrace_page(last_pg);

			*last_pg = pg->next;
			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
			free_pages((unsigned long)pg->records, order);
			kfree(pg);
		} else
			last_pg = &pg->next;
	}
 out_unlock:
	mutex_unlock(&ftrace_lock);
}

static void ftrace_init_module(struct module *mod,
			       unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_process_locs(mod, start, end);
}

void ftrace_module_init(struct module *mod)
{
	ftrace_init_module(mod, mod->ftrace_callsites,
			   mod->ftrace_callsites +
			   mod->num_ftrace_callsites);
}

static int ftrace_module_notify_exit(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct module *mod = data;

	if (val == MODULE_STATE_GOING)
		ftrace_release_mod(mod);

	return 0;
}
#else
static int ftrace_module_notify_exit(struct notifier_block *self,
				     unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */
struct notifier_block ftrace_module_exit_nb = {
	.notifier_call = ftrace_module_notify_exit,
	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
};

void __init ftrace_init(void)
{
	extern unsigned long __start_mcount_loc[];
	extern unsigned long __stop_mcount_loc[];
	unsigned long count, flags;
	int ret;

	local_irq_save(flags);
	ret = ftrace_dyn_arch_init();
	local_irq_restore(flags);
	if (ret)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;
	if (!count) {
		pr_info("ftrace: No functions to be traced?\n");
		goto failed;
	}

	pr_info("ftrace: allocating %ld entries in %ld pages\n",
		count, count / ENTRIES_PER_PAGE + 1);

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_process_locs(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	ret = register_module_notifier(&ftrace_module_exit_nb);
	if (ret)
		pr_warning("Failed to register trace ftrace module exit notifier\n");

	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}
/* Do nothing if arch does not support this */
void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
}

static void ftrace_update_trampoline(struct ftrace_ops *ops)
{

	/*
	 * Currently there's no safe way to free a trampoline when the kernel
	 * is configured with PREEMPT. That is because a task could be preempted
	 * when it jumped to the trampoline, it may be preempted for a long time
	 * depending on the system load, and currently there's no way to know
	 * when it will be off the trampoline. If the trampoline is freed
	 * too early, when the task runs again, it will be executing on freed
	 * memory and crash.
	 */
#ifdef CONFIG_PREEMPT
	/* Currently, only non dynamic ops can have a trampoline */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
		return;
#endif

	arch_ftrace_update_trampoline(ops);
}
#else

static struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				  FTRACE_OPS_FL_INITIALIZED |
				  FTRACE_OPS_FL_PID,
};

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
core_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
static inline void ftrace_startup_all(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(ops, command)					\
	({								\
		int ___ret = __register_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
	})
# define ftrace_shutdown(ops, command)					\
	({								\
		int ___ret = __unregister_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
	})

# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)

static inline int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	return 1;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
}

#endif /* CONFIG_DYNAMIC_FTRACE */
__init void ftrace_init_global_array_ops(struct trace_array *tr)
{
	tr->ops = &global_ops;
	tr->ops->private = tr;
}

void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
{
	/* If we filter on pids, update to use the pid function */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		if (WARN_ON(tr->ops->func != ftrace_stub))
			printk("ftrace ops had %pS for function\n",
			       tr->ops->func);
	}
	tr->ops->func = func;
	tr->ops->private = tr;
}

void ftrace_reset_array_ops(struct trace_array *tr)
{
	tr->ops->func = ftrace_stub;
}
static void
ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
		return;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_sched().
	 */
	preempt_disable_notrace();
	trace_recursion_set(TRACE_CONTROL_BIT);

	/*
	 * Control funcs (perf) use RCU. Only trace if
	 * RCU is currently active.
	 */
	if (!rcu_is_watching())
		goto out;

	do_for_each_ftrace_op(op, ftrace_control_list) {
		if (!(op->flags & FTRACE_OPS_FL_STUB) &&
		    !ftrace_function_local_disabled(op) &&
		    ftrace_ops_test(op, ip, regs))
			op->func(ip, parent_ip, op, regs);
	} while_for_each_ftrace_op(op);
 out:
	trace_recursion_clear(TRACE_CONTROL_BIT);
	preempt_enable_notrace();
}

static struct ftrace_ops control_ops = {
	.func	= ftrace_ops_control_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(control_ops)
};
static inline void
__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *ignored, struct pt_regs *regs)
{
	struct ftrace_ops *op;
	int bit;

	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_sched().
	 */
	preempt_disable_notrace();
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (ftrace_ops_test(op, ip, regs)) {
			if (FTRACE_WARN_ON(!op->func)) {
				pr_warn("op=%p %pS\n", op, op);
				goto out;
			}
			op->func(ip, parent_ip, op, regs);
		}
	} while_for_each_ftrace_op(op);
out:
	preempt_enable_notrace();
	trace_clear_recursion(bit);
}
/*
 * Some archs only support passing ip and parent_ip. Even though
 * the list function ignores the op parameter, we do not want any
 * C side effects, where a function is called without the caller
 * sending a third parameter.
 * Archs are to support both the regs and ftrace_ops at the same time.
 * If they support ftrace_ops, it is assumed they support regs.
 * If callbacks want to use regs, they must either check for regs
 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
 * An architecture can pass partial regs with ftrace_ops and still
 * set ARCH_SUPPORTS_FTRACE_OPS.
 */
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs)
{
	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
}
#else
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
{
	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
#endif
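
/*
 * Editor's sketch, not in the original file: per the comment above, a
 * callback that wants regs must tolerate regs == NULL unless the kernel
 * is built with CONFIG_DYNAMIC_FTRACE_WITH_REGS. The callback name is
 * hypothetical; #if 0 keeps it from being built.
 */
#if 0
static void notrace my_cb(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *regs)
{
	if (!regs)
		return;		/* arch passed only ip/parent_ip */
	/* safe to inspect regs here */
}
#endif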
/*
 * If there's only one function registered but it does not support
 * recursion, this function will be called by the mcount trampoline.
 * This function will handle recursion protection.
 */
static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;

	op->func(ip, parent_ip, op, regs);

	trace_clear_recursion(bit);
}
/**
 * ftrace_ops_get_func - get the function a trampoline should call
 * @ops: the ops to get the function for
 *
 * Normally the mcount trampoline will call the ops->func, but there
 * are times that it should not. For example, if the ops does not
 * have its own recursion protection, then it should call the
 * ftrace_ops_recurs_func() instead.
 *
 * Returns the function that the trampoline should call for @ops.
 */
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
{
	/*
	 * If the func handles its own recursion, call it directly.
	 * Otherwise call the recursion protected function that
	 * will call the ftrace ops function.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
		return ftrace_ops_recurs_func;

	return ops->func;
}
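
/*
 * Editor's sketch, not in the original file: what a caller sees for a
 * hypothetical ops that lacks its own recursion protection. Guarded by
 * #if 0 so it is never compiled.
 */
#if 0
	static struct ftrace_ops my_ops = {
		.func	= my_trace_func,	/* hypothetical callback */
		/* no FTRACE_OPS_FL_RECURSION_SAFE set ... */
	};

	/* ... so the trampoline is handed the protective wrapper: */
	WARN_ON(ftrace_ops_get_func(&my_ops) != ftrace_ops_recurs_func);
#endif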
static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}
static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}
static void clear_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(pid);
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
static int ftrace_pid_add(int p)
{
	struct pid *pid;
	struct ftrace_pid *fpid;
	int ret = -EINVAL;

	mutex_lock(&ftrace_lock);

	if (!p)
		pid = ftrace_swapper_pid;
	else
		pid = find_get_pid(p);

	if (!pid)
		goto out;

	ret = 0;

	list_for_each_entry(fpid, &ftrace_pids, list)
		if (fpid->pid == pid)
			goto out_put;

	ret = -ENOMEM;

	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
	if (!fpid)
		goto out_put;

	list_add(&fpid->list, &ftrace_pids);
	fpid->pid = pid;

	set_ftrace_pid_task(pid);

	ftrace_update_pid_func();

	ftrace_startup_all(0);

	mutex_unlock(&ftrace_lock);
	return 0;

out_put:
	if (pid != ftrace_swapper_pid)
		put_pid(pid);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
static void ftrace_pid_reset(void)
{
	struct ftrace_pid *fpid, *safe;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
		struct pid *pid = fpid->pid;

		clear_ftrace_pid_task(pid);

		list_del(&fpid->list);
		kfree(fpid);
	}

	ftrace_update_pid_func();
	ftrace_startup_all(0);

	mutex_unlock(&ftrace_lock);
}
static void *fpid_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ftrace_lock);

	if (!ftrace_pids_enabled() && (!*pos))
		return (void *) 1;

	return seq_list_start(&ftrace_pids, *pos);
}

static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	if (v == (void *)1)
		return NULL;

	return seq_list_next(v, &ftrace_pids, pos);
}

static void fpid_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
static int fpid_show(struct seq_file *m, void *v)
{
	const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);

	if (v == (void *)1) {
		seq_puts(m, "no pid\n");
		return 0;
	}

	if (fpid->pid == ftrace_swapper_pid)
		seq_puts(m, "swapper tasks\n");
	else
		seq_printf(m, "%u\n", pid_vnr(fpid->pid));

	return 0;
}
static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};
static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_pid_reset();

	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &ftrace_pid_sops);

	return ret;
}
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64], *tmp;
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/*
	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
	 * to clean the filter quietly.
	 */
	tmp = strstrip(buf);
	if (strlen(tmp) == 0)
		return 1;

	ret = kstrtol(tmp, 10, &val);
	if (ret < 0)
		return ret;

	ret = ftrace_pid_add(val);

	return ret ? ret : cnt;
}
static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	return 0;
}

static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= tracing_lseek,
	.release	= ftrace_pid_release,
};
static __init int ftrace_init_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	ftrace_init_dyn_tracefs(d_tracer);

	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			    NULL, &ftrace_pid_fops);

	ftrace_profile_tracefs(d_tracer);

	return 0;
}
fs_initcall(ftrace_init_tracefs);
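
/*
 * Example usage of the file created above (editor's note; paths as in
 * Documentation/trace/ftrace.txt):
 *
 *	# trace only the task with PID 1234
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *
 *	# writing an empty string clears the PID filter quietly
 *	echo > /sys/kernel/debug/tracing/set_ftrace_pid
 */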
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it takes no locks, so it is safe to
 * call even from atomic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * ftrace_is_dead - Test if ftrace is dead or not.
 *
 * Returns 1 if ftrace is "dead", zero otherwise.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret = -1;

	ftrace_ops_init(ops);

	mutex_lock(&ftrace_lock);

	ret = ftrace_startup(ops, 0);

	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);
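
/*
 * Editor's sketch, not part of the original file: a minimal module-style
 * user of the two exports above. The names my_trace_func/my_ops/my_init/
 * my_exit are hypothetical; #if 0 keeps this from being built.
 */
#if 0
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* runs at the start of every traced function; must stay notrace */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}
#endif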
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		/* we are starting ftrace again */
		if (ftrace_ops_list != &ftrace_list_end)
			update_ftrace_function();

		ftrace_startup_sysctl();

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
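
/*
 * Editor's note: this handler backs the kernel.ftrace_enabled sysctl
 * (wired up in kernel/sysctl.c). Example from userspace:
 *
 *	# stop all function-trace callbacks
 *	sysctl kernel.ftrace_enabled=0
 *	# equivalently:
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 */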
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static struct ftrace_ops graph_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				  FTRACE_OPS_FL_INITIALIZED |
				  FTRACE_OPS_FL_PID |
				  FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};

static int ftrace_graph_active;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}
/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
static void
ftrace_graph_probe_sched_switch(void *ignore,
			struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}

	return NOTIFY_DONE;
}
static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;
	return __ftrace_graph_entry(trace);
}
/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer requires testing the
 * function against the global ops, and not just trace any function
 * that any ftrace_ops has registered.
 */
static void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it is the function
	 * traced.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}
static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
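
/*
 * Editor's sketch, not in the original file: registering a graph tracer.
 * The callback names are hypothetical; #if 0 keeps this from being
 * built. Note the argument order above: the return hook comes first.
 */
#if 0
static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* non-zero means: trace this function */
}

static void notrace my_graph_return(struct ftrace_graph_ret *trace)
{
	/* called when the traced function returns */
}

static int __init my_graph_init(void)
{
	/* pairs with unregister_ftrace_graph() on teardown */
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}
#endif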
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * Function graph does not allocate the trampoline, but
	 * other global_ops do. We need to reset the ALLOC_TRAMP flag
	 * if one was used.
	 */
	global_ops.trampoline = save_global_trampoline;
	if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
		global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
#endif

 out:
	mutex_unlock(&ftrace_lock);
}
static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}
/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}