/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"
#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = !!(cond);		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = !!(cond);		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif
static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs);

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif
/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
	       unlikely((op) != &ftrace_list_end))
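/*
 * Hedged usage sketch (added for clarity, not part of the original text):
 * the macro pair above is meant to bracket a loop body, roughly the way the
 * list callers later in this file use it:
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (ftrace_ops_test(op, ip, regs))
 *			op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 *
 * The names ip, parent_ip and regs here are illustrative placeholders.
 */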
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next)
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip, op, regs);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before every CPU stops seeing the
 * previously set function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

static void control_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	control_ops_disable_all(ops);
	return 0;
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);
#else
static inline void update_function_graph_func(void) { }
#endif
static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = ftrace_ops_list;

	/* If there's no ftrace_ops registered, just call the stub function */
	if (ftrace_ops_list == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (ftrace_ops_list->next == &ftrace_list_end) {
		func = ftrace_ops_get_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}
int using_ftrace_ops_list_func(void)
{
	return ftrace_trace_function == ftrace_ops_list_func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void add_ftrace_list_ops(struct ftrace_ops **list,
				struct ftrace_ops *main_ops,
				struct ftrace_ops *ops)
{
	int first = *list == &ftrace_list_end;
	add_ftrace_ops(list, ops);
	if (first)
		add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
				  struct ftrace_ops *main_ops,
				  struct ftrace_ops *ops)
{
	int ret = remove_ftrace_ops(list, ops);
	if (!ret && *list == &ftrace_list_end)
		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
	return ret;
}
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		if (control_ops_alloc(ops))
			return -ENOMEM;
		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		ret = remove_ftrace_list_ops(&ftrace_control_list,
					     &control_ops, ops);
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static void ftrace_update_pid_func(void)
{
	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	update_ftrace_function();
}
#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};
#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
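/*
 * Rough worked example (added, and only illustrative): assuming a 4 KiB
 * PAGE_SIZE, a 16-byte ftrace_profile_page header and a profile record of
 * roughly 48 bytes (64-bit build with CONFIG_FUNCTION_GRAPH_TRACER),
 * PROFILE_RECORDS_SIZE is about 4080 bytes and PROFILES_PER_PAGE works out
 * to about 85 records per page. The exact numbers depend on the
 * architecture and configuration.
 */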
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* when not using function graph, compare against the hit count */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg             s^2\n"
		      "  --------                               "
		   "---    ----            ---             ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		   "  --------                               ---\n");
#endif
	return 0;
}
static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
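		/*
		 * Worked example with made-up numbers (added, not from the
		 * original source, and ignoring the extra /1000 unit
		 * conversion above): for n = 3 samples of 10, 20 and 30,
		 * time = 60 and time_squared = 1400, so
		 * s^2 = (3 * 1400 - 60 * 60) / (3 * 2) = 100, i.e. a
		 * standard deviation of 10, as expected for these samples.
		 */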
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}
int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
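	/*
	 * Back-of-the-envelope example (added, illustrative only): with
	 * ~20000 functions and roughly 85 records per 4 KiB page, this asks
	 * for about 236 pages, i.e. on the order of 1 MiB of profiling
	 * records per CPU.
	 */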
	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}
static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}
/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0, NULL, NULL);
	return 1;
}
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}
static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;
#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct list_head	free_list;
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
static struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED,
};

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !hash->count;
}
static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	if (hash->size_bits)
		key = hash_long(entry->ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}
static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}
void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int i;

	/*
	 * If the new source is empty, just free dst and assign it
	 * the empty_hash.
	 */
	if (!src->count) {
		new_hash = EMPTY_HASH;
		goto update;
	}

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return -ENOMEM;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

update:
	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}
static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !ftrace_lookup_ip(hash->notrace_hash, ip));
}
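/*
 * Illustrative summary (added here for clarity, not in the original source):
 * with an empty filter hash and an empty notrace hash every ip matches; a
 * non-empty filter hash matches only the ips it contains; and an ip present
 * in the notrace hash never matches, regardless of the filter hash.
 */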
/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}
/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
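/*
 * Hedged usage sketch (added, not part of the original text), mirroring the
 * callers later in this file:
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (some_condition(rec))
 *			goto out;	(a 'break' would only leave the inner loop)
 *	} while_for_each_ftrace_rec();
 * out:
 *
 * some_condition() is a hypothetical placeholder.
 */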
static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}

/**
 * ftrace_location - return true if the ip given is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}
/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}
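/*
 * Usage note (added, and hedged): code that patches kernel text, such as the
 * kprobes subsystem, is expected to consult ftrace_location() or
 * ftrace_text_reserved() so it does not stomp on an mcount/fentry site that
 * ftrace manages.
 */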
/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}
static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = 1;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return;
			rec->flags--;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * If the rec had TRAMP enabled, then it needs to
			 * be cleared. As TRAMP can only be enabled iff
			 * there is only a single ops attached to it.
			 * In other words, always disable it on decrementing.
			 * In the future, we may set it if rec count is
			 * decremented to one, and the ops that is left
			 * has a trampoline.
			 */
			rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;
		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return;
	} while_for_each_ftrace_rec();
}
static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @ip: The address that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
	unsigned long flag = 0UL;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure its disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && ftrace_rec_count(rec))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
	 */
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) !=
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) !=
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED;
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
		 *   from the save regs, to a non-save regs function or
		 *   vice versa, or from a trampoline call.
		 */
		if (flag & FTRACE_FL_ENABLED)
			return FTRACE_UPDATE_MAKE_CALL;

		return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there's no more users, clear all flags */
		if (!ftrace_rec_count(rec))
			rec->flags = 0;
		else
			/* Just disable the record (keep REGS state) */
			rec->flags &= ~FTRACE_FL_ENABLED;
	}

	return FTRACE_UPDATE_MAKE_NOP;
}

/**
 * ftrace_update_record, set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 1);
}

/**
 * ftrace_test_record, check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 0);
}
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	/*
	 * Need to check removed ops first.
	 * If they are being removed, and this rec has a tramp,
	 * and this rec is in the ops list, then it would be the
	 * one with the tramp.
	 */
	if (removed_ops) {
		if (hash_contains_ip(ip, &removed_ops->old_hash))
			return removed_ops;
	}

	/*
	 * Need to find the current trampoline for a rec.
	 * Now, a trampoline is only attached to a rec if there
	 * was a single 'ops' attached to it. But this can be called
	 * when we are adding another op to the rec or removing the
	 * current one. Thus, if the op is being added, we can
	 * ignore it because it hasn't attached itself to the rec
	 * yet.
	 *
	 * If an ops is being modified (hooking to different functions)
	 * then we don't care about the new functions that are being
	 * added, just the old ones (that are probably being removed).
	 *
	 * If we are adding an ops to a function that already is using
	 * a trampoline, it needs to be removed (trampolines are only
	 * for single ops connected), then an ops that is not being
	 * modified also needs to be checked.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		/*
		 * If the ops is being added, it hasn't gotten to
		 * the point to be removed from this tree yet.
		 */
		if (op->flags & FTRACE_OPS_FL_ADDING)
			continue;

		/*
		 * If the ops is being modified and is in the old
		 * hash, then it is probably being removed from this
		 * function.
		 */
		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, &op->old_hash))
			return op;
		/*
		 * If the ops is not being added or modified, and it's
		 * in its normal filter hash, then this must be the one
		 * we want!
		 */
		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, op->func_hash))
			return op;

	} while_for_each_ftrace_op(op);

	return NULL;
}
static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* pass rec in as regs to have non-NULL val */
		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

/**
 * ftrace_get_addr_new - Get the call address to set to
 * @rec:  The ftrace record descriptor
 *
 * If the record has the FTRACE_FL_REGS set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 *
 * Returns the address of the trampoline to set to
 */
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP) {
		ops = ftrace_find_tramp_ops_new(rec);
		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
				(void *)rec->ip, (void *)rec->ip, rec->flags);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}
/**
 * ftrace_get_addr_curr - Get the call address that is already there
 * @rec:  The ftrace record descriptor
 *
 * The FTRACE_FL_REGS_EN is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
 *
 * Returns the address of the trampoline that is currently being called
 */
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP_EN) {
		ops = ftrace_find_tramp_ops_curr(rec);
		if (FTRACE_WARN_ON(!ops)) {
			pr_warning("Bad trampoline accounting at: %p (%pS)\n",
				   (void *)rec->ip, (void *)rec->ip);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS_EN)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_old_addr;
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = ftrace_get_addr_new(rec);

	/* This needs to be done before we call ftrace_update_record */
	ftrace_old_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_old_addr);

	case FTRACE_UPDATE_MODIFY_CALL:
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}
void __weak ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	if (unlikely(ftrace_disabled))
		return;

	do_for_each_ftrace_rec(pg, rec) {
		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			ftrace_bug(failed, rec->ip);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}
struct ftrace_rec_iter {
	struct ftrace_page	*pg;
	int			index;
};

/**
 * ftrace_rec_iter_start, start up iterating over traced functions
 *
 * Returns an iterator handle that is used to iterate over all
 * the records that represent address locations where functions
 * are traced.
 *
 * May return NULL if no records are available.
 */
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
{
	/*
	 * We only use a single iterator.
	 * Protected by the ftrace_lock mutex.
	 */
	static struct ftrace_rec_iter ftrace_rec_iter;
	struct ftrace_rec_iter *iter = &ftrace_rec_iter;

	iter->pg = ftrace_pages_start;
	iter->index = 0;

	/* Could have empty pages */
	while (iter->pg && !iter->pg->index)
		iter->pg = iter->pg->next;

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_next, get the next record to process.
 * @iter: The handle to the iterator.
 *
 * Returns the next iterator after the given iterator @iter.
 */
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
{
	iter->index++;

	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* Could have empty pages */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_record, get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	if (unlikely(ftrace_disabled))
		return 0;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}
void ftrace_modify_all_code(int command)
{
	int update = command & FTRACE_UPDATE_TRACE_FUNC;
	int err = 0;

	/*
	 * If the ftrace_caller calls a ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls are set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops are having the right functions
	 * traced.
	 */
	if (update) {
		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
		err = ftrace_update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_START_FUNC_RET)
		err = ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}
/**
 * ftrace_run_stop_machine, go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, then
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

/**
 * arch_ftrace_update_code, modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if it does not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do what ever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
				   struct ftrace_hash *old_hash)
{
	ops->flags |= FTRACE_OPS_FL_MODIFYING;
	ops->old_hash.filter_hash = old_hash;
	ftrace_run_update_code(command);
	ops->old_hash.filter_hash = NULL;
	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void control_ops_free(struct ftrace_ops *ops)
{
	free_percpu(ops->disabled);
}

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_all(int command)
{
	update_all_ops = true;
	ftrace_startup_enable(command);
	update_all_ops = false;
}
static int ftrace_startup(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up++;
	command |= FTRACE_UPDATE_CALLS;

	/*
	 * Note that ftrace probes uses this to start up
	 * and modify functions it will probe. But we still
	 * set the ADDING flag for modification, as probes
	 * do not have trampolines. If they add them in the
	 * future, then the probes will need to distinguish
	 * between adding and updating probes.
	 */
	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;

	ftrace_hash_rec_enable(ops, 1);

	ftrace_startup_enable(command);

	ops->flags &= ~FTRACE_OPS_FL_ADDING;

	return 0;
}
static int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may be never nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	ftrace_hash_rec_disable(ops, 1);

	ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	command |= FTRACE_UPDATE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled) {
		/*
		 * If these are control ops, they still need their
		 * per_cpu field freed. Since, function tracing is
		 * not currently active, we can just free them
		 * without synchronizing all CPUs.
		 */
		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
		return 0;
	}

	/*
	 * If the ops uses a trampoline, then it needs to be
	 * tested first on update.
	 */
	ops->flags |= FTRACE_OPS_FL_REMOVING;
	removed_ops = ops;

	/* The trampoline logic checks the old hashes */
	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;

	ftrace_run_update_code(command);

	/*
	 * If there's no more ops registered with ftrace, run a
	 * sanity check to make sure all rec flags are cleared.
	 */
	if (ftrace_ops_list == &ftrace_list_end) {
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;

		do_for_each_ftrace_rec(pg, rec) {
			if (FTRACE_WARN_ON_ONCE(rec->flags))
				pr_warn("  %pS flags:%lx\n",
					(void *)rec->ip, rec->flags);
		} while_for_each_ftrace_rec();
	}

	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;

	removed_ops = NULL;
	ops->flags &= ~FTRACE_OPS_FL_REMOVING;

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 * The same goes for freeing the per_cpu data of the control
	 * ops.
	 *
	 * Again, normal synchronize_sched() is not good enough.
	 * We need to do a hard force of sched synchronization.
	 * This is because we use preempt_disable() to do RCU, but
	 * the function tracers can be called where RCU is not watching
	 * (like before user_exit()). We can not rely on the RCU
	 * infrastructure to do the synchronization, thus we must do it
	 * ourselves.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
		schedule_on_each_cpu(ftrace_sync);

		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
	}

	return 0;
}
static void ftrace_startup_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
}

static void ftrace_shutdown_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
}

static cycle_t		ftrace_update_time;
unsigned long		ftrace_update_tot_cnt;

static inline int ops_traces_mod(struct ftrace_ops *ops)
{
	/*
	 * Filter_hash being empty will default to trace module.
	 * But notrace hash requires a test of individual module functions.
	 */
	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
		ftrace_hash_empty(ops->func_hash->notrace_hash);
}
/*
 * Check if the current ops references the record.
 *
 * If the ops traces all functions, then it was already accounted for.
 * If the ops does not trace the current record function, skip it.
 * If the ops ignores the function via notrace filter, skip it.
 */
static int
ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	/* If ops isn't enabled, ignore it */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	/* If ops traces all mods, we already accounted for it */
	if (ops_traces_mod(ops))
		return 0;

	/* The function must be in the filter */
	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
	    !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
		return 0;

	/* If in notrace hash, we ignore it too */
	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
		return 0;

	return 1;
}

static int referenced_filters(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
		if (ops_references_rec(ops, rec))
			cnt++;
	}

	return cnt;
}

static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *p;
	cycle_t start, stop;
	unsigned long update_cnt = 0;
	unsigned long ref = 0;
	bool test = false;
	int i;

	/*
	 * When adding a module, we need to check if tracers are
	 * currently enabled and if they are set to trace all functions.
	 * If they are, we need to enable the module functions as well
	 * as update the reference counts for those function records.
	 */
	if (mod) {
		struct ftrace_ops *ops;

		for (ops = ftrace_ops_list;
		     ops != &ftrace_list_end; ops = ops->next) {
			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
				if (ops_traces_mod(ops))
					ref++;
				else
					test = true;
			}
		}
	}

	start = ftrace_now(raw_smp_processor_id());

	for (pg = new_pgs; pg; pg = pg->next) {

		for (i = 0; i < pg->index; i++) {
			int cnt = ref;

			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;

			p = &pg->records[i];
			if (test)
				cnt += referenced_filters(p);

			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (!ftrace_code_disable(mod, p))
				break;

			update_cnt++;

			/*
			 * If the tracing is enabled, go ahead and enable the record.
			 *
			 * The reason not to enable the record immediately is the
			 * inherent check of ftrace_make_nop/ftrace_make_call for
			 * correct previous instructions. Making first the NOP
			 * conversion puts the module to the correct state, thus
			 * passing the ftrace_make_call check.
			 */
			if (ftrace_start_up && cnt) {
				int failed = __ftrace_replace_code(p, 1);
				if (failed)
					ftrace_bug(failed, p->ip);
			}
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += update_cnt;

	return 0;
}

static int ftrace_allocate_records(struct ftrace_page *pg, int count)
{
	int order;
	int cnt;

	if (WARN_ON(!count))
		return -EINVAL;

	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));

	/*
	 * We want to fill as much as possible. No more than a page
	 * may be empty.
	 */
	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
		order--;

 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order >>= 1;
		goto again;
	}

	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->size = cnt;

	if (cnt > count)
		cnt = count;

	return cnt;
}

static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	int order;
	int cnt;

	if (!num_to_init)
		return NULL;

	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	/*
	 * Try to allocate as much as possible in one contiguous
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
	 */
	for (;;) {
		cnt = ftrace_allocate_records(pg, num_to_init);
		if (cnt < 0)
			goto free_pages;

		num_to_init -= cnt;
		if (!num_to_init)
			break;

		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
		if (!pg->next)
			goto free_pages;

		pg = pg->next;
	}

	return start_pg;

 free_pages:
	pg = start_pg;
	while (pg) {
		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
		free_pages((unsigned long)pg->records, order);
		start_pg = pg->next;
		kfree(pg);
		pg = start_pg;
	}
	pr_info("ftrace: FAILED to allocate memory for functions\n");
	return NULL;
}

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t				pos;
	loff_t				func_pos;
	struct ftrace_page		*pg;
	struct dyn_ftrace		*func;
	struct ftrace_func_probe	*probe;
	struct trace_parser		parser;
	struct ftrace_hash		*hash;
	struct ftrace_ops		*ops;
	int				hidx;
	int				idx;
	unsigned			flags;
};

static void *
t_hash_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = NULL;
	struct hlist_head *hhd;

	(*pos)++;
	iter->pos = *pos;

	if (iter->probe)
		hnd = &iter->probe->node;
 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	if (WARN_ON_ONCE(!hnd))
		return NULL;

	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);

	return iter;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (!(iter->flags & FTRACE_ITER_DO_HASH))
		return NULL;

	if (iter->func_pos > *pos)
		return NULL;

	iter->hidx = 0;
	for (l = 0; l <= (*pos - iter->func_pos); ) {
		p = t_hash_next(m, &l);
		if (!p)
			break;
	}
	if (!p)
		return NULL;

	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_HASH;

	return iter;
}

static int
t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
{
	struct ftrace_func_probe *rec;

	rec = iter->probe;
	if (WARN_ON_ONCE(!rec))
		return -EIO;

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = iter->ops;
	struct dyn_ftrace *rec = NULL;

	if (unlikely(ftrace_disabled))
		return NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, pos);

	(*pos)++;
	iter->pos = iter->func_pos = *pos;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return t_hash_start(m, pos);

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if (((iter->flags & FTRACE_ITER_FILTER) &&
		     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||

		    ((iter->flags & FTRACE_ITER_ENABLED) &&
		     !(rec->flags & FTRACE_FL_ENABLED))) {

			rec = NULL;
			goto retry;
		}
	}

	if (!rec)
		return t_hash_start(m, pos);

	iter->func = rec;

	return iter;
}

static void reset_iter_read(struct ftrace_iterator *iter)
{
	iter->pos = 0;
	iter->func_pos = 0;
	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = iter->ops;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		return NULL;

	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if ((iter->flags & FTRACE_ITER_FILTER &&
	     ftrace_hash_empty(ops->func_hash->filter_hash)) ||
	    (iter->flags & FTRACE_ITER_NOTRACE &&
	     ftrace_hash_empty(ops->func_hash->notrace_hash))) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_HASH;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_mutex. This is because
	 * those pointers can change without the lock.
	 */
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p)
		return t_hash_start(m, pos);

	return iter;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, iter);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		if (iter->flags & FTRACE_ITER_NOTRACE)
			seq_printf(m, "#### no functions disabled ####\n");
		else
			seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	rec = iter->func;

	if (!rec)
		return 0;

	seq_printf(m, "%ps", (void *)rec->ip);
	if (iter->flags & FTRACE_ITER_ENABLED) {
		seq_printf(m, " (%ld)%s",
			   ftrace_rec_count(rec),
			   rec->flags & FTRACE_FL_REGS ? " R" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			struct ftrace_ops *ops;

			ops = ftrace_find_tramp_ops_any(rec);
			if (ops)
				seq_printf(m, "\ttramp: %pS",
					   (void *)ops->trampoline);
			else
				seq_printf(m, "\ttramp: ERROR!");
		}
	}

	seq_printf(m, "\n");

	return 0;
}

static const struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (iter) {
		iter->pg = ftrace_pages_start;
		iter->ops = &global_ops;
	}

	return iter ? 0 : -ENOMEM;
}

static int
ftrace_enabled_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (iter) {
		iter->pg = ftrace_pages_start;
		iter->flags = FTRACE_ITER_ENABLED;
		iter->ops = &global_ops;
	}

	return iter ? 0 : -ENOMEM;
}

/**
 * ftrace_regex_open - initialize function tracer filter files
 * @ops: The ftrace_ops that hold the hash filters
 * @flag: The type of filter to process
 * @inode: The inode, usually passed in to your open routine
 * @file: The file, usually passed in to your open routine
 *
 * ftrace_regex_open() initializes the filter files for the
 * @ops. Depending on @flag it may process the filter hash or
 * the notrace hash of @ops. With this called from the open
 * routine, you can use ftrace_filter_write() for the write
 * routine if @flag has FTRACE_ITER_FILTER set, or
 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
 * tracing_lseek() should be used as the lseek routine, and
 * release must call ftrace_regex_release().
 */
int
ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	struct ftrace_hash *hash;
	int ret = 0;

	ftrace_ops_init(ops);

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

	iter->ops = ops;
	iter->flags = flag;

	mutex_lock(&ops->func_hash->regex_lock);

	if (flag & FTRACE_ITER_NOTRACE)
		hash = ops->func_hash->notrace_hash;
	else
		hash = ops->func_hash->filter_hash;

	if (file->f_mode & FMODE_WRITE) {
		const int size_bits = FTRACE_HASH_DEFAULT_BITS;

		if (file->f_flags & O_TRUNC)
			iter->hash = alloc_ftrace_hash(size_bits);
		else
			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);

		if (!iter->hash) {
			trace_parser_put(&iter->parser);
			kfree(iter);
			ret = -ENOMEM;
			goto out_unlock;
		}
	}

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			/* Failed */
			free_ftrace_hash(iter->hash);
			trace_parser_put(&iter->parser);
			kfree(iter);
		}
	} else
		file->private_data = iter;

 out_unlock:
	mutex_unlock(&ops->func_hash->regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops,
			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
			inode, file);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
				 inode, file);
}

static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	int slen;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		slen = strlen(str);
		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
			matched = 1;
		break;
	}

	return matched;
}

*hash
, struct dyn_ftrace
*rec
, int not)
3172 struct ftrace_func_entry
*entry
;
3175 entry
= ftrace_lookup_ip(hash
, rec
->ip
);
3177 /* Do nothing if it doesn't exist */
3181 free_hash_entry(hash
, entry
);
3183 /* Do nothing if it exists */
3187 ret
= add_hash_entry(hash
, rec
->ip
);
3193 ftrace_match_record(struct dyn_ftrace
*rec
, char *mod
,
3194 char *regex
, int len
, int type
)
3196 char str
[KSYM_SYMBOL_LEN
];
3199 kallsyms_lookup(rec
->ip
, NULL
, NULL
, &modname
, str
);
3202 /* module lookup requires matching the module */
3203 if (!modname
|| strcmp(modname
, mod
))
3206 /* blank search means to match all funcs in the mod */
3211 return ftrace_match(str
, regex
, len
, type
);
3215 match_records(struct ftrace_hash
*hash
, char *buff
,
3216 int len
, char *mod
, int not)
3218 unsigned search_len
= 0;
3219 struct ftrace_page
*pg
;
3220 struct dyn_ftrace
*rec
;
3221 int type
= MATCH_FULL
;
3222 char *search
= buff
;
3227 type
= filter_parse_regex(buff
, len
, &search
, ¬);
3228 search_len
= strlen(search
);
3231 mutex_lock(&ftrace_lock
);
3233 if (unlikely(ftrace_disabled
))
3236 do_for_each_ftrace_rec(pg
, rec
) {
3237 if (ftrace_match_record(rec
, mod
, search
, search_len
, type
)) {
3238 ret
= enter_record(hash
, rec
, not);
3245 } while_for_each_ftrace_rec();
3247 mutex_unlock(&ftrace_lock
);
3253 ftrace_match_records(struct ftrace_hash
*hash
, char *buff
, int len
)
3255 return match_records(hash
, buff
, len
, NULL
, 0);
3259 ftrace_match_module_records(struct ftrace_hash
*hash
, char *buff
, char *mod
)
3263 /* blank or '*' mean the same */
3264 if (strcmp(buff
, "*") == 0)
3267 /* handle the case of 'dont filter this module' */
3268 if (strcmp(buff
, "!") == 0 || strcmp(buff
, "!*") == 0) {
3273 return match_records(hash
, buff
, strlen(buff
), mod
, not);
/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */
static int
ftrace_mod_callback(struct ftrace_hash *hash,
		    char *func, char *cmd, char *param, int enable)
{
	char *mod;
	int ret = -EINVAL;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return ret;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return ret;

	ret = ftrace_match_module_records(hash, func, mod);
	if (!ret)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
core_initcall(ftrace_mod_cmd_init);

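/*
 * Illustrative sketch (not part of this file): following the template
 * above, a hypothetical command could hook into the filter syntax, so
 * that "echo 'vfs_*:my_cmd:arg' > set_ftrace_filter" reaches its
 * callback. All names prefixed with "my_" are made up for the example.
 */
static int
my_cmd_callback(struct ftrace_hash *hash,
		char *func, char *cmd, char *param, int enable)
{
	int ret;

	/* @func is the glob, @cmd is "my_cmd", @param is the ":arg" part */
	if (!param)
		return -EINVAL;

	/* act on the matching functions, e.g. add them to @hash */
	ret = ftrace_match_records(hash, func, strlen(func));
	if (!ret)
		return -EINVAL;
	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command my_cmd = {
	.name			= "my_cmd",
	.func			= my_cmd_callback,
};

static int __init my_cmd_init(void)
{
	return register_ftrace_command(&my_cmd);
}
core_initcall(my_cmd_init);
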
static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	preempt_disable_notrace();
	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	preempt_enable_notrace();
}

static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func		= function_trace_probe_call,
	.flags		= FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(trace_probe_ops)
};

static int ftrace_probe_registered;

static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
{
	int ret;
	int i;

	if (ftrace_probe_registered) {
		/* still need to update the function call sites */
		if (ftrace_enabled)
			ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
					       old_hash);
		return;
	}

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	ret = ftrace_startup(&trace_probe_ops, 0);

	ftrace_probe_registered = 1;
}

static void __disable_ftrace_function_probe(void)
{
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	ftrace_shutdown(&trace_probe_ops, 0);

	ftrace_probe_registered = 0;
}

static void ftrace_free_entry(struct ftrace_func_probe *entry)
{
	if (entry->ops->free)
		entry->ops->free(entry->ops, entry->ip, &entry->data);
	kfree(entry);
}

int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
	struct ftrace_hash *old_hash = *orig_hash;
	struct ftrace_hash *hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;
	int ret;

	type = filter_parse_regex(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&trace_probe_ops.func_hash->regex_lock);

	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
	if (!hash) {
		count = -ENOMEM;
		goto out;
	}

	if (unlikely(ftrace_disabled)) {
		count = -ENODEV;
		goto out;
	}

	mutex_lock(&ftrace_lock);

	do_for_each_ftrace_rec(pg, rec) {

		if (!ftrace_match_record(rec, NULL, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->init) {
			if (ops->init(ops, rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		ret = enter_record(hash, rec, 0);
		if (ret < 0) {
			kfree(entry);
			count = ret;
			goto out_unlock;
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();

	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);

	__enable_ftrace_function_probe(old_hash);

	if (!ret)
		free_ftrace_hash_rcu(old_hash);
	else
		count = ret;

 out_unlock:
	mutex_unlock(&ftrace_lock);
 out:
	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
	free_ftrace_hash(hash);

	return count;
}

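/*
 * Illustrative sketch (not part of this file): a minimal function probe
 * in the style of the traceon/traceoff triggers. Everything named
 * "sample_*" is hypothetical; the probe callback runs from the function
 * tracer, so it should stay as simple as possible.
 */
static atomic_t sample_probe_hits = ATOMIC_INIT(0);

static void
sample_probe_func(unsigned long ip, unsigned long parent_ip, void **data)
{
	atomic_inc(&sample_probe_hits);
}

static struct ftrace_probe_ops sample_probe_ops = {
	.func		= sample_probe_func,
};

static int sample_probe_register(void)
{
	/* hook every function whose name starts with "vfs_" */
	int count = register_ftrace_function_probe("vfs_*",
						   &sample_probe_ops, NULL);

	/* a positive return is the number of functions hooked */
	return count <= 0 ? -EINVAL : 0;
}

/*
 * The probe would later be removed with
 * unregister_ftrace_function_probe_func("vfs_*", &sample_probe_ops).
 */
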
enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
};

static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				  void *data, int flags)
{
	struct ftrace_func_entry *rec_entry;
	struct ftrace_func_probe *entry;
	struct ftrace_func_probe *p;
	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
	struct ftrace_hash *old_hash = *orig_hash;
	struct list_head free_list;
	struct ftrace_hash *hash;
	struct hlist_node *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;
	int ret;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else if (glob) {
		int not;

		type = filter_parse_regex(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&trace_probe_ops.func_hash->regex_lock);

	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
	if (!hash)
		/* Hmm, should report this somehow */
		goto out_unlock;

	INIT_LIST_HEAD(&free_list);

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			rec_entry = ftrace_lookup_ip(hash, entry->ip);
			/* It is possible more than one entry had this ip */
			if (rec_entry)
				free_hash_entry(hash, rec_entry);

			hlist_del_rcu(&entry->node);
			list_add(&entry->free_list, &free_list);
		}
	}
	mutex_lock(&ftrace_lock);
	__disable_ftrace_function_probe();
	/*
	 * Remove after the disable is called. Otherwise, if the last
	 * probe is removed, a null hash means *all enabled*.
	 */
	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
	synchronize_sched();
	if (!ret)
		free_ftrace_hash_rcu(old_hash);

	list_for_each_entry_safe(entry, p, &free_list, free_list) {
		list_del(&entry->free_list);
		ftrace_free_entry(entry);
	}
	mutex_unlock(&ftrace_lock);

 out_unlock:
	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
	free_ftrace_hash(hash);
}

void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}

static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

/*
 * Currently we only register ftrace commands from __init, so mark this
 * __init too.
 */
__init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister ftrace commands from __init, so mark
 * this __init too.
 */
__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

static int ftrace_process_regex(struct ftrace_hash *hash,
				char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ret = ftrace_match_records(hash, func, len);
		if (!ret)
			ret = -EINVAL;
		if (ret < 0)
			return ret;
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(hash, func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	struct trace_parser *parser;
	ssize_t ret, read;

	if (!cnt)
		return 0;

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	/* iter->hash is a local copy, so we don't need regex_lock */

	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {
		ret = ftrace_process_regex(iter->hash, parser->buffer,
					   parser->idx, enable);
		trace_parser_clear(parser);
		if (ret < 0)
			goto out;
	}

	ret = read;
 out:
	return ret;
}

ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static int
ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
{
	struct ftrace_func_entry *entry;

	if (!ftrace_location(ip))
		return -EINVAL;

	if (remove) {
		entry = ftrace_lookup_ip(hash, ip);
		if (!entry)
			return -ENOENT;
		free_hash_entry(hash, entry);
		return 0;
	}

	return add_hash_entry(hash, ip);
}

static void ftrace_ops_update_code(struct ftrace_ops *ops,
				   struct ftrace_hash *old_hash)
{
	if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
}

static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
		unsigned long ip, int remove, int reset, int enable)
{
	struct ftrace_hash **orig_hash;
	struct ftrace_hash *old_hash;
	struct ftrace_hash *hash;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ops->func_hash->regex_lock);

	if (enable)
		orig_hash = &ops->func_hash->filter_hash;
	else
		orig_hash = &ops->func_hash->notrace_hash;

	if (reset)
		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
	else
		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);

	if (!hash) {
		ret = -ENOMEM;
		goto out_regex_unlock;
	}

	if (buf && !ftrace_match_records(hash, buf, len)) {
		ret = -EINVAL;
		goto out_regex_unlock;
	}
	if (ip) {
		ret = ftrace_match_addr(hash, ip, remove);
		if (ret < 0)
			goto out_regex_unlock;
	}

	mutex_lock(&ftrace_lock);
	old_hash = *orig_hash;
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
	if (!ret) {
		ftrace_ops_update_code(ops, old_hash);
		free_ftrace_hash_rcu(old_hash);
	}
	mutex_unlock(&ftrace_lock);

 out_regex_unlock:
	mutex_unlock(&ops->func_hash->regex_lock);

	free_ftrace_hash(hash);
	return ret;
}

static int
ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
		int reset, int enable)
{
	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
}

/**
 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
 * @ops - the ops to set the filter with
 * @ip - the address to add to or remove from the filter.
 * @remove - non zero to remove the ip from the filter
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled
 * If @ip is NULL, it fails to update filter.
 */
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_addr(ops, ip, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);

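/*
 * Illustrative sketch (not part of this file): filtering a single
 * function by address, e.g. from code that wants its ftrace_ops
 * callback to fire for one symbol only. "sample_ops" is hypothetical;
 * kallsyms_lookup_name() is used here just to obtain an address.
 */
static int sample_filter_one(struct ftrace_ops *sample_ops)
{
	unsigned long ip = kallsyms_lookup_name("do_sys_open");

	if (!ip)
		return -ENOENT;

	/* reset any previous filter and trace only this function */
	return ftrace_set_filter_ip(sample_ops, ip, 0, 1);
}
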
static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
		 int reset, int enable)
{
	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops - the ops to set the filter with
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops - the ops to set the notrace filter with
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);

/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_filter);

/**
 * ftrace_set_global_notrace - set a function to not trace with global tracers
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);

/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;

/* Used by function selftest to not test if filter is set */
bool ftrace_filter_param __initdata;

static int __init set_ftrace_notrace(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);

static int __init set_ftrace_filter(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);

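/*
 * For example, booting with
 *
 *	ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=rcu_*
 *
 * pre-loads the global filter and notrace hashes with comma-separated
 * globs before the tracing directory is even available (the buffers are
 * consumed by set_ftrace_early_filters() below).
 */
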
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);

static int __init set_graph_function(char *str)
{
	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);

static int __init set_graph_notrace_function(char *str)
{
	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_notrace=", set_graph_notrace_function);

static void __init set_ftrace_early_graph(char *buf, int enable)
{
	int ret;
	char *func;
	unsigned long *table = ftrace_graph_funcs;
	int *count = &ftrace_graph_count;

	if (!enable) {
		table = ftrace_graph_notrace_funcs;
		count = &ftrace_graph_notrace_count;
	}

	while (buf) {
		func = strsep(&buf, ",");
		/* we allow only one expression at a time */
		ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
		if (ret)
			printk(KERN_DEBUG "ftrace: function %s not "
					  "traceable\n", func);
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
{
	char *func;

	ftrace_ops_init(ops);

	while (buf) {
		func = strsep(&buf, ",");
		ftrace_set_regex(ops, func, strlen(func), 0, enable);
	}
}

static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf, 1);
	if (ftrace_graph_notrace_buf[0])
		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}

int ftrace_regex_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;
	struct ftrace_hash **orig_hash;
	struct ftrace_hash *old_hash;
	struct trace_parser *parser;
	int filter_hash;
	int ret;

	if (file->f_mode & FMODE_READ) {
		iter = m->private;
		seq_release(inode, file);
	} else
		iter = file->private_data;

	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		parser->buffer[parser->idx] = 0;
		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
	}

	trace_parser_put(parser);

	mutex_lock(&iter->ops->func_hash->regex_lock);

	if (file->f_mode & FMODE_WRITE) {
		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);

		if (filter_hash)
			orig_hash = &iter->ops->func_hash->filter_hash;
		else
			orig_hash = &iter->ops->func_hash->notrace_hash;

		mutex_lock(&ftrace_lock);
		old_hash = *orig_hash;
		ret = ftrace_hash_move(iter->ops, filter_hash,
				       orig_hash, iter->hash);
		if (!ret) {
			ftrace_ops_update_code(iter->ops, old_hash);
			free_ftrace_hash_rcu(old_hash);
		}
		mutex_unlock(&ftrace_lock);
	}

	mutex_unlock(&iter->ops->func_hash->regex_lock);
	free_ftrace_hash(iter->hash);
	kfree(iter);

	return 0;
}

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_enabled_fops = {
	.open = ftrace_enabled_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
int ftrace_graph_notrace_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

struct ftrace_graph_data {
	unsigned long *table;
	size_t size;
	int *count;
	const struct seq_operations *seq_ops;
};

static void *
__g_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_graph_data *fgd = m->private;

	if (*pos >= *fgd->count)
		return NULL;
	return &fgd->table[*pos];
}

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __g_next(m, pos);
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_graph_data *fgd = m->private;

	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
	if (!*fgd->count && !*pos)
		return (void *)1;

	return __g_next(m, pos);
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;

	if (!ptr)
		return 0;

	if (ptr == (unsigned long *)1) {
		struct ftrace_graph_data *fgd = m->private;

		if (fgd->table == ftrace_graph_funcs)
			seq_printf(m, "#### all functions enabled ####\n");
		else
			seq_printf(m, "#### no functions disabled ####\n");
		return 0;
	}

	seq_printf(m, "%ps\n", (void *)*ptr);

	return 0;
}

static const struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
__ftrace_graph_open(struct inode *inode, struct file *file,
		    struct ftrace_graph_data *fgd)
{
	int ret = 0;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		*fgd->count = 0;
		memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
	}
	mutex_unlock(&graph_lock);

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, fgd->seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = fgd;
		}
	} else
		file->private_data = fgd;

	return ret;
}

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

	fgd->table = ftrace_graph_funcs;
	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
	fgd->count = &ftrace_graph_count;
	fgd->seq_ops = &ftrace_graph_seq_ops;

	return __ftrace_graph_open(inode, file, fgd);
}

static int
ftrace_graph_notrace_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

	fgd->table = ftrace_graph_notrace_funcs;
	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
	fgd->count = &ftrace_graph_notrace_count;
	fgd->seq_ops = &ftrace_graph_seq_ops;

	return __ftrace_graph_open(inode, file, fgd);
}

static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;

		kfree(m->private);
		seq_release(inode, file);
	} else {
		kfree(file->private_data);
	}

	return 0;
}

static int
ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int fail = 1;
	int type, not;
	char *search;
	bool exists;
	int i;

	/* decode regex */
	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
	if (!not && *idx >= size)
		return -EBUSY;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled)) {
		mutex_unlock(&ftrace_lock);
		return -ENODEV;
	}

	do_for_each_ftrace_rec(pg, rec) {

		if (ftrace_match_record(rec, NULL, search, search_len, type)) {
			/* if it is in the array */
			exists = false;
			for (i = 0; i < *idx; i++) {
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			}

			if (!not) {
				fail = 0;
				if (!exists) {
					array[(*idx)++] = rec->ip;
					if (*idx >= size)
						goto out;
				}
			} else {
				if (exists) {
					array[i] = array[--(*idx)];
					fail = 0;
				}
			}
		}
	} while_for_each_ftrace_rec();
 out:
	mutex_unlock(&ftrace_lock);

	if (fail)
		return -EINVAL;

	return 0;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret = 0;
	struct ftrace_graph_data *fgd = file->private_data;

	if (!cnt)
		return 0;

	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		parser.buffer[parser.idx] = 0;

		mutex_lock(&graph_lock);

		/* we allow only one expression at a time */
		ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
				      parser.buffer);

		mutex_unlock(&graph_lock);
	}

	if (!ret)
		ret = read;

	trace_parser_put(&parser);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open		= ftrace_graph_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_graph_release,
};

static const struct file_operations ftrace_graph_notrace_fops = {
	.open		= ftrace_graph_notrace_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_graph_release,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent)
{

	trace_create_file("set_ftrace_filter", 0644, parent,
			  ops, &ftrace_filter_fops);

	trace_create_file("set_ftrace_notrace", 0644, parent,
			  ops, &ftrace_notrace_fops);
}

/*
 * The name "destroy_filter_files" is really a misnomer. Although
 * in the future, it may actually delete the files, but this is
 * really intended to make sure the ops passed in are disabled
 * and that when this function returns, the caller is free to
 * free the ops.
 *
 * The "destroy" name is only to match the "create" name that this
 * should be paired with.
 */
void ftrace_destroy_filter_files(struct ftrace_ops *ops)
{
	mutex_lock(&ftrace_lock);
	if (ops->flags & FTRACE_OPS_FL_ENABLED)
		ftrace_shutdown(ops, 0);
	ops->flags |= FTRACE_OPS_FL_DELETED;
	mutex_unlock(&ftrace_lock);
}

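/*
 * Illustrative pairing (not part of this file): a tracer instance that
 * owns a private ftrace_ops would use the two calls above roughly as
 * follows, with "my_ops" and "my_dir" being hypothetical names:
 *
 *	ftrace_create_filter_files(&my_ops, my_dir);
 *	...
 *	ftrace_destroy_filter_files(&my_ops);	// disables my_ops
 *	// only now is it safe to free the structure containing my_ops
 */
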
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{

	trace_create_file("available_filter_functions", 0444,
			d_tracer, NULL, &ftrace_avail_fops);

	trace_create_file("enabled_functions", 0444,
			d_tracer, NULL, &ftrace_enabled_fops);

	ftrace_create_filter_files(&global_ops, d_tracer);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	trace_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	trace_create_file("set_graph_notrace", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_notrace_fops);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}

static int ftrace_cmp_ips(const void *a, const void *b)
{
	const unsigned long *ipa = a;
	const unsigned long *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

static void ftrace_swap_ips(void *a, void *b, int size)
{
	unsigned long *ipa = a;
	unsigned long *ipb = b;
	unsigned long t;

	t = *ipa;
	*ipa = *ipb;
	*ipb = t;
}

static int ftrace_process_locs(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long count;
	unsigned long *p;
	unsigned long addr;
	unsigned long flags = 0; /* Shut up gcc */
	int ret = -ENOMEM;

	count = end - start;

	if (!count)
		return 0;

	sort(start, count, sizeof(*start),
	     ftrace_cmp_ips, ftrace_swap_ips);

	start_pg = ftrace_allocate_pages(count);
	if (!start_pg)
		return -ENOMEM;

	mutex_lock(&ftrace_lock);

	/*
	 * Core and each module needs their own pages, as
	 * modules will free them when they are removed.
	 * Force a new page to be allocated for modules.
	 */
	if (!mod) {
		WARN_ON(ftrace_pages || ftrace_pages_start);
		/* First initialization */
		ftrace_pages = ftrace_pages_start = start_pg;
	} else {
		if (!ftrace_pages)
			goto out;

		if (WARN_ON(ftrace_pages->next)) {
			/* Hmm, we have free pages? */
			while (ftrace_pages->next)
				ftrace_pages = ftrace_pages->next;
		}

		ftrace_pages->next = start_pg;
	}

	p = start;
	pg = start_pg;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;

		if (pg->index == pg->size) {
			/* We should have allocated enough */
			if (WARN_ON(!pg->next))
				break;
			pg = pg->next;
		}

		rec = &pg->records[pg->index++];
		rec->ip = addr;
	}

	/* We should have used all pages */
	WARN_ON(pg->next);

	/* Assign the last page to ftrace_pages */
	ftrace_pages = pg;

	/*
	 * We only need to disable interrupts on start up
	 * because we are modifying code that an interrupt
	 * may execute, and the modification is not atomic.
	 * But for modules, nothing runs the code we modify
	 * until we are finished with it, and there's no
	 * reason to cause large interrupt latencies while we do it.
	 */
	if (!mod)
		local_irq_save(flags);
	ftrace_update_code(mod, start_pg);
	if (!mod)
		local_irq_restore(flags);
	ret = 0;
 out:
	mutex_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_MODULES

#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)

void ftrace_release_mod(struct module *mod)
{
	struct dyn_ftrace *rec;
	struct ftrace_page **last_pg;
	struct ftrace_page *pg;
	int order;

	mutex_lock(&ftrace_lock);

	if (ftrace_disabled)
		goto out_unlock;

	/*
	 * Each module has its own ftrace_pages, remove
	 * them from the list.
	 */
	last_pg = &ftrace_pages_start;
	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
		rec = &pg->records[0];
		if (within_module_core(rec->ip, mod)) {
			/*
			 * As core pages are first, the first
			 * page should never be a module page.
			 */
			if (WARN_ON(pg == ftrace_pages_start))
				goto out_unlock;

			/* Check if we are deleting the last page */
			if (pg == ftrace_pages)
				ftrace_pages = next_to_ftrace_page(last_pg);

			*last_pg = pg->next;
			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
			free_pages((unsigned long)pg->records, order);
			kfree(pg);
		} else
			last_pg = &pg->next;
	}
 out_unlock:
	mutex_unlock(&ftrace_lock);
}

static void ftrace_init_module(struct module *mod,
			       unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_process_locs(mod, start, end);
}

void ftrace_module_init(struct module *mod)
{
	ftrace_init_module(mod, mod->ftrace_callsites,
			   mod->ftrace_callsites +
			   mod->num_ftrace_callsites);
}

static int ftrace_module_notify_exit(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct module *mod = data;

	if (val == MODULE_STATE_GOING)
		ftrace_release_mod(mod);

	return 0;
}
#else
static int ftrace_module_notify_exit(struct notifier_block *self,
				     unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block ftrace_module_exit_nb = {
	.notifier_call = ftrace_module_notify_exit,
	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
};

void __init ftrace_init(void)
{
	extern unsigned long __start_mcount_loc[];
	extern unsigned long __stop_mcount_loc[];
	unsigned long count, flags;
	int ret;

	local_irq_save(flags);
	ret = ftrace_dyn_arch_init();
	local_irq_restore(flags);
	if (ret)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;
	if (!count) {
		pr_info("ftrace: No functions to be traced?\n");
		goto failed;
	}

	pr_info("ftrace: allocating %ld entries in %ld pages\n",
		count, count / ENTRIES_PER_PAGE + 1);

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_process_locs(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	ret = register_module_notifier(&ftrace_module_exit_nb);
	if (ret)
		pr_warning("Failed to register trace ftrace module exit notifier\n");

	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
};

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
core_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
static inline void ftrace_startup_all(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(ops, command)					\
	({								\
		int ___ret = __register_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
	})
# define ftrace_shutdown(ops, command)					\
	({								\
		int ___ret = __unregister_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
	})

# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)

static inline int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	return 1;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

__init void ftrace_init_global_array_ops(struct trace_array *tr)
{
	tr->ops = &global_ops;
	tr->ops->private = tr;
}

void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
{
	/* If we filter on pids, update to use the pid function */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		if (WARN_ON(tr->ops->func != ftrace_stub))
			printk("ftrace ops had %pS for function\n",
			       tr->ops->func);
		/* Only the top level instance does pid tracing */
		if (!list_empty(&ftrace_pids)) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}
	}
	tr->ops->func = func;
	tr->ops->private = tr;
}

void ftrace_reset_array_ops(struct trace_array *tr)
{
	tr->ops->func = ftrace_stub;
}

static void
ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
		return;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_sched().
	 */
	preempt_disable_notrace();
	trace_recursion_set(TRACE_CONTROL_BIT);

	/*
	 * Control funcs (perf) uses RCU. Only trace if
	 * RCU is currently active.
	 */
	if (!rcu_is_watching())
		goto out;

	do_for_each_ftrace_op(op, ftrace_control_list) {
		if (!(op->flags & FTRACE_OPS_FL_STUB) &&
		    !ftrace_function_local_disabled(op) &&
		    ftrace_ops_test(op, ip, regs))
			op->func(ip, parent_ip, op, regs);
	} while_for_each_ftrace_op(op);
 out:
	trace_recursion_clear(TRACE_CONTROL_BIT);
	preempt_enable_notrace();
}

static struct ftrace_ops control_ops = {
	.func	= ftrace_ops_control_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(control_ops)
};

static inline void
__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *ignored, struct pt_regs *regs)
{
	struct ftrace_ops *op;
	int bit;

	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_sched().
	 */
	preempt_disable_notrace();
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (ftrace_ops_test(op, ip, regs)) {
			if (FTRACE_WARN_ON(!op->func)) {
				pr_warn("op=%p %pS\n", op, op);
				goto out;
			}
			op->func(ip, parent_ip, op, regs);
		}
	} while_for_each_ftrace_op(op);
 out:
	preempt_enable_notrace();
	trace_clear_recursion(bit);
}

/*
 * Some archs only support passing ip and parent_ip. Even though
 * the list function ignores the op parameter, we do not want any
 * C side effects, where a function is called without the caller
 * sending a third parameter.
 * Archs are to support both the regs and ftrace_ops at the same time.
 * If they support ftrace_ops, it is assumed they support regs.
 * If call backs want to use regs, they must either check for regs
 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
 * An architecture can pass partial regs with ftrace_ops and still
 * set the ARCH_SUPPORTS_FTRACE_OPS.
 */
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs)
{
	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
}
#else
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
{
	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
#endif

/*
 * If there's only one function registered but it does not support
 * recursion, this function will be called by the mcount trampoline.
 * This function will handle recursion protection.
 */
static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;

	op->func(ip, parent_ip, op, regs);

	trace_clear_recursion(bit);
}

/**
 * ftrace_ops_get_func - get the function a trampoline should call
 * @ops: the ops to get the function for
 *
 * Normally the mcount trampoline will call the ops->func, but there
 * are times that it should not. For example, if the ops does not
 * have its own recursion protection, then it should call the
 * ftrace_ops_recurs_func() instead.
 *
 * Returns the function that the trampoline should call for @ops.
 */
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic ops or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	/*
	 * If the func handles its own recursion, call it directly.
	 * Otherwise call the recursion protected function that
	 * will call the ftrace ops function.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
		return ftrace_ops_recurs_func;

	return ops->func;
}

static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(pid);
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}

static int ftrace_pid_add(int p)
{
	struct pid *pid;
	struct ftrace_pid *fpid;
	int ret = -EINVAL;

	mutex_lock(&ftrace_lock);

	if (!p)
		pid = ftrace_swapper_pid;
	else
		pid = find_get_pid(p);

	if (!pid)
		goto out;

	ret = 0;

	list_for_each_entry(fpid, &ftrace_pids, list)
		if (fpid->pid == pid)
			goto out_put;

	ret = -ENOMEM;

	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
	if (!fpid)
		goto out_put;

	list_add(&fpid->list, &ftrace_pids);
	fpid->pid = pid;

	set_ftrace_pid_task(pid);

	ftrace_update_pid_func();

	ftrace_startup_all(0);

	mutex_unlock(&ftrace_lock);
	return 0;

 out_put:
	if (pid != ftrace_swapper_pid)
		put_pid(pid);

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}

static void ftrace_pid_reset(void)
{
	struct ftrace_pid *fpid, *safe;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
		struct pid *pid = fpid->pid;

		clear_ftrace_pid_task(pid);

		list_del(&fpid->list);
		kfree(fpid);
	}

	ftrace_update_pid_func();
	ftrace_startup_all(0);

	mutex_unlock(&ftrace_lock);
}

static void *fpid_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ftrace_lock);

	if (list_empty(&ftrace_pids) && (!*pos))
		return (void *) 1;

	return seq_list_start(&ftrace_pids, *pos);
}

static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	if (v == (void *)1)
		return NULL;

	return seq_list_next(v, &ftrace_pids, pos);
}

static void fpid_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

static int fpid_show(struct seq_file *m, void *v)
{
	const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);

	if (v == (void *)1) {
		seq_printf(m, "no pid\n");
		return 0;
	}

	if (fpid->pid == ftrace_swapper_pid)
		seq_printf(m, "swapper tasks\n");
	else
		seq_printf(m, "%u\n", pid_vnr(fpid->pid));

	return 0;
}

static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};

static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_pid_reset();

	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &ftrace_pid_sops);

	return ret;
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64], *tmp;
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/*
	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
	 * to clean the filter quietly.
	 */
	tmp = strstrip(buf);
	if (strlen(tmp) == 0)
		return 1;

	ret = kstrtol(tmp, 10, &val);
	if (ret < 0)
		return ret;

	ret = ftrace_pid_add(val);

	return ret ? ret : cnt;
}

static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	return 0;
}

static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= tracing_lseek,
	.release	= ftrace_pid_release,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			    NULL, &ftrace_pid_fops);

	ftrace_profile_debugfs(d_tracer);

	return 0;
}
fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * Test if ftrace is dead or not.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	ftrace_ops_init(ops);

	mutex_lock(&ftrace_lock);

	ret = ftrace_startup(ops, 0);

	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);

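/*
 * Illustrative sketch (not part of this file): the minimal pattern a
 * caller would use with the API above. "sample_*" names are made up.
 * As the comment above warns, the callback must be notrace (or the ops
 * marked RECURSION_SAFE must really be recursion safe).
 */
static void notrace sample_trace_func(unsigned long ip,
				      unsigned long parent_ip,
				      struct ftrace_ops *op,
				      struct pt_regs *regs)
{
	/* keep the body trivial; it runs on every traced function entry */
}

static struct ftrace_ops sample_trace_ops = {
	.func	= sample_trace_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int sample_tracing_start(void)
{
	/* a filter could be installed first via ftrace_set_filter() */
	return register_ftrace_function(&sample_trace_ops);
}

static void sample_tracing_stop(void)
{
	unregister_ftrace_function(&sample_trace_ops);
}
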
5231 * unregister_ftrace_function - unregister a function for profiling.
5232 * @ops - ops structure that holds the function to unregister
5234 * Unregister a function that was added to be called by ftrace profiling.
5236 int unregister_ftrace_function(struct ftrace_ops
*ops
)
5240 mutex_lock(&ftrace_lock
);
5241 ret
= ftrace_shutdown(ops
, 0);
5242 mutex_unlock(&ftrace_lock
);
5246 EXPORT_SYMBOL_GPL(unregister_ftrace_function
);
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_ops_list != &ftrace_list_end)
			update_ftrace_function();

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
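
/*
 * Illustrative userspace sketch (not compiled as part of this file): the
 * handler above services the kernel.ftrace_enabled sysctl, so the global
 * on/off switch can be flipped from userspace.  The /proc path below assumes
 * the usual procfs layout.
 */
#if 0
#include <stdio.h>

static int set_ftrace_enabled(int on)
{
	FILE *f = fopen("/proc/sys/kernel/ftrace_enabled", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", on ? 1 : 0);
	return fclose(f);
}
#endif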
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static struct ftrace_ops graph_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				   FTRACE_OPS_FL_INITIALIZED |
				   FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};
static int ftrace_graph_active;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
static void
ftrace_graph_probe_sched_switch(void *ignore,
			struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}

	return NOTIFY_DONE;
}
static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;

	return __ftrace_graph_entry(trace);
}
/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer requires testing the
 * function against the global ops, and not just trace any function
 * that any ftrace_ops has registered.
 */
static void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it is the function
	 * it should call.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}
static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
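
/*
 * Illustrative sketch (not compiled as part of this file): the entry/return
 * callback pair a graph tracer would hand to register_ftrace_graph().  The
 * names my_graph_entry/my_graph_return are made up for the example; only one
 * such pair can be registered at a time, as enforced above.
 */
#if 0
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	/* Return zero to skip this function, non-zero to trace it. */
	return 1;
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime brackets the traced function. */
}

static int my_graph_start(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}
#endif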
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

 out:
	mutex_unlock(&ftrace_lock);
}
static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}
/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */