/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/ftrace.h>
#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"
#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = !!(cond);		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = !!(cond);		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};
/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
};

static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;

static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
static void ftrace_global_list_func(unsigned long ip,
				    unsigned long parent_ip)
{
	struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/

	while (op != &ftrace_list_end) {
		op->func(ip, parent_ip);
		op = rcu_dereference_raw(op->next); /*see above*/
	}
}
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the moment the
 * function calls actually stop.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static void update_global_ops(void)
{
	ftrace_func_t func;

	/*
	 * If there's only one function registered, then call that
	 * function directly. Otherwise, we need to iterate over the
	 * registered callers.
	 */
	if (ftrace_global_list == &ftrace_list_end ||
	    ftrace_global_list->next == &ftrace_list_end)
		func = ftrace_global_list->func;
	else
		func = ftrace_global_list_func;

	/* If we filter on pids, update to use the pid function */
	if (!list_empty(&ftrace_pids)) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	}

	global_ops.func = func;
}
static void update_ftrace_function(void)
{
	ftrace_func_t func;

	update_global_ops();

	/*
	 * If we are at the end of the list and this ops is
	 * not dynamic, then have the mcount trampoline call
	 * the function directly
	 */
	if (ftrace_ops_list == &ftrace_list_end ||
	    (ftrace_ops_list->next == &ftrace_list_end &&
	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
		func = ftrace_ops_list->func;
	else
		func = ftrace_ops_list_func;

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
	ftrace_trace_function = ftrace_test_stop_func;
#endif
}
static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}
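
/*
 * The publish above is conceptually equivalent to:
 *
 *	ops->next = *list;	// point the new entry at the current head
 *	smp_wmb();		// make sure ->next is visible first
 *	*list = ops;		// then let readers see the new entry
 *
 * so a CPU concurrently walking the list with rcu_dereference_raw()
 * either misses the new ops entirely or sees it with a valid ->next,
 * never a half-initialized entry.
 */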
static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ftrace_disabled)
		return -ENODEV;

	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		int first = ftrace_global_list == &ftrace_list_end;
		add_ftrace_ops(&ftrace_global_list, ops);
		ops->flags |= FTRACE_OPS_FL_ENABLED;
		if (first)
			add_ftrace_ops(&ftrace_ops_list, &global_ops);
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}
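
/*
 * For reference, a minimal caller looks like the profiler further down in
 * this file: define a callback with the ftrace_func_t signature, wrap it
 * in a struct ftrace_ops and hand it to register_ftrace_function(), the
 * public wrapper around __register_ftrace_function().  A hypothetical
 * sketch (my_trace_func and my_ops are made-up names, not part of this
 * file):
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *		// called for every traced function; ip is the callee,
 *		// parent_ip is the call site
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */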
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (ftrace_disabled)
		return -ENODEV;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ret = remove_ftrace_ops(&ftrace_global_list, ops);
		if (!ret && ftrace_global_list == &ftrace_list_end)
			ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
		if (!ret)
			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
		synchronize_sched();

	return 0;
}

static void ftrace_update_pid_func(void)
{
	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	update_ftrace_function();
}
#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE						\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
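
/*
 * Rough sizing example (the exact numbers are architecture dependent):
 * with 4K pages and a struct ftrace_profile of about 48 bytes on 64-bit
 * (hlist_node + ip + counter + the two graph-tracer time fields),
 * PROFILE_RECORDS_SIZE is a little under 4096 and PROFILES_PER_PAGE works
 * out to roughly 85 records per page.
 */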
static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;

	return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;

	return 0;
}
#endif
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg             s^2\n"
		      "  --------                               "
		   "---    ----            ---             ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		   "  --------                               ---\n");
#endif
	return 0;
}
static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		stddev = rec->time_squared - rec->counter * avg * avg;
		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		do_div(stddev, (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
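
/*
 * The stddev computed above is the sample variance scaled for display:
 * with n = rec->counter hits, total time T = rec->time and avg = T/n,
 *
 *	s^2 = (sum(t_i^2) - n * avg^2) / (n - 1)
 *	    = (rec->time_squared - rec->counter * avg * avg) / (n - 1)
 *
 * The extra /1000 folds half of the ns^2 -> us^2 conversion into this
 * step because trace_print_graph_duration() divides by 1000 once more.
 */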
497 static void ftrace_profile_reset(struct ftrace_profile_stat
*stat
)
499 struct ftrace_profile_page
*pg
;
501 pg
= stat
->pages
= stat
->start
;
504 memset(pg
->records
, 0, PROFILE_RECORDS_SIZE
);
509 memset(stat
->hash
, 0,
510 FTRACE_PROFILE_HASH_SIZE
* sizeof(struct hlist_head
));
513 int ftrace_profile_pages_init(struct ftrace_profile_stat
*stat
)
515 struct ftrace_profile_page
*pg
;
520 /* If we already allocated, do nothing */
524 stat
->pages
= (void *)get_zeroed_page(GFP_KERNEL
);
528 #ifdef CONFIG_DYNAMIC_FTRACE
529 functions
= ftrace_update_tot_cnt
;
532 * We do not know the number of functions that exist because
533 * dynamic tracing is what counts them. With past experience
534 * we have around 20K functions. That should be more than enough.
535 * It is highly unlikely we will execute every function in
541 pg
= stat
->start
= stat
->pages
;
543 pages
= DIV_ROUND_UP(functions
, PROFILES_PER_PAGE
);
545 for (i
= 0; i
< pages
; i
++) {
546 pg
->next
= (void *)get_zeroed_page(GFP_KERNEL
);
557 unsigned long tmp
= (unsigned long)pg
;
563 free_page((unsigned long)stat
->pages
);
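
/*
 * Back-of-the-envelope for the allocation above: with the ~20000 function
 * estimate from the comment and roughly 85 profile records per 4K page,
 * DIV_ROUND_UP(20000, 85) is about 236 pages, i.e. on the order of 1MB of
 * zeroed pages per CPU for the profile data.
 */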
static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	if (!ftrace_profile_bits) {
		size--;

		for (; size; size >>= 1)
			ftrace_profile_bits++;
	}
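
	/*
	 * Worked example of the loop above: size starts at
	 * FTRACE_PROFILE_HASH_SIZE - 1 = 1023, and halving it down to zero
	 * takes 10 iterations, so ftrace_profile_bits ends up as 10 --
	 * exactly log2 of the 1024-bucket hash used by hash_long() below.
	 */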
	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_online_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}
625 /* interrupts must be disabled */
626 static struct ftrace_profile
*
627 ftrace_find_profiled_func(struct ftrace_profile_stat
*stat
, unsigned long ip
)
629 struct ftrace_profile
*rec
;
630 struct hlist_head
*hhd
;
631 struct hlist_node
*n
;
634 key
= hash_long(ip
, ftrace_profile_bits
);
635 hhd
= &stat
->hash
[key
];
637 if (hlist_empty(hhd
))
640 hlist_for_each_entry_rcu(rec
, n
, hhd
, node
) {
648 static void ftrace_add_profile(struct ftrace_profile_stat
*stat
,
649 struct ftrace_profile
*rec
)
653 key
= hash_long(rec
->ip
, ftrace_profile_bits
);
654 hlist_add_head_rcu(&rec
->node
, &stat
->hash
[key
]);
658 * The memory is already allocated, this simply finds a new record to use.
660 static struct ftrace_profile
*
661 ftrace_profile_alloc(struct ftrace_profile_stat
*stat
, unsigned long ip
)
663 struct ftrace_profile
*rec
= NULL
;
665 /* prevent recursion (from NMIs) */
666 if (atomic_inc_return(&stat
->disabled
) != 1)
670 * Try to find the function again since an NMI
671 * could have added it
673 rec
= ftrace_find_profiled_func(stat
, ip
);
677 if (stat
->pages
->index
== PROFILES_PER_PAGE
) {
678 if (!stat
->pages
->next
)
680 stat
->pages
= stat
->pages
->next
;
683 rec
= &stat
->pages
->records
[stat
->pages
->index
++];
685 ftrace_add_profile(stat
, rec
);
688 atomic_dec(&stat
->disabled
);
694 function_profile_call(unsigned long ip
, unsigned long parent_ip
)
696 struct ftrace_profile_stat
*stat
;
697 struct ftrace_profile
*rec
;
700 if (!ftrace_profile_enabled
)
703 local_irq_save(flags
);
705 stat
= &__get_cpu_var(ftrace_profile_stats
);
706 if (!stat
->hash
|| !ftrace_profile_enabled
)
709 rec
= ftrace_find_profiled_func(stat
, ip
);
711 rec
= ftrace_profile_alloc(stat
, ip
);
718 local_irq_restore(flags
);
721 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
722 static int profile_graph_entry(struct ftrace_graph_ent
*trace
)
724 function_profile_call(trace
->func
, 0);
728 static void profile_graph_return(struct ftrace_graph_ret
*trace
)
730 struct ftrace_profile_stat
*stat
;
731 unsigned long long calltime
;
732 struct ftrace_profile
*rec
;
735 local_irq_save(flags
);
736 stat
= &__get_cpu_var(ftrace_profile_stats
);
737 if (!stat
->hash
|| !ftrace_profile_enabled
)
740 /* If the calltime was zero'd ignore it */
741 if (!trace
->calltime
)
744 calltime
= trace
->rettime
- trace
->calltime
;
746 if (!(trace_flags
& TRACE_ITER_GRAPH_TIME
)) {
749 index
= trace
->depth
;
751 /* Append this call time to the parent time to subtract */
753 current
->ret_stack
[index
- 1].subtime
+= calltime
;
755 if (current
->ret_stack
[index
].subtime
< calltime
)
756 calltime
-= current
->ret_stack
[index
].subtime
;
761 rec
= ftrace_find_profiled_func(stat
, trace
->func
);
763 rec
->time
+= calltime
;
764 rec
->time_squared
+= calltime
* calltime
;
768 local_irq_restore(flags
);
771 static int register_ftrace_profiler(void)
773 return register_ftrace_graph(&profile_graph_return
,
774 &profile_graph_entry
);
777 static void unregister_ftrace_profiler(void)
779 unregister_ftrace_graph();
782 static struct ftrace_ops ftrace_profile_ops __read_mostly
=
784 .func
= function_profile_call
,
787 static int register_ftrace_profiler(void)
789 return register_ftrace_function(&ftrace_profile_ops
);
792 static void unregister_ftrace_profiler(void)
794 unregister_ftrace_function(&ftrace_profile_ops
);
796 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
799 ftrace_profile_write(struct file
*filp
, const char __user
*ubuf
,
800 size_t cnt
, loff_t
*ppos
)
803 char buf
[64]; /* big enough to hold a number */
806 if (cnt
>= sizeof(buf
))
809 if (copy_from_user(&buf
, ubuf
, cnt
))
814 ret
= strict_strtoul(buf
, 10, &val
);
820 mutex_lock(&ftrace_profile_lock
);
821 if (ftrace_profile_enabled
^ val
) {
823 ret
= ftrace_profile_init();
829 ret
= register_ftrace_profiler();
834 ftrace_profile_enabled
= 1;
836 ftrace_profile_enabled
= 0;
838 * unregister_ftrace_profiler calls stop_machine
839 * so this acts like an synchronize_sched.
841 unregister_ftrace_profiler();
845 mutex_unlock(&ftrace_profile_lock
);
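
/*
 * Typical usage of the knob above from user space (paths assume debugfs
 * is mounted at /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	cat /sys/kernel/debug/tracing/trace_stat/function0
 *	echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
 *
 * The per-cpu "function<N>" stat files are the ones registered in
 * ftrace_profile_debugfs() below.
 */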
853 ftrace_profile_read(struct file
*filp
, char __user
*ubuf
,
854 size_t cnt
, loff_t
*ppos
)
856 char buf
[64]; /* big enough to hold a number */
859 r
= sprintf(buf
, "%u\n", ftrace_profile_enabled
);
860 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
863 static const struct file_operations ftrace_profile_fops
= {
864 .open
= tracing_open_generic
,
865 .read
= ftrace_profile_read
,
866 .write
= ftrace_profile_write
,
867 .llseek
= default_llseek
,
870 /* used to initialize the real stat files */
871 static struct tracer_stat function_stats __initdata
= {
873 .stat_start
= function_stat_start
,
874 .stat_next
= function_stat_next
,
875 .stat_cmp
= function_stat_cmp
,
876 .stat_headers
= function_stat_headers
,
877 .stat_show
= function_stat_show
880 static __init
void ftrace_profile_debugfs(struct dentry
*d_tracer
)
882 struct ftrace_profile_stat
*stat
;
883 struct dentry
*entry
;
888 for_each_possible_cpu(cpu
) {
889 stat
= &per_cpu(ftrace_profile_stats
, cpu
);
891 /* allocate enough for function name + cpu number */
892 name
= kmalloc(32, GFP_KERNEL
);
895 * The files created are permanent, if something happens
896 * we still do not free memory.
899 "Could not allocate stat file for cpu %d\n",
903 stat
->stat
= function_stats
;
904 snprintf(name
, 32, "function%d", cpu
);
905 stat
->stat
.name
= name
;
906 ret
= register_stat_tracer(&stat
->stat
);
909 "Could not register function stat for cpu %d\n",
916 entry
= debugfs_create_file("function_profile_enabled", 0644,
917 d_tracer
, NULL
, &ftrace_profile_fops
);
919 pr_warning("Could not create debugfs "
920 "'function_profile_enabled' entry\n");
923 #else /* CONFIG_FUNCTION_PROFILER */
924 static __init
void ftrace_profile_debugfs(struct dentry
*d_tracer
)
927 #endif /* CONFIG_FUNCTION_PROFILER */
929 static struct pid
* const ftrace_swapper_pid
= &init_struct_pid
;
931 #ifdef CONFIG_DYNAMIC_FTRACE
933 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
934 # error Dynamic ftrace depends on MCOUNT_RECORD
937 static struct hlist_head ftrace_func_hash
[FTRACE_FUNC_HASHSIZE
] __read_mostly
;
939 struct ftrace_func_probe
{
940 struct hlist_node node
;
941 struct ftrace_probe_ops
*ops
;
949 FTRACE_ENABLE_CALLS
= (1 << 0),
950 FTRACE_DISABLE_CALLS
= (1 << 1),
951 FTRACE_UPDATE_TRACE_FUNC
= (1 << 2),
952 FTRACE_START_FUNC_RET
= (1 << 3),
953 FTRACE_STOP_FUNC_RET
= (1 << 4),
955 struct ftrace_func_entry
{
956 struct hlist_node hlist
;
961 unsigned long size_bits
;
962 struct hlist_head
*buckets
;
968 * We make these constant because no one should touch them,
969 * but they are used as the default "empty hash", to avoid allocating
970 * it all the time. These are in a read only section such that if
971 * anyone does try to modify it, it will cause an exception.
973 static const struct hlist_head empty_buckets
[1];
974 static const struct ftrace_hash empty_hash
= {
975 .buckets
= (struct hlist_head
*)empty_buckets
,
977 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
979 static struct ftrace_ops global_ops
= {
981 .notrace_hash
= EMPTY_HASH
,
982 .filter_hash
= EMPTY_HASH
,
985 static struct dyn_ftrace
*ftrace_new_addrs
;
987 static DEFINE_MUTEX(ftrace_regex_lock
);
990 struct ftrace_page
*next
;
992 struct dyn_ftrace records
[];
995 #define ENTRIES_PER_PAGE \
996 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
998 /* estimate from running different kernels */
999 #define NR_TO_INIT 10000
1001 static struct ftrace_page
*ftrace_pages_start
;
1002 static struct ftrace_page
*ftrace_pages
;
1004 static struct dyn_ftrace
*ftrace_free_records
;
1006 static struct ftrace_func_entry
*
1007 ftrace_lookup_ip(struct ftrace_hash
*hash
, unsigned long ip
)
1010 struct ftrace_func_entry
*entry
;
1011 struct hlist_head
*hhd
;
1012 struct hlist_node
*n
;
1017 if (hash
->size_bits
> 0)
1018 key
= hash_long(ip
, hash
->size_bits
);
1022 hhd
= &hash
->buckets
[key
];
1024 hlist_for_each_entry_rcu(entry
, n
, hhd
, hlist
) {
1025 if (entry
->ip
== ip
)
1031 static void __add_hash_entry(struct ftrace_hash
*hash
,
1032 struct ftrace_func_entry
*entry
)
1034 struct hlist_head
*hhd
;
1037 if (hash
->size_bits
)
1038 key
= hash_long(entry
->ip
, hash
->size_bits
);
1042 hhd
= &hash
->buckets
[key
];
1043 hlist_add_head(&entry
->hlist
, hhd
);
1047 static int add_hash_entry(struct ftrace_hash
*hash
, unsigned long ip
)
1049 struct ftrace_func_entry
*entry
;
1051 entry
= kmalloc(sizeof(*entry
), GFP_KERNEL
);
1056 __add_hash_entry(hash
, entry
);
1062 free_hash_entry(struct ftrace_hash
*hash
,
1063 struct ftrace_func_entry
*entry
)
1065 hlist_del(&entry
->hlist
);
1071 remove_hash_entry(struct ftrace_hash
*hash
,
1072 struct ftrace_func_entry
*entry
)
1074 hlist_del(&entry
->hlist
);
1078 static void ftrace_hash_clear(struct ftrace_hash
*hash
)
1080 struct hlist_head
*hhd
;
1081 struct hlist_node
*tp
, *tn
;
1082 struct ftrace_func_entry
*entry
;
1083 int size
= 1 << hash
->size_bits
;
1089 for (i
= 0; i
< size
; i
++) {
1090 hhd
= &hash
->buckets
[i
];
1091 hlist_for_each_entry_safe(entry
, tp
, tn
, hhd
, hlist
)
1092 free_hash_entry(hash
, entry
);
1094 FTRACE_WARN_ON(hash
->count
);
1097 static void free_ftrace_hash(struct ftrace_hash
*hash
)
1099 if (!hash
|| hash
== EMPTY_HASH
)
1101 ftrace_hash_clear(hash
);
1102 kfree(hash
->buckets
);
1106 static void __free_ftrace_hash_rcu(struct rcu_head
*rcu
)
1108 struct ftrace_hash
*hash
;
1110 hash
= container_of(rcu
, struct ftrace_hash
, rcu
);
1111 free_ftrace_hash(hash
);
1114 static void free_ftrace_hash_rcu(struct ftrace_hash
*hash
)
1116 if (!hash
|| hash
== EMPTY_HASH
)
1118 call_rcu_sched(&hash
->rcu
, __free_ftrace_hash_rcu
);
1121 static struct ftrace_hash
*alloc_ftrace_hash(int size_bits
)
1123 struct ftrace_hash
*hash
;
1126 hash
= kzalloc(sizeof(*hash
), GFP_KERNEL
);
1130 size
= 1 << size_bits
;
1131 hash
->buckets
= kzalloc(sizeof(*hash
->buckets
) * size
, GFP_KERNEL
);
1133 if (!hash
->buckets
) {
1138 hash
->size_bits
= size_bits
;
1143 static struct ftrace_hash
*
1144 alloc_and_copy_ftrace_hash(int size_bits
, struct ftrace_hash
*hash
)
1146 struct ftrace_func_entry
*entry
;
1147 struct ftrace_hash
*new_hash
;
1148 struct hlist_node
*tp
;
1153 new_hash
= alloc_ftrace_hash(size_bits
);
1158 if (!hash
|| !hash
->count
)
1161 size
= 1 << hash
->size_bits
;
1162 for (i
= 0; i
< size
; i
++) {
1163 hlist_for_each_entry(entry
, tp
, &hash
->buckets
[i
], hlist
) {
1164 ret
= add_hash_entry(new_hash
, entry
->ip
);
1170 FTRACE_WARN_ON(new_hash
->count
!= hash
->count
);
1175 free_ftrace_hash(new_hash
);
1180 ftrace_hash_move(struct ftrace_hash
**dst
, struct ftrace_hash
*src
)
1182 struct ftrace_func_entry
*entry
;
1183 struct hlist_node
*tp
, *tn
;
1184 struct hlist_head
*hhd
;
1185 struct ftrace_hash
*old_hash
;
1186 struct ftrace_hash
*new_hash
;
1188 int size
= src
->count
;
1193 * If the new source is empty, just free dst and assign it
1197 free_ftrace_hash_rcu(*dst
);
1198 rcu_assign_pointer(*dst
, EMPTY_HASH
);
1203 * Make the hash size about 1/2 the # found
1205 for (size
/= 2; size
; size
>>= 1)
1208 /* Don't allocate too much */
1209 if (bits
> FTRACE_HASH_MAX_BITS
)
1210 bits
= FTRACE_HASH_MAX_BITS
;
1212 new_hash
= alloc_ftrace_hash(bits
);
1216 size
= 1 << src
->size_bits
;
1217 for (i
= 0; i
< size
; i
++) {
1218 hhd
= &src
->buckets
[i
];
1219 hlist_for_each_entry_safe(entry
, tp
, tn
, hhd
, hlist
) {
1221 key
= hash_long(entry
->ip
, bits
);
1224 remove_hash_entry(src
, entry
);
1225 __add_hash_entry(new_hash
, entry
);
1230 rcu_assign_pointer(*dst
, new_hash
);
1231 free_ftrace_hash_rcu(old_hash
);
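
/*
 * Example of the sizing logic above: if the source hash holds 50 entries,
 * the "about 1/2" loop starts at size = 25 and halves it to zero in five
 * steps, so bits = 5 and the new hash gets 32 buckets; bits is then capped
 * at FTRACE_HASH_MAX_BITS (12), so even a huge filter never allocates more
 * than 4096 buckets.
 */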
/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
	struct ftrace_hash *filter_hash;
	struct ftrace_hash *notrace_hash;
	int ret;

	filter_hash = rcu_dereference_raw(ops->filter_hash);
	notrace_hash = rcu_dereference_raw(ops->notrace_hash);

	if ((!filter_hash || !filter_hash->count ||
	     ftrace_lookup_ip(filter_hash, ip)) &&
	    (!notrace_hash || !notrace_hash->count ||
	     !ftrace_lookup_ip(notrace_hash, ip)))
		ret = 1;
	else
		ret = 0;

	return ret;
}
/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
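
/*
 * The pair is used like this throughout the file (see ftrace_replace_code()
 * and friends); because the macros hide two nested for loops, an early exit
 * has to be a goto rather than a break.  Illustrative sketch only
 * (something_went_wrong is a placeholder, not a real symbol):
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_FREE)
 *			continue;
 *		if (something_went_wrong)
 *			goto out;
 *	} while_for_each_ftrace_rec();
 *  out:
 */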
1283 static void __ftrace_hash_rec_update(struct ftrace_ops
*ops
,
1287 struct ftrace_hash
*hash
;
1288 struct ftrace_hash
*other_hash
;
1289 struct ftrace_page
*pg
;
1290 struct dyn_ftrace
*rec
;
1294 /* Only update if the ops has been registered */
1295 if (!(ops
->flags
& FTRACE_OPS_FL_ENABLED
))
1299 * In the filter_hash case:
1300 * If the count is zero, we update all records.
1301 * Otherwise we just update the items in the hash.
1303 * In the notrace_hash case:
1304 * We enable the update in the hash.
1305 * As disabling notrace means enabling the tracing,
1306 * and enabling notrace means disabling, the inc variable
1310 hash
= ops
->filter_hash
;
1311 other_hash
= ops
->notrace_hash
;
1312 if (!hash
|| !hash
->count
)
1316 hash
= ops
->notrace_hash
;
1317 other_hash
= ops
->filter_hash
;
1319 * If the notrace hash has no items,
1320 * then there's nothing to do.
1322 if (hash
&& !hash
->count
)
1326 do_for_each_ftrace_rec(pg
, rec
) {
1327 int in_other_hash
= 0;
1333 * Only the filter_hash affects all records.
1334 * Update if the record is not in the notrace hash.
1336 if (!other_hash
|| !ftrace_lookup_ip(other_hash
, rec
->ip
))
1339 in_hash
= hash
&& !!ftrace_lookup_ip(hash
, rec
->ip
);
1340 in_other_hash
= other_hash
&& !!ftrace_lookup_ip(other_hash
, rec
->ip
);
1345 if (filter_hash
&& in_hash
&& !in_other_hash
)
1347 else if (!filter_hash
&& in_hash
&&
1348 (in_other_hash
|| !other_hash
->count
))
1356 if (FTRACE_WARN_ON((rec
->flags
& ~FTRACE_FL_MASK
) == FTRACE_REF_MAX
))
1359 if (FTRACE_WARN_ON((rec
->flags
& ~FTRACE_FL_MASK
) == 0))
1364 /* Shortcut, if we handled all records, we are done. */
1365 if (!all
&& count
== hash
->count
)
1367 } while_for_each_ftrace_rec();
1370 static void ftrace_hash_rec_disable(struct ftrace_ops
*ops
,
1373 __ftrace_hash_rec_update(ops
, filter_hash
, 0);
1376 static void ftrace_hash_rec_enable(struct ftrace_ops
*ops
,
1379 __ftrace_hash_rec_update(ops
, filter_hash
, 1);
1382 static void ftrace_free_rec(struct dyn_ftrace
*rec
)
1384 rec
->freelist
= ftrace_free_records
;
1385 ftrace_free_records
= rec
;
1386 rec
->flags
|= FTRACE_FL_FREE
;
1389 static struct dyn_ftrace
*ftrace_alloc_dyn_node(unsigned long ip
)
1391 struct dyn_ftrace
*rec
;
1393 /* First check for freed records */
1394 if (ftrace_free_records
) {
1395 rec
= ftrace_free_records
;
1397 if (unlikely(!(rec
->flags
& FTRACE_FL_FREE
))) {
1398 FTRACE_WARN_ON_ONCE(1);
1399 ftrace_free_records
= NULL
;
1403 ftrace_free_records
= rec
->freelist
;
1404 memset(rec
, 0, sizeof(*rec
));
1408 if (ftrace_pages
->index
== ENTRIES_PER_PAGE
) {
1409 if (!ftrace_pages
->next
) {
1410 /* allocate another page */
1411 ftrace_pages
->next
=
1412 (void *)get_zeroed_page(GFP_KERNEL
);
1413 if (!ftrace_pages
->next
)
1416 ftrace_pages
= ftrace_pages
->next
;
1419 return &ftrace_pages
->records
[ftrace_pages
->index
++];
1422 static struct dyn_ftrace
*
1423 ftrace_record_ip(unsigned long ip
)
1425 struct dyn_ftrace
*rec
;
1427 if (ftrace_disabled
)
1430 rec
= ftrace_alloc_dyn_node(ip
);
1435 rec
->newlist
= ftrace_new_addrs
;
1436 ftrace_new_addrs
= rec
;
1441 static void print_ip_ins(const char *fmt
, unsigned char *p
)
1445 printk(KERN_CONT
"%s", fmt
);
1447 for (i
= 0; i
< MCOUNT_INSN_SIZE
; i
++)
1448 printk(KERN_CONT
"%s%02x", i
? ":" : "", p
[i
]);
1451 static void ftrace_bug(int failed
, unsigned long ip
)
1455 FTRACE_WARN_ON_ONCE(1);
1456 pr_info("ftrace faulted on modifying ");
1460 FTRACE_WARN_ON_ONCE(1);
1461 pr_info("ftrace failed to modify ");
1463 print_ip_ins(" actual: ", (unsigned char *)ip
);
1464 printk(KERN_CONT
"\n");
1467 FTRACE_WARN_ON_ONCE(1);
1468 pr_info("ftrace faulted on writing ");
1472 FTRACE_WARN_ON_ONCE(1);
1473 pr_info("ftrace faulted on unknown error ");
1479 /* Return 1 if the address range is reserved for ftrace */
1480 int ftrace_text_reserved(void *start
, void *end
)
1482 struct dyn_ftrace
*rec
;
1483 struct ftrace_page
*pg
;
1485 do_for_each_ftrace_rec(pg
, rec
) {
1486 if (rec
->ip
<= (unsigned long)end
&&
1487 rec
->ip
+ MCOUNT_INSN_SIZE
> (unsigned long)start
)
1489 } while_for_each_ftrace_rec();
1495 __ftrace_replace_code(struct dyn_ftrace
*rec
, int enable
)
1497 unsigned long ftrace_addr
;
1498 unsigned long flag
= 0UL;
1500 ftrace_addr
= (unsigned long)FTRACE_ADDR
;
1503 * If we are enabling tracing:
1505 * If the record has a ref count, then we need to enable it
1506 * because someone is using it.
1508 * Otherwise we make sure its disabled.
1510 * If we are disabling tracing, then disable all records that
1513 if (enable
&& (rec
->flags
& ~FTRACE_FL_MASK
))
1514 flag
= FTRACE_FL_ENABLED
;
1516 /* If the state of this record hasn't changed, then do nothing */
1517 if ((rec
->flags
& FTRACE_FL_ENABLED
) == flag
)
1521 rec
->flags
|= FTRACE_FL_ENABLED
;
1522 return ftrace_make_call(rec
, ftrace_addr
);
1525 rec
->flags
&= ~FTRACE_FL_ENABLED
;
1526 return ftrace_make_nop(NULL
, rec
, ftrace_addr
);
1529 static void ftrace_replace_code(int enable
)
1531 struct dyn_ftrace
*rec
;
1532 struct ftrace_page
*pg
;
1535 if (unlikely(ftrace_disabled
))
1538 do_for_each_ftrace_rec(pg
, rec
) {
1539 /* Skip over free records */
1540 if (rec
->flags
& FTRACE_FL_FREE
)
1543 failed
= __ftrace_replace_code(rec
, enable
);
1545 ftrace_bug(failed
, rec
->ip
);
1546 /* Stop processing */
1549 } while_for_each_ftrace_rec();
1553 ftrace_code_disable(struct module
*mod
, struct dyn_ftrace
*rec
)
1560 if (unlikely(ftrace_disabled
))
1563 ret
= ftrace_make_nop(mod
, rec
, MCOUNT_ADDR
);
1565 ftrace_bug(ret
, ip
);
/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}
1589 static int __ftrace_modify_code(void *data
)
1591 int *command
= data
;
1593 if (*command
& FTRACE_ENABLE_CALLS
)
1594 ftrace_replace_code(1);
1595 else if (*command
& FTRACE_DISABLE_CALLS
)
1596 ftrace_replace_code(0);
1598 if (*command
& FTRACE_UPDATE_TRACE_FUNC
)
1599 ftrace_update_ftrace_func(ftrace_trace_function
);
1601 if (*command
& FTRACE_START_FUNC_RET
)
1602 ftrace_enable_ftrace_graph_caller();
1603 else if (*command
& FTRACE_STOP_FUNC_RET
)
1604 ftrace_disable_ftrace_graph_caller();
1609 static void ftrace_run_update_code(int command
)
1613 ret
= ftrace_arch_code_modify_prepare();
1614 FTRACE_WARN_ON(ret
);
1618 stop_machine(__ftrace_modify_code
, &command
, NULL
);
1620 ret
= ftrace_arch_code_modify_post_process();
1621 FTRACE_WARN_ON(ret
);
1624 static ftrace_func_t saved_ftrace_func
;
1625 static int ftrace_start_up
;
1626 static int global_start_up
;
1628 static void ftrace_startup_enable(int command
)
1630 if (saved_ftrace_func
!= ftrace_trace_function
) {
1631 saved_ftrace_func
= ftrace_trace_function
;
1632 command
|= FTRACE_UPDATE_TRACE_FUNC
;
1635 if (!command
|| !ftrace_enabled
)
1638 ftrace_run_update_code(command
);
1641 static void ftrace_startup(struct ftrace_ops
*ops
, int command
)
1643 bool hash_enable
= true;
1645 if (unlikely(ftrace_disabled
))
1649 command
|= FTRACE_ENABLE_CALLS
;
1651 /* ops marked global share the filter hashes */
1652 if (ops
->flags
& FTRACE_OPS_FL_GLOBAL
) {
1654 /* Don't update hash if global is already set */
1655 if (global_start_up
)
1656 hash_enable
= false;
1660 ops
->flags
|= FTRACE_OPS_FL_ENABLED
;
1662 ftrace_hash_rec_enable(ops
, 1);
1664 ftrace_startup_enable(command
);
1667 static void ftrace_shutdown(struct ftrace_ops
*ops
, int command
)
1669 bool hash_disable
= true;
1671 if (unlikely(ftrace_disabled
))
1676 * Just warn in case of unbalance, no need to kill ftrace, it's not
1677 * critical but the ftrace_call callers may be never nopped again after
1678 * further ftrace uses.
1680 WARN_ON_ONCE(ftrace_start_up
< 0);
1682 if (ops
->flags
& FTRACE_OPS_FL_GLOBAL
) {
1685 WARN_ON_ONCE(global_start_up
< 0);
1686 /* Don't update hash if global still has users */
1687 if (global_start_up
) {
1688 WARN_ON_ONCE(!ftrace_start_up
);
1689 hash_disable
= false;
1694 ftrace_hash_rec_disable(ops
, 1);
1696 if (ops
!= &global_ops
|| !global_start_up
)
1697 ops
->flags
&= ~FTRACE_OPS_FL_ENABLED
;
1699 if (!ftrace_start_up
)
1700 command
|= FTRACE_DISABLE_CALLS
;
1702 if (saved_ftrace_func
!= ftrace_trace_function
) {
1703 saved_ftrace_func
= ftrace_trace_function
;
1704 command
|= FTRACE_UPDATE_TRACE_FUNC
;
1707 if (!command
|| !ftrace_enabled
)
1710 ftrace_run_update_code(command
);
1713 static void ftrace_startup_sysctl(void)
1715 if (unlikely(ftrace_disabled
))
1718 /* Force update next time */
1719 saved_ftrace_func
= NULL
;
1720 /* ftrace_start_up is true if we want ftrace running */
1721 if (ftrace_start_up
)
1722 ftrace_run_update_code(FTRACE_ENABLE_CALLS
);
1725 static void ftrace_shutdown_sysctl(void)
1727 if (unlikely(ftrace_disabled
))
1730 /* ftrace_start_up is true if ftrace is running */
1731 if (ftrace_start_up
)
1732 ftrace_run_update_code(FTRACE_DISABLE_CALLS
);
1735 static cycle_t ftrace_update_time
;
1736 static unsigned long ftrace_update_cnt
;
1737 unsigned long ftrace_update_tot_cnt
;
1739 static int ftrace_update_code(struct module
*mod
)
1741 struct dyn_ftrace
*p
;
1742 cycle_t start
, stop
;
1744 start
= ftrace_now(raw_smp_processor_id());
1745 ftrace_update_cnt
= 0;
1747 while (ftrace_new_addrs
) {
1749 /* If something went wrong, bail without enabling anything */
1750 if (unlikely(ftrace_disabled
))
1753 p
= ftrace_new_addrs
;
1754 ftrace_new_addrs
= p
->newlist
;
1758 * Do the initial record conversion from mcount jump
1759 * to the NOP instructions.
1761 if (!ftrace_code_disable(mod
, p
)) {
1767 ftrace_update_cnt
++;
1770 * If the tracing is enabled, go ahead and enable the record.
1772 * The reason not to enable the record immediatelly is the
1773 * inherent check of ftrace_make_nop/ftrace_make_call for
1774 * correct previous instructions. Making first the NOP
1775 * conversion puts the module to the correct state, thus
1776 * passing the ftrace_make_call check.
1778 if (ftrace_start_up
) {
1779 int failed
= __ftrace_replace_code(p
, 1);
1781 ftrace_bug(failed
, p
->ip
);
1787 stop
= ftrace_now(raw_smp_processor_id());
1788 ftrace_update_time
= stop
- start
;
1789 ftrace_update_tot_cnt
+= ftrace_update_cnt
;
1794 static int __init
ftrace_dyn_table_alloc(unsigned long num_to_init
)
1796 struct ftrace_page
*pg
;
1800 /* allocate a few pages */
1801 ftrace_pages_start
= (void *)get_zeroed_page(GFP_KERNEL
);
1802 if (!ftrace_pages_start
)
1806 * Allocate a few more pages.
1808 * TODO: have some parser search vmlinux before
1809 * final linking to find all calls to ftrace.
1811 * a) know how many pages to allocate.
1813 * b) set up the table then.
1815 * The dynamic code is still necessary for
1819 pg
= ftrace_pages
= ftrace_pages_start
;
1821 cnt
= num_to_init
/ ENTRIES_PER_PAGE
;
1822 pr_info("ftrace: allocating %ld entries in %d pages\n",
1823 num_to_init
, cnt
+ 1);
1825 for (i
= 0; i
< cnt
; i
++) {
1826 pg
->next
= (void *)get_zeroed_page(GFP_KERNEL
);
1828 /* If we fail, we'll try later anyway */
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_HASH	= (1 << 3),
	FTRACE_ITER_ENABLED	= (1 << 4),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1848 struct ftrace_iterator
{
1851 struct ftrace_page
*pg
;
1852 struct dyn_ftrace
*func
;
1853 struct ftrace_func_probe
*probe
;
1854 struct trace_parser parser
;
1855 struct ftrace_hash
*hash
;
1856 struct ftrace_ops
*ops
;
1863 t_hash_next(struct seq_file
*m
, loff_t
*pos
)
1865 struct ftrace_iterator
*iter
= m
->private;
1866 struct hlist_node
*hnd
= NULL
;
1867 struct hlist_head
*hhd
;
1873 hnd
= &iter
->probe
->node
;
1875 if (iter
->hidx
>= FTRACE_FUNC_HASHSIZE
)
1878 hhd
= &ftrace_func_hash
[iter
->hidx
];
1880 if (hlist_empty(hhd
)) {
1896 if (WARN_ON_ONCE(!hnd
))
1899 iter
->probe
= hlist_entry(hnd
, struct ftrace_func_probe
, node
);
1904 static void *t_hash_start(struct seq_file
*m
, loff_t
*pos
)
1906 struct ftrace_iterator
*iter
= m
->private;
1910 if (iter
->func_pos
> *pos
)
1914 for (l
= 0; l
<= (*pos
- iter
->func_pos
); ) {
1915 p
= t_hash_next(m
, &l
);
1922 /* Only set this if we have an item */
1923 iter
->flags
|= FTRACE_ITER_HASH
;
1929 t_hash_show(struct seq_file
*m
, struct ftrace_iterator
*iter
)
1931 struct ftrace_func_probe
*rec
;
1934 if (WARN_ON_ONCE(!rec
))
1937 if (rec
->ops
->print
)
1938 return rec
->ops
->print(m
, rec
->ip
, rec
->ops
, rec
->data
);
1940 seq_printf(m
, "%ps:%ps", (void *)rec
->ip
, (void *)rec
->ops
->func
);
1943 seq_printf(m
, ":%p", rec
->data
);
1950 t_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
1952 struct ftrace_iterator
*iter
= m
->private;
1953 struct ftrace_ops
*ops
= &global_ops
;
1954 struct dyn_ftrace
*rec
= NULL
;
1956 if (unlikely(ftrace_disabled
))
1959 if (iter
->flags
& FTRACE_ITER_HASH
)
1960 return t_hash_next(m
, pos
);
1963 iter
->pos
= iter
->func_pos
= *pos
;
1965 if (iter
->flags
& FTRACE_ITER_PRINTALL
)
1966 return t_hash_start(m
, pos
);
1969 if (iter
->idx
>= iter
->pg
->index
) {
1970 if (iter
->pg
->next
) {
1971 iter
->pg
= iter
->pg
->next
;
1976 rec
= &iter
->pg
->records
[iter
->idx
++];
1977 if ((rec
->flags
& FTRACE_FL_FREE
) ||
1979 ((iter
->flags
& FTRACE_ITER_FILTER
) &&
1980 !(ftrace_lookup_ip(ops
->filter_hash
, rec
->ip
))) ||
1982 ((iter
->flags
& FTRACE_ITER_NOTRACE
) &&
1983 !ftrace_lookup_ip(ops
->notrace_hash
, rec
->ip
)) ||
1985 ((iter
->flags
& FTRACE_ITER_ENABLED
) &&
1986 !(rec
->flags
& ~FTRACE_FL_MASK
))) {
1994 return t_hash_start(m
, pos
);
static void reset_iter_read(struct ftrace_iterator *iter)
{
	iter->pos = 0;
	iter->func_pos = 0;
	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
}
2008 static void *t_start(struct seq_file
*m
, loff_t
*pos
)
2010 struct ftrace_iterator
*iter
= m
->private;
2011 struct ftrace_ops
*ops
= &global_ops
;
2015 mutex_lock(&ftrace_lock
);
2017 if (unlikely(ftrace_disabled
))
2021 * If an lseek was done, then reset and start from beginning.
2023 if (*pos
< iter
->pos
)
2024 reset_iter_read(iter
);
2027 * For set_ftrace_filter reading, if we have the filter
2028 * off, we can short cut and just print out that all
2029 * functions are enabled.
2031 if (iter
->flags
& FTRACE_ITER_FILTER
&& !ops
->filter_hash
->count
) {
2033 return t_hash_start(m
, pos
);
2034 iter
->flags
|= FTRACE_ITER_PRINTALL
;
2035 /* reset in case of seek/pread */
2036 iter
->flags
&= ~FTRACE_ITER_HASH
;
2040 if (iter
->flags
& FTRACE_ITER_HASH
)
2041 return t_hash_start(m
, pos
);
2044 * Unfortunately, we need to restart at ftrace_pages_start
2045 * every time we let go of the ftrace_mutex. This is because
2046 * those pointers can change without the lock.
2048 iter
->pg
= ftrace_pages_start
;
2050 for (l
= 0; l
<= *pos
; ) {
2051 p
= t_next(m
, p
, &l
);
2057 if (iter
->flags
& FTRACE_ITER_FILTER
)
2058 return t_hash_start(m
, pos
);
2066 static void t_stop(struct seq_file
*m
, void *p
)
2068 mutex_unlock(&ftrace_lock
);
2071 static int t_show(struct seq_file
*m
, void *v
)
2073 struct ftrace_iterator
*iter
= m
->private;
2074 struct dyn_ftrace
*rec
;
2076 if (iter
->flags
& FTRACE_ITER_HASH
)
2077 return t_hash_show(m
, iter
);
2079 if (iter
->flags
& FTRACE_ITER_PRINTALL
) {
2080 seq_printf(m
, "#### all functions enabled ####\n");
2089 seq_printf(m
, "%ps", (void *)rec
->ip
);
2090 if (iter
->flags
& FTRACE_ITER_ENABLED
)
2091 seq_printf(m
, " (%ld)",
2092 rec
->flags
& ~FTRACE_FL_MASK
);
2093 seq_printf(m
, "\n");
2098 static const struct seq_operations show_ftrace_seq_ops
= {
2106 ftrace_avail_open(struct inode
*inode
, struct file
*file
)
2108 struct ftrace_iterator
*iter
;
2111 if (unlikely(ftrace_disabled
))
2114 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
2118 iter
->pg
= ftrace_pages_start
;
2120 ret
= seq_open(file
, &show_ftrace_seq_ops
);
2122 struct seq_file
*m
= file
->private_data
;
2133 ftrace_enabled_open(struct inode
*inode
, struct file
*file
)
2135 struct ftrace_iterator
*iter
;
2138 if (unlikely(ftrace_disabled
))
2141 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
2145 iter
->pg
= ftrace_pages_start
;
2146 iter
->flags
= FTRACE_ITER_ENABLED
;
2148 ret
= seq_open(file
, &show_ftrace_seq_ops
);
2150 struct seq_file
*m
= file
->private_data
;
2160 static void ftrace_filter_reset(struct ftrace_hash
*hash
)
2162 mutex_lock(&ftrace_lock
);
2163 ftrace_hash_clear(hash
);
2164 mutex_unlock(&ftrace_lock
);
2168 ftrace_regex_open(struct ftrace_ops
*ops
, int flag
,
2169 struct inode
*inode
, struct file
*file
)
2171 struct ftrace_iterator
*iter
;
2172 struct ftrace_hash
*hash
;
2175 if (unlikely(ftrace_disabled
))
2178 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
2182 if (trace_parser_get_init(&iter
->parser
, FTRACE_BUFF_MAX
)) {
2187 if (flag
& FTRACE_ITER_NOTRACE
)
2188 hash
= ops
->notrace_hash
;
2190 hash
= ops
->filter_hash
;
2195 if (file
->f_mode
& FMODE_WRITE
) {
2196 mutex_lock(&ftrace_lock
);
2197 iter
->hash
= alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS
, hash
);
2198 mutex_unlock(&ftrace_lock
);
2201 trace_parser_put(&iter
->parser
);
2207 mutex_lock(&ftrace_regex_lock
);
2209 if ((file
->f_mode
& FMODE_WRITE
) &&
2210 (file
->f_flags
& O_TRUNC
))
2211 ftrace_filter_reset(iter
->hash
);
2213 if (file
->f_mode
& FMODE_READ
) {
2214 iter
->pg
= ftrace_pages_start
;
2216 ret
= seq_open(file
, &show_ftrace_seq_ops
);
2218 struct seq_file
*m
= file
->private_data
;
2222 free_ftrace_hash(iter
->hash
);
2223 trace_parser_put(&iter
->parser
);
2227 file
->private_data
= iter
;
2228 mutex_unlock(&ftrace_regex_lock
);
2234 ftrace_filter_open(struct inode
*inode
, struct file
*file
)
2236 return ftrace_regex_open(&global_ops
, FTRACE_ITER_FILTER
,
2241 ftrace_notrace_open(struct inode
*inode
, struct file
*file
)
2243 return ftrace_regex_open(&global_ops
, FTRACE_ITER_NOTRACE
,
2248 ftrace_regex_lseek(struct file
*file
, loff_t offset
, int origin
)
2252 if (file
->f_mode
& FMODE_READ
)
2253 ret
= seq_lseek(file
, offset
, origin
);
2255 file
->f_pos
= ret
= 1;
static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	int slen;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		slen = strlen(str);
		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
			matched = 1;
		break;
	}

	return matched;
}
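
/*
 * The four match types correspond to the glob forms accepted by
 * filter_parse_regex(), e.g. when writing to set_ftrace_filter:
 *
 *	"schedule"	-> MATCH_FULL        (exact name)
 *	"sched_*"	-> MATCH_FRONT_ONLY  (prefix)
 *	"*switch*"	-> MATCH_MIDDLE_ONLY (substring)
 *	"*_lock"	-> MATCH_END_ONLY    (suffix)
 *
 * where the regex/len passed in here are the glob with the '*'s already
 * stripped off by the parser.
 */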
2289 enter_record(struct ftrace_hash
*hash
, struct dyn_ftrace
*rec
, int not)
2291 struct ftrace_func_entry
*entry
;
2294 entry
= ftrace_lookup_ip(hash
, rec
->ip
);
2296 /* Do nothing if it doesn't exist */
2300 free_hash_entry(hash
, entry
);
2302 /* Do nothing if it exists */
2306 ret
= add_hash_entry(hash
, rec
->ip
);
2312 ftrace_match_record(struct dyn_ftrace
*rec
, char *mod
,
2313 char *regex
, int len
, int type
)
2315 char str
[KSYM_SYMBOL_LEN
];
2318 kallsyms_lookup(rec
->ip
, NULL
, NULL
, &modname
, str
);
2321 /* module lookup requires matching the module */
2322 if (!modname
|| strcmp(modname
, mod
))
2325 /* blank search means to match all funcs in the mod */
2330 return ftrace_match(str
, regex
, len
, type
);
2334 match_records(struct ftrace_hash
*hash
, char *buff
,
2335 int len
, char *mod
, int not)
2337 unsigned search_len
= 0;
2338 struct ftrace_page
*pg
;
2339 struct dyn_ftrace
*rec
;
2340 int type
= MATCH_FULL
;
2341 char *search
= buff
;
2346 type
= filter_parse_regex(buff
, len
, &search
, ¬);
2347 search_len
= strlen(search
);
2350 mutex_lock(&ftrace_lock
);
2352 if (unlikely(ftrace_disabled
))
2355 do_for_each_ftrace_rec(pg
, rec
) {
2357 if (ftrace_match_record(rec
, mod
, search
, search_len
, type
)) {
2358 ret
= enter_record(hash
, rec
, not);
2365 } while_for_each_ftrace_rec();
2367 mutex_unlock(&ftrace_lock
);
2373 ftrace_match_records(struct ftrace_hash
*hash
, char *buff
, int len
)
2375 return match_records(hash
, buff
, len
, NULL
, 0);
2379 ftrace_match_module_records(struct ftrace_hash
*hash
, char *buff
, char *mod
)
2383 /* blank or '*' mean the same */
2384 if (strcmp(buff
, "*") == 0)
2387 /* handle the case of 'dont filter this module' */
2388 if (strcmp(buff
, "!") == 0 || strcmp(buff
, "!*") == 0) {
2393 return match_records(hash
, buff
, strlen(buff
), mod
, not);
 * We register the module command as a template to show others how
 * to register a command as well.
2402 ftrace_mod_callback(char *func
, char *cmd
, char *param
, int enable
)
2404 struct ftrace_ops
*ops
= &global_ops
;
2405 struct ftrace_hash
*hash
;
2410 * cmd == 'mod' because we only registered this func
2411 * for the 'mod' ftrace_func_command.
2412 * But if you register one func with multiple commands,
2413 * you can tell which command was used by the cmd
2417 /* we must have a module name */
2421 mod
= strsep(¶m
, ":");
2426 hash
= ops
->filter_hash
;
2428 hash
= ops
->notrace_hash
;
2430 ret
= ftrace_match_module_records(hash
, func
, mod
);
static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
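
/*
 * Following that template, a new ":mycmd:" style command for
 * set_ftrace_filter would look roughly like the sketch below ("mycmd" and
 * mycmd_callback are hypothetical names used only for illustration):
 *
 *	static int mycmd_callback(char *func, char *cmd, char *param, int enable)
 *	{
 *		// func  = the function glob before the first ':'
 *		// cmd   = "mycmd", param = anything after the second ':'
 *		// enable distinguishes set_ftrace_filter from set_ftrace_notrace
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command mycmd_cmd = {
 *		.name	= "mycmd",
 *		.func	= mycmd_callback,
 *	};
 *
 *	// from an __init function:
 *	register_ftrace_command(&mycmd_cmd);
 */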
2451 function_trace_probe_call(unsigned long ip
, unsigned long parent_ip
)
2453 struct ftrace_func_probe
*entry
;
2454 struct hlist_head
*hhd
;
2455 struct hlist_node
*n
;
2458 key
= hash_long(ip
, FTRACE_HASH_BITS
);
2460 hhd
= &ftrace_func_hash
[key
];
2462 if (hlist_empty(hhd
))
2466 * Disable preemption for these calls to prevent a RCU grace
2467 * period. This syncs the hash iteration and freeing of items
2468 * on the hash. rcu_read_lock is too dangerous here.
2470 preempt_disable_notrace();
2471 hlist_for_each_entry_rcu(entry
, n
, hhd
, node
) {
2472 if (entry
->ip
== ip
)
2473 entry
->ops
->func(ip
, parent_ip
, &entry
->data
);
2475 preempt_enable_notrace();
2478 static struct ftrace_ops trace_probe_ops __read_mostly
=
2480 .func
= function_trace_probe_call
,
2483 static int ftrace_probe_registered
;
2485 static void __enable_ftrace_function_probe(void)
2490 if (ftrace_probe_registered
)
2493 for (i
= 0; i
< FTRACE_FUNC_HASHSIZE
; i
++) {
2494 struct hlist_head
*hhd
= &ftrace_func_hash
[i
];
2498 /* Nothing registered? */
2499 if (i
== FTRACE_FUNC_HASHSIZE
)
2502 ret
= __register_ftrace_function(&trace_probe_ops
);
2504 ftrace_startup(&trace_probe_ops
, 0);
2506 ftrace_probe_registered
= 1;
2509 static void __disable_ftrace_function_probe(void)
2514 if (!ftrace_probe_registered
)
2517 for (i
= 0; i
< FTRACE_FUNC_HASHSIZE
; i
++) {
2518 struct hlist_head
*hhd
= &ftrace_func_hash
[i
];
2523 /* no more funcs left */
2524 ret
= __unregister_ftrace_function(&trace_probe_ops
);
2526 ftrace_shutdown(&trace_probe_ops
, 0);
2528 ftrace_probe_registered
= 0;
2532 static void ftrace_free_entry_rcu(struct rcu_head
*rhp
)
2534 struct ftrace_func_probe
*entry
=
2535 container_of(rhp
, struct ftrace_func_probe
, rcu
);
2537 if (entry
->ops
->free
)
2538 entry
->ops
->free(&entry
->data
);
2544 register_ftrace_function_probe(char *glob
, struct ftrace_probe_ops
*ops
,
2547 struct ftrace_func_probe
*entry
;
2548 struct ftrace_page
*pg
;
2549 struct dyn_ftrace
*rec
;
2555 type
= filter_parse_regex(glob
, strlen(glob
), &search
, ¬);
2556 len
= strlen(search
);
2558 /* we do not support '!' for function probes */
2562 mutex_lock(&ftrace_lock
);
2564 if (unlikely(ftrace_disabled
))
2567 do_for_each_ftrace_rec(pg
, rec
) {
2569 if (!ftrace_match_record(rec
, NULL
, search
, len
, type
))
2572 entry
= kmalloc(sizeof(*entry
), GFP_KERNEL
);
2574 /* If we did not process any, then return error */
2585 * The caller might want to do something special
2586 * for each function we find. We call the callback
2587 * to give the caller an opportunity to do so.
2589 if (ops
->callback
) {
2590 if (ops
->callback(rec
->ip
, &entry
->data
) < 0) {
2591 /* caller does not like this func */
2598 entry
->ip
= rec
->ip
;
2600 key
= hash_long(entry
->ip
, FTRACE_HASH_BITS
);
2601 hlist_add_head_rcu(&entry
->node
, &ftrace_func_hash
[key
]);
2603 } while_for_each_ftrace_rec();
2604 __enable_ftrace_function_probe();
2607 mutex_unlock(&ftrace_lock
);
2613 PROBE_TEST_FUNC
= 1,
2618 __unregister_ftrace_function_probe(char *glob
, struct ftrace_probe_ops
*ops
,
2619 void *data
, int flags
)
2621 struct ftrace_func_probe
*entry
;
2622 struct hlist_node
*n
, *tmp
;
2623 char str
[KSYM_SYMBOL_LEN
];
2624 int type
= MATCH_FULL
;
2628 if (glob
&& (strcmp(glob
, "*") == 0 || !strlen(glob
)))
2633 type
= filter_parse_regex(glob
, strlen(glob
), &search
, ¬);
2634 len
= strlen(search
);
2636 /* we do not support '!' for function probes */
2641 mutex_lock(&ftrace_lock
);
2642 for (i
= 0; i
< FTRACE_FUNC_HASHSIZE
; i
++) {
2643 struct hlist_head
*hhd
= &ftrace_func_hash
[i
];
2645 hlist_for_each_entry_safe(entry
, n
, tmp
, hhd
, node
) {
2647 /* break up if statements for readability */
2648 if ((flags
& PROBE_TEST_FUNC
) && entry
->ops
!= ops
)
2651 if ((flags
& PROBE_TEST_DATA
) && entry
->data
!= data
)
2654 /* do this last, since it is the most expensive */
2656 kallsyms_lookup(entry
->ip
, NULL
, NULL
,
2658 if (!ftrace_match(str
, glob
, len
, type
))
2662 hlist_del(&entry
->node
);
2663 call_rcu(&entry
->rcu
, ftrace_free_entry_rcu
);
2666 __disable_ftrace_function_probe();
2667 mutex_unlock(&ftrace_lock
);
2671 unregister_ftrace_function_probe(char *glob
, struct ftrace_probe_ops
*ops
,
2674 __unregister_ftrace_function_probe(glob
, ops
, data
,
2675 PROBE_TEST_FUNC
| PROBE_TEST_DATA
);
2679 unregister_ftrace_function_probe_func(char *glob
, struct ftrace_probe_ops
*ops
)
2681 __unregister_ftrace_function_probe(glob
, ops
, NULL
, PROBE_TEST_FUNC
);
2684 void unregister_ftrace_function_probe_all(char *glob
)
2686 __unregister_ftrace_function_probe(glob
, NULL
, NULL
, 0);
2689 static LIST_HEAD(ftrace_commands
);
2690 static DEFINE_MUTEX(ftrace_cmd_mutex
);
2692 int register_ftrace_command(struct ftrace_func_command
*cmd
)
2694 struct ftrace_func_command
*p
;
2697 mutex_lock(&ftrace_cmd_mutex
);
2698 list_for_each_entry(p
, &ftrace_commands
, list
) {
2699 if (strcmp(cmd
->name
, p
->name
) == 0) {
2704 list_add(&cmd
->list
, &ftrace_commands
);
2706 mutex_unlock(&ftrace_cmd_mutex
);
2711 int unregister_ftrace_command(struct ftrace_func_command
*cmd
)
2713 struct ftrace_func_command
*p
, *n
;
2716 mutex_lock(&ftrace_cmd_mutex
);
2717 list_for_each_entry_safe(p
, n
, &ftrace_commands
, list
) {
2718 if (strcmp(cmd
->name
, p
->name
) == 0) {
2720 list_del_init(&p
->list
);
2725 mutex_unlock(&ftrace_cmd_mutex
);
2730 static int ftrace_process_regex(struct ftrace_hash
*hash
,
2731 char *buff
, int len
, int enable
)
2733 char *func
, *command
, *next
= buff
;
2734 struct ftrace_func_command
*p
;
2737 func
= strsep(&next
, ":");
2740 ret
= ftrace_match_records(hash
, func
, len
);
2750 command
= strsep(&next
, ":");
2752 mutex_lock(&ftrace_cmd_mutex
);
2753 list_for_each_entry(p
, &ftrace_commands
, list
) {
2754 if (strcmp(p
->name
, command
) == 0) {
2755 ret
= p
->func(func
, command
, next
, enable
);
2760 mutex_unlock(&ftrace_cmd_mutex
);
2766 ftrace_regex_write(struct file
*file
, const char __user
*ubuf
,
2767 size_t cnt
, loff_t
*ppos
, int enable
)
2769 struct ftrace_iterator
*iter
;
2770 struct trace_parser
*parser
;
2776 mutex_lock(&ftrace_regex_lock
);
2779 if (unlikely(ftrace_disabled
))
2782 if (file
->f_mode
& FMODE_READ
) {
2783 struct seq_file
*m
= file
->private_data
;
2786 iter
= file
->private_data
;
2788 parser
= &iter
->parser
;
2789 read
= trace_get_user(parser
, ubuf
, cnt
, ppos
);
2791 if (read
>= 0 && trace_parser_loaded(parser
) &&
2792 !trace_parser_cont(parser
)) {
2793 ret
= ftrace_process_regex(iter
->hash
, parser
->buffer
,
2794 parser
->idx
, enable
);
2795 trace_parser_clear(parser
);
2802 mutex_unlock(&ftrace_regex_lock
);
2808 ftrace_filter_write(struct file
*file
, const char __user
*ubuf
,
2809 size_t cnt
, loff_t
*ppos
)
2811 return ftrace_regex_write(file
, ubuf
, cnt
, ppos
, 1);
2815 ftrace_notrace_write(struct file
*file
, const char __user
*ubuf
,
2816 size_t cnt
, loff_t
*ppos
)
2818 return ftrace_regex_write(file
, ubuf
, cnt
, ppos
, 0);
2822 ftrace_set_regex(struct ftrace_ops
*ops
, unsigned char *buf
, int len
,
2823 int reset
, int enable
)
2825 struct ftrace_hash
**orig_hash
;
2826 struct ftrace_hash
*hash
;
2829 /* All global ops uses the global ops filters */
2830 if (ops
->flags
& FTRACE_OPS_FL_GLOBAL
)
2833 if (unlikely(ftrace_disabled
))
2837 orig_hash
= &ops
->filter_hash
;
2839 orig_hash
= &ops
->notrace_hash
;
2841 hash
= alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS
, *orig_hash
);
2845 mutex_lock(&ftrace_regex_lock
);
2847 ftrace_filter_reset(hash
);
2849 ftrace_match_records(hash
, buf
, len
);
2851 mutex_lock(&ftrace_lock
);
2852 ret
= ftrace_hash_move(orig_hash
, hash
);
2853 mutex_unlock(&ftrace_lock
);
2855 mutex_unlock(&ftrace_regex_lock
);
2857 free_ftrace_hash(hash
);
2862 * ftrace_set_filter - set a function to filter on in ftrace
2863 * @ops - the ops to set the filter with
2864 * @buf - the string that holds the function filter text.
2865 * @len - the length of the string.
2866 * @reset - non zero to reset all filters before applying this filter.
2868 * Filters denote which functions should be enabled when tracing is enabled.
2869 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2871 void ftrace_set_filter(struct ftrace_ops
*ops
, unsigned char *buf
,
2874 ftrace_set_regex(ops
, buf
, len
, reset
, 1);
2876 EXPORT_SYMBOL_GPL(ftrace_set_filter
);
2879 * ftrace_set_notrace - set a function to not trace in ftrace
2880 * @ops - the ops to set the notrace filter with
2881 * @buf - the string that holds the function notrace text.
2882 * @len - the length of the string.
2883 * @reset - non zero to reset all filters before applying this filter.
2885 * Notrace Filters denote which functions should not be enabled when tracing
2886 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2889 void ftrace_set_notrace(struct ftrace_ops
*ops
, unsigned char *buf
,
2892 ftrace_set_regex(ops
, buf
, len
, reset
, 0);
2894 EXPORT_SYMBOL_GPL(ftrace_set_notrace
);
2896 * ftrace_set_filter - set a function to filter on in ftrace
2897 * @ops - the ops to set the filter with
2898 * @buf - the string that holds the function filter text.
2899 * @len - the length of the string.
2900 * @reset - non zero to reset all filters before applying this filter.
2902 * Filters denote which functions should be enabled when tracing is enabled.
2903 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2905 void ftrace_set_global_filter(unsigned char *buf
, int len
, int reset
)
2907 ftrace_set_regex(&global_ops
, buf
, len
, reset
, 1);
2909 EXPORT_SYMBOL_GPL(ftrace_set_global_filter
);
/**
 * ftrace_set_global_notrace - set a function to not trace with the global ops
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
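/*
 * Example (illustrative sketch): a tracer that relies on the shared global
 * ops can install filters without owning an ftrace_ops of its own. The
 * patterns below are arbitrary; glob-style matches such as a trailing '*'
 * are handled by filter_parse_regex().
 *
 *	unsigned char filter[]  = "kmem_cache_*";
 *	unsigned char notrace[] = "kmem_cache_free";
 *
 *	ftrace_set_global_filter(filter, strlen((char *)filter), 1);
 *	ftrace_set_global_notrace(notrace, strlen((char *)notrace), 1);
 */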
/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;

static int __init set_ftrace_notrace(char *str)
{
	strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);

static int __init set_ftrace_filter(char *str)
{
	strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);
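/*
 * Example (illustrative): the boot parameters registered above can seed the
 * filters before tracing starts. Entries are comma separated; they are split
 * with strsep() in set_ftrace_early_filter() below. For instance:
 *
 *	ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=*_lock
 */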
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);

static int __init set_graph_function(char *str)
{
	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);

static void __init set_ftrace_early_graph(char *buf)
{
	int ret;
	char *func;

	while (buf) {
		func = strsep(&buf, ",");
		/* we allow only one expression at a time */
		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
				      func);
		if (ret)
			printk(KERN_DEBUG "ftrace: function %s not "
					  "traceable\n", func);
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static void __init
set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
{
	char *func;

	while (buf) {
		func = strsep(&buf, ",");
		ftrace_set_regex(ops, func, strlen(func), 0, enable);
	}
}

static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}
static int
ftrace_regex_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;
	struct ftrace_hash **orig_hash;
	struct trace_parser *parser;
	int filter_hash;
	int ret;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		parser->buffer[parser->idx] = 0;
		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
	}

	trace_parser_put(parser);

	if (file->f_mode & FMODE_WRITE) {
		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);

		if (filter_hash)
			orig_hash = &iter->ops->filter_hash;
		else
			orig_hash = &iter->ops->notrace_hash;

		mutex_lock(&ftrace_lock);
		/*
		 * Remove the current set, update the hash and add
		 * them back.
		 */
		ftrace_hash_rec_disable(iter->ops, filter_hash);
		ret = ftrace_hash_move(orig_hash, iter->hash);
		if (!ret) {
			ftrace_hash_rec_enable(iter->ops, filter_hash);
			if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
			    && ftrace_enabled)
				ftrace_run_update_code(FTRACE_ENABLE_CALLS);
		}
		mutex_unlock(&ftrace_lock);
	}
	free_ftrace_hash(iter->hash);
	kfree(iter);

	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_enabled_fops = {
	.open = ftrace_enabled_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
int ftrace_graph_filter_enabled;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
__g_next(struct seq_file *m, loff_t *pos)
{
	if (*pos >= ftrace_graph_count)
		return NULL;
	return &ftrace_graph_funcs[*pos];
}

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __g_next(m, pos);
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
	if (!ftrace_graph_filter_enabled && !*pos)
		return (void *)1;

	return __g_next(m, pos);
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;

	if (!ptr)
		return 0;

	if (ptr == (unsigned long *)1) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	seq_printf(m, "%ps\n", (void *)*ptr);

	return 0;
}
static const struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		ftrace_graph_filter_enabled = 0;
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}
	mutex_unlock(&graph_lock);

	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &ftrace_graph_seq_ops);

	return ret;
}

static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);
	return 0;
}
static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int fail = 1;
	int type, not;
	char *search;
	bool exists;
	int i;

	/* decode regex */
	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
	if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
		return -EBUSY;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled)) {
		mutex_unlock(&ftrace_lock);
		return -ENODEV;
	}

	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FREE)
			continue;

		if (ftrace_match_record(rec, NULL, search, search_len, type)) {
			/* if it is in the array */
			exists = false;
			for (i = 0; i < *idx; i++) {
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			}

			if (!not) {
				fail = 0;
				if (!exists) {
					array[(*idx)++] = rec->ip;
					if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
						goto out;
				}
			} else {
				if (exists) {
					array[i] = array[--(*idx)];
					fail = 0;
				}
			}
		}
	} while_for_each_ftrace_rec();
out:
	mutex_unlock(&ftrace_lock);

	if (fail)
		return -EINVAL;

	ftrace_graph_filter_enabled = 1;
	return 0;
}
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		parser.buffer[parser.idx] = 0;

		/* we allow only one expression at a time */
		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
				      parser.buffer);
		if (ret)
			goto out_free;
	}

	ret = read;

out_free:
	trace_parser_put(&parser);
out_unlock:
	mutex_unlock(&graph_lock);

	return ret;
}
static const struct file_operations ftrace_graph_fops = {
	.open		= ftrace_graph_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.release	= ftrace_graph_release,
	.llseek		= seq_lseek,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{

	trace_create_file("available_filter_functions", 0444,
			d_tracer, NULL, &ftrace_avail_fops);

	trace_create_file("enabled_functions", 0444,
			d_tracer, NULL, &ftrace_enabled_fops);

	trace_create_file("set_ftrace_filter", 0644, d_tracer,
			NULL, &ftrace_filter_fops);

	trace_create_file("set_ftrace_notrace", 0644, d_tracer,
			NULL, &ftrace_notrace_fops);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	trace_create_file("set_graph_function", 0444, d_tracer,
			NULL,
			&ftrace_graph_fops);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
static int ftrace_process_locs(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	ftrace_update_code(mod);
	mutex_unlock(&ftrace_lock);

	return 0;
}
#ifdef CONFIG_MODULES
void ftrace_release_mod(struct module *mod)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	mutex_lock(&ftrace_lock);

	if (ftrace_disabled)
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {
		if (within_module_core(rec->ip, mod)) {
			/*
			 * rec->ip is changed in ftrace_free_rec().
			 * It should not be between s and e if the record was freed.
			 */
			FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
			ftrace_free_rec(rec);
		}
	} while_for_each_ftrace_rec();
 out_unlock:
	mutex_unlock(&ftrace_lock);
}
static void ftrace_init_module(struct module *mod,
			       unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_process_locs(mod, start, end);
}
static int ftrace_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		ftrace_init_module(mod, mod->ftrace_callsites,
				   mod->ftrace_callsites +
				   mod->num_ftrace_callsites);
		break;
	case MODULE_STATE_GOING:
		ftrace_release_mod(mod);
		break;
	}

	return 0;
}
#else
static int ftrace_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block ftrace_module_nb = {
	.notifier_call = ftrace_module_notify,
};
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_process_locs(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	ret = register_module_notifier(&ftrace_module_nb);
	if (ret)
		pr_warning("Failed to register trace ftrace module notifier\n");

	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}
#else

static struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
};

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(ops, command)		do { } while (0)
# define ftrace_shutdown(ops, command)		do { } while (0)
# define ftrace_startup_sysctl()		do { } while (0)
# define ftrace_shutdown_sysctl()		do { } while (0)

static inline int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
	return 1;
}

#endif /* CONFIG_DYNAMIC_FTRACE */
static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_sched().
	 */
	preempt_disable_notrace();
	op = rcu_dereference_raw(ftrace_ops_list);
	while (op != &ftrace_list_end) {
		if (ftrace_ops_test(op, ip))
			op->func(ip, parent_ip);
		op = rcu_dereference_raw(op->next);
	}
	preempt_enable_notrace();
}
static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}
static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(pid);
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
static int ftrace_pid_add(int p)
{
	struct pid *pid;
	struct ftrace_pid *fpid;
	int ret = -EINVAL;

	mutex_lock(&ftrace_lock);

	if (!p)
		pid = ftrace_swapper_pid;
	else
		pid = find_get_pid(p);

	if (!pid)
		goto out;

	ret = 0;

	list_for_each_entry(fpid, &ftrace_pids, list)
		if (fpid->pid == pid)
			goto out_put;

	ret = -ENOMEM;

	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
	if (!fpid)
		goto out_put;

	list_add(&fpid->list, &ftrace_pids);
	fpid->pid = pid;

	set_ftrace_pid_task(pid);

	ftrace_update_pid_func();
	ftrace_startup_enable(0);

	mutex_unlock(&ftrace_lock);
	return 0;

out_put:
	if (pid != ftrace_swapper_pid)
		put_pid(pid);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
static void ftrace_pid_reset(void)
{
	struct ftrace_pid *fpid, *safe;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
		struct pid *pid = fpid->pid;

		clear_ftrace_pid_task(pid);

		list_del(&fpid->list);
		kfree(fpid);
	}

	ftrace_update_pid_func();
	ftrace_startup_enable(0);

	mutex_unlock(&ftrace_lock);
}
static void *fpid_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ftrace_lock);

	if (list_empty(&ftrace_pids) && (!*pos))
		return (void *) 1;

	return seq_list_start(&ftrace_pids, *pos);
}

static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	if (v == (void *)1)
		return NULL;

	return seq_list_next(v, &ftrace_pids, pos);
}

static void fpid_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
static int fpid_show(struct seq_file *m, void *v)
{
	const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);

	if (v == (void *)1) {
		seq_printf(m, "no pid\n");
		return 0;
	}

	if (fpid->pid == ftrace_swapper_pid)
		seq_printf(m, "swapper tasks\n");
	else
		seq_printf(m, "%u\n", pid_vnr(fpid->pid));

	return 0;
}

static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};
static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_pid_reset();

	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &ftrace_pid_sops);

	return ret;
}
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64], *tmp;
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/*
	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
	 * to clean the filter quietly.
	 */
	tmp = strstrip(buf);
	if (strlen(tmp) == 0)
		return 1;

	ret = strict_strtol(tmp, 10, &val);
	if (ret < 0)
		return ret;

	ret = ftrace_pid_add(val);

	return ret ? ret : cnt;
}
static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	return 0;
}

static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= ftrace_pid_release,
};
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			    NULL, &ftrace_pid_fops);

	ftrace_profile_debugfs(d_tracer);

	return 0;
}
fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret = -1;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	ret = __register_ftrace_function(ops);
	if (!ret)
		ftrace_startup(ops, 0);

 out_unlock:
	mutex_unlock(&ftrace_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
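/*
 * Example (illustrative sketch, not built with this file): registering a
 * callback that is invoked for every traced function. All names are
 * hypothetical. The callback must be notrace, and a dynamically allocated
 * ftrace_ops may only be freed after unregistering and waiting out a
 * synchronize_sched() grace period (see ftrace_ops_list_func() above).
 *
 *	static unsigned long my_hits;
 *
 *	static void notrace my_trace_call(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		my_hits++;	-- count every call that gets traced --
 *	}
 *
 *	static struct ftrace_ops my_trace_ops = {
 *		.func	= my_trace_call,
 *	};
 *
 *	...
 *	register_ftrace_function(&my_trace_ops);
 *	...
 *	unregister_ftrace_function(&my_trace_ops);
 *	synchronize_sched();	-- only then free a kmalloc'ed ops --
 */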
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	if (!ret)
		ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_ops_list != &ftrace_list_end) {
			if (ftrace_ops_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_ops_list->func;
			else
				ftrace_trace_function = ftrace_ops_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static int ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
static void
ftrace_graph_probe_sched_switch(void *ignore,
			struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

 out:
	mutex_unlock(&ftrace_lock);
}
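/*
 * Example (illustrative sketch): hooking function entry and return through
 * the interface above. The callback names are hypothetical; the entry
 * handler returns nonzero when the matching return should be traced too.
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	-- also trace this function's return --
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *		-- duration is trace->rettime - trace->calltime --
 *	}
 *
 *	...
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */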
static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}
/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}