/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"
#define FTRACE_WARN_ON(cond)		({ int ___r = !!(cond);		\
					   if (WARN_ON(___r)) ftrace_kill(); ___r; })

#define FTRACE_WARN_ON_ONCE(cond)	({ int ___r = !!(cond);		\
					   if (WARN_ON_ONCE(___r)) ftrace_kill(); ___r; })
/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif
static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;
static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs);

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif
/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
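
/*
 * Illustrative sketch (not part of the original file): the two macros above
 * are used as a matching pair and expand to a do/while loop over the ops
 * list.  A hypothetical walker that counts the entries it visits would look
 * like this (note the ftrace_list_end stub is visited once when the list is
 * otherwise empty):
 */
#if 0	/* example only */
static int example_count_ftrace_ops(void)
{
	struct ftrace_ops *op;
	int cnt = 0;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		cnt++;
	} while_for_each_ftrace_op(op);

	return cnt;
}
#endif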
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}
/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next)
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip, op, regs);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
208 static void control_ops_disable_all(struct ftrace_ops
*ops
)
212 for_each_possible_cpu(cpu
)
213 *per_cpu_ptr(ops
->disabled
, cpu
) = 1;
216 static int control_ops_alloc(struct ftrace_ops
*ops
)
218 int __percpu
*disabled
;
220 disabled
= alloc_percpu(int);
224 ops
->disabled
= disabled
;
225 control_ops_disable_all(ops
);
229 static void ftrace_sync(struct work_struct
*work
)
232 * This function is just a stub to implement a hard force
233 * of synchronize_sched(). This requires synchronizing
234 * tasks even in userspace and idle.
236 * Yes, function tracing is rude.
240 static void ftrace_sync_ipi(void *data
)
242 /* Probably not needed, but do it anyway */
246 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
247 static void update_function_graph_func(void);
249 static inline void update_function_graph_func(void) { }
253 static ftrace_func_t
ftrace_ops_get_list_func(struct ftrace_ops
*ops
)
256 * If this is a dynamic ops or we force list func,
257 * then it needs to call the list anyway.
259 if (ops
->flags
& FTRACE_OPS_FL_DYNAMIC
|| FTRACE_FORCE_LIST_FUNC
)
260 return ftrace_ops_list_func
;
262 return ftrace_ops_get_func(ops
);
265 static void update_ftrace_function(void)
270 * Prepare the ftrace_ops that the arch callback will use.
271 * If there's only one ftrace_ops registered, the ftrace_ops_list
272 * will point to the ops we want.
274 set_function_trace_op
= ftrace_ops_list
;
276 /* If there's no ftrace_ops registered, just call the stub function */
277 if (ftrace_ops_list
== &ftrace_list_end
) {
281 * If we are at the end of the list and this ops is
282 * recursion safe and not dynamic and the arch supports passing ops,
283 * then have the mcount trampoline call the function directly.
285 } else if (ftrace_ops_list
->next
== &ftrace_list_end
) {
286 func
= ftrace_ops_get_list_func(ftrace_ops_list
);
289 /* Just use the default ftrace_ops */
290 set_function_trace_op
= &ftrace_list_end
;
291 func
= ftrace_ops_list_func
;
294 update_function_graph_func();
296 /* If there's no change, then do nothing more here */
297 if (ftrace_trace_function
== func
)
301 * If we are using the list function, it doesn't care
302 * about the function_trace_ops.
304 if (func
== ftrace_ops_list_func
) {
305 ftrace_trace_function
= func
;
307 * Don't even bother setting function_trace_ops,
308 * it would be racy to do so anyway.
313 #ifndef CONFIG_DYNAMIC_FTRACE
315 * For static tracing, we need to be a bit more careful.
316 * The function change takes effect immediately. Thus,
317 * we need to coordinate the setting of the function_trace_ops
318 * with the setting of the ftrace_trace_function.
320 * Set the function to the list ops, which will call the
321 * function we want, albeit indirectly, but it handles the
322 * ftrace_ops and doesn't depend on function_trace_op.
324 ftrace_trace_function
= ftrace_ops_list_func
;
326 * Make sure all CPUs see this. Yes this is slow, but static
327 * tracing is slow and nasty to have enabled.
329 schedule_on_each_cpu(ftrace_sync
);
330 /* Now all cpus are using the list ops. */
331 function_trace_op
= set_function_trace_op
;
332 /* Make sure the function_trace_op is visible on all CPUs */
334 /* Nasty way to force a rmb on all cpus */
335 smp_call_function(ftrace_sync_ipi
, NULL
, 1);
336 /* OK, we are all set to update the ftrace_trace_function now! */
337 #endif /* !CONFIG_DYNAMIC_FTRACE */
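
	/*
	 * Illustrative summary (not in the original source): for the
	 * !CONFIG_DYNAMIC_FTRACE case above the ordering is: 1) point every
	 * CPU at the list func, 2) sync, 3) publish the new
	 * function_trace_op, 4) force another sync, and only then 5) install
	 * the final callback below.
	 */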
339 ftrace_trace_function
= func
;
342 int using_ftrace_ops_list_func(void)
344 return ftrace_trace_function
== ftrace_ops_list_func
;
347 static void add_ftrace_ops(struct ftrace_ops
**list
, struct ftrace_ops
*ops
)
351 * We are entering ops into the list but another
352 * CPU might be walking that list. We need to make sure
353 * the ops->next pointer is valid before another CPU sees
354 * the ops pointer included into the list.
356 rcu_assign_pointer(*list
, ops
);
359 static int remove_ftrace_ops(struct ftrace_ops
**list
, struct ftrace_ops
*ops
)
361 struct ftrace_ops
**p
;
364 * If we are removing the last function, then simply point
365 * to the ftrace_stub.
367 if (*list
== ops
&& ops
->next
== &ftrace_list_end
) {
368 *list
= &ftrace_list_end
;
372 for (p
= list
; *p
!= &ftrace_list_end
; p
= &(*p
)->next
)
383 static void add_ftrace_list_ops(struct ftrace_ops
**list
,
384 struct ftrace_ops
*main_ops
,
385 struct ftrace_ops
*ops
)
387 int first
= *list
== &ftrace_list_end
;
388 add_ftrace_ops(list
, ops
);
390 add_ftrace_ops(&ftrace_ops_list
, main_ops
);
393 static int remove_ftrace_list_ops(struct ftrace_ops
**list
,
394 struct ftrace_ops
*main_ops
,
395 struct ftrace_ops
*ops
)
397 int ret
= remove_ftrace_ops(list
, ops
);
398 if (!ret
&& *list
== &ftrace_list_end
)
399 ret
= remove_ftrace_ops(&ftrace_ops_list
, main_ops
);
403 static void ftrace_update_trampoline(struct ftrace_ops
*ops
);
405 static int __register_ftrace_function(struct ftrace_ops
*ops
)
407 if (ops
->flags
& FTRACE_OPS_FL_DELETED
)
410 if (WARN_ON(ops
->flags
& FTRACE_OPS_FL_ENABLED
))
413 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
415 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
416 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
417 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
419 if (ops
->flags
& FTRACE_OPS_FL_SAVE_REGS
&&
420 !(ops
->flags
& FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED
))
423 if (ops
->flags
& FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED
)
424 ops
->flags
|= FTRACE_OPS_FL_SAVE_REGS
;
427 if (!core_kernel_data((unsigned long)ops
))
428 ops
->flags
|= FTRACE_OPS_FL_DYNAMIC
;
430 if (ops
->flags
& FTRACE_OPS_FL_CONTROL
) {
431 if (control_ops_alloc(ops
))
433 add_ftrace_list_ops(&ftrace_control_list
, &control_ops
, ops
);
434 /* The control_ops needs the trampoline update */
437 add_ftrace_ops(&ftrace_ops_list
, ops
);
439 ftrace_update_trampoline(ops
);
442 update_ftrace_function();
447 static int __unregister_ftrace_function(struct ftrace_ops
*ops
)
451 if (WARN_ON(!(ops
->flags
& FTRACE_OPS_FL_ENABLED
)))
454 if (ops
->flags
& FTRACE_OPS_FL_CONTROL
) {
455 ret
= remove_ftrace_list_ops(&ftrace_control_list
,
458 ret
= remove_ftrace_ops(&ftrace_ops_list
, ops
);
464 update_ftrace_function();
469 static void ftrace_update_pid_func(void)
471 /* Only do something if we are tracing something */
472 if (ftrace_trace_function
== ftrace_stub
)
475 update_ftrace_function();
478 #ifdef CONFIG_FUNCTION_PROFILER
479 struct ftrace_profile
{
480 struct hlist_node node
;
482 unsigned long counter
;
483 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
484 unsigned long long time
;
485 unsigned long long time_squared
;
489 struct ftrace_profile_page
{
490 struct ftrace_profile_page
*next
;
492 struct ftrace_profile records
[];
495 struct ftrace_profile_stat
{
497 struct hlist_head
*hash
;
498 struct ftrace_profile_page
*pages
;
499 struct ftrace_profile_page
*start
;
500 struct tracer_stat stat
;
503 #define PROFILE_RECORDS_SIZE \
504 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
506 #define PROFILES_PER_PAGE \
507 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
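
/*
 * Rough worked example (an illustration assuming 4K pages, 64-bit pointers
 * and CONFIG_FUNCTION_GRAPH_TRACER, not from the original source): struct
 * ftrace_profile is then 48 bytes and the page header before records[] is
 * 16 bytes, so PROFILES_PER_PAGE comes out to (4096 - 16) / 48 = 85 profile
 * records per page.
 */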
509 static int ftrace_profile_enabled __read_mostly
;
511 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
512 static DEFINE_MUTEX(ftrace_profile_lock
);
514 static DEFINE_PER_CPU(struct ftrace_profile_stat
, ftrace_profile_stats
);
516 #define FTRACE_PROFILE_HASH_BITS 10
517 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
520 function_stat_next(void *v
, int idx
)
522 struct ftrace_profile
*rec
= v
;
523 struct ftrace_profile_page
*pg
;
525 pg
= (struct ftrace_profile_page
*)((unsigned long)rec
& PAGE_MASK
);
531 if ((void *)rec
>= (void *)&pg
->records
[pg
->index
]) {
535 rec
= &pg
->records
[0];
543 static void *function_stat_start(struct tracer_stat
*trace
)
545 struct ftrace_profile_stat
*stat
=
546 container_of(trace
, struct ftrace_profile_stat
, stat
);
548 if (!stat
|| !stat
->start
)
551 return function_stat_next(&stat
->start
->records
[0], 0);
554 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
555 /* function graph compares on total time */
556 static int function_stat_cmp(void *p1
, void *p2
)
558 struct ftrace_profile
*a
= p1
;
559 struct ftrace_profile
*b
= p2
;
561 if (a
->time
< b
->time
)
563 if (a
->time
> b
->time
)
569 /* not using function graph: compare against hit counts */
570 static int function_stat_cmp(void *p1
, void *p2
)
572 struct ftrace_profile
*a
= p1
;
573 struct ftrace_profile
*b
= p2
;
575 if (a
->counter
< b
->counter
)
577 if (a
->counter
> b
->counter
)
584 static int function_stat_headers(struct seq_file
*m
)
586 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
587 seq_puts(m
, " Function "
590 "--- ---- --- ---\n");
592 seq_puts(m
, " Function Hit\n"
598 static int function_stat_show(struct seq_file
*m
, void *v
)
600 struct ftrace_profile
*rec
= v
;
601 char str
[KSYM_SYMBOL_LEN
];
603 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
604 static struct trace_seq s
;
605 unsigned long long avg
;
606 unsigned long long stddev
;
608 mutex_lock(&ftrace_profile_lock
);
610 /* we raced with function_profile_reset() */
611 if (unlikely(rec
->counter
== 0)) {
616 kallsyms_lookup(rec
->ip
, NULL
, NULL
, NULL
, str
);
617 seq_printf(m
, " %-30.30s %10lu", str
, rec
->counter
);
619 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
622 do_div(avg
, rec
->counter
);
624 /* Sample standard deviation (s^2) */
625 if (rec
->counter
<= 1)
629 * Apply Welford's method:
630 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
632 stddev
= rec
->counter
* rec
->time_squared
-
633 rec
->time
* rec
->time
;
636 * Divide only 1000 for ns^2 -> us^2 conversion.
637 * trace_print_graph_duration will divide 1000 again.
639 do_div(stddev
, rec
->counter
* (rec
->counter
- 1) * 1000);
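
	/*
	 * Worked example (illustrative, not from the original source):
	 * three samples of 10us, 20us and 30us give n = 3, sum(x) = 60000ns
	 * and sum(x^2) = 1.4e9 ns^2, so n*sum(x^2) - sum(x)^2 = 6e8 and
	 * dividing by n*(n-1) = 6 yields s^2 = 1e8 ns^2, i.e. a standard
	 * deviation of 10us.
	 */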
643 trace_print_graph_duration(rec
->time
, &s
);
644 trace_seq_puts(&s
, " ");
645 trace_print_graph_duration(avg
, &s
);
646 trace_seq_puts(&s
, " ");
647 trace_print_graph_duration(stddev
, &s
);
648 trace_print_seq(m
, &s
);
652 mutex_unlock(&ftrace_profile_lock
);
657 static void ftrace_profile_reset(struct ftrace_profile_stat
*stat
)
659 struct ftrace_profile_page
*pg
;
661 pg
= stat
->pages
= stat
->start
;
664 memset(pg
->records
, 0, PROFILE_RECORDS_SIZE
);
669 memset(stat
->hash
, 0,
670 FTRACE_PROFILE_HASH_SIZE
* sizeof(struct hlist_head
));
673 int ftrace_profile_pages_init(struct ftrace_profile_stat
*stat
)
675 struct ftrace_profile_page
*pg
;
680 /* If we already allocated, do nothing */
684 stat
->pages
= (void *)get_zeroed_page(GFP_KERNEL
);
688 #ifdef CONFIG_DYNAMIC_FTRACE
689 functions
= ftrace_update_tot_cnt
;
692 * We do not know the number of functions that exist because
693 * dynamic tracing is what counts them. With past experience
694 * we have around 20K functions. That should be more than enough.
695 * It is highly unlikely we will execute every function in
701 pg
= stat
->start
= stat
->pages
;
703 pages
= DIV_ROUND_UP(functions
, PROFILES_PER_PAGE
);
705 for (i
= 1; i
< pages
; i
++) {
706 pg
->next
= (void *)get_zeroed_page(GFP_KERNEL
);
717 unsigned long tmp
= (unsigned long)pg
;
729 static int ftrace_profile_init_cpu(int cpu
)
731 struct ftrace_profile_stat
*stat
;
734 stat
= &per_cpu(ftrace_profile_stats
, cpu
);
737 /* If the profile is already created, simply reset it */
738 ftrace_profile_reset(stat
);
743 * We are profiling all functions, but usually only a few thousand
744 * functions are hit. We'll make a hash of 1024 items.
746 size
= FTRACE_PROFILE_HASH_SIZE
;
748 stat
->hash
= kzalloc(sizeof(struct hlist_head
) * size
, GFP_KERNEL
);
753 /* Preallocate the function profiling pages */
754 if (ftrace_profile_pages_init(stat
) < 0) {
763 static int ftrace_profile_init(void)
768 for_each_possible_cpu(cpu
) {
769 ret
= ftrace_profile_init_cpu(cpu
);
777 /* interrupts must be disabled */
778 static struct ftrace_profile
*
779 ftrace_find_profiled_func(struct ftrace_profile_stat
*stat
, unsigned long ip
)
781 struct ftrace_profile
*rec
;
782 struct hlist_head
*hhd
;
785 key
= hash_long(ip
, FTRACE_PROFILE_HASH_BITS
);
786 hhd
= &stat
->hash
[key
];
788 if (hlist_empty(hhd
))
791 hlist_for_each_entry_rcu_notrace(rec
, hhd
, node
) {
799 static void ftrace_add_profile(struct ftrace_profile_stat
*stat
,
800 struct ftrace_profile
*rec
)
804 key
= hash_long(rec
->ip
, FTRACE_PROFILE_HASH_BITS
);
805 hlist_add_head_rcu(&rec
->node
, &stat
->hash
[key
]);
809 * The memory is already allocated; this simply finds a new record to use.
811 static struct ftrace_profile
*
812 ftrace_profile_alloc(struct ftrace_profile_stat
*stat
, unsigned long ip
)
814 struct ftrace_profile
*rec
= NULL
;
816 /* prevent recursion (from NMIs) */
817 if (atomic_inc_return(&stat
->disabled
) != 1)
821 * Try to find the function again since an NMI
822 * could have added it
824 rec
= ftrace_find_profiled_func(stat
, ip
);
828 if (stat
->pages
->index
== PROFILES_PER_PAGE
) {
829 if (!stat
->pages
->next
)
831 stat
->pages
= stat
->pages
->next
;
834 rec
= &stat
->pages
->records
[stat
->pages
->index
++];
836 ftrace_add_profile(stat
, rec
);
839 atomic_dec(&stat
->disabled
);
845 function_profile_call(unsigned long ip
, unsigned long parent_ip
,
846 struct ftrace_ops
*ops
, struct pt_regs
*regs
)
848 struct ftrace_profile_stat
*stat
;
849 struct ftrace_profile
*rec
;
852 if (!ftrace_profile_enabled
)
855 local_irq_save(flags
);
857 stat
= this_cpu_ptr(&ftrace_profile_stats
);
858 if (!stat
->hash
|| !ftrace_profile_enabled
)
861 rec
= ftrace_find_profiled_func(stat
, ip
);
863 rec
= ftrace_profile_alloc(stat
, ip
);
870 local_irq_restore(flags
);
873 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
874 static int profile_graph_entry(struct ftrace_graph_ent
*trace
)
876 function_profile_call(trace
->func
, 0, NULL
, NULL
);
880 static void profile_graph_return(struct ftrace_graph_ret
*trace
)
882 struct ftrace_profile_stat
*stat
;
883 unsigned long long calltime
;
884 struct ftrace_profile
*rec
;
887 local_irq_save(flags
);
888 stat
= this_cpu_ptr(&ftrace_profile_stats
);
889 if (!stat
->hash
|| !ftrace_profile_enabled
)
892 /* If the calltime was zero'd ignore it */
893 if (!trace
->calltime
)
896 calltime
= trace
->rettime
- trace
->calltime
;
898 if (!(trace_flags
& TRACE_ITER_GRAPH_TIME
)) {
901 index
= trace
->depth
;
903 /* Append this call time to the parent time to subtract */
905 current
->ret_stack
[index
- 1].subtime
+= calltime
;
907 if (current
->ret_stack
[index
].subtime
< calltime
)
908 calltime
-= current
->ret_stack
[index
].subtime
;
913 rec
= ftrace_find_profiled_func(stat
, trace
->func
);
915 rec
->time
+= calltime
;
916 rec
->time_squared
+= calltime
* calltime
;
920 local_irq_restore(flags
);
923 static int register_ftrace_profiler(void)
925 return register_ftrace_graph(&profile_graph_return
,
926 &profile_graph_entry
);
929 static void unregister_ftrace_profiler(void)
931 unregister_ftrace_graph();
934 static struct ftrace_ops ftrace_profile_ops __read_mostly
= {
935 .func
= function_profile_call
,
936 .flags
= FTRACE_OPS_FL_RECURSION_SAFE
| FTRACE_OPS_FL_INITIALIZED
,
937 INIT_OPS_HASH(ftrace_profile_ops
)
940 static int register_ftrace_profiler(void)
942 return register_ftrace_function(&ftrace_profile_ops
);
945 static void unregister_ftrace_profiler(void)
947 unregister_ftrace_function(&ftrace_profile_ops
);
949 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
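
/*
 * Illustrative sketch (not part of the original file): ftrace_profile_ops
 * above shows the general pattern for hooking the function tracer; a
 * hypothetical minimal client would look like this:
 */
#if 0	/* example only */
static void example_trace_callback(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs)
{
	/* runs for every traced function that matches op's filter hashes */
}

static struct ftrace_ops example_ops __read_mostly = {
	.func	= example_trace_callback,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

/*
 * register_ftrace_function(&example_ops) starts the callback firing and
 * unregister_ftrace_function(&example_ops) stops it again.
 */
#endif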
952 ftrace_profile_write(struct file
*filp
, const char __user
*ubuf
,
953 size_t cnt
, loff_t
*ppos
)
958 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
964 mutex_lock(&ftrace_profile_lock
);
965 if (ftrace_profile_enabled
^ val
) {
967 ret
= ftrace_profile_init();
973 ret
= register_ftrace_profiler();
978 ftrace_profile_enabled
= 1;
980 ftrace_profile_enabled
= 0;
982 * unregister_ftrace_profiler calls stop_machine
983 * so this acts like a synchronize_sched.
985 unregister_ftrace_profiler();
989 mutex_unlock(&ftrace_profile_lock
);
997 ftrace_profile_read(struct file
*filp
, char __user
*ubuf
,
998 size_t cnt
, loff_t
*ppos
)
1000 char buf
[64]; /* big enough to hold a number */
1003 r
= sprintf(buf
, "%u\n", ftrace_profile_enabled
);
1004 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
1007 static const struct file_operations ftrace_profile_fops
= {
1008 .open
= tracing_open_generic
,
1009 .read
= ftrace_profile_read
,
1010 .write
= ftrace_profile_write
,
1011 .llseek
= default_llseek
,
1014 /* used to initialize the real stat files */
1015 static struct tracer_stat function_stats __initdata
= {
1016 .name
= "functions",
1017 .stat_start
= function_stat_start
,
1018 .stat_next
= function_stat_next
,
1019 .stat_cmp
= function_stat_cmp
,
1020 .stat_headers
= function_stat_headers
,
1021 .stat_show
= function_stat_show
1024 static __init
void ftrace_profile_tracefs(struct dentry
*d_tracer
)
1026 struct ftrace_profile_stat
*stat
;
1027 struct dentry
*entry
;
1032 for_each_possible_cpu(cpu
) {
1033 stat
= &per_cpu(ftrace_profile_stats
, cpu
);
1035 /* allocate enough for function name + cpu number */
1036 name
= kmalloc(32, GFP_KERNEL
);
1039 * The files created are permanent; if something happens,
1040 * we still do not free memory.
1043 "Could not allocate stat file for cpu %d\n",
1047 stat
->stat
= function_stats
;
1048 snprintf(name
, 32, "function%d", cpu
);
1049 stat
->stat
.name
= name
;
1050 ret
= register_stat_tracer(&stat
->stat
);
1053 "Could not register function stat for cpu %d\n",
1060 entry
= tracefs_create_file("function_profile_enabled", 0644,
1061 d_tracer
, NULL
, &ftrace_profile_fops
);
1063 pr_warning("Could not create tracefs "
1064 "'function_profile_enabled' entry\n");
1067 #else /* CONFIG_FUNCTION_PROFILER */
1068 static __init
void ftrace_profile_tracefs(struct dentry
*d_tracer
)
1071 #endif /* CONFIG_FUNCTION_PROFILER */
1073 static struct pid
* const ftrace_swapper_pid
= &init_struct_pid
;
1075 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1076 static int ftrace_graph_active
;
1078 # define ftrace_graph_active 0
1081 #ifdef CONFIG_DYNAMIC_FTRACE
1083 static struct ftrace_ops
*removed_ops
;
1086 * Set when doing a global update, like enabling all recs or disabling them.
1087 * It is not set when just updating a single ftrace_ops.
1089 static bool update_all_ops
;
1091 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1092 # error Dynamic ftrace depends on MCOUNT_RECORD
1095 static struct hlist_head ftrace_func_hash
[FTRACE_FUNC_HASHSIZE
] __read_mostly
;
1097 struct ftrace_func_probe
{
1098 struct hlist_node node
;
1099 struct ftrace_probe_ops
*ops
;
1100 unsigned long flags
;
1103 struct list_head free_list
;
1106 struct ftrace_func_entry
{
1107 struct hlist_node hlist
;
1111 struct ftrace_hash
{
1112 unsigned long size_bits
;
1113 struct hlist_head
*buckets
;
1114 unsigned long count
;
1115 struct rcu_head rcu
;
1119 * We make these constant because no one should touch them,
1120 * but they are used as the default "empty hash", to avoid allocating
1121 * it all the time. These are in a read only section such that if
1122 * anyone does try to modify it, it will cause an exception.
1124 static const struct hlist_head empty_buckets
[1];
1125 static const struct ftrace_hash empty_hash
= {
1126 .buckets
= (struct hlist_head
*)empty_buckets
,
1128 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
1130 static struct ftrace_ops global_ops
= {
1131 .func
= ftrace_stub
,
1132 .local_hash
.notrace_hash
= EMPTY_HASH
,
1133 .local_hash
.filter_hash
= EMPTY_HASH
,
1134 INIT_OPS_HASH(global_ops
)
1135 .flags
= FTRACE_OPS_FL_RECURSION_SAFE
|
1136 FTRACE_OPS_FL_INITIALIZED
,
1140 * This is used by __kernel_text_address() to return true if the
1141 * address is on a dynamically allocated trampoline that would
1142 * not return true for either core_kernel_text() or
1143 * is_module_text_address().
1145 bool is_ftrace_trampoline(unsigned long addr
)
1147 struct ftrace_ops
*op
;
1151 * Some of the ops may be dynamically allocated,
1152 * they are freed after a synchronize_sched().
1154 preempt_disable_notrace();
1156 do_for_each_ftrace_op(op
, ftrace_ops_list
) {
1158 * This is to check for dynamically allocated trampolines.
1159 * Trampolines that are in kernel text will have
1160 * core_kernel_text() return true.
1162 if (op
->trampoline
&& op
->trampoline_size
)
1163 if (addr
>= op
->trampoline
&&
1164 addr
< op
->trampoline
+ op
->trampoline_size
) {
1168 } while_for_each_ftrace_op(op
);
1171 preempt_enable_notrace();
1176 struct ftrace_page
{
1177 struct ftrace_page
*next
;
1178 struct dyn_ftrace
*records
;
1183 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1184 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
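
/*
 * Rough worked example (an illustration assuming x86-64, where struct
 * dyn_ftrace is just the ip and flags words, i.e. 16 bytes; not from the
 * original source): with 4K pages ENTRIES_PER_PAGE works out to
 * 4096 / 16 = 256 records per page.
 */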
1186 /* estimate from running different kernels */
1187 #define NR_TO_INIT 10000
1189 static struct ftrace_page
*ftrace_pages_start
;
1190 static struct ftrace_page
*ftrace_pages
;
1192 static bool __always_inline
ftrace_hash_empty(struct ftrace_hash
*hash
)
1194 return !hash
|| !hash
->count
;
1197 static struct ftrace_func_entry
*
1198 ftrace_lookup_ip(struct ftrace_hash
*hash
, unsigned long ip
)
1201 struct ftrace_func_entry
*entry
;
1202 struct hlist_head
*hhd
;
1204 if (ftrace_hash_empty(hash
))
1207 if (hash
->size_bits
> 0)
1208 key
= hash_long(ip
, hash
->size_bits
);
1212 hhd
= &hash
->buckets
[key
];
1214 hlist_for_each_entry_rcu_notrace(entry
, hhd
, hlist
) {
1215 if (entry
->ip
== ip
)
1221 static void __add_hash_entry(struct ftrace_hash
*hash
,
1222 struct ftrace_func_entry
*entry
)
1224 struct hlist_head
*hhd
;
1227 if (hash
->size_bits
)
1228 key
= hash_long(entry
->ip
, hash
->size_bits
);
1232 hhd
= &hash
->buckets
[key
];
1233 hlist_add_head(&entry
->hlist
, hhd
);
1237 static int add_hash_entry(struct ftrace_hash
*hash
, unsigned long ip
)
1239 struct ftrace_func_entry
*entry
;
1241 entry
= kmalloc(sizeof(*entry
), GFP_KERNEL
);
1246 __add_hash_entry(hash
, entry
);
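
/*
 * Illustrative sketch (not part of the original file): the lookup and add
 * helpers above are typically paired so an ip is only recorded once.  A
 * hypothetical caller would do:
 */
#if 0	/* example only */
static int example_track_ip(struct ftrace_hash *hash, unsigned long ip)
{
	/* ftrace_lookup_ip() returns NULL when the ip is not yet hashed */
	if (ftrace_lookup_ip(hash, ip))
		return 0;

	return add_hash_entry(hash, ip);
}
#endif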
1252 free_hash_entry(struct ftrace_hash
*hash
,
1253 struct ftrace_func_entry
*entry
)
1255 hlist_del(&entry
->hlist
);
1261 remove_hash_entry(struct ftrace_hash
*hash
,
1262 struct ftrace_func_entry
*entry
)
1264 hlist_del(&entry
->hlist
);
1268 static void ftrace_hash_clear(struct ftrace_hash
*hash
)
1270 struct hlist_head
*hhd
;
1271 struct hlist_node
*tn
;
1272 struct ftrace_func_entry
*entry
;
1273 int size
= 1 << hash
->size_bits
;
1279 for (i
= 0; i
< size
; i
++) {
1280 hhd
= &hash
->buckets
[i
];
1281 hlist_for_each_entry_safe(entry
, tn
, hhd
, hlist
)
1282 free_hash_entry(hash
, entry
);
1284 FTRACE_WARN_ON(hash
->count
);
1287 static void free_ftrace_hash(struct ftrace_hash
*hash
)
1289 if (!hash
|| hash
== EMPTY_HASH
)
1291 ftrace_hash_clear(hash
);
1292 kfree(hash
->buckets
);
1296 static void __free_ftrace_hash_rcu(struct rcu_head
*rcu
)
1298 struct ftrace_hash
*hash
;
1300 hash
= container_of(rcu
, struct ftrace_hash
, rcu
);
1301 free_ftrace_hash(hash
);
1304 static void free_ftrace_hash_rcu(struct ftrace_hash
*hash
)
1306 if (!hash
|| hash
== EMPTY_HASH
)
1308 call_rcu_sched(&hash
->rcu
, __free_ftrace_hash_rcu
);
1311 void ftrace_free_filter(struct ftrace_ops
*ops
)
1313 ftrace_ops_init(ops
);
1314 free_ftrace_hash(ops
->func_hash
->filter_hash
);
1315 free_ftrace_hash(ops
->func_hash
->notrace_hash
);
1318 static struct ftrace_hash
*alloc_ftrace_hash(int size_bits
)
1320 struct ftrace_hash
*hash
;
1323 hash
= kzalloc(sizeof(*hash
), GFP_KERNEL
);
1327 size
= 1 << size_bits
;
1328 hash
->buckets
= kcalloc(size
, sizeof(*hash
->buckets
), GFP_KERNEL
);
1330 if (!hash
->buckets
) {
1335 hash
->size_bits
= size_bits
;
1340 static struct ftrace_hash
*
1341 alloc_and_copy_ftrace_hash(int size_bits
, struct ftrace_hash
*hash
)
1343 struct ftrace_func_entry
*entry
;
1344 struct ftrace_hash
*new_hash
;
1349 new_hash
= alloc_ftrace_hash(size_bits
);
1354 if (ftrace_hash_empty(hash
))
1357 size
= 1 << hash
->size_bits
;
1358 for (i
= 0; i
< size
; i
++) {
1359 hlist_for_each_entry(entry
, &hash
->buckets
[i
], hlist
) {
1360 ret
= add_hash_entry(new_hash
, entry
->ip
);
1366 FTRACE_WARN_ON(new_hash
->count
!= hash
->count
);
1371 free_ftrace_hash(new_hash
);
1376 ftrace_hash_rec_disable_modify(struct ftrace_ops
*ops
, int filter_hash
);
1378 ftrace_hash_rec_enable_modify(struct ftrace_ops
*ops
, int filter_hash
);
1380 static int ftrace_hash_ipmodify_update(struct ftrace_ops
*ops
,
1381 struct ftrace_hash
*new_hash
);
1384 ftrace_hash_move(struct ftrace_ops
*ops
, int enable
,
1385 struct ftrace_hash
**dst
, struct ftrace_hash
*src
)
1387 struct ftrace_func_entry
*entry
;
1388 struct hlist_node
*tn
;
1389 struct hlist_head
*hhd
;
1390 struct ftrace_hash
*new_hash
;
1391 int size
= src
->count
;
1396 /* Reject setting notrace hash on IPMODIFY ftrace_ops */
1397 if (ops
->flags
& FTRACE_OPS_FL_IPMODIFY
&& !enable
)
1401 * If the new source is empty, just free dst and assign it
1405 new_hash
= EMPTY_HASH
;
1410 * Make the hash size about 1/2 the # found
1412 for (size
/= 2; size
; size
>>= 1)
1415 /* Don't allocate too much */
1416 if (bits
> FTRACE_HASH_MAX_BITS
)
1417 bits
= FTRACE_HASH_MAX_BITS
;
1419 new_hash
= alloc_ftrace_hash(bits
);
1423 size
= 1 << src
->size_bits
;
1424 for (i
= 0; i
< size
; i
++) {
1425 hhd
= &src
->buckets
[i
];
1426 hlist_for_each_entry_safe(entry
, tn
, hhd
, hlist
) {
1427 remove_hash_entry(src
, entry
);
1428 __add_hash_entry(new_hash
, entry
);
1433 /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1435 /* IPMODIFY should be updated only when filter_hash updating */
1436 ret
= ftrace_hash_ipmodify_update(ops
, new_hash
);
1438 free_ftrace_hash(new_hash
);
1444 * Remove the current set, update the hash and add
1447 ftrace_hash_rec_disable_modify(ops
, enable
);
1449 rcu_assign_pointer(*dst
, new_hash
);
1451 ftrace_hash_rec_enable_modify(ops
, enable
);
1456 static bool hash_contains_ip(unsigned long ip
,
1457 struct ftrace_ops_hash
*hash
)
1460 * The function record is a match if it exists in the filter
1461 * hash and not in the notrace hash. Note, an empty hash is
1462 * considered a match for the filter hash, but an empty
1463 * notrace hash is considered not in the notrace hash.
1465 return (ftrace_hash_empty(hash
->filter_hash
) ||
1466 ftrace_lookup_ip(hash
->filter_hash
, ip
)) &&
1467 (ftrace_hash_empty(hash
->notrace_hash
) ||
1468 !ftrace_lookup_ip(hash
->notrace_hash
, ip
));
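
/*
 * Illustrative note (not part of the original file): with both hashes empty
 * every ip matches; adding foo() to the notrace hash excludes only foo();
 * adding foo() to a previously empty filter hash restricts matching to
 * foo() alone.
 */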
1472 * Test the hashes for this ops to see if we want to call
1473 * the ops->func or not.
1475 * It's a match if the ip is in the ops->filter_hash or
1476 * the filter_hash does not exist or is empty,
1478 * the ip is not in the ops->notrace_hash.
1480 * This needs to be called with preemption disabled as
1481 * the hashes are freed with call_rcu_sched().
1484 ftrace_ops_test(struct ftrace_ops
*ops
, unsigned long ip
, void *regs
)
1486 struct ftrace_ops_hash hash
;
1489 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1491 * There's a small race when adding ops that the ftrace handler
1492 * that wants regs, may be called without them. We can not
1493 * allow that handler to be called if regs is NULL.
1495 if (regs
== NULL
&& (ops
->flags
& FTRACE_OPS_FL_SAVE_REGS
))
1499 hash
.filter_hash
= rcu_dereference_raw_notrace(ops
->func_hash
->filter_hash
);
1500 hash
.notrace_hash
= rcu_dereference_raw_notrace(ops
->func_hash
->notrace_hash
);
1502 if (hash_contains_ip(ip
, &hash
))
/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
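
/*
 * Illustrative sketch (not part of the original file): like the ops iterator
 * above, these two macros are used as a pair, with ftrace_lock held:
 */
#if 0	/* example only */
static void example_walk_ftrace_records(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;

	do_for_each_ftrace_rec(pg, rec) {
		/* rec->ip is the patched call site of one traced function */
	} while_for_each_ftrace_rec();
}
#endif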
1525 static int ftrace_cmp_recs(const void *a
, const void *b
)
1527 const struct dyn_ftrace
*key
= a
;
1528 const struct dyn_ftrace
*rec
= b
;
1530 if (key
->flags
< rec
->ip
)
1532 if (key
->ip
>= rec
->ip
+ MCOUNT_INSN_SIZE
)
1537 static unsigned long ftrace_location_range(unsigned long start
, unsigned long end
)
1539 struct ftrace_page
*pg
;
1540 struct dyn_ftrace
*rec
;
1541 struct dyn_ftrace key
;
1544 key
.flags
= end
; /* overload flags, as it is unsigned long */
1546 for (pg
= ftrace_pages_start
; pg
; pg
= pg
->next
) {
1547 if (end
< pg
->records
[0].ip
||
1548 start
>= (pg
->records
[pg
->index
- 1].ip
+ MCOUNT_INSN_SIZE
))
1550 rec
= bsearch(&key
, pg
->records
, pg
->index
,
1551 sizeof(struct dyn_ftrace
),
1561 * ftrace_location - return true if the ip given is a traced location
1562 * @ip: the instruction pointer to check
1564 * Returns rec->ip if @ip given is a pointer to a ftrace location.
1565 * That is, the instruction that is either a NOP or call to
1566 * the function tracer. It checks the ftrace internal tables to
1567 * determine if the address belongs or not.
1569 unsigned long ftrace_location(unsigned long ip
)
1571 return ftrace_location_range(ip
, ip
);
1575 * ftrace_text_reserved - return true if range contains an ftrace location
1576 * @start: start of range to search
1577 * @end: end of range to search (inclusive). @end points to the last byte to check.
1579 * Returns 1 if @start and @end contains a ftrace location.
1580 * That is, the instruction that is either a NOP or call to
1581 * the function tracer. It checks the ftrace internal tables to
1582 * determine if the address belongs or not.
1584 int ftrace_text_reserved(const void *start
, const void *end
)
1588 ret
= ftrace_location_range((unsigned long)start
,
1589 (unsigned long)end
);
1594 /* Test if ops registered to this rec needs regs */
1595 static bool test_rec_ops_needs_regs(struct dyn_ftrace
*rec
)
1597 struct ftrace_ops
*ops
;
1598 bool keep_regs
= false;
1600 for (ops
= ftrace_ops_list
;
1601 ops
!= &ftrace_list_end
; ops
= ops
->next
) {
1602 /* pass rec in as regs to have non-NULL val */
1603 if (ftrace_ops_test(ops
, rec
->ip
, rec
)) {
1604 if (ops
->flags
& FTRACE_OPS_FL_SAVE_REGS
) {
1614 static void __ftrace_hash_rec_update(struct ftrace_ops
*ops
,
1618 struct ftrace_hash
*hash
;
1619 struct ftrace_hash
*other_hash
;
1620 struct ftrace_page
*pg
;
1621 struct dyn_ftrace
*rec
;
1625 /* Only update if the ops has been registered */
1626 if (!(ops
->flags
& FTRACE_OPS_FL_ENABLED
))
1630 * In the filter_hash case:
1631 * If the count is zero, we update all records.
1632 * Otherwise we just update the items in the hash.
1634 * In the notrace_hash case:
1635 * We enable the update in the hash.
1636 * As disabling notrace means enabling the tracing,
1637 * and enabling notrace means disabling, the inc variable
1641 hash
= ops
->func_hash
->filter_hash
;
1642 other_hash
= ops
->func_hash
->notrace_hash
;
1643 if (ftrace_hash_empty(hash
))
1647 hash
= ops
->func_hash
->notrace_hash
;
1648 other_hash
= ops
->func_hash
->filter_hash
;
1650 * If the notrace hash has no items,
1651 * then there's nothing to do.
1653 if (ftrace_hash_empty(hash
))
1657 do_for_each_ftrace_rec(pg
, rec
) {
1658 int in_other_hash
= 0;
1664 * Only the filter_hash affects all records.
1665 * Update if the record is not in the notrace hash.
1667 if (!other_hash
|| !ftrace_lookup_ip(other_hash
, rec
->ip
))
1670 in_hash
= !!ftrace_lookup_ip(hash
, rec
->ip
);
1671 in_other_hash
= !!ftrace_lookup_ip(other_hash
, rec
->ip
);
1674 * If filter_hash is set, we want to match all functions
1675 * that are in the hash but not in the other hash.
1677 * If filter_hash is not set, then we are decrementing.
1678 * That means we match anything that is in the hash
1679 * and also in the other_hash. That is, we need to turn
1680 * off functions in the other hash because they are disabled
1683 if (filter_hash
&& in_hash
&& !in_other_hash
)
1685 else if (!filter_hash
&& in_hash
&&
1686 (in_other_hash
|| ftrace_hash_empty(other_hash
)))
1694 if (FTRACE_WARN_ON(ftrace_rec_count(rec
) == FTRACE_REF_MAX
))
1698 * If there's only a single callback registered to a
1699 * function, and the ops has a trampoline registered
1700 * for it, then we can call it directly.
1702 if (ftrace_rec_count(rec
) == 1 && ops
->trampoline
)
1703 rec
->flags
|= FTRACE_FL_TRAMP
;
1706 * If we are adding another function callback
1707 * to this function, and the previous had a
1708 * custom trampoline in use, then we need to go
1709 * back to the default trampoline.
1711 rec
->flags
&= ~FTRACE_FL_TRAMP
;
1714 * If any ops wants regs saved for this function
1715 * then all ops will get saved regs.
1717 if (ops
->flags
& FTRACE_OPS_FL_SAVE_REGS
)
1718 rec
->flags
|= FTRACE_FL_REGS
;
1720 if (FTRACE_WARN_ON(ftrace_rec_count(rec
) == 0))
1725 * If the rec had REGS enabled and the ops that is
1726 * being removed had REGS set, then see if there is
1727 * still any ops for this record that wants regs.
1728 * If not, we can stop recording them.
1730 if (ftrace_rec_count(rec
) > 0 &&
1731 rec
->flags
& FTRACE_FL_REGS
&&
1732 ops
->flags
& FTRACE_OPS_FL_SAVE_REGS
) {
1733 if (!test_rec_ops_needs_regs(rec
))
1734 rec
->flags
&= ~FTRACE_FL_REGS
;
1738 * If the rec had TRAMP enabled, then it needs to
1739 * be cleared. As TRAMP can only be enabled iff
1740 * there is only a single ops attached to it.
1741 * In other words, always disable it on decrementing.
1742 * In the future, we may set it if rec count is
1743 * decremented to one, and the ops that is left
1746 rec
->flags
&= ~FTRACE_FL_TRAMP
;
1749 * flags will be cleared in ftrace_check_record()
1750 * if rec count is zero.
1754 /* Shortcut, if we handled all records, we are done. */
1755 if (!all
&& count
== hash
->count
)
1757 } while_for_each_ftrace_rec();
1760 static void ftrace_hash_rec_disable(struct ftrace_ops
*ops
,
1763 __ftrace_hash_rec_update(ops
, filter_hash
, 0);
1766 static void ftrace_hash_rec_enable(struct ftrace_ops
*ops
,
1769 __ftrace_hash_rec_update(ops
, filter_hash
, 1);
1772 static void ftrace_hash_rec_update_modify(struct ftrace_ops
*ops
,
1773 int filter_hash
, int inc
)
1775 struct ftrace_ops
*op
;
1777 __ftrace_hash_rec_update(ops
, filter_hash
, inc
);
1779 if (ops
->func_hash
!= &global_ops
.local_hash
)
1783 * If the ops shares the global_ops hash, then we need to update
1784 * all ops that are enabled and use this hash.
1786 do_for_each_ftrace_op(op
, ftrace_ops_list
) {
1790 if (op
->func_hash
== &global_ops
.local_hash
)
1791 __ftrace_hash_rec_update(op
, filter_hash
, inc
);
1792 } while_for_each_ftrace_op(op
);
1795 static void ftrace_hash_rec_disable_modify(struct ftrace_ops
*ops
,
1798 ftrace_hash_rec_update_modify(ops
, filter_hash
, 0);
1801 static void ftrace_hash_rec_enable_modify(struct ftrace_ops
*ops
,
1804 ftrace_hash_rec_update_modify(ops
, filter_hash
, 1);
1808 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1809 * or no update is needed, -EBUSY if it detects a conflict of the flag
1810 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1811 * Note that old_hash and new_hash have the following meanings:
1812 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1813 * - If the hash is EMPTY_HASH, it hits nothing
1814 * - Anything else hits the recs which match the hash entries.
1816 static int __ftrace_hash_update_ipmodify(struct ftrace_ops
*ops
,
1817 struct ftrace_hash
*old_hash
,
1818 struct ftrace_hash
*new_hash
)
1820 struct ftrace_page
*pg
;
1821 struct dyn_ftrace
*rec
, *end
= NULL
;
1824 /* Only update if the ops has been registered */
1825 if (!(ops
->flags
& FTRACE_OPS_FL_ENABLED
))
1828 if (!(ops
->flags
& FTRACE_OPS_FL_IPMODIFY
))
1832 * Since the IPMODIFY is a very address sensitive action, we do not
1833 * allow ftrace_ops to set all functions to new hash.
1835 if (!new_hash
|| !old_hash
)
1838 /* Update rec->flags */
1839 do_for_each_ftrace_rec(pg
, rec
) {
1840 /* We need to update only differences of filter_hash */
1841 in_old
= !!ftrace_lookup_ip(old_hash
, rec
->ip
);
1842 in_new
= !!ftrace_lookup_ip(new_hash
, rec
->ip
);
1843 if (in_old
== in_new
)
1847 /* New entries must ensure no others are using it */
1848 if (rec
->flags
& FTRACE_FL_IPMODIFY
)
1850 rec
->flags
|= FTRACE_FL_IPMODIFY
;
1851 } else /* Removed entry */
1852 rec
->flags
&= ~FTRACE_FL_IPMODIFY
;
1853 } while_for_each_ftrace_rec();
1860 /* Roll back what we did above */
1861 do_for_each_ftrace_rec(pg
, rec
) {
1865 in_old
= !!ftrace_lookup_ip(old_hash
, rec
->ip
);
1866 in_new
= !!ftrace_lookup_ip(new_hash
, rec
->ip
);
1867 if (in_old
== in_new
)
1871 rec
->flags
&= ~FTRACE_FL_IPMODIFY
;
1873 rec
->flags
|= FTRACE_FL_IPMODIFY
;
1874 } while_for_each_ftrace_rec();
1880 static int ftrace_hash_ipmodify_enable(struct ftrace_ops
*ops
)
1882 struct ftrace_hash
*hash
= ops
->func_hash
->filter_hash
;
1884 if (ftrace_hash_empty(hash
))
1887 return __ftrace_hash_update_ipmodify(ops
, EMPTY_HASH
, hash
);
1890 /* Disabling always succeeds */
1891 static void ftrace_hash_ipmodify_disable(struct ftrace_ops
*ops
)
1893 struct ftrace_hash
*hash
= ops
->func_hash
->filter_hash
;
1895 if (ftrace_hash_empty(hash
))
1898 __ftrace_hash_update_ipmodify(ops
, hash
, EMPTY_HASH
);
1901 static int ftrace_hash_ipmodify_update(struct ftrace_ops
*ops
,
1902 struct ftrace_hash
*new_hash
)
1904 struct ftrace_hash
*old_hash
= ops
->func_hash
->filter_hash
;
1906 if (ftrace_hash_empty(old_hash
))
1909 if (ftrace_hash_empty(new_hash
))
1912 return __ftrace_hash_update_ipmodify(ops
, old_hash
, new_hash
);
1915 static void print_ip_ins(const char *fmt
, unsigned char *p
)
1919 printk(KERN_CONT
"%s", fmt
);
1921 for (i
= 0; i
< MCOUNT_INSN_SIZE
; i
++)
1922 printk(KERN_CONT
"%s%02x", i
? ":" : "", p
[i
]);
1925 static struct ftrace_ops
*
1926 ftrace_find_tramp_ops_any(struct dyn_ftrace
*rec
);
1929 * ftrace_bug - report and shutdown function tracer
1930 * @failed: The failed type (EFAULT, EINVAL, EPERM)
1931 * @rec: The record that failed
1933 * The arch code that enables or disables the function tracing
1934 * can call ftrace_bug() when it has detected a problem in
1935 * modifying the code. @failed should be one of either:
1936 * EFAULT - if the problem happens on reading the @ip address
1937 * EINVAL - if what is read at @ip is not what was expected
1938 * EPERM - if the problem happens on writing to the @ip address
1940 void ftrace_bug(int failed
, struct dyn_ftrace
*rec
)
1942 unsigned long ip
= rec
? rec
->ip
: 0;
1946 FTRACE_WARN_ON_ONCE(1);
1947 pr_info("ftrace faulted on modifying ");
1951 FTRACE_WARN_ON_ONCE(1);
1952 pr_info("ftrace failed to modify ");
1954 print_ip_ins(" actual: ", (unsigned char *)ip
);
1958 FTRACE_WARN_ON_ONCE(1);
1959 pr_info("ftrace faulted on writing ");
1963 FTRACE_WARN_ON_ONCE(1);
1964 pr_info("ftrace faulted on unknown error ");
1968 struct ftrace_ops
*ops
= NULL
;
1970 pr_info("ftrace record flags: %lx\n", rec
->flags
);
1971 pr_cont(" (%ld)%s", ftrace_rec_count(rec
),
1972 rec
->flags
& FTRACE_FL_REGS
? " R" : " ");
1973 if (rec
->flags
& FTRACE_FL_TRAMP_EN
) {
1974 ops
= ftrace_find_tramp_ops_any(rec
);
1976 pr_cont("\ttramp: %pS",
1977 (void *)ops
->trampoline
);
1979 pr_cont("\ttramp: ERROR!");
1982 ip
= ftrace_get_addr_curr(rec
);
1983 pr_cont(" expected tramp: %lx\n", ip
);
1987 static int ftrace_check_record(struct dyn_ftrace
*rec
, int enable
, int update
)
1989 unsigned long flag
= 0UL;
1992 * If we are updating calls:
1994 * If the record has a ref count, then we need to enable it
1995 * because someone is using it.
1997 * Otherwise we make sure it's disabled.
1999 * If we are disabling calls, then disable all records that
2002 if (enable
&& ftrace_rec_count(rec
))
2003 flag
= FTRACE_FL_ENABLED
;
2006 * If enabling and the REGS flag does not match the REGS_EN, or
2007 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2008 * this record. Set flags to fail the compare against ENABLED.
2011 if (!(rec
->flags
& FTRACE_FL_REGS
) !=
2012 !(rec
->flags
& FTRACE_FL_REGS_EN
))
2013 flag
|= FTRACE_FL_REGS
;
2015 if (!(rec
->flags
& FTRACE_FL_TRAMP
) !=
2016 !(rec
->flags
& FTRACE_FL_TRAMP_EN
))
2017 flag
|= FTRACE_FL_TRAMP
;
2020 /* If the state of this record hasn't changed, then do nothing */
2021 if ((rec
->flags
& FTRACE_FL_ENABLED
) == flag
)
2022 return FTRACE_UPDATE_IGNORE
;
2025 /* Save off if rec is being enabled (for return value) */
2026 flag
^= rec
->flags
& FTRACE_FL_ENABLED
;
2029 rec
->flags
|= FTRACE_FL_ENABLED
;
2030 if (flag
& FTRACE_FL_REGS
) {
2031 if (rec
->flags
& FTRACE_FL_REGS
)
2032 rec
->flags
|= FTRACE_FL_REGS_EN
;
2034 rec
->flags
&= ~FTRACE_FL_REGS_EN
;
2036 if (flag
& FTRACE_FL_TRAMP
) {
2037 if (rec
->flags
& FTRACE_FL_TRAMP
)
2038 rec
->flags
|= FTRACE_FL_TRAMP_EN
;
2040 rec
->flags
&= ~FTRACE_FL_TRAMP_EN
;
2045 * If this record is being updated from a nop, then
2046 * return UPDATE_MAKE_CALL.
2048 * return UPDATE_MODIFY_CALL to tell the caller to convert
2049 * from the save regs, to a non-save regs function or
2050 * vice versa, or from a trampoline call.
2052 if (flag
& FTRACE_FL_ENABLED
)
2053 return FTRACE_UPDATE_MAKE_CALL
;
2055 return FTRACE_UPDATE_MODIFY_CALL
;
2059 /* If there's no more users, clear all flags */
2060 if (!ftrace_rec_count(rec
))
2064 * Just disable the record, but keep the ops TRAMP
2065 * and REGS states. The _EN flags must be disabled though.
2067 rec
->flags
&= ~(FTRACE_FL_ENABLED
| FTRACE_FL_TRAMP_EN
|
2071 return FTRACE_UPDATE_MAKE_NOP
;
2075 * ftrace_update_record, set a record that now is tracing or not
2076 * @rec: the record to update
2077 * @enable: set to 1 if the record is tracing, zero to force disable
2079 * The records that represent all functions that can be traced need
2080 * to be updated when tracing has been enabled.
2082 int ftrace_update_record(struct dyn_ftrace
*rec
, int enable
)
2084 return ftrace_check_record(rec
, enable
, 1);
2088 * ftrace_test_record, check if the record has been enabled or not
2089 * @rec: the record to test
2090 * @enable: set to 1 to check if enabled, 0 if it is disabled
2092 * The arch code may need to test if a record is already set to
2093 * tracing to determine how to modify the function code that it
2096 int ftrace_test_record(struct dyn_ftrace
*rec
, int enable
)
2098 return ftrace_check_record(rec
, enable
, 0);
2101 static struct ftrace_ops
*
2102 ftrace_find_tramp_ops_any(struct dyn_ftrace
*rec
)
2104 struct ftrace_ops
*op
;
2105 unsigned long ip
= rec
->ip
;
2107 do_for_each_ftrace_op(op
, ftrace_ops_list
) {
2109 if (!op
->trampoline
)
2112 if (hash_contains_ip(ip
, op
->func_hash
))
2114 } while_for_each_ftrace_op(op
);
2119 static struct ftrace_ops
*
2120 ftrace_find_tramp_ops_curr(struct dyn_ftrace
*rec
)
2122 struct ftrace_ops
*op
;
2123 unsigned long ip
= rec
->ip
;
2126 * Need to check removed ops first.
2127 * If they are being removed, and this rec has a tramp,
2128 * and this rec is in the ops list, then it would be the
2129 * one with the tramp.
2132 if (hash_contains_ip(ip
, &removed_ops
->old_hash
))
2137 * Need to find the current trampoline for a rec.
2138 * Now, a trampoline is only attached to a rec if there
2139 * was a single 'ops' attached to it. But this can be called
2140 * when we are adding another op to the rec or removing the
2141 * current one. Thus, if the op is being added, we can
2142 * ignore it because it hasn't attached itself to the rec
2145 * If an ops is being modified (hooking to different functions)
2146 * then we don't care about the new functions that are being
2147 * added, just the old ones (that are probably being removed).
2149 * If we are adding an ops to a function that already is using
2150 * a trampoline, it needs to be removed (trampolines are only
2151 * for single ops connected), then an ops that is not being
2152 * modified also needs to be checked.
2154 do_for_each_ftrace_op(op
, ftrace_ops_list
) {
2156 if (!op
->trampoline
)
2160 * If the ops is being added, it hasn't gotten to
2161 * the point to be removed from this tree yet.
2163 if (op
->flags
& FTRACE_OPS_FL_ADDING
)
2168 * If the ops is being modified and is in the old
2169 * hash, then it is probably being removed from this
2172 if ((op
->flags
& FTRACE_OPS_FL_MODIFYING
) &&
2173 hash_contains_ip(ip
, &op
->old_hash
))
2176 * If the ops is not being added or modified, and it's
2177 * in its normal filter hash, then this must be the one
2180 if (!(op
->flags
& FTRACE_OPS_FL_MODIFYING
) &&
2181 hash_contains_ip(ip
, op
->func_hash
))
2184 } while_for_each_ftrace_op(op
);
2189 static struct ftrace_ops
*
2190 ftrace_find_tramp_ops_new(struct dyn_ftrace
*rec
)
2192 struct ftrace_ops
*op
;
2193 unsigned long ip
= rec
->ip
;
2195 do_for_each_ftrace_op(op
, ftrace_ops_list
) {
2196 /* pass rec in as regs to have non-NULL val */
2197 if (hash_contains_ip(ip
, op
->func_hash
))
2199 } while_for_each_ftrace_op(op
);
2205 * ftrace_get_addr_new - Get the call address to set to
2206 * @rec: The ftrace record descriptor
2208 * If the record has the FTRACE_FL_REGS set, that means that it
2209 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2210 * is not set, then it wants to convert to the normal callback.
2212 * Returns the address of the trampoline to set to
2214 unsigned long ftrace_get_addr_new(struct dyn_ftrace
*rec
)
2216 struct ftrace_ops
*ops
;
2218 /* Trampolines take precedence over regs */
2219 if (rec
->flags
& FTRACE_FL_TRAMP
) {
2220 ops
= ftrace_find_tramp_ops_new(rec
);
2221 if (FTRACE_WARN_ON(!ops
|| !ops
->trampoline
)) {
2222 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2223 (void *)rec
->ip
, (void *)rec
->ip
, rec
->flags
);
2224 /* Ftrace is shutting down, return anything */
2225 return (unsigned long)FTRACE_ADDR
;
2227 return ops
->trampoline
;
2230 if (rec
->flags
& FTRACE_FL_REGS
)
2231 return (unsigned long)FTRACE_REGS_ADDR
;
2233 return (unsigned long)FTRACE_ADDR
;
2237 * ftrace_get_addr_curr - Get the call address that is already there
2238 * @rec: The ftrace record descriptor
2240 * The FTRACE_FL_REGS_EN is set when the record already points to
2241 * a function that saves all the regs. Basically the '_EN' version
2242 * represents the current state of the function.
2244 * Returns the address of the trampoline that is currently being called
2246 unsigned long ftrace_get_addr_curr(struct dyn_ftrace
*rec
)
2248 struct ftrace_ops
*ops
;
2250 /* Trampolines take precedence over regs */
2251 if (rec
->flags
& FTRACE_FL_TRAMP_EN
) {
2252 ops
= ftrace_find_tramp_ops_curr(rec
);
2253 if (FTRACE_WARN_ON(!ops
)) {
2254 pr_warning("Bad trampoline accounting at: %p (%pS)\n",
2255 (void *)rec
->ip
, (void *)rec
->ip
);
2256 /* Ftrace is shutting down, return anything */
2257 return (unsigned long)FTRACE_ADDR
;
2259 return ops
->trampoline
;
2262 if (rec
->flags
& FTRACE_FL_REGS_EN
)
2263 return (unsigned long)FTRACE_REGS_ADDR
;
2265 return (unsigned long)FTRACE_ADDR
;
2269 __ftrace_replace_code(struct dyn_ftrace
*rec
, int enable
)
2271 unsigned long ftrace_old_addr
;
2272 unsigned long ftrace_addr
;
2275 ftrace_addr
= ftrace_get_addr_new(rec
);
2277 /* This needs to be done before we call ftrace_update_record */
2278 ftrace_old_addr
= ftrace_get_addr_curr(rec
);
2280 ret
= ftrace_update_record(rec
, enable
);
2283 case FTRACE_UPDATE_IGNORE
:
2286 case FTRACE_UPDATE_MAKE_CALL
:
2287 return ftrace_make_call(rec
, ftrace_addr
);
2289 case FTRACE_UPDATE_MAKE_NOP
:
2290 return ftrace_make_nop(NULL
, rec
, ftrace_old_addr
);
2292 case FTRACE_UPDATE_MODIFY_CALL
:
2293 return ftrace_modify_call(rec
, ftrace_old_addr
, ftrace_addr
);
2296 return -1; /* unknown ftrace bug */
2299 void __weak
ftrace_replace_code(int enable
)
2301 struct dyn_ftrace
*rec
;
2302 struct ftrace_page
*pg
;
2305 if (unlikely(ftrace_disabled
))
2308 do_for_each_ftrace_rec(pg
, rec
) {
2309 failed
= __ftrace_replace_code(rec
, enable
);
2311 ftrace_bug(failed
, rec
);
2312 /* Stop processing */
2315 } while_for_each_ftrace_rec();
2318 struct ftrace_rec_iter
{
2319 struct ftrace_page
*pg
;
2324 * ftrace_rec_iter_start, start up iterating over traced functions
2326 * Returns an iterator handle that is used to iterate over all
2327 * the records that represent address locations where functions
2330 * May return NULL if no records are available.
2332 struct ftrace_rec_iter
*ftrace_rec_iter_start(void)
2335 * We only use a single iterator.
2336 * Protected by the ftrace_lock mutex.
2338 static struct ftrace_rec_iter ftrace_rec_iter
;
2339 struct ftrace_rec_iter
*iter
= &ftrace_rec_iter
;
2341 iter
->pg
= ftrace_pages_start
;
2344 /* Could have empty pages */
2345 while (iter
->pg
&& !iter
->pg
->index
)
2346 iter
->pg
= iter
->pg
->next
;
2355 * ftrace_rec_iter_next, get the next record to process.
2356 * @iter: The handle to the iterator.
2358 * Returns the next iterator after the given iterator @iter.
2360 struct ftrace_rec_iter
*ftrace_rec_iter_next(struct ftrace_rec_iter
*iter
)
2364 if (iter
->index
>= iter
->pg
->index
) {
2365 iter
->pg
= iter
->pg
->next
;
2368 /* Could have empty pages */
2369 while (iter
->pg
&& !iter
->pg
->index
)
2370 iter
->pg
= iter
->pg
->next
;
2380 * ftrace_rec_iter_record, get the record at the iterator location
2381 * @iter: The current iterator location
2383 * Returns the record that the current @iter is at.
2385 struct dyn_ftrace
*ftrace_rec_iter_record(struct ftrace_rec_iter
*iter
)
2387 return &iter
->pg
->records
[iter
->index
];
2391 ftrace_code_disable(struct module
*mod
, struct dyn_ftrace
*rec
)
2395 if (unlikely(ftrace_disabled
))
2398 ret
= ftrace_make_nop(mod
, rec
, MCOUNT_ADDR
);
2400 ftrace_bug(ret
, rec
);
2407 * archs can override this function if they must do something
2408 * before the modifying code is performed.
2410 int __weak
ftrace_arch_code_modify_prepare(void)
2416 * archs can override this function if they must do something
2417 * after the modifying code is performed.
2419 int __weak
ftrace_arch_code_modify_post_process(void)
2424 void ftrace_modify_all_code(int command
)
2426 int update
= command
& FTRACE_UPDATE_TRACE_FUNC
;
2430 * If the ftrace_caller calls a ftrace_ops func directly,
2431 * we need to make sure that it only traces functions it
2432 * expects to trace. When doing the switch of functions,
2433 * we need to update to the ftrace_ops_list_func first
2434 * before the transition between old and new calls are set,
2435 * as the ftrace_ops_list_func will check the ops hashes
2436 * to make sure the ops have the right functions
2440 err
= ftrace_update_ftrace_func(ftrace_ops_list_func
);
2441 if (FTRACE_WARN_ON(err
))
2445 if (command
& FTRACE_UPDATE_CALLS
)
2446 ftrace_replace_code(1);
2447 else if (command
& FTRACE_DISABLE_CALLS
)
2448 ftrace_replace_code(0);
2450 if (update
&& ftrace_trace_function
!= ftrace_ops_list_func
) {
2451 function_trace_op
= set_function_trace_op
;
2453 /* If irqs are disabled, we are in stop machine */
2454 if (!irqs_disabled())
2455 smp_call_function(ftrace_sync_ipi
, NULL
, 1);
2456 err
= ftrace_update_ftrace_func(ftrace_trace_function
);
2457 if (FTRACE_WARN_ON(err
))
2461 if (command
& FTRACE_START_FUNC_RET
)
2462 err
= ftrace_enable_ftrace_graph_caller();
2463 else if (command
& FTRACE_STOP_FUNC_RET
)
2464 err
= ftrace_disable_ftrace_graph_caller();
2465 FTRACE_WARN_ON(err
);
2468 static int __ftrace_modify_code(void *data
)
2470 int *command
= data
;
2472 ftrace_modify_all_code(*command
);
2478 * ftrace_run_stop_machine, go back to the stop machine method
2479 * @command: The command to tell ftrace what to do
2481 * If an arch needs to fall back to the stop machine method, then
2482 * it can call this function.
2484 void ftrace_run_stop_machine(int command
)
2486 stop_machine(__ftrace_modify_code
, &command
, NULL
);
2490 * arch_ftrace_update_code, modify the code to trace or not trace
2491 * @command: The command that needs to be done
2493 * Archs can override this function if it does not need to
2494 * run stop_machine() to modify code.
2496 void __weak
arch_ftrace_update_code(int command
)
2498 ftrace_run_stop_machine(command
);
2501 static void ftrace_run_update_code(int command
)
2505 ret
= ftrace_arch_code_modify_prepare();
2506 FTRACE_WARN_ON(ret
);
2511 * By default we use stop_machine() to modify the code.
2512 * But archs can do what ever they want as long as it
2513 * is safe. The stop_machine() is the safest, but also
2514 * produces the most overhead.
2516 arch_ftrace_update_code(command
);
2518 ret
= ftrace_arch_code_modify_post_process();
2519 FTRACE_WARN_ON(ret
);
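/*
 * Illustrative sketch (not part of the original file): an architecture
 * that can patch text safely without stopping the machine may override
 * the weak arch_ftrace_update_code() above and drive the update
 * directly, roughly:
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		ftrace_modify_all_code(command);
 *	}
 *
 * The exact arch implementation varies; treat this only as a sketch of
 * the override hook.
 */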
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
				   struct ftrace_ops_hash *old_hash)
{
	ops->flags |= FTRACE_OPS_FL_MODIFYING;
	ops->old_hash.filter_hash = old_hash->filter_hash;
	ops->old_hash.notrace_hash = old_hash->notrace_hash;
	ftrace_run_update_code(command);
	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;
	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}

static void control_ops_free(struct ftrace_ops *ops)
{
	free_percpu(ops->disabled);
}
static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_all(int command)
{
	update_all_ops = true;
	ftrace_startup_enable(command);
	update_all_ops = false;
}
static int ftrace_startup(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up++;
	command |= FTRACE_UPDATE_CALLS;

	/*
	 * Note that ftrace probes uses this to start up
	 * and modify functions it will probe. But we still
	 * set the ADDING flag for modification, as probes
	 * do not have trampolines. If they add them in the
	 * future, then the probes will need to distinguish
	 * between adding and updating probes.
	 */
	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;

	ret = ftrace_hash_ipmodify_enable(ops);
	if (ret < 0) {
		/* Rollback registration process */
		__unregister_ftrace_function(ops);
		ftrace_start_up--;
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
		return ret;
	}

	ftrace_hash_rec_enable(ops, 1);

	ftrace_startup_enable(command);

	ops->flags &= ~FTRACE_OPS_FL_ADDING;

	return 0;
}
static int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may be never nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	/* Disabling ipmodify never fails */
	ftrace_hash_ipmodify_disable(ops);
	ftrace_hash_rec_disable(ops, 1);

	ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	command |= FTRACE_UPDATE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled) {
		/*
		 * If these are control ops, they still need their
		 * per_cpu field freed. Since, function tracing is
		 * not currently active, we can just free them
		 * without synchronizing all CPUs.
		 */
		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
		return 0;
	}

	/*
	 * If the ops uses a trampoline, then it needs to be
	 * tested first on update.
	 */
	ops->flags |= FTRACE_OPS_FL_REMOVING;
	removed_ops = ops;

	/* The trampoline logic checks the old hashes */
	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;

	ftrace_run_update_code(command);

	/*
	 * If there's no more ops registered with ftrace, run a
	 * sanity check to make sure all rec flags are cleared.
	 */
	if (ftrace_ops_list == &ftrace_list_end) {
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;

		do_for_each_ftrace_rec(pg, rec) {
			if (FTRACE_WARN_ON_ONCE(rec->flags))
				pr_warn("  %pS flags:%lx\n",
					(void *)rec->ip, rec->flags);
		} while_for_each_ftrace_rec();
	}

	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;

	removed_ops = NULL;
	ops->flags &= ~FTRACE_OPS_FL_REMOVING;

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 * The same goes for freeing the per_cpu data of the control
	 * ops.
	 *
	 * Again, normal synchronize_sched() is not good enough.
	 * We need to do a hard force of sched synchronization.
	 * This is because we use preempt_disable() to do RCU, but
	 * the function tracers can be called where RCU is not watching
	 * (like before user_exit()). We can not rely on the RCU
	 * infrastructure to do the synchronization, thus we must do it
	 * ourselves.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
		schedule_on_each_cpu(ftrace_sync);

		arch_ftrace_trampoline_free(ops);

		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
	}

	return 0;
}
static void ftrace_startup_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up) {
		command = FTRACE_UPDATE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_START_FUNC_RET;
		ftrace_startup_enable(command);
	}
}

static void ftrace_shutdown_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up) {
		command = FTRACE_DISABLE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_STOP_FUNC_RET;
		ftrace_run_update_code(command);
	}
}
static cycle_t		ftrace_update_time;
unsigned long		ftrace_update_tot_cnt;

static inline int ops_traces_mod(struct ftrace_ops *ops)
{
	/*
	 * Filter_hash being empty will default to trace module.
	 * But notrace hash requires a test of individual module functions.
	 */
	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
		ftrace_hash_empty(ops->func_hash->notrace_hash);
}
/*
 * Check if the current ops references the record.
 *
 * If the ops traces all functions, then it was already accounted for.
 * If the ops does not trace the current record function, skip it.
 * If the ops ignores the function via notrace filter, skip it.
 */
static inline bool
ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	/* If ops isn't enabled, ignore it */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	/* If ops traces all mods, we already accounted for it */
	if (ops_traces_mod(ops))
		return 0;

	/* The function must be in the filter */
	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
	    !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
		return 0;

	/* If in notrace hash, we ignore it too */
	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
		return 0;

	return 1;
}

static int referenced_filters(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
		if (ops_references_rec(ops, rec))
			cnt++;
	}

	return cnt;
}
2802 static int ftrace_update_code(struct module
*mod
, struct ftrace_page
*new_pgs
)
2804 struct ftrace_page
*pg
;
2805 struct dyn_ftrace
*p
;
2806 cycle_t start
, stop
;
2807 unsigned long update_cnt
= 0;
2808 unsigned long ref
= 0;
2813 * When adding a module, we need to check if tracers are
2814 * currently enabled and if they are set to trace all functions.
2815 * If they are, we need to enable the module functions as well
2816 * as update the reference counts for those function records.
2819 struct ftrace_ops
*ops
;
2821 for (ops
= ftrace_ops_list
;
2822 ops
!= &ftrace_list_end
; ops
= ops
->next
) {
2823 if (ops
->flags
& FTRACE_OPS_FL_ENABLED
) {
2824 if (ops_traces_mod(ops
))
2832 start
= ftrace_now(raw_smp_processor_id());
2834 for (pg
= new_pgs
; pg
; pg
= pg
->next
) {
2836 for (i
= 0; i
< pg
->index
; i
++) {
2839 /* If something went wrong, bail without enabling anything */
2840 if (unlikely(ftrace_disabled
))
2843 p
= &pg
->records
[i
];
2845 cnt
+= referenced_filters(p
);
2849 * Do the initial record conversion from mcount jump
2850 * to the NOP instructions.
2852 if (!ftrace_code_disable(mod
, p
))
2858 * If the tracing is enabled, go ahead and enable the record.
2860 * The reason not to enable the record immediatelly is the
2861 * inherent check of ftrace_make_nop/ftrace_make_call for
2862 * correct previous instructions. Making first the NOP
2863 * conversion puts the module to the correct state, thus
2864 * passing the ftrace_make_call check.
2866 if (ftrace_start_up
&& cnt
) {
2867 int failed
= __ftrace_replace_code(p
, 1);
2869 ftrace_bug(failed
, p
);
2874 stop
= ftrace_now(raw_smp_processor_id());
2875 ftrace_update_time
= stop
- start
;
2876 ftrace_update_tot_cnt
+= update_cnt
;
2881 static int ftrace_allocate_records(struct ftrace_page
*pg
, int count
)
2886 if (WARN_ON(!count
))
2889 order
= get_count_order(DIV_ROUND_UP(count
, ENTRIES_PER_PAGE
));
2892 * We want to fill as much as possible. No more than a page
2895 while ((PAGE_SIZE
<< order
) / ENTRY_SIZE
>= count
+ ENTRIES_PER_PAGE
)
2899 pg
->records
= (void *)__get_free_pages(GFP_KERNEL
| __GFP_ZERO
, order
);
2902 /* if we can't allocate this size, try something smaller */
2909 cnt
= (PAGE_SIZE
<< order
) / ENTRY_SIZE
;
2918 static struct ftrace_page
*
2919 ftrace_allocate_pages(unsigned long num_to_init
)
2921 struct ftrace_page
*start_pg
;
2922 struct ftrace_page
*pg
;
2929 start_pg
= pg
= kzalloc(sizeof(*pg
), GFP_KERNEL
);
2934 * Try to allocate as much as possible in one continues
2935 * location that fills in all of the space. We want to
2936 * waste as little space as possible.
2939 cnt
= ftrace_allocate_records(pg
, num_to_init
);
2947 pg
->next
= kzalloc(sizeof(*pg
), GFP_KERNEL
);
2959 order
= get_count_order(pg
->size
/ ENTRIES_PER_PAGE
);
2960 free_pages((unsigned long)pg
->records
, order
);
2961 start_pg
= pg
->next
;
2965 pr_info("ftrace: FAILED to allocate memory for functions\n");
2969 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2971 struct ftrace_iterator
{
2974 struct ftrace_page
*pg
;
2975 struct dyn_ftrace
*func
;
2976 struct ftrace_func_probe
*probe
;
2977 struct trace_parser parser
;
2978 struct ftrace_hash
*hash
;
2979 struct ftrace_ops
*ops
;
2986 t_hash_next(struct seq_file
*m
, loff_t
*pos
)
2988 struct ftrace_iterator
*iter
= m
->private;
2989 struct hlist_node
*hnd
= NULL
;
2990 struct hlist_head
*hhd
;
2996 hnd
= &iter
->probe
->node
;
2998 if (iter
->hidx
>= FTRACE_FUNC_HASHSIZE
)
3001 hhd
= &ftrace_func_hash
[iter
->hidx
];
3003 if (hlist_empty(hhd
)) {
3019 if (WARN_ON_ONCE(!hnd
))
3022 iter
->probe
= hlist_entry(hnd
, struct ftrace_func_probe
, node
);
3027 static void *t_hash_start(struct seq_file
*m
, loff_t
*pos
)
3029 struct ftrace_iterator
*iter
= m
->private;
3033 if (!(iter
->flags
& FTRACE_ITER_DO_HASH
))
3036 if (iter
->func_pos
> *pos
)
3040 for (l
= 0; l
<= (*pos
- iter
->func_pos
); ) {
3041 p
= t_hash_next(m
, &l
);
3048 /* Only set this if we have an item */
3049 iter
->flags
|= FTRACE_ITER_HASH
;
3055 t_hash_show(struct seq_file
*m
, struct ftrace_iterator
*iter
)
3057 struct ftrace_func_probe
*rec
;
3060 if (WARN_ON_ONCE(!rec
))
3063 if (rec
->ops
->print
)
3064 return rec
->ops
->print(m
, rec
->ip
, rec
->ops
, rec
->data
);
3066 seq_printf(m
, "%ps:%ps", (void *)rec
->ip
, (void *)rec
->ops
->func
);
3069 seq_printf(m
, ":%p", rec
->data
);
3076 t_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
3078 struct ftrace_iterator
*iter
= m
->private;
3079 struct ftrace_ops
*ops
= iter
->ops
;
3080 struct dyn_ftrace
*rec
= NULL
;
3082 if (unlikely(ftrace_disabled
))
3085 if (iter
->flags
& FTRACE_ITER_HASH
)
3086 return t_hash_next(m
, pos
);
3089 iter
->pos
= iter
->func_pos
= *pos
;
3091 if (iter
->flags
& FTRACE_ITER_PRINTALL
)
3092 return t_hash_start(m
, pos
);
3095 if (iter
->idx
>= iter
->pg
->index
) {
3096 if (iter
->pg
->next
) {
3097 iter
->pg
= iter
->pg
->next
;
3102 rec
= &iter
->pg
->records
[iter
->idx
++];
3103 if (((iter
->flags
& FTRACE_ITER_FILTER
) &&
3104 !(ftrace_lookup_ip(ops
->func_hash
->filter_hash
, rec
->ip
))) ||
3106 ((iter
->flags
& FTRACE_ITER_NOTRACE
) &&
3107 !ftrace_lookup_ip(ops
->func_hash
->notrace_hash
, rec
->ip
)) ||
3109 ((iter
->flags
& FTRACE_ITER_ENABLED
) &&
3110 !(rec
->flags
& FTRACE_FL_ENABLED
))) {
3118 return t_hash_start(m
, pos
);
3125 static void reset_iter_read(struct ftrace_iterator
*iter
)
3129 iter
->flags
&= ~(FTRACE_ITER_PRINTALL
| FTRACE_ITER_HASH
);
3132 static void *t_start(struct seq_file
*m
, loff_t
*pos
)
3134 struct ftrace_iterator
*iter
= m
->private;
3135 struct ftrace_ops
*ops
= iter
->ops
;
3139 mutex_lock(&ftrace_lock
);
3141 if (unlikely(ftrace_disabled
))
3145 * If an lseek was done, then reset and start from beginning.
3147 if (*pos
< iter
->pos
)
3148 reset_iter_read(iter
);
3151 * For set_ftrace_filter reading, if we have the filter
3152 * off, we can short cut and just print out that all
3153 * functions are enabled.
3155 if ((iter
->flags
& FTRACE_ITER_FILTER
&&
3156 ftrace_hash_empty(ops
->func_hash
->filter_hash
)) ||
3157 (iter
->flags
& FTRACE_ITER_NOTRACE
&&
3158 ftrace_hash_empty(ops
->func_hash
->notrace_hash
))) {
3160 return t_hash_start(m
, pos
);
3161 iter
->flags
|= FTRACE_ITER_PRINTALL
;
3162 /* reset in case of seek/pread */
3163 iter
->flags
&= ~FTRACE_ITER_HASH
;
3167 if (iter
->flags
& FTRACE_ITER_HASH
)
3168 return t_hash_start(m
, pos
);
3171 * Unfortunately, we need to restart at ftrace_pages_start
3172 * every time we let go of the ftrace_mutex. This is because
3173 * those pointers can change without the lock.
3175 iter
->pg
= ftrace_pages_start
;
3177 for (l
= 0; l
<= *pos
; ) {
3178 p
= t_next(m
, p
, &l
);
3184 return t_hash_start(m
, pos
);
3189 static void t_stop(struct seq_file
*m
, void *p
)
3191 mutex_unlock(&ftrace_lock
);
3195 arch_ftrace_trampoline_func(struct ftrace_ops
*ops
, struct dyn_ftrace
*rec
)
3200 static void add_trampoline_func(struct seq_file
*m
, struct ftrace_ops
*ops
,
3201 struct dyn_ftrace
*rec
)
3205 ptr
= arch_ftrace_trampoline_func(ops
, rec
);
3207 seq_printf(m
, " ->%pS", ptr
);
3210 static int t_show(struct seq_file
*m
, void *v
)
3212 struct ftrace_iterator
*iter
= m
->private;
3213 struct dyn_ftrace
*rec
;
3215 if (iter
->flags
& FTRACE_ITER_HASH
)
3216 return t_hash_show(m
, iter
);
3218 if (iter
->flags
& FTRACE_ITER_PRINTALL
) {
3219 if (iter
->flags
& FTRACE_ITER_NOTRACE
)
3220 seq_puts(m
, "#### no functions disabled ####\n");
3222 seq_puts(m
, "#### all functions enabled ####\n");
3231 seq_printf(m
, "%ps", (void *)rec
->ip
);
3232 if (iter
->flags
& FTRACE_ITER_ENABLED
) {
3233 struct ftrace_ops
*ops
= NULL
;
3235 seq_printf(m
, " (%ld)%s%s",
3236 ftrace_rec_count(rec
),
3237 rec
->flags
& FTRACE_FL_REGS
? " R" : " ",
3238 rec
->flags
& FTRACE_FL_IPMODIFY
? " I" : " ");
3239 if (rec
->flags
& FTRACE_FL_TRAMP_EN
) {
3240 ops
= ftrace_find_tramp_ops_any(rec
);
3242 seq_printf(m
, "\ttramp: %pS",
3243 (void *)ops
->trampoline
);
3245 seq_puts(m
, "\ttramp: ERROR!");
3248 add_trampoline_func(m
, ops
, rec
);
3256 static const struct seq_operations show_ftrace_seq_ops
= {
3264 ftrace_avail_open(struct inode
*inode
, struct file
*file
)
3266 struct ftrace_iterator
*iter
;
3268 if (unlikely(ftrace_disabled
))
3271 iter
= __seq_open_private(file
, &show_ftrace_seq_ops
, sizeof(*iter
));
3273 iter
->pg
= ftrace_pages_start
;
3274 iter
->ops
= &global_ops
;
3277 return iter
? 0 : -ENOMEM
;
3281 ftrace_enabled_open(struct inode
*inode
, struct file
*file
)
3283 struct ftrace_iterator
*iter
;
3285 iter
= __seq_open_private(file
, &show_ftrace_seq_ops
, sizeof(*iter
));
3287 iter
->pg
= ftrace_pages_start
;
3288 iter
->flags
= FTRACE_ITER_ENABLED
;
3289 iter
->ops
= &global_ops
;
3292 return iter
? 0 : -ENOMEM
;
3296 * ftrace_regex_open - initialize function tracer filter files
3297 * @ops: The ftrace_ops that hold the hash filters
3298 * @flag: The type of filter to process
3299 * @inode: The inode, usually passed in to your open routine
3300 * @file: The file, usually passed in to your open routine
3302 * ftrace_regex_open() initializes the filter files for the
3303 * @ops. Depending on @flag it may process the filter hash or
3304 * the notrace hash of @ops. With this called from the open
3305 * routine, you can use ftrace_filter_write() for the write
3306 * routine if @flag has FTRACE_ITER_FILTER set, or
3307 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3308 * tracing_lseek() should be used as the lseek routine, and
3309 * release must call ftrace_regex_release().
3312 ftrace_regex_open(struct ftrace_ops
*ops
, int flag
,
3313 struct inode
*inode
, struct file
*file
)
3315 struct ftrace_iterator
*iter
;
3316 struct ftrace_hash
*hash
;
3319 ftrace_ops_init(ops
);
3321 if (unlikely(ftrace_disabled
))
3324 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
3328 if (trace_parser_get_init(&iter
->parser
, FTRACE_BUFF_MAX
)) {
3336 mutex_lock(&ops
->func_hash
->regex_lock
);
3338 if (flag
& FTRACE_ITER_NOTRACE
)
3339 hash
= ops
->func_hash
->notrace_hash
;
3341 hash
= ops
->func_hash
->filter_hash
;
3343 if (file
->f_mode
& FMODE_WRITE
) {
3344 const int size_bits
= FTRACE_HASH_DEFAULT_BITS
;
3346 if (file
->f_flags
& O_TRUNC
)
3347 iter
->hash
= alloc_ftrace_hash(size_bits
);
3349 iter
->hash
= alloc_and_copy_ftrace_hash(size_bits
, hash
);
3352 trace_parser_put(&iter
->parser
);
3359 if (file
->f_mode
& FMODE_READ
) {
3360 iter
->pg
= ftrace_pages_start
;
3362 ret
= seq_open(file
, &show_ftrace_seq_ops
);
3364 struct seq_file
*m
= file
->private_data
;
3368 free_ftrace_hash(iter
->hash
);
3369 trace_parser_put(&iter
->parser
);
3373 file
->private_data
= iter
;
3376 mutex_unlock(&ops
->func_hash
->regex_lock
);
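/*
 * Illustrative sketch (not part of the original file): the wiring the
 * kernel-doc above describes is exactly what ftrace_filter_fops below
 * does; a new per-ops filter file would follow the same pattern:
 *
 *	static int my_filter_open(struct inode *inode, struct file *file)
 *	{
 *		struct ftrace_ops *ops = inode->i_private;
 *
 *		return ftrace_regex_open(ops,
 *				FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
 *				inode, file);
 *	}
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open    = my_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = tracing_lseek,
 *		.release = ftrace_regex_release,
 *	};
 */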
3382 ftrace_filter_open(struct inode
*inode
, struct file
*file
)
3384 struct ftrace_ops
*ops
= inode
->i_private
;
3386 return ftrace_regex_open(ops
,
3387 FTRACE_ITER_FILTER
| FTRACE_ITER_DO_HASH
,
3392 ftrace_notrace_open(struct inode
*inode
, struct file
*file
)
3394 struct ftrace_ops
*ops
= inode
->i_private
;
3396 return ftrace_regex_open(ops
, FTRACE_ITER_NOTRACE
,
static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	int slen;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		slen = strlen(str);
		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
			matched = 1;
		break;
	}

	return matched;
}
3429 enter_record(struct ftrace_hash
*hash
, struct dyn_ftrace
*rec
, int not)
3431 struct ftrace_func_entry
*entry
;
3434 entry
= ftrace_lookup_ip(hash
, rec
->ip
);
3436 /* Do nothing if it doesn't exist */
3440 free_hash_entry(hash
, entry
);
3442 /* Do nothing if it exists */
3446 ret
= add_hash_entry(hash
, rec
->ip
);
3452 ftrace_match_record(struct dyn_ftrace
*rec
, char *mod
,
3453 char *regex
, int len
, int type
)
3455 char str
[KSYM_SYMBOL_LEN
];
3458 kallsyms_lookup(rec
->ip
, NULL
, NULL
, &modname
, str
);
3461 /* module lookup requires matching the module */
3462 if (!modname
|| strcmp(modname
, mod
))
3465 /* blank search means to match all funcs in the mod */
3470 return ftrace_match(str
, regex
, len
, type
);
3474 match_records(struct ftrace_hash
*hash
, char *buff
,
3475 int len
, char *mod
, int not)
3477 unsigned search_len
= 0;
3478 struct ftrace_page
*pg
;
3479 struct dyn_ftrace
*rec
;
3480 int type
= MATCH_FULL
;
3481 char *search
= buff
;
3486 type
= filter_parse_regex(buff
, len
, &search
, ¬);
3487 search_len
= strlen(search
);
3490 mutex_lock(&ftrace_lock
);
3492 if (unlikely(ftrace_disabled
))
3495 do_for_each_ftrace_rec(pg
, rec
) {
3496 if (ftrace_match_record(rec
, mod
, search
, search_len
, type
)) {
3497 ret
= enter_record(hash
, rec
, not);
3504 } while_for_each_ftrace_rec();
3506 mutex_unlock(&ftrace_lock
);
3512 ftrace_match_records(struct ftrace_hash
*hash
, char *buff
, int len
)
3514 return match_records(hash
, buff
, len
, NULL
, 0);
3518 ftrace_match_module_records(struct ftrace_hash
*hash
, char *buff
, char *mod
)
3522 /* blank or '*' mean the same */
3523 if (strcmp(buff
, "*") == 0)
3526 /* handle the case of 'dont filter this module' */
3527 if (strcmp(buff
, "!") == 0 || strcmp(buff
, "!*") == 0) {
3532 return match_records(hash
, buff
, strlen(buff
), mod
, not);
 * We register the module command as a template to show others how
 * to register a command as well (see the sketch following
 * core_initcall() below).
3541 ftrace_mod_callback(struct ftrace_hash
*hash
,
3542 char *func
, char *cmd
, char *param
, int enable
)
3548 * cmd == 'mod' because we only registered this func
3549 * for the 'mod' ftrace_func_command.
3550 * But if you register one func with multiple commands,
3551 * you can tell which command was used by the cmd
3555 /* we must have a module name */
3559 mod
= strsep(¶m
, ":");
3563 ret
= ftrace_match_module_records(hash
, func
, mod
);
3572 static struct ftrace_func_command ftrace_mod_cmd
= {
3574 .func
= ftrace_mod_callback
,
3577 static int __init
ftrace_mod_cmd_init(void)
3579 return register_ftrace_command(&ftrace_mod_cmd
);
3581 core_initcall(ftrace_mod_cmd_init
);
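/*
 * Illustrative sketch (not part of the original file), mirroring the
 * "mod" template above: a new command is a struct ftrace_func_command
 * registered from __init code. The names my_cmd/my_cmd_callback are
 * placeholders:
 *
 *	static int my_cmd_callback(struct ftrace_hash *hash, char *func,
 *				   char *cmd, char *param, int enable)
 *	{
 *		... act on "func", using "param" if one was given ...
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name = "mycmd",
 *		.func = my_cmd_callback,
 *	};
 *
 *	static int __init my_cmd_init(void)
 *	{
 *		return register_ftrace_command(&my_cmd);
 *	}
 *	core_initcall(my_cmd_init);
 *
 * It would then be triggered from user space with something like
 * "echo 'do_IRQ:mycmd:42' > set_ftrace_filter".
 */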
3583 static void function_trace_probe_call(unsigned long ip
, unsigned long parent_ip
,
3584 struct ftrace_ops
*op
, struct pt_regs
*pt_regs
)
3586 struct ftrace_func_probe
*entry
;
3587 struct hlist_head
*hhd
;
3590 key
= hash_long(ip
, FTRACE_HASH_BITS
);
3592 hhd
= &ftrace_func_hash
[key
];
3594 if (hlist_empty(hhd
))
3598 * Disable preemption for these calls to prevent a RCU grace
3599 * period. This syncs the hash iteration and freeing of items
3600 * on the hash. rcu_read_lock is too dangerous here.
3602 preempt_disable_notrace();
3603 hlist_for_each_entry_rcu_notrace(entry
, hhd
, node
) {
3604 if (entry
->ip
== ip
)
3605 entry
->ops
->func(ip
, parent_ip
, &entry
->data
);
3607 preempt_enable_notrace();
3610 static struct ftrace_ops trace_probe_ops __read_mostly
=
3612 .func
= function_trace_probe_call
,
3613 .flags
= FTRACE_OPS_FL_INITIALIZED
,
3614 INIT_OPS_HASH(trace_probe_ops
)
3617 static int ftrace_probe_registered
;
3619 static void __enable_ftrace_function_probe(struct ftrace_ops_hash
*old_hash
)
3624 if (ftrace_probe_registered
) {
3625 /* still need to update the function call sites */
3627 ftrace_run_modify_code(&trace_probe_ops
, FTRACE_UPDATE_CALLS
,
3632 for (i
= 0; i
< FTRACE_FUNC_HASHSIZE
; i
++) {
3633 struct hlist_head
*hhd
= &ftrace_func_hash
[i
];
3637 /* Nothing registered? */
3638 if (i
== FTRACE_FUNC_HASHSIZE
)
3641 ret
= ftrace_startup(&trace_probe_ops
, 0);
3643 ftrace_probe_registered
= 1;
3646 static void __disable_ftrace_function_probe(void)
3650 if (!ftrace_probe_registered
)
3653 for (i
= 0; i
< FTRACE_FUNC_HASHSIZE
; i
++) {
3654 struct hlist_head
*hhd
= &ftrace_func_hash
[i
];
3659 /* no more funcs left */
3660 ftrace_shutdown(&trace_probe_ops
, 0);
3662 ftrace_probe_registered
= 0;
3666 static void ftrace_free_entry(struct ftrace_func_probe
*entry
)
3668 if (entry
->ops
->free
)
3669 entry
->ops
->free(entry
->ops
, entry
->ip
, &entry
->data
);
3674 register_ftrace_function_probe(char *glob
, struct ftrace_probe_ops
*ops
,
3677 struct ftrace_ops_hash old_hash_ops
;
3678 struct ftrace_func_probe
*entry
;
3679 struct ftrace_hash
**orig_hash
= &trace_probe_ops
.func_hash
->filter_hash
;
3680 struct ftrace_hash
*old_hash
= *orig_hash
;
3681 struct ftrace_hash
*hash
;
3682 struct ftrace_page
*pg
;
3683 struct dyn_ftrace
*rec
;
3690 type
= filter_parse_regex(glob
, strlen(glob
), &search
, ¬);
3691 len
= strlen(search
);
3693 /* we do not support '!' for function probes */
3697 mutex_lock(&trace_probe_ops
.func_hash
->regex_lock
);
3699 old_hash_ops
.filter_hash
= old_hash
;
3700 /* Probes only have filters */
3701 old_hash_ops
.notrace_hash
= NULL
;
3703 hash
= alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS
, old_hash
);
3709 if (unlikely(ftrace_disabled
)) {
3714 mutex_lock(&ftrace_lock
);
3716 do_for_each_ftrace_rec(pg
, rec
) {
3718 if (!ftrace_match_record(rec
, NULL
, search
, len
, type
))
3721 entry
= kmalloc(sizeof(*entry
), GFP_KERNEL
);
3723 /* If we did not process any, then return error */
3734 * The caller might want to do something special
3735 * for each function we find. We call the callback
3736 * to give the caller an opportunity to do so.
3739 if (ops
->init(ops
, rec
->ip
, &entry
->data
) < 0) {
3740 /* caller does not like this func */
3746 ret
= enter_record(hash
, rec
, 0);
3754 entry
->ip
= rec
->ip
;
3756 key
= hash_long(entry
->ip
, FTRACE_HASH_BITS
);
3757 hlist_add_head_rcu(&entry
->node
, &ftrace_func_hash
[key
]);
3759 } while_for_each_ftrace_rec();
3761 ret
= ftrace_hash_move(&trace_probe_ops
, 1, orig_hash
, hash
);
3763 __enable_ftrace_function_probe(&old_hash_ops
);
3766 free_ftrace_hash_rcu(old_hash
);
3771 mutex_unlock(&ftrace_lock
);
3773 mutex_unlock(&trace_probe_ops
.func_hash
->regex_lock
);
3774 free_ftrace_hash(hash
);
3780 PROBE_TEST_FUNC
= 1,
3785 __unregister_ftrace_function_probe(char *glob
, struct ftrace_probe_ops
*ops
,
3786 void *data
, int flags
)
3788 struct ftrace_func_entry
*rec_entry
;
3789 struct ftrace_func_probe
*entry
;
3790 struct ftrace_func_probe
*p
;
3791 struct ftrace_hash
**orig_hash
= &trace_probe_ops
.func_hash
->filter_hash
;
3792 struct ftrace_hash
*old_hash
= *orig_hash
;
3793 struct list_head free_list
;
3794 struct ftrace_hash
*hash
;
3795 struct hlist_node
*tmp
;
3796 char str
[KSYM_SYMBOL_LEN
];
3797 int type
= MATCH_FULL
;
3802 if (glob
&& (strcmp(glob
, "*") == 0 || !strlen(glob
)))
3807 type
= filter_parse_regex(glob
, strlen(glob
), &search
, ¬);
3808 len
= strlen(search
);
3810 /* we do not support '!' for function probes */
3815 mutex_lock(&trace_probe_ops
.func_hash
->regex_lock
);
3817 hash
= alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS
, *orig_hash
);
3819 /* Hmm, should report this somehow */
3822 INIT_LIST_HEAD(&free_list
);
3824 for (i
= 0; i
< FTRACE_FUNC_HASHSIZE
; i
++) {
3825 struct hlist_head
*hhd
= &ftrace_func_hash
[i
];
3827 hlist_for_each_entry_safe(entry
, tmp
, hhd
, node
) {
3829 /* break up if statements for readability */
3830 if ((flags
& PROBE_TEST_FUNC
) && entry
->ops
!= ops
)
3833 if ((flags
& PROBE_TEST_DATA
) && entry
->data
!= data
)
3836 /* do this last, since it is the most expensive */
3838 kallsyms_lookup(entry
->ip
, NULL
, NULL
,
3840 if (!ftrace_match(str
, glob
, len
, type
))
3844 rec_entry
= ftrace_lookup_ip(hash
, entry
->ip
);
3845 /* It is possible more than one entry had this ip */
3847 free_hash_entry(hash
, rec_entry
);
3849 hlist_del_rcu(&entry
->node
);
3850 list_add(&entry
->free_list
, &free_list
);
3853 mutex_lock(&ftrace_lock
);
3854 __disable_ftrace_function_probe();
3856 * Remove after the disable is called. Otherwise, if the last
3857 * probe is removed, a null hash means *all enabled*.
3859 ret
= ftrace_hash_move(&trace_probe_ops
, 1, orig_hash
, hash
);
3860 synchronize_sched();
3862 free_ftrace_hash_rcu(old_hash
);
3864 list_for_each_entry_safe(entry
, p
, &free_list
, free_list
) {
3865 list_del(&entry
->free_list
);
3866 ftrace_free_entry(entry
);
3868 mutex_unlock(&ftrace_lock
);
3871 mutex_unlock(&trace_probe_ops
.func_hash
->regex_lock
);
3872 free_ftrace_hash(hash
);
3876 unregister_ftrace_function_probe(char *glob
, struct ftrace_probe_ops
*ops
,
3879 __unregister_ftrace_function_probe(glob
, ops
, data
,
3880 PROBE_TEST_FUNC
| PROBE_TEST_DATA
);
3884 unregister_ftrace_function_probe_func(char *glob
, struct ftrace_probe_ops
*ops
)
3886 __unregister_ftrace_function_probe(glob
, ops
, NULL
, PROBE_TEST_FUNC
);
3889 void unregister_ftrace_function_probe_all(char *glob
)
3891 __unregister_ftrace_function_probe(glob
, NULL
, NULL
, 0);
3894 static LIST_HEAD(ftrace_commands
);
3895 static DEFINE_MUTEX(ftrace_cmd_mutex
);
3898 * Currently we only register ftrace commands from __init, so mark this
3901 __init
int register_ftrace_command(struct ftrace_func_command
*cmd
)
3903 struct ftrace_func_command
*p
;
3906 mutex_lock(&ftrace_cmd_mutex
);
3907 list_for_each_entry(p
, &ftrace_commands
, list
) {
3908 if (strcmp(cmd
->name
, p
->name
) == 0) {
3913 list_add(&cmd
->list
, &ftrace_commands
);
3915 mutex_unlock(&ftrace_cmd_mutex
);
3921 * Currently we only unregister ftrace commands from __init, so mark
3924 __init
int unregister_ftrace_command(struct ftrace_func_command
*cmd
)
3926 struct ftrace_func_command
*p
, *n
;
3929 mutex_lock(&ftrace_cmd_mutex
);
3930 list_for_each_entry_safe(p
, n
, &ftrace_commands
, list
) {
3931 if (strcmp(cmd
->name
, p
->name
) == 0) {
3933 list_del_init(&p
->list
);
3938 mutex_unlock(&ftrace_cmd_mutex
);
3943 static int ftrace_process_regex(struct ftrace_hash
*hash
,
3944 char *buff
, int len
, int enable
)
3946 char *func
, *command
, *next
= buff
;
3947 struct ftrace_func_command
*p
;
3950 func
= strsep(&next
, ":");
3953 ret
= ftrace_match_records(hash
, func
, len
);
3963 command
= strsep(&next
, ":");
3965 mutex_lock(&ftrace_cmd_mutex
);
3966 list_for_each_entry(p
, &ftrace_commands
, list
) {
3967 if (strcmp(p
->name
, command
) == 0) {
3968 ret
= p
->func(hash
, func
, command
, next
, enable
);
3973 mutex_unlock(&ftrace_cmd_mutex
);
3979 ftrace_regex_write(struct file
*file
, const char __user
*ubuf
,
3980 size_t cnt
, loff_t
*ppos
, int enable
)
3982 struct ftrace_iterator
*iter
;
3983 struct trace_parser
*parser
;
3989 if (file
->f_mode
& FMODE_READ
) {
3990 struct seq_file
*m
= file
->private_data
;
3993 iter
= file
->private_data
;
3995 if (unlikely(ftrace_disabled
))
3998 /* iter->hash is a local copy, so we don't need regex_lock */
4000 parser
= &iter
->parser
;
4001 read
= trace_get_user(parser
, ubuf
, cnt
, ppos
);
4003 if (read
>= 0 && trace_parser_loaded(parser
) &&
4004 !trace_parser_cont(parser
)) {
4005 ret
= ftrace_process_regex(iter
->hash
, parser
->buffer
,
4006 parser
->idx
, enable
);
4007 trace_parser_clear(parser
);
4018 ftrace_filter_write(struct file
*file
, const char __user
*ubuf
,
4019 size_t cnt
, loff_t
*ppos
)
4021 return ftrace_regex_write(file
, ubuf
, cnt
, ppos
, 1);
4025 ftrace_notrace_write(struct file
*file
, const char __user
*ubuf
,
4026 size_t cnt
, loff_t
*ppos
)
4028 return ftrace_regex_write(file
, ubuf
, cnt
, ppos
, 0);
4032 ftrace_match_addr(struct ftrace_hash
*hash
, unsigned long ip
, int remove
)
4034 struct ftrace_func_entry
*entry
;
4036 if (!ftrace_location(ip
))
4040 entry
= ftrace_lookup_ip(hash
, ip
);
4043 free_hash_entry(hash
, entry
);
4047 return add_hash_entry(hash
, ip
);
4050 static void ftrace_ops_update_code(struct ftrace_ops
*ops
,
4051 struct ftrace_ops_hash
*old_hash
)
4053 struct ftrace_ops
*op
;
4055 if (!ftrace_enabled
)
4058 if (ops
->flags
& FTRACE_OPS_FL_ENABLED
) {
4059 ftrace_run_modify_code(ops
, FTRACE_UPDATE_CALLS
, old_hash
);
4064 * If this is the shared global_ops filter, then we need to
4065 * check if there is another ops that shares it, is enabled.
4066 * If so, we still need to run the modify code.
4068 if (ops
->func_hash
!= &global_ops
.local_hash
)
4071 do_for_each_ftrace_op(op
, ftrace_ops_list
) {
4072 if (op
->func_hash
== &global_ops
.local_hash
&&
4073 op
->flags
& FTRACE_OPS_FL_ENABLED
) {
4074 ftrace_run_modify_code(op
, FTRACE_UPDATE_CALLS
, old_hash
);
4075 /* Only need to do this once */
4078 } while_for_each_ftrace_op(op
);
4082 ftrace_set_hash(struct ftrace_ops
*ops
, unsigned char *buf
, int len
,
4083 unsigned long ip
, int remove
, int reset
, int enable
)
4085 struct ftrace_hash
**orig_hash
;
4086 struct ftrace_ops_hash old_hash_ops
;
4087 struct ftrace_hash
*old_hash
;
4088 struct ftrace_hash
*hash
;
4091 if (unlikely(ftrace_disabled
))
4094 mutex_lock(&ops
->func_hash
->regex_lock
);
4097 orig_hash
= &ops
->func_hash
->filter_hash
;
4099 orig_hash
= &ops
->func_hash
->notrace_hash
;
4102 hash
= alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS
);
4104 hash
= alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS
, *orig_hash
);
4108 goto out_regex_unlock
;
4111 if (buf
&& !ftrace_match_records(hash
, buf
, len
)) {
4113 goto out_regex_unlock
;
4116 ret
= ftrace_match_addr(hash
, ip
, remove
);
4118 goto out_regex_unlock
;
4121 mutex_lock(&ftrace_lock
);
4122 old_hash
= *orig_hash
;
4123 old_hash_ops
.filter_hash
= ops
->func_hash
->filter_hash
;
4124 old_hash_ops
.notrace_hash
= ops
->func_hash
->notrace_hash
;
4125 ret
= ftrace_hash_move(ops
, enable
, orig_hash
, hash
);
4127 ftrace_ops_update_code(ops
, &old_hash_ops
);
4128 free_ftrace_hash_rcu(old_hash
);
4130 mutex_unlock(&ftrace_lock
);
4133 mutex_unlock(&ops
->func_hash
->regex_lock
);
4135 free_ftrace_hash(hash
);
static int
ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
		int reset, int enable)
{
	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
}

/**
 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
 * @ops - the ops to set the filter with
 * @ip - the address to add to or remove from the filter.
 * @remove - non zero to remove the ip from the filter
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @ip is NULL, it fails to update the filter.
 */
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_addr(ops, ip, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
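/*
 * Illustrative sketch (not part of the original file): a kernel module
 * that wants its handler called on entry to one specific function can
 * pair ftrace_set_filter_ip() with register_ftrace_function(). The
 * names my_ops/my_func and the traced address are placeholders:
 *
 *	static void my_func(unsigned long ip, unsigned long parent_ip,
 *			    struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		... handler body ...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_func,
 *	};
 *
 *	ftrace_set_filter_ip(&my_ops, addr, 0, 0);	// addr = function to hook
 *	register_ftrace_function(&my_ops);
 */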
static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
		 int reset, int enable)
{
	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops - the ops to set the filter with
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops - the ops to set the notrace filter with
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);

/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_filter);

/**
 * ftrace_set_global_notrace - set a function to not trace with global tracers
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
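/*
 * Illustrative sketch (not part of the original file): a tracer or
 * module holding its own ftrace_ops can seed the filter and notrace
 * hashes with glob patterns before enabling the ops. my_ops is a
 * placeholder:
 *
 *	ftrace_set_filter(&my_ops, "sched*", strlen("sched*"), 1);
 *	ftrace_set_notrace(&my_ops, "sched_clock", strlen("sched_clock"), 0);
 */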
/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;

/* Used by function selftest to not test if filter is set */
bool ftrace_filter_param __initdata;

static int __init set_ftrace_notrace(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);

static int __init set_ftrace_filter(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);
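/*
 * Illustrative note: on the kernel command line these filters take
 * comma separated globs, which ftrace_set_early_filter() below splits
 * with strsep(). For instance:
 *
 *	ftrace_filter=schedule*,vfs_read ftrace_notrace=*rcu*
 */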
4264 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4265 static char ftrace_graph_buf
[FTRACE_FILTER_SIZE
] __initdata
;
4266 static char ftrace_graph_notrace_buf
[FTRACE_FILTER_SIZE
] __initdata
;
4267 static int ftrace_set_func(unsigned long *array
, int *idx
, int size
, char *buffer
);
4269 static unsigned long save_global_trampoline
;
4270 static unsigned long save_global_flags
;
4272 static int __init
set_graph_function(char *str
)
4274 strlcpy(ftrace_graph_buf
, str
, FTRACE_FILTER_SIZE
);
4277 __setup("ftrace_graph_filter=", set_graph_function
);
4279 static int __init
set_graph_notrace_function(char *str
)
4281 strlcpy(ftrace_graph_notrace_buf
, str
, FTRACE_FILTER_SIZE
);
4284 __setup("ftrace_graph_notrace=", set_graph_notrace_function
);
4286 static void __init
set_ftrace_early_graph(char *buf
, int enable
)
4290 unsigned long *table
= ftrace_graph_funcs
;
4291 int *count
= &ftrace_graph_count
;
4294 table
= ftrace_graph_notrace_funcs
;
4295 count
= &ftrace_graph_notrace_count
;
4299 func
= strsep(&buf
, ",");
4300 /* we allow only one expression at a time */
4301 ret
= ftrace_set_func(table
, count
, FTRACE_GRAPH_MAX_FUNCS
, func
);
4303 printk(KERN_DEBUG
"ftrace: function %s not "
4304 "traceable\n", func
);
4307 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4310 ftrace_set_early_filter(struct ftrace_ops
*ops
, char *buf
, int enable
)
4314 ftrace_ops_init(ops
);
4317 func
= strsep(&buf
, ",");
4318 ftrace_set_regex(ops
, func
, strlen(func
), 0, enable
);
4322 static void __init
set_ftrace_early_filters(void)
4324 if (ftrace_filter_buf
[0])
4325 ftrace_set_early_filter(&global_ops
, ftrace_filter_buf
, 1);
4326 if (ftrace_notrace_buf
[0])
4327 ftrace_set_early_filter(&global_ops
, ftrace_notrace_buf
, 0);
4328 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4329 if (ftrace_graph_buf
[0])
4330 set_ftrace_early_graph(ftrace_graph_buf
, 1);
4331 if (ftrace_graph_notrace_buf
[0])
4332 set_ftrace_early_graph(ftrace_graph_notrace_buf
, 0);
4333 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4336 int ftrace_regex_release(struct inode
*inode
, struct file
*file
)
4338 struct seq_file
*m
= (struct seq_file
*)file
->private_data
;
4339 struct ftrace_ops_hash old_hash_ops
;
4340 struct ftrace_iterator
*iter
;
4341 struct ftrace_hash
**orig_hash
;
4342 struct ftrace_hash
*old_hash
;
4343 struct trace_parser
*parser
;
4347 if (file
->f_mode
& FMODE_READ
) {
4349 seq_release(inode
, file
);
4351 iter
= file
->private_data
;
4353 parser
= &iter
->parser
;
4354 if (trace_parser_loaded(parser
)) {
4355 parser
->buffer
[parser
->idx
] = 0;
4356 ftrace_match_records(iter
->hash
, parser
->buffer
, parser
->idx
);
4359 trace_parser_put(parser
);
4361 mutex_lock(&iter
->ops
->func_hash
->regex_lock
);
4363 if (file
->f_mode
& FMODE_WRITE
) {
4364 filter_hash
= !!(iter
->flags
& FTRACE_ITER_FILTER
);
4367 orig_hash
= &iter
->ops
->func_hash
->filter_hash
;
4369 orig_hash
= &iter
->ops
->func_hash
->notrace_hash
;
4371 mutex_lock(&ftrace_lock
);
4372 old_hash
= *orig_hash
;
4373 old_hash_ops
.filter_hash
= iter
->ops
->func_hash
->filter_hash
;
4374 old_hash_ops
.notrace_hash
= iter
->ops
->func_hash
->notrace_hash
;
4375 ret
= ftrace_hash_move(iter
->ops
, filter_hash
,
4376 orig_hash
, iter
->hash
);
4378 ftrace_ops_update_code(iter
->ops
, &old_hash_ops
);
4379 free_ftrace_hash_rcu(old_hash
);
4381 mutex_unlock(&ftrace_lock
);
4384 mutex_unlock(&iter
->ops
->func_hash
->regex_lock
);
4385 free_ftrace_hash(iter
->hash
);
4391 static const struct file_operations ftrace_avail_fops
= {
4392 .open
= ftrace_avail_open
,
4394 .llseek
= seq_lseek
,
4395 .release
= seq_release_private
,
4398 static const struct file_operations ftrace_enabled_fops
= {
4399 .open
= ftrace_enabled_open
,
4401 .llseek
= seq_lseek
,
4402 .release
= seq_release_private
,
4405 static const struct file_operations ftrace_filter_fops
= {
4406 .open
= ftrace_filter_open
,
4408 .write
= ftrace_filter_write
,
4409 .llseek
= tracing_lseek
,
4410 .release
= ftrace_regex_release
,
4413 static const struct file_operations ftrace_notrace_fops
= {
4414 .open
= ftrace_notrace_open
,
4416 .write
= ftrace_notrace_write
,
4417 .llseek
= tracing_lseek
,
4418 .release
= ftrace_regex_release
,
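/*
 * Illustrative note: with tracefs mounted (typically at
 * /sys/kernel/tracing or /sys/kernel/debug/tracing), these files are
 * driven from user space, e.g.:
 *
 *	echo 'vfs_read'   > set_ftrace_filter
 *	echo '*spin_lock' > set_ftrace_notrace
 *	cat available_filter_functions | head
 */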
4421 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4423 static DEFINE_MUTEX(graph_lock
);
4425 int ftrace_graph_count
;
4426 int ftrace_graph_notrace_count
;
4427 unsigned long ftrace_graph_funcs
[FTRACE_GRAPH_MAX_FUNCS
] __read_mostly
;
4428 unsigned long ftrace_graph_notrace_funcs
[FTRACE_GRAPH_MAX_FUNCS
] __read_mostly
;
4430 struct ftrace_graph_data
{
4431 unsigned long *table
;
4434 const struct seq_operations
*seq_ops
;
4438 __g_next(struct seq_file
*m
, loff_t
*pos
)
4440 struct ftrace_graph_data
*fgd
= m
->private;
4442 if (*pos
>= *fgd
->count
)
4444 return &fgd
->table
[*pos
];
4448 g_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
4451 return __g_next(m
, pos
);
4454 static void *g_start(struct seq_file
*m
, loff_t
*pos
)
4456 struct ftrace_graph_data
*fgd
= m
->private;
4458 mutex_lock(&graph_lock
);
4460 /* Nothing, tell g_show to print all functions are enabled */
4461 if (!*fgd
->count
&& !*pos
)
4464 return __g_next(m
, pos
);
4467 static void g_stop(struct seq_file
*m
, void *p
)
4469 mutex_unlock(&graph_lock
);
4472 static int g_show(struct seq_file
*m
, void *v
)
4474 unsigned long *ptr
= v
;
4479 if (ptr
== (unsigned long *)1) {
4480 struct ftrace_graph_data
*fgd
= m
->private;
4482 if (fgd
->table
== ftrace_graph_funcs
)
4483 seq_puts(m
, "#### all functions enabled ####\n");
4485 seq_puts(m
, "#### no functions disabled ####\n");
4489 seq_printf(m
, "%ps\n", (void *)*ptr
);
4494 static const struct seq_operations ftrace_graph_seq_ops
= {
4502 __ftrace_graph_open(struct inode
*inode
, struct file
*file
,
4503 struct ftrace_graph_data
*fgd
)
4507 mutex_lock(&graph_lock
);
4508 if ((file
->f_mode
& FMODE_WRITE
) &&
4509 (file
->f_flags
& O_TRUNC
)) {
4511 memset(fgd
->table
, 0, fgd
->size
* sizeof(*fgd
->table
));
4513 mutex_unlock(&graph_lock
);
4515 if (file
->f_mode
& FMODE_READ
) {
4516 ret
= seq_open(file
, fgd
->seq_ops
);
4518 struct seq_file
*m
= file
->private_data
;
4522 file
->private_data
= fgd
;
4528 ftrace_graph_open(struct inode
*inode
, struct file
*file
)
4530 struct ftrace_graph_data
*fgd
;
4532 if (unlikely(ftrace_disabled
))
4535 fgd
= kmalloc(sizeof(*fgd
), GFP_KERNEL
);
4539 fgd
->table
= ftrace_graph_funcs
;
4540 fgd
->size
= FTRACE_GRAPH_MAX_FUNCS
;
4541 fgd
->count
= &ftrace_graph_count
;
4542 fgd
->seq_ops
= &ftrace_graph_seq_ops
;
4544 return __ftrace_graph_open(inode
, file
, fgd
);
4548 ftrace_graph_notrace_open(struct inode
*inode
, struct file
*file
)
4550 struct ftrace_graph_data
*fgd
;
4552 if (unlikely(ftrace_disabled
))
4555 fgd
= kmalloc(sizeof(*fgd
), GFP_KERNEL
);
4559 fgd
->table
= ftrace_graph_notrace_funcs
;
4560 fgd
->size
= FTRACE_GRAPH_MAX_FUNCS
;
4561 fgd
->count
= &ftrace_graph_notrace_count
;
4562 fgd
->seq_ops
= &ftrace_graph_seq_ops
;
4564 return __ftrace_graph_open(inode
, file
, fgd
);
4568 ftrace_graph_release(struct inode
*inode
, struct file
*file
)
4570 if (file
->f_mode
& FMODE_READ
) {
4571 struct seq_file
*m
= file
->private_data
;
4574 seq_release(inode
, file
);
4576 kfree(file
->private_data
);
4583 ftrace_set_func(unsigned long *array
, int *idx
, int size
, char *buffer
)
4585 struct dyn_ftrace
*rec
;
4586 struct ftrace_page
*pg
;
4595 type
= filter_parse_regex(buffer
, strlen(buffer
), &search
, ¬);
4596 if (!not && *idx
>= size
)
4599 search_len
= strlen(search
);
4601 mutex_lock(&ftrace_lock
);
4603 if (unlikely(ftrace_disabled
)) {
4604 mutex_unlock(&ftrace_lock
);
4608 do_for_each_ftrace_rec(pg
, rec
) {
4610 if (ftrace_match_record(rec
, NULL
, search
, search_len
, type
)) {
4611 /* if it is in the array */
4613 for (i
= 0; i
< *idx
; i
++) {
4614 if (array
[i
] == rec
->ip
) {
4623 array
[(*idx
)++] = rec
->ip
;
4629 array
[i
] = array
[--(*idx
)];
4635 } while_for_each_ftrace_rec();
4637 mutex_unlock(&ftrace_lock
);
4646 ftrace_graph_write(struct file
*file
, const char __user
*ubuf
,
4647 size_t cnt
, loff_t
*ppos
)
4649 struct trace_parser parser
;
4650 ssize_t read
, ret
= 0;
4651 struct ftrace_graph_data
*fgd
= file
->private_data
;
4656 if (trace_parser_get_init(&parser
, FTRACE_BUFF_MAX
))
4659 read
= trace_get_user(&parser
, ubuf
, cnt
, ppos
);
4661 if (read
>= 0 && trace_parser_loaded((&parser
))) {
4662 parser
.buffer
[parser
.idx
] = 0;
4664 mutex_lock(&graph_lock
);
4666 /* we allow only one expression at a time */
4667 ret
= ftrace_set_func(fgd
->table
, fgd
->count
, fgd
->size
,
4670 mutex_unlock(&graph_lock
);
4676 trace_parser_put(&parser
);
4681 static const struct file_operations ftrace_graph_fops
= {
4682 .open
= ftrace_graph_open
,
4684 .write
= ftrace_graph_write
,
4685 .llseek
= tracing_lseek
,
4686 .release
= ftrace_graph_release
,
4689 static const struct file_operations ftrace_graph_notrace_fops
= {
4690 .open
= ftrace_graph_notrace_open
,
4692 .write
= ftrace_graph_write
,
4693 .llseek
= tracing_lseek
,
4694 .release
= ftrace_graph_release
,
4696 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent)
{

	trace_create_file("set_ftrace_filter", 0644, parent,
			  ops, &ftrace_filter_fops);

	trace_create_file("set_ftrace_notrace", 0644, parent,
			  ops, &ftrace_notrace_fops);
}

/*
 * The name "destroy_filter_files" is really a misnomer. Although
 * in the future, it may actually delete the files, but this is
 * really intended to make sure the ops passed in are disabled
 * and that when this function returns, the caller is free to
 * free the ops.
 *
 * The "destroy" name is only to match the "create" name that this
 * should be paired with.
 */
void ftrace_destroy_filter_files(struct ftrace_ops *ops)
{
	mutex_lock(&ftrace_lock);
	if (ops->flags & FTRACE_OPS_FL_ENABLED)
		ftrace_shutdown(ops, 0);
	ops->flags |= FTRACE_OPS_FL_DELETED;
	mutex_unlock(&ftrace_lock);
}
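/*
 * Illustrative sketch (not part of the original file): a tracer that
 * owns a private ftrace_ops and a tracefs directory can expose per-ops
 * filter files with the helper above; my_ops and my_dir are
 * placeholders:
 *
 *	ftrace_create_filter_files(&my_ops, my_dir);
 *	...
 *	ftrace_destroy_filter_files(&my_ops);	// before freeing my_ops
 */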
4728 static __init
int ftrace_init_dyn_tracefs(struct dentry
*d_tracer
)
4731 trace_create_file("available_filter_functions", 0444,
4732 d_tracer
, NULL
, &ftrace_avail_fops
);
4734 trace_create_file("enabled_functions", 0444,
4735 d_tracer
, NULL
, &ftrace_enabled_fops
);
4737 ftrace_create_filter_files(&global_ops
, d_tracer
);
4739 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4740 trace_create_file("set_graph_function", 0444, d_tracer
,
4742 &ftrace_graph_fops
);
4743 trace_create_file("set_graph_notrace", 0444, d_tracer
,
4745 &ftrace_graph_notrace_fops
);
4746 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static int ftrace_cmp_ips(const void *a, const void *b)
{
	const unsigned long *ipa = a;
	const unsigned long *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

static void ftrace_swap_ips(void *a, void *b, int size)
{
	unsigned long *ipa = a;
	unsigned long *ipb = b;
	unsigned long t;

	t = *ipa;
	*ipa = *ipb;
	*ipb = t;
}
4774 static int ftrace_process_locs(struct module
*mod
,
4775 unsigned long *start
,
4778 struct ftrace_page
*start_pg
;
4779 struct ftrace_page
*pg
;
4780 struct dyn_ftrace
*rec
;
4781 unsigned long count
;
4784 unsigned long flags
= 0; /* Shut up gcc */
4787 count
= end
- start
;
4792 sort(start
, count
, sizeof(*start
),
4793 ftrace_cmp_ips
, ftrace_swap_ips
);
4795 start_pg
= ftrace_allocate_pages(count
);
4799 mutex_lock(&ftrace_lock
);
4802 * Core and each module needs their own pages, as
4803 * modules will free them when they are removed.
4804 * Force a new page to be allocated for modules.
4807 WARN_ON(ftrace_pages
|| ftrace_pages_start
);
4808 /* First initialization */
4809 ftrace_pages
= ftrace_pages_start
= start_pg
;
4814 if (WARN_ON(ftrace_pages
->next
)) {
4815 /* Hmm, we have free pages? */
4816 while (ftrace_pages
->next
)
4817 ftrace_pages
= ftrace_pages
->next
;
4820 ftrace_pages
->next
= start_pg
;
4826 addr
= ftrace_call_adjust(*p
++);
4828 * Some architecture linkers will pad between
4829 * the different mcount_loc sections of different
4830 * object files to satisfy alignments.
4831 * Skip any NULL pointers.
4836 if (pg
->index
== pg
->size
) {
4837 /* We should have allocated enough */
4838 if (WARN_ON(!pg
->next
))
4843 rec
= &pg
->records
[pg
->index
++];
4847 /* We should have used all pages */
4850 /* Assign the last page to ftrace_pages */
4854 * We only need to disable interrupts on start up
4855 * because we are modifying code that an interrupt
4856 * may execute, and the modification is not atomic.
4857 * But for modules, nothing runs the code we modify
4858 * until we are finished with it, and there's no
4859 * reason to cause large interrupt latencies while we do it.
4862 local_irq_save(flags
);
4863 ftrace_update_code(mod
, start_pg
);
4865 local_irq_restore(flags
);
4868 mutex_unlock(&ftrace_lock
);
4873 #ifdef CONFIG_MODULES
4875 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4877 void ftrace_release_mod(struct module
*mod
)
4879 struct dyn_ftrace
*rec
;
4880 struct ftrace_page
**last_pg
;
4881 struct ftrace_page
*pg
;
4884 mutex_lock(&ftrace_lock
);
4886 if (ftrace_disabled
)
4890 * Each module has its own ftrace_pages, remove
4891 * them from the list.
4893 last_pg
= &ftrace_pages_start
;
4894 for (pg
= ftrace_pages_start
; pg
; pg
= *last_pg
) {
4895 rec
= &pg
->records
[0];
4896 if (within_module_core(rec
->ip
, mod
)) {
4898 * As core pages are first, the first
4899 * page should never be a module page.
4901 if (WARN_ON(pg
== ftrace_pages_start
))
4904 /* Check if we are deleting the last page */
4905 if (pg
== ftrace_pages
)
4906 ftrace_pages
= next_to_ftrace_page(last_pg
);
4908 *last_pg
= pg
->next
;
4909 order
= get_count_order(pg
->size
/ ENTRIES_PER_PAGE
);
4910 free_pages((unsigned long)pg
->records
, order
);
4913 last_pg
= &pg
->next
;
4916 mutex_unlock(&ftrace_lock
);
static void ftrace_init_module(struct module *mod,
			       unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_process_locs(mod, start, end);
}

void ftrace_module_init(struct module *mod)
{
	ftrace_init_module(mod, mod->ftrace_callsites,
			   mod->ftrace_callsites +
			   mod->num_ftrace_callsites);
}

static int ftrace_module_notify_exit(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct module *mod = data;

	if (val == MODULE_STATE_GOING)
		ftrace_release_mod(mod);

	return 0;
}
#else
static int ftrace_module_notify_exit(struct notifier_block *self,
				     unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block ftrace_module_exit_nb = {
	.notifier_call = ftrace_module_notify_exit,
	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
};
void __init ftrace_init(void)
{
	extern unsigned long __start_mcount_loc[];
	extern unsigned long __stop_mcount_loc[];
	unsigned long count, flags;
	int ret;

	local_irq_save(flags);
	ret = ftrace_dyn_arch_init();
	local_irq_restore(flags);
	if (ret)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;
	if (!count) {
		pr_info("ftrace: No functions to be traced?\n");
		goto failed;
	}

	pr_info("ftrace: allocating %ld entries in %ld pages\n",
		count, count / ENTRIES_PER_PAGE + 1);

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_process_locs(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	ret = register_module_notifier(&ftrace_module_exit_nb);
	if (ret)
		pr_warning("Failed to register trace ftrace module exit notifier\n");

	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}
4996 /* Do nothing if arch does not support this */
4997 void __weak
arch_ftrace_update_trampoline(struct ftrace_ops
*ops
)
5001 static void ftrace_update_trampoline(struct ftrace_ops
*ops
)
5005 * Currently there's no safe way to free a trampoline when the kernel
5006 * is configured with PREEMPT. That is because a task could be preempted
5007 * when it jumped to the trampoline, it may be preempted for a long time
5008 * depending on the system load, and currently there's no way to know
5009 * when it will be off the trampoline. If the trampoline is freed
5010 * too early, when the task runs again, it will be executing on freed
5013 #ifdef CONFIG_PREEMPT
5014 /* Currently, only non dynamic ops can have a trampoline */
5015 if (ops
->flags
& FTRACE_OPS_FL_DYNAMIC
)
5019 arch_ftrace_update_trampoline(ops
);
5024 static struct ftrace_ops global_ops
= {
5025 .func
= ftrace_stub
,
5026 .flags
= FTRACE_OPS_FL_RECURSION_SAFE
| FTRACE_OPS_FL_INITIALIZED
,
5029 static int __init
ftrace_nodyn_init(void)
5034 core_initcall(ftrace_nodyn_init
);
5036 static inline int ftrace_init_dyn_tracefs(struct dentry
*d_tracer
) { return 0; }
5037 static inline void ftrace_startup_enable(int command
) { }
5038 static inline void ftrace_startup_all(int command
) { }
5039 /* Keep as macros so we do not need to define the commands */
5040 # define ftrace_startup(ops, command) \
5042 int ___ret = __register_ftrace_function(ops); \
5044 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
5047 # define ftrace_shutdown(ops, command) \
5049 int ___ret = __unregister_ftrace_function(ops); \
5051 (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
5055 # define ftrace_startup_sysctl() do { } while (0)
5056 # define ftrace_shutdown_sysctl() do { } while (0)
5059 ftrace_ops_test(struct ftrace_ops
*ops
, unsigned long ip
, void *regs
)
5064 static void ftrace_update_trampoline(struct ftrace_ops
*ops
)
5068 #endif /* CONFIG_DYNAMIC_FTRACE */
5070 __init
void ftrace_init_global_array_ops(struct trace_array
*tr
)
5072 tr
->ops
= &global_ops
;
5073 tr
->ops
->private = tr
;
5076 void ftrace_init_array_ops(struct trace_array
*tr
, ftrace_func_t func
)
5078 /* If we filter on pids, update to use the pid function */
5079 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) {
5080 if (WARN_ON(tr
->ops
->func
!= ftrace_stub
))
5081 printk("ftrace ops had %pS for function\n",
5083 /* Only the top level instance does pid tracing */
5084 if (!list_empty(&ftrace_pids
)) {
5085 set_ftrace_pid_function(func
);
5086 func
= ftrace_pid_func
;
5089 tr
->ops
->func
= func
;
5090 tr
->ops
->private = tr
;
5093 void ftrace_reset_array_ops(struct trace_array
*tr
)
5095 tr
->ops
->func
= ftrace_stub
;
5099 ftrace_ops_control_func(unsigned long ip
, unsigned long parent_ip
,
5100 struct ftrace_ops
*op
, struct pt_regs
*regs
)
5102 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT
)))
5106 * Some of the ops may be dynamically allocated,
5107 * they must be freed after a synchronize_sched().
5109 preempt_disable_notrace();
5110 trace_recursion_set(TRACE_CONTROL_BIT
);
5113 * Control funcs (perf) uses RCU. Only trace if
5114 * RCU is currently active.
5116 if (!rcu_is_watching())
5119 do_for_each_ftrace_op(op
, ftrace_control_list
) {
5120 if (!(op
->flags
& FTRACE_OPS_FL_STUB
) &&
5121 !ftrace_function_local_disabled(op
) &&
5122 ftrace_ops_test(op
, ip
, regs
))
5123 op
->func(ip
, parent_ip
, op
, regs
);
5124 } while_for_each_ftrace_op(op
);
5126 trace_recursion_clear(TRACE_CONTROL_BIT
);
5127 preempt_enable_notrace();
5130 static struct ftrace_ops control_ops
= {
5131 .func
= ftrace_ops_control_func
,
5132 .flags
= FTRACE_OPS_FL_RECURSION_SAFE
| FTRACE_OPS_FL_INITIALIZED
,
5133 INIT_OPS_HASH(control_ops
)
static inline void
__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *ignored, struct pt_regs *regs)
{
	struct ftrace_ops *op;
	int bit;

	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_sched().
	 */
	preempt_disable_notrace();
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (ftrace_ops_test(op, ip, regs)) {
			if (FTRACE_WARN_ON(!op->func)) {
				pr_warn("op=%p %pS\n", op, op);
				goto out;
			}
			op->func(ip, parent_ip, op, regs);
		}
	} while_for_each_ftrace_op(op);
out:
	preempt_enable_notrace();
	trace_clear_recursion(bit);
}
/*
 * Some archs only support passing ip and parent_ip. Even though
 * the list function ignores the op parameter, we do not want any
 * C side effects, where a function is called without the caller
 * sending a third parameter.
 * Archs are to support both the regs and ftrace_ops at the same time.
 * If they support ftrace_ops, it is assumed they support regs.
 * If callbacks want to use regs, they must either check for regs
 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
 * An architecture can pass partial regs with ftrace_ops and still
 * set the ARCH_SUPPORTS_FTRACE_OPS.
 */
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs)
{
	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
}
#else
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
{
	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
#endif
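/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * It shows what the comment above asks of callbacks that want pt_regs: on
 * architectures that do not save regs the pointer is NULL and must be
 * checked before use.  The callback name is hypothetical.
 */
#if 0
static void notrace example_regs_aware_callback(unsigned long ip,
						unsigned long parent_ip,
						struct ftrace_ops *op,
						struct pt_regs *regs)
{
	if (regs)
		trace_printk("%pS called from %pS (pc=%lx)\n", (void *)ip,
			     (void *)parent_ip, instruction_pointer(regs));
	else
		trace_printk("%pS called from %pS (no regs)\n", (void *)ip,
			     (void *)parent_ip);
}
#endif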
/*
 * If there's only one function registered but it does not support
 * recursion, this function will be called by the mcount trampoline.
 * This function will handle recursion protection.
 */
static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;

	op->func(ip, parent_ip, op, regs);

	trace_clear_recursion(bit);
}
/**
 * ftrace_ops_get_func - get the function a trampoline should call
 * @ops: the ops to get the function for
 *
 * Normally the mcount trampoline will call the ops->func, but there
 * are times that it should not. For example, if the ops does not
 * have its own recursion protection, then it should call the
 * ftrace_ops_recurs_func() instead.
 *
 * Returns the function that the trampoline should call for @ops.
 */
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
{
	/*
	 * If the func handles its own recursion, call it directly.
	 * Otherwise call the recursion protected function that
	 * will call the ftrace ops function.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
		return ftrace_ops_recurs_func;

	return ops->func;
}
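/*
 * Editor's note: illustrative fragment only.  The intended use of
 * ftrace_ops_get_func() when wiring a trampoline up to a single ops is
 * roughly (names hypothetical):
 *
 *	ftrace_func_t func = ftrace_ops_get_func(ops);
 *	func(ip, parent_ip, ops, regs);
 *
 * If ops set FTRACE_OPS_FL_RECURSION_SAFE, func is simply ops->func;
 * otherwise it is ftrace_ops_recurs_func(), which takes the recursion bit,
 * calls ops->func, and clears the bit again.
 */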
static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}
static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}
static void clear_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(pid);
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
static int ftrace_pid_add(int p)
{
	struct pid *pid;
	struct ftrace_pid *fpid;
	int ret = -EINVAL;

	mutex_lock(&ftrace_lock);

	if (!p)
		pid = ftrace_swapper_pid;
	else
		pid = find_get_pid(p);

	if (!pid)
		goto out;

	ret = 0;

	list_for_each_entry(fpid, &ftrace_pids, list)
		if (fpid->pid == pid)
			goto out_put;

	ret = -ENOMEM;

	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
	if (!fpid)
		goto out_put;

	list_add(&fpid->list, &ftrace_pids);
	fpid->pid = pid;

	set_ftrace_pid_task(pid);

	ftrace_update_pid_func();

	ftrace_startup_all(0);

	mutex_unlock(&ftrace_lock);
	return 0;

out_put:
	if (pid != ftrace_swapper_pid)
		put_pid(pid);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
static void ftrace_pid_reset(void)
{
	struct ftrace_pid *fpid, *safe;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
		struct pid *pid = fpid->pid;

		clear_ftrace_pid_task(pid);

		list_del(&fpid->list);
		kfree(fpid);
	}

	ftrace_update_pid_func();
	ftrace_startup_all(0);

	mutex_unlock(&ftrace_lock);
}
static void *fpid_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ftrace_lock);

	if (list_empty(&ftrace_pids) && (!*pos))
		return (void *) 1;

	return seq_list_start(&ftrace_pids, *pos);
}
static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	if (v == (void *)1)
		return NULL;

	return seq_list_next(v, &ftrace_pids, pos);
}
static void fpid_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
static int fpid_show(struct seq_file *m, void *v)
{
	const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);

	if (v == (void *)1) {
		seq_puts(m, "no pid\n");
		return 0;
	}

	if (fpid->pid == ftrace_swapper_pid)
		seq_puts(m, "swapper tasks\n");
	else
		seq_printf(m, "%u\n", pid_vnr(fpid->pid));

	return 0;
}
static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};
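/*
 * Editor's note: with the seq_file callbacks above, reading set_ftrace_pid
 * typically looks like this (tracefs mount point assumed):
 *
 *	# cat /sys/kernel/tracing/set_ftrace_pid
 *	no pid			<- the ftrace_pids list is empty
 *	swapper tasks		<- entry for ftrace_swapper_pid (pid 0)
 *	1234			<- entry for an ordinary pid
 *
 * Only the lines matching the registered entries (or the single "no pid"
 * line) actually appear.
 */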
static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_pid_reset();

	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &ftrace_pid_sops);

	return ret;
}
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64], *tmp;
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/*
	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
	 * to clean the filter quietly.
	 */
	tmp = strstrip(buf);
	if (strlen(tmp) == 0)
		return 1;

	ret = kstrtol(tmp, 10, &val);
	if (ret < 0)
		return ret;

	ret = ftrace_pid_add(val);

	return ret ? ret : cnt;
}
static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	return 0;
}
static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= tracing_lseek,
	.release	= ftrace_pid_release,
};
static __init int ftrace_init_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	ftrace_init_dyn_tracefs(d_tracer);

	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			    NULL, &ftrace_pid_fops);

	ftrace_profile_tracefs(d_tracer);

	return 0;
}
fs_initcall(ftrace_init_tracefs);
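/*
 * Editor's note: illustrative user-space sketch, not kernel code.  It shows
 * the intended use of the set_ftrace_pid file created above; the tracefs
 * mount point /sys/kernel/tracing is an assumption.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	int fd = open("/sys/kernel/tracing/set_ftrace_pid", O_WRONLY);

	if (fd < 0)
		return 1;

	/* Limit the function tracer to the current process */
	snprintf(buf, sizeof(buf), "%d\n", getpid());
	if (write(fd, buf, strlen(buf)) < 0)
		perror("write");

	close(fd);
	return 0;
}
#endif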
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * Test if ftrace is dead or not.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret = -1;

	ftrace_ops_init(ops);

	mutex_lock(&ftrace_lock);

	ret = ftrace_startup(ops, 0);

	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);
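/*
 * Editor's note: illustrative sketch of a module using the two exported
 * entry points above; every my_* name is hypothetical.  As documented for
 * register_ftrace_function(), the callback and everything it calls must be
 * notrace or otherwise recursion-protected.
 */
#if 0
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* called for every traced function; keep this path cheap */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
#endif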
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		/* we are starting ftrace again */
		if (ftrace_ops_list != &ftrace_list_end)
			update_ftrace_function();

		ftrace_startup_sysctl();

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
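/*
 * Editor's note: this handler backs the kernel.ftrace_enabled sysctl, so it
 * is normally exercised from user space, e.g.:
 *
 *	# sysctl kernel.ftrace_enabled=0
 *	# echo 1 > /proc/sys/kernel/ftrace_enabled
 */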
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static struct ftrace_ops graph_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				   FTRACE_OPS_FL_INITIALIZED |
				   FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
static void
ftrace_graph_probe_sched_switch(void *ignore,
			struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}
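/*
 * Editor's note: a worked example of the adjustment above.  With sleep-time
 * accounting disabled, if "next" was switched out at t = 100us and is
 * switched back in at t = 350us, timestamp becomes 250us and every pending
 * entry on next->ret_stack has its calltime pushed forward by 250us, so the
 * reported durations exclude the time spent off the CPU.
 */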
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}

	return NOTIFY_DONE;
}
static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;

	return __ftrace_graph_entry(trace);
}
/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer requires testing the
 * function against the global ops, and not just trace any function
 * that any ftrace_ops has registered.
 */
static void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it is the function
	 * being traced.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}
static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
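/*
 * Editor's note: illustrative sketch of a register_ftrace_graph() caller;
 * the my_* names are hypothetical.  The entry handler returns nonzero to
 * trace the function's return, zero to skip it.
 */
#if 0
static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* trace everything the graph filter allows */
}

static void notrace my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the measured duration */
}

static int __init my_graph_init(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}
#endif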
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * Function graph does not allocate the trampoline, but
	 * other global_ops do. We need to reset the ALLOC_TRAMP flag
	 * if one was used.
	 */
	global_ops.trampoline = save_global_trampoline;
	if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
		global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
#endif

 out:
	mutex_unlock(&ftrace_lock);
}
static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}
/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */