kernel/trace/ftrace.c
1 /*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code in the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/module.h>
26 #include <linux/ftrace.h>
27 #include <linux/sysctl.h>
28 #include <linux/slab.h>
29 #include <linux/ctype.h>
30 #include <linux/list.h>
31 #include <linux/hash.h>
32 #include <linux/rcupdate.h>
34 #include <trace/events/sched.h>
36 #include <asm/setup.h>
38 #include "trace_output.h"
39 #include "trace_stat.h"
41 #define FTRACE_WARN_ON(cond) \
42 ({ \
43 int ___r = cond; \
44 if (WARN_ON(___r)) \
45 ftrace_kill(); \
46 ___r; \
47 })
49 #define FTRACE_WARN_ON_ONCE(cond) \
50 ({ \
51 int ___r = cond; \
52 if (WARN_ON_ONCE(___r)) \
53 ftrace_kill(); \
54 ___r; \
55 })
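Both macros use GCC's statement-expression extension: a ({ ... }) block evaluates its statements and yields the value of the last one, so the condition is evaluated exactly once while its result stays usable by the caller. A minimal userspace sketch of the same pattern (CHECK and its message are hypothetical, not kernel API):

	#include <stdio.h>

	#define CHECK(cond)					\
	({							\
		int __c = (cond);				\
		if (__c)					\
			fprintf(stderr, "check failed\n");	\
		__c;						\
	})

	int clamp_nonneg(int x)
	{
		/* usable inside an expression, just like FTRACE_WARN_ON() */
		if (CHECK(x < 0))
			return 0;
		return x;
	}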
57 /* hash bits for specific function selection */
58 #define FTRACE_HASH_BITS 7
59 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
60 #define FTRACE_HASH_DEFAULT_BITS 10
61 #define FTRACE_HASH_MAX_BITS 12
63 /* ftrace_enabled is a method to turn ftrace on or off */
64 int ftrace_enabled __read_mostly;
65 static int last_ftrace_enabled;
67 /* Quick disabling of function tracer. */
68 int function_trace_stop;
70 /* List for set_ftrace_pid's pids. */
71 LIST_HEAD(ftrace_pids);
72 struct ftrace_pid {
73 struct list_head list;
74 struct pid *pid;
75 };
78 * ftrace_disabled is set when an anomaly is discovered.
79 * ftrace_disabled is much stronger than ftrace_enabled.
81 static int ftrace_disabled __read_mostly;
83 static DEFINE_MUTEX(ftrace_lock);
85 static struct ftrace_ops ftrace_list_end __read_mostly = {
86 .func = ftrace_stub,
87 };
89 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
90 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
91 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
92 static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
93 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
94 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
95 static struct ftrace_ops global_ops;
97 static void
98 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
101 * Traverse the ftrace_global_list, invoking all entries. The reason that we
102 * can use rcu_dereference_raw() is that elements removed from this list
103 * are simply leaked, so there is no need to interact with a grace-period
104 * mechanism. The rcu_dereference_raw() calls are needed to handle
105 * concurrent insertions into the ftrace_global_list.
107 * Silly Alpha and silly pointer-speculation compiler optimizations!
109 static void ftrace_global_list_func(unsigned long ip,
110 unsigned long parent_ip)
112 struct ftrace_ops *op;
114 if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
115 return;
117 trace_recursion_set(TRACE_GLOBAL_BIT);
118 op = rcu_dereference_raw(ftrace_global_list); /*see above*/
119 while (op != &ftrace_list_end) {
120 op->func(ip, parent_ip);
121 op = rcu_dereference_raw(op->next); /*see above*/
123 trace_recursion_clear(TRACE_GLOBAL_BIT);
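The loop above is the classic RCU-published singly-linked-list pattern in miniature: a writer makes a fully initialized node visible with rcu_assign_pointer(), readers fetch each link through rcu_dereference_raw(), and since removed entries are leaked rather than freed there is no grace period to wait for. A stripped-down sketch of the same reader/writer pair (the types and names here are illustrative, not the kernel's):

	struct node {
		struct node *next;
		void (*func)(void);
	};

	static struct node tail;		/* sentinel: the list never ends in NULL */
	static struct node *head = &tail;

	static void publish(struct node *n)	/* writer side, under a lock */
	{
		n->next = head;
		rcu_assign_pointer(head, n);	/* barrier: n is fully set up first */
	}

	static void visit_all(void)		/* reader side, may run concurrently */
	{
		struct node *n = rcu_dereference_raw(head);

		while (n != &tail) {
			n->func();
			n = rcu_dereference_raw(n->next);
		}
	}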
126 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
128 if (!test_tsk_trace_trace(current))
129 return;
131 ftrace_pid_function(ip, parent_ip);
134 static void set_ftrace_pid_function(ftrace_func_t func)
136 /* do not set ftrace_pid_function to itself! */
137 if (func != ftrace_pid_func)
138 ftrace_pid_function = func;
142 * clear_ftrace_function - reset the ftrace function
144 * This NULLs the ftrace function and in essence stops
145 * tracing. There may be lag
147 void clear_ftrace_function(void)
149 ftrace_trace_function = ftrace_stub;
150 __ftrace_trace_function = ftrace_stub;
151 __ftrace_trace_function_delay = ftrace_stub;
152 ftrace_pid_function = ftrace_stub;
155 #undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
156 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
158 * For those archs that do not test ftrace_trace_stop in their
159 * mcount call site, we need to do it from C.
161 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
163 if (function_trace_stop)
164 return;
166 __ftrace_trace_function(ip, parent_ip);
168 #endif
170 static void update_global_ops(void)
172 ftrace_func_t func;
175 * If there's only one function registered, then call that
176 * function directly. Otherwise, we need to iterate over the
177 * registered callers.
179 if (ftrace_global_list == &ftrace_list_end ||
180 ftrace_global_list->next == &ftrace_list_end)
181 func = ftrace_global_list->func;
182 else
183 func = ftrace_global_list_func;
185 /* If we filter on pids, update to use the pid function */
186 if (!list_empty(&ftrace_pids)) {
187 set_ftrace_pid_function(func);
188 func = ftrace_pid_func;
191 global_ops.func = func;
194 static void update_ftrace_function(void)
196 ftrace_func_t func;
198 update_global_ops();
201 * If we are at the end of the list and this ops is
202 * not dynamic, then have the mcount trampoline call
203 * the function directly
205 if (ftrace_ops_list == &ftrace_list_end ||
206 (ftrace_ops_list->next == &ftrace_list_end &&
207 !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
208 func = ftrace_ops_list->func;
209 else
210 func = ftrace_ops_list_func;
212 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
213 ftrace_trace_function = func;
214 #else
215 #ifdef CONFIG_DYNAMIC_FTRACE
216 /* do not update till all functions have been modified */
217 __ftrace_trace_function_delay = func;
218 #else
219 __ftrace_trace_function = func;
220 #endif
221 ftrace_trace_function = ftrace_test_stop_func;
222 #endif
225 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
227 ops->next = *list;
229 * We are entering ops into the list but another
230 * CPU might be walking that list. We need to make sure
231 * the ops->next pointer is valid before another CPU sees
232 * the ops pointer included into the list.
234 rcu_assign_pointer(*list, ops);
237 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
239 struct ftrace_ops **p;
242 * If we are removing the last function, then simply point
243 * to the ftrace_stub.
245 if (*list == ops && ops->next == &ftrace_list_end) {
246 *list = &ftrace_list_end;
247 return 0;
250 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
251 if (*p == ops)
252 break;
254 if (*p != ops)
255 return -1;
257 *p = (*p)->next;
258 return 0;
261 static int __register_ftrace_function(struct ftrace_ops *ops)
263 if (ftrace_disabled)
264 return -ENODEV;
266 if (FTRACE_WARN_ON(ops == &global_ops))
267 return -EINVAL;
269 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
270 return -EBUSY;
272 if (!core_kernel_data((unsigned long)ops))
273 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
275 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
276 int first = ftrace_global_list == &ftrace_list_end;
277 add_ftrace_ops(&ftrace_global_list, ops);
278 ops->flags |= FTRACE_OPS_FL_ENABLED;
279 if (first)
280 add_ftrace_ops(&ftrace_ops_list, &global_ops);
281 } else
282 add_ftrace_ops(&ftrace_ops_list, ops);
284 if (ftrace_enabled)
285 update_ftrace_function();
287 return 0;
290 static int __unregister_ftrace_function(struct ftrace_ops *ops)
292 int ret;
294 if (ftrace_disabled)
295 return -ENODEV;
297 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
298 return -EBUSY;
300 if (FTRACE_WARN_ON(ops == &global_ops))
301 return -EINVAL;
303 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
304 ret = remove_ftrace_ops(&ftrace_global_list, ops);
305 if (!ret && ftrace_global_list == &ftrace_list_end)
306 ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
307 if (!ret)
308 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
309 } else
310 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
312 if (ret < 0)
313 return ret;
315 if (ftrace_enabled)
316 update_ftrace_function();
319 * Dynamic ops may be freed, we must make sure that all
320 * callers are done before leaving this function.
322 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
323 synchronize_sched();
325 return 0;
328 static void ftrace_update_pid_func(void)
330 /* Only do something if we are tracing something */
331 if (ftrace_trace_function == ftrace_stub)
332 return;
334 update_ftrace_function();
337 #ifdef CONFIG_FUNCTION_PROFILER
338 struct ftrace_profile {
339 struct hlist_node node;
340 unsigned long ip;
341 unsigned long counter;
342 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
343 unsigned long long time;
344 unsigned long long time_squared;
345 #endif
346 };
348 struct ftrace_profile_page {
349 struct ftrace_profile_page *next;
350 unsigned long index;
351 struct ftrace_profile records[];
352 };
354 struct ftrace_profile_stat {
355 atomic_t disabled;
356 struct hlist_head *hash;
357 struct ftrace_profile_page *pages;
358 struct ftrace_profile_page *start;
359 struct tracer_stat stat;
360 };
362 #define PROFILE_RECORDS_SIZE \
363 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
365 #define PROFILES_PER_PAGE \
366 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
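Back-of-the-envelope: with a 4096-byte page and, say, a 40-byte struct ftrace_profile (the exact size depends on the architecture and on whether the graph-tracer fields above are configured in), PROFILE_RECORDS_SIZE comes to roughly 4080 bytes and PROFILES_PER_PAGE to about 100, so the 20000-function estimate used at init time below costs on the order of 200 pages, i.e. ~800 KB, per CPU.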
368 static int ftrace_profile_bits __read_mostly;
369 static int ftrace_profile_enabled __read_mostly;
371 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
372 static DEFINE_MUTEX(ftrace_profile_lock);
374 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
376 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
378 static void *
379 function_stat_next(void *v, int idx)
381 struct ftrace_profile *rec = v;
382 struct ftrace_profile_page *pg;
384 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
386 again:
387 if (idx != 0)
388 rec++;
390 if ((void *)rec >= (void *)&pg->records[pg->index]) {
391 pg = pg->next;
392 if (!pg)
393 return NULL;
394 rec = &pg->records[0];
395 if (!rec->counter)
396 goto again;
399 return rec;
402 static void *function_stat_start(struct tracer_stat *trace)
404 struct ftrace_profile_stat *stat =
405 container_of(trace, struct ftrace_profile_stat, stat);
407 if (!stat || !stat->start)
408 return NULL;
410 return function_stat_next(&stat->start->records[0], 0);
413 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
414 /* function graph compares on total time */
415 static int function_stat_cmp(void *p1, void *p2)
417 struct ftrace_profile *a = p1;
418 struct ftrace_profile *b = p2;
420 if (a->time < b->time)
421 return -1;
422 if (a->time > b->time)
423 return 1;
424 else
425 return 0;
427 #else
428 /* not function graph compares against hits */
429 static int function_stat_cmp(void *p1, void *p2)
431 struct ftrace_profile *a = p1;
432 struct ftrace_profile *b = p2;
434 if (a->counter < b->counter)
435 return -1;
436 if (a->counter > b->counter)
437 return 1;
438 else
439 return 0;
441 #endif
443 static int function_stat_headers(struct seq_file *m)
445 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
446 seq_printf(m, " Function "
447 "Hit Time Avg s^2\n"
448 " -------- "
449 "--- ---- --- ---\n");
450 #else
451 seq_printf(m, " Function Hit\n"
452 " -------- ---\n");
453 #endif
454 return 0;
457 static int function_stat_show(struct seq_file *m, void *v)
459 struct ftrace_profile *rec = v;
460 char str[KSYM_SYMBOL_LEN];
461 int ret = 0;
462 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
463 static struct trace_seq s;
464 unsigned long long avg;
465 unsigned long long stddev;
466 #endif
467 mutex_lock(&ftrace_profile_lock);
469 /* we raced with function_profile_reset() */
470 if (unlikely(rec->counter == 0)) {
471 ret = -EBUSY;
472 goto out;
475 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
476 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
478 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
479 seq_printf(m, " ");
480 avg = rec->time;
481 do_div(avg, rec->counter);
483 /* Sample standard deviation (s^2) */
484 if (rec->counter <= 1)
485 stddev = 0;
486 else {
487 stddev = rec->time_squared - rec->counter * avg * avg;
489 * Divide by only 1000 for the ns^2 -> us^2 conversion.
490 * trace_print_graph_duration will divide 1000 again.
492 do_div(stddev, (rec->counter - 1) * 1000);
495 trace_seq_init(&s);
496 trace_print_graph_duration(rec->time, &s);
497 trace_seq_puts(&s, " ");
498 trace_print_graph_duration(avg, &s);
499 trace_seq_puts(&s, " ");
500 trace_print_graph_duration(stddev, &s);
501 trace_print_seq(m, &s);
502 #endif
503 seq_putc(m, '\n');
504 out:
505 mutex_unlock(&ftrace_profile_lock);
507 return ret;
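The stddev block above is the textbook sample-variance identity rearranged to use the two running sums kept per function: with n = rec->counter and avg = rec->time / n,

	s^2 = (sum(t^2) - n * avg^2) / (n - 1)

The extra factor of 1000 in the divisor converts ns^2 halfway to us^2; trace_print_graph_duration() divides by 1000 once more when formatting.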
510 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
512 struct ftrace_profile_page *pg;
514 pg = stat->pages = stat->start;
516 while (pg) {
517 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
518 pg->index = 0;
519 pg = pg->next;
522 memset(stat->hash, 0,
523 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
526 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
528 struct ftrace_profile_page *pg;
529 int functions;
530 int pages;
531 int i;
533 /* If we already allocated, do nothing */
534 if (stat->pages)
535 return 0;
537 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
538 if (!stat->pages)
539 return -ENOMEM;
541 #ifdef CONFIG_DYNAMIC_FTRACE
542 functions = ftrace_update_tot_cnt;
543 #else
545 * We do not know the number of functions that exist because
546 * dynamic tracing is what counts them. With past experience
547 * we have around 20K functions. That should be more than enough.
548 * It is highly unlikely we will execute every function in
549 * the kernel.
551 functions = 20000;
552 #endif
554 pg = stat->start = stat->pages;
556 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
558 for (i = 0; i < pages; i++) {
559 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
560 if (!pg->next)
561 goto out_free;
562 pg = pg->next;
565 return 0;
567 out_free:
568 pg = stat->start;
569 while (pg) {
570 unsigned long tmp = (unsigned long)pg;
572 pg = pg->next;
573 free_page(tmp);
576 free_page((unsigned long)stat->pages);
577 stat->pages = NULL;
578 stat->start = NULL;
580 return -ENOMEM;
583 static int ftrace_profile_init_cpu(int cpu)
585 struct ftrace_profile_stat *stat;
586 int size;
588 stat = &per_cpu(ftrace_profile_stats, cpu);
590 if (stat->hash) {
591 /* If the profile is already created, simply reset it */
592 ftrace_profile_reset(stat);
593 return 0;
597 * We are profiling all functions, but usually only a few thousand
598 * functions are hit. We'll make a hash of 1024 items.
600 size = FTRACE_PROFILE_HASH_SIZE;
602 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
604 if (!stat->hash)
605 return -ENOMEM;
607 if (!ftrace_profile_bits) {
608 size--;
610 for (; size; size >>= 1)
611 ftrace_profile_bits++;
614 /* Preallocate the function profiling pages */
615 if (ftrace_profile_pages_init(stat) < 0) {
616 kfree(stat->hash);
617 stat->hash = NULL;
618 return -ENOMEM;
621 return 0;
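The ftrace_profile_bits loop above just computes log2 of the power-of-two hash size: decrementing 1024 gives 0x3ff, which takes ten right-shifts to empty, so ftrace_profile_bits becomes 10 and hash_long(ip, 10) indexes the 1024 buckets. An equivalent one-liner using the kernel's ilog2() helper would be:

	ftrace_profile_bits = ilog2(FTRACE_PROFILE_HASH_SIZE);	/* 1024 -> 10 */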
624 static int ftrace_profile_init(void)
626 int cpu;
627 int ret = 0;
629 for_each_online_cpu(cpu) {
630 ret = ftrace_profile_init_cpu(cpu);
631 if (ret)
632 break;
635 return ret;
638 /* interrupts must be disabled */
639 static struct ftrace_profile *
640 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
642 struct ftrace_profile *rec;
643 struct hlist_head *hhd;
644 struct hlist_node *n;
645 unsigned long key;
647 key = hash_long(ip, ftrace_profile_bits);
648 hhd = &stat->hash[key];
650 if (hlist_empty(hhd))
651 return NULL;
653 hlist_for_each_entry_rcu(rec, n, hhd, node) {
654 if (rec->ip == ip)
655 return rec;
658 return NULL;
661 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
662 struct ftrace_profile *rec)
664 unsigned long key;
666 key = hash_long(rec->ip, ftrace_profile_bits);
667 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
671 * The memory is already allocated; this simply finds a new record to use.
673 static struct ftrace_profile *
674 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
676 struct ftrace_profile *rec = NULL;
678 /* prevent recursion (from NMIs) */
679 if (atomic_inc_return(&stat->disabled) != 1)
680 goto out;
683 * Try to find the function again since an NMI
684 * could have added it
686 rec = ftrace_find_profiled_func(stat, ip);
687 if (rec)
688 goto out;
690 if (stat->pages->index == PROFILES_PER_PAGE) {
691 if (!stat->pages->next)
692 goto out;
693 stat->pages = stat->pages->next;
696 rec = &stat->pages->records[stat->pages->index++];
697 rec->ip = ip;
698 ftrace_add_profile(stat, rec);
700 out:
701 atomic_dec(&stat->disabled);
703 return rec;
706 static void
707 function_profile_call(unsigned long ip, unsigned long parent_ip)
709 struct ftrace_profile_stat *stat;
710 struct ftrace_profile *rec;
711 unsigned long flags;
713 if (!ftrace_profile_enabled)
714 return;
716 local_irq_save(flags);
718 stat = &__get_cpu_var(ftrace_profile_stats);
719 if (!stat->hash || !ftrace_profile_enabled)
720 goto out;
722 rec = ftrace_find_profiled_func(stat, ip);
723 if (!rec) {
724 rec = ftrace_profile_alloc(stat, ip);
725 if (!rec)
726 goto out;
729 rec->counter++;
730 out:
731 local_irq_restore(flags);
734 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
735 static int profile_graph_entry(struct ftrace_graph_ent *trace)
737 function_profile_call(trace->func, 0);
738 return 1;
741 static void profile_graph_return(struct ftrace_graph_ret *trace)
743 struct ftrace_profile_stat *stat;
744 unsigned long long calltime;
745 struct ftrace_profile *rec;
746 unsigned long flags;
748 local_irq_save(flags);
749 stat = &__get_cpu_var(ftrace_profile_stats);
750 if (!stat->hash || !ftrace_profile_enabled)
751 goto out;
753 /* If the calltime was zero'd ignore it */
754 if (!trace->calltime)
755 goto out;
757 calltime = trace->rettime - trace->calltime;
759 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
760 int index;
762 index = trace->depth;
764 /* Append this call time to the parent time to subtract */
765 if (index)
766 current->ret_stack[index - 1].subtime += calltime;
768 if (current->ret_stack[index].subtime < calltime)
769 calltime -= current->ret_stack[index].subtime;
770 else
771 calltime = 0;
774 rec = ftrace_find_profiled_func(stat, trace->func);
775 if (rec) {
776 rec->time += calltime;
777 rec->time_squared += calltime * calltime;
780 out:
781 local_irq_restore(flags);
784 static int register_ftrace_profiler(void)
786 return register_ftrace_graph(&profile_graph_return,
787 &profile_graph_entry);
790 static void unregister_ftrace_profiler(void)
792 unregister_ftrace_graph();
794 #else
795 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
796 .func = function_profile_call,
797 };
799 static int register_ftrace_profiler(void)
801 return register_ftrace_function(&ftrace_profile_ops);
804 static void unregister_ftrace_profiler(void)
806 unregister_ftrace_function(&ftrace_profile_ops);
808 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
810 static ssize_t
811 ftrace_profile_write(struct file *filp, const char __user *ubuf,
812 size_t cnt, loff_t *ppos)
814 unsigned long val;
815 int ret;
817 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
818 if (ret)
819 return ret;
821 val = !!val;
823 mutex_lock(&ftrace_profile_lock);
824 if (ftrace_profile_enabled ^ val) {
825 if (val) {
826 ret = ftrace_profile_init();
827 if (ret < 0) {
828 cnt = ret;
829 goto out;
832 ret = register_ftrace_profiler();
833 if (ret < 0) {
834 cnt = ret;
835 goto out;
837 ftrace_profile_enabled = 1;
838 } else {
839 ftrace_profile_enabled = 0;
841 * unregister_ftrace_profiler calls stop_machine
842 * so this acts like a synchronize_sched.
844 unregister_ftrace_profiler();
847 out:
848 mutex_unlock(&ftrace_profile_lock);
850 *ppos += cnt;
852 return cnt;
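This handler is what sits behind the debugfs knob created below: writing a number toggles the profiler (echo 1 > /sys/kernel/debug/tracing/function_profile_enabled, assuming the usual debugfs mount point), after which the per-CPU results appear in the trace_stat/function<N> files registered further down.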
855 static ssize_t
856 ftrace_profile_read(struct file *filp, char __user *ubuf,
857 size_t cnt, loff_t *ppos)
859 char buf[64]; /* big enough to hold a number */
860 int r;
862 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
863 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
866 static const struct file_operations ftrace_profile_fops = {
867 .open = tracing_open_generic,
868 .read = ftrace_profile_read,
869 .write = ftrace_profile_write,
870 .llseek = default_llseek,
871 };
873 /* used to initialize the real stat files */
874 static struct tracer_stat function_stats __initdata = {
875 .name = "functions",
876 .stat_start = function_stat_start,
877 .stat_next = function_stat_next,
878 .stat_cmp = function_stat_cmp,
879 .stat_headers = function_stat_headers,
880 .stat_show = function_stat_show
881 };
883 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
885 struct ftrace_profile_stat *stat;
886 struct dentry *entry;
887 char *name;
888 int ret;
889 int cpu;
891 for_each_possible_cpu(cpu) {
892 stat = &per_cpu(ftrace_profile_stats, cpu);
894 /* allocate enough for function name + cpu number */
895 name = kmalloc(32, GFP_KERNEL);
896 if (!name) {
898 * The files created are permanent; if something happens,
899 * we still do not free the memory.
901 WARN(1,
902 "Could not allocate stat file for cpu %d\n",
903 cpu);
904 return;
906 stat->stat = function_stats;
907 snprintf(name, 32, "function%d", cpu);
908 stat->stat.name = name;
909 ret = register_stat_tracer(&stat->stat);
910 if (ret) {
911 WARN(1,
912 "Could not register function stat for cpu %d\n",
913 cpu);
914 kfree(name);
915 return;
919 entry = debugfs_create_file("function_profile_enabled", 0644,
920 d_tracer, NULL, &ftrace_profile_fops);
921 if (!entry)
922 pr_warning("Could not create debugfs "
923 "'function_profile_enabled' entry\n");
926 #else /* CONFIG_FUNCTION_PROFILER */
927 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
930 #endif /* CONFIG_FUNCTION_PROFILER */
932 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
934 #ifdef CONFIG_DYNAMIC_FTRACE
936 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
937 # error Dynamic ftrace depends on MCOUNT_RECORD
938 #endif
940 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
942 struct ftrace_func_probe {
943 struct hlist_node node;
944 struct ftrace_probe_ops *ops;
945 unsigned long flags;
946 unsigned long ip;
947 void *data;
948 struct rcu_head rcu;
949 };
951 enum {
952 FTRACE_ENABLE_CALLS = (1 << 0),
953 FTRACE_DISABLE_CALLS = (1 << 1),
954 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
955 FTRACE_START_FUNC_RET = (1 << 3),
956 FTRACE_STOP_FUNC_RET = (1 << 4),
957 };
958 struct ftrace_func_entry {
959 struct hlist_node hlist;
960 unsigned long ip;
961 };
963 struct ftrace_hash {
964 unsigned long size_bits;
965 struct hlist_head *buckets;
966 unsigned long count;
967 struct rcu_head rcu;
968 };
971 * We make these constant because no one should touch them,
972 * but they are used as the default "empty hash", to avoid allocating
973 * it all the time. These are in a read only section such that if
974 * anyone does try to modify it, it will cause an exception.
976 static const struct hlist_head empty_buckets[1];
977 static const struct ftrace_hash empty_hash = {
978 .buckets = (struct hlist_head *)empty_buckets,
979 };
980 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
982 static struct ftrace_ops global_ops = {
983 .func = ftrace_stub,
984 .notrace_hash = EMPTY_HASH,
985 .filter_hash = EMPTY_HASH,
986 };
988 static struct dyn_ftrace *ftrace_new_addrs;
990 static DEFINE_MUTEX(ftrace_regex_lock);
992 struct ftrace_page {
993 struct ftrace_page *next;
994 int index;
995 struct dyn_ftrace records[];
996 };
998 #define ENTRIES_PER_PAGE \
999 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
1001 /* estimate from running different kernels */
1002 #define NR_TO_INIT 10000
1004 static struct ftrace_page *ftrace_pages_start;
1005 static struct ftrace_page *ftrace_pages;
1007 static struct dyn_ftrace *ftrace_free_records;
1009 static struct ftrace_func_entry *
1010 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1012 unsigned long key;
1013 struct ftrace_func_entry *entry;
1014 struct hlist_head *hhd;
1015 struct hlist_node *n;
1017 if (!hash->count)
1018 return NULL;
1020 if (hash->size_bits > 0)
1021 key = hash_long(ip, hash->size_bits);
1022 else
1023 key = 0;
1025 hhd = &hash->buckets[key];
1027 hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1028 if (entry->ip == ip)
1029 return entry;
1031 return NULL;
1034 static void __add_hash_entry(struct ftrace_hash *hash,
1035 struct ftrace_func_entry *entry)
1037 struct hlist_head *hhd;
1038 unsigned long key;
1040 if (hash->size_bits)
1041 key = hash_long(entry->ip, hash->size_bits);
1042 else
1043 key = 0;
1045 hhd = &hash->buckets[key];
1046 hlist_add_head(&entry->hlist, hhd);
1047 hash->count++;
1050 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1052 struct ftrace_func_entry *entry;
1054 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1055 if (!entry)
1056 return -ENOMEM;
1058 entry->ip = ip;
1059 __add_hash_entry(hash, entry);
1061 return 0;
1064 static void
1065 free_hash_entry(struct ftrace_hash *hash,
1066 struct ftrace_func_entry *entry)
1068 hlist_del(&entry->hlist);
1069 kfree(entry);
1070 hash->count--;
1073 static void
1074 remove_hash_entry(struct ftrace_hash *hash,
1075 struct ftrace_func_entry *entry)
1077 hlist_del(&entry->hlist);
1078 hash->count--;
1081 static void ftrace_hash_clear(struct ftrace_hash *hash)
1083 struct hlist_head *hhd;
1084 struct hlist_node *tp, *tn;
1085 struct ftrace_func_entry *entry;
1086 int size = 1 << hash->size_bits;
1087 int i;
1089 if (!hash->count)
1090 return;
1092 for (i = 0; i < size; i++) {
1093 hhd = &hash->buckets[i];
1094 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1095 free_hash_entry(hash, entry);
1097 FTRACE_WARN_ON(hash->count);
1100 static void free_ftrace_hash(struct ftrace_hash *hash)
1102 if (!hash || hash == EMPTY_HASH)
1103 return;
1104 ftrace_hash_clear(hash);
1105 kfree(hash->buckets);
1106 kfree(hash);
1109 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1111 struct ftrace_hash *hash;
1113 hash = container_of(rcu, struct ftrace_hash, rcu);
1114 free_ftrace_hash(hash);
1117 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1119 if (!hash || hash == EMPTY_HASH)
1120 return;
1121 call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1124 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1126 struct ftrace_hash *hash;
1127 int size;
1129 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1130 if (!hash)
1131 return NULL;
1133 size = 1 << size_bits;
1134 hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
1136 if (!hash->buckets) {
1137 kfree(hash);
1138 return NULL;
1141 hash->size_bits = size_bits;
1143 return hash;
1146 static struct ftrace_hash *
1147 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1149 struct ftrace_func_entry *entry;
1150 struct ftrace_hash *new_hash;
1151 struct hlist_node *tp;
1152 int size;
1153 int ret;
1154 int i;
1156 new_hash = alloc_ftrace_hash(size_bits);
1157 if (!new_hash)
1158 return NULL;
1160 /* Empty hash? */
1161 if (!hash || !hash->count)
1162 return new_hash;
1164 size = 1 << hash->size_bits;
1165 for (i = 0; i < size; i++) {
1166 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1167 ret = add_hash_entry(new_hash, entry->ip);
1168 if (ret < 0)
1169 goto free_hash;
1173 FTRACE_WARN_ON(new_hash->count != hash->count);
1175 return new_hash;
1177 free_hash:
1178 free_ftrace_hash(new_hash);
1179 return NULL;
1182 static void
1183 ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1184 static void
1185 ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1187 static int
1188 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1189 struct ftrace_hash **dst, struct ftrace_hash *src)
1191 struct ftrace_func_entry *entry;
1192 struct hlist_node *tp, *tn;
1193 struct hlist_head *hhd;
1194 struct ftrace_hash *old_hash;
1195 struct ftrace_hash *new_hash;
1196 unsigned long key;
1197 int size = src->count;
1198 int bits = 0;
1199 int ret;
1200 int i;
1203 * Remove the current set, update the hash and add
1204 * them back.
1206 ftrace_hash_rec_disable(ops, enable);
1209 * If the new source is empty, just free dst and assign it
1210 * the empty_hash.
1212 if (!src->count) {
1213 free_ftrace_hash_rcu(*dst);
1214 rcu_assign_pointer(*dst, EMPTY_HASH);
1215 return 0;
1219 * Make the hash size about 1/2 the # found
1221 for (size /= 2; size; size >>= 1)
1222 bits++;
1224 /* Don't allocate too much */
1225 if (bits > FTRACE_HASH_MAX_BITS)
1226 bits = FTRACE_HASH_MAX_BITS;
1228 ret = -ENOMEM;
1229 new_hash = alloc_ftrace_hash(bits);
1230 if (!new_hash)
1231 goto out;
1233 size = 1 << src->size_bits;
1234 for (i = 0; i < size; i++) {
1235 hhd = &src->buckets[i];
1236 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1237 if (bits > 0)
1238 key = hash_long(entry->ip, bits);
1239 else
1240 key = 0;
1241 remove_hash_entry(src, entry);
1242 __add_hash_entry(new_hash, entry);
1246 old_hash = *dst;
1247 rcu_assign_pointer(*dst, new_hash);
1248 free_ftrace_hash_rcu(old_hash);
1250 ret = 0;
1251 out:
1253 * Enable regardless of ret:
1254 * On success, we enable the new hash.
1255 * On failure, we re-enable the original hash.
1257 ftrace_hash_rec_enable(ops, enable);
1259 return ret;
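The sizing loop above rounds half the entry count up to the next power of two. For example, moving 300 filter entries: size starts at 150, which takes eight right-shifts to reach zero, so bits = 8 and the new table gets 2^8 = 256 buckets, with FTRACE_HASH_MAX_BITS capping it at 2^12 however many entries there are.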
1263 * Test the hashes for this ops to see if we want to call
1264 * the ops->func or not.
1266 * It's a match if the ip is in the ops->filter_hash or
1267 * the filter_hash does not exist or is empty,
1268 * AND
1269 * the ip is not in the ops->notrace_hash.
1271 * This needs to be called with preemption disabled as
1272 * the hashes are freed with call_rcu_sched().
1274 static int
1275 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1277 struct ftrace_hash *filter_hash;
1278 struct ftrace_hash *notrace_hash;
1279 int ret;
1281 filter_hash = rcu_dereference_raw(ops->filter_hash);
1282 notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1284 if ((!filter_hash || !filter_hash->count ||
1285 ftrace_lookup_ip(filter_hash, ip)) &&
1286 (!notrace_hash || !notrace_hash->count ||
1287 !ftrace_lookup_ip(notrace_hash, ip)))
1288 ret = 1;
1289 else
1290 ret = 0;
1292 return ret;
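Put as a rule: an empty or missing filter_hash means "trace everything", a populated filter_hash is an allow-list, and a hit in notrace_hash always vetoes the call regardless of the filter. Only when both tests pass does the dispatcher go on to invoke ops->func for this ip.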
1296 * This is a double for-loop. Do not use 'break' to break out of the loop;
1297 * you must use a goto.
1299 #define do_for_each_ftrace_rec(pg, rec) \
1300 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1301 int _____i; \
1302 for (_____i = 0; _____i < pg->index; _____i++) { \
1303 rec = &pg->records[_____i];
1305 #define while_for_each_ftrace_rec() \
1306 } \
1307 }
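A sketch of how the pair is meant to be used (some_condition() is a placeholder predicate); because the macros expand to two nested for-loops, a bare break would only leave the inner one, hence the goto the comment insists on:

	static void scan_records(void)
	{
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;

		do_for_each_ftrace_rec(pg, rec) {
			if (some_condition(rec))
				goto out;	/* 'break' would only exit the inner loop */
		} while_for_each_ftrace_rec();
	out:
		return;
	}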
1309 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1310 int filter_hash,
1311 bool inc)
1313 struct ftrace_hash *hash;
1314 struct ftrace_hash *other_hash;
1315 struct ftrace_page *pg;
1316 struct dyn_ftrace *rec;
1317 int count = 0;
1318 int all = 0;
1320 /* Only update if the ops has been registered */
1321 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1322 return;
1325 * In the filter_hash case:
1326 * If the count is zero, we update all records.
1327 * Otherwise we just update the items in the hash.
1329 * In the notrace_hash case:
1330 * We enable the update in the hash.
1331 * As disabling notrace means enabling the tracing,
1332 * and enabling notrace means disabling, the inc variable
1333 * gets inverted.
1335 if (filter_hash) {
1336 hash = ops->filter_hash;
1337 other_hash = ops->notrace_hash;
1338 if (!hash || !hash->count)
1339 all = 1;
1340 } else {
1341 inc = !inc;
1342 hash = ops->notrace_hash;
1343 other_hash = ops->filter_hash;
1345 * If the notrace hash has no items,
1346 * then there's nothing to do.
1348 if (hash && !hash->count)
1349 return;
1352 do_for_each_ftrace_rec(pg, rec) {
1353 int in_other_hash = 0;
1354 int in_hash = 0;
1355 int match = 0;
1357 if (all) {
1359 * Only the filter_hash affects all records.
1360 * Update if the record is not in the notrace hash.
1362 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1363 match = 1;
1364 } else {
1365 in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
1366 in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
1371 if (filter_hash && in_hash && !in_other_hash)
1372 match = 1;
1373 else if (!filter_hash && in_hash &&
1374 (in_other_hash || !other_hash->count))
1375 match = 1;
1377 if (!match)
1378 continue;
1380 if (inc) {
1381 rec->flags++;
1382 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1383 return;
1384 } else {
1385 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1386 return;
1387 rec->flags--;
1389 count++;
1390 /* Shortcut, if we handled all records, we are done. */
1391 if (!all && count == hash->count)
1392 return;
1393 } while_for_each_ftrace_rec();
1396 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1397 int filter_hash)
1399 __ftrace_hash_rec_update(ops, filter_hash, 0);
1402 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1403 int filter_hash)
1405 __ftrace_hash_rec_update(ops, filter_hash, 1);
1408 static void ftrace_free_rec(struct dyn_ftrace *rec)
1410 rec->freelist = ftrace_free_records;
1411 ftrace_free_records = rec;
1412 rec->flags |= FTRACE_FL_FREE;
1415 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
1417 struct dyn_ftrace *rec;
1419 /* First check for freed records */
1420 if (ftrace_free_records) {
1421 rec = ftrace_free_records;
1423 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
1424 FTRACE_WARN_ON_ONCE(1);
1425 ftrace_free_records = NULL;
1426 return NULL;
1429 ftrace_free_records = rec->freelist;
1430 memset(rec, 0, sizeof(*rec));
1431 return rec;
1434 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
1435 if (!ftrace_pages->next) {
1436 /* allocate another page */
1437 ftrace_pages->next =
1438 (void *)get_zeroed_page(GFP_KERNEL);
1439 if (!ftrace_pages->next)
1440 return NULL;
1442 ftrace_pages = ftrace_pages->next;
1445 return &ftrace_pages->records[ftrace_pages->index++];
1448 static struct dyn_ftrace *
1449 ftrace_record_ip(unsigned long ip)
1451 struct dyn_ftrace *rec;
1453 if (ftrace_disabled)
1454 return NULL;
1456 rec = ftrace_alloc_dyn_node(ip);
1457 if (!rec)
1458 return NULL;
1460 rec->ip = ip;
1461 rec->newlist = ftrace_new_addrs;
1462 ftrace_new_addrs = rec;
1464 return rec;
1467 static void print_ip_ins(const char *fmt, unsigned char *p)
1469 int i;
1471 printk(KERN_CONT "%s", fmt);
1473 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1474 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1477 static void ftrace_bug(int failed, unsigned long ip)
1479 switch (failed) {
1480 case -EFAULT:
1481 FTRACE_WARN_ON_ONCE(1);
1482 pr_info("ftrace faulted on modifying ");
1483 print_ip_sym(ip);
1484 break;
1485 case -EINVAL:
1486 FTRACE_WARN_ON_ONCE(1);
1487 pr_info("ftrace failed to modify ");
1488 print_ip_sym(ip);
1489 print_ip_ins(" actual: ", (unsigned char *)ip);
1490 printk(KERN_CONT "\n");
1491 break;
1492 case -EPERM:
1493 FTRACE_WARN_ON_ONCE(1);
1494 pr_info("ftrace faulted on writing ");
1495 print_ip_sym(ip);
1496 break;
1497 default:
1498 FTRACE_WARN_ON_ONCE(1);
1499 pr_info("ftrace faulted on unknown error ");
1500 print_ip_sym(ip);
1505 /* Return 1 if the address range is reserved for ftrace */
1506 int ftrace_text_reserved(void *start, void *end)
1508 struct dyn_ftrace *rec;
1509 struct ftrace_page *pg;
1511 do_for_each_ftrace_rec(pg, rec) {
1512 if (rec->ip <= (unsigned long)end &&
1513 rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1514 return 1;
1515 } while_for_each_ftrace_rec();
1516 return 0;
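This is the usual interval-overlap test, with each record standing for the MCOUNT_INSN_SIZE bytes at rec->ip: two ranges overlap iff each starts before the other ends. Note that 'end' is compared inclusively (rec->ip <= end), so callers pass the address of the last byte of the range rather than one past it.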
1520 static int
1521 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1523 unsigned long ftrace_addr;
1524 unsigned long flag = 0UL;
1526 ftrace_addr = (unsigned long)FTRACE_ADDR;
1529 * If we are enabling tracing:
1531 * If the record has a ref count, then we need to enable it
1532 * because someone is using it.
1534 * Otherwise we make sure its disabled.
1536 * If we are disabling tracing, then disable all records that
1537 * are enabled.
1539 if (enable && (rec->flags & ~FTRACE_FL_MASK))
1540 flag = FTRACE_FL_ENABLED;
1542 /* If the state of this record hasn't changed, then do nothing */
1543 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1544 return 0;
1546 if (flag) {
1547 rec->flags |= FTRACE_FL_ENABLED;
1548 return ftrace_make_call(rec, ftrace_addr);
1551 rec->flags &= ~FTRACE_FL_ENABLED;
1552 return ftrace_make_nop(NULL, rec, ftrace_addr);
1555 static void ftrace_replace_code(int enable)
1557 struct dyn_ftrace *rec;
1558 struct ftrace_page *pg;
1559 int failed;
1561 if (unlikely(ftrace_disabled))
1562 return;
1564 do_for_each_ftrace_rec(pg, rec) {
1565 /* Skip over free records */
1566 if (rec->flags & FTRACE_FL_FREE)
1567 continue;
1569 failed = __ftrace_replace_code(rec, enable);
1570 if (failed) {
1571 ftrace_bug(failed, rec->ip);
1572 /* Stop processing */
1573 return;
1575 } while_for_each_ftrace_rec();
1578 static int
1579 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1581 unsigned long ip;
1582 int ret;
1584 ip = rec->ip;
1586 if (unlikely(ftrace_disabled))
1587 return 0;
1589 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1590 if (ret) {
1591 ftrace_bug(ret, ip);
1592 return 0;
1594 return 1;
1598 * archs can override this function if they must do something
1599 * before the modifying code is performed.
1601 int __weak ftrace_arch_code_modify_prepare(void)
1603 return 0;
1607 * archs can override this function if they must do something
1608 * after the modifying code is performed.
1610 int __weak ftrace_arch_code_modify_post_process(void)
1612 return 0;
1615 static int __ftrace_modify_code(void *data)
1617 int *command = data;
1620 * Do not call function tracer while we update the code.
1621 * We are in stop machine, no worrying about races.
1623 function_trace_stop++;
1625 if (*command & FTRACE_ENABLE_CALLS)
1626 ftrace_replace_code(1);
1627 else if (*command & FTRACE_DISABLE_CALLS)
1628 ftrace_replace_code(0);
1630 if (*command & FTRACE_UPDATE_TRACE_FUNC)
1631 ftrace_update_ftrace_func(ftrace_trace_function);
1633 if (*command & FTRACE_START_FUNC_RET)
1634 ftrace_enable_ftrace_graph_caller();
1635 else if (*command & FTRACE_STOP_FUNC_RET)
1636 ftrace_disable_ftrace_graph_caller();
1638 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
1640 * For archs that call ftrace_test_stop_func(), we must
1641 * wait till after we update all the function callers
1642 * before we update the callback. This keeps different
1643 * ops that record different functions from corrupting
1644 * each other.
1646 __ftrace_trace_function = __ftrace_trace_function_delay;
1647 #endif
1648 function_trace_stop--;
1650 return 0;
1653 static void ftrace_run_update_code(int command)
1655 int ret;
1657 ret = ftrace_arch_code_modify_prepare();
1658 FTRACE_WARN_ON(ret);
1659 if (ret)
1660 return;
1662 stop_machine(__ftrace_modify_code, &command, NULL);
1664 ret = ftrace_arch_code_modify_post_process();
1665 FTRACE_WARN_ON(ret);
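stop_machine() runs __ftrace_modify_code() on a single CPU while every other online CPU spins in a known-safe state, which is what makes it safe, on kernels of this vintage, to rewrite live kernel text: no CPU can be executing an mcount call site halfway through having its bytes patched. The weak prepare/post_process hooks bracket the operation for architectures that need extra work, such as making the text mapping writable.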
1668 static ftrace_func_t saved_ftrace_func;
1669 static int ftrace_start_up;
1670 static int global_start_up;
1672 static void ftrace_startup_enable(int command)
1674 if (saved_ftrace_func != ftrace_trace_function) {
1675 saved_ftrace_func = ftrace_trace_function;
1676 command |= FTRACE_UPDATE_TRACE_FUNC;
1679 if (!command || !ftrace_enabled)
1680 return;
1682 ftrace_run_update_code(command);
1685 static int ftrace_startup(struct ftrace_ops *ops, int command)
1687 bool hash_enable = true;
1689 if (unlikely(ftrace_disabled))
1690 return -ENODEV;
1692 ftrace_start_up++;
1693 command |= FTRACE_ENABLE_CALLS;
1695 /* ops marked global share the filter hashes */
1696 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1697 ops = &global_ops;
1698 /* Don't update hash if global is already set */
1699 if (global_start_up)
1700 hash_enable = false;
1701 global_start_up++;
1704 ops->flags |= FTRACE_OPS_FL_ENABLED;
1705 if (hash_enable)
1706 ftrace_hash_rec_enable(ops, 1);
1708 ftrace_startup_enable(command);
1710 return 0;
1713 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
1715 bool hash_disable = true;
1717 if (unlikely(ftrace_disabled))
1718 return;
1720 ftrace_start_up--;
1722 * Just warn in case of imbalance; no need to kill ftrace. It's not
1723 * critical, but the ftrace_call callers may never be nopped again after
1724 * further ftrace uses.
1726 WARN_ON_ONCE(ftrace_start_up < 0);
1728 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1729 ops = &global_ops;
1730 global_start_up--;
1731 WARN_ON_ONCE(global_start_up < 0);
1732 /* Don't update hash if global still has users */
1733 if (global_start_up) {
1734 WARN_ON_ONCE(!ftrace_start_up);
1735 hash_disable = false;
1739 if (hash_disable)
1740 ftrace_hash_rec_disable(ops, 1);
1742 if (ops != &global_ops || !global_start_up)
1743 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
1745 if (!ftrace_start_up)
1746 command |= FTRACE_DISABLE_CALLS;
1748 if (saved_ftrace_func != ftrace_trace_function) {
1749 saved_ftrace_func = ftrace_trace_function;
1750 command |= FTRACE_UPDATE_TRACE_FUNC;
1753 if (!command || !ftrace_enabled)
1754 return;
1756 ftrace_run_update_code(command);
1759 static void ftrace_startup_sysctl(void)
1761 if (unlikely(ftrace_disabled))
1762 return;
1764 /* Force update next time */
1765 saved_ftrace_func = NULL;
1766 /* ftrace_start_up is true if we want ftrace running */
1767 if (ftrace_start_up)
1768 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1771 static void ftrace_shutdown_sysctl(void)
1773 if (unlikely(ftrace_disabled))
1774 return;
1776 /* ftrace_start_up is true if ftrace is running */
1777 if (ftrace_start_up)
1778 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
1781 static cycle_t ftrace_update_time;
1782 static unsigned long ftrace_update_cnt;
1783 unsigned long ftrace_update_tot_cnt;
1785 static int ops_traces_mod(struct ftrace_ops *ops)
1787 struct ftrace_hash *hash;
1789 hash = ops->filter_hash;
1790 return !!(!hash || !hash->count);
1793 static int ftrace_update_code(struct module *mod)
1795 struct dyn_ftrace *p;
1796 cycle_t start, stop;
1797 unsigned long ref = 0;
1800 * When adding a module, we need to check if tracers are
1801 * currently enabled and if they are set to trace all functions.
1802 * If they are, we need to enable the module functions as well
1803 * as update the reference counts for those function records.
1805 if (mod) {
1806 struct ftrace_ops *ops;
1808 for (ops = ftrace_ops_list;
1809 ops != &ftrace_list_end; ops = ops->next) {
1810 if (ops->flags & FTRACE_OPS_FL_ENABLED &&
1811 ops_traces_mod(ops))
1812 ref++;
1816 start = ftrace_now(raw_smp_processor_id());
1817 ftrace_update_cnt = 0;
1819 while (ftrace_new_addrs) {
1821 /* If something went wrong, bail without enabling anything */
1822 if (unlikely(ftrace_disabled))
1823 return -1;
1825 p = ftrace_new_addrs;
1826 ftrace_new_addrs = p->newlist;
1827 p->flags = ref;
1830 * Do the initial record conversion from mcount jump
1831 * to the NOP instructions.
1833 if (!ftrace_code_disable(mod, p)) {
1834 ftrace_free_rec(p);
1835 /* Game over */
1836 break;
1839 ftrace_update_cnt++;
1842 * If the tracing is enabled, go ahead and enable the record.
1844 * The reason not to enable the record immediately is the
1845 * inherent check of ftrace_make_nop/ftrace_make_call for
1846 * correct previous instructions. Making first the NOP
1847 * conversion puts the module to the correct state, thus
1848 * passing the ftrace_make_call check.
1850 if (ftrace_start_up && ref) {
1851 int failed = __ftrace_replace_code(p, 1);
1852 if (failed) {
1853 ftrace_bug(failed, p->ip);
1854 ftrace_free_rec(p);
1859 stop = ftrace_now(raw_smp_processor_id());
1860 ftrace_update_time = stop - start;
1861 ftrace_update_tot_cnt += ftrace_update_cnt;
1863 return 0;
1866 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1868 struct ftrace_page *pg;
1869 int cnt;
1870 int i;
1872 /* allocate a few pages */
1873 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1874 if (!ftrace_pages_start)
1875 return -1;
1878 * Allocate a few more pages.
1880 * TODO: have some parser search vmlinux before
1881 * final linking to find all calls to ftrace.
1882 * Then we can:
1883 * a) know how many pages to allocate.
1884 * and/or
1885 * b) set up the table then.
1887 * The dynamic code is still necessary for
1888 * modules.
1891 pg = ftrace_pages = ftrace_pages_start;
1893 cnt = num_to_init / ENTRIES_PER_PAGE;
1894 pr_info("ftrace: allocating %ld entries in %d pages\n",
1895 num_to_init, cnt + 1);
1897 for (i = 0; i < cnt; i++) {
1898 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1900 /* If we fail, we'll try later anyway */
1901 if (!pg->next)
1902 break;
1904 pg = pg->next;
1907 return 0;
1910 enum {
1911 FTRACE_ITER_FILTER = (1 << 0),
1912 FTRACE_ITER_NOTRACE = (1 << 1),
1913 FTRACE_ITER_PRINTALL = (1 << 2),
1914 FTRACE_ITER_HASH = (1 << 3),
1915 FTRACE_ITER_ENABLED = (1 << 4),
1916 };
1918 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1920 struct ftrace_iterator {
1921 loff_t pos;
1922 loff_t func_pos;
1923 struct ftrace_page *pg;
1924 struct dyn_ftrace *func;
1925 struct ftrace_func_probe *probe;
1926 struct trace_parser parser;
1927 struct ftrace_hash *hash;
1928 struct ftrace_ops *ops;
1929 int hidx;
1930 int idx;
1931 unsigned flags;
1932 };
1934 static void *
1935 t_hash_next(struct seq_file *m, loff_t *pos)
1937 struct ftrace_iterator *iter = m->private;
1938 struct hlist_node *hnd = NULL;
1939 struct hlist_head *hhd;
1941 (*pos)++;
1942 iter->pos = *pos;
1944 if (iter->probe)
1945 hnd = &iter->probe->node;
1946 retry:
1947 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1948 return NULL;
1950 hhd = &ftrace_func_hash[iter->hidx];
1952 if (hlist_empty(hhd)) {
1953 iter->hidx++;
1954 hnd = NULL;
1955 goto retry;
1958 if (!hnd)
1959 hnd = hhd->first;
1960 else {
1961 hnd = hnd->next;
1962 if (!hnd) {
1963 iter->hidx++;
1964 goto retry;
1968 if (WARN_ON_ONCE(!hnd))
1969 return NULL;
1971 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1973 return iter;
1976 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1978 struct ftrace_iterator *iter = m->private;
1979 void *p = NULL;
1980 loff_t l;
1982 if (iter->func_pos > *pos)
1983 return NULL;
1985 iter->hidx = 0;
1986 for (l = 0; l <= (*pos - iter->func_pos); ) {
1987 p = t_hash_next(m, &l);
1988 if (!p)
1989 break;
1991 if (!p)
1992 return NULL;
1994 /* Only set this if we have an item */
1995 iter->flags |= FTRACE_ITER_HASH;
1997 return iter;
2000 static int
2001 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2003 struct ftrace_func_probe *rec;
2005 rec = iter->probe;
2006 if (WARN_ON_ONCE(!rec))
2007 return -EIO;
2009 if (rec->ops->print)
2010 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2012 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2014 if (rec->data)
2015 seq_printf(m, ":%p", rec->data);
2016 seq_putc(m, '\n');
2018 return 0;
2021 static void *
2022 t_next(struct seq_file *m, void *v, loff_t *pos)
2024 struct ftrace_iterator *iter = m->private;
2025 struct ftrace_ops *ops = &global_ops;
2026 struct dyn_ftrace *rec = NULL;
2028 if (unlikely(ftrace_disabled))
2029 return NULL;
2031 if (iter->flags & FTRACE_ITER_HASH)
2032 return t_hash_next(m, pos);
2034 (*pos)++;
2035 iter->pos = iter->func_pos = *pos;
2037 if (iter->flags & FTRACE_ITER_PRINTALL)
2038 return t_hash_start(m, pos);
2040 retry:
2041 if (iter->idx >= iter->pg->index) {
2042 if (iter->pg->next) {
2043 iter->pg = iter->pg->next;
2044 iter->idx = 0;
2045 goto retry;
2047 } else {
2048 rec = &iter->pg->records[iter->idx++];
2049 if ((rec->flags & FTRACE_FL_FREE) ||
2051 ((iter->flags & FTRACE_ITER_FILTER) &&
2052 !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2054 ((iter->flags & FTRACE_ITER_NOTRACE) &&
2055 !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2057 ((iter->flags & FTRACE_ITER_ENABLED) &&
2058 !(rec->flags & ~FTRACE_FL_MASK))) {
2060 rec = NULL;
2061 goto retry;
2065 if (!rec)
2066 return t_hash_start(m, pos);
2068 iter->func = rec;
2070 return iter;
2073 static void reset_iter_read(struct ftrace_iterator *iter)
2075 iter->pos = 0;
2076 iter->func_pos = 0;
2077 iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
2080 static void *t_start(struct seq_file *m, loff_t *pos)
2082 struct ftrace_iterator *iter = m->private;
2083 struct ftrace_ops *ops = &global_ops;
2084 void *p = NULL;
2085 loff_t l;
2087 mutex_lock(&ftrace_lock);
2089 if (unlikely(ftrace_disabled))
2090 return NULL;
2093 * If an lseek was done, then reset and start from the beginning.
2095 if (*pos < iter->pos)
2096 reset_iter_read(iter);
2099 * For set_ftrace_filter reading, if we have the filter
2100 * off, we can short cut and just print out that all
2101 * functions are enabled.
2103 if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
2104 if (*pos > 0)
2105 return t_hash_start(m, pos);
2106 iter->flags |= FTRACE_ITER_PRINTALL;
2107 /* reset in case of seek/pread */
2108 iter->flags &= ~FTRACE_ITER_HASH;
2109 return iter;
2112 if (iter->flags & FTRACE_ITER_HASH)
2113 return t_hash_start(m, pos);
2116 * Unfortunately, we need to restart at ftrace_pages_start
2117 * every time we let go of the ftrace_mutex. This is because
2118 * those pointers can change without the lock.
2120 iter->pg = ftrace_pages_start;
2121 iter->idx = 0;
2122 for (l = 0; l <= *pos; ) {
2123 p = t_next(m, p, &l);
2124 if (!p)
2125 break;
2128 if (!p) {
2129 if (iter->flags & FTRACE_ITER_FILTER)
2130 return t_hash_start(m, pos);
2132 return NULL;
2135 return iter;
2138 static void t_stop(struct seq_file *m, void *p)
2140 mutex_unlock(&ftrace_lock);
2143 static int t_show(struct seq_file *m, void *v)
2145 struct ftrace_iterator *iter = m->private;
2146 struct dyn_ftrace *rec;
2148 if (iter->flags & FTRACE_ITER_HASH)
2149 return t_hash_show(m, iter);
2151 if (iter->flags & FTRACE_ITER_PRINTALL) {
2152 seq_printf(m, "#### all functions enabled ####\n");
2153 return 0;
2156 rec = iter->func;
2158 if (!rec)
2159 return 0;
2161 seq_printf(m, "%ps", (void *)rec->ip);
2162 if (iter->flags & FTRACE_ITER_ENABLED)
2163 seq_printf(m, " (%ld)",
2164 rec->flags & ~FTRACE_FL_MASK);
2165 seq_printf(m, "\n");
2167 return 0;
2170 static const struct seq_operations show_ftrace_seq_ops = {
2171 .start = t_start,
2172 .next = t_next,
2173 .stop = t_stop,
2174 .show = t_show,
2175 };
2177 static int
2178 ftrace_avail_open(struct inode *inode, struct file *file)
2180 struct ftrace_iterator *iter;
2181 int ret;
2183 if (unlikely(ftrace_disabled))
2184 return -ENODEV;
2186 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2187 if (!iter)
2188 return -ENOMEM;
2190 iter->pg = ftrace_pages_start;
2192 ret = seq_open(file, &show_ftrace_seq_ops);
2193 if (!ret) {
2194 struct seq_file *m = file->private_data;
2196 m->private = iter;
2197 } else {
2198 kfree(iter);
2201 return ret;
2204 static int
2205 ftrace_enabled_open(struct inode *inode, struct file *file)
2207 struct ftrace_iterator *iter;
2208 int ret;
2210 if (unlikely(ftrace_disabled))
2211 return -ENODEV;
2213 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2214 if (!iter)
2215 return -ENOMEM;
2217 iter->pg = ftrace_pages_start;
2218 iter->flags = FTRACE_ITER_ENABLED;
2220 ret = seq_open(file, &show_ftrace_seq_ops);
2221 if (!ret) {
2222 struct seq_file *m = file->private_data;
2224 m->private = iter;
2225 } else {
2226 kfree(iter);
2229 return ret;
2232 static void ftrace_filter_reset(struct ftrace_hash *hash)
2234 mutex_lock(&ftrace_lock);
2235 ftrace_hash_clear(hash);
2236 mutex_unlock(&ftrace_lock);
2239 static int
2240 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2241 struct inode *inode, struct file *file)
2243 struct ftrace_iterator *iter;
2244 struct ftrace_hash *hash;
2245 int ret = 0;
2247 if (unlikely(ftrace_disabled))
2248 return -ENODEV;
2250 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2251 if (!iter)
2252 return -ENOMEM;
2254 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2255 kfree(iter);
2256 return -ENOMEM;
2259 if (flag & FTRACE_ITER_NOTRACE)
2260 hash = ops->notrace_hash;
2261 else
2262 hash = ops->filter_hash;
2264 iter->ops = ops;
2265 iter->flags = flag;
2267 if (file->f_mode & FMODE_WRITE) {
2268 mutex_lock(&ftrace_lock);
2269 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2270 mutex_unlock(&ftrace_lock);
2272 if (!iter->hash) {
2273 trace_parser_put(&iter->parser);
2274 kfree(iter);
2275 return -ENOMEM;
2279 mutex_lock(&ftrace_regex_lock);
2281 if ((file->f_mode & FMODE_WRITE) &&
2282 (file->f_flags & O_TRUNC))
2283 ftrace_filter_reset(iter->hash);
2285 if (file->f_mode & FMODE_READ) {
2286 iter->pg = ftrace_pages_start;
2288 ret = seq_open(file, &show_ftrace_seq_ops);
2289 if (!ret) {
2290 struct seq_file *m = file->private_data;
2291 m->private = iter;
2292 } else {
2293 /* Failed */
2294 free_ftrace_hash(iter->hash);
2295 trace_parser_put(&iter->parser);
2296 kfree(iter);
2298 } else
2299 file->private_data = iter;
2300 mutex_unlock(&ftrace_regex_lock);
2302 return ret;
2305 static int
2306 ftrace_filter_open(struct inode *inode, struct file *file)
2308 return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
2309 inode, file);
2312 static int
2313 ftrace_notrace_open(struct inode *inode, struct file *file)
2315 return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2316 inode, file);
2319 static loff_t
2320 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
2322 loff_t ret;
2324 if (file->f_mode & FMODE_READ)
2325 ret = seq_lseek(file, offset, origin);
2326 else
2327 file->f_pos = ret = 1;
2329 return ret;
2332 static int ftrace_match(char *str, char *regex, int len, int type)
2334 int matched = 0;
2335 int slen;
2337 switch (type) {
2338 case MATCH_FULL:
2339 if (strcmp(str, regex) == 0)
2340 matched = 1;
2341 break;
2342 case MATCH_FRONT_ONLY:
2343 if (strncmp(str, regex, len) == 0)
2344 matched = 1;
2345 break;
2346 case MATCH_MIDDLE_ONLY:
2347 if (strstr(str, regex))
2348 matched = 1;
2349 break;
2350 case MATCH_END_ONLY:
2351 slen = strlen(str);
2352 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2353 matched = 1;
2354 break;
2357 return matched;
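The type value comes from filter_parse_regex() (shared with the event-filter code), which classifies a glob by where its '*' sits: "func" yields MATCH_FULL, "func*" MATCH_FRONT_ONLY, "*func" MATCH_END_ONLY and "*func*" MATCH_MIDDLE_ONLY, with len set to the length of the literal part. So matching "spin_lock" against the glob "*lock" takes the MATCH_END_ONLY arm and memcmp()s only the final four bytes.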
2360 static int
2361 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2363 struct ftrace_func_entry *entry;
2364 int ret = 0;
2366 entry = ftrace_lookup_ip(hash, rec->ip);
2367 if (not) {
2368 /* Do nothing if it doesn't exist */
2369 if (!entry)
2370 return 0;
2372 free_hash_entry(hash, entry);
2373 } else {
2374 /* Do nothing if it exists */
2375 if (entry)
2376 return 0;
2378 ret = add_hash_entry(hash, rec->ip);
2380 return ret;
2383 static int
2384 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2385 char *regex, int len, int type)
2387 char str[KSYM_SYMBOL_LEN];
2388 char *modname;
2390 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2392 if (mod) {
2393 /* module lookup requires matching the module */
2394 if (!modname || strcmp(modname, mod))
2395 return 0;
2397 /* blank search means to match all funcs in the mod */
2398 if (!len)
2399 return 1;
2402 return ftrace_match(str, regex, len, type);
2405 static int
2406 match_records(struct ftrace_hash *hash, char *buff,
2407 int len, char *mod, int not)
2409 unsigned search_len = 0;
2410 struct ftrace_page *pg;
2411 struct dyn_ftrace *rec;
2412 int type = MATCH_FULL;
2413 char *search = buff;
2414 int found = 0;
2415 int ret;
2417 if (len) {
2418 type = filter_parse_regex(buff, len, &search, &not);
2419 search_len = strlen(search);
2422 mutex_lock(&ftrace_lock);
2424 if (unlikely(ftrace_disabled))
2425 goto out_unlock;
2427 do_for_each_ftrace_rec(pg, rec) {
2429 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2430 ret = enter_record(hash, rec, not);
2431 if (ret < 0) {
2432 found = ret;
2433 goto out_unlock;
2435 found = 1;
2437 } while_for_each_ftrace_rec();
2438 out_unlock:
2439 mutex_unlock(&ftrace_lock);
2441 return found;
2444 static int
2445 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2447 return match_records(hash, buff, len, NULL, 0);
2450 static int
2451 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2453 int not = 0;
2455 /* blank or '*' mean the same */
2456 if (strcmp(buff, "*") == 0)
2457 buff[0] = 0;
2459 /* handle the case of 'dont filter this module' */
2460 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2461 buff[0] = 0;
2462 not = 1;
2465 return match_records(hash, buff, strlen(buff), mod, not);
2469 * We register the module command as a template to show others how
2470 * to register a command as well.
2473 static int
2474 ftrace_mod_callback(struct ftrace_hash *hash,
2475 char *func, char *cmd, char *param, int enable)
2477 char *mod;
2478 int ret = -EINVAL;
2481 * cmd == 'mod' because we only registered this func
2482 * for the 'mod' ftrace_func_command.
2483 * But if you register one func with multiple commands,
2484 * you can tell which command was used by the cmd
2485 * parameter.
2488 /* we must have a module name */
2489 if (!param)
2490 return ret;
2492 mod = strsep(&param, ":");
2493 if (!strlen(mod))
2494 return ret;
2496 ret = ftrace_match_module_records(hash, func, mod);
2497 if (!ret)
2498 ret = -EINVAL;
2499 if (ret < 0)
2500 return ret;
2502 return 0;
2505 static struct ftrace_func_command ftrace_mod_cmd = {
2506 .name = "mod",
2507 .func = ftrace_mod_callback,
2510 static int __init ftrace_mod_cmd_init(void)
2512 return register_ftrace_command(&ftrace_mod_cmd);
2514 device_initcall(ftrace_mod_cmd_init);
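/*
 * Editor's sketch of using the template above (hypothetical command,
 * not in the original file).  Writing "<glob>:dummy:<param>" into
 * set_ftrace_filter would invoke the callback with cmd == "dummy":
 *
 *	static int dummy_callback(struct ftrace_hash *hash, char *func,
 *				  char *cmd, char *param, int enable)
 *	{
 *		// act on @func and @param here
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command dummy_cmd = {
 *		.name	= "dummy",
 *		.func	= dummy_callback,
 *	};
 *
 *	static int __init dummy_cmd_init(void)
 *	{
 *		return register_ftrace_command(&dummy_cmd);
 *	}
 *	device_initcall(dummy_cmd_init);
 */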
2516 static void
2517 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2519 struct ftrace_func_probe *entry;
2520 struct hlist_head *hhd;
2521 struct hlist_node *n;
2522 unsigned long key;
2524 key = hash_long(ip, FTRACE_HASH_BITS);
2526 hhd = &ftrace_func_hash[key];
2528 if (hlist_empty(hhd))
2529 return;
2532 * Disable preemption for these calls to prevent an RCU grace
2533 * period. This syncs the hash iteration with the freeing of items
2534 * on the hash. rcu_read_lock() is too dangerous here.
2536 preempt_disable_notrace();
2537 hlist_for_each_entry_rcu(entry, n, hhd, node) {
2538 if (entry->ip == ip)
2539 entry->ops->func(ip, parent_ip, &entry->data);
2541 preempt_enable_notrace();
2544 static struct ftrace_ops trace_probe_ops __read_mostly =
2546 .func = function_trace_probe_call,
2549 static int ftrace_probe_registered;
2551 static void __enable_ftrace_function_probe(void)
2553 int ret;
2554 int i;
2556 if (ftrace_probe_registered)
2557 return;
2559 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2560 struct hlist_head *hhd = &ftrace_func_hash[i];
2561 if (hhd->first)
2562 break;
2564 /* Nothing registered? */
2565 if (i == FTRACE_FUNC_HASHSIZE)
2566 return;
2568 ret = __register_ftrace_function(&trace_probe_ops);
2569 if (!ret)
2570 ret = ftrace_startup(&trace_probe_ops, 0);
2572 ftrace_probe_registered = 1;
2575 static void __disable_ftrace_function_probe(void)
2577 int ret;
2578 int i;
2580 if (!ftrace_probe_registered)
2581 return;
2583 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2584 struct hlist_head *hhd = &ftrace_func_hash[i];
2585 if (hhd->first)
2586 return;
2589 /* no more funcs left */
2590 ret = __unregister_ftrace_function(&trace_probe_ops);
2591 if (!ret)
2592 ftrace_shutdown(&trace_probe_ops, 0);
2594 ftrace_probe_registered = 0;
2598 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2600 struct ftrace_func_probe *entry =
2601 container_of(rhp, struct ftrace_func_probe, rcu);
2603 if (entry->ops->free)
2604 entry->ops->free(&entry->data);
2605 kfree(entry);
2610 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2611 void *data)
2613 struct ftrace_func_probe *entry;
2614 struct ftrace_page *pg;
2615 struct dyn_ftrace *rec;
2616 int type, len, not;
2617 unsigned long key;
2618 int count = 0;
2619 char *search;
2621 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2622 len = strlen(search);
2624 /* we do not support '!' for function probes */
2625 if (WARN_ON(not))
2626 return -EINVAL;
2628 mutex_lock(&ftrace_lock);
2630 if (unlikely(ftrace_disabled))
2631 goto out_unlock;
2633 do_for_each_ftrace_rec(pg, rec) {
2635 if (!ftrace_match_record(rec, NULL, search, len, type))
2636 continue;
2638 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2639 if (!entry) {
2640 /* If we did not process any, then return error */
2641 if (!count)
2642 count = -ENOMEM;
2643 goto out_unlock;
2646 count++;
2648 entry->data = data;
2651 * The caller might want to do something special
2652 * for each function we find. We call the callback
2653 * to give the caller an opportunity to do so.
2655 if (ops->callback) {
2656 if (ops->callback(rec->ip, &entry->data) < 0) {
2657 /* caller does not like this func */
2658 kfree(entry);
2659 continue;
2663 entry->ops = ops;
2664 entry->ip = rec->ip;
2666 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2667 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2669 } while_for_each_ftrace_rec();
2670 __enable_ftrace_function_probe();
2672 out_unlock:
2673 mutex_unlock(&ftrace_lock);
2675 return count;
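/*
 * Editor's sketch (hypothetical ops, not in the original file): the
 * hooks map to the uses above -- ->callback vets each matched ip,
 * ->func runs from function_trace_probe_call(), ->free releases data.
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *		// fires whenever a matched function is hit
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	register_ftrace_function_probe("sched*", &my_probe_ops, NULL);
 *	...
 *	unregister_ftrace_function_probe("sched*", &my_probe_ops, NULL);
 */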
2678 enum {
2679 PROBE_TEST_FUNC = 1,
2680 PROBE_TEST_DATA = 2
2683 static void
2684 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2685 void *data, int flags)
2687 struct ftrace_func_probe *entry;
2688 struct hlist_node *n, *tmp;
2689 char str[KSYM_SYMBOL_LEN];
2690 int type = MATCH_FULL;
2691 int i, len = 0;
2692 char *search;
2694 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2695 glob = NULL;
2696 else if (glob) {
2697 int not;
2699 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2700 len = strlen(search);
2702 /* we do not support '!' for function probes */
2703 if (WARN_ON(not))
2704 return;
2707 mutex_lock(&ftrace_lock);
2708 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2709 struct hlist_head *hhd = &ftrace_func_hash[i];
2711 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2713 /* break up if statements for readability */
2714 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2715 continue;
2717 if ((flags & PROBE_TEST_DATA) && entry->data != data)
2718 continue;
2720 /* do this last, since it is the most expensive */
2721 if (glob) {
2722 kallsyms_lookup(entry->ip, NULL, NULL,
2723 NULL, str);
2724 if (!ftrace_match(str, glob, len, type))
2725 continue;
2728 hlist_del(&entry->node);
2729 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2732 __disable_ftrace_function_probe();
2733 mutex_unlock(&ftrace_lock);
2736 void
2737 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2738 void *data)
2740 __unregister_ftrace_function_probe(glob, ops, data,
2741 PROBE_TEST_FUNC | PROBE_TEST_DATA);
2744 void
2745 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2747 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2750 void unregister_ftrace_function_probe_all(char *glob)
2752 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2755 static LIST_HEAD(ftrace_commands);
2756 static DEFINE_MUTEX(ftrace_cmd_mutex);
2758 int register_ftrace_command(struct ftrace_func_command *cmd)
2760 struct ftrace_func_command *p;
2761 int ret = 0;
2763 mutex_lock(&ftrace_cmd_mutex);
2764 list_for_each_entry(p, &ftrace_commands, list) {
2765 if (strcmp(cmd->name, p->name) == 0) {
2766 ret = -EBUSY;
2767 goto out_unlock;
2770 list_add(&cmd->list, &ftrace_commands);
2771 out_unlock:
2772 mutex_unlock(&ftrace_cmd_mutex);
2774 return ret;
2777 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2779 struct ftrace_func_command *p, *n;
2780 int ret = -ENODEV;
2782 mutex_lock(&ftrace_cmd_mutex);
2783 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2784 if (strcmp(cmd->name, p->name) == 0) {
2785 ret = 0;
2786 list_del_init(&p->list);
2787 goto out_unlock;
2790 out_unlock:
2791 mutex_unlock(&ftrace_cmd_mutex);
2793 return ret;
2796 static int ftrace_process_regex(struct ftrace_hash *hash,
2797 char *buff, int len, int enable)
2799 char *func, *command, *next = buff;
2800 struct ftrace_func_command *p;
2801 int ret = -EINVAL;
2803 func = strsep(&next, ":");
2805 if (!next) {
2806 ret = ftrace_match_records(hash, func, len);
2807 if (!ret)
2808 ret = -EINVAL;
2809 if (ret < 0)
2810 return ret;
2811 return 0;
2814 /* command found */
2816 command = strsep(&next, ":");
2818 mutex_lock(&ftrace_cmd_mutex);
2819 list_for_each_entry(p, &ftrace_commands, list) {
2820 if (strcmp(p->name, command) == 0) {
2821 ret = p->func(hash, func, command, next, enable);
2822 goto out_unlock;
2825 out_unlock:
2826 mutex_unlock(&ftrace_cmd_mutex);
2828 return ret;
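/*
 * Editor's note: input here is either a plain glob ("sched*"), handled
 * by ftrace_match_records(), or "<glob>:<command>:<param>", dispatched
 * to a registered command -- e.g. "*:mod:ext4" (module name
 * illustrative) runs the mod command over every function of that
 * module.
 */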
2831 static ssize_t
2832 ftrace_regex_write(struct file *file, const char __user *ubuf,
2833 size_t cnt, loff_t *ppos, int enable)
2835 struct ftrace_iterator *iter;
2836 struct trace_parser *parser;
2837 ssize_t ret, read;
2839 if (!cnt)
2840 return 0;
2842 mutex_lock(&ftrace_regex_lock);
2844 ret = -ENODEV;
2845 if (unlikely(ftrace_disabled))
2846 goto out_unlock;
2848 if (file->f_mode & FMODE_READ) {
2849 struct seq_file *m = file->private_data;
2850 iter = m->private;
2851 } else
2852 iter = file->private_data;
2854 parser = &iter->parser;
2855 read = trace_get_user(parser, ubuf, cnt, ppos);
2857 if (read >= 0 && trace_parser_loaded(parser) &&
2858 !trace_parser_cont(parser)) {
2859 ret = ftrace_process_regex(iter->hash, parser->buffer,
2860 parser->idx, enable);
2861 trace_parser_clear(parser);
2862 if (ret)
2863 goto out_unlock;
2866 ret = read;
2867 out_unlock:
2868 mutex_unlock(&ftrace_regex_lock);
2870 return ret;
2873 static ssize_t
2874 ftrace_filter_write(struct file *file, const char __user *ubuf,
2875 size_t cnt, loff_t *ppos)
2877 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2880 static ssize_t
2881 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2882 size_t cnt, loff_t *ppos)
2884 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2887 static int
2888 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2889 int reset, int enable)
2891 struct ftrace_hash **orig_hash;
2892 struct ftrace_hash *hash;
2893 int ret;
2895 /* All global ops use the global ops filters */
2896 if (ops->flags & FTRACE_OPS_FL_GLOBAL)
2897 ops = &global_ops;
2899 if (unlikely(ftrace_disabled))
2900 return -ENODEV;
2902 if (enable)
2903 orig_hash = &ops->filter_hash;
2904 else
2905 orig_hash = &ops->notrace_hash;
2907 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2908 if (!hash)
2909 return -ENOMEM;
2911 mutex_lock(&ftrace_regex_lock);
2912 if (reset)
2913 ftrace_filter_reset(hash);
2914 if (buf)
2915 ftrace_match_records(hash, buf, len);
2917 mutex_lock(&ftrace_lock);
2918 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
2919 if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
2920 && ftrace_enabled)
2921 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2923 mutex_unlock(&ftrace_lock);
2925 mutex_unlock(&ftrace_regex_lock);
2927 free_ftrace_hash(hash);
2928 return ret;
2932 * ftrace_set_filter - set a function to filter on in ftrace
2933 * @ops - the ops to set the filter with
2934 * @buf - the string that holds the function filter text.
2935 * @len - the length of the string.
2936 * @reset - non zero to reset all filters before applying this filter.
2938 * Filters denote which functions should be enabled when tracing is enabled.
2939 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2941 void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
2942 int len, int reset)
2944 ftrace_set_regex(ops, buf, len, reset, 1);
2946 EXPORT_SYMBOL_GPL(ftrace_set_filter);
2949 * ftrace_set_notrace - set a function to not trace in ftrace
2950 * @ops - the ops to set the notrace filter with
2951 * @buf - the string that holds the function notrace text.
2952 * @len - the length of the string.
2953 * @reset - non zero to reset all filters before applying this filter.
2955 * Notrace Filters denote which functions should not be enabled when tracing
2956 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2957 * for tracing.
2959 void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
2960 int len, int reset)
2962 ftrace_set_regex(ops, buf, len, reset, 0);
2964 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
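/*
 * Editor's sketch (hypothetical ops, not in the original file):
 *
 *	ftrace_set_filter(&my_ops, "sched_*", strlen("sched_*"), 1);
 *	ftrace_set_notrace(&my_ops, "sched_clock", strlen("sched_clock"), 0);
 *
 * The first call resets my_ops' filter and traces only sched_*
 * functions; the second then excludes sched_clock() from that set.
 */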
2966 * ftrace_set_global_filter - set a function to filter on with global tracers
2968 * @buf - the string that holds the function filter text.
2969 * @len - the length of the string.
2970 * @reset - non zero to reset all filters before applying this filter.
2972 * Filters denote which functions should be enabled when tracing is enabled.
2973 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2975 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
2977 ftrace_set_regex(&global_ops, buf, len, reset, 1);
2979 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
2982 * ftrace_set_global_notrace - set a function to not trace with global tracers
2984 * @buf - the string that holds the function notrace text.
2985 * @len - the length of the string.
2986 * @reset - non zero to reset all filters before applying this filter.
2988 * Notrace Filters denote which functions should not be enabled when tracing
2989 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2990 * for tracing.
2992 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
2994 ftrace_set_regex(&global_ops, buf, len, reset, 0);
2996 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
2999 * command line interface to allow users to set filters on boot up.
3001 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
3002 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3003 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3005 static int __init set_ftrace_notrace(char *str)
3007 strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3008 return 1;
3010 __setup("ftrace_notrace=", set_ftrace_notrace);
3012 static int __init set_ftrace_filter(char *str)
3014 strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3015 return 1;
3017 __setup("ftrace_filter=", set_ftrace_filter);
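/*
 * Editor's note: example boot command line using the parameters above
 * (function names illustrative); each parameter takes a comma-separated
 * list of globs:
 *
 *	ftrace_filter=schedule*,sched_fork ftrace_notrace=*spin_lock*
 */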
3019 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3020 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3021 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3023 static int __init set_graph_function(char *str)
3025 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3026 return 1;
3028 __setup("ftrace_graph_filter=", set_graph_function);
3030 static void __init set_ftrace_early_graph(char *buf)
3032 int ret;
3033 char *func;
3035 while (buf) {
3036 func = strsep(&buf, ",");
3037 /* we allow only one expression at a time */
3038 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3039 func);
3040 if (ret)
3041 printk(KERN_DEBUG "ftrace: function %s not "
3042 "traceable\n", func);
3045 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3047 static void __init
3048 set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3050 char *func;
3052 while (buf) {
3053 func = strsep(&buf, ",");
3054 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3058 static void __init set_ftrace_early_filters(void)
3060 if (ftrace_filter_buf[0])
3061 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
3062 if (ftrace_notrace_buf[0])
3063 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
3064 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3065 if (ftrace_graph_buf[0])
3066 set_ftrace_early_graph(ftrace_graph_buf);
3067 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3070 static int
3071 ftrace_regex_release(struct inode *inode, struct file *file)
3073 struct seq_file *m = (struct seq_file *)file->private_data;
3074 struct ftrace_iterator *iter;
3075 struct ftrace_hash **orig_hash;
3076 struct trace_parser *parser;
3077 int filter_hash;
3078 int ret;
3080 mutex_lock(&ftrace_regex_lock);
3081 if (file->f_mode & FMODE_READ) {
3082 iter = m->private;
3084 seq_release(inode, file);
3085 } else
3086 iter = file->private_data;
3088 parser = &iter->parser;
3089 if (trace_parser_loaded(parser)) {
3090 parser->buffer[parser->idx] = 0;
3091 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3094 trace_parser_put(parser);
3096 if (file->f_mode & FMODE_WRITE) {
3097 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3099 if (filter_hash)
3100 orig_hash = &iter->ops->filter_hash;
3101 else
3102 orig_hash = &iter->ops->notrace_hash;
3104 mutex_lock(&ftrace_lock);
3105 ret = ftrace_hash_move(iter->ops, filter_hash,
3106 orig_hash, iter->hash);
3107 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3108 && ftrace_enabled)
3109 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
3111 mutex_unlock(&ftrace_lock);
3113 free_ftrace_hash(iter->hash);
3114 kfree(iter);
3116 mutex_unlock(&ftrace_regex_lock);
3117 return 0;
3120 static const struct file_operations ftrace_avail_fops = {
3121 .open = ftrace_avail_open,
3122 .read = seq_read,
3123 .llseek = seq_lseek,
3124 .release = seq_release_private,
3127 static const struct file_operations ftrace_enabled_fops = {
3128 .open = ftrace_enabled_open,
3129 .read = seq_read,
3130 .llseek = seq_lseek,
3131 .release = seq_release_private,
3134 static const struct file_operations ftrace_filter_fops = {
3135 .open = ftrace_filter_open,
3136 .read = seq_read,
3137 .write = ftrace_filter_write,
3138 .llseek = ftrace_regex_lseek,
3139 .release = ftrace_regex_release,
3142 static const struct file_operations ftrace_notrace_fops = {
3143 .open = ftrace_notrace_open,
3144 .read = seq_read,
3145 .write = ftrace_notrace_write,
3146 .llseek = ftrace_regex_lseek,
3147 .release = ftrace_regex_release,
3150 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3152 static DEFINE_MUTEX(graph_lock);
3154 int ftrace_graph_count;
3155 int ftrace_graph_filter_enabled;
3156 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3158 static void *
3159 __g_next(struct seq_file *m, loff_t *pos)
3161 if (*pos >= ftrace_graph_count)
3162 return NULL;
3163 return &ftrace_graph_funcs[*pos];
3166 static void *
3167 g_next(struct seq_file *m, void *v, loff_t *pos)
3169 (*pos)++;
3170 return __g_next(m, pos);
3173 static void *g_start(struct seq_file *m, loff_t *pos)
3175 mutex_lock(&graph_lock);
3177 /* Nothing registered; tell g_show to print that all functions are enabled */
3178 if (!ftrace_graph_filter_enabled && !*pos)
3179 return (void *)1;
3181 return __g_next(m, pos);
3184 static void g_stop(struct seq_file *m, void *p)
3186 mutex_unlock(&graph_lock);
3189 static int g_show(struct seq_file *m, void *v)
3191 unsigned long *ptr = v;
3193 if (!ptr)
3194 return 0;
3196 if (ptr == (unsigned long *)1) {
3197 seq_printf(m, "#### all functions enabled ####\n");
3198 return 0;
3201 seq_printf(m, "%ps\n", (void *)*ptr);
3203 return 0;
3206 static const struct seq_operations ftrace_graph_seq_ops = {
3207 .start = g_start,
3208 .next = g_next,
3209 .stop = g_stop,
3210 .show = g_show,
3213 static int
3214 ftrace_graph_open(struct inode *inode, struct file *file)
3216 int ret = 0;
3218 if (unlikely(ftrace_disabled))
3219 return -ENODEV;
3221 mutex_lock(&graph_lock);
3222 if ((file->f_mode & FMODE_WRITE) &&
3223 (file->f_flags & O_TRUNC)) {
3224 ftrace_graph_filter_enabled = 0;
3225 ftrace_graph_count = 0;
3226 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3228 mutex_unlock(&graph_lock);
3230 if (file->f_mode & FMODE_READ)
3231 ret = seq_open(file, &ftrace_graph_seq_ops);
3233 return ret;
3236 static int
3237 ftrace_graph_release(struct inode *inode, struct file *file)
3239 if (file->f_mode & FMODE_READ)
3240 seq_release(inode, file);
3241 return 0;
3244 static int
3245 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3247 struct dyn_ftrace *rec;
3248 struct ftrace_page *pg;
3249 int search_len;
3250 int fail = 1;
3251 int type, not;
3252 char *search;
3253 bool exists;
3254 int i;
3256 /* decode regex */
3257 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3258 if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3259 return -EBUSY;
3261 search_len = strlen(search);
3263 mutex_lock(&ftrace_lock);
3265 if (unlikely(ftrace_disabled)) {
3266 mutex_unlock(&ftrace_lock);
3267 return -ENODEV;
3270 do_for_each_ftrace_rec(pg, rec) {
3272 if (rec->flags & FTRACE_FL_FREE)
3273 continue;
3275 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3276 /* check whether it is already in the array */
3277 exists = false;
3278 for (i = 0; i < *idx; i++) {
3279 if (array[i] == rec->ip) {
3280 exists = true;
3281 break;
3285 if (!not) {
3286 fail = 0;
3287 if (!exists) {
3288 array[(*idx)++] = rec->ip;
3289 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3290 goto out;
3292 } else {
3293 if (exists) {
3294 array[i] = array[--(*idx)];
3295 array[*idx] = 0;
3296 fail = 0;
3300 } while_for_each_ftrace_rec();
3301 out:
3302 mutex_unlock(&ftrace_lock);
3304 if (fail)
3305 return -EINVAL;
3307 ftrace_graph_filter_enabled = 1;
3308 return 0;
3311 static ssize_t
3312 ftrace_graph_write(struct file *file, const char __user *ubuf,
3313 size_t cnt, loff_t *ppos)
3315 struct trace_parser parser;
3316 ssize_t read, ret;
3318 if (!cnt)
3319 return 0;
3321 mutex_lock(&graph_lock);
3323 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3324 ret = -ENOMEM;
3325 goto out_unlock;
3328 read = trace_get_user(&parser, ubuf, cnt, ppos);
3330 if (read >= 0 && trace_parser_loaded(&parser)) {
3331 parser.buffer[parser.idx] = 0;
3333 /* we allow only one expression at a time */
3334 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3335 parser.buffer);
3336 if (ret)
3337 goto out_free;
3340 ret = read;
3342 out_free:
3343 trace_parser_put(&parser);
3344 out_unlock:
3345 mutex_unlock(&graph_lock);
3347 return ret;
3350 static const struct file_operations ftrace_graph_fops = {
3351 .open = ftrace_graph_open,
3352 .read = seq_read,
3353 .write = ftrace_graph_write,
3354 .release = ftrace_graph_release,
3355 .llseek = seq_lseek,
3357 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3359 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3362 trace_create_file("available_filter_functions", 0444,
3363 d_tracer, NULL, &ftrace_avail_fops);
3365 trace_create_file("enabled_functions", 0444,
3366 d_tracer, NULL, &ftrace_enabled_fops);
3368 trace_create_file("set_ftrace_filter", 0644, d_tracer,
3369 NULL, &ftrace_filter_fops);
3371 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3372 NULL, &ftrace_notrace_fops);
3374 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3375 trace_create_file("set_graph_function", 0644, d_tracer,
3376 NULL,
3377 &ftrace_graph_fops);
3378 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3380 return 0;
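/*
 * Editor's note: the files created above live under the tracing
 * debugfs directory (typically /sys/kernel/debug/tracing).  Typical
 * shell usage:
 *
 *	echo 'schedule*'      > set_ftrace_filter
 *	echo '!schedule_tail' >> set_ftrace_filter
 *	echo '*spin_lock*'    > set_ftrace_notrace
 *	echo 'schedule'       > set_graph_function   (graph tracer only)
 */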
3383 static int ftrace_process_locs(struct module *mod,
3384 unsigned long *start,
3385 unsigned long *end)
3387 unsigned long *p;
3388 unsigned long addr;
3389 unsigned long flags = 0; /* Shut up gcc */
3391 mutex_lock(&ftrace_lock);
3392 p = start;
3393 while (p < end) {
3394 addr = ftrace_call_adjust(*p++);
3396 * Some architecture linkers will pad between
3397 * the different mcount_loc sections of different
3398 * object files to satisfy alignments.
3399 * Skip any NULL pointers.
3401 if (!addr)
3402 continue;
3403 ftrace_record_ip(addr);
3407 * We only need to disable interrupts on start up
3408 * because we are modifying code that an interrupt
3409 * may execute, and the modification is not atomic.
3410 * But for modules, nothing runs the code we modify
3411 * until we are finished with it, and there's no
3412 * reason to cause large interrupt latencies while we do it.
3414 if (!mod)
3415 local_irq_save(flags);
3416 ftrace_update_code(mod);
3417 if (!mod)
3418 local_irq_restore(flags);
3419 mutex_unlock(&ftrace_lock);
3421 return 0;
3424 #ifdef CONFIG_MODULES
3425 void ftrace_release_mod(struct module *mod)
3427 struct dyn_ftrace *rec;
3428 struct ftrace_page *pg;
3430 mutex_lock(&ftrace_lock);
3432 if (ftrace_disabled)
3433 goto out_unlock;
3435 do_for_each_ftrace_rec(pg, rec) {
3436 if (within_module_core(rec->ip, mod)) {
3438 * rec->ip is changed in ftrace_free_rec().
3439 * It should not be between s and e if the record was freed.
3441 FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
3442 ftrace_free_rec(rec);
3444 } while_for_each_ftrace_rec();
3445 out_unlock:
3446 mutex_unlock(&ftrace_lock);
3449 static void ftrace_init_module(struct module *mod,
3450 unsigned long *start, unsigned long *end)
3452 if (ftrace_disabled || start == end)
3453 return;
3454 ftrace_process_locs(mod, start, end);
3457 static int ftrace_module_notify(struct notifier_block *self,
3458 unsigned long val, void *data)
3460 struct module *mod = data;
3462 switch (val) {
3463 case MODULE_STATE_COMING:
3464 ftrace_init_module(mod, mod->ftrace_callsites,
3465 mod->ftrace_callsites +
3466 mod->num_ftrace_callsites);
3467 break;
3468 case MODULE_STATE_GOING:
3469 ftrace_release_mod(mod);
3470 break;
3473 return 0;
3475 #else
3476 static int ftrace_module_notify(struct notifier_block *self,
3477 unsigned long val, void *data)
3479 return 0;
3481 #endif /* CONFIG_MODULES */
3483 struct notifier_block ftrace_module_nb = {
3484 .notifier_call = ftrace_module_notify,
3485 .priority = 0,
3488 extern unsigned long __start_mcount_loc[];
3489 extern unsigned long __stop_mcount_loc[];
3491 void __init ftrace_init(void)
3493 unsigned long count, addr, flags;
3494 int ret;
3496 /* Keep the ftrace pointer to the stub */
3497 addr = (unsigned long)ftrace_stub;
3499 local_irq_save(flags);
3500 ftrace_dyn_arch_init(&addr);
3501 local_irq_restore(flags);
3503 /* ftrace_dyn_arch_init places the return code in addr */
3504 if (addr)
3505 goto failed;
3507 count = __stop_mcount_loc - __start_mcount_loc;
3509 ret = ftrace_dyn_table_alloc(count);
3510 if (ret)
3511 goto failed;
3513 last_ftrace_enabled = ftrace_enabled = 1;
3515 ret = ftrace_process_locs(NULL,
3516 __start_mcount_loc,
3517 __stop_mcount_loc);
3519 ret = register_module_notifier(&ftrace_module_nb);
3520 if (ret)
3521 pr_warning("Failed to register ftrace module notifier\n");
3523 set_ftrace_early_filters();
3525 return;
3526 failed:
3527 ftrace_disabled = 1;
3530 #else
3532 static struct ftrace_ops global_ops = {
3533 .func = ftrace_stub,
3536 static int __init ftrace_nodyn_init(void)
3538 ftrace_enabled = 1;
3539 return 0;
3541 device_initcall(ftrace_nodyn_init);
3543 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3544 static inline void ftrace_startup_enable(int command) { }
3545 /* Keep as macros so we do not need to define the commands */
3546 # define ftrace_startup(ops, command) \
3547 ({ \
3548 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
3549 0; \
3551 # define ftrace_shutdown(ops, command) do { } while (0)
3552 # define ftrace_startup_sysctl() do { } while (0)
3553 # define ftrace_shutdown_sysctl() do { } while (0)
3555 static inline int
3556 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3558 return 1;
3561 #endif /* CONFIG_DYNAMIC_FTRACE */
3563 static void
3564 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3566 struct ftrace_ops *op;
3568 if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3569 return;
3571 trace_recursion_set(TRACE_INTERNAL_BIT);
3573 * Some of the ops may be dynamically allocated;
3574 * they must be freed only after a synchronize_sched().
3576 preempt_disable_notrace();
3577 op = rcu_dereference_raw(ftrace_ops_list);
3578 while (op != &ftrace_list_end) {
3579 if (ftrace_ops_test(op, ip))
3580 op->func(ip, parent_ip);
3581 op = rcu_dereference_raw(op->next);
3583 preempt_enable_notrace();
3584 trace_recursion_clear(TRACE_INTERNAL_BIT);
3587 static void clear_ftrace_swapper(void)
3589 struct task_struct *p;
3590 int cpu;
3592 get_online_cpus();
3593 for_each_online_cpu(cpu) {
3594 p = idle_task(cpu);
3595 clear_tsk_trace_trace(p);
3597 put_online_cpus();
3600 static void set_ftrace_swapper(void)
3602 struct task_struct *p;
3603 int cpu;
3605 get_online_cpus();
3606 for_each_online_cpu(cpu) {
3607 p = idle_task(cpu);
3608 set_tsk_trace_trace(p);
3610 put_online_cpus();
3613 static void clear_ftrace_pid(struct pid *pid)
3615 struct task_struct *p;
3617 rcu_read_lock();
3618 do_each_pid_task(pid, PIDTYPE_PID, p) {
3619 clear_tsk_trace_trace(p);
3620 } while_each_pid_task(pid, PIDTYPE_PID, p);
3621 rcu_read_unlock();
3623 put_pid(pid);
3626 static void set_ftrace_pid(struct pid *pid)
3628 struct task_struct *p;
3630 rcu_read_lock();
3631 do_each_pid_task(pid, PIDTYPE_PID, p) {
3632 set_tsk_trace_trace(p);
3633 } while_each_pid_task(pid, PIDTYPE_PID, p);
3634 rcu_read_unlock();
3637 static void clear_ftrace_pid_task(struct pid *pid)
3639 if (pid == ftrace_swapper_pid)
3640 clear_ftrace_swapper();
3641 else
3642 clear_ftrace_pid(pid);
3645 static void set_ftrace_pid_task(struct pid *pid)
3647 if (pid == ftrace_swapper_pid)
3648 set_ftrace_swapper();
3649 else
3650 set_ftrace_pid(pid);
3653 static int ftrace_pid_add(int p)
3655 struct pid *pid;
3656 struct ftrace_pid *fpid;
3657 int ret = -EINVAL;
3659 mutex_lock(&ftrace_lock);
3661 if (!p)
3662 pid = ftrace_swapper_pid;
3663 else
3664 pid = find_get_pid(p);
3666 if (!pid)
3667 goto out;
3669 ret = 0;
3671 list_for_each_entry(fpid, &ftrace_pids, list)
3672 if (fpid->pid == pid)
3673 goto out_put;
3675 ret = -ENOMEM;
3677 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3678 if (!fpid)
3679 goto out_put;
3681 list_add(&fpid->list, &ftrace_pids);
3682 fpid->pid = pid;
3684 set_ftrace_pid_task(pid);
3686 ftrace_update_pid_func();
3687 ftrace_startup_enable(0);
3689 mutex_unlock(&ftrace_lock);
3690 return 0;
3692 out_put:
3693 if (pid != ftrace_swapper_pid)
3694 put_pid(pid);
3696 out:
3697 mutex_unlock(&ftrace_lock);
3698 return ret;
3701 static void ftrace_pid_reset(void)
3703 struct ftrace_pid *fpid, *safe;
3705 mutex_lock(&ftrace_lock);
3706 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3707 struct pid *pid = fpid->pid;
3709 clear_ftrace_pid_task(pid);
3711 list_del(&fpid->list);
3712 kfree(fpid);
3715 ftrace_update_pid_func();
3716 ftrace_startup_enable(0);
3718 mutex_unlock(&ftrace_lock);
3721 static void *fpid_start(struct seq_file *m, loff_t *pos)
3723 mutex_lock(&ftrace_lock);
3725 if (list_empty(&ftrace_pids) && (!*pos))
3726 return (void *) 1;
3728 return seq_list_start(&ftrace_pids, *pos);
3731 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3733 if (v == (void *)1)
3734 return NULL;
3736 return seq_list_next(v, &ftrace_pids, pos);
3739 static void fpid_stop(struct seq_file *m, void *p)
3741 mutex_unlock(&ftrace_lock);
3744 static int fpid_show(struct seq_file *m, void *v)
3746 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3748 if (v == (void *)1) {
3749 seq_printf(m, "no pid\n");
3750 return 0;
3753 if (fpid->pid == ftrace_swapper_pid)
3754 seq_printf(m, "swapper tasks\n");
3755 else
3756 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3758 return 0;
3761 static const struct seq_operations ftrace_pid_sops = {
3762 .start = fpid_start,
3763 .next = fpid_next,
3764 .stop = fpid_stop,
3765 .show = fpid_show,
3768 static int
3769 ftrace_pid_open(struct inode *inode, struct file *file)
3771 int ret = 0;
3773 if ((file->f_mode & FMODE_WRITE) &&
3774 (file->f_flags & O_TRUNC))
3775 ftrace_pid_reset();
3777 if (file->f_mode & FMODE_READ)
3778 ret = seq_open(file, &ftrace_pid_sops);
3780 return ret;
3783 static ssize_t
3784 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3785 size_t cnt, loff_t *ppos)
3787 char buf[64], *tmp;
3788 long val;
3789 int ret;
3791 if (cnt >= sizeof(buf))
3792 return -EINVAL;
3794 if (copy_from_user(&buf, ubuf, cnt))
3795 return -EFAULT;
3797 buf[cnt] = 0;
3800 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3801 * to clean the filter quietly.
3803 tmp = strstrip(buf);
3804 if (strlen(tmp) == 0)
3805 return 1;
3807 ret = strict_strtol(tmp, 10, &val);
3808 if (ret < 0)
3809 return ret;
3811 ret = ftrace_pid_add(val);
3813 return ret ? ret : cnt;
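/*
 * Editor's note: usage of the write path above.  A pid limits function
 * tracing to that task, 0 selects the idle (swapper) tasks, and an
 * empty write clears the list via the O_TRUNC open:
 *
 *	echo 1234 > set_ftrace_pid
 *	echo 0    > set_ftrace_pid
 *	echo      > set_ftrace_pid
 */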
3816 static int
3817 ftrace_pid_release(struct inode *inode, struct file *file)
3819 if (file->f_mode & FMODE_READ)
3820 seq_release(inode, file);
3822 return 0;
3825 static const struct file_operations ftrace_pid_fops = {
3826 .open = ftrace_pid_open,
3827 .write = ftrace_pid_write,
3828 .read = seq_read,
3829 .llseek = seq_lseek,
3830 .release = ftrace_pid_release,
3833 static __init int ftrace_init_debugfs(void)
3835 struct dentry *d_tracer;
3837 d_tracer = tracing_init_dentry();
3838 if (!d_tracer)
3839 return 0;
3841 ftrace_init_dyn_debugfs(d_tracer);
3843 trace_create_file("set_ftrace_pid", 0644, d_tracer,
3844 NULL, &ftrace_pid_fops);
3846 ftrace_profile_debugfs(d_tracer);
3848 return 0;
3850 fs_initcall(ftrace_init_debugfs);
3853 * ftrace_kill - kill ftrace
3855 * This function should be used by panic code. It stops ftrace
3856 * but in a not so nice way. If you need to simply stop ftrace
3857 * from a non-atomic section, use the normal unregister paths instead.
3859 void ftrace_kill(void)
3861 ftrace_disabled = 1;
3862 ftrace_enabled = 0;
3863 clear_ftrace_function();
3867 * register_ftrace_function - register a function for profiling
3868 * @ops - ops structure that holds the function for profiling.
3870 * Register a function to be called by all functions in the
3871 * kernel.
3873 * Note: @ops->func and all the functions it calls must be labeled
3874 * with "notrace", otherwise it will go into a
3875 * recursive loop.
3877 int register_ftrace_function(struct ftrace_ops *ops)
3879 int ret = -1;
3881 mutex_lock(&ftrace_lock);
3883 if (unlikely(ftrace_disabled))
3884 goto out_unlock;
3886 ret = __register_ftrace_function(ops);
3887 if (!ret)
3888 ret = ftrace_startup(ops, 0);
3891 out_unlock:
3892 mutex_unlock(&ftrace_lock);
3893 return ret;
3895 EXPORT_SYMBOL_GPL(register_ftrace_function);
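/*
 * Editor's sketch (hypothetical, not in the original file): a minimal
 * registration.  Per the note above, the handler must be notrace:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		// called for every traced function; keep it notrace-safe
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */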
3898 * unregister_ftrace_function - unregister a function for profiling.
3899 * @ops - ops structure that holds the function to unregister
3901 * Unregister a function that was added to be called by ftrace profiling.
3903 int unregister_ftrace_function(struct ftrace_ops *ops)
3905 int ret;
3907 mutex_lock(&ftrace_lock);
3908 ret = __unregister_ftrace_function(ops);
3909 if (!ret)
3910 ftrace_shutdown(ops, 0);
3911 mutex_unlock(&ftrace_lock);
3913 return ret;
3915 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
3918 ftrace_enable_sysctl(struct ctl_table *table, int write,
3919 void __user *buffer, size_t *lenp,
3920 loff_t *ppos)
3922 int ret = -ENODEV;
3924 mutex_lock(&ftrace_lock);
3926 if (unlikely(ftrace_disabled))
3927 goto out;
3929 ret = proc_dointvec(table, write, buffer, lenp, ppos);
3931 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3932 goto out;
3934 last_ftrace_enabled = !!ftrace_enabled;
3936 if (ftrace_enabled) {
3938 ftrace_startup_sysctl();
3940 /* we are starting ftrace again */
3941 if (ftrace_ops_list != &ftrace_list_end) {
3942 if (ftrace_ops_list->next == &ftrace_list_end)
3943 ftrace_trace_function = ftrace_ops_list->func;
3944 else
3945 ftrace_trace_function = ftrace_ops_list_func;
3948 } else {
3949 /* stopping ftrace calls (just send to ftrace_stub) */
3950 ftrace_trace_function = ftrace_stub;
3952 ftrace_shutdown_sysctl();
3955 out:
3956 mutex_unlock(&ftrace_lock);
3957 return ret;
3960 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3962 static int ftrace_graph_active;
3963 static struct notifier_block ftrace_suspend_notifier;
3965 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3967 return 0;
3970 /* The callbacks that hook a function */
3971 trace_func_graph_ret_t ftrace_graph_return =
3972 (trace_func_graph_ret_t)ftrace_stub;
3973 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3975 /* Try to assign return stacks to up to FTRACE_RETSTACK_ALLOC_SIZE tasks at a time. */
3976 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3978 int i;
3979 int ret = 0;
3980 unsigned long flags;
3981 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3982 struct task_struct *g, *t;
3984 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3985 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3986 * sizeof(struct ftrace_ret_stack),
3987 GFP_KERNEL);
3988 if (!ret_stack_list[i]) {
3989 start = 0;
3990 end = i;
3991 ret = -ENOMEM;
3992 goto free;
3996 read_lock_irqsave(&tasklist_lock, flags);
3997 do_each_thread(g, t) {
3998 if (start == end) {
3999 ret = -EAGAIN;
4000 goto unlock;
4003 if (t->ret_stack == NULL) {
4004 atomic_set(&t->tracing_graph_pause, 0);
4005 atomic_set(&t->trace_overrun, 0);
4006 t->curr_ret_stack = -1;
4007 /* Make sure the tasks see the -1 first: */
4008 smp_wmb();
4009 t->ret_stack = ret_stack_list[start++];
4011 } while_each_thread(g, t);
4013 unlock:
4014 read_unlock_irqrestore(&tasklist_lock, flags);
4015 free:
4016 for (i = start; i < end; i++)
4017 kfree(ret_stack_list[i]);
4018 return ret;
4021 static void
4022 ftrace_graph_probe_sched_switch(void *ignore,
4023 struct task_struct *prev, struct task_struct *next)
4025 unsigned long long timestamp;
4026 int index;
4029 * Does the user want to count the time a function was asleep?
4030 * If so, do not update the timestamps.
4032 if (trace_flags & TRACE_ITER_SLEEP_TIME)
4033 return;
4035 timestamp = trace_clock_local();
4037 prev->ftrace_timestamp = timestamp;
4039 /* only process tasks that we timestamped */
4040 if (!next->ftrace_timestamp)
4041 return;
4044 * Update all the counters in next to make up for the
4045 * time next was sleeping.
4047 timestamp -= next->ftrace_timestamp;
4049 for (index = next->curr_ret_stack; index >= 0; index--)
4050 next->ret_stack[index].calltime += timestamp;
4053 /* Allocate a return stack for each task */
4054 static int start_graph_tracing(void)
4056 struct ftrace_ret_stack **ret_stack_list;
4057 int ret, cpu;
4059 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4060 sizeof(struct ftrace_ret_stack *),
4061 GFP_KERNEL);
4063 if (!ret_stack_list)
4064 return -ENOMEM;
4066 /* The boot CPU's init_task ret_stack will never be freed */
4067 for_each_online_cpu(cpu) {
4068 if (!idle_task(cpu)->ret_stack)
4069 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4072 do {
4073 ret = alloc_retstack_tasklist(ret_stack_list);
4074 } while (ret == -EAGAIN);
4076 if (!ret) {
4077 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4078 if (ret)
4079 pr_info("ftrace_graph: Couldn't activate tracepoint"
4080 " probe to kernel_sched_switch\n");
4083 kfree(ret_stack_list);
4084 return ret;
4088 * Hibernation protection.
4089 * The state of the current task is too unstable during
4090 * suspend/restore to disk. We want to protect against that.
4092 static int
4093 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4094 void *unused)
4096 switch (state) {
4097 case PM_HIBERNATION_PREPARE:
4098 pause_graph_tracing();
4099 break;
4101 case PM_POST_HIBERNATION:
4102 unpause_graph_tracing();
4103 break;
4105 return NOTIFY_DONE;
4108 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4109 trace_func_graph_ent_t entryfunc)
4111 int ret = 0;
4113 mutex_lock(&ftrace_lock);
4115 /* we currently allow only one tracer registered at a time */
4116 if (ftrace_graph_active) {
4117 ret = -EBUSY;
4118 goto out;
4121 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4122 register_pm_notifier(&ftrace_suspend_notifier);
4124 ftrace_graph_active++;
4125 ret = start_graph_tracing();
4126 if (ret) {
4127 ftrace_graph_active--;
4128 goto out;
4131 ftrace_graph_return = retfunc;
4132 ftrace_graph_entry = entryfunc;
4134 ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4136 out:
4137 mutex_unlock(&ftrace_lock);
4138 return ret;
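/*
 * Editor's sketch (hypothetical callbacks, not in the original file).
 * The entry callback returns nonzero to have this function's return
 * traced as well, 0 to skip it:
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */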
4141 void unregister_ftrace_graph(void)
4143 mutex_lock(&ftrace_lock);
4145 if (unlikely(!ftrace_graph_active))
4146 goto out;
4148 ftrace_graph_active--;
4149 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4150 ftrace_graph_entry = ftrace_graph_entry_stub;
4151 ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4152 unregister_pm_notifier(&ftrace_suspend_notifier);
4153 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4155 out:
4156 mutex_unlock(&ftrace_lock);
4159 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4161 static void
4162 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4164 atomic_set(&t->tracing_graph_pause, 0);
4165 atomic_set(&t->trace_overrun, 0);
4166 t->ftrace_timestamp = 0;
4167 /* make curr_ret_stack visible before we add the ret_stack */
4168 smp_wmb();
4169 t->ret_stack = ret_stack;
4173 * Allocate a return stack for the idle task. May be the first
4174 * time through, or it may be done by CPU hotplug online.
4176 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4178 t->curr_ret_stack = -1;
4180 * The idle task has no parent; it either has its own
4181 * stack or no stack at all.
4183 if (t->ret_stack)
4184 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4186 if (ftrace_graph_active) {
4187 struct ftrace_ret_stack *ret_stack;
4189 ret_stack = per_cpu(idle_ret_stack, cpu);
4190 if (!ret_stack) {
4191 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4192 * sizeof(struct ftrace_ret_stack),
4193 GFP_KERNEL);
4194 if (!ret_stack)
4195 return;
4196 per_cpu(idle_ret_stack, cpu) = ret_stack;
4198 graph_init_task(t, ret_stack);
4202 /* Allocate a return stack for newly created task */
4203 void ftrace_graph_init_task(struct task_struct *t)
4205 /* Make sure we do not use the parent ret_stack */
4206 t->ret_stack = NULL;
4207 t->curr_ret_stack = -1;
4209 if (ftrace_graph_active) {
4210 struct ftrace_ret_stack *ret_stack;
4212 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4213 * sizeof(struct ftrace_ret_stack),
4214 GFP_KERNEL);
4215 if (!ret_stack)
4216 return;
4217 graph_init_task(t, ret_stack);
4221 void ftrace_graph_exit_task(struct task_struct *t)
4223 struct ftrace_ret_stack *ret_stack = t->ret_stack;
4225 t->ret_stack = NULL;
4226 /* NULL must become visible to IRQs before we free it: */
4227 barrier();
4229 kfree(ret_stack);
4232 void ftrace_graph_stop(void)
4234 ftrace_stop();
4236 #endif