kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>

#include <trace/events/sched.h>

#include <asm/ftrace.h>
#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the
 * tracing actually stopping.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}
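
/*
 * Example (illustrative only; my_trace_func and my_ops are hypothetical
 * names): a minimal user of this registration path, via the public
 * register_ftrace_function() wrapper. The callback signature matches
 * ftrace_func_t as used throughout this file.
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *		// called on every traced function; ip is the callee,
 *		// parent_ip its call site
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */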
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		return 0;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

	return 0;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	if (ftrace_trace_function == ftrace_stub)
		return;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif
}
#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE						\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
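
/*
 * Example of the sizing math above (illustrative; exact numbers depend
 * on the arch and config): on a 64-bit build with 4K pages and the
 * graph tracer enabled, struct ftrace_profile is 40 bytes (16 for the
 * hlist_node, 8 each for ip, counter and time). PROFILE_RECORDS_SIZE
 * is then 4096 minus the 16-byte page header, and PROFILES_PER_PAGE
 * works out to roughly 100 records per page.
 */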
static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg\n"
		   "  --------                               "
		   "---    ----            ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		   "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static DEFINE_MUTEX(mutex);
	static struct trace_seq s;
	unsigned long long avg;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	mutex_lock(&mutex);
	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_print_seq(m, &s);
	mutex_unlock(&mutex);
#endif
	seq_putc(m, '\n');

	return 0;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}
int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 0; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	/*
	 * The loop above already freed the first page
	 * (stat->start == stat->pages), so do not free it again.
	 */
	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}
static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	if (!ftrace_profile_bits) {
		size--;

		for (; size; size >>= 1)
			ftrace_profile_bits++;
	}

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_online_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, ftrace_profile_bits);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0);
	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec)
		rec->time += calltime;

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly =
{
	.func		= function_profile_call,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];		/* big enough to hold a number */
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched().
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	filp->f_pos += cnt;

	return cnt;
}
static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
};
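
/*
 * Typical use of the file backed by the fops above, from user space
 * (assuming debugfs is mounted at the usual location):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	# cat /sys/kernel/debug/tracing/trace_stat/function0
 *
 * There is one "functionN" stat file per cpu; they are registered in
 * ftrace_profile_debugfs() below.
 */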
/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent; if something
			 * happens, we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}
#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
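
/*
 * Example usage of the iterator pair above (this pattern recurs
 * throughout the rest of this file):
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_FREE)
 *			continue;	// 'continue' is fine, 'break' is not
 *		...
 *	} while_for_each_ftrace_rec();
 */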
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->freelist = ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = rec->freelist;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;
	rec->newlist = ftrace_new_addrs;
	ftrace_new_addrs = rec;

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}


static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	unsigned long ip, fl;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 *
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered or enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
static void ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records, failed records, and records
		 * that have not been converted.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED ||
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		/* ignore updates to this record's mcount site */
		if (get_kprobe((void *)rec->ip)) {
			freeze_record(rec);
			continue;
		} else {
			unfreeze_record(rec);
		}

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
			if ((system_state == SYSTEM_BOOTING) ||
			    !core_kernel_text(rec->ip)) {
				ftrace_free_rec(rec);
			} else {
				ftrace_bug(failed, rec->ip);
				/* Stop processing */
				return;
			}
		}
	} while_for_each_ftrace_rec();
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);
}
static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may never be nopped again
	 * after further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	while (ftrace_new_addrs) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		p = ftrace_new_addrs;
		ftrace_new_addrs = p->newlist;
		p->flags = 0L;

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
	FTRACE_ITER_PRINTALL	= (1 << 4),
	FTRACE_ITER_HASH	= (1 << 5),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			hidx;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = v;
	struct hlist_head *hhd;

	WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

	(*pos)++;

 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	return hnd;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (!(iter->flags & FTRACE_ITER_HASH))
		*pos = 0;

	iter->flags |= FTRACE_ITER_HASH;

	iter->hidx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_hash_next(m, p, &l);
		if (!p)
			break;
	}
	return p;
}

static int t_hash_show(struct seq_file *m, void *v)
{
	struct ftrace_func_probe *rec;
	struct hlist_node *hnd = v;
	char str[KSYM_SYMBOL_LEN];

	rec = hlist_entry(hnd, struct ftrace_func_probe, node);

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "%s:", str);

	kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
	seq_printf(m, "%s", str);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, v, pos);

	(*pos)++;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p && iter->flags & FTRACE_ITER_FILTER)
		return t_hash_start(m, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, v);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	mutex_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;
		rec->flags &= ~type;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

/*
 * (static function - no need for kernel doc)
 *
 * Pass in a buffer containing a glob and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *      0 otherwise.
 */
static int
ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
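
/*
 * Examples of what ftrace_setup_glob() returns for a few inputs:
 *
 *	"schedule"	-> MATCH_FULL,        search = "schedule"
 *	"sched*"	-> MATCH_FRONT_ONLY,  search = "sched"
 *	"*sched"	-> MATCH_END_ONLY,    search = "sched"
 *	"*sched*"	-> MATCH_MIDDLE_ONLY, search = "sched"
 *	"!sched*"	-> MATCH_FRONT_ONLY,  with *not set to 1
 */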
static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	char *ptr;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		ptr = strstr(str, regex);
		if (ptr && (ptr[len] == 0))
			matched = 1;
		break;
	}

	return matched;
}

static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	return ftrace_match(str, regex, len, type);
}

static void ftrace_match_records(char *buff, int len, int enable)
{
	unsigned int search_len;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long flag;
	char *search;
	int type;
	int not;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	type = ftrace_setup_glob(buff, len, &search, &not);

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		/*
		 * Only enable filtering if we have a function that
		 * is filtered on.
		 */
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static int
ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
			   char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (!modname || strcmp(modname, mod))
		return 0;

	/* blank search means to match all funcs in the mod */
	if (len)
		return ftrace_match(str, regex, len, type);
	else
		return 1;
}

static void ftrace_match_module_records(char *buff, char *mod, int enable)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	unsigned long flag;
	int not = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'dont filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	if (strlen(buff)) {
		type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_module_record(rec, mod,
					       search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;

	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
	char *mod;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return -EINVAL;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return -EINVAL;

	ftrace_match_module_records(func, mod, enable);
	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
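
/*
 * With the command registered, the filter files accept lines such as:
 *
 *	# echo '*:mod:ipv6' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * which ends up calling ftrace_mod_callback() with func = "*" and
 * param = "ipv6", filtering on every function of the ipv6 module.
 * (The module name is an example; any loaded module works.)
 */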
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;
	int resched;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent an RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	resched = ftrace_preempt_disable();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func		= function_trace_probe_call,
};

static int ftrace_probe_registered;
static void __enable_ftrace_function_probe(void)
{
	int i;

	if (ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	__register_ftrace_function(&trace_probe_ops);
	ftrace_startup(0);
	ftrace_probe_registered = 1;
}

static void __disable_ftrace_function_probe(void)
{
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	__unregister_ftrace_function(&trace_probe_ops);
	ftrace_shutdown(0);
	ftrace_probe_registered = 0;
}


static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
	struct ftrace_func_probe *entry =
		container_of(rhp, struct ftrace_func_probe, rcu);

	if (entry->ops->free)
		entry->ops->free(&entry->data);
	kfree(entry);
}
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (!ftrace_match_record(rec, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
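
/*
 * Example (illustrative only; my_probe_func and my_probe_ops are
 * hypothetical names): hooking a probe to every function matching a
 * glob. The callback runs from function_trace_probe_call() above each
 * time one of the matched functions is hit; the third argument points
 * to the per-ip cookie stored in the hash entry.
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *		// react to the hit
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	register_ftrace_function_probe("sched*", &my_probe_ops, NULL);
 */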
enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
};

static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				  void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	/* only a "*" or empty glob means "match everything" */
	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else {
		int not;

		type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}
	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				/* match against the parsed search part */
				if (!ftrace_match(str, search, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}

void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}

static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
static int ftrace_process_regex(char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ftrace_match_records(func, len, enable);
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ret = ftrace_process_regex(iter->buffer,
					   iter->buffer_idx, enable);
		if (ret)
			goto out;
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
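
/*
 * Example (illustrative) of the two calls above from tracer code.
 * Note the buffer is parsed in place by ftrace_setup_glob(), so it
 * must be writable (not a string literal):
 *
 *	unsigned char buf[] = "sched*";
 *
 *	ftrace_set_filter(buf, strlen(buf), 1);   // reset, trace sched*
 *	ftrace_set_notrace(NULL, 0, 1);           // clear all notrace marks
 */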
/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;

static int __init set_ftrace_notrace(char *str)
{
	strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);

static int __init set_ftrace_filter(char *str)
{
	strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);

static void __init set_ftrace_early_filter(char *buf, int enable)
{
	char *func;

	while (buf) {
		func = strsep(&buf, ",");
		ftrace_set_regex(func, strlen(func), 0, enable);
	}
}

static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		set_ftrace_early_filter(ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		set_ftrace_early_filter(ftrace_notrace_buf, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
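
/*
 * The four file_operations above back the debugfs control files,
 * wired up by the debugfs init code later in this file and found
 * under the usual /sys/kernel/debug/tracing mount:
 *
 *	available_filter_functions  - every function ftrace can trace
 *	failures                    - functions ftrace failed to patch
 *	set_ftrace_filter           - limit tracing to matching functions
 *	set_ftrace_notrace          - exclude matching functions
 */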
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
__g_next(struct seq_file *m, loff_t *pos)
{
	unsigned long *array = m->private;

	if (*pos >= ftrace_graph_count)
		return NULL;
	return &array[*pos];
}

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __g_next(m, pos);
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
	if (!ftrace_graph_count && !*pos)
		return (void *)1;

	return __g_next(m, pos);
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	if (ptr == (unsigned long *)1) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);
	return 0;
}

static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int found = 0;
	int type, not;
	char *search;
	bool exists;
	int i;

	if (ftrace_disabled)
		return -ENODEV;

	/* decode regex */
	type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
	if (not)
		return -EINVAL;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
			break;

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			/* ensure it is not already in the array */
			exists = false;
			for (i = 0; i < *idx; i++)
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			if (!exists) {
				array[(*idx)++] = rec->ip;
				found = 1;
			}
		}
	} while_for_each_ftrace_rec();

	mutex_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	/* cnt is a size_t and can never be negative */
	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one expression at a time */
	ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
	if (ret)
		goto out;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = seq_read,
	.write = ftrace_graph_write,
	.release = ftrace_graph_release,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
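
/*
 * Example (sketch): restricting the graph tracer to selected start
 * functions, assuming CONFIG_FUNCTION_GRAPH_TRACER and a mounted debugfs:
 *
 *	echo do_sys_open > /sys/kernel/debug/tracing/set_graph_function
 *	echo function_graph > /sys/kernel/debug/tracing/current_tracer
 */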

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{

	trace_create_file("available_filter_functions", 0444,
			d_tracer, NULL, &ftrace_avail_fops);

	trace_create_file("failures", 0444,
			d_tracer, NULL, &ftrace_failures_fops);

	trace_create_file("set_ftrace_filter", 0644, d_tracer,
			NULL, &ftrace_filter_fops);

	trace_create_file("set_ftrace_notrace", 0644, d_tracer,
			NULL, &ftrace_notrace_fops);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	trace_create_file("set_graph_function", 0444, d_tracer,
			NULL,
			&ftrace_graph_fops);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}

static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}

#ifdef CONFIG_MODULES
void ftrace_release(void *start, void *end)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = (unsigned long)end;

	if (ftrace_disabled || !start || start == end)
		return;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if ((rec->ip >= s) && (rec->ip < e)) {
			/*
			 * rec->ip is changed in ftrace_free_rec();
			 * it should not be between s and e if the
			 * record was freed.
			 */
			FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
			ftrace_free_rec(rec);
		}
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static void ftrace_init_module(struct module *mod,
			       unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

static int ftrace_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		ftrace_init_module(mod, mod->ftrace_callsites,
				   mod->ftrace_callsites +
				   mod->num_ftrace_callsites);
		break;
	case MODULE_STATE_GOING:
		ftrace_release(mod->ftrace_callsites,
			       mod->ftrace_callsites +
			       mod->num_ftrace_callsites);
		break;
	}

	return 0;
}
#else
static int ftrace_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block ftrace_module_nb = {
	.notifier_call = ftrace_module_notify,
	.priority = 0,
};

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	ret = register_module_notifier(&ftrace_module_nb);
	if (ret)
		pr_warning("Failed to register ftrace module notifier\n");

	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_lock);

	return cnt;
}

static const struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};
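
/*
 * Example (sketch): limiting function tracing to a single task with the
 * file created below; 0 selects the idle (swapper) tasks and a negative
 * value disables pid filtering:
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	echo -1 > /sys/kernel/debug/tracing/set_ftrace_pid
 */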

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			  NULL, &ftrace_pid_fops);

	ftrace_profile_debugfs(d_tracer);

	return 0;
}
fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: nothing is unregistered and nothing
 * is restored, tracing is simply disabled for good. That also
 * makes it safe to call from atomic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
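
/*
 * Example (sketch): a caller that must stop all tracing on a fatal
 * anomaly, even from atomic context ("anomaly_detected" is an
 * illustrative condition, not part of this file):
 *
 *	if (anomaly_detected)
 *		ftrace_kill();
 */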

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_lock);
	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
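
/*
 * Example (sketch): a minimal ftrace_ops user. The callback must be
 * notrace, as noted above. "my_trace_func", "my_ops" and "my_hit_count"
 * are illustrative names, not part of this file:
 *
 *	static atomic_t my_hit_count;
 *
 *	static notrace void my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		atomic_inc(&my_hit_count);
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */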

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
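
/*
 * Example (sketch): this handler backs the kernel.ftrace_enabled
 * sysctl, so function tracing can be toggled at run time with:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *	echo 1 > /proc/sys/kernel/ftrace_enabled
 */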

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static int ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

static void
ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
				struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}
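
/*
 * Worked example of the effect (sketch): if "next" was asleep for 2ms,
 * 2ms is added to every pending calltime above, so the duration the
 * graph tracer reports (rettime - calltime) no longer includes the
 * sleep. With TRACE_ITER_SLEEP_TIME set, the adjustment is skipped and
 * reported durations include the time spent sleeping.
 */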

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_task(idle_task(cpu));
	}

	/*
	 * Allocating the stacks races with fork(); retry until one pass
	 * finds a pre-allocated stack for every task that needs one.
	 */
	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
			     void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

 out:
	mutex_unlock(&ftrace_lock);
}
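
/*
 * Example (sketch): how a tracer hooks the graph callbacks. "my_entry"
 * and "my_return" are illustrative names, not part of this file; a
 * nonzero return from the entry callback means "trace this function":
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 *	...
 *	unregister_ftrace_graph();
 */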

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				    * sizeof(struct ftrace_ret_stack),
				    GFP_KERNEL);
		if (!ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
		t->ftrace_timestamp = 0;
		/* make curr_ret_stack visible before we add the ret_stack */
		smp_wmb();
		t->ret_stack = ret_stack;
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif