/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag between the call to clear_ftrace_function
 * and the time it takes for all calls to stop tracing.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
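/*
 * Summary of the ordering used by the registration code above, writer
 * vs. the lockless reader in ftrace_list_func():
 *
 *	__register_ftrace_function()	ftrace_list_func()
 *	----------------------------	------------------
 *	ops->next = ftrace_list;	op = ftrace_list;
 *	smp_wmb();			read_barrier_depends();
 *	ftrace_list = ops;		op->func(ip, parent_ip);
 *
 * The write barrier guarantees that a reader that sees the new ops in
 * ftrace_list also sees a valid ops->next, so the list walk can never
 * fall off the end of the list.
 */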
#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);
struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
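/*
 * Worked example of the macro above (illustrative figures only,
 * assuming a 64-bit build with 4096-byte pages, a 16-byte struct
 * ftrace_page header and a 32-byte struct dyn_ftrace):
 *
 *	(4096 - 16) / 32 = 127 records per page
 */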
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
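/*
 * The freeze/unfreeze helpers above let the code-patching loop skip any
 * mcount call site that currently has a kprobe installed on it (see the
 * get_kprobe() check in ftrace_replace_code()): a frozen record keeps
 * its flags but its call site is left untouched until the kprobe is
 * removed.
 */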
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (!ftrace_enabled || ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}
#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}
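/*
 * Decision table for the "ftrace_filtered && enable" branch above, in
 * terms of the three record flags that are tested:
 *
 *	FILTER	NOTRACE	ENABLED		action
 *	------	-------	-------		------
 *	  0	  0	  0		nothing
 *	  0	  0	  1		disable
 *	  0	  1	  0		nothing
 *	  0	  1	  1		disable
 *	  1	  0	  0		enable
 *	  1	  0	  1		nothing (already enabled)
 *	  1	  1	  0		nothing (NOTRACE wins)
 *	  1	  1	  1		disable (NOTRACE wins)
 */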
static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				}
			}
		}
	}
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int ret;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	ret = ftrace_modify_code(ip, call, nop);
	if (ret) {
		switch (ret) {
		case -EFAULT:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on modifying ");
			print_ip_sym(ip);
			break;
		case -EINVAL:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace failed to modify ");
			print_ip_sym(ip);
			print_ip_ins(" expected: ", call);
			print_ip_ins(" actual: ", (unsigned char *)ip);
			print_ip_ins(" replace: ", nop);
			printk(KERN_CONT "\n");
			break;
		case -EPERM:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on writing ");
			print_ip_sym(ip);
			break;
		default:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on unknown error ");
			print_ip_sym(ip);
		}

		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
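/*
 * The text patching is funneled through stop_machine() so that every
 * other CPU is quiescent (spinning with interrupts disabled) while
 * __ftrace_modify_code() rewrites the mcount call sites; no CPU can be
 * executing an instruction that is being modified.
 */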
static ftrace_func_t saved_ftrace_func;
static int ftrace_start;
static DEFINE_MUTEX(ftrace_start_lock);
static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start++;
	if (ftrace_start == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start--;
	if (!ftrace_start)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start is true if we want ftrace running */
	if (ftrace_start)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start is true if ftrace is running */
	if (ftrace_start)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(void)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}
int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
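/*
 * How the wildcard parsing above classifies a glob (examples):
 *
 *	"schedule"	MATCH_FULL		exact symbol name
 *	"sys_*"		MATCH_FRONT_ONLY	symbol starts with "sys_"
 *	"*_unlock"	MATCH_END_ONLY		symbol ends with "_unlock"
 *	"*sched*"	MATCH_MIDDLE_ONLY	symbol contains "sched"
 */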
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
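/*
 * Illustrative sketch of in-kernel use of the two helpers above
 * (compiled out; the buffer and function names are hypothetical).  Note
 * that the buffers must be writable, because ftrace_match() may
 * terminate the glob in place:
 */
#if 0
static void example_setup_filtering(void)
{
	unsigned char filter[] = "sys_*";	/* front match, see ftrace_match() */
	unsigned char notrace_glob[] = "*sched*";	/* middle match */

	/* reset old filters, then trace only the system call entries */
	ftrace_set_filter(filter, sizeof(filter) - 1, 1);

	/* and never trace anything containing "sched" */
	ftrace_set_notrace(notrace_glob, sizeof(notrace_glob) - 1, 1);
}
#endif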
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (iter->filtered && ftrace_start && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);

	return 0;
}
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
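/*
 * The files created above are used from user space, e.g. (with debugfs
 * mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'sys_*'   > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo '*sched*' > /sys/kernel/debug/tracing/set_ftrace_notrace
 */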
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code();
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}
void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
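/*
 * Illustrative sketch of a caller (compiled out; the callback, ops and
 * function names below are hypothetical):
 */
#if 0
static void notrace example_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* runs on every traced function call; must itself be notrace */
}

static struct ftrace_ops example_ops __read_mostly = {
	.func	= example_trace_func,
};

static int example_start_tracing(void)
{
	/* begin receiving callbacks for every mcount'ed function */
	return register_ftrace_function(&example_ops);
}

static void example_stop_tracing(void)
{
	unregister_ftrace_function(&example_ops);
}
#endif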
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}