1 // SPDX-License-Identifier: GPL-2.0
2 /* Include in trace.c */
4 #include <uapi/linux/sched/types.h>
5 #include <linux/stringify.h>
6 #include <linux/kthread.h>
7 #include <linux/delay.h>
8 #include <linux/slab.h>
10 static inline int trace_valid_entry(struct trace_entry
*entry
)
12 switch (entry
->type
) {
20 case TRACE_GRAPH_RETADDR_ENT
:
27 static int trace_test_buffer_cpu(struct array_buffer
*buf
, int cpu
)
29 struct ring_buffer_event
*event
;
30 struct trace_entry
*entry
;
31 unsigned int loops
= 0;
33 while ((event
= ring_buffer_consume(buf
->buffer
, cpu
, NULL
, NULL
))) {
34 entry
= ring_buffer_event_data(event
);
37 * The ring buffer is a size of trace_buf_size, if
38 * we loop more than the size, there's something wrong
39 * with the ring buffer.
41 if (loops
++ > trace_buf_size
) {
42 printk(KERN_CONT
".. bad ring buffer ");
45 if (!trace_valid_entry(entry
)) {
46 printk(KERN_CONT
".. invalid entry %d ",
56 printk(KERN_CONT
".. corrupted trace buffer .. ");
61 * Test the trace buffer to see if all the elements
64 static int __maybe_unused
trace_test_buffer(struct array_buffer
*buf
, unsigned long *count
)
66 unsigned long flags
, cnt
= 0;
69 /* Don't allow flipping of max traces now */
70 local_irq_save(flags
);
71 arch_spin_lock(&buf
->tr
->max_lock
);
73 cnt
= ring_buffer_entries(buf
->buffer
);
76 * The trace_test_buffer_cpu runs a while loop to consume all data.
77 * If the calling tracer is broken, and is constantly filling
78 * the buffer, this will run forever, and hard lock the box.
79 * We disable the ring buffer while we do this test to prevent
83 for_each_possible_cpu(cpu
) {
84 ret
= trace_test_buffer_cpu(buf
, cpu
);
89 arch_spin_unlock(&buf
->tr
->max_lock
);
90 local_irq_restore(flags
);
98 static inline void warn_failed_init_tracer(struct tracer
*trace
, int init_ret
)
100 printk(KERN_WARNING
"Failed to init %s tracer, init returned %d\n",
101 trace
->name
, init_ret
);
103 #ifdef CONFIG_FUNCTION_TRACER
105 #ifdef CONFIG_DYNAMIC_FTRACE
107 static int trace_selftest_test_probe1_cnt
;
108 static void trace_selftest_test_probe1_func(unsigned long ip
,
110 struct ftrace_ops
*op
,
111 struct ftrace_regs
*fregs
)
113 trace_selftest_test_probe1_cnt
++;
116 static int trace_selftest_test_probe2_cnt
;
117 static void trace_selftest_test_probe2_func(unsigned long ip
,
119 struct ftrace_ops
*op
,
120 struct ftrace_regs
*fregs
)
122 trace_selftest_test_probe2_cnt
++;
125 static int trace_selftest_test_probe3_cnt
;
126 static void trace_selftest_test_probe3_func(unsigned long ip
,
128 struct ftrace_ops
*op
,
129 struct ftrace_regs
*fregs
)
131 trace_selftest_test_probe3_cnt
++;
134 static int trace_selftest_test_global_cnt
;
135 static void trace_selftest_test_global_func(unsigned long ip
,
137 struct ftrace_ops
*op
,
138 struct ftrace_regs
*fregs
)
140 trace_selftest_test_global_cnt
++;
143 static int trace_selftest_test_dyn_cnt
;
144 static void trace_selftest_test_dyn_func(unsigned long ip
,
146 struct ftrace_ops
*op
,
147 struct ftrace_regs
*fregs
)
149 trace_selftest_test_dyn_cnt
++;
152 static struct ftrace_ops test_probe1
= {
153 .func
= trace_selftest_test_probe1_func
,
156 static struct ftrace_ops test_probe2
= {
157 .func
= trace_selftest_test_probe2_func
,
160 static struct ftrace_ops test_probe3
= {
161 .func
= trace_selftest_test_probe3_func
,
164 static void print_counts(void)
166 printk("(%d %d %d %d %d) ",
167 trace_selftest_test_probe1_cnt
,
168 trace_selftest_test_probe2_cnt
,
169 trace_selftest_test_probe3_cnt
,
170 trace_selftest_test_global_cnt
,
171 trace_selftest_test_dyn_cnt
);
174 static void reset_counts(void)
176 trace_selftest_test_probe1_cnt
= 0;
177 trace_selftest_test_probe2_cnt
= 0;
178 trace_selftest_test_probe3_cnt
= 0;
179 trace_selftest_test_global_cnt
= 0;
180 trace_selftest_test_dyn_cnt
= 0;
183 static int trace_selftest_ops(struct trace_array
*tr
, int cnt
)
185 int save_ftrace_enabled
= ftrace_enabled
;
186 struct ftrace_ops
*dyn_ops
;
193 printk(KERN_CONT
"PASSED\n");
194 pr_info("Testing dynamic ftrace ops #%d: ", cnt
);
199 /* Handle PPC64 '.' name */
200 func1_name
= "*" __stringify(DYN_FTRACE_TEST_NAME
);
201 func2_name
= "*" __stringify(DYN_FTRACE_TEST_NAME2
);
202 len1
= strlen(func1_name
);
203 len2
= strlen(func2_name
);
206 * Probe 1 will trace function 1.
207 * Probe 2 will trace function 2.
208 * Probe 3 will trace functions 1 and 2.
210 ftrace_set_filter(&test_probe1
, func1_name
, len1
, 1);
211 ftrace_set_filter(&test_probe2
, func2_name
, len2
, 1);
212 ftrace_set_filter(&test_probe3
, func1_name
, len1
, 1);
213 ftrace_set_filter(&test_probe3
, func2_name
, len2
, 0);
215 register_ftrace_function(&test_probe1
);
216 register_ftrace_function(&test_probe2
);
217 register_ftrace_function(&test_probe3
);
218 /* First time we are running with main function */
220 ftrace_init_array_ops(tr
, trace_selftest_test_global_func
);
221 register_ftrace_function(tr
->ops
);
224 DYN_FTRACE_TEST_NAME();
228 if (trace_selftest_test_probe1_cnt
!= 1)
230 if (trace_selftest_test_probe2_cnt
!= 0)
232 if (trace_selftest_test_probe3_cnt
!= 1)
235 if (trace_selftest_test_global_cnt
== 0)
239 DYN_FTRACE_TEST_NAME2();
243 if (trace_selftest_test_probe1_cnt
!= 1)
245 if (trace_selftest_test_probe2_cnt
!= 1)
247 if (trace_selftest_test_probe3_cnt
!= 2)
250 /* Add a dynamic probe */
251 dyn_ops
= kzalloc(sizeof(*dyn_ops
), GFP_KERNEL
);
253 printk("MEMORY ERROR ");
257 dyn_ops
->func
= trace_selftest_test_dyn_func
;
259 register_ftrace_function(dyn_ops
);
261 trace_selftest_test_global_cnt
= 0;
263 DYN_FTRACE_TEST_NAME();
267 if (trace_selftest_test_probe1_cnt
!= 2)
269 if (trace_selftest_test_probe2_cnt
!= 1)
271 if (trace_selftest_test_probe3_cnt
!= 3)
274 if (trace_selftest_test_global_cnt
== 0)
277 if (trace_selftest_test_dyn_cnt
== 0)
280 DYN_FTRACE_TEST_NAME2();
284 if (trace_selftest_test_probe1_cnt
!= 2)
286 if (trace_selftest_test_probe2_cnt
!= 2)
288 if (trace_selftest_test_probe3_cnt
!= 4)
291 /* Remove trace function from probe 3 */
292 func1_name
= "!" __stringify(DYN_FTRACE_TEST_NAME
);
293 len1
= strlen(func1_name
);
295 ftrace_set_filter(&test_probe3
, func1_name
, len1
, 0);
297 DYN_FTRACE_TEST_NAME();
301 if (trace_selftest_test_probe1_cnt
!= 3)
303 if (trace_selftest_test_probe2_cnt
!= 2)
305 if (trace_selftest_test_probe3_cnt
!= 4)
308 if (trace_selftest_test_global_cnt
== 0)
311 if (trace_selftest_test_dyn_cnt
== 0)
314 DYN_FTRACE_TEST_NAME2();
318 if (trace_selftest_test_probe1_cnt
!= 3)
320 if (trace_selftest_test_probe2_cnt
!= 3)
322 if (trace_selftest_test_probe3_cnt
!= 5)
327 unregister_ftrace_function(dyn_ops
);
331 /* Purposely unregister in the same order */
332 unregister_ftrace_function(&test_probe1
);
333 unregister_ftrace_function(&test_probe2
);
334 unregister_ftrace_function(&test_probe3
);
336 unregister_ftrace_function(tr
->ops
);
337 ftrace_reset_array_ops(tr
);
339 /* Make sure everything is off */
341 DYN_FTRACE_TEST_NAME();
342 DYN_FTRACE_TEST_NAME();
344 if (trace_selftest_test_probe1_cnt
||
345 trace_selftest_test_probe2_cnt
||
346 trace_selftest_test_probe3_cnt
||
347 trace_selftest_test_global_cnt
||
348 trace_selftest_test_dyn_cnt
)
351 ftrace_enabled
= save_ftrace_enabled
;
356 /* Test dynamic code modification and ftrace filters */
357 static int trace_selftest_startup_dynamic_tracing(struct tracer
*trace
,
358 struct trace_array
*tr
,
361 int save_ftrace_enabled
= ftrace_enabled
;
366 /* The ftrace test PASSED */
367 printk(KERN_CONT
"PASSED\n");
368 pr_info("Testing dynamic ftrace: ");
370 /* enable tracing, and record the filter function */
373 /* passed in by parameter to fool gcc from optimizing */
377 * Some archs *cough*PowerPC*cough* add characters to the
378 * start of the function names. We simply put a '*' to
381 func_name
= "*" __stringify(DYN_FTRACE_TEST_NAME
);
383 /* filter only on our function */
384 ftrace_set_global_filter(func_name
, strlen(func_name
), 1);
387 ret
= tracer_init(trace
, tr
);
389 warn_failed_init_tracer(trace
, ret
);
393 /* Sleep for a 1/10 of a second */
396 /* we should have nothing in the buffer */
397 ret
= trace_test_buffer(&tr
->array_buffer
, &count
);
403 printk(KERN_CONT
".. filter did not filter .. ");
407 /* call our function again */
413 /* stop the tracing. */
417 /* check the trace buffer */
418 ret
= trace_test_buffer(&tr
->array_buffer
, &count
);
423 /* we should only have one item */
424 if (!ret
&& count
!= 1) {
426 printk(KERN_CONT
".. filter failed count=%ld ..", count
);
431 /* Test the ops with global tracing running */
432 ret
= trace_selftest_ops(tr
, 1);
436 ftrace_enabled
= save_ftrace_enabled
;
438 /* Enable tracing on all functions again */
439 ftrace_set_global_filter(NULL
, 0, 1);
441 /* Test the ops with global tracing off */
443 ret
= trace_selftest_ops(tr
, 2);
448 static int trace_selftest_recursion_cnt
;
449 static void trace_selftest_test_recursion_func(unsigned long ip
,
451 struct ftrace_ops
*op
,
452 struct ftrace_regs
*fregs
)
455 * This function is registered without the recursion safe flag.
456 * The ftrace infrastructure should provide the recursion
457 * protection. If not, this will crash the kernel!
459 if (trace_selftest_recursion_cnt
++ > 10)
461 DYN_FTRACE_TEST_NAME();
464 static void trace_selftest_test_recursion_safe_func(unsigned long ip
,
466 struct ftrace_ops
*op
,
467 struct ftrace_regs
*fregs
)
470 * We said we would provide our own recursion. By calling
471 * this function again, we should recurse back into this function
472 * and count again. But this only happens if the arch supports
473 * all of ftrace features and nothing else is using the function
476 if (trace_selftest_recursion_cnt
++)
478 DYN_FTRACE_TEST_NAME();
481 static struct ftrace_ops test_rec_probe
= {
482 .func
= trace_selftest_test_recursion_func
,
483 .flags
= FTRACE_OPS_FL_RECURSION
,
486 static struct ftrace_ops test_recsafe_probe
= {
487 .func
= trace_selftest_test_recursion_safe_func
,
491 trace_selftest_function_recursion(void)
493 int save_ftrace_enabled
= ftrace_enabled
;
498 /* The previous test PASSED */
500 pr_info("Testing ftrace recursion: ");
503 /* enable tracing, and record the filter function */
506 /* Handle PPC64 '.' name */
507 func_name
= "*" __stringify(DYN_FTRACE_TEST_NAME
);
508 len
= strlen(func_name
);
510 ret
= ftrace_set_filter(&test_rec_probe
, func_name
, len
, 1);
512 pr_cont("*Could not set filter* ");
516 ret
= register_ftrace_function(&test_rec_probe
);
518 pr_cont("*could not register callback* ");
522 DYN_FTRACE_TEST_NAME();
524 unregister_ftrace_function(&test_rec_probe
);
528 * Recursion allows for transitions between context,
529 * and may call the callback twice.
531 if (trace_selftest_recursion_cnt
!= 1 &&
532 trace_selftest_recursion_cnt
!= 2) {
533 pr_cont("*callback not called once (or twice) (%d)* ",
534 trace_selftest_recursion_cnt
);
538 trace_selftest_recursion_cnt
= 1;
541 pr_info("Testing ftrace recursion safe: ");
543 ret
= ftrace_set_filter(&test_recsafe_probe
, func_name
, len
, 1);
545 pr_cont("*Could not set filter* ");
549 ret
= register_ftrace_function(&test_recsafe_probe
);
551 pr_cont("*could not register callback* ");
555 DYN_FTRACE_TEST_NAME();
557 unregister_ftrace_function(&test_recsafe_probe
);
560 if (trace_selftest_recursion_cnt
!= 2) {
561 pr_cont("*callback not called expected 2 times (%d)* ",
562 trace_selftest_recursion_cnt
);
568 ftrace_enabled
= save_ftrace_enabled
;
573 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
574 # define trace_selftest_function_recursion() ({ 0; })
575 #endif /* CONFIG_DYNAMIC_FTRACE */
578 TRACE_SELFTEST_REGS_START
,
579 TRACE_SELFTEST_REGS_FOUND
,
580 TRACE_SELFTEST_REGS_NOT_FOUND
,
581 } trace_selftest_regs_stat
;
583 static void trace_selftest_test_regs_func(unsigned long ip
,
585 struct ftrace_ops
*op
,
586 struct ftrace_regs
*fregs
)
588 struct pt_regs
*regs
= ftrace_get_regs(fregs
);
591 trace_selftest_regs_stat
= TRACE_SELFTEST_REGS_FOUND
;
593 trace_selftest_regs_stat
= TRACE_SELFTEST_REGS_NOT_FOUND
;
596 static struct ftrace_ops test_regs_probe
= {
597 .func
= trace_selftest_test_regs_func
,
598 .flags
= FTRACE_OPS_FL_SAVE_REGS
,
602 trace_selftest_function_regs(void)
604 int save_ftrace_enabled
= ftrace_enabled
;
610 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
614 /* The previous test PASSED */
616 pr_info("Testing ftrace regs%s: ",
617 !supported
? "(no arch support)" : "");
619 /* enable tracing, and record the filter function */
622 /* Handle PPC64 '.' name */
623 func_name
= "*" __stringify(DYN_FTRACE_TEST_NAME
);
624 len
= strlen(func_name
);
626 ret
= ftrace_set_filter(&test_regs_probe
, func_name
, len
, 1);
628 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
629 * This test really doesn't care.
631 if (ret
&& ret
!= -ENODEV
) {
632 pr_cont("*Could not set filter* ");
636 ret
= register_ftrace_function(&test_regs_probe
);
638 * Now if the arch does not support passing regs, then this should
643 pr_cont("*registered save-regs without arch support* ");
646 test_regs_probe
.flags
|= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED
;
647 ret
= register_ftrace_function(&test_regs_probe
);
650 pr_cont("*could not register callback* ");
655 DYN_FTRACE_TEST_NAME();
657 unregister_ftrace_function(&test_regs_probe
);
661 switch (trace_selftest_regs_stat
) {
662 case TRACE_SELFTEST_REGS_START
:
663 pr_cont("*callback never called* ");
666 case TRACE_SELFTEST_REGS_FOUND
:
669 pr_cont("*callback received regs without arch support* ");
672 case TRACE_SELFTEST_REGS_NOT_FOUND
:
675 pr_cont("*callback received NULL regs* ");
681 ftrace_enabled
= save_ftrace_enabled
;
687 * Simple verification test of ftrace function tracer.
688 * Enable ftrace, sleep 1/10 second, and then read the trace
689 * buffer to see if all is in order.
692 trace_selftest_startup_function(struct tracer
*trace
, struct trace_array
*tr
)
694 int save_ftrace_enabled
= ftrace_enabled
;
698 #ifdef CONFIG_DYNAMIC_FTRACE
699 if (ftrace_filter_param
) {
700 printk(KERN_CONT
" ... kernel command line filter set: force PASS ... ");
705 /* make sure msleep has been recorded */
708 /* start the tracing */
711 ret
= tracer_init(trace
, tr
);
713 warn_failed_init_tracer(trace
, ret
);
717 /* Sleep for a 1/10 of a second */
719 /* stop the tracing. */
723 /* check the trace buffer */
724 ret
= trace_test_buffer(&tr
->array_buffer
, &count
);
730 if (!ret
&& !count
) {
731 printk(KERN_CONT
".. no entries found ..");
736 ret
= trace_selftest_startup_dynamic_tracing(trace
, tr
,
737 DYN_FTRACE_TEST_NAME
);
741 ret
= trace_selftest_function_recursion();
745 ret
= trace_selftest_function_regs();
747 ftrace_enabled
= save_ftrace_enabled
;
749 /* kill ftrace totally if we failed */
755 #endif /* CONFIG_FUNCTION_TRACER */
758 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
760 #ifdef CONFIG_DYNAMIC_FTRACE
762 #define CHAR_NUMBER 123
763 #define SHORT_NUMBER 12345
764 #define WORD_NUMBER 1234567890
765 #define LONG_NUMBER 1234567890123456789LL
766 #define ERRSTR_BUFLEN 128
768 struct fgraph_fixture
{
769 struct fgraph_ops gops
;
771 const char *store_type_name
;
772 char error_str_buf
[ERRSTR_BUFLEN
];
776 static __init
int store_entry(struct ftrace_graph_ent
*trace
,
777 struct fgraph_ops
*gops
)
779 struct fgraph_fixture
*fixture
= container_of(gops
, struct fgraph_fixture
, gops
);
780 const char *type
= fixture
->store_type_name
;
781 int size
= fixture
->store_size
;
784 p
= fgraph_reserve_data(gops
->idx
, size
);
786 snprintf(fixture
->error_str_buf
, ERRSTR_BUFLEN
,
787 "Failed to reserve %s\n", type
);
793 *(char *)p
= CHAR_NUMBER
;
796 *(short *)p
= SHORT_NUMBER
;
799 *(int *)p
= WORD_NUMBER
;
802 *(long long *)p
= LONG_NUMBER
;
809 static __init
void store_return(struct ftrace_graph_ret
*trace
,
810 struct fgraph_ops
*gops
)
812 struct fgraph_fixture
*fixture
= container_of(gops
, struct fgraph_fixture
, gops
);
813 const char *type
= fixture
->store_type_name
;
814 long long expect
= 0;
815 long long found
= -1;
819 p
= fgraph_retrieve_data(gops
->idx
, &size
);
821 snprintf(fixture
->error_str_buf
, ERRSTR_BUFLEN
,
822 "Failed to retrieve %s\n", type
);
825 if (fixture
->store_size
> size
) {
826 snprintf(fixture
->error_str_buf
, ERRSTR_BUFLEN
,
827 "Retrieved size %d is smaller than expected %d\n",
828 size
, (int)fixture
->store_size
);
832 switch (fixture
->store_size
) {
834 expect
= CHAR_NUMBER
;
838 expect
= SHORT_NUMBER
;
842 expect
= WORD_NUMBER
;
846 expect
= LONG_NUMBER
;
847 found
= *(long long *)p
;
851 if (found
!= expect
) {
852 snprintf(fixture
->error_str_buf
, ERRSTR_BUFLEN
,
853 "%s returned not %lld but %lld\n", type
, expect
, found
);
856 fixture
->error_str
= NULL
;
859 static int __init
init_fgraph_fixture(struct fgraph_fixture
*fixture
)
864 snprintf(fixture
->error_str_buf
, ERRSTR_BUFLEN
,
865 "Failed to execute storage %s\n", fixture
->store_type_name
);
866 fixture
->error_str
= fixture
->error_str_buf
;
868 func_name
= "*" __stringify(DYN_FTRACE_TEST_NAME
);
869 len
= strlen(func_name
);
871 return ftrace_set_filter(&fixture
->gops
.ops
, func_name
, len
, 1);
874 /* Test fgraph storage for each size */
875 static int __init
test_graph_storage_single(struct fgraph_fixture
*fixture
)
877 int size
= fixture
->store_size
;
881 pr_info("Testing fgraph storage of %d byte%s: ", size
, str_plural(size
));
883 ret
= init_fgraph_fixture(fixture
);
884 if (ret
&& ret
!= -ENODEV
) {
885 pr_cont("*Could not set filter* ");
889 ret
= register_ftrace_graph(&fixture
->gops
);
891 pr_warn("Failed to init store_bytes fgraph tracing\n");
895 DYN_FTRACE_TEST_NAME();
897 unregister_ftrace_graph(&fixture
->gops
);
899 if (fixture
->error_str
) {
900 pr_cont("*** %s ***", fixture
->error_str
);
907 static struct fgraph_fixture store_bytes
[4] __initdata
= {
910 .entryfunc
= store_entry
,
911 .retfunc
= store_return
,
914 .store_type_name
= "byte",
918 .entryfunc
= store_entry
,
919 .retfunc
= store_return
,
922 .store_type_name
= "short",
926 .entryfunc
= store_entry
,
927 .retfunc
= store_return
,
930 .store_type_name
= "word",
934 .entryfunc
= store_entry
,
935 .retfunc
= store_return
,
938 .store_type_name
= "long long",
942 static __init
int test_graph_storage_multi(void)
944 struct fgraph_fixture
*fixture
;
945 bool printed
= false;
949 pr_info("Testing multiple fgraph storage on a function: ");
951 for (i
= 0; i
< ARRAY_SIZE(store_bytes
); i
++) {
952 fixture
= &store_bytes
[i
];
953 ret
= init_fgraph_fixture(fixture
);
954 if (ret
&& ret
!= -ENODEV
) {
955 pr_cont("*Could not set filter* ");
961 for (j
= 0; j
< ARRAY_SIZE(store_bytes
); j
++) {
962 fixture
= &store_bytes
[j
];
963 ret
= register_ftrace_graph(&fixture
->gops
);
965 pr_warn("Failed to init store_bytes fgraph tracing\n");
971 DYN_FTRACE_TEST_NAME();
974 fixture
= &store_bytes
[j
];
975 unregister_ftrace_graph(&fixture
->gops
);
977 if (fixture
->error_str
&& !printed
) {
978 pr_cont("*** %s ***", fixture
->error_str
);
984 fixture
= &store_bytes
[i
];
985 ftrace_free_filter(&fixture
->gops
.ops
);
987 if (fixture
->error_str
&& !printed
) {
988 pr_cont("*** %s ***", fixture
->error_str
);
992 return printed
? -1 : 0;
995 /* Test the storage passed across function_graph entry and return */
996 static __init
int test_graph_storage(void)
1000 ret
= test_graph_storage_single(&store_bytes
[0]);
1003 ret
= test_graph_storage_single(&store_bytes
[1]);
1006 ret
= test_graph_storage_single(&store_bytes
[2]);
1009 ret
= test_graph_storage_single(&store_bytes
[3]);
1012 ret
= test_graph_storage_multi();
1018 static inline int test_graph_storage(void) { return 0; }
1019 #endif /* CONFIG_DYNAMIC_FTRACE */
1021 /* Maximum number of functions to trace before diagnosing a hang */
1022 #define GRAPH_MAX_FUNC_TEST 100000000
1024 static unsigned int graph_hang_thresh
;
1026 /* Wrap the real function entry probe to avoid possible hanging */
1027 static int trace_graph_entry_watchdog(struct ftrace_graph_ent
*trace
,
1028 struct fgraph_ops
*gops
)
1030 /* This is harmlessly racy, we want to approximately detect a hang */
1031 if (unlikely(++graph_hang_thresh
> GRAPH_MAX_FUNC_TEST
)) {
1032 ftrace_graph_stop();
1033 printk(KERN_WARNING
"BUG: Function graph tracer hang!\n");
1034 if (ftrace_dump_on_oops_enabled()) {
1035 ftrace_dump(DUMP_ALL
);
1036 /* ftrace_dump() disables tracing */
1042 return trace_graph_entry(trace
, gops
);
1045 static struct fgraph_ops fgraph_ops __initdata
= {
1046 .entryfunc
= &trace_graph_entry_watchdog
,
1047 .retfunc
= &trace_graph_return
,
1050 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
1051 static struct ftrace_ops direct
;
1055 * Pretty much the same than for the function tracer from which the selftest
1056 * has been borrowed.
1059 trace_selftest_startup_function_graph(struct tracer
*trace
,
1060 struct trace_array
*tr
)
1063 unsigned long count
;
1064 char *func_name __maybe_unused
;
1066 #ifdef CONFIG_DYNAMIC_FTRACE
1067 if (ftrace_filter_param
) {
1068 printk(KERN_CONT
" ... kernel command line filter set: force PASS ... ");
1074 * Simulate the init() callback but we attach a watchdog callback
1075 * to detect and recover from possible hangs
1077 tracing_reset_online_cpus(&tr
->array_buffer
);
1078 fgraph_ops
.private = tr
;
1079 ret
= register_ftrace_graph(&fgraph_ops
);
1081 warn_failed_init_tracer(trace
, ret
);
1084 tracing_start_cmdline_record();
1086 /* Sleep for a 1/10 of a second */
1089 /* Have we just recovered from a hang? */
1090 if (graph_hang_thresh
> GRAPH_MAX_FUNC_TEST
) {
1091 disable_tracing_selftest("recovering from a hang");
1098 /* check the trace buffer */
1099 ret
= trace_test_buffer(&tr
->array_buffer
, &count
);
1101 /* Need to also simulate the tr->reset to remove this fgraph_ops */
1102 tracing_stop_cmdline_record();
1103 unregister_ftrace_graph(&fgraph_ops
);
1107 if (!ret
&& !count
) {
1108 printk(KERN_CONT
".. no entries found ..");
1113 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
1115 * These tests can take some time to run. Make sure on non PREEMPT
1116 * kernels, we do not trigger the softlockup detector.
1120 tracing_reset_online_cpus(&tr
->array_buffer
);
1121 fgraph_ops
.private = tr
;
1124 * Some archs *cough*PowerPC*cough* add characters to the
1125 * start of the function names. We simply put a '*' to
1128 func_name
= "*" __stringify(DYN_FTRACE_TEST_NAME
);
1129 ftrace_set_global_filter(func_name
, strlen(func_name
), 1);
1132 * Register direct function together with graph tracer
1133 * and make sure we get graph trace.
1135 ftrace_set_filter_ip(&direct
, (unsigned long)DYN_FTRACE_TEST_NAME
, 0, 0);
1136 ret
= register_ftrace_direct(&direct
,
1137 (unsigned long)ftrace_stub_direct_tramp
);
1143 ret
= register_ftrace_graph(&fgraph_ops
);
1145 warn_failed_init_tracer(trace
, ret
);
1149 DYN_FTRACE_TEST_NAME();
1154 /* check the trace buffer */
1155 ret
= trace_test_buffer(&tr
->array_buffer
, &count
);
1157 unregister_ftrace_graph(&fgraph_ops
);
1159 ret
= unregister_ftrace_direct(&direct
,
1160 (unsigned long)ftrace_stub_direct_tramp
,
1169 if (!ret
&& !count
) {
1174 /* Enable tracing on all functions again */
1175 ftrace_set_global_filter(NULL
, 0, 1);
1178 ret
= test_graph_storage();
1180 /* Don't test dynamic tracing, the function tracer already did */
1182 /* Stop it if we failed */
1184 ftrace_graph_stop();
1188 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1191 #ifdef CONFIG_IRQSOFF_TRACER
1193 trace_selftest_startup_irqsoff(struct tracer
*trace
, struct trace_array
*tr
)
1195 unsigned long save_max
= tr
->max_latency
;
1196 unsigned long count
;
1199 /* start the tracing */
1200 ret
= tracer_init(trace
, tr
);
1202 warn_failed_init_tracer(trace
, ret
);
1206 /* reset the max latency */
1207 tr
->max_latency
= 0;
1208 /* disable interrupts for a bit */
1209 local_irq_disable();
1214 * Stop the tracer to avoid a warning subsequent
1215 * to buffer flipping failure because tracing_stop()
1216 * disables the tr and max buffers, making flipping impossible
1217 * in case of parallels max irqs off latencies.
1220 /* stop the tracing. */
1222 /* check both trace buffers */
1223 ret
= trace_test_buffer(&tr
->array_buffer
, NULL
);
1225 ret
= trace_test_buffer(&tr
->max_buffer
, &count
);
1229 if (!ret
&& !count
) {
1230 printk(KERN_CONT
".. no entries found ..");
1234 tr
->max_latency
= save_max
;
1238 #endif /* CONFIG_IRQSOFF_TRACER */
1240 #ifdef CONFIG_PREEMPT_TRACER
1242 trace_selftest_startup_preemptoff(struct tracer
*trace
, struct trace_array
*tr
)
1244 unsigned long save_max
= tr
->max_latency
;
1245 unsigned long count
;
1249 * Now that the big kernel lock is no longer preemptible,
1250 * and this is called with the BKL held, it will always
1251 * fail. If preemption is already disabled, simply
1252 * pass the test. When the BKL is removed, or becomes
1253 * preemptible again, we will once again test this,
1256 if (preempt_count()) {
1257 printk(KERN_CONT
"can not test ... force ");
1261 /* start the tracing */
1262 ret
= tracer_init(trace
, tr
);
1264 warn_failed_init_tracer(trace
, ret
);
1268 /* reset the max latency */
1269 tr
->max_latency
= 0;
1270 /* disable preemption for a bit */
1276 * Stop the tracer to avoid a warning subsequent
1277 * to buffer flipping failure because tracing_stop()
1278 * disables the tr and max buffers, making flipping impossible
1279 * in case of parallels max preempt off latencies.
1282 /* stop the tracing. */
1284 /* check both trace buffers */
1285 ret
= trace_test_buffer(&tr
->array_buffer
, NULL
);
1287 ret
= trace_test_buffer(&tr
->max_buffer
, &count
);
1291 if (!ret
&& !count
) {
1292 printk(KERN_CONT
".. no entries found ..");
1296 tr
->max_latency
= save_max
;
1300 #endif /* CONFIG_PREEMPT_TRACER */
1302 #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
1304 trace_selftest_startup_preemptirqsoff(struct tracer
*trace
, struct trace_array
*tr
)
1306 unsigned long save_max
= tr
->max_latency
;
1307 unsigned long count
;
1311 * Now that the big kernel lock is no longer preemptible,
1312 * and this is called with the BKL held, it will always
1313 * fail. If preemption is already disabled, simply
1314 * pass the test. When the BKL is removed, or becomes
1315 * preemptible again, we will once again test this,
1318 if (preempt_count()) {
1319 printk(KERN_CONT
"can not test ... force ");
1323 /* start the tracing */
1324 ret
= tracer_init(trace
, tr
);
1326 warn_failed_init_tracer(trace
, ret
);
1330 /* reset the max latency */
1331 tr
->max_latency
= 0;
1333 /* disable preemption and interrupts for a bit */
1335 local_irq_disable();
1338 /* reverse the order of preempt vs irqs */
1342 * Stop the tracer to avoid a warning subsequent
1343 * to buffer flipping failure because tracing_stop()
1344 * disables the tr and max buffers, making flipping impossible
1345 * in case of parallels max irqs/preempt off latencies.
1348 /* stop the tracing. */
1350 /* check both trace buffers */
1351 ret
= trace_test_buffer(&tr
->array_buffer
, NULL
);
1355 ret
= trace_test_buffer(&tr
->max_buffer
, &count
);
1359 if (!ret
&& !count
) {
1360 printk(KERN_CONT
".. no entries found ..");
1365 /* do the test by disabling interrupts first this time */
1366 tr
->max_latency
= 0;
1371 local_irq_disable();
1374 /* reverse the order of preempt vs irqs */
1378 /* stop the tracing. */
1380 /* check both trace buffers */
1381 ret
= trace_test_buffer(&tr
->array_buffer
, NULL
);
1385 ret
= trace_test_buffer(&tr
->max_buffer
, &count
);
1387 if (!ret
&& !count
) {
1388 printk(KERN_CONT
".. no entries found ..");
1397 tr
->max_latency
= save_max
;
1401 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
1403 #ifdef CONFIG_NOP_TRACER
1405 trace_selftest_startup_nop(struct tracer
*trace
, struct trace_array
*tr
)
1407 /* What could possibly go wrong? */
1412 #ifdef CONFIG_SCHED_TRACER
1414 struct wakeup_test_data
{
1415 struct completion is_ready
;
1419 static int trace_wakeup_test_thread(void *data
)
1421 /* Make this a -deadline thread */
1422 static const struct sched_attr attr
= {
1423 .sched_policy
= SCHED_DEADLINE
,
1424 .sched_runtime
= 100000ULL,
1425 .sched_deadline
= 10000000ULL,
1426 .sched_period
= 10000000ULL
1428 struct wakeup_test_data
*x
= data
;
1430 sched_setattr(current
, &attr
);
1432 /* Make it know we have a new prio */
1433 complete(&x
->is_ready
);
1435 /* now go to sleep and let the test wake us up */
1436 set_current_state(TASK_INTERRUPTIBLE
);
1439 set_current_state(TASK_INTERRUPTIBLE
);
1442 complete(&x
->is_ready
);
1444 set_current_state(TASK_INTERRUPTIBLE
);
1446 /* we are awake, now wait to disappear */
1447 while (!kthread_should_stop()) {
1449 set_current_state(TASK_INTERRUPTIBLE
);
1452 __set_current_state(TASK_RUNNING
);
1457 trace_selftest_startup_wakeup(struct tracer
*trace
, struct trace_array
*tr
)
1459 unsigned long save_max
= tr
->max_latency
;
1460 struct task_struct
*p
;
1461 struct wakeup_test_data data
;
1462 unsigned long count
;
1465 memset(&data
, 0, sizeof(data
));
1467 init_completion(&data
.is_ready
);
1469 /* create a -deadline thread */
1470 p
= kthread_run(trace_wakeup_test_thread
, &data
, "ftrace-test");
1472 printk(KERN_CONT
"Failed to create ftrace wakeup test thread ");
1476 /* make sure the thread is running at -deadline policy */
1477 wait_for_completion(&data
.is_ready
);
1479 /* start the tracing */
1480 ret
= tracer_init(trace
, tr
);
1482 warn_failed_init_tracer(trace
, ret
);
1486 /* reset the max latency */
1487 tr
->max_latency
= 0;
1489 while (task_is_runnable(p
)) {
1491 * Sleep to make sure the -deadline thread is asleep too.
1492 * On virtual machines we can't rely on timings,
1493 * but we want to make sure this test still works.
1498 init_completion(&data
.is_ready
);
1501 /* memory barrier is in the wake_up_process() */
1505 /* Wait for the task to wake up */
1506 wait_for_completion(&data
.is_ready
);
1508 /* stop the tracing. */
1510 /* check both trace buffers */
1511 ret
= trace_test_buffer(&tr
->array_buffer
, NULL
);
1513 ret
= trace_test_buffer(&tr
->max_buffer
, &count
);
1519 tr
->max_latency
= save_max
;
1521 /* kill the thread */
1524 if (!ret
&& !count
) {
1525 printk(KERN_CONT
".. no entries found ..");
1531 #endif /* CONFIG_SCHED_TRACER */
1533 #ifdef CONFIG_BRANCH_TRACER
1535 trace_selftest_startup_branch(struct tracer
*trace
, struct trace_array
*tr
)
1537 unsigned long count
;
1540 /* start the tracing */
1541 ret
= tracer_init(trace
, tr
);
1543 warn_failed_init_tracer(trace
, ret
);
1547 /* Sleep for a 1/10 of a second */
1549 /* stop the tracing. */
1551 /* check the trace buffer */
1552 ret
= trace_test_buffer(&tr
->array_buffer
, &count
);
1556 if (!ret
&& !count
) {
1557 printk(KERN_CONT
".. no entries found ..");
1563 #endif /* CONFIG_BRANCH_TRACER */