/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if we loop
		 * more times than that, something is wrong with the
		 * ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
			       entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
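
/*
 * Illustrative sketch (not part of the original file): the pattern the
 * tracer selftests below use to validate their output with
 * trace_test_buffer() - stop tracing, count the entries, restart, and
 * flag an error if nothing was recorded.
 */
static int example_check_trace_buffer(struct trace_array *tr)
{
	unsigned long count;
	int ret;

	tracing_stop();
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}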
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long parent_ip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long parent_ip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long parent_ip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long parent_ip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long parent_ip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}
static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};
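
/*
 * Illustrative sketch (not part of the original file): the minimal life
 * cycle of one of the probes above.  Once registered, the callback fires
 * for every traced function - or only for the functions matched by
 * ftrace_set_filter(), if a filter was installed first as
 * trace_selftest_ops() below does.
 */
static int example_probe_lifecycle(void)
{
	int ret;

	ret = register_ftrace_function(&test_probe1);
	if (ret)
		return ret;

	/* trace_selftest_test_probe1_func() fires for this call */
	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_probe1);

	return 0;
}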
static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);
	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
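
	/*
	 * Illustrative note (not part of the original file): the leading
	 * '*' in the globs above makes them match both "func" and the
	 * "."-prefixed form PPC64 uses.  The last argument of
	 * ftrace_set_filter() is 'reset': 1 replaces any existing filter
	 * for that ops, while 0 (as in test_probe3's second call) appends
	 * the new pattern to the filter already set.
	 */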
	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);

	/* First time we are running with main function */
	ftrace_init_array_ops(tr, trace_selftest_test_global_func);
	register_ftrace_function(tr->ops);
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);
	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

	/* Purposely unregister in the same order */
 out:
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to fool gcc from optimizing */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' at the
	 * start of the glob so those names match as well.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);
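
	/*
	 * Illustrative note (not part of the original file):
	 * ftrace_set_global_filter() applies the glob to the global ops
	 * that the function tracer itself uses, rather than to a
	 * caller-supplied ftrace_ops.  Passing a NULL buffer with reset=1
	 * later drops the glob and traces every function again, which is
	 * exactly what this test does once the filtered run is verified:
	 *
	 *	ftrace_set_global_filter(NULL, 0, 1);
	 */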
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}
static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long parent_ip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long parent_ip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion. By calling
	 * this function again, we should recurse back into this function
	 * and count again. But this only happens if the arch supports
	 * all of ftrace features and nothing else is using the function
	 * tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}
static struct ftrace_ops test_rec_probe = {
	.func			= trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func			= trace_selftest_test_recursion_safe_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};
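
/*
 * Illustrative sketch (not part of the original file): what "providing
 * your own recursion protection" usually means for a callback such as
 * the recursion-safe one above - the callback guards against re-entry
 * itself.  "example_busy" is a hypothetical flag; a real callback would
 * use per-CPU state so that concurrent CPUs do not disturb each other.
 */
static int example_busy;

static void example_recursion_safe_func(unsigned long ip,
					unsigned long parent_ip,
					struct ftrace_ops *op,
					struct pt_regs *pt_regs)
{
	if (example_busy)
		return;
	example_busy = 1;

	/*
	 * Any traced function called from here recurses back into this
	 * callback; the guard above swallows that recursion.
	 */
	DYN_FTRACE_TEST_NAME();

	example_busy = 0;
}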
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
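
/*
 * Illustrative note (not part of the original file): the stubs above let
 * callers invoke the dynamic-tracing selftests unconditionally.  When
 * CONFIG_DYNAMIC_FTRACE is off they simply evaluate to 0 ("pass")
 * without generating any code, e.g.:
 *
 *	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
 *						     DYN_FTRACE_TEST_NAME);
 */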
static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long parent_ip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}
static struct ftrace_ops test_regs_probe = {
	.func		= trace_selftest_test_regs_func,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};
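
/*
 * Illustrative sketch (not part of the original file): the fallback the
 * test below relies on.  Registering an ops with FTRACE_OPS_FL_SAVE_REGS
 * fails on architectures that cannot hand pt_regs to the callback;
 * adding FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED lets the registration
 * succeed anyway, with the callback then receiving a NULL pt_regs.
 */
static int example_register_regs_probe(struct ftrace_ops *ops)
{
	int ret;

	ret = register_ftrace_function(ops);
	if (ret) {
		/* arch cannot honour SAVE_REGS; fall back to best effort */
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(ops);
	}

	return ret;
}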
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;
	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}

#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}
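
/*
 * Illustrative sketch (not part of the original file): how the selftest
 * below plugs the watchdog in.  It is handed to register_ftrace_graph()
 * in place of the plain trace_graph_entry() callback, so every traced
 * function entry passes through the hang check first.
 */
static int example_start_graph_watchdog(void)
{
	graph_hang_thresh = 0;

	return register_ftrace_graph(&trace_graph_return,
				     &trace_graph_entry_watchdog);
}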
/*
 * Pretty much the same as for the function tracer, from which the selftest
 * has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_IRQSOFF_TRACER
__init int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs off latencies.
	 */
	trace->stop(tr);

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
__init int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt off latencies.
	 */
	trace->stop(tr);

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
__init int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt off latencies.
	 */
	trace->stop(tr);

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}
	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();

	/* reverse the order of preempt vs irqs */

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
__init int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif /* CONFIG_NOP_TRACER */

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
	struct completion	is_ready;
};
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 100000ULL,
		.sched_deadline	= 10000000ULL,
		.sched_period	= 10000000ULL
	};
	struct wakeup_test_data *x = data;
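
	/*
	 * Illustrative note (not part of the original file): sched_attr
	 * times are given in nanoseconds, so the attributes above request
	 * 100us of runtime out of every 10ms period, with the deadline
	 * equal to the period.
	 */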

	sched_setattr(current, &attr);

	/* Make it know we have a new prio */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();
	set_current_state(TASK_INTERRUPTIBLE);

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
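
/*
 * Illustrative sketch (not part of the original file): the
 * kthread/completion handshake the wakeup test is built on.  The child
 * signals readiness, parks itself, and the parent performs the wakeup it
 * wants the tracer to measure.  All names here are hypothetical.
 */
static int example_wakeup_child(void *data)
{
	struct completion *ready = data;

	complete(ready);		/* tell the parent we are running */

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

static void example_wakeup_handshake(void)
{
	DECLARE_COMPLETION_ONSTACK(ready);
	struct task_struct *p;

	p = kthread_run(example_wakeup_child, &ready, "example-wakeup");
	if (IS_ERR(p))
		return;

	wait_for_completion(&ready);	/* child is up and about to sleep */
	msleep(100);			/* give it time to actually sleep */

	wake_up_process(p);		/* the wakeup a tracer would measure */

	kthread_stop(p);
}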
__init int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/*
	 * Sleep to make sure the -deadline thread is asleep too.
	 * On virtual machines we can't rely on timings,
	 * but we want to make sure this test still works.
	 */
	msleep(100);

	init_completion(&data.is_ready);

	/* memory barrier is in the wake_up_process() */
	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);
	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
__init int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
#ifdef CONFIG_BRANCH_TRACER
__init int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */