/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
                return 1;
        }
        return 0;
}

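/*
 * Consume every event queued on one CPU's ring buffer and check that
 * each entry is of a type the selftests are expected to produce.
 */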
static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer holds at most trace_buf_size entries;
                 * if we loop more times than that, something is wrong
                 * with the ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        arch_spin_lock(&buf->tr->max_lock);

        cnt = ring_buffer_entries(buf->buffer);

        /*
         * The trace_test_buffer_cpu runs a while loop to consume all data.
         * If the calling tracer is broken, and is constantly filling
         * the buffer, this will run forever, and hard lock the box.
         * We disable the ring buffer while we do this test to prevent
         * a hard lock up.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(buf, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        arch_spin_unlock(&buf->tr->max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
               trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

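/*
 * The counters and probe callbacks below back the dynamic ftrace ops
 * selftest: each probe simply counts how often it was invoked, so the
 * test can verify that the filters route calls to the right callbacks.
 */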
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
                                         unsigned long pip,
                                         struct ftrace_ops *op,
                                         struct pt_regs *pt_regs)
{
        trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
        .func           = trace_selftest_test_probe1_func,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
        .func           = trace_selftest_test_probe2_func,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
        .func           = trace_selftest_test_probe3_func,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
        printk("(%d %d %d %d %d) ",
               trace_selftest_test_probe1_cnt,
               trace_selftest_test_probe2_cnt,
               trace_selftest_test_probe3_cnt,
               trace_selftest_test_global_cnt,
               trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
        trace_selftest_test_probe1_cnt = 0;
        trace_selftest_test_probe2_cnt = 0;
        trace_selftest_test_probe3_cnt = 0;
        trace_selftest_test_global_cnt = 0;
        trace_selftest_test_dyn_cnt = 0;
}

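/*
 * Register the three filtered probes (plus the trace_array ops when
 * cnt > 1 and, later, a dynamically allocated ops), call the two test
 * functions, and check after every step that each counter advanced
 * exactly as the filters dictate. Returns 0 on success, -1 on mismatch.
 */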
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
        int save_ftrace_enabled = ftrace_enabled;
        struct ftrace_ops *dyn_ops;
        char *func1_name;
        char *func2_name;
        int len1;
        int len2;
        int ret = -1;

        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace ops #%d: ", cnt);

        ftrace_enabled = 1;
        reset_counts();

        /* Handle PPC64 '.' name */
        func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
        len1 = strlen(func1_name);
        len2 = strlen(func2_name);

        /*
         * Probe 1 will trace function 1.
         * Probe 2 will trace function 2.
         * Probe 3 will trace functions 1 and 2.
         */
        ftrace_set_filter(&test_probe1, func1_name, len1, 1);
        ftrace_set_filter(&test_probe2, func2_name, len2, 1);
        ftrace_set_filter(&test_probe3, func1_name, len1, 1);
        ftrace_set_filter(&test_probe3, func2_name, len2, 0);

        register_ftrace_function(&test_probe1);
        register_ftrace_function(&test_probe2);
        register_ftrace_function(&test_probe3);
        /* First time we are running with main function */
        if (cnt > 1) {
                ftrace_init_array_ops(tr, trace_selftest_test_global_func);
                register_ftrace_function(tr->ops);
        }

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 0)
                goto out;
        if (trace_selftest_test_probe3_cnt != 1)
                goto out;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out;
        }

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out;
        if (trace_selftest_test_probe3_cnt != 2)
                goto out;

        /* Add a dynamic probe */
        dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
        if (!dyn_ops) {
                printk("MEMORY ERROR ");
                goto out;
        }

        dyn_ops->func = trace_selftest_test_dyn_func;

        register_ftrace_function(dyn_ops);

        trace_selftest_test_global_cnt = 0;

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 3)
                goto out_free;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out;
        }
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;

        ret = 0;
 out_free:
        unregister_ftrace_function(dyn_ops);
        kfree(dyn_ops);

 out:
        /* Purposely unregister in the same order */
        unregister_ftrace_function(&test_probe1);
        unregister_ftrace_function(&test_probe2);
        unregister_ftrace_function(&test_probe3);
        if (cnt > 1)
                unregister_ftrace_function(tr->ops);
        ftrace_reset_array_ops(tr);

        /* Make sure everything is off */
        reset_counts();
        DYN_FTRACE_TEST_NAME();
        DYN_FTRACE_TEST_NAME();

        if (trace_selftest_test_probe1_cnt ||
            trace_selftest_test_probe2_cnt ||
            trace_selftest_test_probe3_cnt ||
            trace_selftest_test_global_cnt ||
            trace_selftest_test_dyn_cnt)
                ret = -1;

        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                                  struct trace_array *tr,
                                                  int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* passed in by parameter to fool gcc from optimizing */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);

        ftrace_enabled = 1;
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                trace->reset(tr);
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

        /* Test the ops with global tracing running */
        ret = trace_selftest_ops(tr, 1);
        trace->reset(tr);

 out:
        ftrace_enabled = save_ftrace_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);

        /* Test the ops with global tracing off */
        if (!ret)
                ret = trace_selftest_ops(tr, 2);

        return ret;
}

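/*
 * Recursion selftest callbacks: the first probe recurses on purpose and
 * relies on the ftrace infrastructure to stop it; the second declares
 * itself recursion safe and expects to recurse exactly once.
 */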
static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
                                               unsigned long pip,
                                               struct ftrace_ops *op,
                                               struct pt_regs *pt_regs)
{
        /*
         * This function is registered without the recursion safe flag.
         * The ftrace infrastructure should provide the recursion
         * protection. If not, this will crash the kernel!
         */
        if (trace_selftest_recursion_cnt++ > 10)
                return;
        DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
                                                    unsigned long pip,
                                                    struct ftrace_ops *op,
                                                    struct pt_regs *pt_regs)
{
        /*
         * We said we would provide our own recursion. By calling
         * this function again, we should recurse back into this function
         * and count again. But this only happens if the arch supports
         * all of ftrace features and nothing else is using the function
         * tracing utility.
         */
        if (trace_selftest_recursion_cnt++)
                return;
        DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
        .func           = trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
        .func           = trace_selftest_test_recursion_safe_func,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
};

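/*
 * Run DYN_FTRACE_TEST_NAME() once against each probe above and check the
 * recursion counter: ftrace must stop the unprotected probe after a single
 * call, while the recursion-safe probe is expected to recurse exactly once.
 */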
static int
trace_selftest_function_recursion(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_rec_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_rec_probe);

        ret = -1;
        if (trace_selftest_recursion_cnt != 1) {
                pr_cont("*callback not called once (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        trace_selftest_recursion_cnt = 1;

        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion safe: ");

        ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_recsafe_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_recsafe_probe);

        ret = -1;
        if (trace_selftest_recursion_cnt != 2) {
                pr_cont("*callback not called expected 2 times (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
        TRACE_SELFTEST_REGS_START,
        TRACE_SELFTEST_REGS_FOUND,
        TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
                                          unsigned long pip,
                                          struct ftrace_ops *op,
                                          struct pt_regs *pt_regs)
{
        if (pt_regs)
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
        else
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
        .func           = trace_selftest_test_regs_func,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

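/*
 * Verify the FTRACE_OPS_FL_SAVE_REGS handling: with
 * CONFIG_DYNAMIC_FTRACE_WITH_REGS the callback must receive a non-NULL
 * pt_regs, and without arch support registration must fail until the
 * SAVE_REGS_IF_SUPPORTED flag is added.
 */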
static int
trace_selftest_function_regs(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;
        int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        supported = 1;
#endif

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace regs%s: ",
                !supported ? "(no arch support)" : "");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
        /*
         * If DYNAMIC_FTRACE is not set, then we just trace all functions.
         * This test really doesn't care.
         */
        if (ret && ret != -ENODEV) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_regs_probe);
        /*
         * Now if the arch does not support passing regs, then this should
         * have failed.
         */
        if (!supported) {
                if (!ret) {
                        pr_cont("*registered save-regs without arch support* ");
                        goto out;
                }
                test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
                ret = register_ftrace_function(&test_regs_probe);
        }
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_regs_probe);

        ret = -1;

        switch (trace_selftest_regs_stat) {
        case TRACE_SELFTEST_REGS_START:
                pr_cont("*callback never called* ");
                goto out;

        case TRACE_SELFTEST_REGS_FOUND:
                if (supported)
                        break;
                pr_cont("*callback received regs without arch support* ");
                goto out;

        case TRACE_SELFTEST_REGS_NOT_FOUND:
                if (!supported)
                        break;
                pr_cont("*callback received NULL regs* ");
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);

        ftrace_enabled = 1;
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);
        if (ret)
                goto out;

        ret = trace_selftest_function_recursion();
        if (ret)
                goto out;

        ret = trace_selftest_function_regs();
 out:
        ftrace_enabled = save_ftrace_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST     100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
        /* This is harmlessly racy, we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops) {
                        ftrace_dump(DUMP_ALL);
                        /* ftrace_dump() disables tracing */
                        tracing_on();
                }

                return 0;
        }

        return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer, from which this selftest
 * has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
                                      struct trace_array *tr)
{
        int ret;
        unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /*
         * Simulate the init() callback but we attach a watchdog callback
         * to detect and recover from possible hangs
         */
        tracing_reset_online_cpus(&tr->trace_buffer);
        set_graph_array(tr);
        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry_watchdog);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for a 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                tracing_selftest_disabled = true;
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);

        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* Don't test dynamic tracing, the function tracer already did */

out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptable,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max preempt off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptable,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs/preempt off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tr->max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

out:
        tracing_start();
out_no_start:
        trace->reset(tr);
        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
        struct completion       is_ready;
        int                     go;
};

static int trace_wakeup_test_thread(void *data)
{
        /* Make this a -deadline thread */
        static const struct sched_attr attr = {
                .sched_policy = SCHED_DEADLINE,
                .sched_runtime = 100000ULL,
                .sched_deadline = 10000000ULL,
                .sched_period = 10000000ULL
        };
        struct wakeup_test_data *x = data;

        sched_setattr(current, &attr);

        /* Make it know we have a new prio */
        complete(&x->is_ready);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!x->go) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        complete(&x->is_ready);

        set_current_state(TASK_INTERRUPTIBLE);

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}

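/*
 * Wakeup latency selftest: create a SCHED_DEADLINE kthread, wake it up
 * while the wakeup tracer is active, and check that the max latency
 * buffer recorded entries for the wakeup.
 */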
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        struct task_struct *p;
        struct wakeup_test_data data;
        unsigned long count;
        int ret;

        memset(&data, 0, sizeof(data));

        init_completion(&data.is_ready);

        /* create a -deadline thread */
        p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at -deadline policy */
        wait_for_completion(&data.is_ready);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        while (p->on_rq) {
                /*
                 * Sleep to make sure the -deadline thread is asleep too.
                 * On virtual machines we can't rely on timings,
                 * but we want to make sure this test still works.
                 */
                msleep(100);
        }

        init_completion(&data.is_ready);

        data.go = 1;
        /* memory barrier is in the wake_up_process() */
        wake_up_process(p);

        /* Wait for the task to wake up */
        wait_for_completion(&data.is_ready);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);

        trace->reset(tr);
        tracing_start();

        tr->max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */