// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
                return 1;
        }
        return 0;
}
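
/*
 * trace_test_buffer_cpu() below consumes and validates every event on
 * one CPU of the ring buffer. Note that ring_buffer_consume() is
 * destructive: each event it returns is removed from the buffer, so a
 * pass of this check drains the trace it inspects.
 */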
static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer is of size trace_buf_size; if we loop
                 * more times than that, something is wrong with the
                 * ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        arch_spin_lock(&buf->tr->max_lock);

        cnt = ring_buffer_entries(buf->buffer);

        /*
         * trace_test_buffer_cpu() runs a while loop to consume all data.
         * If the calling tracer is broken, and is constantly filling
         * the buffer, this will run forever, and hard lock the box.
         * We disable the ring buffer while we do this test to prevent
         * a hard lock up.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(buf, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        arch_spin_unlock(&buf->tr->max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
               trace->name, init_ret);
}
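
/*
 * Each trace_selftest_startup_*() below follows roughly the same
 * skeleton: tracer_init(), generate some traceable activity, stop
 * tracing, validate the entries with trace_test_buffer(), then
 * trace->reset() and tracing_start() to put the system back.
 */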

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
                                         unsigned long pip,
                                         struct ftrace_ops *op,
                                         struct ftrace_regs *fregs)
{
        trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
        .func = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
        .func = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
        .func = trace_selftest_test_probe3_func,
};

static void print_counts(void)
{
        printk("(%d %d %d %d %d) ",
               trace_selftest_test_probe1_cnt,
               trace_selftest_test_probe2_cnt,
               trace_selftest_test_probe3_cnt,
               trace_selftest_test_global_cnt,
               trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
        trace_selftest_test_probe1_cnt = 0;
        trace_selftest_test_probe2_cnt = 0;
        trace_selftest_test_probe3_cnt = 0;
        trace_selftest_test_global_cnt = 0;
        trace_selftest_test_dyn_cnt = 0;
}
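
/*
 * Expected counts for trace_selftest_ops() below, given its filters
 * (probe 1 on function 1, probe 2 on function 2, probe 3 on both):
 *
 *      after DYN_FTRACE_TEST_NAME():   probe1=1 probe2=0 probe3=1
 *      after DYN_FTRACE_TEST_NAME2():  probe1=1 probe2=1 probe3=2
 *
 * Once the unfiltered dynamic probe is added, every traced call also
 * bumps trace_selftest_test_dyn_cnt.
 */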
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
        int save_ftrace_enabled = ftrace_enabled;
        struct ftrace_ops *dyn_ops;
        char *func1_name;
        char *func2_name;
        int len1;
        int len2;
        int ret = -1;

        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace ops #%d: ", cnt);

        ftrace_enabled = 1;
        reset_counts();

        /* Handle PPC64 '.' name */
        func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
        len1 = strlen(func1_name);
        len2 = strlen(func2_name);

        /*
         * Probe 1 will trace function 1.
         * Probe 2 will trace function 2.
         * Probe 3 will trace functions 1 and 2.
         */
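        /*
         * The last argument to ftrace_set_filter() is "reset": the first
         * call for test_probe3 below resets its filter to function 1,
         * while the final call passes 0 so function 2 is appended to
         * probe 3's filter instead of replacing it.
         */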
        ftrace_set_filter(&test_probe1, func1_name, len1, 1);
        ftrace_set_filter(&test_probe2, func2_name, len2, 1);
        ftrace_set_filter(&test_probe3, func1_name, len1, 1);
        ftrace_set_filter(&test_probe3, func2_name, len2, 0);

        register_ftrace_function(&test_probe1);
        register_ftrace_function(&test_probe2);
        register_ftrace_function(&test_probe3);
        /* First time we are running with main function */
        if (cnt > 1) {
                ftrace_init_array_ops(tr, trace_selftest_test_global_func);
                register_ftrace_function(tr->ops);
        }

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 0)
                goto out;
        if (trace_selftest_test_probe3_cnt != 1)
                goto out;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out;
        }

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out;
        if (trace_selftest_test_probe3_cnt != 2)
                goto out;

        /* Add a dynamic probe */
        dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
        if (!dyn_ops) {
                printk("MEMORY ERROR ");
                goto out;
        }

        dyn_ops->func = trace_selftest_test_dyn_func;

        register_ftrace_function(dyn_ops);

        trace_selftest_test_global_cnt = 0;

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 3)
                goto out_free;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out_free;
        }
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;

        ret = 0;
 out_free:
        unregister_ftrace_function(dyn_ops);
        kfree(dyn_ops);

 out:
        /* Purposely unregister in the same order */
        unregister_ftrace_function(&test_probe1);
        unregister_ftrace_function(&test_probe2);
        unregister_ftrace_function(&test_probe3);
        if (cnt > 1)
                unregister_ftrace_function(tr->ops);
        ftrace_reset_array_ops(tr);

        /* Make sure everything is off */
        reset_counts();
        DYN_FTRACE_TEST_NAME();
        DYN_FTRACE_TEST_NAME();

        if (trace_selftest_test_probe1_cnt ||
            trace_selftest_test_probe2_cnt ||
            trace_selftest_test_probe3_cnt ||
            trace_selftest_test_global_cnt ||
            trace_selftest_test_dyn_cnt)
                ret = -1;

        ftrace_enabled = save_ftrace_enabled;

        return ret;
}
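
/*
 * trace_selftest_ops() is run twice by the dynamic tracing selftest
 * below: once with cnt == 1 while the global function tracer is still
 * running, and once with cnt == 2 after it has been torn down; the
 * cnt > 1 pass additionally exercises tr->ops via
 * ftrace_init_array_ops().
 */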

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                                  struct trace_array *tr,
                                                  int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* passed in by parameter to keep gcc from optimizing it away */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        ftrace_enabled = 1;
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                trace->reset(tr);
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

        /* Test the ops with global tracing running */
        ret = trace_selftest_ops(tr, 1);
        trace->reset(tr);

 out:
        ftrace_enabled = save_ftrace_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);

        /* Test the ops with global tracing off */
        if (!ret)
                ret = trace_selftest_ops(tr, 2);

        return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
                                               unsigned long pip,
                                               struct ftrace_ops *op,
                                               struct ftrace_regs *fregs)
{
        /*
         * This function is registered with the FTRACE_OPS_FL_RECURSION
         * flag, so the ftrace infrastructure should provide the
         * recursion protection. If not, this will crash the kernel!
         */
        if (trace_selftest_recursion_cnt++ > 10)
                return;
        DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
                                                    unsigned long pip,
                                                    struct ftrace_ops *op,
                                                    struct ftrace_regs *fregs)
{
        /*
         * We said we would provide our own recursion protection. By
         * calling this function again, we should recurse back into this
         * function and count again. But this only happens if the arch
         * supports all of ftrace's features and nothing else is using
         * the function tracing utility.
         */
        if (trace_selftest_recursion_cnt++)
                return;
        DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
        .func = trace_selftest_test_recursion_func,
        .flags = FTRACE_OPS_FL_RECURSION,
};

static struct ftrace_ops test_recsafe_probe = {
        .func = trace_selftest_test_recursion_safe_func,
};
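
/*
 * Note the flag asymmetry: FTRACE_OPS_FL_RECURSION asks the ftrace
 * core to supply recursion protection for the callback, while
 * test_recsafe_probe leaves the flag clear, declaring that its
 * callback copes with recursing into itself.
 */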

static int
trace_selftest_function_recursion(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_rec_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_rec_probe);

        ret = -1;
        /*
         * Recursion allows for transitions between contexts,
         * and may call the callback twice.
         */
        if (trace_selftest_recursion_cnt != 1 &&
            trace_selftest_recursion_cnt != 2) {
                pr_cont("*callback not called once (or twice) (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        trace_selftest_recursion_cnt = 1;

        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion safe: ");

        ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_recsafe_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_recsafe_probe);

        ret = -1;
        if (trace_selftest_recursion_cnt != 2) {
                pr_cont("*callback not called expected 2 times (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        ret = 0;
 out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
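
/*
 * Without CONFIG_DYNAMIC_FTRACE there are no filters or dynamically
 * registered ops to exercise, so the stub definitions above simply
 * report success.
 */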

static enum {
        TRACE_SELFTEST_REGS_START,
        TRACE_SELFTEST_REGS_FOUND,
        TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
                                          unsigned long pip,
                                          struct ftrace_ops *op,
                                          struct ftrace_regs *fregs)
{
        struct pt_regs *regs = ftrace_get_regs(fregs);

        if (regs)
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
        else
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
        .func = trace_selftest_test_regs_func,
        .flags = FTRACE_OPS_FL_SAVE_REGS,
};
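
/*
 * FTRACE_OPS_FL_SAVE_REGS demands a full pt_regs and is expected to
 * make registration fail on architectures that cannot provide one;
 * the weaker FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED (added below when
 * needed) means "pass regs if the arch can, NULL otherwise".
 */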

static int
trace_selftest_function_regs(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;
        int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        supported = 1;
#endif

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace regs%s: ",
                !supported ? "(no arch support)" : "");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
        /*
         * If DYNAMIC_FTRACE is not set, then we just trace all functions.
         * This test really doesn't care.
         */
        if (ret && ret != -ENODEV) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_regs_probe);
        /*
         * Now if the arch does not support passing regs, then this should
         * have failed.
         */
        if (!supported) {
                if (!ret) {
                        pr_cont("*registered save-regs without arch support* ");
                        goto out;
                }
                test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
                ret = register_ftrace_function(&test_regs_probe);
        }
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_regs_probe);

        ret = -1;

        switch (trace_selftest_regs_stat) {
        case TRACE_SELFTEST_REGS_START:
                pr_cont("*callback never called* ");
                goto out;

        case TRACE_SELFTEST_REGS_FOUND:
                if (supported)
                        break;
                pr_cont("*callback received regs without arch support* ");
                goto out;

        case TRACE_SELFTEST_REGS_NOT_FOUND:
                if (!supported)
                        break;
                pr_cont("*callback received NULL regs* ");
                goto out;
        }

        ret = 0;
 out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        ftrace_enabled = 1;
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);
        if (ret)
                goto out;

        ret = trace_selftest_function_recursion();
        if (ret)
                goto out;

        ret = trace_selftest_function_regs();
 out:
        ftrace_enabled = save_ftrace_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST     100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
        /* This is harmlessly racy, we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops) {
                        ftrace_dump(DUMP_ALL);
                        /* ftrace_dump() disables tracing */
                        tracing_on();
                }
                return 0;
        }

        return trace_graph_entry(trace);
}

static struct fgraph_ops fgraph_ops __initdata = {
        .entryfunc = &trace_graph_entry_watchdog,
        .retfunc = &trace_graph_return,
};

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
                                      struct trace_array *tr)
{
        int ret;
        unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /*
         * Simulate the init() callback, but attach a watchdog callback
         * to detect and recover from possible hangs.
         */
        tracing_reset_online_cpus(&tr->array_buffer);
        set_graph_array(tr);
        ret = register_ftrace_graph(&fgraph_ops);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for a 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                disable_tracing_selftest("recovering from a hang");
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        /* Need to also simulate tr->reset to remove this fgraph_ops */
        tracing_stop_cmdline_record();
        unregister_ftrace_graph(&fgraph_ops);

        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* Don't test dynamic tracing, the function tracer already did */

 out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
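/*
 * The latency tracers below are checked against both buffers: the
 * live array_buffer shows tracing ran at all, while tr->max_buffer
 * holds the snapshot swapped in when a new maximum latency was
 * recorded.
 */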
static int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
static int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max preempt off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
static int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs/preempt off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tr->max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

 out:
        tracing_start();
 out_no_start:
        trace->reset(tr);
        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
static int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
        struct completion is_ready;
        int go;
};

static int trace_wakeup_test_thread(void *data)
{
        /* Make this a -deadline thread */
        static const struct sched_attr attr = {
                .sched_policy = SCHED_DEADLINE,
                .sched_runtime = 100000ULL,
                .sched_deadline = 10000000ULL,
                .sched_period = 10000000ULL
        };
        struct wakeup_test_data *x = data;

        sched_setattr(current, &attr);

        /* Make it known we have a new prio */
        complete(&x->is_ready);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!x->go) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        complete(&x->is_ready);

        set_current_state(TASK_INTERRUPTIBLE);

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}
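
/*
 * The deadline parameters above request 100us of runtime every 10ms
 * (sched_runtime, sched_deadline and sched_period are in nanoseconds).
 * A SCHED_DEADLINE task preempts any SCHED_FIFO/RR or normal task when
 * it wakes, giving the wakeup latency tracer a deterministic
 * highest-priority wakeup to measure.
 */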

static int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        struct task_struct *p;
        struct wakeup_test_data data;
        unsigned long count;
        int ret;

        memset(&data, 0, sizeof(data));

        init_completion(&data.is_ready);

        /* create a -deadline thread */
        p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at -deadline policy */
        wait_for_completion(&data.is_ready);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        while (p->on_rq) {
                /*
                 * Sleep to make sure the -deadline thread is asleep too.
                 * On virtual machines we can't rely on timings,
                 * but we want to make sure this test still works.
                 */
                msleep(100);
        }

        init_completion(&data.is_ready);

        data.go = 1;
        /* memory barrier is in the wake_up_process() */
        wake_up_process(p);

        /* Wait for the task to wake up */
        wait_for_completion(&data.is_ready);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);

        trace->reset(tr);
        tracing_start();

        tr->max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
static int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */