/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
	case TRACE_HW_BRANCHES:
	case TRACE_KSYM:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
			       entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);
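
	/*
	 * On PowerPC64, for example, the text symbol may appear as
	 * ".DYN_FTRACE_TEST_NAME"; the leading '*' glob lets the filter
	 * match both the plain and the dot-prefixed spelling.
	 */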

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
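
/*
 * The stub above is a GCC statement expression, so with dynamic ftrace
 * disabled a call such as
 *
 *	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
 *						     DYN_FTRACE_TEST_NAME);
 *
 * still compiles and simply evaluates to 0, i.e. the dynamic test is
 * skipped and treated as passing.
 */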

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;
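
/*
 * The threshold is deliberately huge: a 100ms test window cannot plausibly
 * produce 100 million real function entries, so crossing it is taken to
 * mean the graph tracer is stuck rather than making progress.
 */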

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false);

		return 0;
	}

	return trace_graph_entry(trace);
}
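
/*
 * The watchdog stands in for trace_graph_entry() only for the duration of
 * the selftest. On the hang path it returns 0, which tells the graph
 * tracer not to hook the function's return, so no further entries pile up.
 */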

/*
 * Pretty much the same as for the function tracer, from which this selftest
 * has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case a new max irqs-off latency is recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case a new max preempt-off latency is recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case a new max irqs/preempt-off latency is recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
528 ret
= trace_test_buffer(tr
, NULL
);
532 ret
= trace_test_buffer(&max_tr
, &count
);
534 if (!ret
&& !count
) {
535 printk(KERN_CONT
".. no entries found ..");
544 tracing_max_latency
= save_max
;
548 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);
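
	/*
	 * The completion is the handshake with trace_wakeup_test_thread():
	 * the thread completes it only after sched_setscheduler() has made
	 * it SCHED_FIFO, so any wakeup of 'p' from here on is an RT wakeup
	 * the wakeup tracer is expected to record.
	 */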

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some strange
	 * reason, the RT thread we created did not call schedule for 100ms
	 * after doing the completion, and we do a wakeup on a task that is
	 * already awake. But that is extremely unlikely, and the worst thing
	 * that happens in such a case is that we disable tracing. Honestly,
	 * if this race does happen, something is horribly wrong with the
	 * system.
	 */
	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

#ifdef CONFIG_HW_BRANCH_TRACER
int
trace_selftest_startup_hw_branches(struct tracer *trace,
				   struct trace_array *tr)
{
	struct trace_iterator *iter;
	struct tracer tracer;
	unsigned long count;
	int ret;

	if (!trace->open) {
		printk(KERN_CONT "missing open function...");
		return -1;
	}

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/*
	 * The hw-branch tracer needs to collect the trace from the various
	 * cpu trace buffers - before tracing is stopped.
	 */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	memcpy(&tracer, trace, sizeof(tracer));

	iter->trace = &tracer;
	iter->tr = tr;
	iter->pos = -1;
	mutex_init(&iter->mutex);

	trace->open(iter);

	mutex_destroy(&iter->mutex);
	kfree(iter);

	tracing_stop();

	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT "no entries found..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */

#ifdef CONFIG_KSYM_TRACER
static int ksym_selftest_dummy;

int
trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	ksym_selftest_dummy = 0;
	/* Register the read-write tracing request */
	ret = process_new_ksym_entry("ksym_selftest_dummy",
				     HW_BREAKPOINT_R | HW_BREAKPOINT_W,
				     (unsigned long)(&ksym_selftest_dummy));
	if (ret < 0) {
		printk(KERN_CONT "ksym_trace read-write startup test failed\n");
		goto ret_path;
	}

	/*
	 * Perform a read and a write operation over the dummy variable to
	 * trigger the tracer.
	 */
	if (ksym_selftest_dummy == 0)
		ksym_selftest_dummy++;

	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/*
	 * One read and one write were performed on the dummy variable above,
	 * so exactly two entries should have been logged in the trace buffer.
	 */
	if (!ret && count != 2) {
		printk(KERN_CONT "Ksym tracer startup test failed");
		ret = -1;
	}

ret_path:
	return ret;
}