/*
 * Linux 2.6.28-rc5
 * kernel/trace/trace_selftest.c
 * blob 90bc752a7580b3d4800417d2f7a28abe604c1d65
 */
1 /* Include in trace.c */
3 #include <linux/kthread.h>
4 #include <linux/delay.h>
6 static inline int trace_valid_entry(struct trace_entry *entry)
8 switch (entry->type) {
9 case TRACE_FN:
10 case TRACE_CTX:
11 case TRACE_WAKE:
12 case TRACE_CONT:
13 case TRACE_STACK:
14 case TRACE_PRINT:
15 case TRACE_SPECIAL:
16 return 1;
18 return 0;
21 static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
23 struct ring_buffer_event *event;
24 struct trace_entry *entry;
26 while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
27 entry = ring_buffer_event_data(event);
29 if (!trace_valid_entry(entry)) {
30 printk(KERN_CONT ".. invalid entry %d ",
31 entry->type);
32 goto failed;
35 return 0;
37 failed:
38 /* disable tracing */
39 tracing_disabled = 1;
40 printk(KERN_CONT ".. corrupted trace buffer .. ");
41 return -1;
45 * Test the trace buffer to see if all the elements
46 * are still sane.
48 static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
50 unsigned long flags, cnt = 0;
51 int cpu, ret = 0;
53 /* Don't allow flipping of max traces now */
54 raw_local_irq_save(flags);
55 __raw_spin_lock(&ftrace_max_lock);
57 cnt = ring_buffer_entries(tr->buffer);
59 for_each_possible_cpu(cpu) {
60 ret = trace_test_buffer_cpu(tr, cpu);
61 if (ret)
62 break;
64 __raw_spin_unlock(&ftrace_max_lock);
65 raw_local_irq_restore(flags);
67 if (count)
68 *count = cnt;
70 return ret;
73 #ifdef CONFIG_FUNCTION_TRACER
75 #ifdef CONFIG_DYNAMIC_FTRACE
77 #define __STR(x) #x
78 #define STR(x) __STR(x)
80 /* Test dynamic code modification and ftrace filters */
81 int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
82 struct trace_array *tr,
83 int (*func)(void))
85 int save_ftrace_enabled = ftrace_enabled;
86 int save_tracer_enabled = tracer_enabled;
87 unsigned long count;
88 char *func_name;
89 int ret;
91 /* The ftrace test PASSED */
92 printk(KERN_CONT "PASSED\n");
93 pr_info("Testing dynamic ftrace: ");
95 /* enable tracing, and record the filter function */
96 ftrace_enabled = 1;
97 tracer_enabled = 1;
99 /* passed in by parameter to fool gcc from optimizing */
100 func();
103 * Some archs *cough*PowerPC*cough* add charachters to the
104 * start of the function names. We simply put a '*' to
105 * accomodate them.
107 func_name = "*" STR(DYN_FTRACE_TEST_NAME);
109 /* filter only on our function */
110 ftrace_set_filter(func_name, strlen(func_name), 1);
112 /* enable tracing */
113 tr->ctrl = 1;
114 trace->init(tr);
116 /* Sleep for a 1/10 of a second */
117 msleep(100);
119 /* we should have nothing in the buffer */
120 ret = trace_test_buffer(tr, &count);
121 if (ret)
122 goto out;
124 if (count) {
125 ret = -1;
126 printk(KERN_CONT ".. filter did not filter .. ");
127 goto out;
130 /* call our function again */
131 func();
133 /* sleep again */
134 msleep(100);
136 /* stop the tracing. */
137 tr->ctrl = 0;
138 trace->ctrl_update(tr);
139 ftrace_enabled = 0;
141 /* check the trace buffer */
142 ret = trace_test_buffer(tr, &count);
143 trace->reset(tr);
145 /* we should only have one item */
146 if (!ret && count != 1) {
147 printk(KERN_CONT ".. filter failed count=%ld ..", count);
148 ret = -1;
149 goto out;
151 out:
152 ftrace_enabled = save_ftrace_enabled;
153 tracer_enabled = save_tracer_enabled;
155 /* Enable tracing on all functions again */
156 ftrace_set_filter(NULL, 0, 1);
158 return ret;
160 #else
161 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
162 #endif /* CONFIG_DYNAMIC_FTRACE */
164 * Simple verification test of ftrace function tracer.
165 * Enable ftrace, sleep 1/10 second, and then read the trace
166 * buffer to see if all is in order.
169 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
171 int save_ftrace_enabled = ftrace_enabled;
172 int save_tracer_enabled = tracer_enabled;
173 unsigned long count;
174 int ret;
176 /* make sure msleep has been recorded */
177 msleep(1);
179 /* start the tracing */
180 ftrace_enabled = 1;
181 tracer_enabled = 1;
183 tr->ctrl = 1;
184 trace->init(tr);
185 /* Sleep for a 1/10 of a second */
186 msleep(100);
187 /* stop the tracing. */
188 tr->ctrl = 0;
189 trace->ctrl_update(tr);
190 ftrace_enabled = 0;
192 /* check the trace buffer */
193 ret = trace_test_buffer(tr, &count);
194 trace->reset(tr);
196 if (!ret && !count) {
197 printk(KERN_CONT ".. no entries found ..");
198 ret = -1;
199 goto out;
202 ret = trace_selftest_startup_dynamic_tracing(trace, tr,
203 DYN_FTRACE_TEST_NAME);
205 out:
206 ftrace_enabled = save_ftrace_enabled;
207 tracer_enabled = save_tracer_enabled;
209 /* kill ftrace totally if we failed */
210 if (ret)
211 ftrace_kill();
213 return ret;
215 #endif /* CONFIG_FUNCTION_TRACER */
217 #ifdef CONFIG_IRQSOFF_TRACER
219 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
221 unsigned long save_max = tracing_max_latency;
222 unsigned long count;
223 int ret;
225 /* start the tracing */
226 tr->ctrl = 1;
227 trace->init(tr);
228 /* reset the max latency */
229 tracing_max_latency = 0;
230 /* disable interrupts for a bit */
231 local_irq_disable();
232 udelay(100);
233 local_irq_enable();
234 /* stop the tracing. */
235 tr->ctrl = 0;
236 trace->ctrl_update(tr);
237 /* check both trace buffers */
238 ret = trace_test_buffer(tr, NULL);
239 if (!ret)
240 ret = trace_test_buffer(&max_tr, &count);
241 trace->reset(tr);
243 if (!ret && !count) {
244 printk(KERN_CONT ".. no entries found ..");
245 ret = -1;
248 tracing_max_latency = save_max;
250 return ret;
252 #endif /* CONFIG_IRQSOFF_TRACER */
254 #ifdef CONFIG_PREEMPT_TRACER
256 trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
258 unsigned long save_max = tracing_max_latency;
259 unsigned long count;
260 int ret;
262 /* start the tracing */
263 tr->ctrl = 1;
264 trace->init(tr);
265 /* reset the max latency */
266 tracing_max_latency = 0;
267 /* disable preemption for a bit */
268 preempt_disable();
269 udelay(100);
270 preempt_enable();
271 /* stop the tracing. */
272 tr->ctrl = 0;
273 trace->ctrl_update(tr);
274 /* check both trace buffers */
275 ret = trace_test_buffer(tr, NULL);
276 if (!ret)
277 ret = trace_test_buffer(&max_tr, &count);
278 trace->reset(tr);
280 if (!ret && !count) {
281 printk(KERN_CONT ".. no entries found ..");
282 ret = -1;
285 tracing_max_latency = save_max;
287 return ret;
289 #endif /* CONFIG_PREEMPT_TRACER */
291 #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
293 trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
295 unsigned long save_max = tracing_max_latency;
296 unsigned long count;
297 int ret;
299 /* start the tracing */
300 tr->ctrl = 1;
301 trace->init(tr);
303 /* reset the max latency */
304 tracing_max_latency = 0;
306 /* disable preemption and interrupts for a bit */
307 preempt_disable();
308 local_irq_disable();
309 udelay(100);
310 preempt_enable();
311 /* reverse the order of preempt vs irqs */
312 local_irq_enable();
314 /* stop the tracing. */
315 tr->ctrl = 0;
316 trace->ctrl_update(tr);
317 /* check both trace buffers */
318 ret = trace_test_buffer(tr, NULL);
319 if (ret)
320 goto out;
322 ret = trace_test_buffer(&max_tr, &count);
323 if (ret)
324 goto out;
326 if (!ret && !count) {
327 printk(KERN_CONT ".. no entries found ..");
328 ret = -1;
329 goto out;
332 /* do the test by disabling interrupts first this time */
333 tracing_max_latency = 0;
334 tr->ctrl = 1;
335 trace->ctrl_update(tr);
336 preempt_disable();
337 local_irq_disable();
338 udelay(100);
339 preempt_enable();
340 /* reverse the order of preempt vs irqs */
341 local_irq_enable();
343 /* stop the tracing. */
344 tr->ctrl = 0;
345 trace->ctrl_update(tr);
346 /* check both trace buffers */
347 ret = trace_test_buffer(tr, NULL);
348 if (ret)
349 goto out;
351 ret = trace_test_buffer(&max_tr, &count);
353 if (!ret && !count) {
354 printk(KERN_CONT ".. no entries found ..");
355 ret = -1;
356 goto out;
359 out:
360 trace->reset(tr);
361 tracing_max_latency = save_max;
363 return ret;
365 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
367 #ifdef CONFIG_NOP_TRACER
/*
 * The nop tracer records nothing by design, so there is nothing
 * to check here.  Always succeeds.
 */
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
374 #endif
376 #ifdef CONFIG_SCHED_TRACER
377 static int trace_wakeup_test_thread(void *data)
379 /* Make this a RT thread, doesn't need to be too high */
380 struct sched_param param = { .sched_priority = 5 };
381 struct completion *x = data;
383 sched_setscheduler(current, SCHED_FIFO, &param);
385 /* Make it know we have a new prio */
386 complete(x);
388 /* now go to sleep and let the test wake us up */
389 set_current_state(TASK_INTERRUPTIBLE);
390 schedule();
392 /* we are awake, now wait to disappear */
393 while (!kthread_should_stop()) {
395 * This is an RT task, do short sleeps to let
396 * others run.
398 msleep(100);
401 return 0;
405 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
407 unsigned long save_max = tracing_max_latency;
408 struct task_struct *p;
409 struct completion isrt;
410 unsigned long count;
411 int ret;
413 init_completion(&isrt);
415 /* create a high prio thread */
416 p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
417 if (IS_ERR(p)) {
418 printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
419 return -1;
422 /* make sure the thread is running at an RT prio */
423 wait_for_completion(&isrt);
425 /* start the tracing */
426 tr->ctrl = 1;
427 trace->init(tr);
428 /* reset the max latency */
429 tracing_max_latency = 0;
431 /* sleep to let the RT thread sleep too */
432 msleep(100);
435 * Yes this is slightly racy. It is possible that for some
436 * strange reason that the RT thread we created, did not
437 * call schedule for 100ms after doing the completion,
438 * and we do a wakeup on a task that already is awake.
439 * But that is extremely unlikely, and the worst thing that
440 * happens in such a case, is that we disable tracing.
441 * Honestly, if this race does happen something is horrible
442 * wrong with the system.
445 wake_up_process(p);
447 /* give a little time to let the thread wake up */
448 msleep(100);
450 /* stop the tracing. */
451 tr->ctrl = 0;
452 trace->ctrl_update(tr);
453 /* check both trace buffers */
454 ret = trace_test_buffer(tr, NULL);
455 if (!ret)
456 ret = trace_test_buffer(&max_tr, &count);
459 trace->reset(tr);
461 tracing_max_latency = save_max;
463 /* kill the thread */
464 kthread_stop(p);
466 if (!ret && !count) {
467 printk(KERN_CONT ".. no entries found ..");
468 ret = -1;
471 return ret;
473 #endif /* CONFIG_SCHED_TRACER */
475 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
477 trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
479 unsigned long count;
480 int ret;
482 /* start the tracing */
483 tr->ctrl = 1;
484 trace->init(tr);
485 /* Sleep for a 1/10 of a second */
486 msleep(100);
487 /* stop the tracing. */
488 tr->ctrl = 0;
489 trace->ctrl_update(tr);
490 /* check the trace buffer */
491 ret = trace_test_buffer(tr, &count);
492 trace->reset(tr);
494 if (!ret && !count) {
495 printk(KERN_CONT ".. no entries found ..");
496 ret = -1;
499 return ret;
501 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
503 #ifdef CONFIG_SYSPROF_TRACER
505 trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
507 unsigned long count;
508 int ret;
510 /* start the tracing */
511 tr->ctrl = 1;
512 trace->init(tr);
513 /* Sleep for a 1/10 of a second */
514 msleep(100);
515 /* stop the tracing. */
516 tr->ctrl = 0;
517 trace->ctrl_update(tr);
518 /* check the trace buffer */
519 ret = trace_test_buffer(tr, &count);
520 trace->reset(tr);
522 return ret;
524 #endif /* CONFIG_SYSPROF_TRACER */