/* kernel/trace/trace_functions_graph.c (Linux 3.18.4) */
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;
/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}
/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}
/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;
static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

static struct trace_array *graph_array;
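
/*
 * Usage sketch for the options above (paths assume debugfs mounted at
 * /sys/kernel/debug): each tracer_opt maps to a writable name under
 * trace_options, prefixed with "no" to clear it:
 *
 *   # cd /sys/kernel/debug/tracing
 *   # echo function_graph > current_tracer
 *   # echo funcgraph-proc > trace_options        (enable proc name/pid)
 *   # echo nofuncgraph-irqs > trace_options      (hide irq functions)
 */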
/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq() and others to fill space in the
 * DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);
/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * curr_ret_stack is an index into the ftrace return stack of the
	 * current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering
	 * out specific functions, the index is made negative by
	 * subtracting a huge value (FTRACE_NOTRACE_DEPTH), so that ftrace
	 * ignores any record with a negative index. The index is
	 * recovered when returning from the filtered function by adding
	 * FTRACE_NOTRACE_DEPTH back, and recording then continues
	 * normally.
	 *
	 * curr_ret_stack is initialized to -1 and gets increased in this
	 * function. So it can be less than -1 only if the function was
	 * filtered out via ftrace_graph_notrace_addr(), which can be set
	 * from the set_graph_notrace file in debugfs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}
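
/*
 * A worked sketch of the notrace index trick above (values illustrative;
 * FTRACE_NOTRACE_DEPTH is simply a huge kernel constant):
 *
 *   push of a notrace'd func with curr_ret_stack == 2:
 *     index = ++curr_ret_stack;                  index == 3
 *     curr_ret_stack -= FTRACE_NOTRACE_DEPTH;    now negative: ignored
 *
 *   return from that func (see ftrace_return_to_handler() below):
 *     curr_ret_stack--;                          still < -1
 *     curr_ret_stack += FTRACE_NOTRACE_DEPTH;    back to 2, tracing resumes
 */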
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover the index to get the original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;

	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
int __trace_graph_entry(struct trace_array *tr,
			struct ftrace_graph_ent *trace,
			unsigned long flags,
			int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it when it is nested in an enabled function, or is itself enabled. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}
void __trace_graph_return(struct trace_array *tr,
			struct ftrace_graph_ret *trace,
			unsigned long flags,
			int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}
static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
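
/*
 * Illustrative result of the centering above: for comm "sshd" and pid
 * 1755, len = 4 + 4 + 1 = 9, so 5 padding spaces are split 2/3 around
 * the text, yielding the 14-character field "  sshd-1755   " (comm is
 * truncated to 7 characters first).
 */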
static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}
/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;

	/*
	 * Context-switch trace line:
	 *
	 *  ------------------------------------------
	 *  | 1)  migration/0--1  =>  sshd-1755
	 *  ------------------------------------------
	 */
	ret = trace_seq_puts(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
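
/*
 * What leaf detection buys us in the output, sketched with made-up
 * durations: an entry event immediately followed by its own return
 * collapses to a single line,
 *
 *   1)   0.637 us    |  kfree();
 *
 * instead of the nested form used when other events intervene:
 *
 *   1)               |  kfree() {
 *   1)   0.637 us    |  }
 */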
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_puts(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_puts(s, "==========>");
	else
		ret = trace_seq_puts(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_putc(s, '\n');

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
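
/*
 * Sketch of the irq markers emitted above, bracketing a handler that
 * fires inside a traced function (handler name illustrative):
 *
 *   1)   ==========> |
 *   1)               |    smp_apic_timer_interrupt() {
 *   ...
 *   1)   <========== |
 */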
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print the nsecs remainder (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_puts(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
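
/*
 * Worked example of the formatting above, assuming duration = 12345 ns:
 * do_div(12345, 1000) leaves duration == 12 with nsecs_rem == 345, so
 * the emitted field is "12.345 us " padded to the seven-character
 * number column.
 */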
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		ret = trace_seq_puts(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_START:
		ret = trace_seq_puts(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_END:
		ret = trace_seq_puts(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			ret = trace_seq_puts(s, "! ");
		/* Duration exceeded 10 usecs */
		else if (duration > 10000ULL)
			ret = trace_seq_puts(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_puts(s, "  ");

	/* Catching here any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_puts(s, "| ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_puts(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}
/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}
/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the return of the
	 * entry function. Let's not trace it and clear the entry depth,
	 * since we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) {
		ret = trace_seq_puts(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_putc(s, ' ');
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_puts(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_puts(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}
static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}
static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}
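
/*
 * With the default flags, the two header lines built above come out
 * roughly as follows (spacing approximate):
 *
 *   # CPU  DURATION                  FUNCTION CALLS
 *   # |     |   |                     |   |   |   |
 */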
static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}
void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}
static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}
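
/*
 * Usage sketch for the file implemented above (path assumes debugfs
 * mounted at /sys/kernel/debug):
 *
 *   # echo 3 > /sys/kernel/debug/tracing/max_graph_depth    (limit depth)
 *   # echo 0 > /sys/kernel/debug/tracing/max_graph_depth    (no limit)
 */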
static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_debugfs);
static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);
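
/*
 * End-to-end usage sketch, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *   # cd /sys/kernel/debug/tracing
 *   # echo function_graph > current_tracer
 *   # echo do_sys_open > set_graph_function    (optional: narrow the graph)
 *   # cat trace
 */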