/*
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"
/* Per-cpu tracking of the last pid seen and the current call depth */
struct fgraph_data {
	pid_t		last_pid;
	int		depth;
};

#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	{ } /* Empty entry */
};
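/*
 * Note: the options above are toggled at run time through the tracing
 * "trace_options" file (a minimal usage sketch, assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *   # echo funcgraph-proc > /sys/kernel/debug/tracing/trace_options
 *   # echo nofuncgraph-overhead > /sys/kernel/debug/tracing/trace_options
 */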
static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION,
	.opts = trace_opts
};

static struct trace_array *graph_array;
/* Add a function return address to the trace stack on thread info.*/
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = index;

	return 0;
}
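/*
 * For reference (an informal sketch, not a definition from this file):
 * each entry pushed above lives in the task's ret_stack[], an array of
 * struct ftrace_ret_stack declared in <linux/ftrace.h>, holding roughly
 * the original return address (ret), the traced function (func), the
 * entry timestamps (calltime/subtime) and, when the arch supports the
 * frame pointer test, the frame pointer (fp) recorded at entry.
 */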
/* Retrieve a function return address from the trace stack on thread info.*/
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
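/*
 * Rough control flow, for orientation (a sketch of the usual arch glue,
 * not something defined in this file): on function entry the arch's
 * mcount/graph hook ends up in ftrace_push_return_trace() above, after
 * swapping the real return address for a trampoline; when the traced
 * function returns, the trampoline calls ftrace_return_to_handler(),
 * which pops the saved entry, emits the return event and hands back the
 * original return address to jump to.
 */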
static int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);

	return 1;
}
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	if (!ftrace_graph_addr(trace->func))
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}
	/* Only do the atomic if it is not already set */
	if (!test_tsk_trace_graph(current))
		set_tsk_trace_graph(current);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
static void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);
}
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	if (!trace->depth)
		clear_tsk_trace_graph(current);
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email.
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return TRACE_TYPE_PARTIAL_LINE;

	return trace_print_lat_fmt(s, entry);
}
/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct ring_buffer_iter *ring_iter;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	ring_iter = iter->buffer_iter[iter->cpu];

	/* First peek to compare current entry and the next one */
	if (ring_iter)
		event = ring_buffer_iter_peek(ring_iter, NULL);
	else {
		/* We need to consume the current entry to see the next one */
		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
					 NULL);
	}

	if (!event)
		return NULL;

	next = ring_buffer_event_data(event);

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
/* Signal an overhead of time execution to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* If duration isn't displayed, we don't need anything */
	if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
		return 1;

	/* Non nested entry or return */
	if (duration == -1)
		return trace_seq_printf(s, "  ");

	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			return trace_seq_printf(s, "! ");

		/* Duration exceeded 10 usecs */
		if (duration > 10000ULL)
			return trace_seq_printf(s, "+ ");
	}

	return trace_seq_printf(s, "  ");
}
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Don't close the duration column if we don't have one */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		trace_seq_printf(s, " |");
	ret = trace_seq_printf(s, "\n");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
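/*
 * Worked example of the conversion above (illustrative, not from the
 * original source): a duration of 3251 ns gives do_div() -> duration = 3,
 * nsecs_rem = 251, so the column reads "3.251 us" padded to the fixed
 * 7-character width.
 */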
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	int ret;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		*depth = call->depth - 1;
	}

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		*depth = call->depth;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
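/*
 * Illustrative rendering of the two entry styles above (exact columns
 * depend on the options that are enabled): a nested call opens a brace
 * that print_graph_return() closes later, while a leaf is printed on a
 * single line with its duration, e.g.
 *
 *  1)               |  do_sys_open() {
 *  1)   1.234 us    |    getname();
 *  1)   5.678 us    |  }
 */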
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter)
{
	int cpu = iter->cpu;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		return print_graph_entry_leaf(iter, field, leaf_ret, s);
	else
		return print_graph_entry_nested(iter, field, s, cpu);
}
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int ret;
	int i;

	if (data) {
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		*depth = trace->depth - 1;
	}

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "}\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overrun */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_printf(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->trace(iter, sym_flags);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_printf(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry *field, saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter);
	}
	default:
		return print_graph_comment(s, entry, iter);
	}

	return TRACE_TYPE_HANDLED;
}
static void print_lat_header(struct seq_file *s)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "						/* 4 spaces */
		"                 ";				/* 17 spaces */
	int size = 0;

	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| / _-=> lock-depth      \n", size, spaces);
	seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
}
static void print_graph_headers(struct seq_file *s)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s);

	/* 1st line */
	seq_printf(s, "#");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "|||||");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "|||||");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}
static void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
	int cpu;

	if (!data)
		pr_warning("function graph tracer: not enough memory\n");
	else
		for_each_possible_cpu(cpu) {
			pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
			int *depth = &(per_cpu_ptr(data, cpu)->depth);

			*pid = -1;
			*depth = 0;
		}

	iter->private = data;
}

static void graph_trace_close(struct trace_iterator *iter)
{
	free_percpu(iter->private);
}
static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.close		= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);
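/*
 * Typical usage from userspace, for reference (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *   # echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *   # cat /sys/kernel/debug/tracing/trace
 */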