/*
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"
/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;
struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};
struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};
#define TRACE_GRAPH_INDENT	2
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40
static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	{ } /* Empty entry */
};
static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};
static struct trace_array *graph_array;
/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
	DURATION_FILL_FULL  = -1,
	DURATION_FILL_START = -2,
	DURATION_FILL_END   = -3,
};
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);
/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = index;

	return 0;
}
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
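
/*
 * Write a function-entry event into the ring buffer. Returns 0 when
 * tracing is disabled on this CPU or the event cannot be reserved,
 * and 1 once the event has been committed (or discarded by the filter).
 */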
int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);

	return 1;
}
static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}
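
/*
 * Entry callback registered with the function graph infrastructure.
 * Filters out untraced tasks, unselected functions and (optionally)
 * irq context, then records the entry event with irqs disabled and
 * per-cpu reentrancy protection.
 */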
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* trace it when it is nested in a traced function or is itself enabled */
	if (!(trace->depth || ftrace_graph_addr(trace->func)) ||
	      ftrace_graph_ignore_irqs())
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}
static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}
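
/*
 * Helper for other tracers (e.g. irqsoff) that want a single function
 * reported through the graph output: emit a zero-duration entry/return
 * pair for @ip.
 */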
void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}
void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);
}
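
/*
 * Return callback registered with the function graph infrastructure:
 * mirrors trace_graph_entry() for function exit events.
 */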
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}
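
/*
 * Threshold variant of the return callback: only functions that ran
 * for at least tracing_thresh are recorded.
 */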
void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
static int max_bytes_for_cpu;
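
/* Print the CPU number, right-aligned to the width of the largest CPU id */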
static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email.
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
#define TRACE_GRAPH_PROCINFO_LENGTH	14
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}
/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;

	/*
	 * Context-switch trace line:
	 *
	 * ------------------------------------------
	 * | 1)  migration/0--1  =>  sshd-1755
	 * ------------------------------------------
	 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
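
/*
 * Peek at the next event on this CPU: if it is the return event that
 * matches @curr, the call is a leaf and can be printed on a single
 * line. Returns the matching return entry, or NULL if @curr is not
 * a leaf.
 */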
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->tr->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_printf(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(DURATION_FILL_START, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(DURATION_FILL_END, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
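
/*
 * Print a duration given in nanoseconds as microseconds with up to
 * three decimal places, padded to the fixed width of the column.
 */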
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (duration) {
	case DURATION_FILL_FULL:
		ret = trace_seq_printf(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case DURATION_FILL_START:
		ret = trace_seq_printf(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case DURATION_FILL_END:
		ret = trace_seq_printf(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an overhead of the execution time in the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			ret = trace_seq_printf(s, "! ");
		/* Duration exceeded 10 usecs */
		else if (duration > 10000ULL)
			ret = trace_seq_printf(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_printf(s, "  ");

	/* Catching here any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
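
/*
 * Case of a nested (non-leaf) function on its call entry: print
 * "func() {" and remember the call so the matching return can be
 * recognised later.
 */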
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
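
/*
 * Print everything that precedes the function column on a line:
 * context-switch notice, irq markers, absolute time, CPU, task/pid
 * and latency format fields, depending on the selected flags.
 */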
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}
/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}
/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
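
/*
 * Print a function entry event, either as a leaf ("func();") or as
 * the opening of a nested block ("func() {"), and remember whether
 * the output failed so it can be retried.
 */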
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name.
	 */
	if (func_match) {
		ret = trace_seq_printf(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
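
/*
 * Print any non-graph event (trace_printk() and other events) as a
 * C-style comment, indented at the current call depth.
 */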
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_printf(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_printf(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
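
/*
 * Main print_line entry point: dispatch the current event to the
 * entry, return or comment printers, replaying a previously failed
 * entry first if needed.
 */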
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;
	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}
static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}
static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}
static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}
void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}
void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}
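
/* Allocate the per-cpu state (last pid, depth, irq depth) used while printing */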
void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}
void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}
static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}
static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};
static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};
static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};
static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};
static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);