// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;
/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}
/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}
/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;
static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};
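
/*
 * Illustrative note (not part of the original file): while this tracer
 * is the current tracer, each option above shows up under trace_options
 * in tracefs and can be toggled at run time, e.g.:
 *
 *	echo funcgraph-proc > /sys/kernel/tracing/trace_options
 *	echo nofuncgraph-irqs > /sys/kernel/tracing/trace_options
 */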
static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * DURATION column is also being used to display IRQ signs,
 * following values are used by print_graph_irq and others
 * to fill in space into DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);
/* Add a function return address to the trace stack on thread info.*/
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index to ftrace return stack of
	 * current task. Its value should be in [0, FTRACE_RETFUNC_
	 * DEPTH) when the function graph tracer is used. To support
	 * filtering out specific functions, it makes the index
	 * negative by subtracting a huge value (FTRACE_NOTRACE_DEPTH)
	 * so that when ftrace sees a negative index it will ignore
	 * the record. The index gets recovered when returning from
	 * the filtered function by adding the FTRACE_NOTRACE_DEPTH
	 * back, and then it'll continue to record functions normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased
	 * in this function. So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr() which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	*depth = current->curr_ret_stack;

	return 0;
}
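
/*
 * Worked example of the index arithmetic above (illustrative, not part
 * of the original file), assuming FTRACE_NOTRACE_DEPTH is 65536:
 * entering a function filtered by set_graph_notrace at index 3 leaves
 * curr_ret_stack at 3 - 65536 = -65533.  Pushes for nested functions
 * then see curr_ret_stack < -1 and bail out with -EBUSY, so nothing
 * below the filtered function is recorded.  The matching pop in
 * ftrace_return_to_handler() adds FTRACE_NOTRACE_DEPTH back, recovering
 * index 3, and tracing resumes normally.
 */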
/* Retrieve a function return address to the trace stack on thread info.*/
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover index to get an original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;

	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	if (index < -1)
		index += FTRACE_NOTRACE_DEPTH;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
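
/*
 * Usage sketch (illustrative, not part of the original file): a stack
 * unwinder keeps one state variable per unwind and filters every
 * address it finds through this helper, e.g.:
 *
 *	int graph_idx = 0;
 *	...
 *	addr = ftrace_graph_ret_addr(task, &graph_idx, addr, addr_ptr);
 *
 * so that each 'return_to_handler' found on the stack is mapped back to
 * the real return address in call order.
 */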
int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}
static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}
void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}
static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}
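
/*
 * Worked example (illustrative, not part of the original file): with
 * TRACE_GRAPH_PROCINFO_LENGTH of 14, a task "bash" with pid 2015 has
 * len = 4 + 4 + 1 = 9, so spaces = 5 and the field renders as
 * "  bash-2015   " - two leading and three trailing spaces center the
 * name in the column.
 */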
static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}
static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}
void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}
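
/*
 * Worked example (illustrative, not part of the original file): a
 * duration of 3277 ns gives duration = 3 and nsecs_rem = 277 after the
 * do_div() above, so this prints "3.277 us" plus trailing spaces to
 * keep the duration column aligned.
 */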
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}
/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}
/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the return entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}
static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                " /* 16 spaces */
		"    " /* 4 spaces */
		"                 "; /* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}
static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}
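
/*
 * Illustrative sample of the resulting layout with the default flags
 * (not part of the original file; spacing approximate):
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 *   1)               |  schedule() {
 *   1)   2.345 us    |    deactivate_task();
 *   1) + 40.542 us   |  }
 *
 * The '+' mark before the duration is the overhead sign emitted by
 * trace_find_mark() for slower functions.
 */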
static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}
void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}
void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}
static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}
static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};
static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};
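
/*
 * Usage note (illustrative, not part of the original file): once this
 * tracer is registered, it is enabled from tracefs like any other
 * tracer, e.g.:
 *
 *	echo function_graph > /sys/kernel/tracing/current_tracer
 *	cat /sys/kernel/tracing/trace
 */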
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}
static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}
static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};
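
/*
 * Usage note (illustrative, not part of the original file): the
 * "max_graph_depth" file created below bounds fgraph_max_depth, where
 * zero means no limit, e.g.:
 *
 *	echo 1 > /sys/kernel/tracing/max_graph_depth
 *
 * traces only the outermost functions.
 */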
static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);
static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);