/*
 * ring buffer based initcalls tracer
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/time.h>

#include "trace.h"
#include "trace_output.h"

static struct trace_array *boot_trace;
static bool pre_initcalls_finished;

/*
 * Tells the boot tracer that the pre_smp_initcalls are finished.
 * From this point on we are ready to record initcall events.
 * It doesn't enable sched events tracing however.
 * You have to call enable_boot_trace to do so.
 */
void start_boot_trace(void)
{
	pre_initcalls_finished = true;
}
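
/*
 * Start/stop recording of sched_switch events alongside the initcall
 * events, once the tracer is active and the pre-smp initcalls are done.
 */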
void enable_boot_trace(void)
{
	if (boot_trace && pre_initcalls_finished)
		tracing_start_sched_switch_record();
}

void disable_boot_trace(void)
{
	if (boot_trace && pre_initcalls_finished)
		tracing_stop_sched_switch_record();
}
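
/*
 * Called when the "initcall" tracer is selected: remember the trace
 * array, reset its per-cpu buffers and let sched_switch events be
 * recorded into it.
 */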
static int boot_trace_init(struct trace_array *tr)
{
	boot_trace = tr;

	if (!tr)
		return 0;

	tracing_reset_online_cpus(tr);

	tracing_sched_switch_assign_trace(tr);
	return 0;
}
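
/* Format a TRACE_BOOT_CALL entry: "[ time] calling <func> @ <caller>" */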
static enum print_line_t
initcall_call_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct trace_boot_call *field;
	struct boot_trace_call *call;
	u64 ts;
	unsigned long nsec_rem;
	int ret;

	trace_assign_type(field, entry);
	call = &field->boot_call;
	ts = iter->ts;
	nsec_rem = do_div(ts, NSEC_PER_SEC);

	ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
			(unsigned long)ts, nsec_rem, call->func, call->caller);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	else
		return TRACE_TYPE_HANDLED;
}
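
/* Format a TRACE_BOOT_RET entry: initcall name, return value and duration */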
static enum print_line_t
initcall_ret_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct trace_boot_ret *field;
	struct boot_trace_ret *init_ret;
	u64 ts;
	unsigned long nsec_rem;
	int ret;

	trace_assign_type(field, entry);
	init_ret = &field->boot_ret;
	ts = iter->ts;
	nsec_rem = do_div(ts, NSEC_PER_SEC);

	ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
			"returned %d after %llu msecs\n",
			(unsigned long) ts,
			nsec_rem,
			init_ret->func, init_ret->result, init_ret->duration);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	else
		return TRACE_TYPE_HANDLED;
}
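
/* Dispatch to the call/return formatter based on the entry type */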
static enum print_line_t initcall_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_BOOT_CALL:
		return initcall_call_print_line(iter);
	case TRACE_BOOT_RET:
		return initcall_ret_print_line(iter);
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}
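
/* The boot ("initcall") tracer definition */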
struct tracer boot_tracer __read_mostly =
{
	.name		= "initcall",
	.init		= boot_trace_init,
	.reset		= tracing_reset_online_cpus,
	.print_line	= initcall_print_line,
};
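
/* Record a TRACE_BOOT_CALL event in the ring buffer just before an initcall runs */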
void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
	struct ftrace_event_call *call = &event_boot_call;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_boot_call *entry;
	struct trace_array *tr = boot_trace;

	if (!tr || !pre_initcalls_finished)
		return;

	/* Get its name now since this function could
	 * disappear because it is in the .init section.
	 */
	sprint_symbol(bt->func, (unsigned long)fn);
	preempt_disable();

	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_CALL,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->boot_call = *bt;
	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, 0, 0);
 out:
	preempt_enable();
}
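
/* Record a TRACE_BOOT_RET event once the initcall has returned */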
void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
	struct ftrace_event_call *call = &event_boot_ret;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_boot_ret *entry;
	struct trace_array *tr = boot_trace;

	if (!tr || !pre_initcalls_finished)
		return;

	sprint_symbol(bt->func, (unsigned long)fn);
	preempt_disable();

	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_RET,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->boot_ret = *bt;
	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, 0, 0);
 out:
	preempt_enable();
}