kernel/trace/trace_boot.c (blob a29ef23ffb47080d81c41950ac4385566b193058)

/*
 * ring buffer based initcalls tracer
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/time.h>

#include "trace.h"
#include "trace_output.h"
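
/*
 * The trace array handed to us by the tracing core, and a flag telling
 * whether the pre-SMP initcalls have completed (set by start_boot_trace()).
 */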
static struct trace_array *boot_trace;
static bool pre_initcalls_finished;

/*
 * Tells the boot tracer that the pre_smp_initcalls are finished, so we
 * are ready to record. It does not enable sched-switch event tracing,
 * however; call enable_boot_trace() to do that.
 */
void start_boot_trace(void)
{
	pre_initcalls_finished = true;
}
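
/* Start recording sched-switch events alongside the initcall trace. */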
void enable_boot_trace(void)
{
	if (boot_trace && pre_initcalls_finished)
		tracing_start_sched_switch_record();
}
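
/* Stop recording sched-switch events. */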
void disable_boot_trace(void)
{
	if (boot_trace && pre_initcalls_finished)
		tracing_stop_sched_switch_record();
}
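
/*
 * Set up the boot tracer: reset the per-cpu buffers and hand the trace
 * array over to the sched-switch recorder.
 */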
static int boot_trace_init(struct trace_array *tr)
{
	int cpu;
	boot_trace = tr;

	if (!tr)
		return 0;

	for_each_cpu(cpu, cpu_possible_mask)
		tracing_reset(tr, cpu);

	tracing_sched_switch_assign_trace(tr);
	return 0;
}
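
/*
 * Print one TRACE_BOOT_CALL entry: the timestamp, the initcall symbol
 * and its caller.
 */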
static enum print_line_t
initcall_call_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct trace_boot_call *field;
	struct boot_trace_call *call;
	u64 ts;
	unsigned long nsec_rem;
	int ret;

	trace_assign_type(field, entry);
	call = &field->boot_call;
	ts = iter->ts;
	nsec_rem = do_div(ts, NSEC_PER_SEC);

	ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
			(unsigned long)ts, nsec_rem, call->func, call->caller);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	else
		return TRACE_TYPE_HANDLED;
}
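
/*
 * Print one TRACE_BOOT_RET entry: the timestamp, the initcall symbol,
 * its return value and how long it took.
 */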
static enum print_line_t
initcall_ret_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct trace_boot_ret *field;
	struct boot_trace_ret *init_ret;
	u64 ts;
	unsigned long nsec_rem;
	int ret;

	trace_assign_type(field, entry);
	init_ret = &field->boot_ret;
	ts = iter->ts;
	nsec_rem = do_div(ts, NSEC_PER_SEC);

	ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
			"returned %d after %llu msecs\n",
			(unsigned long) ts,
			nsec_rem,
			init_ret->func, init_ret->result, init_ret->duration);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	else
		return TRACE_TYPE_HANDLED;
}
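
/* Dispatch to the right printer depending on the boot entry type. */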
static enum print_line_t initcall_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_BOOT_CALL:
		return initcall_call_print_line(iter);
	case TRACE_BOOT_RET:
		return initcall_ret_print_line(iter);
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}
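
/* The boot tracer itself, registered with the tracing core as "initcall". */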
struct tracer boot_tracer __read_mostly =
{
	.name		= "initcall",
	.init		= boot_trace_init,
	.reset		= tracing_reset_online_cpus,
	.print_line	= initcall_print_line,
};
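
/* Record a TRACE_BOOT_CALL event right before an initcall is run. */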
void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
	struct ring_buffer_event *event;
	struct trace_boot_call *entry;
	struct trace_array *tr = boot_trace;

	if (!tr || !pre_initcalls_finished)
		return;

	/* Get its name now since this function could
	 * disappear because it is in the .init section.
	 */
	sprint_symbol(bt->func, (unsigned long)fn);
	preempt_disable();

	event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->boot_call = *bt;
	trace_buffer_unlock_commit(tr, event, 0, 0);
out:
	preempt_enable();
}
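
/*
 * Record a TRACE_BOOT_RET event once an initcall has returned, carrying
 * its return value and duration.
 */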
void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
	struct ring_buffer_event *event;
	struct trace_boot_ret *entry;
	struct trace_array *tr = boot_trace;

	if (!tr || !pre_initcalls_finished)
		return;

	sprint_symbol(bt->func, (unsigned long)fn);
	preempt_disable();

	event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->boot_ret = *bt;
	trace_buffer_unlock_commit(tr, event, 0, 0);
out:
	preempt_enable();
}