slab: setup cpu caches later on when interrupts are enabled
[linux/fpc-iii.git] / arch / parisc / kernel / ftrace.c
blob9877372ffdba75b209d25e9be21210f19d5b91d9
/*
 * Code for tracing calls in Linux kernel.
 * Copyright (C) 2009 Helge Deller <deller@gmx.de>
 *
 * based on code for x86 which is:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * future possible enhancements:
 *	- add CONFIG_DYNAMIC_FTRACE
 *	- add CONFIG_STACK_TRACER
 */
13 #include <linux/init.h>
14 #include <linux/ftrace.h>
16 #include <asm/sections.h>
17 #include <asm/ftrace.h>
21 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
23 /* Add a function return address to the trace stack on thread info.*/
24 static int push_return_trace(unsigned long ret, unsigned long long time,
25 unsigned long func, int *depth)
27 int index;
29 if (!current->ret_stack)
30 return -EBUSY;
32 /* The return trace stack is full */
33 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
34 atomic_inc(&current->trace_overrun);
35 return -EBUSY;
38 index = ++current->curr_ret_stack;
39 barrier();
40 current->ret_stack[index].ret = ret;
41 current->ret_stack[index].func = func;
42 current->ret_stack[index].calltime = time;
43 *depth = index;
45 return 0;
48 /* Retrieve a function return address to the trace stack on thread info.*/
49 static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
51 int index;
53 index = current->curr_ret_stack;
55 if (unlikely(index < 0)) {
56 ftrace_graph_stop();
57 WARN_ON(1);
58 /* Might as well panic, otherwise we have no where to go */
59 *ret = (unsigned long)
60 dereference_function_descriptor(&panic);
61 return;
64 *ret = current->ret_stack[index].ret;
65 trace->func = current->ret_stack[index].func;
66 trace->calltime = current->ret_stack[index].calltime;
67 trace->overrun = atomic_read(&current->trace_overrun);
68 trace->depth = index;
69 barrier();
70 current->curr_ret_stack--;
75 * Send the trace to the ring-buffer.
76 * @return the original return address.
78 unsigned long ftrace_return_to_handler(unsigned long retval0,
79 unsigned long retval1)
81 struct ftrace_graph_ret trace;
82 unsigned long ret;
84 pop_return_trace(&trace, &ret);
85 trace.rettime = cpu_clock(raw_smp_processor_id());
86 ftrace_graph_return(&trace);
88 if (unlikely(!ret)) {
89 ftrace_graph_stop();
90 WARN_ON(1);
91 /* Might as well panic. What else to do? */
92 ret = (unsigned long)
93 dereference_function_descriptor(&panic);
96 /* HACK: we hand over the old functions' return values
97 in %r23 and %r24. Assembly in entry.S will take care
98 and move those to their final registers %ret0 and %ret1 */
99 asm( "copy %0, %%r23 \n\t"
100 "copy %1, %%r24 \n" : : "r" (retval0), "r" (retval1) );
102 return ret;
106 * Hook the return address and push it in the stack of return addrs
107 * in current thread info.
109 void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
111 unsigned long old;
112 unsigned long long calltime;
113 struct ftrace_graph_ent trace;
115 if (unlikely(atomic_read(&current->tracing_graph_pause)))
116 return;
118 old = *parent;
119 *parent = (unsigned long)
120 dereference_function_descriptor(&return_to_handler);
122 if (unlikely(!__kernel_text_address(old))) {
123 ftrace_graph_stop();
124 *parent = old;
125 WARN_ON(1);
126 return;
129 calltime = cpu_clock(raw_smp_processor_id());
131 if (push_return_trace(old, calltime,
132 self_addr, &trace.depth) == -EBUSY) {
133 *parent = old;
134 return;
137 trace.func = self_addr;
139 /* Only trace if the calling function expects to */
140 if (!ftrace_graph_entry(&trace)) {
141 current->curr_ret_stack--;
142 *parent = old;
146 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
149 void ftrace_function_trampoline(unsigned long parent,
150 unsigned long self_addr,
151 unsigned long org_sp_gr3)
153 extern ftrace_func_t ftrace_trace_function;
155 if (function_trace_stop)
156 return;
158 if (ftrace_trace_function != ftrace_stub) {
159 ftrace_trace_function(parent, self_addr);
160 return;
162 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
163 if (ftrace_graph_entry && ftrace_graph_return) {
164 unsigned long sp;
165 unsigned long *parent_rp;
167 asm volatile ("copy %%r30, %0" : "=r"(sp));
168 /* sanity check: is stack pointer which we got from
169 assembler function in entry.S in a reasonable
170 range compared to current stack pointer? */
171 if ((sp - org_sp_gr3) > 0x400)
172 return;
174 /* calculate pointer to %rp in stack */
175 parent_rp = (unsigned long *) org_sp_gr3 - 0x10;
176 /* sanity check: parent_rp should hold parent */
177 if (*parent_rp != parent)
178 return;
180 prepare_ftrace_return(parent_rp, self_addr);
181 return;
183 #endif