/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>

/* The ->stack() callback: stack-section names are not recorded, keep walking. */
static int save_stack_stack(void *data, char *name)
{
        return 0;
}
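
/*
 * Common helper for the ->address() callbacks below: honour the requested
 * skip count, optionally drop scheduler-internal addresses (nosched), and
 * store the address into the stack_trace buffer.
 */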
static void
__save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
{
        struct stack_trace *trace = data;
#ifdef CONFIG_FRAME_POINTER
        /* With frame pointers, only record addresses the unwinder marked reliable. */
        if (!reliable)
                return;
#endif
        if (nosched && in_sched_functions(addr))
                return;

        if (trace->skip > 0) {
                trace->skip--;
                return;
        }
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = addr;
}

static void save_stack_address(void *data, unsigned long addr, int reliable)
{
        return __save_stack_address(data, addr, reliable, false);
}

static void
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
        return __save_stack_address(data, addr, reliable, true);
}
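
/*
 * Callback tables handed to dump_trace(): both record addresses via the
 * helpers above; the "nosched" variant additionally filters out scheduler
 * internals, which is what save_stack_trace_tsk() wants.
 */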
static const struct stacktrace_ops save_stack_ops = {
        .stack          = save_stack_stack,
        .address        = save_stack_address,
        .walk_stack     = print_context_stack,
};

static const struct stacktrace_ops save_stack_ops_nosched = {
        .stack          = save_stack_stack,
        .address        = save_stack_address_nosched,
        .walk_stack     = print_context_stack,
};

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
        dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
        /* Terminate the trace with ULONG_MAX if there is room left. */
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
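
/*
 * Like save_stack_trace(), but the walk starts from a caller-supplied
 * register state instead of the current context.
 */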
void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs)
{
        dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
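
/*
 * Layout of one user-space stack frame when built with frame pointers:
 * the saved frame pointer comes first, followed by the return address.
 */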
struct stack_frame_user {
        const void __user       *next_fp;
        unsigned long           ret_addr;
};

static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
        int ret;

        if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
                return 0;

        /* Peek at the frame without faulting in user memory. */
        ret = 1;
        pagefault_disable();
        if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
                ret = 0;
        pagefault_enable();

        return ret;
}

static inline void __save_stack_trace_user(struct stack_trace *trace)
{
        const struct pt_regs *regs = task_pt_regs(current);
        const void __user *fp = (const void __user *)regs->bp;

        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = regs->ip;

        while (trace->nr_entries < trace->max_entries) {
                struct stack_frame_user frame;

                frame.next_fp = NULL;
                frame.ret_addr = 0;
                if (!copy_stack_frame(fp, &frame))
                        break;
                if ((unsigned long)fp < regs->sp)
                        break;
                if (frame.ret_addr) {
                        trace->entries[trace->nr_entries++] =
                                frame.ret_addr;
                }
                /* Stop on a self-referencing frame to avoid an endless loop. */
                if (fp == frame.next_fp)
                        break;
                fp = frame.next_fp;
        }
}

void save_stack_trace_user(struct stack_trace *trace)
{
        /*
         * Trace user stack if we are not a kernel thread
         */
        if (current->mm)
                __save_stack_trace_user(trace);

        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}