/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
static int save_stack_stack(void *data, char *name)
{
	return 0;
}
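/*
 * Common ->address() callback used by both stacktrace_ops tables below.
 * dump_trace() invokes it for every text address found on the stack:
 * with CONFIG_FRAME_POINTER only frame-pointer-confirmed ("reliable")
 * addresses are recorded, trace->skip drops the first few entries, and
 * a negative return tells the walker that the entry buffer is full.
 */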
static int
__save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
{
	struct stack_trace *trace = data;
#ifdef CONFIG_FRAME_POINTER
	if (!reliable)
		return 0;
#endif
	if (nosched && in_sched_functions(addr))
		return 0;
	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}
	if (trace->nr_entries < trace->max_entries) {
		trace->entries[trace->nr_entries++] = addr;
		return 0;
	}

	return -1; /* no more room, stop walking the stack */
}
static int save_stack_address(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, false);
}
static int
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, true);
}
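/*
 * Two callback tables for dump_trace(); they differ only in the
 * ->address() hook.  The _nosched variant filters out addresses inside
 * the scheduler so that a blocked task's trace shows where it sleeps
 * rather than the scheduler internals.  Both use print_context_stack()
 * as the frame walker.
 */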
static const struct stacktrace_ops save_stack_ops = {
	.stack		= save_stack_stack,
	.address	= save_stack_address,
	.walk_stack	= print_context_stack,
};
static const struct stacktrace_ops save_stack_ops_nosched = {
	.stack		= save_stack_stack,
	.address	= save_stack_address_nosched,
	.walk_stack	= print_context_stack,
};
/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
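/*
 * Typical caller (a sketch only, not part of this file; the buffer
 * size and skip value are illustrative):
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *		.nr_entries	= 0,
 *		.skip		= 1,	(drop the immediate caller)
 *	};
 *
 *	save_stack_trace(&trace);
 *	print_stack_trace(&trace, 0);
 *
 * The trailing ULONG_MAX stored above marks the end of a trace that
 * did not fill the whole buffer.
 */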
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
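/*
 * Mirrors the frame layout the compiler emits for user code built with
 * frame pointers: at the address in the frame-pointer register sits the
 * saved previous frame pointer, immediately followed by the return
 * address.
 */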
struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};
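/*
 * Copy one user frame without risking a sleep: the trace may be taken
 * from contexts that must not fault, so the copy runs with page faults
 * disabled and simply fails if the frame is not resident in memory.
 */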
static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}
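/*
 * Walk the user-space frame-pointer chain starting at regs->bp.  The
 * current user ip is recorded first, then each saved return address,
 * until the buffer fills, a frame cannot be copied, the chain points
 * below the user stack pointer, or it stops advancing.
 */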
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}
void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm) {
		__save_stack_trace_user(trace);
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}