/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>

#define N_EXCEPTION_STACKS_END \
	(N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2)

static char x86_stack_ids[][8] = {
	[ DEBUG_STACK-1		] = "#DB",
	[ NMI_STACK-1		] = "NMI",
	[ DOUBLEFAULT_STACK-1	] = "#DF",
	[ MCE_STACK-1		] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
	[ N_EXCEPTION_STACKS ...
	  N_EXCEPTION_STACKS_END] = "#DB[?]"
#endif
};
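
/*
 * The '?' in "#DB[?]" (character index 4) is patched at run time by
 * in_exception_stack() below to '1', '2', ..., naming the individual
 * exception-stack-sized slices of an oversized debug stack.
 */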

static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					 unsigned *usedp, char **idp)
{
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = x86_stack_ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Don't look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				x86_stack_ids[j][4] = '1' +
						(j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = x86_stack_ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}
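
/*
 * Worked example for the #if block above, assuming EXCEPTION_STKSZ is 4K
 * and DEBUG_STKSZ is 8K: the debug stack then spans two 4K slices. An
 * address in the lower slice fails the first range check, the do/while
 * steps 'end' down by one EXCEPTION_STKSZ, and the slice is reported
 * under the patched id "#DB[1]".
 */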

static inline int
in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
	     unsigned long *irq_stack_end)
{
	return (stack >= irq_stack && stack < irq_stack_end);
}
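
/*
 * Number of usable slots on the per-cpu IRQ stack. The 64-byte slack
 * matches the offset applied when irq_stack_ptr is set up: it points
 * 64 bytes below the true top of the stack.
 */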
static const unsigned long irq_stack_size =
	(IRQ_STACK_SIZE - 64) / sizeof(unsigned long);

enum stack_type {
	STACK_IS_UNKNOWN,
	STACK_IS_NORMAL,
	STACK_IS_EXCEPTION,
	STACK_IS_IRQ,
};

static enum stack_type
analyze_stack(int cpu, struct task_struct *task, unsigned long *stack,
	      unsigned long **stack_end, unsigned long *irq_stack,
	      unsigned *used, char **id)
{
	unsigned long addr;

	addr = ((unsigned long)stack & (~(THREAD_SIZE - 1)));
	if ((unsigned long)task_stack_page(task) == addr)
		return STACK_IS_NORMAL;

	*stack_end = in_exception_stack(cpu, (unsigned long)stack,
					used, id);
	if (*stack_end)
		return STACK_IS_EXCEPTION;

	if (!irq_stack)
		return STACK_IS_NORMAL;

	*stack_end = irq_stack;
	irq_stack = irq_stack - irq_stack_size;

	if (in_irq_stack(stack, irq_stack, *stack_end))
		return STACK_IS_IRQ;

	return STACK_IS_UNKNOWN;
}
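
/*
 * Note that the masking in analyze_stack() relies on kernel stacks
 * being THREAD_SIZE-aligned: clearing the low bits of any address
 * inside a task stack yields the stack base, which is exactly what
 * task_stack_page() returns.
 */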

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	struct thread_info *tinfo;
	unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
	unsigned long dummy;
	unsigned used = 0;
	int graph = 0;
	int done = 0;

	if (!task)
		task = current;

	if (!stack) {
		if (regs)
			stack = (unsigned long *)regs->sp;
		else if (task != current)
			stack = (unsigned long *)task->thread.sp;
		else
			stack = &dummy;
	}

	if (!bp)
		bp = stack_frame(task, regs);
	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions, each exception stack is walked in turn:
	 */
	tinfo = task_thread_info(task);
	while (!done) {
		unsigned long *stack_end;
		enum stack_type stype;
		char *id;

		stype = analyze_stack(cpu, task, stack, &stack_end,
				      irq_stack, &used, &id);

		/* Default finish unless specified to continue */
		done = 1;

		switch (stype) {

		/* Break out early if we are on the thread stack */
		case STACK_IS_NORMAL:
			break;

		case STACK_IS_EXCEPTION:

			if (ops->stack(data, id) < 0)
				break;

			bp = ops->walk_stack(tinfo, stack, bp, ops,
					     data, stack_end, &graph);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) stack_end[-2];
			done = 0;
			break;

		case STACK_IS_IRQ:

			if (ops->stack(data, "IRQ") < 0)
				break;
			bp = ops->walk_stack(tinfo, stack, bp,
					     ops, data, stack_end, &graph);
			/*
			 * We link to the next stack (which would be
			 * the process stack normally) via the last
			 * pointer (index -1 to end) in the IRQ stack:
			 */
			stack = (unsigned long *) (stack_end[-1]);
			irq_stack = NULL;
			ops->stack(data, "EOI");
			done = 0;
			break;

		case STACK_IS_UNKNOWN:
			ops->stack(data, "UNK");
			break;
		}
	}

	/*
	 * This handles the process stack:
	 */
	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);
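
/*
 * Note the hand-off convention used above: an exception stack stores
 * the pointer to the next stack at index -2 from its end, the IRQ
 * stack at index -1, so a trace that starts on any of the three stack
 * types can always walk back to the process stack.
 */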

void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
		   unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *irq_stack_end;
	unsigned long *irq_stack;
	unsigned long *stack;
	int cpu;
	int i;

	preempt_disable();
	cpu = smp_processor_id();

	irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
	irq_stack = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);

	/*
	 * Debugging aid: "show_stack(NULL, NULL);" prints the
	 * back trace for this cpu:
	 */
	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irq_stack && stack <= irq_stack_end) {
			if (stack == irq_stack_end) {
				stack = (unsigned long *) (irq_stack_end[-1]);
				pr_cont(" <EOI> ");
			}
		} else {
			if (kstack_end(stack))
				break;
		}
		if ((i % STACKSLOTS_PER_LINE) == 0) {
			if (i != 0)
				pr_cont("\n");
			printk("%s %016lx", log_lvl, *stack++);
		} else
			pr_cont(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	preempt_enable();

	pr_cont("\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
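
/*
 * In the raw dump above, " <EOI> " marks the point where the dump
 * crosses from the IRQ stack to the process stack, following the same
 * saved link (irq_stack_end[-1]) that dump_trace() uses.
 */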

void show_regs(struct pt_regs *regs)
{
	int i;
	unsigned long sp;

	sp = regs->sp;
	show_regs_print_info(KERN_DEFAULT);
	__show_regs(regs, 1);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;
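		/*
		 * With the default code_bytes of 64, code_prologue works
		 * out to 43: the dump shows 43 bytes before the faulting
		 * instruction and 21 from RIP onward, biasing the output
		 * toward the code leading up to the fault.
		 */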

		printk(KERN_DEFAULT "Stack:\n");
		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
				   0, KERN_DEFAULT);

		printk(KERN_DEFAULT "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				pr_cont(" Bad RIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				pr_cont("<%02x> ", c);
			else
				pr_cont("%02x ", c);
		}
	}
	pr_cont("\n");
}

int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *)ip, sizeof(ud2)))
		return 0;
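
	/*
	 * 0x0b0f is the two-byte UD2 instruction (0x0f 0x0b) read as a
	 * little-endian 16-bit value; BUG() plants UD2 at the bug site.
	 */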
	return ud2 == 0x0b0f;
}