Linux 4.1.18
[linux/fpc-iii.git] / arch / x86 / kernel / dumpstack_32.c
blob464ffd69b92e9ef376b9c534aec3c12973d6ad7a
1 /*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
4 */
5 #include <linux/kallsyms.h>
6 #include <linux/kprobes.h>
7 #include <linux/uaccess.h>
8 #include <linux/hardirq.h>
9 #include <linux/kdebug.h>
10 #include <linux/module.h>
11 #include <linux/ptrace.h>
12 #include <linux/kexec.h>
13 #include <linux/sysfs.h>
14 #include <linux/bug.h>
15 #include <linux/nmi.h>
17 #include <asm/stacktrace.h>
19 static void *is_irq_stack(void *p, void *irq)
21 if (p < irq || p >= (irq + THREAD_SIZE))
22 return NULL;
23 return irq + THREAD_SIZE;
27 static void *is_hardirq_stack(unsigned long *stack, int cpu)
29 void *irq = per_cpu(hardirq_stack, cpu);
31 return is_irq_stack(stack, irq);
34 static void *is_softirq_stack(unsigned long *stack, int cpu)
36 void *irq = per_cpu(softirq_stack, cpu);
38 return is_irq_stack(stack, irq);
/*
 * Walk the kernel stack(s) of @task, invoking @ops callbacks for each
 * frame found, starting from @stack/@bp (or sane defaults when NULL/0).
 *
 * On 32-bit x86 hard/soft irqs run on separate per-cpu stacks; when the
 * walk reaches the end of an irq stack it follows the saved previous
 * esp (stored at the bottom of that irq stack) back onto the stack that
 * was interrupted, looping until a non-irq stack is exhausted.
 *
 * @task:  task whose stack to walk; NULL means current.
 * @regs:  register state at the point of interest (may be NULL).
 * @stack: starting stack pointer; NULL means derive one here.
 * @bp:    starting frame pointer; 0 means derive via stack_frame().
 * @ops:   callbacks (walk_stack, stack) driving the traversal.
 * @data:  opaque cookie passed through to @ops.
 */
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	/* Pin this CPU: the per-cpu irq stack pointers must stay stable. */
	const unsigned cpu = get_cpu();
	int graph = 0;
	u32 *prev_esp;

	if (!task)
		task = current;

	if (!stack) {
		unsigned long dummy;

		/*
		 * For the current task, the address of a local is a good
		 * enough approximation of the live stack pointer; for any
		 * other task use its saved kernel sp.
		 */
		stack = &dummy;
		if (task != current)
			stack = (unsigned long *)task->thread.sp;
	}

	if (!bp)
		bp = stack_frame(task, regs);

	for (;;) {
		struct thread_info *context;
		void *end_stack;

		/*
		 * Bound the walk at the end of the irq stack we are on,
		 * if any, so walk_stack() does not run off it.
		 */
		end_stack = is_hardirq_stack(stack, cpu);
		if (!end_stack)
			end_stack = is_softirq_stack(stack, cpu);

		context = task_thread_info(task);
		bp = ops->walk_stack(context, stack, bp, ops, data,
				     end_stack, &graph);

		/* Stop if not on irq stack */
		if (!end_stack)
			break;

		/* The previous esp is saved on the bottom of the stack */
		prev_esp = (u32 *)(end_stack - THREAD_SIZE);
		stack = (unsigned long *)*prev_esp;
		if (!stack)
			break;

		/* Let the callback annotate the transition; <0 aborts. */
		if (ops->stack(data, "IRQ") < 0)
			break;
		/* Long walks shouldn't trip the NMI watchdog. */
		touch_nmi_watchdog();
	}
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);
/*
 * Print a raw hex dump of the stack at @sp (up to kstack_depth_to_print
 * words, STACKSLOTS_PER_LINE per printk line) at log level @log_lvl,
 * then print the decoded call trace via show_trace_log_lvl().
 *
 * @sp NULL means: use @task's saved sp, or, with no task either, the
 * address of a local as an approximation of the current stack pointer.
 */
void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
		   unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *stack;
	int i;

	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		/* Stop at the top of the (thread) stack. */
		if (kstack_end(stack))
			break;
		if ((i % STACKSLOTS_PER_LINE) == 0) {
			/* Start a fresh line carrying the log level. */
			if (i != 0)
				pr_cont("\n");
			printk("%s %08lx", log_lvl, *stack++);
		} else
			pr_cont(" %08lx", *stack++);
		/* Printing many words can be slow; keep the NMI watchdog quiet. */
		touch_nmi_watchdog();
	}
	pr_cont("\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
/*
 * Emergency dump of register state for @regs; for faults taken in
 * kernel mode it additionally dumps the stack and the code bytes
 * around the faulting instruction pointer.
 */
void show_regs(struct pt_regs *regs)
{
	int i;

	show_regs_print_info(KERN_EMERG);
	__show_regs(regs, !user_mode(regs));

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		/* Show roughly 43/64 of the code window before EIP. */
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		pr_emerg("Stack:\n");
		show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);

		pr_emerg("Code:");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			/* Each byte is probed: it may span into an unmapped page. */
			if (ip < (u8 *)PAGE_OFFSET ||
			    probe_kernel_address(ip, c)) {
				pr_cont(" Bad EIP value.");
				break;
			}
			/* Highlight the byte at the faulting EIP. */
			if (ip == (u8 *)regs->ip)
				pr_cont(" <%02x>", c);
			else
				pr_cont(" %02x", c);
		}
	}
	pr_cont("\n");
}
167 int is_valid_bugaddr(unsigned long ip)
169 unsigned short ud2;
171 if (ip < PAGE_OFFSET)
172 return 0;
173 if (probe_kernel_address((unsigned short *)ip, ud2))
174 return 0;
176 return ud2 == 0x0b0f;