[NETFILTER]: PPTP conntrack: simplify expectation handling
[hh.org.git] / arch / x86_64 / kernel / stacktrace.c
blob32cf55eb9af87b33259afe4537504b958ebe6b53
1 /*
2 * arch/x86_64/kernel/stacktrace.c
4 * Stack trace management functions
6 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 */
8 #include <linux/sched.h>
9 #include <linux/stacktrace.h>
11 #include <asm/smp.h>
/*
 * Does 'addr' lie within [start, end]? Both bounds are inclusive
 * (the "end" of a stack is one past its highest usable word, and
 * callers pass that value directly).
 */
static inline int
in_range(unsigned long start, unsigned long addr, unsigned long end)
{
	if (addr < start)
		return 0;

	return addr <= end;
}
19 static unsigned long
20 get_stack_end(struct task_struct *task, unsigned long stack)
22 unsigned long stack_start, stack_end, flags;
23 int i, cpu;
26 * The most common case is that we are in the task stack:
28 stack_start = (unsigned long)task->thread_info;
29 stack_end = stack_start + THREAD_SIZE;
31 if (in_range(stack_start, stack, stack_end))
32 return stack_end;
35 * We are in an interrupt if irqstackptr is set:
37 raw_local_irq_save(flags);
38 cpu = safe_smp_processor_id();
39 stack_end = (unsigned long)cpu_pda(cpu)->irqstackptr;
41 if (stack_end) {
42 stack_start = stack_end & ~(IRQSTACKSIZE-1);
43 if (in_range(stack_start, stack, stack_end))
44 goto out_restore;
46 * We get here if we are in an IRQ context but we
47 * are also in an exception stack.
52 * Iterate over all exception stacks, and figure out whether
53 * 'stack' is in one of them:
55 for (i = 0; i < N_EXCEPTION_STACKS; i++) {
57 * set 'end' to the end of the exception stack.
59 stack_end = per_cpu(init_tss, cpu).ist[i];
60 stack_start = stack_end - EXCEPTION_STKSZ;
63 * Is 'stack' above this exception frame's end?
64 * If yes then skip to the next frame.
66 if (stack >= stack_end)
67 continue;
69 * Is 'stack' above this exception frame's start address?
70 * If yes then we found the right frame.
72 if (stack >= stack_start)
73 goto out_restore;
76 * If this is a debug stack, and if it has a larger size than
77 * the usual exception stacks, then 'stack' might still
78 * be within the lower portion of the debug stack:
80 #if DEBUG_STKSZ > EXCEPTION_STKSZ
81 if (i == DEBUG_STACK - 1 && stack >= stack_end - DEBUG_STKSZ) {
83 * Black magic. A large debug stack is composed of
84 * multiple exception stack entries, which we
85 * iterate through now. Dont look:
87 do {
88 stack_end -= EXCEPTION_STKSZ;
89 stack_start -= EXCEPTION_STKSZ;
90 } while (stack < stack_start);
92 goto out_restore;
94 #endif
97 * Ok, 'stack' is not pointing to any of the system stacks.
99 stack_end = 0;
101 out_restore:
102 raw_local_irq_restore(flags);
104 return stack_end;
109 * Save stack-backtrace addresses into a stack_trace buffer:
111 static inline unsigned long
112 save_context_stack(struct stack_trace *trace, unsigned int skip,
113 unsigned long stack, unsigned long stack_end)
115 unsigned long addr;
117 #ifdef CONFIG_FRAME_POINTER
118 unsigned long prev_stack = 0;
120 while (in_range(prev_stack, stack, stack_end)) {
121 pr_debug("stack: %p\n", (void *)stack);
122 addr = (unsigned long)(((unsigned long *)stack)[1]);
123 pr_debug("addr: %p\n", (void *)addr);
124 if (!skip)
125 trace->entries[trace->nr_entries++] = addr-1;
126 else
127 skip--;
128 if (trace->nr_entries >= trace->max_entries)
129 break;
130 if (!addr)
131 return 0;
133 * Stack frames must go forwards (otherwise a loop could
134 * happen if the stackframe is corrupted), so we move
135 * prev_stack forwards:
137 prev_stack = stack;
138 stack = (unsigned long)(((unsigned long *)stack)[0]);
140 pr_debug("invalid: %p\n", (void *)stack);
141 #else
142 while (stack < stack_end) {
143 addr = ((unsigned long *)stack)[0];
144 stack += sizeof(long);
145 if (__kernel_text_address(addr)) {
146 if (!skip)
147 trace->entries[trace->nr_entries++] = addr-1;
148 else
149 skip--;
150 if (trace->nr_entries >= trace->max_entries)
151 break;
154 #endif
155 return stack;
158 #define MAX_STACKS 10
161 * Save stack-backtrace addresses into a stack_trace buffer.
162 * If all_contexts is set, all contexts (hardirq, softirq and process)
163 * are saved. If not set then only the current context is saved.
165 void save_stack_trace(struct stack_trace *trace,
166 struct task_struct *task, int all_contexts,
167 unsigned int skip)
169 unsigned long stack = (unsigned long)&stack;
170 int i, nr_stacks = 0, stacks_done[MAX_STACKS];
172 WARN_ON(trace->nr_entries || !trace->max_entries);
174 if (!task)
175 task = current;
177 pr_debug("task: %p, ti: %p\n", task, task->thread_info);
179 if (!task || task == current) {
180 /* Grab rbp right from our regs: */
181 asm ("mov %%rbp, %0" : "=r" (stack));
182 pr_debug("rbp: %p\n", (void *)stack);
183 } else {
184 /* rbp is the last reg pushed by switch_to(): */
185 stack = task->thread.rsp;
186 pr_debug("other task rsp: %p\n", (void *)stack);
187 stack = (unsigned long)(((unsigned long *)stack)[0]);
188 pr_debug("other task rbp: %p\n", (void *)stack);
191 while (1) {
192 unsigned long stack_end = get_stack_end(task, stack);
194 pr_debug("stack: %p\n", (void *)stack);
195 pr_debug("stack end: %p\n", (void *)stack_end);
198 * Invalid stack addres?
200 if (!stack_end)
201 return;
203 * Were we in this stack already? (recursion)
205 for (i = 0; i < nr_stacks; i++)
206 if (stacks_done[i] == stack_end)
207 return;
208 stacks_done[nr_stacks] = stack_end;
210 stack = save_context_stack(trace, skip, stack, stack_end);
211 if (!all_contexts || !stack ||
212 trace->nr_entries >= trace->max_entries)
213 return;
214 trace->entries[trace->nr_entries++] = ULONG_MAX;
215 if (trace->nr_entries >= trace->max_entries)
216 return;
217 if (++nr_stacks >= MAX_STACKS)
218 return;