arch/tile/kernel/stack.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/hardirq.h>
#include <linux/string.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/ucontext.h>
#include <asm/switch_to.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <asm/vdso.h>
#include <arch/abi.h>
#include <arch/interrupts.h>

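/*
 * kbt->end holds one of the following values; a walk stays at
 * KBT_ONGOING until it reaches one of the terminal states (see
 * KBacktraceIterator_end() below).
 */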
#define KBT_ONGOING	0	/* Backtrace still ongoing */
#define KBT_DONE	1	/* Backtrace cleanly completed */
#define KBT_RUNNING	2	/* Can't run backtrace on a running task */
#define KBT_LOOP	3	/* Backtrace entered a loop */

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
{
	ulong kstack_base = (ulong) kbt->task->stack;

	if (kstack_base == 0)	/* corrupt task pointer; just follow stack... */
		return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, unsigned long address,
			     unsigned int size, void *vkbt)
{
	int retval;
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;

	if (address == 0)
		return 0;
	if (__kernel_text_address(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* We only tolerate kernel-space reads of this task's stack */
		if (!in_kernel_stack(kbt, address))
			return 0;
	} else if (!kbt->is_current) {
		return 0;	/* can't read from other user address spaces */
	}
	pagefault_disable();
	retval = __copy_from_user_inatomic(result,
					   (void __user __force *)address,
					   size);
	pagefault_enable();
	return (retval == 0);
}

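/*
 * backtrace_init() and backtrace_next() below hand this callback each
 * address they want to read while unwinding; a false return simply
 * ends the walk, and the pagefault_disable() bracket above turns a
 * bad address into a failed copy rather than a fault.
 */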
/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
{
	const char *fault = NULL;	/* happy compiler */
	char fault_buf[64];
	unsigned long sp = kbt->it.sp;
	struct pt_regs *p;

	if (sp % sizeof(long) != 0)
		return NULL;
	if (!in_kernel_stack(kbt, sp))
		return NULL;
	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
		return NULL;
	p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
	if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
		fault = "syscall";
	else {
		if (kbt->verbose) {	/* else we aren't going to use it */
			snprintf(fault_buf, sizeof(fault_buf),
				 "interrupt %ld", p->faultnum);
			fault = fault_buf;
		}
	}
	if (EX1_PL(p->ex1) == KERNEL_PL &&
	    __kernel_text_address(p->pc) &&
	    in_kernel_stack(kbt, p->sp) &&
	    p->sp >= sp) {
		if (kbt->verbose)
			pr_err("  <%s while in kernel mode>\n", fault);
	} else if (user_mode(p) &&
		   p->sp < PAGE_OFFSET && p->sp != 0) {
		if (kbt->verbose)
			pr_err("  <%s while in user mode>\n", fault);
	} else {
		if (kbt->verbose && (p->pc != 0 || p->sp != 0 || p->ex1 != 0))
			pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
			       p->pc, p->sp, p->ex1);
		return NULL;
	}
	if (kbt->profile && ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) != 0)
		return NULL;
	return p;
}

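/*
 * The stack layout this relies on (a sketch derived from the bounds
 * checks above): the interrupted context's pt_regs sits at
 * sp + C_ABI_SAVE_AREA_SIZE, so both that offset and the last byte of
 * the pt_regs (sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE - 1) must land
 * on this task's kernel stack before we dereference anything.
 */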
/* Is the iterator pointing to a sigreturn trampoline? */
static int is_sigreturn(struct KBacktraceIterator *kbt)
{
	return kbt->task->mm &&
	       (kbt->it.pc == ((ulong)kbt->task->mm->context.vdso_base +
			       (ulong)&__vdso_rt_sigreturn));
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
				      struct rt_sigframe* kframe)
{
	BacktraceIterator *b = &kbt->it;

	if (is_sigreturn(kbt) && b->sp < PAGE_OFFSET &&
	    b->sp % sizeof(long) == 0) {
		int retval;

		pagefault_disable();
		retval = __copy_from_user_inatomic(
			kframe, (void __user __force *)b->sp,
			sizeof(*kframe));
		pagefault_enable();
		if (retval != 0 ||
		    (unsigned int)(kframe->info.si_signo) >= _NSIG)
			return NULL;
		if (kbt->verbose) {
			pr_err("  <received signal %d>\n",
			       kframe->info.si_signo);
		}
		return (struct pt_regs *)&kframe->uc.uc_mcontext;
	}
	return NULL;
}

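/*
 * "Restarting" the iterator means crossing from the current frame chain
 * into the context it interrupted: if the walk has reached a fault or
 * signal frame, we pull pc/lr/sp/r52 out of the saved pt_regs and
 * re-seed the underlying backtracer from there.
 */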
static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
	struct pt_regs *p;
	struct rt_sigframe kframe;

	p = valid_fault_handler(kbt);
	if (p == NULL)
		p = valid_sigframe(kbt, &kframe);
	if (p == NULL)
		return 0;
	backtrace_init(&kbt->it, read_memory_func, kbt,
		       p->pc, p->lr, p->sp, p->regs[52]);
	kbt->new_context = 1;
	return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
	struct KBacktraceIterator *kbt)
{
	for (;;) {
		do {
			if (!is_sigreturn(kbt))
				return KBT_ONGOING;
		} while (backtrace_next(&kbt->it));

		if (!KBacktraceIterator_restart(kbt))
			return KBT_DONE;
	}
}

/*
 * If the current sp is on a page different than what we recorded
 * as the top-of-kernel-stack last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help with debugging,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
	int cpu = raw_smp_processor_id();
	unsigned long ksp0 = get_current_ksp0();
	unsigned long ksp0_base = ksp0 & -THREAD_SIZE;
	unsigned long sp = stack_pointer;

	if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
		pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx underrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
	} else if (sp < ksp0_base + sizeof(struct thread_info)) {
		pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx overrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
	}
}

void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
			     struct task_struct *t, struct pt_regs *regs)
{
	unsigned long pc, lr, sp, r52;
	int is_current;

	/*
	 * Set up callback information.  We grab the kernel stack base
	 * so we will allow reads of that address range.
	 */
	is_current = (t == NULL || t == current);
	kbt->is_current = is_current;
	if (is_current)
		t = validate_current();
	kbt->task = t;
	kbt->verbose = 0;	/* override in caller if desired */
	kbt->profile = 0;	/* override in caller if desired */
	kbt->end = KBT_ONGOING;
	kbt->new_context = 1;
	if (is_current)
		validate_stack(regs);

	if (regs == NULL) {
		if (is_current || t->state == TASK_RUNNING) {
			/* Can't do this; we need registers */
			kbt->end = KBT_RUNNING;
			return;
		}
		pc = get_switch_to_pc();
		lr = t->thread.pc;
		sp = t->thread.ksp;
		r52 = 0;
	} else {
		pc = regs->pc;
		lr = regs->lr;
		sp = regs->sp;
		r52 = regs->regs[52];
	}

	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
	return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
	unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;

	kbt->new_context = 0;
	if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
		kbt->end = KBT_DONE;
		return;
	}
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
	if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
		/* Trapped in a loop; give up. */
		kbt->end = KBT_LOOP;
	}
}
EXPORT_SYMBOL(KBacktraceIterator_next);

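/*
 * Putting the three entry points together, a caller iterates like this
 * (a sketch; tile_show_stack() and save_stack_trace_common() below are
 * the real in-tree consumers, and record_frame() is hypothetical):
 *
 *	struct KBacktraceIterator kbt;
 *
 *	KBacktraceIterator_init(&kbt, task, regs);
 *	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
 *		record_frame(kbt.it.pc, kbt.it.sp);
 */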
static void describe_addr(struct KBacktraceIterator *kbt,
			  unsigned long address,
			  int have_mmap_sem, char *buf, size_t bufsize)
{
	struct vm_area_struct *vma;
	size_t namelen, remaining;
	unsigned long size, offset, adjust;
	char *p, *modname;
	const char *name;
	int rc;

	/*
	 * Look one byte back for every caller frame (i.e. those that
	 * aren't a new context) so we look up symbol data for the
	 * call itself, not the following instruction, which may be on
	 * a different line (or in a different function).
	 */
	adjust = !kbt->new_context;
	address -= adjust;

	if (address >= PAGE_OFFSET) {
		/* Handle kernel symbols. */
		BUG_ON(bufsize < KSYM_NAME_LEN);
		name = kallsyms_lookup(address, &size, &offset,
				       &modname, buf);
		if (name == NULL) {
			buf[0] = '\0';
			return;
		}
		namelen = strlen(buf);
		remaining = (bufsize - 1) - namelen;
		p = buf + namelen;
		rc = snprintf(p, remaining, "+%#lx/%#lx ",
			      offset + adjust, size);
		if (modname && rc < remaining)
			snprintf(p + rc, remaining - rc, "[%s] ", modname);
		buf[bufsize-1] = '\0';
		return;
	}

	/* If we don't have the mmap_sem, we can't show any more info. */
	buf[0] = '\0';
	if (!have_mmap_sem)
		return;

	/* Find vma info. */
	vma = find_vma(kbt->task->mm, address);
	if (vma == NULL || address < vma->vm_start) {
		snprintf(buf, bufsize, "[unmapped address] ");
		return;
	}

	if (vma->vm_file) {
		p = file_path(vma->vm_file, buf, bufsize);
		if (IS_ERR(p))
			p = "?";
		name = kbasename(p);
	} else {
		name = "anon";
	}

	/* Generate a string description of the vma info. */
	namelen = strlen(name);
	remaining = (bufsize - 1) - namelen;
	memmove(buf, name, namelen);
	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
		 vma->vm_start, vma->vm_end - vma->vm_start);
}

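/*
 * Sample descriptions this produces, using the formats above (the
 * symbol, module, and file names are illustrative only):
 *
 *	kernel pc:	"do_work+0x18/0x240 [some_module] "
 *	user pc:	"libfoo.so[10000+8000] "
 *	unmapped pc:	"[unmapped address] "
 */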
/*
 * Avoid possible crash recursion during backtrace: a crash while
 * backtracing makes it easy to lose the actual root cause of the
 * failure, so we put a simple guard on all the backtrace loops.
 */
static bool start_backtrace(void)
{
	if (current_thread_info()->in_backtrace) {
		pr_err("Backtrace requested while in backtrace!\n");
		return false;
	}
	current_thread_info()->in_backtrace = true;
	return true;
}

static void end_backtrace(void)
{
	current_thread_info()->in_backtrace = false;
}

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt)
{
	int i;
	int have_mmap_sem = 0;

	if (!start_backtrace())
		return;
	kbt->verbose = 1;
	i = 0;
	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
		char namebuf[KSYM_NAME_LEN+100];
		unsigned long address = kbt->it.pc;

		/*
		 * Try to acquire the mmap_sem as we pass into userspace.
		 * If we're in an interrupt context, don't even try, since
		 * it's not safe to call e.g. d_path() from an interrupt,
		 * as it uses spin locks without disabling interrupts.
		 * Note we test "kbt->task == current", not "kbt->is_current",
		 * since we're checking that "current" will work in d_path().
		 */
		if (kbt->task == current && address < PAGE_OFFSET &&
		    !have_mmap_sem && kbt->task->mm && !in_interrupt()) {
			have_mmap_sem =
				down_read_trylock(&kbt->task->mm->mmap_sem);
		}

		describe_addr(kbt, address, have_mmap_sem,
			      namebuf, sizeof(namebuf));

		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
		       i++, address, namebuf, (unsigned long)(kbt->it.sp));

		if (i >= 100) {
			pr_err("Stack dump truncated (%d frames)\n", i);
			break;
		}
	}
	if (kbt->end == KBT_LOOP)
		pr_err("Stack dump stopped; next frame identical to this one\n");
	if (have_mmap_sem)
		up_read(&kbt->task->mm->mmap_sem);
	end_backtrace();
}
EXPORT_SYMBOL(tile_show_stack);

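/*
 * A dump from the loop above thus looks roughly like this (the
 * addresses and symbols are illustrative only):
 *
 *	  frame 0: 0xfffffff700123456 do_work+0x18/0x240 (sp 0xfffffe03ffffba20)
 *	  frame 1: 0xfffffff700123480 worker_thread+0x60/0x3a0 (sp 0xfffffe03ffffba80)
 */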
static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
				       ulong pc, ulong lr, ulong sp, ulong r52)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->pc = pc;
	regs->lr = lr;
	regs->sp = sp;
	regs->regs[52] = r52;
	return regs;
}

/* Deprecated function currently only used by kernel_double_fault(). */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
	struct KBacktraceIterator kbt;
	struct pt_regs regs;

	regs_to_pt_regs(&regs, pc, lr, sp, r52);
	KBacktraceIterator_init(&kbt, NULL, &regs);
	tile_show_stack(&kbt);
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
				      ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;

	KBacktraceIterator_init(kbt, NULL,
				regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/*
 * Called from sched_show_task() with task != NULL, or dump_stack()
 * with task == NULL.  The esp argument is always NULL.
 */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	struct KBacktraceIterator kbt;

	if (task == NULL || task == current) {
		KBacktraceIterator_init_current(&kbt);
		KBacktraceIterator_next(&kbt);	/* don't show first frame */
	} else {
		KBacktraceIterator_init(&kbt, task, NULL);
	}
	tile_show_stack(&kbt);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

static void save_stack_trace_common(struct task_struct *task,
				    struct pt_regs *regs,
				    bool user,
				    struct stack_trace *trace)
{
	struct KBacktraceIterator kbt;
	int skip = trace->skip;
	int i = 0;

	if (!start_backtrace())
		goto done;
	if (regs != NULL) {
		KBacktraceIterator_init(&kbt, NULL, regs);
	} else if (task == NULL || task == current) {
		KBacktraceIterator_init_current(&kbt);
		skip++;	/* don't show KBacktraceIterator_init_current */
	} else {
		KBacktraceIterator_init(&kbt, task, NULL);
	}

	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
		if (skip) {
			--skip;
			continue;
		}
		if (i >= trace->max_entries ||
		    (!user && kbt.it.pc < PAGE_OFFSET))
			break;
		trace->entries[i++] = kbt.it.pc;
	}
	end_backtrace();

done:
	if (i < trace->max_entries)
		trace->entries[i++] = ULONG_MAX;
	trace->nr_entries = i;
}

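/*
 * The wrappers below feed this helper for the various generic entry
 * points.  A caller-side sketch (the entries array and its size are
 * the caller's choice; 16 here is arbitrary):
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries = entries,
 *		.max_entries = ARRAY_SIZE(entries),
 *	};
 *
 *	save_stack_trace_tsk(task, &trace);
 */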
void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
	save_stack_trace_common(task, NULL, false, trace);
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_common(NULL, NULL, false, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	save_stack_trace_common(NULL, regs, false, trace);
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/* Trace user stack if we are not a kernel thread. */
	if (current->mm)
		save_stack_trace_common(NULL, task_pt_regs(current),
					true, trace);
	else if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

#endif

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);