arch/tile/kernel/stack.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/ucontext.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <arch/abi.h>
#include <arch/interrupts.h>

#define KBT_ONGOING     0  /* Backtrace still ongoing */
#define KBT_DONE        1  /* Backtrace cleanly completed */
#define KBT_RUNNING     2  /* Can't run backtrace on a running task */
#define KBT_LOOP        3  /* Backtrace entered a loop */
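
/*
 * These values are recorded in kbt->end; iteration continues only
 * while it is KBT_ONGOING, and KBacktraceIterator_end() below treats
 * any of the other three states as "done".
 */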

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
{
        ulong kstack_base = (ulong) kbt->task->stack;
        if (kstack_base == 0)  /* corrupt task pointer; just follow stack... */
                return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
        return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

47 /* Is address in the specified kernel code? */
48 static int in_kernel_text(VirtualAddress address)
50 return (address >= MEM_SV_INTRPT &&
51 address < MEM_SV_INTRPT + HPAGE_SIZE);
/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
{
        HV_PTE *l1_pgtable = kbt->pgtable;
        HV_PTE *l2_pgtable;
        unsigned long pfn;
        HV_PTE pte;
        struct page *page;

        if (l1_pgtable == NULL)
                return 0;       /* can't read user space in other tasks */

        pte = l1_pgtable[HV_L1_INDEX(address)];
        if (!hv_pte_get_present(pte))
                return 0;
        pfn = hv_pte_get_pfn(pte);
        if (pte_huge(pte)) {
                if (!pfn_valid(pfn)) {
                        pr_err("huge page has bad pfn %#lx\n", pfn);
                        return 0;
                }
                return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
        }

        page = pfn_to_page(pfn);
        if (PageHighMem(page)) {
                pr_err("L2 page table not in LOWMEM (%#llx)\n",
                       HV_PFN_TO_CPA(pfn));
                return 0;
        }
        l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
        pte = l2_pgtable[HV_L2_INDEX(address)];
        return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}

/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, VirtualAddress address,
                             unsigned int size, void *vkbt)
{
        int retval;
        struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
        if (in_kernel_text(address)) {
                /* OK to read kernel code. */
        } else if (address >= PAGE_OFFSET) {
                /* We only tolerate kernel-space reads of this task's stack */
                if (!in_kernel_stack(kbt, address))
                        return 0;
        } else if (!valid_address(kbt, address)) {
                return 0;       /* invalid user-space address */
        }
        pagefault_disable();
        retval = __copy_from_user_inatomic(result,
                                           (void __user __force *)address,
                                           size);
        pagefault_enable();
        return (retval == 0);
}

/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator *kbt)
{
        const char *fault = NULL;  /* happy compiler */
        char fault_buf[64];
        VirtualAddress sp = kbt->it.sp;
        struct pt_regs *p;

        if (!in_kernel_stack(kbt, sp))
                return NULL;
        if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
                return NULL;
        p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
        if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
                fault = "syscall";
        else {
                if (kbt->verbose) {     /* else we aren't going to use it */
                        snprintf(fault_buf, sizeof(fault_buf),
                                 "interrupt %ld", p->faultnum);
                        fault = fault_buf;
                }
        }
        if (EX1_PL(p->ex1) == KERNEL_PL &&
            in_kernel_text(p->pc) &&
            in_kernel_stack(kbt, p->sp) &&
            p->sp >= sp) {
                if (kbt->verbose)
                        pr_err("  <%s while in kernel mode>\n", fault);
        } else if (EX1_PL(p->ex1) == USER_PL &&
                   p->pc < PAGE_OFFSET &&
                   p->sp < PAGE_OFFSET) {
                if (kbt->verbose)
                        pr_err("  <%s while in user mode>\n", fault);
        } else if (kbt->verbose) {
                pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
                       p->pc, p->sp, p->ex1);
                p = NULL;
        }
        /* Guard against the NULL "odd fault" case before dereferencing. */
        if (p == NULL || !kbt->profile ||
            (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
                return p;
        return NULL;
}
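
/*
 * A sketch of the frame layout assumed above: at kbt->it.sp sits the
 * C_ABI_SAVE_AREA_SIZE-byte ABI save area, followed immediately by the
 * PTREGS_SIZE-byte struct pt_regs that the fault or interrupt handler
 * saved, which is why both ends of that span must lie on the kernel
 * stack before we dereference it.
 */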

/* Is the pc pointing to a sigreturn trampoline? */
static int is_sigreturn(VirtualAddress pc)
{
        return (pc == VDSO_BASE);
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator *kbt)
{
        BacktraceIterator *b = &kbt->it;

        if (b->pc == VDSO_BASE) {
                struct rt_sigframe *frame;
                unsigned long sigframe_top =
                        b->sp + sizeof(struct rt_sigframe) - 1;
                if (!valid_address(kbt, b->sp) ||
                    !valid_address(kbt, sigframe_top)) {
                        if (kbt->verbose)
                                pr_err("  (odd signal: sp %#lx?)\n",
                                       (unsigned long)(b->sp));
                        return NULL;
                }
                frame = (struct rt_sigframe *)b->sp;
                if (kbt->verbose) {
                        pr_err("  <received signal %d>\n",
                               frame->info.si_signo);
                }
                return (struct pt_regs *)&frame->uc.uc_mcontext;
        }
        return NULL;
}

static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
{
        return is_sigreturn(kbt->it.pc);
}

static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
        struct pt_regs *p;

        p = valid_fault_handler(kbt);
        if (p == NULL)
                p = valid_sigframe(kbt);
        if (p == NULL)
                return 0;
        backtrace_init(&kbt->it, read_memory_func, kbt,
                       p->pc, p->lr, p->sp, p->regs[52]);
        kbt->new_context = 1;
        return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
        struct KBacktraceIterator *kbt)
{
        for (;;) {
                do {
                        if (!KBacktraceIterator_is_sigreturn(kbt))
                                return KBT_ONGOING;
                } while (backtrace_next(&kbt->it));

                if (!KBacktraceIterator_restart(kbt))
                        return KBT_DONE;
        }
}

/*
 * If the current sp is on a page different from what we recorded
 * as the top-of-kernel-stack last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help with debugging,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
        int cpu = smp_processor_id();
        unsigned long ksp0 = get_current_ksp0();
        unsigned long ksp0_base = ksp0 - THREAD_SIZE;
        unsigned long sp = stack_pointer;

        if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
                pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
                       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
                       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
        } else if (sp < ksp0_base + sizeof(struct thread_info)) {
                pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
                       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
                       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
        }
}
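
/*
 * A sketch of the kernel stack layout the checks above assume:
 *
 *      ksp0:       top of the kernel stack; a kernel-mode sp at or
 *                  above this is reported as an underrun.
 *      ksp0_base:  ksp0 - THREAD_SIZE, the base of the stack, where
 *                  the struct thread_info lives; an sp that dips into
 *                  that region is reported as an overrun.
 */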

void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
                             struct task_struct *t, struct pt_regs *regs)
{
        VirtualAddress pc, lr, sp, r52;
        int is_current;

        /*
         * Set up callback information.  We grab the kernel stack base
         * so we will allow reads of that address range, and if we're
         * asking about the current process we grab the page table
         * so we can check user accesses before trying to read them.
         * We flush the TLB to avoid any weird skew issues.
         */
        is_current = (t == NULL);
        kbt->is_current = is_current;
        if (is_current)
                t = validate_current();
        kbt->task = t;
        kbt->pgtable = NULL;
        kbt->verbose = 0;       /* override in caller if desired */
        kbt->profile = 0;       /* override in caller if desired */
        kbt->end = KBT_ONGOING;
        kbt->new_context = 0;
        if (is_current) {
                HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
                if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
                        /*
                         * Not just an optimization: this also allows
                         * this to work at all before va/pa mappings
                         * are set up.
                         */
                        kbt->pgtable = swapper_pg_dir;
                } else {
                        struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
                        if (!PageHighMem(page))
                                kbt->pgtable = __va(pgdir_pa);
                        else
                                pr_err("page table not in LOWMEM"
                                       " (%#llx)\n", pgdir_pa);
                }
                local_flush_tlb_all();
                validate_stack(regs);
        }

        if (regs == NULL) {
                if (is_current || t->state == TASK_RUNNING) {
                        /* Can't do this; we need registers */
                        kbt->end = KBT_RUNNING;
                        return;
                }
                pc = get_switch_to_pc();
                lr = t->thread.pc;
                sp = t->thread.ksp;
                r52 = 0;
        } else {
                pc = regs->pc;
                lr = regs->lr;
                sp = regs->sp;
                r52 = regs->regs[52];
        }

        backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
        kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
        return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
        VirtualAddress old_pc = kbt->it.pc, old_sp = kbt->it.sp;
        kbt->new_context = 0;
        if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
                kbt->end = KBT_DONE;
                return;
        }
        kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
        if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
                /* Trapped in a loop; give up. */
                kbt->end = KBT_LOOP;
        }
}
EXPORT_SYMBOL(KBacktraceIterator_next);
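
/*
 * Taken together, the three entry points above support the iteration
 * idiom used by the consumers later in this file:
 *
 *      struct KBacktraceIterator kbt;
 *      KBacktraceIterator_init(&kbt, NULL, regs);
 *      for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
 *              handle_frame(kbt.it.pc);
 *
 * where handle_frame() stands for whatever the caller wants to do with
 * each frame's pc.
 */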

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
{
        int i;

        if (headers) {
                /*
                 * Add a blank line since if we are called from panic(),
                 * then bust_spinlocks() spit out a space in front of us
                 * and it will mess up our KERN_ERR.
                 */
                pr_err("\n");
                pr_err("Starting stack dump of tid %d, pid %d (%s)"
                       " on cpu %d at cycle %lld\n",
                       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
                       smp_processor_id(), get_cycles());
        }
        kbt->verbose = 1;
        i = 0;
        for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
                char *modname;
                const char *name;
                unsigned long address = kbt->it.pc;
                unsigned long offset, size;
                char namebuf[KSYM_NAME_LEN+100];

                if (address >= PAGE_OFFSET)
                        name = kallsyms_lookup(address, &size, &offset,
                                               &modname, namebuf);
                else
                        name = NULL;

                if (!name)
                        namebuf[0] = '\0';
                else {
                        size_t namelen = strlen(namebuf);
                        size_t remaining = (sizeof(namebuf) - 1) - namelen;
                        char *p = namebuf + namelen;
                        int rc = snprintf(p, remaining, "+%#lx/%#lx ",
                                          offset, size);
                        if (modname && rc < remaining)
                                snprintf(p + rc, remaining - rc,
                                         "[%s] ", modname);
                        namebuf[sizeof(namebuf)-1] = '\0';
                }

                pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
                       i++, address, namebuf, (unsigned long)(kbt->it.sp));
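
                /*
                 * The pr_err() above produces one line per frame, e.g.
                 * (with purely illustrative values):
                 *
                 *   frame 0: 0xfd3419a8 dump_stack_regs+0x30/0x48 (sp 0xe06abda0)
                 */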

                if (i >= 100) {
                        pr_err("Stack dump truncated"
                               " (%d frames)\n", i);
                        break;
                }
        }
        if (kbt->end == KBT_LOOP)
                pr_err("Stack dump stopped; next frame identical to this one\n");
        if (headers)
                pr_err("Stack dump complete\n");
}
EXPORT_SYMBOL(tile_show_stack);

/* This is called from show_regs() and _dump_stack() */
void dump_stack_regs(struct pt_regs *regs)
{
        struct KBacktraceIterator kbt;
        KBacktraceIterator_init(&kbt, NULL, regs);
        tile_show_stack(&kbt, 1);
}
EXPORT_SYMBOL(dump_stack_regs);

static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
                                       ulong pc, ulong lr, ulong sp, ulong r52)
{
        memset(regs, 0, sizeof(struct pt_regs));
        regs->pc = pc;
        regs->lr = lr;
        regs->sp = sp;
        regs->regs[52] = r52;
        return regs;
}

/* This is called from dump_stack() and just converts to pt_regs */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
        struct pt_regs regs;
        dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
                                      ulong lr, ulong sp, ulong r52)
{
        struct pt_regs regs;
        KBacktraceIterator_init(kbt, NULL,
                                regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called only from kernel/sched.c, with esp == NULL */
void show_stack(struct task_struct *task, unsigned long *esp)
{
        struct KBacktraceIterator kbt;
        if (task == NULL || task == current)
                KBacktraceIterator_init_current(&kbt);
        else
                KBacktraceIterator_init(&kbt, task, NULL);
        tile_show_stack(&kbt, 0);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
        struct KBacktraceIterator kbt;
        int skip = trace->skip;
        int i = 0;

        if (task == NULL || task == current)
                KBacktraceIterator_init_current(&kbt);
        else
                KBacktraceIterator_init(&kbt, task, NULL);
        for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
                if (skip) {
                        --skip;
                        continue;
                }
                if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
                        break;
                trace->entries[i++] = kbt.it.pc;
        }
        trace->nr_entries = i;
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
        save_stack_trace_tsk(NULL, trace);
}
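
/*
 * An illustrative (hypothetical) caller of the generic API above,
 * assuming the usual <linux/stacktrace.h> fields:
 *
 *      unsigned long entries[16];
 *      struct stack_trace trace = {
 *              .entries = entries,
 *              .max_entries = ARRAY_SIZE(entries),
 *              .skip = 0,
 *      };
 *      save_stack_trace(&trace);
 *      print_stack_trace(&trace, 0);
 */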

#endif

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);