/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/ucontext.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <arch/abi.h>
#include <arch/interrupts.h>

#define KBT_ONGOING	0   /* Backtrace still ongoing */
#define KBT_DONE	1   /* Backtrace cleanly completed */
#define KBT_RUNNING	2   /* Can't run backtrace on a running task */
#define KBT_LOOP	3   /* Backtrace entered a loop */
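
/*
 * These values live in kbt->end: KBT_ONGOING means more frames remain,
 * and the other three record why iteration stopped (see
 * KBacktraceIterator_end() and tile_show_stack() below).
 */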

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
{
	ulong kstack_base = (ulong) kbt->task->stack;
	if (kstack_base == 0)  /* corrupt task pointer; just follow stack... */
		return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

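/*
 * valid_address() below walks the hypervisor page tables by hand:
 * the top-level lookup (L0 on 64-bit), then L1, then L2, returning
 * early for huge-page entries.  Only a present, readable leaf PTE
 * makes an address acceptable to the backtracer.
 */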
/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
{
	HV_PTE *l1_pgtable = kbt->pgtable;
	HV_PTE *l2_pgtable;
	unsigned long pfn;
	HV_PTE pte;
	struct page *page;

	if (l1_pgtable == NULL)
		return 0;	/* can't read user space in other tasks */

#ifdef CONFIG_64BIT
	/* Find the real l1_pgtable by looking in the l0_pgtable. */
	pte = l1_pgtable[HV_L0_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (hv_pte_get_page(pte)) {
		if (!pfn_valid(pfn)) {
			pr_err("L0 huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}
	page = pfn_to_page(pfn);
	BUG_ON(PageHighMem(page));  /* No HIGHMEM on 64-bit. */
	l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
#endif
	pte = l1_pgtable[HV_L1_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (hv_pte_get_page(pte)) {
		if (!pfn_valid(pfn)) {
			pr_err("huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}

	page = pfn_to_page(pfn);
	if (PageHighMem(page)) {
		pr_err("L2 page table not in LOWMEM (%#llx)\n",
		       HV_PFN_TO_CPA(pfn));
		return 0;
	}
	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
	pte = l2_pgtable[HV_L2_INDEX(address)];
	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}

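/*
 * The generic backtracer uses read_memory_func() below as its read
 * callback for every word it wants to inspect.  Reads are screened by
 * in_kernel_stack()/valid_address() and performed with
 * __copy_from_user_inatomic(), so a bad address simply fails the read
 * and ends the walk instead of faulting in the middle of a stack dump.
 */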
/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, unsigned long address,
			     unsigned int size, void *vkbt)
{
	int retval;
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
	if (__kernel_text_address(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* We only tolerate kernel-space reads of this task's stack */
		if (!in_kernel_stack(kbt, address))
			return 0;
	} else if (!valid_address(kbt, address)) {
		return 0;	/* invalid user-space address */
	}
	pagefault_disable();
	retval = __copy_from_user_inatomic(result,
					   (void __user __force *)address,
					   size);
	pagefault_enable();
	return (retval == 0);
}

/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
{
	const char *fault = NULL;  /* happy compiler */
	char fault_buf[64];
	unsigned long sp = kbt->it.sp;
	struct pt_regs *p;

	if (!in_kernel_stack(kbt, sp))
		return NULL;
	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE - 1))
		return NULL;
	p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
	if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
		fault = "syscall";
	else {
		if (kbt->verbose) {     /* else we aren't going to use it */
			snprintf(fault_buf, sizeof(fault_buf),
				 "interrupt %ld", p->faultnum);
			fault = fault_buf;
		}
	}
	if (EX1_PL(p->ex1) == KERNEL_PL &&
	    __kernel_text_address(p->pc) &&
	    in_kernel_stack(kbt, p->sp) &&
	    p->sp >= sp) {
		if (kbt->verbose)
			pr_err("  <%s while in kernel mode>\n", fault);
	} else if (EX1_PL(p->ex1) == USER_PL &&
		   p->pc < PAGE_OFFSET &&
		   p->sp < PAGE_OFFSET) {
		if (kbt->verbose)
			pr_err("  <%s while in user mode>\n", fault);
	} else if (kbt->verbose) {
		pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
		       p->pc, p->sp, p->ex1);
	}
	if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
		return p;
	return NULL;
}

/* Is the pc pointing to a sigreturn trampoline? */
static int is_sigreturn(unsigned long pc)
{
	return (pc == VDSO_BASE);
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
{
	BacktraceIterator *b = &kbt->it;

	if (b->pc == VDSO_BASE) {
		struct rt_sigframe *frame;
		unsigned long sigframe_top =
			b->sp + sizeof(struct rt_sigframe) - 1;
		if (!valid_address(kbt, b->sp) ||
		    !valid_address(kbt, sigframe_top)) {
			if (kbt->verbose)
				pr_err("  (odd signal: sp %#lx?)\n",
				       (unsigned long)(b->sp));
			return NULL;
		}
		frame = (struct rt_sigframe *)b->sp;
		if (kbt->verbose) {
			pr_err("  <received signal %d>\n",
			       frame->info.si_signo);
		}
		return (struct pt_regs *)&frame->uc.uc_mcontext;
	}
	return NULL;
}

static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
{
	return is_sigreturn(kbt->it.pc);
}

static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
	struct pt_regs *p;

	p = valid_fault_handler(kbt);
	if (p == NULL)
		p = valid_sigframe(kbt);
	if (p == NULL)
		return 0;
	backtrace_init(&kbt->it, read_memory_func, kbt,
		       p->pc, p->lr, p->sp, p->regs[52]);
	kbt->new_context = 1;
	return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
	struct KBacktraceIterator *kbt)
{
	for (;;) {
		do {
			if (!KBacktraceIterator_is_sigreturn(kbt))
				return KBT_ONGOING;
		} while (backtrace_next(&kbt->it));

		if (!KBacktraceIterator_restart(kbt))
			return KBT_DONE;
	}
}

/*
 * If the current sp is on a page different than what we recorded
 * as the top-of-kernel-stack last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help the debug,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long ksp0 = get_current_ksp0();
	unsigned long ksp0_base = ksp0 - THREAD_SIZE;
	unsigned long sp = stack_pointer;

	if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
		pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	} else if (sp < ksp0_base + sizeof(struct thread_info)) {
		pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	}
}

void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
			     struct task_struct *t, struct pt_regs *regs)
{
	unsigned long pc, lr, sp, r52;
	int is_current;

	/*
	 * Set up callback information.  We grab the kernel stack base
	 * so we will allow reads of that address range, and if we're
	 * asking about the current process we grab the page table
	 * so we can check user accesses before trying to read them.
	 * We flush the TLB to avoid any weird skew issues.
	 */
	is_current = (t == NULL);
	kbt->is_current = is_current;
	if (is_current)
		t = validate_current();
	kbt->task = t;
	kbt->pgtable = NULL;
	kbt->verbose = 0;	/* override in caller if desired */
	kbt->profile = 0;	/* override in caller if desired */
	kbt->end = KBT_ONGOING;
	kbt->new_context = 0;
	if (is_current) {
		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
		if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
			/*
			 * Not just an optimization: this also allows
			 * this to work at all before va/pa mappings
			 * are set up.
			 */
			kbt->pgtable = swapper_pg_dir;
		} else {
			struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
			if (!PageHighMem(page))
				kbt->pgtable = __va(pgdir_pa);
			else
				pr_err("page table not in LOWMEM"
				       " (%#llx)\n", pgdir_pa);
		}
		local_flush_tlb_all();
		validate_stack(regs);
	}

	if (regs == NULL) {
		if (is_current || t->state == TASK_RUNNING) {
			/* Can't do this; we need registers */
			kbt->end = KBT_RUNNING;
			return;
		}
		pc = get_switch_to_pc();
		lr = t->thread.pc;
		sp = t->thread.ksp;
		r52 = 0;
	} else {
		pc = regs->pc;
		lr = regs->lr;
		sp = regs->sp;
		r52 = regs->regs[52];
	}

	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
	return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
	unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;
	kbt->new_context = 0;
	if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
		kbt->end = KBT_DONE;
		return;
	}
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
	if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
		/* Trapped in a loop; give up. */
		kbt->end = KBT_LOOP;
	}
}
EXPORT_SYMBOL(KBacktraceIterator_next);

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
{
	int i;

	if (headers) {
		/*
		 * Add a blank line since if we are called from panic(),
		 * then bust_spinlocks() spit out a space in front of us
		 * and it will mess up our KERN_ERR.
		 */
		pr_err("\n");
		pr_err("Starting stack dump of tid %d, pid %d (%s)"
		       " on cpu %d at cycle %lld\n",
		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
		       smp_processor_id(), get_cycles());
	}
	kbt->verbose = 1;
	i = 0;
	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
		char *modname;
		const char *name;
		unsigned long address = kbt->it.pc;
		unsigned long offset, size;
		char namebuf[KSYM_NAME_LEN+100];

		if (address >= PAGE_OFFSET)
			name = kallsyms_lookup(address, &size, &offset,
					       &modname, namebuf);
		else
			name = NULL;

		if (!name)
			namebuf[0] = '\0';
		else {
			size_t namelen = strlen(namebuf);
			size_t remaining = (sizeof(namebuf) - 1) - namelen;
			char *p = namebuf + namelen;
			int rc = snprintf(p, remaining, "+%#lx/%#lx ",
					  offset, size);
			if (modname && rc < remaining)
				snprintf(p + rc, remaining - rc,
					 "[%s] ", modname);
			namebuf[sizeof(namebuf)-1] = '\0';
		}

		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
		       i++, address, namebuf, (unsigned long)(kbt->it.sp));

		if (i >= 100) {
			pr_err("Stack dump truncated"
			       " (%d frames)\n", i);
			break;
		}
	}
	if (kbt->end == KBT_LOOP)
		pr_err("Stack dump stopped; next frame identical to this one\n");
	if (headers)
		pr_err("Stack dump complete\n");
}
EXPORT_SYMBOL(tile_show_stack);

/* This is called from show_regs() and _dump_stack() */
void dump_stack_regs(struct pt_regs *regs)
{
	struct KBacktraceIterator kbt;
	KBacktraceIterator_init(&kbt, NULL, regs);
	tile_show_stack(&kbt, 1);
}
EXPORT_SYMBOL(dump_stack_regs);

static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
				       ulong pc, ulong lr, ulong sp, ulong r52)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->pc = pc;
	regs->lr = lr;
	regs->sp = sp;
	regs->regs[52] = r52;
	return regs;
}

/* This is called from dump_stack() and just converts to pt_regs */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
				      ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	KBacktraceIterator_init(kbt, NULL,
				regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called only from kernel/sched.c, with esp == NULL */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	struct KBacktraceIterator kbt;
	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	tile_show_stack(&kbt, 0);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
	struct KBacktraceIterator kbt;
	int skip = trace->skip;
	int i = 0;

	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
		if (skip) {
			--skip;
			continue;
		}
		if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
			break;
		trace->entries[i++] = kbt.it.pc;
	}
	trace->nr_entries = i;
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}

#endif

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);