/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/hardirq.h>
#include <linux/string.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/ucontext.h>
#include <asm/switch_to.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <asm/vdso.h>
#include <arch/abi.h>
#include <arch/interrupts.h>
#define KBT_ONGOING	0	/* Backtrace still ongoing */
#define KBT_DONE	1	/* Backtrace cleanly completed */
#define KBT_RUNNING	2	/* Can't run backtrace on a running task */
#define KBT_LOOP	3	/* Backtrace entered a loop */
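
/*
 * The iterator below is driven in the usual init/end/next pattern; see
 * tile_show_stack() further down for the canonical loop:
 *
 *	struct KBacktraceIterator kbt;
 *	KBacktraceIterator_init(&kbt, task, regs);
 *	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
 *		;	/* each frame's pc/sp are in kbt.it.pc and kbt.it.sp */
 */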

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
{
	ulong kstack_base = (ulong) kbt->task->stack;
	if (kstack_base == 0)  /* corrupt task pointer; just follow stack... */
		return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, unsigned long address,
			     unsigned int size, void *vkbt)
{
	int retval;
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;

	if (address == 0)
		return 0;
	if (__kernel_text_address(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* We only tolerate kernel-space reads of this task's stack */
		if (!in_kernel_stack(kbt, address))
			return 0;
	} else if (!kbt->is_current) {
		return 0;	/* can't read from other user address spaces */
	}
	pagefault_disable();
	retval = __copy_from_user_inatomic(result,
					   (void __user __force *)address,
					   size);
	pagefault_enable();
	return (retval == 0);
}

/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator *kbt)
{
	char fault[64];
	unsigned long sp = kbt->it.sp;
	struct pt_regs *p;

	if (sp % sizeof(long) != 0)
		return NULL;
	if (!in_kernel_stack(kbt, sp))
		return NULL;
	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE - 1))
		return NULL;
	p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
	if (kbt->verbose) {	/* else we aren't going to use it */
		if (p->faultnum == INT_SWINT_1 ||
		    p->faultnum == INT_SWINT_1_SIGRETURN)
			snprintf(fault, sizeof(fault),
				 "syscall %ld", p->regs[TREG_SYSCALL_NR]);
		else
			snprintf(fault, sizeof(fault),
				 "interrupt %ld", p->faultnum);
	}
	if (EX1_PL(p->ex1) == KERNEL_PL &&
	    __kernel_text_address(p->pc) &&
	    in_kernel_stack(kbt, p->sp) &&
	    p->sp >= sp) {
		if (kbt->verbose)
			pr_err("  <%s while in kernel mode>\n", fault);
	} else if (user_mode(p) &&
		   p->sp < PAGE_OFFSET && p->sp != 0) {
		if (kbt->verbose)
			pr_err("  <%s while in user mode>\n", fault);
	} else {
		if (kbt->verbose && (p->pc != 0 || p->sp != 0 || p->ex1 != 0))
			pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
			       p->pc, p->sp, p->ex1);
		return NULL;
	}
	if (kbt->profile && ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) != 0)
		return NULL;
	return p;
}
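
/*
 * Note: the frame layout probed above is the one the interrupt/fault
 * entry code is assumed to leave on the kernel stack: a C ABI save area
 * of C_ABI_SAVE_AREA_SIZE bytes at the candidate sp, followed
 * immediately by the saved pt_regs.
 */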

/* Is the iterator pointing to a sigreturn trampoline? */
static int is_sigreturn(struct KBacktraceIterator *kbt)
{
	return kbt->task->mm &&
		(kbt->it.pc == ((ulong)kbt->task->mm->context.vdso_base +
				(ulong)&__vdso_rt_sigreturn));
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator *kbt,
				      struct rt_sigframe *kframe)
{
	BacktraceIterator *b = &kbt->it;

	if (is_sigreturn(kbt) && b->sp < PAGE_OFFSET &&
	    b->sp % sizeof(long) == 0) {
		int retval;
		pagefault_disable();
		retval = __copy_from_user_inatomic(
			kframe, (void __user __force *)b->sp,
			sizeof(*kframe));
		pagefault_enable();
		if (retval != 0 ||
		    (unsigned int)(kframe->info.si_signo) >= _NSIG)
			return NULL;
		if (kbt->verbose) {
			pr_err("  <received signal %d>\n",
			       kframe->info.si_signo);
		}
		return (struct pt_regs *)&kframe->uc.uc_mcontext;
	}
	return NULL;
}

static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
	struct pt_regs *p;
	struct rt_sigframe kframe;

	p = valid_fault_handler(kbt);
	if (p == NULL)
		p = valid_sigframe(kbt, &kframe);
	if (p == NULL)
		return 0;
	backtrace_init(&kbt->it, read_memory_func, kbt,
		       p->pc, p->lr, p->sp, p->regs[52]);
	kbt->new_context = 1;
	return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
	struct KBacktraceIterator *kbt)
{
	for (;;) {
		do {
			if (!is_sigreturn(kbt))
				return KBT_ONGOING;
		} while (backtrace_next(&kbt->it));

		if (!KBacktraceIterator_restart(kbt))
			return KBT_DONE;
	}
}

/*
 * If the current sp is on a page different than what we recorded
 * as the top-of-kernel-stack last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help debugging,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
	int cpu = raw_smp_processor_id();
	unsigned long ksp0 = get_current_ksp0();
	unsigned long ksp0_base = ksp0 & -THREAD_SIZE;
	unsigned long sp = stack_pointer;

	if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
		pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx underrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
	} else if (sp < ksp0_base + sizeof(struct thread_info)) {
		pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx overrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
	}
}

void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
			     struct task_struct *t, struct pt_regs *regs)
{
	unsigned long pc, lr, sp, r52;
	int is_current;

	/*
	 * Set up callback information.  We grab the kernel stack base
	 * so we will allow reads of that address range.
	 */
	is_current = (t == NULL || t == current);
	kbt->is_current = is_current;
	if (is_current)
		t = validate_current();
	kbt->task = t;
	kbt->verbose = 0;	/* override in caller if desired */
	kbt->profile = 0;	/* override in caller if desired */
	kbt->end = KBT_ONGOING;
	kbt->new_context = 1;
	if (is_current)
		validate_stack(regs);

	if (regs == NULL) {
		if (is_current || t->state == TASK_RUNNING) {
			/* Can't do this; we need registers */
			kbt->end = KBT_RUNNING;
			return;
		}
		pc = get_switch_to_pc();
		lr = t->thread.pc;
		sp = t->thread.ksp;
		r52 = 0;
	} else {
		pc = regs->pc;
		lr = regs->lr;
		sp = regs->sp;
		r52 = regs->regs[52];
	}

	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
	return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
	unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;
	kbt->new_context = 0;
	if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
		kbt->end = KBT_DONE;
		return;
	}
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
	if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
		/* Trapped in a loop; give up. */
		kbt->end = KBT_LOOP;
	}
}
EXPORT_SYMBOL(KBacktraceIterator_next);

static void describe_addr(struct KBacktraceIterator *kbt,
			  unsigned long address,
			  int have_mmap_sem, char *buf, size_t bufsize)
{
	struct vm_area_struct *vma;
	size_t namelen, remaining;
	unsigned long size, offset, adjust;
	char *p, *modname;
	const char *name = NULL;
	int rc;

	/*
	 * Look one byte back for every caller frame (i.e. those that
	 * aren't a new context) so we look up symbol data for the
	 * call itself, not the following instruction, which may be on
	 * a different line (or in a different function).
	 */
	adjust = !kbt->new_context;
	address -= adjust;

	if (address >= PAGE_OFFSET) {
		/* Handle kernel symbols. */
		BUG_ON(bufsize < KSYM_NAME_LEN);
		name = kallsyms_lookup(address, &size, &offset,
				       &modname, buf);
		if (name == NULL) {
			buf[0] = '\0';
			return;
		}
		namelen = strlen(buf);
		remaining = (bufsize - 1) - namelen;
		p = buf + namelen;
		rc = snprintf(p, remaining, "+%#lx/%#lx ",
			      offset + adjust, size);
		if (modname && rc < remaining)
			snprintf(p + rc, remaining - rc, "[%s] ", modname);
		buf[bufsize-1] = '\0';
		return;
	}

	/* If we don't have the mmap_sem, we can't show any more info. */
	buf[0] = '\0';
	if (!have_mmap_sem)
		return;

	/* Find vma info. */
	vma = find_vma(kbt->task->mm, address);
	if (vma == NULL || address < vma->vm_start) {
		snprintf(buf, bufsize, "[unmapped address] ");
		return;
	}

	if (vma->vm_file) {
		p = file_path(vma->vm_file, buf, bufsize);
		if (IS_ERR(p))
			p = "?";
		name = kbasename(p);
	} else {
		name = "anon";
	}

	/* Generate a string description of the vma info. */
	namelen = strlen(name);
	remaining = (bufsize - 1) - namelen;
	memmove(buf, name, namelen);
	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
		 vma->vm_start, vma->vm_end - vma->vm_start);
}

/*
 * Avoid possible crash recursion during backtrace.  If it happens, it
 * makes it easy to lose the actual root cause of the failure, so we
 * put a simple guard on all the backtrace loops.
 */
static bool start_backtrace(void)
{
	if (current_thread_info()->in_backtrace) {
		pr_err("Backtrace requested while in backtrace!\n");
		return false;
	}
	current_thread_info()->in_backtrace = true;
	return true;
}

static void end_backtrace(void)
{
	current_thread_info()->in_backtrace = false;
}

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt)
{
	int i;
	int have_mmap_sem = 0;

	if (!start_backtrace())
		return;
	kbt->verbose = 1;
	i = 0;
	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
		char namebuf[KSYM_NAME_LEN+100];
		unsigned long address = kbt->it.pc;

		/*
		 * Try to acquire the mmap_sem as we pass into userspace.
		 * If we're in an interrupt context, don't even try, since
		 * it's not safe to call e.g. d_path() from an interrupt,
		 * since it uses spin locks without disabling interrupts.
		 * Note we test "kbt->task == current", not "kbt->is_current",
		 * since we're checking that "current" will work in d_path().
		 */
		if (kbt->task == current && address < PAGE_OFFSET &&
		    !have_mmap_sem && kbt->task->mm && !in_interrupt()) {
			have_mmap_sem =
				down_read_trylock(&kbt->task->mm->mmap_sem);
		}

		describe_addr(kbt, address, have_mmap_sem,
			      namebuf, sizeof(namebuf));

		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
		       i++, address, namebuf, (unsigned long)(kbt->it.sp));

		if (i >= 100) {
			pr_err("Stack dump truncated (%d frames)\n", i);
			break;
		}
	}
	if (kbt->end == KBT_LOOP)
		pr_err("Stack dump stopped; next frame identical to this one\n");
	if (have_mmap_sem)
		up_read(&kbt->task->mm->mmap_sem);
	end_backtrace();
}
EXPORT_SYMBOL(tile_show_stack);

static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
				       ulong pc, ulong lr, ulong sp, ulong r52)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->pc = pc;
	regs->lr = lr;
	regs->sp = sp;
	regs->regs[52] = r52;
	return regs;
}

/* Deprecated function currently only used by kernel_double_fault(). */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
	struct KBacktraceIterator kbt;
	struct pt_regs regs;

	regs_to_pt_regs(&regs, pc, lr, sp, r52);
	KBacktraceIterator_init(&kbt, NULL, &regs);
	tile_show_stack(&kbt);
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
				      ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	KBacktraceIterator_init(kbt, NULL,
				regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/*
 * Called from sched_show_task() with task != NULL, or dump_stack()
 * with task == NULL.  The esp argument is always NULL.
 */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	struct KBacktraceIterator kbt;
	if (task == NULL || task == current) {
		KBacktraceIterator_init_current(&kbt);
		KBacktraceIterator_next(&kbt);	/* don't show first frame */
	} else {
		KBacktraceIterator_init(&kbt, task, NULL);
	}
	tile_show_stack(&kbt);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

static void save_stack_trace_common(struct task_struct *task,
				    struct pt_regs *regs,
				    bool user,
				    struct stack_trace *trace)
{
	struct KBacktraceIterator kbt;
	int skip = trace->skip;
	int i = 0;

	if (!start_backtrace())
		goto done;
	if (regs != NULL) {
		KBacktraceIterator_init(&kbt, NULL, regs);
	} else if (task == NULL || task == current) {
		KBacktraceIterator_init_current(&kbt);
		skip++;	/* don't show KBacktraceIterator_init_current */
	} else {
		KBacktraceIterator_init(&kbt, task, NULL);
	}
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
		if (skip) {
			--skip;
			continue;
		}
		if (i >= trace->max_entries ||
		    (!user && kbt.it.pc < PAGE_OFFSET))
			break;
		trace->entries[i++] = kbt.it.pc;
	}
	end_backtrace();
done:
	if (i < trace->max_entries)
		trace->entries[i++] = ULONG_MAX;
	trace->nr_entries = i;
}

void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
	save_stack_trace_common(task, NULL, false, trace);
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_common(NULL, NULL, false, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	save_stack_trace_common(NULL, regs, false, trace);
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/* Trace user stack if we are not a kernel thread. */
	if (current->mm)
		save_stack_trace_common(NULL, task_pt_regs(current),
					true, trace);
	else if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

#endif /* CONFIG_STACKTRACE */

EXPORT_SYMBOL(KBacktraceIterator_init_current);