/*
 *  linux/arch/arm/kernel/traps.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *  Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  'traps.c' handles hardware exceptions after we have saved some state in
 *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
 *  kill the offending process.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/traps.h>
static const char *handler[] = { "prefetch abort", "data abort", "address exception", "interrupt" };
#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;

static int __init user_debug_setup(char *str)
{
	get_option(&str, &user_debug);
	return 1;
}
__setup("user_debug=", user_debug_setup);
#endif
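
/*
 * Illustrative note (added, not in the original source): user_debug is a
 * bitmask of the UDBG_* flags tested below (UDBG_UNDEFINED, UDBG_SYSCALL,
 * UDBG_BADABORT).  Assuming the usual flag definitions in <asm/system.h>,
 * booting with, for example,
 *
 *	user_debug=255
 *
 * on the kernel command line enables all of these per-process diagnostics,
 * while a single-bit value selects just one class of report.
 */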
static void dump_mem(const char *str, unsigned long bottom, unsigned long top);
static inline int in_exception_text(unsigned long ptr)
{
	extern char __exception_text_start[];
	extern char __exception_text_end[];

	return ptr >= (unsigned long)&__exception_text_start &&
	       ptr < (unsigned long)&__exception_text_end;
}
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
	printk("[<%08lx>] ", where);
	print_symbol("(%s) ", where);
	printk("from [<%08lx>] ", from);
	print_symbol("(%s)\n", from);
#else
	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif

	if (in_exception_text(where))
		dump_mem("Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}
/*
 * Stack pointers should always be within the kernel's view of
 * physical memory.  If it is not there, then we can't dump
 * out any information relating to the stack.
 */
static int verify_stack(unsigned long sp)
{
	if (sp < PAGE_OFFSET ||
	    (sp > (unsigned long)high_memory && high_memory != 0))
		return -EFAULT;

	return 0;
}
/*
 * Dump out the contents of some memory nicely...
 */
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
	unsigned long p = bottom & ~31;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);

	for (p = bottom & ~31; p < top;) {
		printk("%04lx: ", p & 0xffff);

		for (i = 0; i < 8; i++, p += 4) {
			unsigned int val;

			if (p < bottom || p >= top)
				printk("         ");
			else {
				__get_user(val, (unsigned long *)p);
				printk("%08x ", val);
			}
		}
		printk("\n");
	}

	set_fs(fs);
}
static void dump_instr(struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	const int thumb = thumb_mode(regs);
	const int width = thumb ? 4 : 8;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		if (thumb)
			bad = __get_user(val, &((u16 *)addr)[i]);
		else
			bad = __get_user(val, &((u32 *)addr)[i]);

		if (!bad)
			printk(i == 0 ? "(%0*x) " : "%0*x ", width, val);
		else {
			printk("bad PC value.");
			break;
		}
	}
	printk("\n");

	set_fs(fs);
}
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	unsigned int fp = regs->ARM_fp;
	int ok = 1;

	printk("Backtrace: ");
	if (!fp) {
		printk("no frame pointer");
		ok = 0;
	} else if (verify_stack(fp)) {
		printk("invalid frame pointer 0x%08x", fp);
		ok = 0;
	} else if (fp < (unsigned long)end_of_stack(tsk))
		printk("frame pointer underflow");
	printk("\n");

	if (ok)
		c_backtrace(fp, processor_mode(regs));
}
void dump_stack(void)
{
#ifdef CONFIG_DEBUG_ERRORS
	__backtrace();
#endif
}
EXPORT_SYMBOL(dump_stack);
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	unsigned long fp;

	if (tsk != current)
		fp = thread_saved_fp(tsk);
	else
		asm("mov %0, fp" : "=r" (fp) : : "cc");

	c_backtrace(fp, 0x10);
}
static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
{
	struct task_struct *tsk = thread->task;
	static int die_counter;

	printk("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
	printk("Process %s (pid: %d, stack limit = 0x%p)\n",
		tsk->comm, tsk->pid, thread + 1);

	if (!user_mode(regs) || in_interrupt()) {
		dump_mem("Stack: ", regs->ARM_sp,
			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
		dump_backtrace(regs, tsk);
	}
}
DEFINE_SPINLOCK(die_lock);
/*
 * This function is protected against re-entrancy.
 */
NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
{
	struct thread_info *thread = current_thread_info();

	spin_lock_irq(&die_lock);
	__die(str, err, thread, regs);
	spin_unlock_irq(&die_lock);

	if (panic_on_oops)
		panic("Fatal exception");

	do_exit(SIGSEGV);
}
void arm_notify_die(const char *str, struct pt_regs *regs,
		struct siginfo *info, unsigned long err, unsigned long trap)
{
	if (user_mode(regs)) {
		current->thread.error_code = err;
		current->thread.trap_no = trap;

		force_sig_info(info->si_signo, info, current);
	} else {
		die(str, regs, err);
	}
}
static LIST_HEAD(undef_hook);
static DEFINE_SPINLOCK(undef_lock);
void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	spin_unlock_irqrestore(&undef_lock, flags);
}
void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	spin_unlock_irqrestore(&undef_lock, flags);
}
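
/*
 * Illustrative sketch (added, not part of the original file): a client that
 * wants to emulate a particular instruction encoding fills in a struct
 * undef_hook and registers it; see arm_mrc_hook further down for the real
 * in-tree user.  A hook's fn() returns 0 once it has handled the trapped
 * instruction (after advancing ARM_pc itself), or non-zero to let other
 * hooks or the SIGILL path run.  The encoding below is hypothetical.
 *
 *	static int my_emulate(struct pt_regs *regs, unsigned int instr)
 *	{
 *		regs->ARM_pc += 4;
 *		return 0;
 *	}
 *
 *	static struct undef_hook my_hook = {
 *		.instr_mask	= 0x0ff000f0,
 *		.instr_val	= 0x07f000f0,
 *		.cpsr_mask	= PSR_T_BIT,
 *		.cpsr_val	= 0,
 *		.fn		= my_emulate,
 *	};
 *
 *	register_undef_hook(&my_hook);
 */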
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
	unsigned int correction = thumb_mode(regs) ? 2 : 4;
	unsigned int instr;
	struct undef_hook *hook;
	siginfo_t info;
	void __user *pc;
	unsigned long flags;

	/*
	 * According to the ARM ARM, PC is 2 or 4 bytes ahead,
	 * depending whether we're in Thumb mode or not.
	 * Correct this offset.
	 */
	regs->ARM_pc -= correction;

	pc = (void __user *)instruction_pointer(regs);

	if (processor_mode(regs) == SVC_MODE) {
		instr = *(u32 *)pc;
	} else if (thumb_mode(regs)) {
		get_user(instr, (u16 __user *)pc);
	} else {
		get_user(instr, (u32 __user *)pc);
	}

	spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node) {
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) {
			if (hook->fn(regs, instr) == 0) {
				spin_unlock_irq(&undef_lock);
				return;
			}
		}
	}
	spin_unlock_irqrestore(&undef_lock, flags);

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_UNDEFINED) {
		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
			current->comm, current->pid, pc);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code  = ILL_ILLOPC;
	info.si_addr  = pc;

	arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}
asmlinkage void do_unexp_fiq(struct pt_regs *regs)
{
#ifndef CONFIG_IGNORE_FIQ
	printk("Hmm.  Unexpected FIQ received, but trying to continue\n");
	printk("You may have a hardware problem...\n");
#endif
}
/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
	printk(KERN_CRIT "Bad mode in %s handler detected\n", handler[reason]);

	die("Oops - bad mode", regs, 0);
}
static int bad_syscall(int n, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	siginfo_t info;

	if (current->personality != PER_LINUX &&
	    current->personality != PER_LINUX_32BIT &&
	    thread->exec_domain->handler) {
		thread->exec_domain->handler(n, regs);
		return regs->ARM_r0;
	}

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_SYSCALL) {
		printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
			current->pid, current->comm, n);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code  = ILL_ILLTRP;
	info.si_addr  = (void __user *)instruction_pointer(regs) -
			 (thumb_mode(regs) ? 2 : 4);

	arm_notify_die("Oops - bad syscall", regs, &info, n, 0);

	return regs->ARM_r0;
}
static inline void
do_cache_op(unsigned long start, unsigned long end, int flags)
{
	struct vm_area_struct *vma;

	if (end < start || flags)
		return;

	vma = find_vma(current->active_mm, start);
	if (vma && vma->vm_start < end) {
		if (start < vma->vm_start)
			start = vma->vm_start;
		if (end > vma->vm_end)
			end = vma->vm_end;

		flush_cache_user_range(vma, start, end);
	}
}
/*
 * Handle all unrecognised system calls.
 *  0x9f0000 - 0x9fffff are some more esoteric system calls
 */
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	siginfo_t info;

	if ((no >> 16) != (__ARM_NR_BASE >> 16))
		return bad_syscall(no, regs);

	switch (no & 0xffff) {
	case 0: /* branch through 0 */
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code  = SEGV_MAPERR;
		info.si_addr  = NULL;

		arm_notify_die("branch through zero", regs, &info, 0, 0);
		return 0;
	case NR(breakpoint): /* SWI BREAK_POINT */
		regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
		ptrace_break(current, regs);
		return regs->ARM_r0;
	/*
	 * Flush a region from virtual address 'r0' to virtual address 'r1'
	 * _exclusive_.  There is no alignment requirement on either address;
	 * user space does not need to know the hardware cache layout.
	 *
	 * r2 contains flags.  It should ALWAYS be passed as ZERO until it
	 * is defined to be something else.  For now we ignore it, but may
	 * the fires of hell burn in your belly if you break this rule. ;)
	 *
	 * (at a later date, we may want to allow this call to not flush
	 * various aspects of the cache.  Passing '0' will guarantee that
	 * everything necessary gets flushed to maintain consistency in
	 * the specified region).
	 */
	case NR(cacheflush):
		do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
		return 0;
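
	/*
	 * Illustrative sketch (added, not in the original): from user space
	 * this is reached as the private ARM syscall __ARM_NR_cacheflush
	 * (__ARM_NR_BASE + 2).  Assuming an OABI toolchain, a JIT that has
	 * just written code into [buf, buf_end) could issue roughly:
	 *
	 *	mov	r0, buf		@ start address
	 *	mov	r1, buf_end	@ end address (exclusive)
	 *	mov	r2, #0		@ flags, must be zero
	 *	swi	#0x9f0002	@ __ARM_NR_cacheflush, OABI encoding
	 *
	 * With EABI the number goes in r7 and "swi 0" is used instead; the
	 * exact constants above are assumptions taken from <asm/unistd.h>.
	 */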
	case NR(usr26):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr &= ~MODE32_BIT;
		return regs->ARM_r0;

	case NR(usr32):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr |= MODE32_BIT;
		return regs->ARM_r0;
	case NR(set_tls):
		thread->tp_value = regs->ARM_r0;
#if defined(CONFIG_HAS_TLS_REG)
		asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) );
#elif !defined(CONFIG_TLS_REG_EMUL)
		/*
		 * User space must never try to access this directly.
		 * Expect your app to break eventually if you do so.
		 * The user helper at 0xffff0fe0 must be used instead.
		 * (see entry-armv.S for details)
		 */
		*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
#endif
		return 0;
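
	/*
	 * Illustrative sketch (added, not in the original): user space reads
	 * the value stored above through the kuser helper mentioned in the
	 * comment, e.g. (assuming the helper entry point at 0xffff0fe0):
	 *
	 *	typedef unsigned int (*kuser_get_tls_t)(void);
	 *	#define __kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)
	 *
	 *	unsigned int tls = __kuser_get_tls();
	 */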
#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
	/*
	 * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
	 * Return zero in r0 if *MEM was changed or non-zero if no exchange
	 * happened.  Also set the user C flag accordingly.
	 * If access permissions have to be fixed up then non-zero is
	 * returned and the operation has to be re-attempted.
	 *
	 * *NOTE*: This is a ghost syscall private to the kernel.  Only the
	 * __kuser_cmpxchg code in entry-armv.S should be aware of its
	 * existence.  Don't ever use this from user code.
	 */
	case NR(cmpxchg):
	for (;;) {
		extern void do_DataAbort(unsigned long addr, unsigned int fsr,
					 struct pt_regs *regs);
		unsigned long val;
		unsigned long addr = regs->ARM_r2;
		struct mm_struct *mm = current->mm;
		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
		spinlock_t *ptl;

		regs->ARM_cpsr &= ~PSR_C_BIT;
		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, addr);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}
		val = *(unsigned long *)addr;
		val -= regs->ARM_r0;
		if (val == 0) {
			*(unsigned long *)addr = regs->ARM_r1;
			regs->ARM_cpsr |= PSR_C_BIT;
		}
		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return val;

		bad_access:
		up_read(&mm->mmap_sem);
		/* simulate a write access fault */
		do_DataAbort(addr, 15 + (1 << 11), regs);
		return -1;
	}
#endif
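
	/*
	 * Illustrative sketch (added, not in the original): user code never
	 * issues this ghost syscall itself; it calls the kuser helper that
	 * the comment above refers to.  Assuming the conventional helper
	 * entry at 0xffff0fc0:
	 *
	 *	typedef int (*kuser_cmpxchg_t)(int oldval, int newval,
	 *				       volatile int *ptr);
	 *	#define __kuser_cmpxchg ((kuser_cmpxchg_t)0xffff0fc0)
	 *
	 *	int failed = __kuser_cmpxchg(oldval, newval, &word);
	 *	// 0 (and C flag set) when *ptr was changed from oldval
	 */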
	default:
		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
		   if not implemented, rather than raising SIGILL.  This
		   way the calling program can gracefully determine whether
		   a feature is supported.  */
		if ((no & 0xffff) <= 0x7ff)
			return -ENOSYS;
		break;
	}
#ifdef CONFIG_DEBUG_USER
	/*
	 * experience shows that these seem to indicate that
	 * something catastrophic has happened
	 */
	if (user_debug & UDBG_SYSCALL) {
		printk("[%d] %s: arm syscall %d\n",
		       current->pid, current->comm, no);
		if (user_mode(regs)) {
			c_backtrace(regs->ARM_fp, processor_mode(regs));
		}
	}
#endif
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code  = ILL_ILLTRP;
	info.si_addr  = (void __user *)instruction_pointer(regs) -
			 (thumb_mode(regs) ? 2 : 4);

	arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
	return 0;
}
#ifdef CONFIG_TLS_REG_EMUL

/*
 * We might be running on an ARMv6+ processor which should have the TLS
 * register but for some reason we can't use it, or maybe an SMP system
 * using a pre-ARMv6 processor (there are apparently a few prototypes like
 * that in existence) and therefore access to that register must be
 * emulated.
 */
static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
	int reg = (instr >> 12) & 15;
	if (reg == 15)
		return 1;
	regs->uregs[reg] = current_thread_info()->tp_value;
	regs->ARM_pc += 4;
	return 0;
}
static struct undef_hook arm_mrc_hook = {
	.instr_mask	= 0x0fff0fff,
	.instr_val	= 0x0e1d0f70,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= get_tp_trap,
};

static int __init arm_mrc_hook_init(void)
{
	register_undef_hook(&arm_mrc_hook);
	return 0;
}

late_initcall(arm_mrc_hook_init);

#endif
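
/*
 * Added note (not in the original): with mask 0x0fff0fff and value
 * 0x0e1d0f70, arm_mrc_hook matches "mrc p15, 0, Rd, c13, c0, 3" for any
 * condition code and any destination register Rd, i.e. an ARM-state read
 * of the user thread ID register, which get_tp_trap() then emulates from
 * tp_value.  PSR_T_BIT in cpsr_mask with cpsr_val 0 restricts the hook to
 * ARM (non-Thumb) state.
 */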
void __bad_xchg(volatile void *ptr, int size)
{
	printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
		__builtin_return_address(0), ptr, size);
	BUG();
}
EXPORT_SYMBOL(__bad_xchg);
/*
 * A data abort trap was taken, but we did not handle the instruction.
 * Try to abort the user program, or panic if it was the kernel.
 */
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	siginfo_t info;

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_BADABORT) {
		printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
			current->pid, current->comm, code, instr);
		show_pte(current->mm, addr);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code  = ILL_ILLOPC;
	info.si_addr  = (void __user *)addr;

	arm_notify_die("unknown data abort code", regs, &info, instr, 0);
}
void __attribute__((noreturn)) __bug(const char *file, int line)
{
	printk(KERN_CRIT "kernel BUG at %s:%d!\n", file, line);
	*(int *)0 = 0;

	/* Avoid "noreturn function does return" */
	for (;;);
}
EXPORT_SYMBOL(__bug);
void __readwrite_bug(const char *fn)
{
	printk("%s called, but not implemented\n", fn);
	BUG();
}
EXPORT_SYMBOL(__readwrite_bug);
void __pte_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pte %08lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pmd %08lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pgd %08lx.\n", file, line, val);
}
asmlinkage void __div0(void)
{
	printk("Division by zero in kernel.\n");
}
EXPORT_SYMBOL(__div0);
void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);
void __init trap_init(void)
{
	unsigned long vectors = CONFIG_VECTORS_BASE;
	extern char __stubs_start[], __stubs_end[];
	extern char __vectors_start[], __vectors_end[];
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;

	/*
	 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
	 * into the vector page, mapped at 0xffff0000, and ensure these
	 * are visible to the instruction stream.
	 */
	memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
	memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
	memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

	/*
	 * Copy signal return handlers into the vector page, and
	 * set sigreturn to be a pointer to these.
	 */
	memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
	       sizeof(sigreturn_codes));

	flush_icache_range(vectors, vectors + PAGE_SIZE);
	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}