/*
 *  linux/arch/m32r/kernel/traps.c
 *
 *  Copyright (C) 2001, 2002  Hirokazu Takata, Hiroyuki Kondo,
 *                            Hitoshi Yamamoto
 */

/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/processor.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>

#include <asm/smp.h>

#include <linux/module.h>

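/*
 * The entry points declared below are low-level assembly stubs
 * (presumably in entry.S) that save CPU state and hand control to the
 * C-level do_*() handlers defined later in this file;
 * set_eit_vector_entries() installs branches to them in the EIT vector.
 */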
asmlinkage void alignment_check(void);
asmlinkage void ei_handler(void);
asmlinkage void rie_handler(void);
asmlinkage void debug_trap(void);
asmlinkage void cache_flushing_handler(void);
asmlinkage void ill_trap(void);

#ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(void);
extern void smp_invalidate_interrupt(void);
extern void smp_call_function_interrupt(void);
extern void smp_ipi_timer_interrupt(void);
extern void smp_flush_cache_all_interrupt(void);
extern void smp_call_function_single_interrupt(void);

/*
 * for Boot AP function
 */
asm (
        "        .section .eit_vector4,\"ax\"        \n"
        "        .global _AP_RE                      \n"
        "        .global startup_AP                  \n"
        "_AP_RE:                                     \n"
        "        .fill 32, 4, 0                      \n"
        "_AP_EI: bra startup_AP                      \n"
        "        .previous                           \n"
);
#endif  /* CONFIG_SMP */

extern unsigned long eit_vector[];
#define BRA_INSN(func, entry) \
        ((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \
        + 0xff000000UL
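/*
 * Each EIT vector slot holds one 32-bit instruction.  BRA_INSN() builds
 * an M32R "bra" instruction targeting func: 0xff000000 supplies the
 * opcode byte, and the low 24 bits carry the word (4-byte) displacement
 * from the vector slot at index 'entry' to the handler.
 */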

static void set_eit_vector_entries(void)
{
        extern void default_eit_handler(void);
        extern void system_call(void);
        extern void pie_handler(void);
        extern void ace_handler(void);
        extern void tme_handler(void);
        extern void _flush_cache_copyback_all(void);

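        /*
         * EIT = Exception, Interrupt, Trap.  The assignments below appear
         * to follow the M32R vector layout: reserved-instruction exception
         * at entry 8, address/alignment error at 12, TRAP0-TRAP15 at 16-31
         * (TRAP2 is the system call, TRAP12 the cache-flush trap), external
         * interrupts at 32, and, with an MMU, the access and TLB-miss
         * exceptions at 68 and 72.
         */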
        eit_vector[0] = 0xd0c00001; /* seth r0, 0x01 */
        eit_vector[1] = BRA_INSN(default_eit_handler, 1);
        eit_vector[4] = 0xd0c00010; /* seth r0, 0x10 */
        eit_vector[5] = BRA_INSN(default_eit_handler, 5);
        eit_vector[8] = BRA_INSN(rie_handler, 8);
        eit_vector[12] = BRA_INSN(alignment_check, 12);
        eit_vector[16] = BRA_INSN(ill_trap, 16);
        eit_vector[17] = BRA_INSN(debug_trap, 17);
        eit_vector[18] = BRA_INSN(system_call, 18);
        eit_vector[19] = BRA_INSN(ill_trap, 19);
        eit_vector[20] = BRA_INSN(ill_trap, 20);
        eit_vector[21] = BRA_INSN(ill_trap, 21);
        eit_vector[22] = BRA_INSN(ill_trap, 22);
        eit_vector[23] = BRA_INSN(ill_trap, 23);
        eit_vector[24] = BRA_INSN(ill_trap, 24);
        eit_vector[25] = BRA_INSN(ill_trap, 25);
        eit_vector[26] = BRA_INSN(ill_trap, 26);
        eit_vector[27] = BRA_INSN(ill_trap, 27);
        eit_vector[28] = BRA_INSN(cache_flushing_handler, 28);
        eit_vector[29] = BRA_INSN(ill_trap, 29);
        eit_vector[30] = BRA_INSN(ill_trap, 30);
        eit_vector[31] = BRA_INSN(ill_trap, 31);
        eit_vector[32] = BRA_INSN(ei_handler, 32);
        eit_vector[64] = BRA_INSN(pie_handler, 64);
#ifdef CONFIG_MMU
        eit_vector[68] = BRA_INSN(ace_handler, 68);
        eit_vector[72] = BRA_INSN(tme_handler, 72);
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
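        /*
         * The IPI slots below hold plain handler addresses rather than
         * branch instructions; ei_handler presumably uses them as a
         * dispatch table when an inter-processor interrupt arrives.
         */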
        eit_vector[184] = (unsigned long)smp_reschedule_interrupt;
        eit_vector[185] = (unsigned long)smp_invalidate_interrupt;
        eit_vector[186] = (unsigned long)smp_call_function_interrupt;
        eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
        eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
        eit_vector[189] = 0;    /* CPU_BOOT_IPI */
        eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
        eit_vector[191] = 0;
#endif
        _flush_cache_copyback_all();
}

void __init trap_init(void)
{
        set_eit_vector_entries();

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
}

static int kstack_depth_to_print = 24;

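/*
 * show_trace() is a conservative unwinder: it scans every word left on
 * the kernel stack and prints any value that lies in kernel text, so
 * the output may include stale return addresses as well as the real
 * call chain.
 */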
static void show_trace(struct task_struct *task, unsigned long *stack)
{
        unsigned long addr;

        if (!stack)
                stack = (unsigned long *)&stack;

        printk("Call Trace: ");
        while (!kstack_end(stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr)) {
                        printk("[<%08lx>] ", addr);
                        print_symbol("%s\n", addr);
                }
        }
        printk("\n");
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        unsigned long *stack;
        int i;

        /*
         * debugging aid: "show_stack(NULL);" prints the
         * back trace for this cpu.
         */

        if (sp == NULL) {
                if (task)
                        sp = (unsigned long *)task->thread.sp;
                else
                        sp = (unsigned long *)&sp;
        }

        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 4) == 0))
                        printk("\n       ");
                printk("%08lx ", *stack++);
        }
        printk("\n");
        show_trace(task, sp);
}
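
/*
 * dump_stack() uses the address of a local variable as an approximation
 * of the current stack pointer, so the trace starts from the caller's
 * frame.
 */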
void dump_stack(void)
{
        unsigned long stack;

        show_trace(current, &stack);
}
EXPORT_SYMBOL(dump_stack);

static void show_registers(struct pt_regs *regs)
{
        int i = 0;
        int in_kernel = 1;
        unsigned long sp;

        printk("CPU: %d\n", smp_processor_id());
        show_regs(regs);

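        /*
         * For a fault taken in kernel mode the registers were pushed on
         * the kernel stack, so the address just past the pt_regs frame
         * ("1 + regs") is the stack pointer at the time of the trap; for
         * a user-mode fault the user stack pointer comes from regs->spu.
         */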
        sp = (unsigned long) (1+regs);
        if (user_mode(regs)) {
                in_kernel = 0;
                sp = regs->spu;
                printk("SPU: %08lx\n", sp);
        } else {
                printk("SPI: %08lx\n", sp);
        }
        printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
                current->comm, task_pid_nr(current), 0xffff & i,
                4096+(unsigned long)current);

        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                printk("\nStack: ");
                show_stack(current, (unsigned long *) sp);

                printk("\nCode: ");
                if (regs->bpc < PAGE_OFFSET)
                        goto bad;

                for (i = 0; i < 20; i++) {
                        unsigned char c;
                        if (__get_user(c, &((unsigned char *)regs->bpc)[i])) {
bad:
                                printk(" Bad PC value.");
                                break;
                        }
                        printk("%02x ", c);
                }
        }
        printk("\n");
}

static DEFINE_SPINLOCK(die_lock);

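/*
 * die() is the terminal error path: it forces console output on,
 * serializes concurrent oopses with die_lock, dumps registers and the
 * stack, and then terminates the current task with SIGSEGV.
 */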
void die(const char * str, struct pt_regs * regs, long err)
{
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04lx\n", str, err & 0xffff);
        show_registers(regs);
        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

static __inline__ void die_if_kernel(const char * str,
        struct pt_regs * regs, long err)
{
        if (!user_mode(regs))
                die(str, regs, err);
}
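
/*
 * Common trap dispatch: a fault raised from user mode is reported to
 * the offending task as a signal (with optional siginfo), while a fault
 * in kernel mode is first checked against the exception fixup table and
 * only escalates to die() if no fixup entry exists.
 */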
static __inline__ void do_trap(int trapnr, int signr, const char * str,
        struct pt_regs * regs, long error_code, siginfo_t *info)
{
        if (user_mode(regs)) {
                /* trap_signal */
                struct task_struct *tsk = current;
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        } else {
                /* kernel_trap */
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }
}
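
/*
 * DO_ERROR() and DO_ERROR_INFO() stamp out the C-level trap handlers.
 * Both forward to do_trap(); the _INFO variant additionally fills in a
 * siginfo_t whose si_addr is the faulting address passed as 'siaddr'
 * (regs->bpc in the instantiations below).
 */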
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        do_trap(trapnr, signr, NULL, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        do_trap(trapnr, signr, str, regs, error_code, &info); \
}

DO_ERROR( 1, SIGTRAP, "debug trap", debug_trap)
DO_ERROR_INFO(0x20, SIGILL, "reserved instruction ", rie_handler, ILL_ILLOPC, regs->bpc)
DO_ERROR_INFO(0x100, SIGILL, "privileged instruction", pie_handler, ILL_PRVOPC, regs->bpc)
DO_ERROR_INFO(-1, SIGILL, "illegal trap", ill_trap, ILL_ILLTRP, regs->bpc)
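
/*
 * The expansions above define do_debug_trap(), do_rie_handler(),
 * do_pie_handler() and do_ill_trap(); the assembly stubs installed in
 * the EIT vector (debug_trap, rie_handler, ...) are expected to call
 * into these C handlers.
 */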

extern int handle_unaligned_access(unsigned long, struct pt_regs *);

/* This code taken from arch/sh/kernel/traps.c */
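/*
 * Alignment fixup: fetch the 32-bit instruction at the faulting PC
 * (switching the address limit with set_fs() so copy_from_user() reads
 * the correct address space) and let handle_unaligned_access() emulate
 * it.  If that fails, a user task gets SIGSEGV and a fault in kernel
 * mode is fatal.
 */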
asmlinkage void do_alignment_check(struct pt_regs *regs, long error_code)
{
        mm_segment_t oldfs;
        unsigned long insn;
        int tmp;

        oldfs = get_fs();

        if (user_mode(regs)) {
                local_irq_enable();
                current->thread.error_code = error_code;
                current->thread.trap_no = 0x17;

                set_fs(USER_DS);
                if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
                        set_fs(oldfs);
                        goto uspace_segv;
                }
                tmp = handle_unaligned_access(insn, regs);
                set_fs(oldfs);

                if (!tmp)
                        return;

        uspace_segv:
                printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
                        "access\n", current->comm);
                force_sig(SIGSEGV, current);
        } else {
                set_fs(KERNEL_DS);
                if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
                        set_fs(oldfs);
                        die("insn faulting in do_address_error", regs, 0);
                }
                handle_unaligned_access(insn, regs);
                set_fs(oldfs);
        }
}