repo init
[linux-rt-nao.git] / arch / powerpc / kernel / traps.c
blob07138281f25971891e7bfd75a65fc32b201676f4
/*
 *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  and Paul Mackerras (paulus@samba.org)
 *
 *  This file handles the architecture-dependent parts of hardware exceptions
 */
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/stddef.h>
22 #include <linux/unistd.h>
23 #include <linux/ptrace.h>
24 #include <linux/slab.h>
25 #include <linux/user.h>
26 #include <linux/interrupt.h>
27 #include <linux/init.h>
28 #include <linux/module.h>
29 #include <linux/prctl.h>
30 #include <linux/delay.h>
31 #include <linux/kprobes.h>
32 #include <linux/kexec.h>
33 #include <linux/backlight.h>
34 #include <linux/bug.h>
35 #include <linux/kdebug.h>
37 #include <asm/pgtable.h>
38 #include <asm/uaccess.h>
39 #include <asm/system.h>
40 #include <asm/io.h>
41 #include <asm/machdep.h>
42 #include <asm/rtas.h>
43 #include <asm/pmc.h>
44 #ifdef CONFIG_PPC32
45 #include <asm/reg.h>
46 #endif
47 #ifdef CONFIG_PMAC_BACKLIGHT
48 #include <asm/backlight.h>
49 #endif
50 #ifdef CONFIG_PPC64
51 #include <asm/firmware.h>
52 #include <asm/processor.h>
53 #endif
54 #include <asm/kexec.h>
56 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
57 int (*__debugger)(struct pt_regs *regs);
58 int (*__debugger_ipi)(struct pt_regs *regs);
59 int (*__debugger_bpt)(struct pt_regs *regs);
60 int (*__debugger_sstep)(struct pt_regs *regs);
61 int (*__debugger_iabr_match)(struct pt_regs *regs);
62 int (*__debugger_dabr_match)(struct pt_regs *regs);
63 int (*__debugger_fault_handler)(struct pt_regs *regs);
65 EXPORT_SYMBOL(__debugger);
66 EXPORT_SYMBOL(__debugger_ipi);
67 EXPORT_SYMBOL(__debugger_bpt);
68 EXPORT_SYMBOL(__debugger_sstep);
69 EXPORT_SYMBOL(__debugger_iabr_match);
70 EXPORT_SYMBOL(__debugger_dabr_match);
71 EXPORT_SYMBOL(__debugger_fault_handler);
72 #endif
/*
 * Trap & Exception support
 */
78 #ifdef CONFIG_PMAC_BACKLIGHT
79 static void pmac_backlight_unblank(void)
81 mutex_lock(&pmac_backlight_mutex);
82 if (pmac_backlight) {
83 struct backlight_properties *props;
85 props = &pmac_backlight->props;
86 props->brightness = props->max_brightness;
87 props->power = FB_BLANK_UNBLANK;
88 backlight_update_status(pmac_backlight);
90 mutex_unlock(&pmac_backlight_mutex);
92 #else
93 static inline void pmac_backlight_unblank(void) { }
94 #endif
/*
 * Oops/die handling: serialise concurrent oopses with a recursive,
 * per-CPU-owned lock, print the report, then hand off to kexec,
 * panic, or kill the current task.
 * Returns 1 if an attached debugger consumed the event; otherwise
 * does not return to the faulting context (do_exit()).
 */
int die(const char *str, struct pt_regs *regs, long err)
{
	/*
	 * One lock instance shared by all CPUs; owner/depth allow the
	 * same CPU to re-enter die() (an oops inside an oops) without
	 * deadlocking on itself.
	 */
	static struct {
		raw_spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock = _RAW_SPIN_LOCK_UNLOCKED(die.lock),
		.lock_owner = -1,
		.lock_owner_depth = 0
	};
	static int die_counter;		/* serial number printed as [#N] */
	unsigned long flags;

	/* Give an attached debugger first shot at the event. */
	if (debugger(regs))
		return 1;

	oops_enter();

	if (die.lock_owner != raw_smp_processor_id()) {
		console_verbose();
		spin_lock_irqsave(&die.lock, flags);
		die.lock_owner = smp_processor_id();
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
		if (machine_is(powermac))
			pmac_backlight_unblank();
	} else {
		/* Recursive entry on this CPU: we already hold the lock. */
		local_save_flags(flags);
	}

	/* Suppress output after repeated nested failures. */
	if (++die.lock_owner_depth < 3) {
		printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
		printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
		printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
		printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
		printk("NUMA ");
#endif
		printk("%s\n", ppc_md.name ? ppc_md.name : "");

		print_modules();
		show_regs(regs);
	} else {
		printk("Recursive die() failure, output suppressed\n");
	}

	bust_spinlocks(0);
	die.lock_owner = -1;
	add_taint(TAINT_DIE);
	spin_unlock_irqrestore(&die.lock, flags);

	/* Hand over to a loaded kdump kernel, if any. */
	if (kexec_should_crash(current) ||
		kexec_sr_activated(smp_processor_id()))
		crash_kexec(regs);
	crash_kexec_secondary(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(err);

	return 0;
}
/*
 * Deliver signal 'signr' with siginfo 'code'/'addr' to current for a
 * synchronous exception.  Kernel-mode faults are routed to die()
 * instead; a default-handled fatal signal to init is turned into an
 * explicit exit so init cannot loop re-taking the same exception.
 */
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		/* Fault from kernel mode: this is an oops, not a signal. */
		if (die("Exception in kernel mode", regs, signr))
			return;
	} else if (show_unhandled_signals &&
		    unhandled_signal(current, signr) &&
		    printk_ratelimit()) {
		/* MSR_SF selects the 64-bit vs 32-bit report format. */
		printk(regs->msr & MSR_SF ? fmt64 : fmt32,
			current->comm, current->pid, signr,
			addr, regs->nip, regs->link, code);
	}

#ifdef CONFIG_PREEMPT_RT
	/* On PREEMPT_RT signal delivery may sleep; allow that here. */
	local_irq_enable();
	preempt_check_resched();
#endif

	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);

	/*
	 * Init gets no signals that it doesn't have a handler for.
	 * That's all very well, but if it has caused a synchronous
	 * exception and we ignore the resulting signal, it will just
	 * generate the same exception over and over again and we get
	 * nowhere.  Better to kill it and let the kernel panic.
	 */
	if (is_global_init(current)) {
		__sighandler_t handler;

		spin_lock_irq(&current->sighand->siglock);
		handler = current->sighand->action[signr-1].sa.sa_handler;
		spin_unlock_irq(&current->sighand->siglock);
		if (handler == SIG_DFL) {
			/* init has generated a synchronous exception
			   and it doesn't have a handler for the signal */
			printk(KERN_CRIT "init has generated signal %d "
			       "but has no handler for it\n", signr);
			do_exit(signr);
		}
	}
}
224 #ifdef CONFIG_PPC64
/*
 * 64-bit system reset exception (vector 0x100).  The platform hook
 * gets first refusal; otherwise this is treated as a fatal event.
 * Also part of the kdump soft-reset path for secondary CPUs.
 */
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

#ifdef CONFIG_KEXEC
	/* Record that this CPU entered via soft-reset, for kdump. */
	cpu_set(smp_processor_id(), cpus_in_sr);
#endif

	die("System Reset", regs, SIGABRT);

	/*
	 * Some CPUs when released from the debugger will execute this path.
	 * These CPUs entered the debugger via a soft-reset. If the CPU was
	 * hung before entering the debugger it will return to the hung
	 * state when exiting this function.  This causes a problem in
	 * kdump since the hung CPU(s) will not respond to the IPI sent
	 * from kdump. To prevent the problem we call crash_kexec_secondary()
	 * here. If a kdump had not been initiated or we exit the debugger
	 * with the "exit and recover" command (x) crash_kexec_secondary()
	 * will return after 5ms and the CPU returns to its previous state.
	 */
	crash_kexec_secondary(regs);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}
258 #endif
/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
/*
 * Try to recover a machine check caused by a probing I/O access on
 * 32-bit: if the NIP has an exception-table entry and sits in the
 * sync/twi/isync/nop sequence that inb()/inw()/inl() use, branch to
 * the fixup and return 1.  Returns 0 if not recoverable here.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			/* Mark the interrupt recoverable and jump to fixup. */
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}
309 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
310 /* On 4xx, the reason for the machine check or program exception
311 is in the ESR. */
312 #define get_reason(regs) ((regs)->dsisr)
313 #ifndef CONFIG_FSL_BOOKE
314 #define get_mc_reason(regs) ((regs)->dsisr)
315 #else
316 #define get_mc_reason(regs) (mfspr(SPRN_MCSR) & MCSR_MASK)
317 #endif
318 #define REASON_FP ESR_FP
319 #define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
320 #define REASON_PRIVILEGED ESR_PPR
321 #define REASON_TRAP ESR_PTR
323 /* single-step stuff */
324 #define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC)
325 #define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC)
327 #else
328 /* On non-4xx, the reason for the machine check or program
329 exception is in the MSR. */
330 #define get_reason(regs) ((regs)->msr)
331 #define get_mc_reason(regs) ((regs)->msr)
332 #define REASON_FP 0x100000
333 #define REASON_ILLEGAL 0x80000
334 #define REASON_PRIVILEGED 0x40000
335 #define REASON_TRAP 0x20000
337 #define single_stepping(regs) ((regs)->msr & MSR_SE)
338 #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
339 #endif
341 #if defined(CONFIG_4xx)
342 int machine_check_4xx(struct pt_regs *regs)
344 unsigned long reason = get_mc_reason(regs);
346 if (reason & ESR_IMCP) {
347 printk("Instruction");
348 mtspr(SPRN_ESR, reason & ~ESR_IMCP);
349 } else
350 printk("Data");
351 printk(" machine check in kernel mode.\n");
353 return 0;
/*
 * 440A machine check: decode MCSR and report the cause.  I-cache
 * parity is recovered by flushing the icache; MCSR is cleared on
 * the way out.  Always returns 0 (not recovered).
 */
int machine_check_440A(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP){
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);	/* ack the condition */
	}
	else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP){
			/* parity in the icache: refetch from memory */
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
	return 0;
}
391 #elif defined(CONFIG_E500)
392 int machine_check_e500(struct pt_regs *regs)
394 unsigned long reason = get_mc_reason(regs);
396 printk("Machine check in kernel mode.\n");
397 printk("Caused by (from MCSR=%lx): ", reason);
399 if (reason & MCSR_MCP)
400 printk("Machine Check Signal\n");
401 if (reason & MCSR_ICPERR)
402 printk("Instruction Cache Parity Error\n");
403 if (reason & MCSR_DCP_PERR)
404 printk("Data Cache Push Parity Error\n");
405 if (reason & MCSR_DCPERR)
406 printk("Data Cache Parity Error\n");
407 if (reason & MCSR_BUS_IAERR)
408 printk("Bus - Instruction Address Error\n");
409 if (reason & MCSR_BUS_RAERR)
410 printk("Bus - Read Address Error\n");
411 if (reason & MCSR_BUS_WAERR)
412 printk("Bus - Write Address Error\n");
413 if (reason & MCSR_BUS_IBERR)
414 printk("Bus - Instruction Data Error\n");
415 if (reason & MCSR_BUS_RBERR)
416 printk("Bus - Read Data Bus Error\n");
417 if (reason & MCSR_BUS_WBERR)
418 printk("Bus - Read Data Bus Error\n");
419 if (reason & MCSR_BUS_IPERR)
420 printk("Bus - Instruction Parity Error\n");
421 if (reason & MCSR_BUS_RPERR)
422 printk("Bus - Read Parity Error\n");
424 return 0;
426 #elif defined(CONFIG_E200)
427 int machine_check_e200(struct pt_regs *regs)
429 unsigned long reason = get_mc_reason(regs);
431 printk("Machine check in kernel mode.\n");
432 printk("Caused by (from MCSR=%lx): ", reason);
434 if (reason & MCSR_MCP)
435 printk("Machine Check Signal\n");
436 if (reason & MCSR_CP_PERR)
437 printk("Cache Push Parity Error\n");
438 if (reason & MCSR_CPERR)
439 printk("Cache Parity Error\n");
440 if (reason & MCSR_EXCP_ERR)
441 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
442 if (reason & MCSR_BUS_IRERR)
443 printk("Bus - Read Bus Error on instruction fetch\n");
444 if (reason & MCSR_BUS_DRERR)
445 printk("Bus - Read Bus Error on data load\n");
446 if (reason & MCSR_BUS_WRERR)
447 printk("Bus - Write Bus Error on buffered store or cache line push\n");
449 return 0;
451 #else
/*
 * Generic (non-4xx/BookE) machine check: classify the cause from the
 * SRR1 image saved in regs->msr.  Always returns 0 (not recovered).
 */
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);
	unsigned long cause = reason & 0x601F0000;

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);

	if (cause == 0x80000)
		printk("Machine check signal\n");
	else if (cause == 0		/* for 601 */
		 || cause == 0x40000
		 || cause == 0x140000)	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
	else if (cause == 0x20000)
		printk("Data parity error signal\n");
	else if (cause == 0x10000)
		printk("Address parity error signal\n");
	else if (cause == 0x20000000)
		printk("L1 Data Cache error\n");
	else if (cause == 0x40000000)
		printk("L1 Instruction Cache error\n");
	else if (cause == 0x00100000)
		printk("L2 data cache parity error\n");
	else
		printk("Unknown values in msr\n");

	return 0;
}
487 #endif /* everything else */
/*
 * Common machine check handler: give the platform/CPU-specific hook a
 * chance to recover, convert user-mode checks into SIGBUS, try the
 * I/O-probe fixup, and otherwise die.
 */
void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		return;

	if (user_mode(regs)) {
		/* Recoverable from the task's viewpoint: signal it. */
		regs->msr |= MSR_RI;
		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
		return;
	}

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort
	 *
	 * yuck !!! that totally needs to go away ! There are better ways
	 * to deal with that than having a wart in the mcheck handler.
	 * -- BenH
	 */
	bad_page_fault(regs, regs->dar, SIGBUS);
	return;
#endif

	if (debugger_fault_handler(regs)) {
		regs->msr |= MSR_RI;
		return;
	}

	if (check_io_access(regs))
		return;

	if (debugger_fault_handler(regs))
		return;
	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");
}
/* System Management Interrupt: no handling implemented, always fatal. */
void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

/* Catch-all for exception vectors with no dedicated handler. */
void unknown_exception(struct pt_regs *regs)
{
	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);
}

/* Instruction address breakpoint (IABR) match. */
void instruction_breakpoint_exception(struct pt_regs *regs)
{
	/* kprobes and an attached debugger get first refusal. */
	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_iabr_match(regs))
		return;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
}

/* Run-mode exception: reported to the task as a plain SIGTRAP. */
void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

/* Single-step (trace) exception. */
void __kprobes single_step_exception(struct pt_regs *regs)
{
	regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */

	/* kprobes and an attached debugger get first refusal. */
	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_sstep(regs))
		return;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}
/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
588 static void emulate_single_step(struct pt_regs *regs)
590 if (single_stepping(regs)) {
591 clear_single_step(regs);
592 _exception(SIGTRAP, regs, TRAP_TRACE, 0);
596 static inline int __parse_fpscr(unsigned long fpscr)
598 int ret = 0;
600 /* Invalid operation */
601 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
602 ret = FPE_FLTINV;
604 /* Overflow */
605 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
606 ret = FPE_FLTOVF;
608 /* Underflow */
609 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
610 ret = FPE_FLTUND;
612 /* Divide by zero */
613 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
614 ret = FPE_FLTDIV;
616 /* Inexact result */
617 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
618 ret = FPE_FLTRES;
620 return ret;
/*
 * IEEE FP exception: flush the task's FP state to the thread struct,
 * decode FPSCR into an si_code, and deliver SIGFPE at the fault NIP.
 */
static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fpscr.val);

	_exception(SIGFPE, regs, code, regs->nip);
}
/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 */
645 #define INST_MFSPR_PVR 0x7c1f42a6
646 #define INST_MFSPR_PVR_MASK 0xfc1fffff
648 #define INST_DCBA 0x7c0005ec
649 #define INST_DCBA_MASK 0xfc0007fe
651 #define INST_MCRXR 0x7c000400
652 #define INST_MCRXR_MASK 0xfc0007fe
654 #define INST_STRING 0x7c00042a
655 #define INST_STRING_MASK 0xfc0007fe
656 #define INST_STRING_GEN_MASK 0xfc00067e
657 #define INST_LSWI 0x7c0004aa
658 #define INST_LSWX 0x7c00042a
659 #define INST_STSWI 0x7c0005aa
660 #define INST_STSWX 0x7c00052a
662 #define INST_POPCNTB 0x7c0000f4
663 #define INST_POPCNTB_MASK 0xfc0007fe
665 #define INST_ISEL 0x7c00001e
666 #define INST_ISEL_MASK 0xfc00003e
/*
 * Emulate the load/store string instructions (lswi/lswx/stswi/stswx):
 * move num_bytes between memory at EA and consecutive GPRs starting
 * at rT, four bytes per register, big-endian within each register.
 * Returns 0 on success, -EFAULT on a user memory fault, -EINVAL for
 * an invalid form or an unrecognised instruction.
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;	/* NB field or rB, by form */
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;		/* byte position within the current GPR */

	/* Early out if we are an invalid form of lswx */
	if ((instword & INST_STRING_MASK) == INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & INST_STRING_MASK) {
		case INST_LSWX:
		case INST_STSWX:
			EA += NB_RB;
			/* X-form: byte count comes from XER[25:31] */
			num_bytes = regs->xer & 0x7f;
			break;
		case INST_LSWI:
		case INST_STSWI:
			/* immediate form: NB == 0 means 32 bytes */
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		switch ((instword & INST_STRING_MASK)) {
			case INST_LSWX:
			case INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case INST_STSWI:
			case INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			/* registers wrap from r31 back to r0 */
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}
736 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
738 u32 ra,rs;
739 unsigned long tmp;
741 ra = (instword >> 16) & 0x1f;
742 rs = (instword >> 21) & 0x1f;
744 tmp = regs->gpr[rs];
745 tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
746 tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
747 tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
748 regs->gpr[ra] = tmp;
750 return 0;
753 static int emulate_isel(struct pt_regs *regs, u32 instword)
755 u8 rT = (instword >> 21) & 0x1f;
756 u8 rA = (instword >> 16) & 0x1f;
757 u8 rB = (instword >> 11) & 0x1f;
758 u8 BC = (instword >> 6) & 0x1f;
759 u8 bit;
760 unsigned long tmp;
762 tmp = (rA == 0) ? 0 : regs->gpr[rA];
763 bit = (regs->ccr >> (31 - BC)) & 0x1;
765 regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
767 return 0;
/*
 * Dispatch illegal-instruction emulation for user mode.  Returns 0 on
 * success (caller advances the NIP), -EFAULT if a memory access
 * faulted, -EINVAL if the instruction is not one we emulate.
 */
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	/* Only emulate user-mode, big-endian execution. */
	if (!user_mode(regs) || (regs->msr & MSR_LE))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & INST_DCBA_MASK) == INST_DCBA)
		return 0;

	/* Emulate the mcrxr insn.  */
	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		/* copy XER[0:3] into the chosen CR field, then clear them */
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
		return emulate_string_inst(regs, instword);

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & INST_POPCNTB_MASK) == INST_POPCNTB) {
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & INST_ISEL_MASK) == INST_ISEL) {
		return emulate_isel(regs, instword);
	}

	return -EINVAL;
}
/* BUG()/WARN() trap addresses are only valid from kernel text. */
int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

/*
 * 0x700 program check: dispatch between IEEE FP exceptions, trap
 * instructions (BUG/WARN/breakpoints), and illegal or privileged
 * instructions, trying emulation before signalling.
 */
void __kprobes program_check_exception(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		return;
	}
	if (reason & REASON_TRAP) {
		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			return;
		if (debugger_bpt(regs))
			return;

		/* kernel WARN(): report and step over the trap insn */
		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			return;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		return;
	}

	local_irq_enable();

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurences etc. -dgibson 31/Mar/2003 */
	switch (do_mathemu(regs)) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1: {
			int code = 0;
			code = __parse_fpscr(current->thread.fpscr.val);
			_exception(SIGFPE, regs, code, regs->nip);
			return;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
	/* fall through on any other errors */
#endif /* CONFIG_MATH_EMULATION */

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			return;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			return;
		}
	}

	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
/*
 * 0x600 alignment exception: emulate/fix the unaligned access unless
 * the task requested SIGBUS via PR_UNALIGN_SIGBUS, otherwise deliver
 * the appropriate signal (user) or take the kernel fixup path.
 */
void alignment_exception(struct pt_regs *regs)
{
	int sig, code, fixed = 0;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		return;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);
}
/* Kernel stack overflow detected on exception entry — always fatal. */
void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

/* Exception taken while state could not be saved — report and die. */
void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

/* Debug helper: dump the current syscall number, PC/LR and result. */
void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
	       current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}

/* FP unavailable raised from kernel mode — the kernel must not use FP. */
void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
}
960 void altivec_unavailable_exception(struct pt_regs *regs)
962 if (user_mode(regs)) {
963 /* A user program has executed an altivec instruction,
964 but this kernel doesn't support altivec. */
965 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
966 return;
969 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
970 "%lx at %lx\n", regs->trap, regs->nip);
971 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
/*
 * VSX unavailable exception: SIGILL for user programs (kernel built
 * without VSX support); fatal when raised from kernel mode.
 */
void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed an vsx instruction,
		   but this kernel doesn't support vsx. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

/* Performance monitor interrupt: forward to the registered handler. */
void performance_monitor_exception(struct pt_regs *regs)
{
	perf_irq(regs);
}
993 #ifdef CONFIG_8xx
/*
 * 8xx software FPU emulation entry: run the full math emulator or
 * the minimal 8xx emulator depending on config, then either fake a
 * single step or deliver the matching signal.
 */
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
#if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU)
	int errcode;
#endif

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);

	switch (errcode) {
	case 0:
		/* emulated OK; honour any pending single-step */
		emulate_single_step(regs);
		return;
	case 1: {
			/* emulation raised an FP exception condition */
			int code = 0;
			code = __parse_fpscr(current->thread.fpscr.val);
			_exception(SIGFPE, regs, code, regs->nip);
			return;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	default:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	errcode = Soft_emulate_8xx(regs);
	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
#else
	/* no emulator configured: the instruction is simply illegal */
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
#endif
}
1047 #endif /* CONFIG_8xx */
1049 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
/*
 * BookE/4xx debug exception: handles instruction-completion
 * (single-step, DBSR_IC) and data address compare (DAC) events,
 * clearing the hardware condition before signalling.
 */
void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	if (debug_status & DBSR_IC) {	/* instruction completion */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		/* kprobes and an attached debugger get first refusal */
		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			/* stop requesting single steps for this task */
			current->thread.dbcr0 &= ~DBCR0_IC;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		regs->msr &= ~MSR_DE;

		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W |
								DBCR0_IDM);
		} else {
			/* Disable DAC interupts */
			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R |
						DBSR_DAC1W | DBCR0_IDM));

			/* Clear the DAC event */
			mtspr(SPRN_DBSR, (DBSR_DAC1R | DBSR_DAC1W));
		}
		/* Setup and send the trap to the handler */
		do_dabr(regs, mfspr(SPRN_DAC1), debug_status);
	}
}
1092 #endif /* CONFIG_4xx || CONFIG_BOOKE */
#if !defined(CONFIG_TAU_INT)
/* Thermal assist unit trap: report only (no TAU interrupt handler built). */
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */
1102 #ifdef CONFIG_ALTIVEC
/*
 * Altivec assist exception: the hardware wants software help with an
 * instruction.  Try to emulate it; on failure fall back to a signal
 * or, for unrecognised instructions, set the VSCR non-Java bit.
 */
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		if (printk_ratelimit())
			printk(KERN_ERR "Unrecognized altivec instruction "
			       "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
1134 #endif /* CONFIG_ALTIVEC */
1136 #ifdef CONFIG_VSX
/*
 * VSX assist exception: no software emulation is implemented, so
 * report it and deliver SIGILL.  Fatal when raised from kernel mode.
 */
void vsx_assist_exception(struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		printk(KERN_EMERG "VSX assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VSX assist exception", regs, SIGILL);
	}

	flush_vsx_to_thread(current);
	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
1149 #endif /* CONFIG_VSX */
1151 #ifdef CONFIG_FSL_BOOKE
/*
 * FSL BookE cache-locking exception: user-mode cache-locking
 * attempts are reported as privileged-operation SIGILL.
 */
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
1163 #endif /* CONFIG_FSL_BOOKE */
1165 #ifdef CONFIG_SPE
/*
 * SPE floating-point exception: save live SPE state, derive an FPE
 * si_code from SPEFSCR filtered by the task's fpexc_mode, then try
 * software emulation before falling back to a signal.
 */
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	/* flush live SPE register state into the thread struct */
	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	/* map the first raised+enabled condition to an si_code */
	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}
/*
 * SPE round exception: redo the rounding of the just-completed
 * instruction in software.  NIP has already advanced past it, so
 * back up 4 bytes before calling the handler.
 */
void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	/* flush live SPE register state into the thread struct */
	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;		/* point back at the instruction to emulate */
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
1247 #endif
/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#ifdef CONFIG_BOOKE_WDT
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	/* mask further watchdog interrupts by clearing TCR[WIE] */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

/* Book-E watchdog exception entry: log and defer to the (weak) handler. */
void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif
/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}
1292 void __init trap_init(void)