/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/rbtree.h>

#include <asm/traps.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/irq_handler.h>
#include <linux/irq.h>
#include <asm/trace.h>
#include <asm/fixed_code.h>
#ifdef CONFIG_KGDB
# include <linux/kgdb.h>

# define CHK_DEBUGGER_TRAP() \
	do { \
		kgdb_handle_exception(trapnr, sig, info.si_code, fp); \
	} while (0)
# define CHK_DEBUGGER_TRAP_MAYBE() \
	do { \
		if (kgdb_connected) \
			CHK_DEBUGGER_TRAP(); \
	} while (0)
#else
# define CHK_DEBUGGER_TRAP() do { } while (0)
# define CHK_DEBUGGER_TRAP_MAYBE() do { } while (0)
#endif
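
/*
 * Note: when CONFIG_KGDB is not enabled, both CHK_DEBUGGER_TRAP() variants
 * expand to empty do/while statements, so the exception handlers below can
 * invoke them unconditionally without any further #ifdefs.
 */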
#ifdef CONFIG_DEBUG_VERBOSE
#define verbose_printk(fmt, arg...) \
	printk(fmt, ##arg)
#else
#define verbose_printk(fmt, arg...) \
	({ if (0) printk(fmt, ##arg); 0; })
#endif
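
/*
 * Note: the disabled variant keeps the printk() inside "if (0)" so the
 * compiler still type-checks the format string and arguments even when
 * CONFIG_DEBUG_VERBOSE is off, while generating no code for the call.
 */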
#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
u32 last_seqstat;
#ifdef CONFIG_DEBUG_MMRS_MODULE
EXPORT_SYMBOL(last_seqstat);
#endif
#endif
/* Initiate the event table handler */
void __init trap_init(void)
{
	CSYNC();
	bfin_write_EVT3(trap);
	CSYNC();
}
static void decode_address(char *buf, unsigned long address)
{
#ifdef CONFIG_DEBUG_VERBOSE
	struct task_struct *p;
	struct mm_struct *mm;
	unsigned long flags, offset;
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
	struct rb_node *n;

#ifdef CONFIG_KALLSYMS
	unsigned long symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];
#endif
	buf += sprintf(buf, "<0x%08lx> ", address);

#ifdef CONFIG_KALLSYMS
	/* look up the address and see if we are in kernel space */
	symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);

	if (symname) {
		/* yeah! kernel space! */
		if (!modname)
			modname = delim = "";
		sprintf(buf, "{ %s%s%s%s + 0x%lx }",
			delim, modname, delim, symname,
			(unsigned long)offset);
		return;
	}
#endif
	if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
		/* Problem in fixed code section? */
		strcat(buf, "/* Maybe fixed code section */");
		return;

	} else if (address < CONFIG_BOOT_LOAD) {
		/* Problem somewhere before the kernel start address */
		strcat(buf, "/* Maybe null pointer? */");
		return;

	} else if (address >= COREMMR_BASE) {
		strcat(buf, "/* core mmrs */");
		return;

	} else if (address >= SYSMMR_BASE) {
		strcat(buf, "/* system mmrs */");
		return;

	} else if (address >= L1_ROM_START &&
		   address < L1_ROM_START + L1_ROM_LENGTH) {
		strcat(buf, "/* on-chip L1 ROM */");
		return;
	}
	/* looks like we're off in user-land, so let's walk all the
	 * mappings of all our processes and see if we can't be a wee
	 * bit more specific
	 */
	write_lock_irqsave(&tasklist_lock, flags);
	for_each_process(p) {
		mm = (in_atomic ? p->mm : get_task_mm(p));
		if (!mm)
			continue;
		for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
			struct vm_area_struct *vma;

			vma = rb_entry(n, struct vm_area_struct, vm_rb);

			if (address >= vma->vm_start && address < vma->vm_end) {
				char _tmpbuf[256];
				char *name = p->comm;
				struct file *file = vma->vm_file;

				if (file) {
					char *d_name = d_path(&file->f_path,
							_tmpbuf, sizeof(_tmpbuf));
					if (!IS_ERR(d_name))
						name = d_name;
				}
				/* FLAT does not have its text aligned to the start of
				 * the map while FDPIC ELF does ...
				 */

				/* before we can check flat/fdpic, we need to
				 * make sure current is valid
				 */
				if ((unsigned long)current >= FIXED_CODE_START &&
				    !((unsigned long)current & 0x3)) {
					if (current->mm &&
					    (address > current->mm->start_code) &&
					    (address < current->mm->end_code))
						offset = address - current->mm->start_code;
					else
						offset = (address - vma->vm_start) +
							 (vma->vm_pgoff << PAGE_SHIFT);

					sprintf(buf, "[ %s + 0x%lx ]", name, offset);
				} else
					sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
						name, vma->vm_start, vma->vm_end);
				sprintf(buf, "[ %s ] dynamic memory", name);
	/* we were unable to find this address anywhere */
	sprintf(buf, "/* kernel dynamic memory */");

	write_unlock_irqrestore(&tasklist_lock, flags);
#endif
}
asmlinkage void double_fault_c(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int j;
	trace_buffer_save(j);
#endif

	oops_in_progress = 1;
#ifdef CONFIG_DEBUG_VERBOSE
	printk(KERN_EMERG "Double Fault\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) {
		unsigned int cpu = raw_smp_processor_id();
		char buf[150];

		decode_address(buf, cpu_pda[cpu].retx_doublefault);
		printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n",
			(unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf);
		decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr);
		printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf);
		decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr);
		printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf);
		decode_address(buf, fp->retx);
		printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf);
	} else
#endif
	{
		dump_bfin_process(fp);
		dump_bfin_trace_buffer();
	}
#endif
	panic("Double Fault - unrecoverable event");
}
static int kernel_mode_regs(struct pt_regs *regs)
{
	return regs->ipend & 0xffc0;
}
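
/*
 * Note: bits 6..15 of IPEND track the IVG6-IVG15 event levels, so a
 * non-zero (ipend & 0xffc0) means the saved context was executing inside
 * a kernel event (interrupt or exception) rather than user code.
 */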
asmlinkage notrace void trap_c(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int j;
#endif
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
	unsigned int cpu = raw_smp_processor_id();
#endif
	const char *strerror = NULL;
	int sig = 0;
	siginfo_t info;
	unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE;

	trace_buffer_save(j);
#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
	last_seqstat = (u32)fp->seqstat;
#endif
	/* Important - be very careful dereferencing pointers - will lead to
	 * double faults if the stack has become corrupt
	 */

	/* trap_c() will be called for exceptions. During exception
	 * processing, the pc value should be set with retx value.
	 * With this change we can clean up some code in signal.c - TODO
	 */
	fp->orig_pc = fp->retx;
	/* printk("exception: 0x%x, ipend=%x, reti=%x, retx=%x\n",
		trapnr, fp->ipend, fp->pc, fp->retx); */

	/* send the appropriate signal to the user program */
	switch (trapnr) {

	/* This table works in conjunction with the one in ./mach-common/entry.S
	 * Some exceptions are handled there (in assembly, in exception space)
	 * Some are handled here, (in C, in interrupt space)
	 * Some, like CPLB, are handled in both, where the normal path is
	 * handled in assembly/exception space, and the error path is handled
	 * here
	 */
	/* 0x00 - Linux Syscall, getting here is an error */
	/* 0x01 - userspace gdb breakpoint, handled here */
		info.si_code = TRAP_ILLTRAP;
		CHK_DEBUGGER_TRAP_MAYBE();
		/* Check if this is a breakpoint in kernel space */
		if (kernel_mode_regs(fp))

	/* 0x03 - User Defined, userspace stack overflow */
		info.si_code = SEGV_STACKFLOW;
		strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();

	/* 0x02 - KGDB initial connection and break signal trap */
		info.si_code = TRAP_ILLTRAP;
	/* 0x04 - User Defined */
	/* 0x05 - User Defined */
	/* 0x06 - User Defined */
	/* 0x07 - User Defined */
	/* 0x08 - User Defined */
	/* 0x09 - User Defined */
	/* 0x0A - User Defined */
	/* 0x0B - User Defined */
	/* 0x0C - User Defined */
	/* 0x0D - User Defined */
	/* 0x0E - User Defined */
	/* 0x0F - User Defined */
	/* If we got here, it is most likely that someone was trying to use a
	 * custom exception handler, and it is not actually installed properly
	 */
	case VEC_EXCPT04 ... VEC_EXCPT15:
		info.si_code = ILL_ILLPARAOP;
		strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
	/* 0x10 HW Single step, handled here */
		info.si_code = TRAP_STEP;
		CHK_DEBUGGER_TRAP_MAYBE();
		/* Check if this is a single step in kernel space */
		if (kernel_mode_regs(fp))

	/* 0x11 - Trace Buffer Full, handled here */
		info.si_code = TRAP_TRACEFLOW;
		strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
	/* 0x12 - Reserved, Caught by default */
	/* 0x13 - Reserved, Caught by default */
	/* 0x14 - Reserved, Caught by default */
	/* 0x15 - Reserved, Caught by default */
	/* 0x16 - Reserved, Caught by default */
	/* 0x17 - Reserved, Caught by default */
	/* 0x18 - Reserved, Caught by default */
	/* 0x19 - Reserved, Caught by default */
	/* 0x1A - Reserved, Caught by default */
	/* 0x1B - Reserved, Caught by default */
	/* 0x1C - Reserved, Caught by default */
	/* 0x1D - Reserved, Caught by default */
	/* 0x1E - Reserved, Caught by default */
	/* 0x1F - Reserved, Caught by default */
	/* 0x20 - Reserved, Caught by default */
	/* 0x21 - Undefined Instruction, handled here */
		if (kernel_mode_regs(fp)) {
			switch (report_bug(fp->pc, fp)) {
			case BUG_TRAP_TYPE_NONE:
			case BUG_TRAP_TYPE_WARN:
				dump_bfin_trace_buffer();
			case BUG_TRAP_TYPE_BUG:
				/* call to panic() will dump trace, and it is
				 * off at this point, so it won't be clobbered
				 */
			}
		}
		info.si_code = ILL_ILLOPC;
		strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
	/* 0x22 - Illegal Instruction Combination, handled here */
		info.si_code = ILL_ILLPARAOP;
		strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();

	/* 0x23 - Data CPLB protection violation, handled here */
		info.si_code = ILL_CPLB_VI;
		strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();

	/* 0x24 - Data access misaligned, handled here */
		info.si_code = BUS_ADRALN;
		strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();

	/* 0x25 - Unrecoverable Event, handled here */
		info.si_code = ILL_ILLEXCPT;
		strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
	/* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr,
	 * error case is handled here */
		info.si_code = BUS_ADRALN;
		strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE);

	/* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */
		info.si_code = ILL_CPLB_MULHIT;
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
		if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START)
			strerror = KERN_NOTICE "NULL pointer access\n";
		else
#endif
			strerror = KERN_NOTICE EXC_0x27(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
	/* 0x28 - Emulation Watchpoint, handled here */
		info.si_code = TRAP_WATCHPT;
		pr_debug(EXC_0x28(KERN_DEBUG));
		CHK_DEBUGGER_TRAP_MAYBE();
		/* Check if this is a watchpoint in kernel space */
		if (kernel_mode_regs(fp))

	/* 0x29 - Instruction fetch access error (535 only) */
	case VEC_ISTRU_VL:	/* ADSP-BF535 only (MH) */
		info.si_code = BUS_OPFETCH;
		strerror = KERN_NOTICE "BF535: VEC_ISTRU_VL\n";
		CHK_DEBUGGER_TRAP_MAYBE();
	/* 0x29 - Reserved, Caught by default */

	/* 0x2A - Instruction fetch misaligned, handled here */
		info.si_code = BUS_ADRALN;
		strerror = KERN_NOTICE EXC_0x2A(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();

	/* 0x2B - Instruction CPLB protection violation, handled here */
		info.si_code = ILL_CPLB_VI;
		strerror = KERN_NOTICE EXC_0x2B(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();

	/* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */
		info.si_code = ILL_CPLB_MISS;
		strerror = KERN_NOTICE EXC_0x2C(KERN_NOTICE);
	/* 0x2D - Instruction CPLB Multiple Hits, handled here */
	case VEC_CPLB_I_MHIT:
		info.si_code = ILL_CPLB_MULHIT;
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
		if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START)
			strerror = KERN_NOTICE "Jump to NULL address\n";
		else
#endif
			strerror = KERN_NOTICE EXC_0x2D(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();

	/* 0x2E - Illegal use of Supervisor Resource, handled here */
		info.si_code = ILL_PRVOPC;
		strerror = KERN_NOTICE EXC_0x2E(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
	/* 0x2F - Reserved, Caught by default */
	/* 0x30 - Reserved, Caught by default */
	/* 0x31 - Reserved, Caught by default */
	/* 0x32 - Reserved, Caught by default */
	/* 0x33 - Reserved, Caught by default */
	/* 0x34 - Reserved, Caught by default */
	/* 0x35 - Reserved, Caught by default */
	/* 0x36 - Reserved, Caught by default */
	/* 0x37 - Reserved, Caught by default */
	/* 0x38 - Reserved, Caught by default */
	/* 0x39 - Reserved, Caught by default */
	/* 0x3A - Reserved, Caught by default */
	/* 0x3B - Reserved, Caught by default */
	/* 0x3C - Reserved, Caught by default */
	/* 0x3D - Reserved, Caught by default */
	/* 0x3E - Reserved, Caught by default */
	/* 0x3F - Reserved, Caught by default */

	case VEC_HWERR:
		info.si_code = BUS_ADRALN;
		switch (fp->seqstat & SEQSTAT_HWERRCAUSE) {
		/* System MMR Error */
		case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR):
			info.si_code = BUS_ADRALN;
			strerror = KERN_NOTICE HWC_x2(KERN_NOTICE);
			break;
		/* External Memory Addressing Error */
		case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR):
			info.si_code = BUS_ADRERR;
			strerror = KERN_NOTICE HWC_x3(KERN_NOTICE);
			break;
		/* Performance Monitor Overflow */
		case (SEQSTAT_HWERRCAUSE_PERF_FLOW):
			strerror = KERN_NOTICE HWC_x12(KERN_NOTICE);
			break;
		/* RAISE 5 instruction */
		case (SEQSTAT_HWERRCAUSE_RAISE_5):
			printk(KERN_NOTICE HWC_x18(KERN_NOTICE));
			break;
		default: /* Reserved */
			printk(KERN_NOTICE HWC_default(KERN_NOTICE));
			break;
		}
		CHK_DEBUGGER_TRAP_MAYBE();
	/*
	 * We should be handling all known exception types above,
	 * if we get here we hit a reserved one, so panic
	 */
	default:
		info.si_code = ILL_ILLPARAOP;
		verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n",
			(fp->seqstat & SEQSTAT_EXCAUSE));
		CHK_DEBUGGER_TRAP_MAYBE();
	}
	/* If the fault was caused by a kernel thread, or interrupt handler
	 * we will kernel panic, so the system reboots.
	 */
	if (kernel_mode_regs(fp) || (current && !current->mm)) {
		oops_in_progress = 1;
	}

	if (sig != SIGTRAP) {
		if (strerror)
			verbose_printk(strerror);

		dump_bfin_process(fp);
		/* Print out the trace buffer if it makes sense */
#ifndef CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE
		if (trapnr == VEC_CPLB_I_M || trapnr == VEC_CPLB_M)
			verbose_printk(KERN_NOTICE "No trace since you do not have "
				"CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE enabled\n\n");
		else
#endif
			dump_bfin_trace_buffer();

		if (oops_in_progress) {
			/* Dump the current kernel stack */
			verbose_printk(KERN_NOTICE "Kernel Stack\n");
			show_stack(current, NULL);
#ifndef CONFIG_ACCESS_CHECK
			verbose_printk(KERN_EMERG "Please turn on "
				"CONFIG_ACCESS_CHECK\n");
#endif
			panic("Kernel exception");
		} else {
#ifdef CONFIG_DEBUG_VERBOSE
			unsigned long *stack;
			/* Dump the user space stack */
			stack = (unsigned long *)rdusp();
			verbose_printk(KERN_NOTICE "Userspace Stack\n");
			show_stack(NULL, stack);
#endif
		}
	}
#ifdef CONFIG_IPIPE
	if (!ipipe_trap_notify(fp->seqstat & 0x3f, fp))
#endif
	{
		info.si_signo = sig;
		info.si_errno = 0;
		info.si_addr = (void __user *)fp->pc;
		force_sig_info(sig, &info, current);
	}
	if ((ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8)) ||
	    (ANOMALY_05000281 && trapnr == VEC_HWERR) ||
	    (ANOMALY_05000189 && (trapnr == VEC_CPLB_I_VL || trapnr == VEC_CPLB_VL)))
		fp->pc = SAFE_USER_INSTRUCTION;

	trace_buffer_restore(j);
}
/* Typical exception handling routines */

#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
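
/*
 * Note: EXPAND_LEN evaluates to one less than
 * (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256, i.e. the last valid
 * index into the software-expanded trace buffer used by the dump code below.
 */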
/*
 * Similar to get_user, do some address checking, then dereference.
 * Return true on success, false on bad address.
 */
static bool get_instruction(unsigned short *val, unsigned short *address)
{
	unsigned long addr = (unsigned long)address;

	/* Check for odd addresses */
	if (addr & 0x1)
		return false;

	/* MMR region will never have instructions */
	if (addr >= SYSMMR_BASE)
		return false;

	switch (bfin_mem_access_type(addr, 2)) {
	case BFIN_MEM_ACCESS_CORE:
	case BFIN_MEM_ACCESS_CORE_ONLY:
		*val = *address;
		break;
	case BFIN_MEM_ACCESS_DMA:
		dma_memcpy(val, address, 2);
		break;
	case BFIN_MEM_ACCESS_ITEST:
		isram_memcpy(val, address, 2);
		break;
	default: /* invalid access */
		return false;
	}

	return true;
}
/*
 * decode the instruction if we are printing out the trace, as it
 * makes things easier to follow, without running it through objdump
 * These are the normal instructions which cause change of flow, which
 * would be at the source of the trace buffer
 */
#if defined(CONFIG_DEBUG_VERBOSE) && defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
static void decode_instruction(unsigned short *address)
{
	unsigned short opcode;

	if (get_instruction(&opcode, address)) {
		if (opcode == 0x0010)
			verbose_printk("RTS");
		else if (opcode == 0x0011)
			verbose_printk("RTI");
		else if (opcode == 0x0012)
			verbose_printk("RTX");
		else if (opcode == 0x0013)
			verbose_printk("RTN");
		else if (opcode == 0x0014)
			verbose_printk("RTE");
		else if (opcode == 0x0025)
			verbose_printk("EMUEXCPT");
		else if (opcode >= 0x0040 && opcode <= 0x0047)
			verbose_printk("STI R%i", opcode & 7);
		else if (opcode >= 0x0050 && opcode <= 0x0057)
			verbose_printk("JUMP (P%i)", opcode & 7);
		else if (opcode >= 0x0060 && opcode <= 0x0067)
			verbose_printk("CALL (P%i)", opcode & 7);
		else if (opcode >= 0x0070 && opcode <= 0x0077)
			verbose_printk("CALL (PC+P%i)", opcode & 7);
		else if (opcode >= 0x0080 && opcode <= 0x0087)
			verbose_printk("JUMP (PC+P%i)", opcode & 7);
		else if (opcode >= 0x0090 && opcode <= 0x009F)
			verbose_printk("RAISE 0x%x", opcode & 0xF);
		else if (opcode >= 0x00A0 && opcode <= 0x00AF)
			verbose_printk("EXCPT 0x%x", opcode & 0xF);
		else if ((opcode >= 0x1000 && opcode <= 0x13FF) ||
			 (opcode >= 0x1800 && opcode <= 0x1BFF))
			verbose_printk("IF !CC JUMP");
		else if ((opcode >= 0x1400 && opcode <= 0x17ff) ||
			 (opcode >= 0x1c00 && opcode <= 0x1fff))
			verbose_printk("IF CC JUMP");
		else if (opcode >= 0x2000 && opcode <= 0x2fff)
			verbose_printk("JUMP.S");
		else if (opcode >= 0xe080 && opcode <= 0xe0ff)
			verbose_printk("LSETUP");
		else if (opcode >= 0xe200 && opcode <= 0xe2ff)
			verbose_printk("JUMP.L");
		else if (opcode >= 0xe300 && opcode <= 0xe3ff)
			verbose_printk("CALL pcrel");
		else
			verbose_printk("0x%04x", opcode);
	}
}
#endif
void dump_bfin_trace_buffer(void)
{
#ifdef CONFIG_DEBUG_VERBOSE
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int tflags, i = 0;
	char buf[150];
	unsigned short *addr;
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	int j, index;
#endif

	trace_buffer_save(tflags);
	printk(KERN_NOTICE "Hardware Trace:\n");

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	printk(KERN_NOTICE "WARNING: Expanded trace turned on - can not trace exceptions\n");
#endif

	if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
		for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
			decode_address(buf, (unsigned long)bfin_read_TBUF());
			printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			printk(KERN_NOTICE "     Source : %s ", buf);
			decode_instruction(addr);
			printk("\n");
		}
	}
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	if (trace_buff_offset)
		index = trace_buff_offset / 4;
	else
		index = EXPAND_LEN;

	j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
	while (j--) {
		decode_address(buf, software_trace_buff[index]);
		printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
		index = index > 0 ? index - 1 : EXPAND_LEN;
		decode_address(buf, software_trace_buff[index]);
		printk(KERN_NOTICE "     Source : %s ", buf);
		decode_instruction((unsigned short *)software_trace_buff[index]);
		printk("\n");
		index = index > 0 ? index - 1 : EXPAND_LEN;
		i++;
	}
#endif

	trace_buffer_restore(tflags);
#endif
#endif
}
EXPORT_SYMBOL(dump_bfin_trace_buffer);
int is_valid_bugaddr(unsigned long addr)
{
	unsigned short opcode;

	if (!get_instruction(&opcode, (unsigned short *)addr))
		return 0;

	return opcode == BFIN_BUG_OPCODE;
}
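
/*
 * Note: BUG() on Blackfin is emitted as a single reserved 16-bit opcode
 * (BFIN_BUG_OPCODE); report_bug(), invoked from the undefined-instruction
 * handler above, calls back here so a trap is only treated as a BUG when
 * that exact opcode is really present at the faulting address.
 */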
/*
 * Checks to see if the address pointed to is either a
 * 16-bit CALL instruction, or a 32-bit CALL instruction
 */
static bool is_bfin_call(unsigned short *addr)
{
	unsigned short opcode = 0, *ins_addr;
	ins_addr = (unsigned short *)addr;

	if (!get_instruction(&opcode, ins_addr))
		return false;

	if ((opcode >= 0x0060 && opcode <= 0x0067) ||
	    (opcode >= 0x0070 && opcode <= 0x0077))
		return true;

	ins_addr--;
	if (!get_instruction(&opcode, ins_addr))
		return false;

	if (opcode >= 0xE300 && opcode <= 0xE3FF)
		return true;

	return false;
}
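
/*
 * Note: the opcode ranges above mirror decode_instruction(): 0x0060-0x0067
 * and 0x0070-0x0077 are the 16-bit "CALL (Pn)" / "CALL (PC+Pn)" forms, and
 * 0xE300-0xE3FF is the first word of the 32-bit pc-relative CALL, which is
 * why the second check looks one 16-bit word back from the given address.
 */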
void show_stack(struct task_struct *task, unsigned long *stack)
{
	unsigned int *addr, *endstack, *fp = 0, *frame;
	unsigned short *ins_addr;
	char buf[150];
	unsigned int i, j, ret_addr, frame_no = 0;
	/*
	 * If we have been passed a specific stack, use that one, otherwise
	 * if we have been passed a task structure, use that, otherwise
	 * use the stack of where the variable "stack" exists
	 */

	if (stack == NULL) {
		if (task) {
			/* We know this is a kernel stack, so this is the start/end */
			stack = (unsigned long *)task->thread.ksp;
			endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE);
		} else {
			/* print out the existing stack info */
			stack = (unsigned long *)&stack;
			endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
		}
	} else
		endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
	printk(KERN_NOTICE "Stack info:\n");
	decode_address(buf, (unsigned int)stack);
	printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf);

	if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) {
		printk(KERN_NOTICE "Invalid stack pointer\n");
		return;
	}
	/* First thing is to look for a frame pointer */
	for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) {
		ins_addr = (unsigned short *)*addr;
		if (is_bfin_call(ins_addr))

		/* Let's check to see if it is a frame pointer */
		while (fp >= (addr - 1) && fp < endstack
		       && fp && ((unsigned int) fp & 0x3) == 0)
			fp = (unsigned int *)*fp;
		if (fp == 0 || fp == endstack) {

	printk(KERN_NOTICE " FP: (0x%p)\n", fp);
	/*
	 * Now that we think we know where things are, we
	 * walk the stack again, this time printing things out;
	 * in case there is no frame pointer, we still look for
	 * valid return addresses
	 */

	/* First time print out data, next time, print out symbols */
	for (j = 0; j <= 1; j++) {
		if (j)
			printk(KERN_NOTICE "Return addresses in stack:\n");
		else
			printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack);
		for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0;
		     addr < endstack; addr++, i++) {

			if (!j && i % 8 == 0)
				printk(KERN_NOTICE "%p:", addr);

			/* if it is an odd address, or zero, just skip it */
			if (*addr & 0x1 || !*addr)
				continue;

			ins_addr = (unsigned short *)*addr;

			/* Go back one instruction, and see if it is a CALL */
			ins_addr--;
			ret_addr = is_bfin_call(ins_addr);

			if (!j && stack == (unsigned long *)addr)
				printk("[%08x]", *addr);
			decode_address(buf, (unsigned int)*addr);
			printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf);
			printk(KERN_NOTICE " address : %s\n", buf);
			printk("<%08x>", *addr);
			else if (fp == addr) {
				printk("(%08x)", *addr);
				fp = (unsigned int *)*addr;
			}
			printk(" %08x ", *addr);
}
EXPORT_SYMBOL(show_stack);
void dump_stack(void)
{
	unsigned long stack;
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int tflags;
#endif

	trace_buffer_save(tflags);
	dump_bfin_trace_buffer();
	show_stack(current, &stack);
	trace_buffer_restore(tflags);
}
EXPORT_SYMBOL(dump_stack);
void dump_bfin_process(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_VERBOSE
	/* We should be able to look at fp->ipend, but we don't push it on the
	 * stack all the time, so do this until we fix that */
	unsigned int context = bfin_read_IPEND();

	if (oops_in_progress)
		verbose_printk(KERN_EMERG "Kernel OOPS in progress\n");

	if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR)
		verbose_printk(KERN_NOTICE "HW Error context\n");
	else if (context & 0x0020)
		verbose_printk(KERN_NOTICE "Deferred Exception context\n");
	else if (context & 0x3FC0)
		verbose_printk(KERN_NOTICE "Interrupt context\n");
	else if (context & 0x4000)
		verbose_printk(KERN_NOTICE "Deferred Interrupt context\n");
	else if (context & 0x8000)
		verbose_printk(KERN_NOTICE "Kernel process context\n");
	/* Because we are crashing, and pointers could be bad, we check things
	 * pretty closely before we use them
	 */
	if ((unsigned long)current >= FIXED_CODE_START &&
	    !((unsigned long)current & 0x3) && current->pid) {
		verbose_printk(KERN_NOTICE "CURRENT PROCESS:\n");
		if (current->comm >= (char *)FIXED_CODE_START)
			verbose_printk(KERN_NOTICE "COMM=%s PID=%d\n",
				current->comm, current->pid);
		else
			verbose_printk(KERN_NOTICE "COMM= invalid\n");

		printk(KERN_NOTICE "CPU = %d\n", current_thread_info()->cpu);
		if (!((unsigned long)current->mm & 0x3) &&
		    (unsigned long)current->mm >= FIXED_CODE_START)
			verbose_printk(KERN_NOTICE
				"TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n"
				" BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n",
				(void *)current->mm->start_code,
				(void *)current->mm->end_code,
				(void *)current->mm->start_data,
				(void *)current->mm->end_data,
				(void *)current->mm->end_data,
				(void *)current->mm->brk,
				(void *)current->mm->start_stack);
		else
			verbose_printk(KERN_NOTICE "invalid mm\n");
	} else
		verbose_printk(KERN_NOTICE
			"No Valid process in current context\n");
#endif
}
void dump_bfin_mem(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_VERBOSE
	unsigned short *addr, *erraddr, val = 0, err = 0;
	char sti = 0, buf[6];

	erraddr = (void *)fp->pc;

	verbose_printk(KERN_NOTICE "return address: [0x%p]; contents of:", erraddr);
	for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10;
	     addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
	     addr++) {
		if (!((unsigned long)addr & 0xF))
			verbose_printk(KERN_NOTICE "0x%p: ", addr);

		if (!get_instruction(&val, addr)) {
			sprintf(buf, "????");
		} else
			sprintf(buf, "%04x", val);

		if (addr == erraddr) {
			verbose_printk("[%s]", buf);
		} else
			verbose_printk(" %s ", buf);
		/* Do any previous instructions turn on interrupts? */
		if (addr <= erraddr &&				/* in the past */
		    ((val >= 0x0040 && val <= 0x0047) ||	/* STI instruction */
		     val == 0x017b))				/* [SP++] = RETI */
			sti = 1;
	}

	verbose_printk("\n");
	/* Hardware error interrupts can be deferred */
	if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR &&
	    oops_in_progress)) {
		verbose_printk(KERN_NOTICE "Looks like this was a deferred error - sorry\n");
#ifndef CONFIG_DEBUG_HWERR
		verbose_printk(KERN_NOTICE
			"The remaining message may be meaningless\n"
			"You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
#else
		/* If we are handling only one peripheral interrupt
		 * and current mm and pid are valid, and the last error
		 * was in that user space process's text area
		 * print it out - because that is where the problem exists
		 */
		if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) &&
		    (current->pid && current->mm)) {
			/* And the last RETI points to the current userspace context */
			if ((fp + 1)->pc >= current->mm->start_code &&
			    (fp + 1)->pc <= current->mm->end_code) {
				verbose_printk(KERN_NOTICE "It might be better to look around here:\n");
				verbose_printk(KERN_NOTICE "-------------------------------------------\n");
				show_regs(fp + 1);
				verbose_printk(KERN_NOTICE "-------------------------------------------\n");
			}
		}
#endif
#endif
}
void show_regs(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_VERBOSE
	char buf[150];
	struct irqaction *action;
	unsigned int i;
	unsigned long flags = 0;
	unsigned int cpu = raw_smp_processor_id();
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
	verbose_printk(KERN_NOTICE "\n");
	if (CPUID != bfin_cpuid())
		verbose_printk(KERN_NOTICE "Compiled for cpu family 0x%04x (Rev %d), "
			"but running on:0x%04x (Rev %d)\n",
			CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());

	verbose_printk(KERN_NOTICE "ADSP-%s-0.%d",
		CPU, bfin_compiled_revid());

	if (bfin_compiled_revid() != bfin_revid())
		verbose_printk("(Detected 0.%d)", bfin_revid());

	verbose_printk(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
		get_cclk()/1000000, get_sclk()/1000000,
	verbose_printk(KERN_NOTICE "%s", linux_banner);

	verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
	verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n",
		(long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
	if (fp->ipend & EVT_IRPTEN)
		verbose_printk(KERN_NOTICE " Global Interrupts Disabled (IPEND[4])\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
			EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
		verbose_printk(KERN_NOTICE " Peripheral interrupts masked off\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
		verbose_printk(KERN_NOTICE " Kernel interrupts masked off\n");
	if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
		verbose_printk(KERN_NOTICE " HWERRCAUSE: 0x%lx\n",
			(fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
		/* If the error was from the EBIU, print it out */
		if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
			verbose_printk(KERN_NOTICE " EBIU Error Reason : 0x%04x\n",
				bfin_read_EBIU_ERRMST());
			verbose_printk(KERN_NOTICE " EBIU Error Address : 0x%08x\n",
				bfin_read_EBIU_ERRADD());
		}
	}
	verbose_printk(KERN_NOTICE " EXCAUSE : 0x%lx\n",
		fp->seqstat & SEQSTAT_EXCAUSE);
	for (i = 2; i <= 15 ; i++) {
		if (fp->ipend & (1 << i)) {
			decode_address(buf, bfin_read32(EVT0 + 4*i));
			verbose_printk(KERN_NOTICE " physical IVG%i asserted : %s\n", i, buf);

		verbose_printk(KERN_NOTICE " interrupts disabled\n");
	/* if no interrupts are going off, don't print this out */
	if (fp->ipend & ~0x3F) {
		for (i = 0; i < (NR_IRQS - 1); i++) {
			if (!in_atomic)
				spin_lock_irqsave(&irq_desc[i].lock, flags);

			action = irq_desc[i].action;
			if (!action)
				goto unlock;

			decode_address(buf, (unsigned int)action->handler);
			verbose_printk(KERN_NOTICE " logical irq %3d mapped : %s", i, buf);
			for (action = action->next; action; action = action->next) {
				decode_address(buf, (unsigned int)action->handler);
				verbose_printk(", %s", buf);
			}
			verbose_printk("\n");
 unlock:
			if (!in_atomic)
				spin_unlock_irqrestore(&irq_desc[i].lock, flags);
		}
	}
	decode_address(buf, fp->rete);
	verbose_printk(KERN_NOTICE " RETE: %s\n", buf);
	decode_address(buf, fp->retn);
	verbose_printk(KERN_NOTICE " RETN: %s\n", buf);
	decode_address(buf, fp->retx);
	verbose_printk(KERN_NOTICE " RETX: %s\n", buf);
	decode_address(buf, fp->rets);
	verbose_printk(KERN_NOTICE " RETS: %s\n", buf);
	decode_address(buf, fp->pc);
	verbose_printk(KERN_NOTICE " PC : %s\n", buf);
	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
	    (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
		decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
		verbose_printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf);
		decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
		verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf);
	}
	verbose_printk(KERN_NOTICE "PROCESSOR STATE:\n");
	verbose_printk(KERN_NOTICE " R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
		fp->r0, fp->r1, fp->r2, fp->r3);
	verbose_printk(KERN_NOTICE " R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
		fp->r4, fp->r5, fp->r6, fp->r7);
	verbose_printk(KERN_NOTICE " P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n",
		fp->p0, fp->p1, fp->p2, fp->p3);
	verbose_printk(KERN_NOTICE " P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n",
		fp->p4, fp->p5, fp->fp, (long)fp);
	verbose_printk(KERN_NOTICE " LB0: %08lx LT0: %08lx LC0: %08lx\n",
		fp->lb0, fp->lt0, fp->lc0);
	verbose_printk(KERN_NOTICE " LB1: %08lx LT1: %08lx LC1: %08lx\n",
		fp->lb1, fp->lt1, fp->lc1);
	verbose_printk(KERN_NOTICE " B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n",
		fp->b0, fp->l0, fp->m0, fp->i0);
	verbose_printk(KERN_NOTICE " B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n",
		fp->b1, fp->l1, fp->m1, fp->i1);
	verbose_printk(KERN_NOTICE " B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n",
		fp->b2, fp->l2, fp->m2, fp->i2);
	verbose_printk(KERN_NOTICE " B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n",
		fp->b3, fp->l3, fp->m3, fp->i3);
	verbose_printk(KERN_NOTICE "A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n",
		fp->a0w, fp->a0x, fp->a1w, fp->a1x);

	verbose_printk(KERN_NOTICE "USP : %08lx ASTAT: %08lx\n",
		rdusp(), fp->astat);
	verbose_printk(KERN_NOTICE "\n");
#endif
}
#ifdef CONFIG_SYS_BFIN_SPINLOCK_L1
asmlinkage int sys_bfin_spinlock(int *spinlock) __attribute__((l1_text));
#endif

static DEFINE_SPINLOCK(bfin_spinlock_lock);
asmlinkage int sys_bfin_spinlock(int *p)
{
	int ret, tmp = 0;

	spin_lock(&bfin_spinlock_lock);	/* This would also hold kernel preemption. */
	ret = get_user(tmp, p);
	if (likely(ret == 0)) {
		if (unlikely(tmp))
			ret = 1;
		else
			put_user(1, p);
	}
	spin_unlock(&bfin_spinlock_lock);

	return ret;
}
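
/*
 * Note (informal): serialized by bfin_spinlock_lock, this syscall acts as an
 * atomic test-and-set for user space - it returns the previous value of the
 * lock word and leaves it set - presumably because Blackfin user mode has no
 * suitable atomic instruction of its own.
 */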
int bfin_request_exception(unsigned int exception, void (*handler)(void))
{
	void (*curr_handler)(void);

	if (exception > 0x3F)
		return -EINVAL;

	curr_handler = ex_table[exception];

	if (curr_handler != ex_replaceable)
		return -EBUSY;

	ex_table[exception] = handler;

	return 0;
}
EXPORT_SYMBOL(bfin_request_exception);
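
/*
 * Example (hypothetical caller): a driver claiming one of the user-defined
 * exception vectors could do
 *
 *	err = bfin_request_exception(0x04, my_excpt04_handler);
 *	...
 *	bfin_free_exception(0x04, my_excpt04_handler);
 *
 * where my_excpt04_handler is the caller's own routine; only entries still
 * set to ex_replaceable can be claimed.
 */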
int bfin_free_exception(unsigned int exception, void (*handler)(void))
{
	void (*curr_handler)(void);

	if (exception > 0x3F)
		return -EINVAL;

	curr_handler = ex_table[exception];

	if (curr_handler != handler)
		return -EBUSY;

	ex_table[exception] = ex_replaceable;

	return 0;
}
EXPORT_SYMBOL(bfin_free_exception);
void panic_cplb_error(int cplb_panic, struct pt_regs *fp)
{
	switch (cplb_panic) {
	case CPLB_NO_UNLOCKED:
		printk(KERN_EMERG "All CPLBs are locked\n");
		break;
	case CPLB_PROT_VIOL:
		break;
	case CPLB_NO_ADDR_MATCH:
		break;
	case CPLB_UNKNOWN_ERR:
		printk(KERN_EMERG "Unknown CPLB Exception\n");
		break;
	}

	oops_in_progress = 1;

	dump_bfin_process(fp);
	panic("Unrecoverable event");
}