/* provide some functions which dump the trace buffer, in a nice way for people
 * to read it, and understand what is going on
 *
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/thread_info.h>
#include <linux/mm.h>
#include <linux/oom.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/irq.h>

#include <asm/trace.h>
#include <asm/fixed_code.h>
#include <asm/traps.h>
#include <asm/irq_handler.h>
void decode_address(char *buf, unsigned long address)
{
	struct task_struct *p;
	struct mm_struct *mm;
	unsigned long offset;
	struct rb_node *n;

#ifdef CONFIG_KALLSYMS
	unsigned long symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];
#endif

	buf += sprintf(buf, "<0x%08lx> ", address);
#ifdef CONFIG_KALLSYMS
	/* look up the address and see if we are in kernel space */
	symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);

	if (symname) {
		/* yeah! kernel space! */
		if (!modname)
			modname = delim = "";
		sprintf(buf, "{ %s%s%s%s + 0x%lx }",
			delim, modname, delim, symname,
			(unsigned long)offset);
		return;
	}
#endif
	if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
		/* Problem in fixed code section? */
		strcat(buf, "/* Maybe fixed code section */");
		return;

	} else if (address < CONFIG_BOOT_LOAD) {
		/* Problem somewhere before the kernel start address */
		strcat(buf, "/* Maybe null pointer? */");
		return;

	} else if (address >= COREMMR_BASE) {
		strcat(buf, "/* core mmrs */");
		return;

	} else if (address >= SYSMMR_BASE) {
		strcat(buf, "/* system mmrs */");
		return;

	} else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
		strcat(buf, "/* on-chip L1 ROM */");
		return;

	} else if (address >= L1_SCRATCH_START && address < L1_SCRATCH_START + L1_SCRATCH_LENGTH) {
		strcat(buf, "/* on-chip scratchpad */");
		return;

	} else if (address >= physical_mem_end && address < ASYNC_BANK0_BASE) {
		strcat(buf, "/* unconnected memory */");
		return;

	} else if (address >= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE && address < BOOT_ROM_START) {
		strcat(buf, "/* reserved memory */");
		return;

	} else if (address >= L1_DATA_A_START && address < L1_DATA_A_START + L1_DATA_A_LENGTH) {
		strcat(buf, "/* on-chip Data Bank A */");
		return;

	} else if (address >= L1_DATA_B_START && address < L1_DATA_B_START + L1_DATA_B_LENGTH) {
		strcat(buf, "/* on-chip Data Bank B */");
		return;
	}
	/*
	 * Don't walk any of the vmas if we are oopsing, it has been known
	 * to cause problems - corrupt vmas (kernel crashes) cause double faults
	 */
	if (oops_in_progress) {
		strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
		return;
	}

	/* looks like we're off in user-land, so let's walk all the
	 * mappings of all our processes and see if we can't be a wee
	 * bit more specific
	 */
	read_lock(&tasklist_lock);
	for_each_process(p) {
		struct task_struct *t;

		t = find_lock_task_mm(p);
		if (!t)
			continue;
		mm = t->mm;
		if (!down_read_trylock(&mm->mmap_sem))
			goto __continue;

		for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
			struct vm_area_struct *vma;

			vma = rb_entry(n, struct vm_area_struct, vm_rb);

			if (address >= vma->vm_start && address < vma->vm_end) {
				char _tmpbuf[256];
				char *name = t->comm;
				struct file *file = vma->vm_file;

				if (file) {
					char *d_name = d_path(&file->f_path, _tmpbuf,
						      sizeof(_tmpbuf));
					if (!IS_ERR(d_name))
						name = d_name;
				}
				/* FLAT does not have its text aligned to the start of
				 * the map while FDPIC ELF does ...
				 */

				/* before we can check flat/fdpic, we need to
				 * make sure current is valid
				 */
				if ((unsigned long)current >= FIXED_CODE_START &&
				    !((unsigned long)current & 0x3)) {
					if (current->mm &&
					    (address > current->mm->start_code) &&
					    (address < current->mm->end_code))
						offset = address - current->mm->start_code;
					else
						offset = (address - vma->vm_start) +
							 (vma->vm_pgoff << PAGE_SHIFT);

					sprintf(buf, "[ %s + 0x%lx ]", name, offset);
				} else
					sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
						name, vma->vm_start, vma->vm_end);

				up_read(&mm->mmap_sem);
				task_unlock(t);

				if (buf[0] == '\0')
					sprintf(buf, "[ %s ] dynamic memory", name);

				goto done;
			}
		}

		up_read(&mm->mmap_sem);
__continue:
		task_unlock(t);
	}
	/*
	 * we were unable to find this address anywhere,
	 * or some MMs were skipped because they were in use.
	 */
	sprintf(buf, "/* kernel dynamic memory */");

done:
	read_unlock(&tasklist_lock);
}
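
/*
 * Typical use of decode_address() (a sketch only; the real callers are the
 * dump routines further down in this file): the caller supplies a buffer
 * large enough for the "<0x%08lx> " prefix plus the annotation, e.g.
 *
 *	char buf[150];
 *
 *	decode_address(buf, fp->pc);
 *	pr_notice(" PC  : %s\n", buf);
 *
 * Nothing here bounds-checks 'buf'; the fixed-size buffers used by the
 * callers below are assumed to be big enough.
 */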
#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
/*
 * Similar to get_user, do some address checking, then dereference
 * Return true on success, false on bad address
 */
bool get_mem16(unsigned short *val, unsigned short *address)
{
	unsigned long addr = (unsigned long)address;

	/* Check for odd addresses */
	if (addr & 0x1)
		return false;

	switch (bfin_mem_access_type(addr, 2)) {
	case BFIN_MEM_ACCESS_CORE:
	case BFIN_MEM_ACCESS_CORE_ONLY:
		*val = *address;
		return true;
	case BFIN_MEM_ACCESS_DMA:
		dma_memcpy(val, address, 2);
		return true;
	case BFIN_MEM_ACCESS_ITEST:
		isram_memcpy(val, address, 2);
		return true;
	default: /* invalid access */
		return false;
	}
}
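
/*
 * Sketch of how get_mem16() is meant to be used (see dump_bfin_mem() below
 * for the real caller): always check the return value, since the address
 * may point at memory that cannot be read safely.
 *
 *	unsigned short val;
 *
 *	if (get_mem16(&val, addr))
 *		pr_cont(" %04x ", val);
 *	else
 *		pr_cont(" ???? ");
 */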
bool get_instruction(unsigned int *val, unsigned short *address)
{
	unsigned long addr = (unsigned long)address;
	unsigned short opcode0, opcode1;

	/* Check for odd addresses */
	if (addr & 0x1)
		return false;

	/* MMR region will never have instructions */
	if (addr >= SYSMMR_BASE)
		return false;

	/* Scratchpad will never have instructions */
	if (addr >= L1_SCRATCH_START && addr < L1_SCRATCH_START + L1_SCRATCH_LENGTH)
		return false;

	/* Data banks will never have instructions */
	if (addr >= BOOT_ROM_START + BOOT_ROM_LENGTH && addr < L1_CODE_START)
		return false;

	if (!get_mem16(&opcode0, address))
		return false;

	/* was this a 32-bit instruction? If so, get the next 16 bits */
	if ((opcode0 & 0xc000) == 0xc000) {
		if (!get_mem16(&opcode1, address + 1))
			return false;
		*val = (opcode0 << 16) + opcode1;
	} else
		*val = opcode0;

	return true;
}
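
/*
 * Note on the 0xc000 test above (a reading aid, not a spec): the decoders in
 * this file rely on a 16-bit word with both top bits set marking the first
 * half of a 32-bit instruction, so the second 16-bit word is fetched and the
 * two halves combined.  Anything else is treated as a single 16-bit opcode.
 */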
#if defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
/*
 * decode the instruction if we are printing out the trace, as it
 * makes things easier to follow, without running it through objdump
 * Decode the change of flow, and the common load/store instructions
 * which are the main cause for faults, and discontinuities in the trace
 * buffer.
 */

#define ProgCtrl_opcode		0x0000
#define ProgCtrl_poprnd_bits	0
#define ProgCtrl_poprnd_mask	0xf
#define ProgCtrl_prgfunc_bits	4
#define ProgCtrl_prgfunc_mask	0xf
#define ProgCtrl_code_bits	8
#define ProgCtrl_code_mask	0xff
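
/*
 * Field layout implied by the *_bits/*_mask defines above (16-bit ProgCtrl
 * word); this is only a reading aid for the decoder below:
 *
 *   bits 15..8  code     (0x00 for this group)
 *   bits  7..4  prgfunc  (selects RTS/CLI/STI/JUMP/CALL/RAISE/EXCPT/...)
 *   bits  3..0  poprnd   (register number, or raise/excpt level)
 *
 * See the Blackfin Programming Reference for the authoritative encoding.
 */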
static void decode_ProgCtrl_0(unsigned int opcode)
{
	int poprnd  = ((opcode >> ProgCtrl_poprnd_bits) & ProgCtrl_poprnd_mask);
	int prgfunc = ((opcode >> ProgCtrl_prgfunc_bits) & ProgCtrl_prgfunc_mask);

	if (prgfunc == 0 && poprnd == 0)
		pr_cont("NOP");
	else if (prgfunc == 1 && poprnd == 0)
		pr_cont("RTS");
	else if (prgfunc == 1 && poprnd == 1)
		pr_cont("RTI");
	else if (prgfunc == 1 && poprnd == 2)
		pr_cont("RTX");
	else if (prgfunc == 1 && poprnd == 3)
		pr_cont("RTN");
	else if (prgfunc == 1 && poprnd == 4)
		pr_cont("RTE");
	else if (prgfunc == 2 && poprnd == 0)
		pr_cont("IDLE");
	else if (prgfunc == 2 && poprnd == 3)
		pr_cont("CSYNC");
	else if (prgfunc == 2 && poprnd == 4)
		pr_cont("SSYNC");
	else if (prgfunc == 2 && poprnd == 5)
		pr_cont("EMUEXCPT");
	else if (prgfunc == 3)
		pr_cont("CLI R%i", poprnd);
	else if (prgfunc == 4)
		pr_cont("STI R%i", poprnd);
	else if (prgfunc == 5)
		pr_cont("JUMP (P%i)", poprnd);
	else if (prgfunc == 6)
		pr_cont("CALL (P%i)", poprnd);
	else if (prgfunc == 7)
		pr_cont("CALL (PC + P%i)", poprnd);
	else if (prgfunc == 8)
		pr_cont("JUMP (PC + P%i)", poprnd);
	else if (prgfunc == 9)
		pr_cont("RAISE %i", poprnd);
	else if (prgfunc == 10)
		pr_cont("EXCPT %i", poprnd);
	else
		pr_cont("0x%04x", opcode);
}
#define BRCC_opcode		0x1000
#define BRCC_offset_bits	0
#define BRCC_offset_mask	0x3ff
#define BRCC_B_bits		10
#define BRCC_B_mask		0x1
#define BRCC_T_bits		11
#define BRCC_T_mask		0x1
#define BRCC_code_bits		12
#define BRCC_code_mask		0xf

static void decode_BRCC_0(unsigned int opcode)
{
	int B = ((opcode >> BRCC_B_bits) & BRCC_B_mask);
	int T = ((opcode >> BRCC_T_bits) & BRCC_T_mask);

	pr_cont("IF %sCC JUMP pcrel %s", T ? "" : "!", B ? "(BP)" : "");
}
#define CALLa_opcode	0xe2000000
#define CALLa_addr_bits	0
#define CALLa_addr_mask	0xffffff
#define CALLa_S_bits	24
#define CALLa_S_mask	0x1
#define CALLa_code_bits	25
#define CALLa_code_mask	0x7f

static void decode_CALLa_0(unsigned int opcode)
{
	int S = ((opcode >> (CALLa_S_bits - 16)) & CALLa_S_mask);

	if (S)
		pr_cont("CALL pcrel");
	else
		pr_cont("JUMP.L");
}
#define LoopSetup_opcode	0xe0800000
#define LoopSetup_eoffset_bits	0
#define LoopSetup_eoffset_mask	0x3ff
#define LoopSetup_dontcare_bits	10
#define LoopSetup_dontcare_mask	0x3
#define LoopSetup_reg_bits	12
#define LoopSetup_reg_mask	0xf
#define LoopSetup_soffset_bits	16
#define LoopSetup_soffset_mask	0xf
#define LoopSetup_c_bits	20
#define LoopSetup_c_mask	0x1
#define LoopSetup_rop_bits	21
#define LoopSetup_rop_mask	0x3
#define LoopSetup_code_bits	23
#define LoopSetup_code_mask	0x1ff

static void decode_LoopSetup_0(unsigned int opcode)
{
	int c   = ((opcode >> LoopSetup_c_bits) & LoopSetup_c_mask);
	int reg = ((opcode >> LoopSetup_reg_bits) & LoopSetup_reg_mask);
	int rop = ((opcode >> LoopSetup_rop_bits) & LoopSetup_rop_mask);

	pr_cont("LSETUP <> LC%i", c);
	if ((rop & 1) == 1)
		pr_cont("= P%i", reg);
	if ((rop & 2) == 2)
		pr_cont(" >> 0x1");
}
#define DspLDST_opcode		0x9c00
#define DspLDST_reg_bits	0
#define DspLDST_reg_mask	0x7
#define DspLDST_i_bits		3
#define DspLDST_i_mask		0x3
#define DspLDST_m_bits		5
#define DspLDST_m_mask		0x3
#define DspLDST_aop_bits	7
#define DspLDST_aop_mask	0x3
#define DspLDST_W_bits		9
#define DspLDST_W_mask		0x1
#define DspLDST_code_bits	10
#define DspLDST_code_mask	0x3f

static void decode_dspLDST_0(unsigned int opcode)
{
	int i   = ((opcode >> DspLDST_i_bits) & DspLDST_i_mask);
	int m   = ((opcode >> DspLDST_m_bits) & DspLDST_m_mask);
	int W   = ((opcode >> DspLDST_W_bits) & DspLDST_W_mask);
	int aop = ((opcode >> DspLDST_aop_bits) & DspLDST_aop_mask);
	int reg = ((opcode >> DspLDST_reg_bits) & DspLDST_reg_mask);

	if (W == 0) {
		/* Load */
		switch (m) {
		case 0:
			pr_cont("R%i", reg);
			break;
		case 1:
			pr_cont("R%i.L", reg);
			break;
		case 2:
			pr_cont("R%i.W", reg);
			break;
		}
		pr_cont(" = ");
	}

	switch (aop) {
	case 0:
		pr_cont("[I%i++]", i);
		break;
	case 1:
		pr_cont("[I%i--]", i);
		break;
	case 2:
		pr_cont("[I%i]", i);
		break;
	}

	if (W == 1)
		/* Store */
		pr_cont(" = R%i", reg);
}
#define LDST_opcode	0x9000
#define LDST_reg_bits	0
#define LDST_reg_mask	0x7
#define LDST_ptr_bits	3
#define LDST_ptr_mask	0x7
#define LDST_Z_bits	6
#define LDST_Z_mask	0x1
#define LDST_aop_bits	7
#define LDST_aop_mask	0x3
#define LDST_W_bits	9
#define LDST_W_mask	0x1
#define LDST_sz_bits	10
#define LDST_sz_mask	0x3
#define LDST_code_bits	12
#define LDST_code_mask	0xf
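
/*
 * Field layout implied by the defines above (16-bit LDST word); a reading
 * aid only, derived from the *_bits/*_mask values rather than the hardware
 * manual:
 *
 *   bits 15..12  code    bits 11..10  sz
 *   bit       9  W       bits  8..7   aop
 *   bit       6  Z       bits  5..3   ptr (P register)
 *   bits   2..0  reg
 *
 * The decoder below treats W = 0 as a load and W = 1 as a store.
 */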
static void decode_LDST_0(unsigned int opcode)
{
	int Z   = ((opcode >> LDST_Z_bits) & LDST_Z_mask);
	int W   = ((opcode >> LDST_W_bits) & LDST_W_mask);
	int sz  = ((opcode >> LDST_sz_bits) & LDST_sz_mask);
	int aop = ((opcode >> LDST_aop_bits) & LDST_aop_mask);
	int reg = ((opcode >> LDST_reg_bits) & LDST_reg_mask);
	int ptr = ((opcode >> LDST_ptr_bits) & LDST_ptr_mask);

	if (W == 0)
		pr_cont("%s%i = ", (sz == 0 && Z == 1) ? "P" : "R", reg);

	switch (sz) {
	case 1:
		pr_cont("W");
		break;
	case 2:
		pr_cont("B");
		break;
	}

	pr_cont("[P%i", ptr);

	switch (aop) {
	case 0:
		pr_cont("++");
		break;
	case 1:
		pr_cont("--");
		break;
	}
	pr_cont("]");

	if (W == 1)
		pr_cont(" = %s%i ", (sz == 0 && Z == 1) ? "P" : "R", reg);
}
#define LDSTii_opcode		0xa000
#define LDSTii_reg_bit		0
#define LDSTii_reg_mask		0x7
#define LDSTii_ptr_bit		3
#define LDSTii_ptr_mask		0x7
#define LDSTii_offset_bit	6
#define LDSTii_offset_mask	0xf
#define LDSTii_op_bit		10
#define LDSTii_op_mask		0x3
#define LDSTii_W_bit		12
#define LDSTii_W_mask		0x1
#define LDSTii_code_bit		13
#define LDSTii_code_mask	0x7

static void decode_LDSTii_0(unsigned int opcode)
{
	int reg    = ((opcode >> LDSTii_reg_bit) & LDSTii_reg_mask);
	int ptr    = ((opcode >> LDSTii_ptr_bit) & LDSTii_ptr_mask);
	int offset = ((opcode >> LDSTii_offset_bit) & LDSTii_offset_mask);
	int op     = ((opcode >> LDSTii_op_bit) & LDSTii_op_mask);
	int W      = ((opcode >> LDSTii_W_bit) & LDSTii_W_mask);

	if (W == 0)
		pr_cont("%s%i = %s[P%i + %i]", op == 3 ? "R" : "P", reg,
			op == 1 || op == 2 ? "" : "W", ptr, offset);
	else
		pr_cont("%s[P%i + %i] = %s%i", op == 0 ? "" : "W", ptr,
			offset, op == 3 ? "P" : "R", reg);
}
#define LDSTidxI_opcode		0xe4000000
#define LDSTidxI_offset_bits	0
#define LDSTidxI_offset_mask	0xffff
#define LDSTidxI_reg_bits	16
#define LDSTidxI_reg_mask	0x7
#define LDSTidxI_ptr_bits	19
#define LDSTidxI_ptr_mask	0x7
#define LDSTidxI_sz_bits	22
#define LDSTidxI_sz_mask	0x3
#define LDSTidxI_Z_bits		24
#define LDSTidxI_Z_mask		0x1
#define LDSTidxI_W_bits		25
#define LDSTidxI_W_mask		0x1
#define LDSTidxI_code_bits	26
#define LDSTidxI_code_mask	0x3f

static void decode_LDSTidxI_0(unsigned int opcode)
{
	int Z      = ((opcode >> LDSTidxI_Z_bits) & LDSTidxI_Z_mask);
	int W      = ((opcode >> LDSTidxI_W_bits) & LDSTidxI_W_mask);
	int sz     = ((opcode >> LDSTidxI_sz_bits) & LDSTidxI_sz_mask);
	int reg    = ((opcode >> LDSTidxI_reg_bits) & LDSTidxI_reg_mask);
	int ptr    = ((opcode >> LDSTidxI_ptr_bits) & LDSTidxI_ptr_mask);
	int offset = ((opcode >> LDSTidxI_offset_bits) & LDSTidxI_offset_mask);

	if (W == 0)
		pr_cont("%s%i = ", sz == 0 && Z == 1 ? "P" : "R", reg);

	if (sz == 1)
		pr_cont("W");
	if (sz == 2)
		pr_cont("B");

	pr_cont("[P%i + %s0x%x]", ptr, offset & 0x20 ? "-" : "",
		(offset & 0x1f) << 2);

	if (W == 0 && sz != 0) {
		if (Z)
			pr_cont(" (X)");
		else
			pr_cont(" (Z)");
	}

	if (W == 1)
		pr_cont("= %s%i", (sz == 0 && Z == 1) ? "P" : "R", reg);
}
static void decode_opcode(unsigned int opcode)
{
#ifdef CONFIG_BUG
	if (opcode == BFIN_BUG_OPCODE)
		pr_cont("BUG");
	else
#endif
	if ((opcode & 0xffffff00) == ProgCtrl_opcode)
		decode_ProgCtrl_0(opcode);
	else if ((opcode & 0xfffff000) == BRCC_opcode)
		decode_BRCC_0(opcode);
	else if ((opcode & 0xfffff000) == 0x2000)
		pr_cont("JUMP.S");
	else if ((opcode & 0xfe000000) == CALLa_opcode)
		decode_CALLa_0(opcode);
	else if ((opcode & 0xff8000C0) == LoopSetup_opcode)
		decode_LoopSetup_0(opcode);
	else if ((opcode & 0xfffffc00) == DspLDST_opcode)
		decode_dspLDST_0(opcode);
	else if ((opcode & 0xfffff000) == LDST_opcode)
		decode_LDST_0(opcode);
	else if ((opcode & 0xffffe000) == LDSTii_opcode)
		decode_LDSTii_0(opcode);
	else if ((opcode & 0xfc000000) == LDSTidxI_opcode)
		decode_LDSTidxI_0(opcode);
	else if (opcode & 0xffff0000)
		pr_cont("0x%08x", opcode);
	else
		pr_cont("0x%04x", opcode);
}
#define BIT_MULTI_INS 0x08000000
static void decode_instruction(unsigned short *address)
{
	unsigned int opcode;

	if (!get_instruction(&opcode, address))
		return;

	decode_opcode(opcode);

	/* If things are a 32-bit instruction, it has the possibility of being
	 * a multi-issue instruction (a 32-bit, and 2 16-bit instructions)
	 * This test collides with the unlink instruction, so disallow that
	 */
	if ((opcode & 0xc0000000) == 0xc0000000 &&
	    (opcode & BIT_MULTI_INS) &&
	    (opcode & 0xe8000000) != 0xe8000000) {
		pr_cont(" || ");
		if (!get_instruction(&opcode, address + 2))
			return;
		decode_opcode(opcode);
		pr_cont(" || ");
		if (!get_instruction(&opcode, address + 3))
			return;
		decode_opcode(opcode);
	}
}
#endif
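
/*
 * Reading aid for the trace dump below (describes the loop, not the hardware
 * spec): each pop of TBUF yields the target of a change of flow and then its
 * source, so entries are printed in "Target"/"Source" pairs, with the source
 * instruction run through decode_instruction().  The extra FAULT lines are
 * synthesised because, as the in-function comment notes, the faulting
 * instruction never commits and therefore never reaches the trace buffer.
 */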
void dump_bfin_trace_buffer(void)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int tflags, i = 0, fault = 0;
	char buf[150];
	unsigned short *addr;
	unsigned int cpu = raw_smp_processor_id();
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	int j, index;
#endif

	trace_buffer_save(tflags);

	pr_notice("Hardware Trace:\n");

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	pr_notice("WARNING: Expanded trace turned on - can not trace exceptions\n");
#endif

	if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
		for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			pr_notice("%4i Target : %s\n", i, buf);
			/* Normally, the faulting instruction doesn't go into
			 * the trace buffer, (since it doesn't commit), so
			 * we print out the fault address here
			 */
			if (!fault && addr == ((unsigned short *)evt_ivhw)) {
				addr = (unsigned short *)bfin_read_TBUF();
				decode_address(buf, (unsigned long)addr);
				pr_notice("  FAULT : %s ", buf);
				decode_instruction(addr);
				pr_cont("\n");
				fault = 1;
				continue;
			}
			if (!fault && addr == (unsigned short *)trap &&
			    (cpu_pda[cpu].seqstat & SEQSTAT_EXCAUSE) > VEC_EXCPT15) {
				decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
				pr_notice("  FAULT : %s ", buf);
				decode_instruction((unsigned short *)cpu_pda[cpu].icplb_fault_addr);
				pr_cont("\n");
				fault = 1;
			}
			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			pr_notice("  Source : %s ", buf);
			decode_instruction(addr);
			pr_cont("\n");
		}
	}

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	if (trace_buff_offset)
		index = trace_buff_offset / 4;
	else
		index = EXPAND_LEN;

	j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
	while (j) {
		decode_address(buf, software_trace_buff[index]);
		pr_notice("%4i Target : %s\n", i, buf);
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		decode_address(buf, software_trace_buff[index]);
		pr_notice("  Source : %s ", buf);
		decode_instruction((unsigned short *)software_trace_buff[index]);
		pr_cont("\n");
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		j--;
		i++;
	}
#endif

	trace_buffer_restore(tflags);
#endif
}
EXPORT_SYMBOL(dump_bfin_trace_buffer);
void dump_bfin_process(struct pt_regs *fp)
{
	/* We should be able to look at fp->ipend, but we don't push it on the
	 * stack all the time, so do this until we fix that */
	unsigned int context = bfin_read_IPEND();

	if (oops_in_progress)
		pr_emerg("Kernel OOPS in progress\n");

	if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR)
		pr_notice("HW Error context\n");
	else if (context & 0x0020)
		pr_notice("Deferred Exception context\n");
	else if (context & 0x3FC0)
		pr_notice("Interrupt context\n");
	else if (context & 0x4000)
		pr_notice("Deferred Interrupt context\n");
	else if (context & 0x8000)
		pr_notice("Kernel process context\n");

	/* Because we are crashing, and pointers could be bad, we check things
	 * pretty closely before we use them
	 */
	if ((unsigned long)current >= FIXED_CODE_START &&
	    !((unsigned long)current & 0x3) && current->pid) {
		pr_notice("CURRENT PROCESS:\n");
		if (current->comm >= (char *)FIXED_CODE_START)
			pr_notice("COMM=%s PID=%d",
				current->comm, current->pid);
		else
			pr_notice("COMM= invalid");

		pr_cont("  CPU=%d\n", current_thread_info()->cpu);
		if (!((unsigned long)current->mm & 0x3) &&
		    (unsigned long)current->mm >= FIXED_CODE_START) {
			pr_notice("TEXT = 0x%p-0x%p        DATA = 0x%p-0x%p\n",
				(void *)current->mm->start_code,
				(void *)current->mm->end_code,
				(void *)current->mm->start_data,
				(void *)current->mm->end_data);
			pr_notice(" BSS = 0x%p-0x%p  USER-STACK = 0x%p\n\n",
				(void *)current->mm->end_data,
				(void *)current->mm->brk,
				(void *)current->mm->start_stack);
		} else
			pr_notice("invalid mm\n");
	} else
		pr_notice("No Valid process in current context\n");
}
void dump_bfin_mem(struct pt_regs *fp)
{
	unsigned short *addr, *erraddr, val = 0, err = 0;
	char sti = 0, buf[6];

	erraddr = (void *)fp->pc;

	pr_notice("return address: [0x%p]; contents of:", erraddr);

	for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10;
	     addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
	     addr++) {
		if (!((unsigned long)addr & 0xF))
			pr_notice("0x%p: ", addr);

		if (!get_mem16(&val, addr)) {
			val = 0;
			sprintf(buf, "????");
		} else
			sprintf(buf, "%04x", val);

		if (addr == erraddr) {
			pr_cont("[%s]", buf);
			err = val;
		} else
			pr_cont(" %s ", buf);

		/* Do any previous instructions turn on interrupts? */
		if (addr <= erraddr &&				/* in the past */
		    ((val >= 0x0040 && val <= 0x0047) ||	/* STI instruction */
		      val == 0x017b))				/* [SP++] = RETI */
			sti = 1;
	}

	pr_cont("\n");

	/* Hardware error interrupts can be deferred */
	if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR &&
	    oops_in_progress)) {
		pr_notice("Looks like this was a deferred error - sorry\n");
#ifndef CONFIG_DEBUG_HWERR
		pr_notice("The remaining message may be meaningless\n");
		pr_notice("You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
#endif
		/* If we are handling only one peripheral interrupt
		 * and current mm and pid are valid, and the last error
		 * was in that user space process's text area
		 * print it out - because that is where the problem exists
		 */
		if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) &&
		    (current->pid && current->mm)) {
			/* And the last RETI points to the current userspace context */
			if ((fp + 1)->pc >= current->mm->start_code &&
			    (fp + 1)->pc <= current->mm->end_code) {
				pr_notice("It might be better to look around here :\n");
				pr_notice("-------------------------------------------\n");
				show_regs(fp + 1);
				pr_notice("-------------------------------------------\n");
			}
		}
	}
}
void show_regs(struct pt_regs *fp)
{
	char buf[150];
	struct irqaction *action;
	unsigned int i;
	unsigned long flags = 0;
	unsigned int cpu = raw_smp_processor_id();
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();

	pr_notice("\n");
	show_regs_print_info(KERN_NOTICE);

	if (CPUID != bfin_cpuid())
		pr_notice("Compiled for cpu family 0x%04x (Rev %d), "
			"but running on:0x%04x (Rev %d)\n",
			CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());

	pr_notice("ADSP-%s-0.%d",
		CPU, bfin_compiled_revid());

	if (bfin_compiled_revid() != bfin_revid())
		pr_cont("(Detected 0.%d)", bfin_revid());

	pr_cont(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
		get_cclk()/1000000, get_sclk()/1000000,
#ifdef CONFIG_MPU
		"mpu on"
#else
		"mpu off"
#endif
	);

	pr_notice("%s", linux_banner);

	pr_notice("\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
	pr_notice(" SEQSTAT: %08lx  IPEND: %04lx  IMASK: %04lx  SYSCFG: %04lx\n",
		(long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
	if (fp->ipend & EVT_IRPTEN)
		pr_notice("  Global Interrupts Disabled (IPEND[4])\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
			EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
		pr_notice("  Peripheral interrupts masked off\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
		pr_notice("  Kernel interrupts masked off\n");
	if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
		pr_notice("  HWERRCAUSE: 0x%lx\n",
			(fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);

		/* If the error was from the EBIU, print it out */
		if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
			pr_notice("  EBIU Error Reason  : 0x%04x\n",
				bfin_read_EBIU_ERRMST());
			pr_notice("  EBIU Error Address : 0x%08x\n",
				bfin_read_EBIU_ERRADD());
		}
	}

	pr_notice("  EXCAUSE   : 0x%lx\n",
		fp->seqstat & SEQSTAT_EXCAUSE);
	for (i = 2; i <= 15 ; i++) {
		if (fp->ipend & (1 << i)) {
			if (i != 4) {
				decode_address(buf, bfin_read32(EVT0 + 4*i));
				pr_notice("  physical IVG%i asserted : %s\n", i, buf);
			} else
				pr_notice("  interrupts disabled\n");
		}
	}

	/* if no interrupts are going off, don't print this out */
	if (fp->ipend & ~0x3F) {
		for (i = 0; i < (NR_IRQS - 1); i++) {
			struct irq_desc *desc = irq_to_desc(i);
			if (!in_atomic)
				raw_spin_lock_irqsave(&desc->lock, flags);

			action = desc->action;
			if (!action)
				goto unlock;

			decode_address(buf, (unsigned int)action->handler);
			pr_notice("  logical irq %3d mapped  : %s", i, buf);
			for (action = action->next; action; action = action->next) {
				decode_address(buf, (unsigned int)action->handler);
				pr_cont(", %s", buf);
			}
			pr_cont("\n");
unlock:
			if (!in_atomic)
				raw_spin_unlock_irqrestore(&desc->lock, flags);
		}
	}
	decode_address(buf, fp->rete);
	pr_notice(" RETE: %s\n", buf);
	decode_address(buf, fp->retn);
	pr_notice(" RETN: %s\n", buf);
	decode_address(buf, fp->retx);
	pr_notice(" RETX: %s\n", buf);
	decode_address(buf, fp->rets);
	pr_notice(" RETS: %s\n", buf);
	decode_address(buf, fp->pc);
	pr_notice(" PC  : %s\n", buf);

	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
	    (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
		decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
		pr_notice("DCPLB_FAULT_ADDR: %s\n", buf);
		decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
		pr_notice("ICPLB_FAULT_ADDR: %s\n", buf);
	}
958 pr_notice("PROCESSOR STATE:\n");
959 pr_notice(" R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
960 fp
->r0
, fp
->r1
, fp
->r2
, fp
->r3
);
961 pr_notice(" R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
962 fp
->r4
, fp
->r5
, fp
->r6
, fp
->r7
);
963 pr_notice(" P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n",
964 fp
->p0
, fp
->p1
, fp
->p2
, fp
->p3
);
965 pr_notice(" P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n",
966 fp
->p4
, fp
->p5
, fp
->fp
, (long)fp
);
967 pr_notice(" LB0: %08lx LT0: %08lx LC0: %08lx\n",
968 fp
->lb0
, fp
->lt0
, fp
->lc0
);
969 pr_notice(" LB1: %08lx LT1: %08lx LC1: %08lx\n",
970 fp
->lb1
, fp
->lt1
, fp
->lc1
);
971 pr_notice(" B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n",
972 fp
->b0
, fp
->l0
, fp
->m0
, fp
->i0
);
973 pr_notice(" B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n",
974 fp
->b1
, fp
->l1
, fp
->m1
, fp
->i1
);
975 pr_notice(" B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n",
976 fp
->b2
, fp
->l2
, fp
->m2
, fp
->i2
);
977 pr_notice(" B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n",
978 fp
->b3
, fp
->l3
, fp
->m3
, fp
->i3
);
979 pr_notice("A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n",
980 fp
->a0w
, fp
->a0x
, fp
->a1w
, fp
->a1x
);
982 pr_notice("USP : %08lx ASTAT: %08lx\n",