1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
5 * This is an implementation of a DWARF unwinder. Its main purpose is
6 * for generating stacktrace information. Based on the DWARF 3
7 * specification from http://www.dwarfstd.org.
10 * - DWARF64 doesn't work.
11 * - Registers with DWARF_VAL_OFFSET rules aren't handled properly.
15 #include <linux/kernel.h>
17 #include <linux/list.h>
18 #include <linux/mempool.h>
20 #include <linux/elf.h>
21 #include <linux/ftrace.h>
22 #include <linux/module.h>
23 #include <linux/slab.h>
24 #include <asm/dwarf.h>
25 #include <asm/unwinder.h>
26 #include <asm/sections.h>
27 #include <linux/unaligned.h>
28 #include <asm/stacktrace.h>
30 /* Reserve enough memory for two stack frames */
31 #define DWARF_FRAME_MIN_REQ 2
32 /* ... with 4 registers per frame. */
33 #define DWARF_REG_MIN_REQ (DWARF_FRAME_MIN_REQ * 4)
/*
 * NOTE(review): this file extraction is garbled (statements split across
 * lines, interior lines missing). Code tokens below are left byte-identical;
 * only comments are added.
 */
/* Slab cache + mempool backing struct dwarf_frame allocations. */
35 static struct kmem_cache
*dwarf_frame_cachep
;
36 static mempool_t
*dwarf_frame_pool
;
/* Slab cache + mempool backing struct dwarf_reg allocations. */
38 static struct kmem_cache
*dwarf_reg_cachep
;
39 static mempool_t
*dwarf_reg_pool
;
/* Red-black tree of parsed CIEs, protected by dwarf_cie_lock. */
41 static struct rb_root cie_root
;
42 static DEFINE_SPINLOCK(dwarf_cie_lock
);
/* Red-black tree of parsed FDEs, protected by dwarf_fde_lock. */
44 static struct rb_root fde_root
;
45 static DEFINE_SPINLOCK(dwarf_fde_lock
);
/* Last CIE looked up — cheap one-entry cache (see dwarf_lookup_cie()). */
47 static struct dwarf_cie
*cached_cie
;
/* Nonzero once dwarf_unwinder_init() has completed successfully. */
49 static unsigned int dwarf_unwinder_ready
;
52 * dwarf_frame_alloc_reg - allocate memory for a DWARF register
53 * @frame: the DWARF frame whose list of registers we insert on
54 * @reg_num: the register number
56 * Allocate space for, and initialise, a dwarf reg from
57 * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
58 * dwarf registers for @frame.
60 * Return the initialised DWARF reg.
62 static struct dwarf_reg
*dwarf_frame_alloc_reg(struct dwarf_frame
*frame
,
65 struct dwarf_reg
*reg
;
/* GFP_ATOMIC: this runs from the unwinder, which may be in atomic context. */
67 reg
= mempool_alloc(dwarf_reg_pool
, GFP_ATOMIC
);
69 printk(KERN_WARNING
"Unable to allocate a DWARF register\n");
71 * Let's just bomb hard here, we have no way to
77 reg
->number
= reg_num
;
/*
 * NOTE(review): "®" below is a mojibake artefact of "&reg" from the
 * extraction — the source tokens are preserved byte-identically here,
 * but this must be restored to "&reg" against the pristine file.
 */
81 list_add(®
->link
, &frame
->reg_list
);
/*
 * dwarf_frame_free_regs - release every dwarf_reg attached to @frame
 * back to dwarf_reg_pool. Uses the _safe iterator because each entry
 * is freed while walking the list.
 */
86 static void dwarf_frame_free_regs(struct dwarf_frame
*frame
)
88 struct dwarf_reg
*reg
, *n
;
90 list_for_each_entry_safe(reg
, n
, &frame
->reg_list
, link
) {
92 mempool_free(reg
, dwarf_reg_pool
);
97 * dwarf_frame_reg - return a DWARF register
98 * @frame: the DWARF frame to search in for @reg_num
99 * @reg_num: the register number to search for
101 * Lookup and return the dwarf reg @reg_num for this frame. Return
102 * NULL if @reg_num is an invalid register number.
104 static struct dwarf_reg
*dwarf_frame_reg(struct dwarf_frame
*frame
,
105 unsigned int reg_num
)
107 struct dwarf_reg
*reg
;
/* Linear scan — the per-frame register list is short and unsorted. */
109 list_for_each_entry(reg
, &frame
->reg_list
, link
) {
110 if (reg
->number
== reg_num
)
118 * dwarf_read_addr - read dwarf data
119 * @src: source address of data
120 * @dst: destination address to store the data to
122 * Read 'n' bytes from @src, where 'n' is the size of an address on
123 * the native machine. We return the number of bytes read, which
124 * should always be 'n'. We also have to be careful when reading
125 * from @src and writing to @dst, because they can be arbitrarily
126 * aligned. Return 'n' - the number of bytes read.
128 static inline int dwarf_read_addr(unsigned long *src
, unsigned long *dst
)
/*
 * NOTE(review): a 32-bit value is read but sizeof(unsigned long *) is
 * returned. These agree on 32-bit SH; consistent with the file header's
 * "DWARF64 doesn't work" caveat.
 */
130 u32 val
= get_unaligned(src
);
131 put_unaligned(val
, dst
);
132 return sizeof(unsigned long *);
136 * dwarf_read_uleb128 - read unsigned LEB128 data
137 * @addr: the address where the ULEB128 data is stored
138 * @ret: address to store the result
140 * Decode an unsigned LEB128 encoded datum. The algorithm is taken
141 * from Appendix C of the DWARF 3 spec. For information on the
142 * encodings refer to section "7.6 - Variable Length Data". Return
143 * the number of bytes read.
145 static inline unsigned long dwarf_read_uleb128(char *addr
, unsigned int *ret
)
/* Read one byte per iteration; low 7 bits are payload, bit 7 continues. */
156 byte
= __raw_readb(addr
);
160 result
|= (byte
& 0x7f) << shift
;
173 * dwarf_read_leb128 - read signed LEB128 data
174 * @addr: the address of the LEB128 encoded data
175 * @ret: address to store the result
177 * Decode signed LEB128 data. The algorithm is taken from Appendix
178 * C of the DWARF 3 spec. Return the number of bytes read.
180 static inline unsigned long dwarf_read_leb128(char *addr
, int *ret
)
192 byte
= __raw_readb(addr
);
194 result
|= (byte
& 0x7f) << shift
;
202 /* The number of bits in a signed integer. */
203 num_bits
= 8 * sizeof(result
);
/* Sign-extend if the final byte had its sign bit (0x40) set. */
205 if ((shift
< num_bits
) && (byte
& 0x40))
/*
 * NOTE(review): left-shifting -1 is formally undefined behaviour in C;
 * the intent is to set all bits above 'shift' (sign extension). Works
 * on GCC/two's-complement, but (~0UL << shift) would be cleaner.
 */
206 result
|= (-1 << shift
);
214 * dwarf_read_encoded_value - return the decoded value at @addr
215 * @addr: the address of the encoded value
216 * @val: where to write the decoded value
217 * @encoding: the encoding with which we can decode @addr
219 * GCC emits encoded address in the .eh_frame FDE entries. Decode
220 * the value at @addr using @encoding. The decoded value is written
221 * to @val and the number of bytes read is returned.
223 static int dwarf_read_encoded_value(char *addr
, unsigned long *val
,
226 unsigned long decoded_addr
= 0;
/* High nibble (0x70 mask) selects the base the value is relative to. */
229 switch (encoding
& 0x70) {
230 case DW_EH_PE_absptr
:
/* PC-relative: the base is the address of the encoded value itself. */
233 decoded_addr
= (unsigned long)addr
;
236 pr_debug("encoding=0x%x\n", (encoding
& 0x70));
/* An all-zero format nibble means "native pointer" — treat as udata4 here. */
240 if ((encoding
& 0x07) == 0x00)
241 encoding
|= DW_EH_PE_udata4
;
/* Low nibble (0x0f mask) selects the storage format of the value. */
243 switch (encoding
& 0x0f) {
244 case DW_EH_PE_sdata4
:
245 case DW_EH_PE_udata4
:
247 decoded_addr
+= get_unaligned((u32
*)addr
);
248 __raw_writel(decoded_addr
, val
);
251 pr_debug("encoding=0x%x\n", encoding
);
259 * dwarf_entry_len - return the length of an FDE or CIE
260 * @addr: the address of the entry
261 * @len: the length of the entry
263 * Read the initial_length field of the entry and store the size of
264 * the entry in @len. We return the number of bytes read. Return a
265 * count of 0 on error.
267 static inline int dwarf_entry_len(char *addr
, unsigned long *len
)
272 initial_len
= get_unaligned((u32
*)addr
);
276 * An initial length field value in the range DW_LEN_EXT_LO -
277 * DW_LEN_EXT_HI indicates an extension, and should not be
278 * interpreted as a length. The only extension that we currently
279 * understand is the use of DWARF64 addresses.
281 if (initial_len
>= DW_EXT_LO
&& initial_len
<= DW_EXT_HI
) {
283 * The 64-bit length field immediately follows the
284 * compulsory 32-bit length field.
286 if (initial_len
== DW_EXT_DWARF64
) {
/*
 * NOTE(review): "(u64 *)addr + 4" advances by 4 * sizeof(u64) = 32
 * bytes, yet the comment above says the 64-bit length immediately
 * follows the 32-bit field (offset 4). Looks like it should be
 * "(u64 *)(addr + 4)" — verify against the pristine source before
 * changing; moot on SH since DWARF64 is unsupported anyway.
 */
287 *len
= get_unaligned((u64
*)addr
+ 4);
290 printk(KERN_WARNING
"Unknown DWARF extension\n");
300 * dwarf_lookup_cie - locate the cie
301 * @cie_ptr: pointer to help with lookup
303 static struct dwarf_cie
*dwarf_lookup_cie(unsigned long cie_ptr
)
305 struct rb_node
**rb_node
= &cie_root
.rb_node
;
306 struct dwarf_cie
*cie
= NULL
;
/* The rb-tree and cached_cie are shared; take the CIE lock (IRQ-safe). */
309 spin_lock_irqsave(&dwarf_cie_lock
, flags
);
312 * We've cached the last CIE we looked up because chances are
313 * that the FDE wants this CIE.
315 if (cached_cie
&& cached_cie
->cie_pointer
== cie_ptr
) {
/* Standard rb-tree descent keyed on cie_pointer. */
321 struct dwarf_cie
*cie_tmp
;
323 cie_tmp
= rb_entry(*rb_node
, struct dwarf_cie
, node
);
326 if (cie_ptr
== cie_tmp
->cie_pointer
) {
/* Hit: remember it for the next lookup. */
328 cached_cie
= cie_tmp
;
331 if (cie_ptr
< cie_tmp
->cie_pointer
)
332 rb_node
= &(*rb_node
)->rb_left
;
334 rb_node
= &(*rb_node
)->rb_right
;
339 spin_unlock_irqrestore(&dwarf_cie_lock
, flags
);
344 * dwarf_lookup_fde - locate the FDE that covers pc
345 * @pc: the program counter
347 static struct dwarf_fde
*dwarf_lookup_fde(unsigned long pc
)
349 struct rb_node
**rb_node
= &fde_root
.rb_node
;
350 struct dwarf_fde
*fde
= NULL
;
/* FDE tree is shared with module load/unload; IRQ-safe lock required. */
353 spin_lock_irqsave(&dwarf_fde_lock
, flags
);
356 struct dwarf_fde
*fde_tmp
;
357 unsigned long tmp_start
, tmp_end
;
359 fde_tmp
= rb_entry(*rb_node
, struct dwarf_fde
, node
);
/* Each FDE covers [initial_location, initial_location + address_range). */
362 tmp_start
= fde_tmp
->initial_location
;
363 tmp_end
= fde_tmp
->initial_location
+ fde_tmp
->address_range
;
365 if (pc
< tmp_start
) {
366 rb_node
= &(*rb_node
)->rb_left
;
372 rb_node
= &(*rb_node
)->rb_right
;
377 spin_unlock_irqrestore(&dwarf_fde_lock
, flags
);
383 * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
384 * @insn_start: address of the first instruction
385 * @insn_end: address of the last instruction
386 * @cie: the CIE for this function
387 * @fde: the FDE for this function
388 * @frame: the instructions calculate the CFA for this frame
389 * @pc: the program counter of the address we're interested in
391 * Execute the Call Frame instruction sequence starting at
392 * @insn_start and ending at @insn_end. The instructions describe
393 * how to calculate the Canonical Frame Address of a stackframe.
394 * Store the results in @frame.
396 static int dwarf_cfa_execute_insns(unsigned char *insn_start
,
397 unsigned char *insn_end
,
398 struct dwarf_cie
*cie
,
399 struct dwarf_fde
*fde
,
400 struct dwarf_frame
*frame
,
404 unsigned char *current_insn
;
405 unsigned int count
, delta
, reg
, expr_len
, offset
;
406 struct dwarf_reg
*regp
;
408 current_insn
= insn_start
;
/* Stop once we pass @pc: only rules in effect at @pc matter. */
410 while (current_insn
< insn_end
&& frame
->pc
<= pc
) {
411 insn
= __raw_readb(current_insn
++);
414 * Firstly, handle the opcodes that embed their operands
415 * in the instructions.
417 switch (DW_CFA_opcode(insn
)) {
418 case DW_CFA_advance_loc
:
419 delta
= DW_CFA_operand(insn
);
420 delta
*= cie
->code_alignment_factor
;
425 reg
= DW_CFA_operand(insn
);
426 count
= dwarf_read_uleb128(current_insn
, &offset
);
427 current_insn
+= count
;
/* Factored offsets are always scaled by the CIE's data alignment. */
428 offset
*= cie
->data_alignment_factor
;
429 regp
= dwarf_frame_alloc_reg(frame
, reg
);
431 regp
->flags
|= DWARF_REG_OFFSET
;
435 reg
= DW_CFA_operand(insn
);
441 * Secondly, handle the opcodes that don't embed their
442 * operands in the instruction.
447 case DW_CFA_advance_loc1
:
448 delta
= *current_insn
++;
449 frame
->pc
+= delta
* cie
->code_alignment_factor
;
451 case DW_CFA_advance_loc2
:
452 delta
= get_unaligned((u16
*)current_insn
);
454 frame
->pc
+= delta
* cie
->code_alignment_factor
;
456 case DW_CFA_advance_loc4
:
457 delta
= get_unaligned((u32
*)current_insn
);
459 frame
->pc
+= delta
* cie
->code_alignment_factor
;
461 case DW_CFA_offset_extended
:
/*
 * NOTE(review): the "®" tokens in this function are mojibake for
 * "&reg" from the extraction; preserved byte-identically here.
 */
462 count
= dwarf_read_uleb128(current_insn
, ®
);
463 current_insn
+= count
;
464 count
= dwarf_read_uleb128(current_insn
, &offset
);
465 current_insn
+= count
;
466 offset
*= cie
->data_alignment_factor
;
468 case DW_CFA_restore_extended
:
469 count
= dwarf_read_uleb128(current_insn
, ®
);
470 current_insn
+= count
;
472 case DW_CFA_undefined
:
473 count
= dwarf_read_uleb128(current_insn
, ®
);
474 current_insn
+= count
;
/* Mark the column undefined — signals end-of-callstack to the unwinder. */
475 regp
= dwarf_frame_alloc_reg(frame
, reg
);
476 regp
->flags
|= DWARF_UNDEFINED
;
479 count
= dwarf_read_uleb128(current_insn
,
480 &frame
->cfa_register
);
481 current_insn
+= count
;
482 count
= dwarf_read_uleb128(current_insn
,
484 current_insn
+= count
;
486 frame
->flags
|= DWARF_FRAME_CFA_REG_OFFSET
;
488 case DW_CFA_def_cfa_register
:
489 count
= dwarf_read_uleb128(current_insn
,
490 &frame
->cfa_register
);
491 current_insn
+= count
;
492 frame
->flags
|= DWARF_FRAME_CFA_REG_OFFSET
;
494 case DW_CFA_def_cfa_offset
:
495 count
= dwarf_read_uleb128(current_insn
, &offset
);
496 current_insn
+= count
;
497 frame
->cfa_offset
= offset
;
499 case DW_CFA_def_cfa_expression
:
500 count
= dwarf_read_uleb128(current_insn
, &expr_len
);
501 current_insn
+= count
;
/* Record the expression bytes in-place; evaluated later, not here. */
503 frame
->cfa_expr
= current_insn
;
504 frame
->cfa_expr_len
= expr_len
;
505 current_insn
+= expr_len
;
507 frame
->flags
|= DWARF_FRAME_CFA_REG_EXP
;
509 case DW_CFA_offset_extended_sf
:
510 count
= dwarf_read_uleb128(current_insn
, ®
);
511 current_insn
+= count
;
512 count
= dwarf_read_leb128(current_insn
, &offset
);
513 current_insn
+= count
;
514 offset
*= cie
->data_alignment_factor
;
515 regp
= dwarf_frame_alloc_reg(frame
, reg
);
516 regp
->flags
|= DWARF_REG_OFFSET
;
519 case DW_CFA_val_offset
:
520 count
= dwarf_read_uleb128(current_insn
, ®
);
521 current_insn
+= count
;
522 count
= dwarf_read_leb128(current_insn
, &offset
);
523 offset
*= cie
->data_alignment_factor
;
/* Header notes DWARF_VAL_OFFSET rules are not handled properly yet. */
524 regp
= dwarf_frame_alloc_reg(frame
, reg
);
525 regp
->flags
|= DWARF_VAL_OFFSET
;
528 case DW_CFA_GNU_args_size
:
529 count
= dwarf_read_uleb128(current_insn
, &offset
);
530 current_insn
+= count
;
532 case DW_CFA_GNU_negative_offset_extended
:
533 count
= dwarf_read_uleb128(current_insn
, ®
);
534 current_insn
+= count
;
535 count
= dwarf_read_uleb128(current_insn
, &offset
);
536 offset
*= cie
->data_alignment_factor
;
538 regp
= dwarf_frame_alloc_reg(frame
, reg
);
539 regp
->flags
|= DWARF_REG_OFFSET
;
/* GNU extension: offset is implicitly negative. */
540 regp
->addr
= -offset
;
543 pr_debug("unhandled DWARF instruction 0x%x\n", insn
);
553 * dwarf_free_frame - free the memory allocated for @frame
554 * @frame: the frame to free
556 void dwarf_free_frame(struct dwarf_frame
*frame
)
/* Release the per-frame register list first, then the frame itself. */
558 dwarf_frame_free_regs(frame
);
559 mempool_free(frame
, dwarf_frame_pool
);
/* Assembly IRQ-return entry point; used below to stop unwinding at IRQs. */
562 extern void ret_from_irq(void);
565 * dwarf_unwind_stack - unwind the stack
567 * @pc: address of the function to unwind
568 * @prev: struct dwarf_frame of the previous stackframe on the callstack
570 * Return a struct dwarf_frame representing the most recent frame
571 * on the callstack. Each of the lower (older) stack frames are
572 * linked via the "prev" member.
574 struct dwarf_frame
*dwarf_unwind_stack(unsigned long pc
,
575 struct dwarf_frame
*prev
)
577 struct dwarf_frame
*frame
;
578 struct dwarf_cie
*cie
;
579 struct dwarf_fde
*fde
;
580 struct dwarf_reg
*reg
;
584 * If we've been called in to before initialization has
585 * completed, bail out immediately.
587 if (!dwarf_unwinder_ready
)
591 * If we're starting at the top of the stack we need get the
592 * contents of a physical register to get the CFA in order to
593 * begin the virtual unwinding of the stack.
595 * NOTE: the return address is guaranteed to be setup by the
596 * time this function makes its first function call.
601 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
603 * If our stack has been patched by the function graph tracer
604 * then we might see the address of return_to_handler() where we
605 * expected to find the real return address.
607 if (pc
== (unsigned long)&return_to_handler
) {
608 struct ftrace_ret_stack
*ret_stack
;
610 ret_stack
= ftrace_graph_get_ret_stack(current
, 0);
614 * We currently have no way of tracking how many
615 * return_to_handler()'s we've seen. If there is more
616 * than one patched return address on our stack,
619 WARN_ON(ftrace_graph_get_ret_stack(current
, 1));
/* GFP_ATOMIC — unwinding can happen from atomic/interrupt context. */
623 frame
= mempool_alloc(dwarf_frame_pool
, GFP_ATOMIC
);
625 printk(KERN_ERR
"Unable to allocate a dwarf frame\n");
629 INIT_LIST_HEAD(&frame
->reg_list
);
632 frame
->return_addr
= 0;
634 fde
= dwarf_lookup_fde(pc
);
637 * This is our normal exit path. There are two reasons
638 * why we might exit here,
640 * a) pc has no associated DWARF frame info and so
641 * we don't know how to unwind this frame. This is
642 * usually the case when we're trying to unwind a
643 * frame that was called from some assembly code
644 * that has no DWARF info, e.g. syscalls.
646 * b) the DEBUG info for pc is bogus. There's
647 * really no way to distinguish this case from the
648 * case above, which sucks because we could print a
654 cie
= dwarf_lookup_cie(fde
->cie_pointer
);
656 frame
->pc
= fde
->initial_location
;
658 /* CIE initial instructions */
659 dwarf_cfa_execute_insns(cie
->initial_instructions
,
660 cie
->instructions_end
, cie
, fde
,
663 /* FDE instructions */
664 dwarf_cfa_execute_insns(fde
->instructions
, fde
->end
, cie
,
667 /* Calculate the CFA */
668 switch (frame
->flags
) {
669 case DWARF_FRAME_CFA_REG_OFFSET
:
671 reg
= dwarf_frame_reg(prev
, frame
->cfa_register
);
672 UNWINDER_BUG_ON(!reg
);
673 UNWINDER_BUG_ON(reg
->flags
!= DWARF_REG_OFFSET
);
675 addr
= prev
->cfa
+ reg
->addr
;
676 frame
->cfa
= __raw_readl(addr
);
680 * Again, we're starting from the top of the
681 * stack. We need to physically read
682 * the contents of a register in order to get
683 * the Canonical Frame Address for this
686 frame
->cfa
= dwarf_read_arch_reg(frame
->cfa_register
);
689 frame
->cfa
+= frame
->cfa_offset
;
/* Locate the return-address column for this architecture. */
695 reg
= dwarf_frame_reg(frame
, DWARF_ARCH_RA_REG
);
698 * If we haven't seen the return address register or the return
699 * address column is undefined then we must assume that this is
700 * the end of the callstack.
702 if (!reg
|| reg
->flags
== DWARF_UNDEFINED
)
705 UNWINDER_BUG_ON(reg
->flags
!= DWARF_REG_OFFSET
);
707 addr
= frame
->cfa
+ reg
->addr
;
708 frame
->return_addr
= __raw_readl(addr
);
711 * Ah, the joys of unwinding through interrupts.
713 * Interrupts are tricky - the DWARF info needs to be _really_
714 * accurate and unfortunately I'm seeing a lot of bogus DWARF
715 * info. For example, I've seen interrupts occur in epilogues
716 * just after the frame pointer (r14) had been restored. The
717 * problem was that the DWARF info claimed that the CFA could be
718 * reached by using the value of the frame pointer before it was
721 * So until the compiler can be trusted to produce reliable
722 * DWARF info when it really matters, let's stop unwinding once
723 * we've calculated the function that was interrupted.
725 if (prev
&& prev
->pc
== (unsigned long)ret_from_irq
)
726 frame
->return_addr
= 0;
/* Error path: release the partially-built frame. */
731 dwarf_free_frame(frame
);
/*
 * dwarf_parse_cie - parse one CIE out of .eh_frame and insert it into
 * cie_root (keyed by its offset within the section). @mod is non-NULL
 * when the CIE comes from a module's .eh_frame.
 */
735 static int dwarf_parse_cie(void *entry
, void *p
, unsigned long len
,
736 unsigned char *end
, struct module
*mod
)
738 struct rb_node
**rb_node
= &cie_root
.rb_node
;
739 struct rb_node
*parent
= *rb_node
;
740 struct dwarf_cie
*cie
;
744 cie
= kzalloc(sizeof(*cie
), GFP_KERNEL
);
751 * Record the offset into the .eh_frame section
752 * for this CIE. It allows this CIE to be
753 * quickly and easily looked up from the
756 cie
->cie_pointer
= (unsigned long)entry
;
758 cie
->version
= *(char *)p
++;
/* Only CIE version 1 (as emitted by GCC for .eh_frame) is supported. */
759 UNWINDER_BUG_ON(cie
->version
!= 1);
761 cie
->augmentation
= p
;
762 p
+= strlen(cie
->augmentation
) + 1;
764 count
= dwarf_read_uleb128(p
, &cie
->code_alignment_factor
);
767 count
= dwarf_read_leb128(p
, &cie
->data_alignment_factor
);
771 * Which column in the rule table contains the
774 if (cie
->version
== 1) {
775 cie
->return_address_reg
= __raw_readb(p
);
778 count
= dwarf_read_uleb128(p
, &cie
->return_address_reg
);
/* 'z' augmentation: an augmentation-data length ULEB128 follows. */
782 if (cie
->augmentation
[0] == 'z') {
783 unsigned int length
, count
;
784 cie
->flags
|= DWARF_CIE_Z_AUGMENTATION
;
786 count
= dwarf_read_uleb128(p
, &length
);
789 UNWINDER_BUG_ON((unsigned char *)p
> end
);
791 cie
->initial_instructions
= p
+ length
;
795 while (*cie
->augmentation
) {
797 * "L" indicates a byte showing how the
798 * LSDA pointer is encoded. Skip it.
800 if (*cie
->augmentation
== 'L') {
803 } else if (*cie
->augmentation
== 'R') {
805 * "R" indicates a byte showing
806 * how FDE addresses are
809 cie
->encoding
= *(char *)p
++;
811 } else if (*cie
->augmentation
== 'P') {
813 * "P" indicates a personality
818 } else if (*cie
->augmentation
== 'S') {
822 * Unknown augmentation. Assume
825 p
= cie
->initial_instructions
;
831 cie
->initial_instructions
= p
;
832 cie
->instructions_end
= end
;
/* Insert into the global CIE rb-tree under the IRQ-safe lock. */
835 spin_lock_irqsave(&dwarf_cie_lock
, flags
);
838 struct dwarf_cie
*cie_tmp
;
840 cie_tmp
= rb_entry(*rb_node
, struct dwarf_cie
, node
);
844 if (cie
->cie_pointer
< cie_tmp
->cie_pointer
)
845 rb_node
= &parent
->rb_left
;
846 else if (cie
->cie_pointer
>= cie_tmp
->cie_pointer
)
847 rb_node
= &parent
->rb_right
;
852 rb_link_node(&cie
->node
, parent
, rb_node
);
853 rb_insert_color(&cie
->node
, &cie_root
);
855 #ifdef CONFIG_MODULES
/* Track module-owned CIEs so module_dwarf_cleanup() can remove them. */
857 list_add_tail(&cie
->link
, &mod
->arch
.cie_list
);
860 spin_unlock_irqrestore(&dwarf_cie_lock
, flags
);
/*
 * dwarf_parse_fde - parse one FDE out of .eh_frame, resolve its CIE, and
 * insert it into fde_root keyed by the PC range it covers. @mod is
 * non-NULL for module .eh_frame sections.
 */
865 static int dwarf_parse_fde(void *entry
, u32 entry_type
,
866 void *start
, unsigned long len
,
867 unsigned char *end
, struct module
*mod
)
869 struct rb_node
**rb_node
= &fde_root
.rb_node
;
870 struct rb_node
*parent
= *rb_node
;
871 struct dwarf_fde
*fde
;
872 struct dwarf_cie
*cie
;
877 fde
= kzalloc(sizeof(*fde
), GFP_KERNEL
);
884 * In a .eh_frame section the CIE pointer is the
885 * delta between the address within the FDE
887 fde
->cie_pointer
= (unsigned long)(p
- entry_type
- 4);
889 cie
= dwarf_lookup_cie(fde
->cie_pointer
);
/* Use the CIE's pointer encoding if present, raw address reads otherwise. */
893 count
= dwarf_read_encoded_value(p
, &fde
->initial_location
,
896 count
= dwarf_read_addr(p
, &fde
->initial_location
);
901 count
= dwarf_read_encoded_value(p
, &fde
->address_range
,
902 cie
->encoding
& 0x0f);
904 count
= dwarf_read_addr(p
, &fde
->address_range
);
/* 'z' augmentation: skip the augmentation data before the insns. */
908 if (fde
->cie
->flags
& DWARF_CIE_Z_AUGMENTATION
) {
910 count
= dwarf_read_uleb128(p
, &length
);
914 /* Call frame instructions. */
915 fde
->instructions
= p
;
919 spin_lock_irqsave(&dwarf_fde_lock
, flags
);
922 struct dwarf_fde
*fde_tmp
;
923 unsigned long tmp_start
, tmp_end
;
924 unsigned long start
, end
;
926 fde_tmp
= rb_entry(*rb_node
, struct dwarf_fde
, node
);
/* Descend by comparing the new FDE's PC range against each node's. */
928 start
= fde
->initial_location
;
929 end
= fde
->initial_location
+ fde
->address_range
;
931 tmp_start
= fde_tmp
->initial_location
;
932 tmp_end
= fde_tmp
->initial_location
+ fde_tmp
->address_range
;
936 if (start
< tmp_start
)
937 rb_node
= &parent
->rb_left
;
938 else if (start
>= tmp_end
)
939 rb_node
= &parent
->rb_right
;
944 rb_link_node(&fde
->node
, parent
, rb_node
);
945 rb_insert_color(&fde
->node
, &fde_root
);
947 #ifdef CONFIG_MODULES
/* Track module-owned FDEs so module_dwarf_cleanup() can remove them. */
949 list_add_tail(&fde
->link
, &mod
->arch
.fde_list
);
952 spin_unlock_irqrestore(&dwarf_fde_lock
, flags
);
/*
 * dwarf_unwinder_dump - stacktrace_ops callback: walk the callstack via
 * dwarf_unwind_stack() and report each return address through
 * ops->address(). Frees each intermediate frame as it goes.
 */
957 static void dwarf_unwinder_dump(struct task_struct
*task
,
958 struct pt_regs
*regs
,
960 const struct stacktrace_ops
*ops
,
963 struct dwarf_frame
*frame
, *_frame
;
964 unsigned long return_addr
;
970 frame
= dwarf_unwind_stack(return_addr
, _frame
);
/* The previous frame is no longer needed once its successor exists. */
973 dwarf_free_frame(_frame
);
/* A NULL frame or a zero return address terminates the walk. */
977 if (!frame
|| !frame
->return_addr
)
980 return_addr
= frame
->return_addr
;
981 ops
->address(data
, return_addr
, 1);
985 dwarf_free_frame(frame
);
/* Unwinder descriptor registered with the generic SH unwinder core. */
988 static struct unwinder dwarf_unwinder
= {
989 .name
= "dwarf-unwinder",
990 .dump
= dwarf_unwinder_dump
,
/*
 * dwarf_unwinder_cleanup - tear down everything dwarf_unwinder_init()
 * built: free all FDEs/CIEs and destroy the mempools and slab caches.
 * Only called from the init error path (__init).
 */
994 static void __init
dwarf_unwinder_cleanup(void)
996 struct dwarf_fde
*fde
, *next_fde
;
997 struct dwarf_cie
*cie
, *next_cie
;
1000 * Deallocate all the memory allocated for the DWARF unwinder.
1001 * Traverse all the FDE/CIE lists and remove and free all the
1002 * memory associated with those data structures.
1004 rbtree_postorder_for_each_entry_safe(fde
, next_fde
, &fde_root
, node
)
1007 rbtree_postorder_for_each_entry_safe(cie
, next_cie
, &cie_root
, node
)
/* Pools must go before their backing caches. */
1010 mempool_destroy(dwarf_reg_pool
);
1011 mempool_destroy(dwarf_frame_pool
);
1012 kmem_cache_destroy(dwarf_reg_cachep
);
1013 kmem_cache_destroy(dwarf_frame_cachep
);
1017 * dwarf_parse_section - parse DWARF section
1018 * @eh_frame_start: start address of the .eh_frame section
1019 * @eh_frame_end: end address of the .eh_frame section
1020 * @mod: the kernel module containing the .eh_frame section
1022 * Parse the information in a .eh_frame section.
1024 static int dwarf_parse_section(char *eh_frame_start
, char *eh_frame_end
,
1030 unsigned long len
= 0;
1031 unsigned int c_entries
, f_entries
;
1036 entry
= eh_frame_start
;
/* Walk entry-by-entry; each entry's length tells us where the next is. */
1038 while ((char *)entry
< eh_frame_end
) {
1041 count
= dwarf_entry_len(p
, &len
);
1044 * We read a bogus length field value. There is
1045 * nothing we can do here apart from disabling
1046 * the DWARF unwinder. We can't even skip this
1047 * entry and move to the next one because 'len'
1048 * tells us where our next entry is.
1055 /* initial length does not include itself */
1058 entry_type
= get_unaligned((u32
*)p
);
/* A zero "CIE pointer" word marks a CIE; anything else is an FDE. */
1061 if (entry_type
== DW_EH_FRAME_CIE
) {
1062 err
= dwarf_parse_cie(entry
, p
, len
, end
, mod
);
1068 err
= dwarf_parse_fde(entry
, entry_type
, p
, len
,
/* +4 accounts for the initial-length field itself. */
1076 entry
= (char *)entry
+ len
+ 4;
1079 printk(KERN_INFO
"DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
1080 c_entries
, f_entries
);
1088 #ifdef CONFIG_MODULES
/*
 * module_dwarf_finalize - locate a loading module's .eh_frame section
 * (if any) and parse its CIEs/FDEs into the global trees so the
 * unwinder can walk through module code.
 */
1089 int module_dwarf_finalize(const Elf_Ehdr
*hdr
, const Elf_Shdr
*sechdrs
,
1092 unsigned int i
, err
;
1093 unsigned long start
, end
;
1094 char *secstrings
= (void *)hdr
+ sechdrs
[hdr
->e_shstrndx
].sh_offset
;
/* Scan section headers (index 0 is the null section, skip it). */
1098 for (i
= 1; i
< hdr
->e_shnum
; i
++) {
1099 /* Alloc bit cleared means "ignore it." */
1100 if ((sechdrs
[i
].sh_flags
& SHF_ALLOC
)
1101 && !strcmp(secstrings
+sechdrs
[i
].sh_name
, ".eh_frame")) {
1102 start
= sechdrs
[i
].sh_addr
;
1103 end
= start
+ sechdrs
[i
].sh_size
;
1108 /* Did we find the .eh_frame section? */
1109 if (i
!= hdr
->e_shnum
) {
1110 INIT_LIST_HEAD(&me
->arch
.cie_list
);
1111 INIT_LIST_HEAD(&me
->arch
.fde_list
);
1112 err
= dwarf_parse_section((char *)start
, (char *)end
, me
);
1114 printk(KERN_WARNING
"%s: failed to parse DWARF info\n",
1124 * module_dwarf_cleanup - remove FDE/CIEs associated with @mod
1125 * @mod: the module that is being unloaded
1127 * Remove any FDEs and CIEs from the global lists that came from
1128 * @mod's .eh_frame section because @mod is being unloaded.
1130 void module_dwarf_cleanup(struct module
*mod
)
1132 struct dwarf_fde
*fde
, *ftmp
;
1133 struct dwarf_cie
*cie
, *ctmp
;
1134 unsigned long flags
;
1136 spin_lock_irqsave(&dwarf_cie_lock
, flags
);
/* Unlink each module CIE from both the per-module list and cie_root. */
1138 list_for_each_entry_safe(cie
, ctmp
, &mod
->arch
.cie_list
, link
) {
1139 list_del(&cie
->link
);
1140 rb_erase(&cie
->node
, &cie_root
);
1144 spin_unlock_irqrestore(&dwarf_cie_lock
, flags
);
1146 spin_lock_irqsave(&dwarf_fde_lock
, flags
);
/* Same for the module's FDEs, under the FDE lock. */
1148 list_for_each_entry_safe(fde
, ftmp
, &mod
->arch
.fde_list
, link
) {
1149 list_del(&fde
->link
);
1150 rb_erase(&fde
->node
, &fde_root
);
1154 spin_unlock_irqrestore(&dwarf_fde_lock
, flags
);
1156 #endif /* CONFIG_MODULES */
1159 * dwarf_unwinder_init - initialise the dwarf unwinder
1161 * Build the data structures describing the .dwarf_frame section to
1162 * make it easier to lookup CIE and FDE entries. Because the
1163 * .eh_frame section is packed as tightly as possible it is not
1164 * easy to lookup the FDE for a given PC, so we build a list of FDE
1165 * and CIE entries that make it easier.
1167 static int __init
dwarf_unwinder_init(void)
/* SLAB_PANIC: cache creation failure panics, so no NULL check needed. */
1171 dwarf_frame_cachep
= kmem_cache_create("dwarf_frames",
1172 sizeof(struct dwarf_frame
), 0,
1173 SLAB_PANIC
| SLAB_HWCACHE_ALIGN
, NULL
);
1175 dwarf_reg_cachep
= kmem_cache_create("dwarf_regs",
1176 sizeof(struct dwarf_reg
), 0,
1177 SLAB_PANIC
| SLAB_HWCACHE_ALIGN
, NULL
);
/* Pre-reserve enough objects for two frames / eight registers. */
1179 dwarf_frame_pool
= mempool_create_slab_pool(DWARF_FRAME_MIN_REQ
,
1180 dwarf_frame_cachep
);
1181 if (!dwarf_frame_pool
)
1184 dwarf_reg_pool
= mempool_create_slab_pool(DWARF_REG_MIN_REQ
,
1186 if (!dwarf_reg_pool
)
/* Parse the kernel's own .eh_frame (mod == NULL means built-in). */
1189 err
= dwarf_parse_section(__start_eh_frame
, __stop_eh_frame
, NULL
);
1193 err
= unwinder_register(&dwarf_unwinder
);
/* Only now may dwarf_unwind_stack() do real work. */
1197 dwarf_unwinder_ready
= 1;
/* Error path: report and undo all allocations. */
1202 printk(KERN_ERR
"Failed to initialise DWARF unwinder: %d\n", err
);
1203 dwarf_unwinder_cleanup();
1206 early_initcall(dwarf_unwinder_init
);