/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007, 2008 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */

#include "elf32-spu.h"

/* We use RELA style relocs.  Don't define USE_REL.  */

static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
                                           void *, asection *, bfd *, char **);

/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 0,  0, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_NONE",
         FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR10",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16_HI",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR16_LO",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR18",
         FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16, TRUE,   7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_REL16",
         FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR7",
         FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9,       2, 2,  9, TRUE,   0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9",
         FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9, TRUE,   0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9I",
         FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR10I",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR16I",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32, TRUE,   0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_REL32",
         FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16X",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU64",

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },

static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)

    case BFD_RELOC_SPU_IMM10W:
    case BFD_RELOC_SPU_IMM16W:
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
    case BFD_RELOC_SPU_PCREL16:
    case BFD_RELOC_SPU_IMM7:
    case BFD_RELOC_SPU_IMM8:
    case BFD_RELOC_SPU_PCREL9a:
    case BFD_RELOC_SPU_PCREL9b:
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32_PCREL:
    case BFD_RELOC_SPU_PPU32:
    case BFD_RELOC_SPU_PPU64:

spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
                       arelent *cache_ptr,
                       Elf_Internal_Rela *dst)
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];

static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           bfd_reloc_code_real_type code)
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == R_SPU_NONE)
  return elf_howto_table + r_type;

static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
        && strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
              void *data, asection *input_section,
              bfd *output_bfd, char **error_message)
  bfd_size_type octets;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
                                  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  if (!bfd_is_com_section (symbol->section))
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
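
/* A compiled-out sketch, not part of BFD: how the 9-bit value above is
   split between the two reloc flavours.  The low seven bits always land
   in the low seven bits of the instruction; the two high bits go either
   to bits 14-15 (SPU_REL9I, dst_mask 0x0000c07f) or to bits 23-24
   (SPU_REL9, dst_mask 0x0180007f), and the howto's dst_mask selects
   whichever field the instruction actually uses.  The helper name is
   made up for illustration only.  */
#if 0
static unsigned int
rel9_field_example (unsigned int val9, unsigned int dst_mask)
{
  unsigned int spread = (val9 & 0x7f)
                        | ((val9 & 0x180) << 7)    /* REL9I position.  */
                        | ((val9 & 0x180) << 16);  /* REL9 position.   */
  return spread & dst_mask;
}
#endif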
spu_elf_new_section_hook (bfd *abfd, asection *sec)
  if (!sec->used_by_bfd)
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      sec->used_by_bfd = sdata;

  return _bfd_elf_new_section_hook (abfd, sec);

/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;

/* SPU ELF linker hash table.  */

struct spu_link_hash_table
  struct elf_link_hash_table elf;

  /* Shortcuts to overlay sections.  */

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */

  struct elf_link_hash_entry *ovly_load;
  struct elf_link_hash_entry *ovly_return;
  unsigned long ovly_load_r_symndx;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* Set if we should emit symbols for stubs.  */
  unsigned int emit_stub_syms:1;

  /* Set if we want stubs on calls out of overlay regions to
     non-overlay regions.  */
  unsigned int non_overlay_stubs : 1;

  unsigned int stub_err : 1;

  /* Set if stack size analysis should be done.  */
  unsigned int stack_analysis : 1;

  /* Set if __stack_* syms will be emitted.  */
  unsigned int emit_stack_syms : 1;

/* Hijack the generic got fields for overlay stub accounting.  */

  struct got_entry *next;

#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))

/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
                                      _bfd_elf_link_hash_newfunc,
                                      sizeof (struct elf_link_hash_entry)))

  memset (&htab->ovtab, 0,
          sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));

  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;

/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

get_sym_h (struct elf_link_hash_entry **hp,
           Elf_Internal_Sym **symp,
           Elf_Internal_Sym **locsymsp,
           unsigned long r_symndx,
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
             || h->root.type == bfd_link_hash_warning)
        h = (struct elf_link_hash_entry *) h->root.u.i.link;

          asection *symsec = NULL;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            symsec = h->root.u.def.section;

      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

          locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
              size_t symcount = symtab_hdr->sh_info;

              /* If we are reading symbols into the contents, then
                 read the global syms too.  This is done to cache
                 syms for later stack analysis.  */
              if ((unsigned char **) locsymsp == &symtab_hdr->contents)
                symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
              locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,

      sym = locsyms + r_symndx;
        *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);

/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

spu_elf_create_sections (struct bfd_link_info *info,
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Stash some options away where we can get at them later.  */
  htab->stack_analysis = stack_analysis;
  htab->emit_stack_syms = emit_stack_syms;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)

      /* Make SPU_PTNOTE_SPUNAME section.  */

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
          || !bfd_set_section_alignment (ibfd, s, 4))

      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))

      data = bfd_zalloc (ibfd, size);

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
              bfd_get_filename (info->output_bfd), name_len);
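
/* Compiled-out sketch of what the bfd_put_32/memcpy calls above lay out;
   the struct is purely illustrative and not used anywhere.  It is the
   usual ELF note format: namesz, descsz, type, then the name string
   (SPU_PLUGIN_NAME) and the descriptor (the output file name), each
   padded to a multiple of four bytes.  */
#if 0
struct spuname_note_example
{
  unsigned int namesz;  /* sizeof (SPU_PLUGIN_NAME), written at data + 0.  */
  unsigned int descsz;  /* output file name length + 1, at data + 4.       */
  unsigned int type;    /* always 1, at data + 8.                          */
  /* Name bytes follow at data + 12, then the output file name at
     data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4).  */
};
#endif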
/* qsort predicate to sort sections by vma.  */

sort_sections (const void *a, const void *b)
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;

/* Identify overlays in the output bfd, and number them.  */

spu_elf_find_overlays (struct bfd_link_info *info)
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;

  if (info->output_bfd->section_count < 2)

    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
        && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  /* Look for overlapping vmas.  Any with overlap must be overlays.
     Count them.  Also count the number of overlay regions.  */
  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
      if (s->vma < ovl_end)
          asection *s0 = alloc_sec[i - 1];

          if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
              alloc_sec[ovl_index] = s0;
              spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
              spu_elf_section_data (s0)->u.o.ovl_buf = ++num_buf;
          alloc_sec[ovl_index] = s;
          spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
          spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
          if (s0->vma != s->vma)
              info->callbacks->einfo (_("%X%P: overlay sections %A and %A "
                                        "do not start at the same address.\n"),
          if (ovl_end < s->vma + s->size)
            ovl_end = s->vma + s->size;
        ovl_end = s->vma + s->size;

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;
  htab->ovly_load = elf_link_hash_lookup (&htab->elf, "__ovly_load",
                                          FALSE, FALSE, FALSE);
  htab->ovly_return = elf_link_hash_lookup (&htab->elf, "__ovly_return",
                                            FALSE, FALSE, FALSE);
  return ovl_index != 0;
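
/* Compiled-out illustration of the numbering above, using made-up
   addresses: sections sorted by vma that overlap an earlier section are
   overlays, members of one buffer share a start address, and a fresh
   address past the current buffer end starts a new buffer.  */
#if 0
static const struct
{
  unsigned int vma, size, ovl_index, ovl_buf;   /* hypothetical values  */
} overlay_numbering_example[] =
{
  { 0x4000, 0x800, 1, 1 },      /* first section of buffer 1            */
  { 0x4000, 0x600, 2, 1 },      /* overlaps it, so same buffer          */
  { 0x4800, 0x400, 3, 2 },      /* past the 0x4800 buffer end: buffer 2 */
  { 0x4800, 0x300, 4, 2 },      /* overlaps it, so same buffer          */
};
#endif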
/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.  */
#ifndef OVL_STUB_SIZE
/* Default to faster.  */
#define OVL_STUB_SIZE 16
/* #define OVL_STUB_SIZE 8 */
#endif

#define BRSL	0x33000000
#define BR	0x32000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000

/* Return true for all relative and absolute branch instructions.
   brhnz	00100011 0..  */

is_branch (const unsigned char *insn)
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;

/* Return true for all indirect branch instructions.
   bihnz	00100101 011  */

is_indirect_branch (const unsigned char *insn)
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;

/* Return true for branch hint instructions.  */

is_hint (const unsigned char *insn)
  return (insn[0] & 0xfc) == 0x10;
/* True if INPUT_SECTION might need overlay stubs.  */

maybe_needs_stubs (asection *input_section, bfd *output_bfd)
  /* No stubs for debug sections and suchlike.  */
  if ((input_section->flags & SEC_ALLOC) == 0)

  /* No stubs for link-once sections that will be discarded.  */
  if (input_section->output_section == NULL
      || input_section->output_section->owner != output_bfd)

  /* Don't create stubs for .eh_frame references.  */
  if (strcmp (input_section->name, ".eh_frame") == 0)

/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
                Elf_Internal_Sym *sym,
                asection *input_section,
                Elf_Internal_Rela *irela,
                struct bfd_link_info *info)
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  enum _stub_type ret = no_stub;

      || sym_sec->output_section == NULL
      || sym_sec->output_section->owner != info->output_bfd
      || spu_elf_section_data (sym_sec->output_section) == NULL)

  /* Ensure no stubs for user supplied overlay manager syms.  */
  if (h == htab->ovly_load || h == htab->ovly_return)

  /* setjmp always goes via an overlay stub, because then the return
     and hence the longjmp goes via __ovly_return.  That magically
     makes setjmp/longjmp between overlays work.  */
  if (strncmp (h->root.root.string, "setjmp", 6) == 0
      && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->non_overlay_stubs)

    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);

  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
      if (contents == NULL)
          if (!bfd_get_section_contents (input_section->owner,

      contents += irela->r_offset;

      if (is_branch (contents) || is_hint (contents))
          if ((contents[0] & 0xfd) == 0x31
              && sym_type != STT_FUNC
              /* It's common for people to write assembly and forget
                 to give function symbols the right type.  Handle
                 calls to such symbols, but warn so that (hopefully)
                 people will fix their code.  We need the symbol
                 type to be correct to distinguish function pointer
                 initialisation from other pointer initialisations.  */
              const char *sym_name;

                sym_name = h->root.root.string;
                  Elf_Internal_Shdr *symtab_hdr;
                  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
                  sym_name = bfd_elf_sym_name (input_section->owner,

              (*_bfd_error_handler) (_("warning: call to non-function"
                                       " symbol %s defined in %B"),
                                     sym_sec->owner, sym_name);

  if (sym_type != STT_FUNC
      && (sym_sec->flags & SEC_CODE) == 0)

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
      != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  */
  return !branch && sym_type == STT_FUNC ? nonovl_stub : ret;

count_stub (struct spu_link_hash_table *htab,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela)
  unsigned int ovl = 0;
  struct got_entry *g, **head;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

    head = &h->got.glist;

      if (elf_local_got_ents (ibfd) == NULL)
          bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
                               * sizeof (*elf_local_got_ents (ibfd)));
          elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
          if (elf_local_got_ents (ibfd) == NULL)

      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

    addend = irela->r_addend;

      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && g->ovl == 0)

      /* Need a new non-overlay area stub.  Zap other stubs.  */
      for (g = *head; g != NULL; g = gnext)
          if (g->addend == addend)
              htab->stub_count[g->ovl] -= 1;

      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))

  g = bfd_malloc (sizeof *g);
  g->stub_addr = (bfd_vma) -1;

  htab->stub_count[ovl] += 1;

/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load  */

build_stub (struct spu_link_hash_table *htab,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela,
  struct got_entry *g, **head;
  bfd_vma addend, val, from, to;

  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

    head = &h->got.glist;
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

    addend = irela->r_addend;

  for (g = *head; g != NULL; g = g->next)
    if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))

  if (g->ovl == 0 && ovl != 0)

  if (g->stub_addr != (bfd_vma) -1)

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;

  to = (htab->ovly_load->root.u.def.value
        + htab->ovly_load->root.u.def.section->output_offset
        + htab->ovly_load->root.u.def.section->output_section->vma);

  if (OVL_STUB_SIZE == 16)
  if (((dest | to | from) & 3) != 0
      || val + 0x20000 >= 0x40000)

  ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (OVL_STUB_SIZE == 16)
      bfd_put_32 (sec->owner, ILA + ((ovl << 7) & 0x01ffff80) + 78,
                  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
                  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
                  sec->contents + sec->size + 8);
      bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
                  sec->contents + sec->size + 12);
  else if (OVL_STUB_SIZE == 8)
      bfd_put_32 (sec->owner, BRSL + ((val << 5) & 0x007fff80) + 75,
                  sec->contents + sec->size);

      val = (dest & 0x3ffff) | (ovl << 14);
      bfd_put_32 (sec->owner, val,
                  sec->contents + sec->size + 4);

  sec->size += OVL_STUB_SIZE;

  if (htab->emit_stub_syms)
      len = 8 + sizeof (".ovl_call.") - 1;
        len += strlen (h->root.root.string);
        add = (int) irela->r_addend & 0xffffffff;

      name = bfd_malloc (len);

      sprintf (name, "%08x.ovl_call.", g->ovl);
        strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
        sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
                 dest_sec->id & 0xffffffff,
                 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
        sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);

      if (h->root.type == bfd_link_hash_new)
          h->root.type = bfd_link_hash_defined;
          h->root.u.def.section = sec;
          h->root.u.def.value = sec->size - OVL_STUB_SIZE;
          h->size = OVL_STUB_SIZE;

          h->ref_regular_nonweak = 1;
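
/* Compiled-out sketch mirroring the bfd_put_32 calls above: the words of
   the fast 16-byte stub, composed from the BR/ILA/LNOP opcodes defined
   earlier.  The helper and its parameter names are made up; "branch_off"
   stands for the byte offset to __ovly_load that the code above derives
   from TO and FROM.  */
#if 0
static void
fast_stub_words_example (unsigned int ovl, unsigned int dest,
                         unsigned int branch_off, unsigned int word[4])
{
  word[0] = ILA + ((ovl << 7) & 0x01ffff80) + 78;    /* ila $78,ovl_number      */
  word[1] = LNOP;                                    /* lnop                    */
  word[2] = ILA + ((dest << 7) & 0x01ffff80) + 79;   /* ila $79,target_address  */
  word[3] = BR + ((branch_off << 5) & 0x007fff80);   /* br __ovly_load          */
}
#endif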
/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  */

allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
      struct spu_link_hash_table *htab = inf;

      count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);

build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
      struct spu_link_hash_table *htab = inf;

      build_stub (htab, NULL, NULL, nonovl_stub, h, NULL,
                  h->root.u.def.value, h->root.u.def.section);

/* Size or build stubs.  */

process_stubs (struct bfd_link_info *info, bfd_boolean build)
  struct spu_link_hash_table *htab = spu_hash_table (info);

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      Elf_Internal_Sym *local_syms = NULL;

      if (ibfd->xvec != &bfd_elf32_spu_vec)

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)

      /* Arrange to read and keep global syms for later stack analysis.  */
      psyms = &local_syms;
      if (htab->stack_analysis)
        psyms = &symtab_hdr->contents;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

          /* If there aren't any relocs, then there's nothing more to do.  */
          if ((isec->flags & SEC_RELOC) == 0
              || isec->reloc_count == 0)

          if (!maybe_needs_stubs (isec, info->output_bfd))

          /* Get the relocs.  */
          internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
          if (internal_relocs == NULL)
            goto error_ret_free_local;

          /* Now examine each relocation.  */
          irela = internal_relocs;
          irelaend = irela + isec->reloc_count;
          for (; irela < irelaend; irela++)
              enum elf_spu_reloc_type r_type;
              unsigned int r_indx;
              Elf_Internal_Sym *sym;
              struct elf_link_hash_entry *h;
              enum _stub_type stub_type;

              r_type = ELF32_R_TYPE (irela->r_info);
              r_indx = ELF32_R_SYM (irela->r_info);

              if (r_type >= R_SPU_max)
                  bfd_set_error (bfd_error_bad_value);
                error_ret_free_internal:
                  if (elf_section_data (isec)->relocs != internal_relocs)
                    free (internal_relocs);
                error_ret_free_local:
                  if (local_syms != NULL
                      && (symtab_hdr->contents
                          != (unsigned char *) local_syms))

              /* Determine the reloc target section.  */
              if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
                goto error_ret_free_internal;

              stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
              if (stub_type == no_stub)
              else if (stub_type == stub_error)
                goto error_ret_free_internal;

              if (htab->stub_count == NULL)
                  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
                  htab->stub_count = bfd_zmalloc (amt);
                  if (htab->stub_count == NULL)
                    goto error_ret_free_internal;

                  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
                    goto error_ret_free_internal;

                    dest = h->root.u.def.value;
                    dest = sym->st_value;
                  dest += irela->r_addend;
                  if (!build_stub (htab, ibfd, isec, stub_type, h, irela,
                    goto error_ret_free_internal;

          /* We're done with the internal relocs, free them.  */
          if (elf_section_data (isec)->relocs != internal_relocs)
            free (internal_relocs);

      if (local_syms != NULL
          && symtab_hdr->contents != (unsigned char *) local_syms)
          if (!info->keep_memory)
            symtab_hdr->contents = (unsigned char *) local_syms;

/* Allocate space for overlay call and return stubs.  */

spu_elf_size_stubs (struct bfd_link_info *info,
                    void (*place_spu_section) (asection *, asection *,
                    int non_overlay_stubs)
  struct spu_link_hash_table *htab = spu_hash_table (info);

  htab->non_overlay_stubs = non_overlay_stubs;
  if (!process_stubs (info, FALSE))

  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, htab);

  if (htab->stub_count == NULL)

  ibfd = info->input_bfds;
  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
  htab->stub_sec = bfd_zmalloc (amt);
  if (htab->stub_sec == NULL)

  flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
           | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
  htab->stub_sec[0] = stub;
      || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
  stub->size = htab->stub_count[0] * OVL_STUB_SIZE;
  (*place_spu_section) (stub, NULL, ".text");

  for (i = 0; i < htab->num_overlays; ++i)
      asection *osec = htab->ovl_sec[i];
      unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[ovl] = stub;
          || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
      stub->size = htab->stub_count[ovl] * OVL_STUB_SIZE;
      (*place_spu_section) (stub, osec, NULL);

  /* htab->ovtab consists of two arrays.
     .	} _ovly_buf_table[];  */

  flags = (SEC_ALLOC | SEC_LOAD
           | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
  if (htab->ovtab == NULL
      || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))

  htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  (*place_spu_section) (htab->ovtab, NULL, ".data");

  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
  htab->toe->size = 16;
  (*place_spu_section) (htab->toe, NULL, ".toe");
/* Functions to handle embedded spu_ovl.o object.  */

ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)

ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
  struct _ovl_stream *os;

  os = (struct _ovl_stream *) stream;
  max = (const char *) os->end - (const char *) os->start;

  if ((ufile_ptr) offset >= max)

  if (count > max - offset)
    count = max - offset;

  memcpy (buf, (const char *) os->start + offset, count);

spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
  return *ovl_bfd != NULL;

/* Define an STT_OBJECT symbol.  */

static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
  struct elf_link_hash_entry *h;

  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);

  if (h->root.type != bfd_link_hash_defined
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;

      h->ref_regular_nonweak = 1;

      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
                             h->root.u.def.section->owner,
                             h->root.root.string);
      bfd_set_error (bfd_error_bad_value);

/* Fill in all stubs and the overlay tables.  */

spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms)
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;

  htab->emit_stub_syms = emit_syms;
  if (htab->stub_count == NULL)

  for (i = 0; i <= htab->num_overlays; i++)
    if (htab->stub_sec[i]->size != 0)
        htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
                                                  htab->stub_sec[i]->size);
        if (htab->stub_sec[i]->contents == NULL)
        htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
        htab->stub_sec[i]->size = 0;

  h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
  htab->ovly_load = h;
  BFD_ASSERT (h != NULL
              && (h->root.type == bfd_link_hash_defined
                  || h->root.type == bfd_link_hash_defweak)

  s = h->root.u.def.section->output_section;
  if (spu_elf_section_data (s)->u.o.ovl_index)
      (*_bfd_error_handler) (_("%s in overlay section"),
                             h->root.u.def.section->owner);
      bfd_set_error (bfd_error_bad_value);

  h = elf_link_hash_lookup (&htab->elf, "__ovly_return", FALSE, FALSE, FALSE);
  htab->ovly_return = h;

  /* Fill in all the stubs.  */
  process_stubs (info, TRUE);

  elf_link_hash_traverse (&htab->elf, build_spuear_stubs, htab);

  for (i = 0; i <= htab->num_overlays; i++)
      if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
          (*_bfd_error_handler) (_("stubs don't match calculated size"));
          bfd_set_error (bfd_error_bad_value);
      htab->stub_sec[i]->rawsize = 0;

      (*_bfd_error_handler) (_("overlay stub relocation overflow"));
      bfd_set_error (bfd_error_bad_value);

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)

  /* Write out _ovly_table.  */
  p = htab->ovtab->contents;
  /* Set low bit of .size to mark non-overlay area as present.  */
  obfd = htab->ovtab->output_section->owner;
  for (s = obfd->sections; s != NULL; s = s->next)
      unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

          unsigned long off = ovl_index * 16;
          unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

          bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
          bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
          /* file_off written later in spu_elf_modify_program_headers.  */
          bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);

  h = define_ovtab_symbol (htab, "_ovly_table");
  h->root.u.def.value = 16;
  h->size = htab->num_overlays * 16;

  h = define_ovtab_symbol (htab, "_ovly_table_end");
  h->root.u.def.value = htab->num_overlays * 16 + 16;

  h = define_ovtab_symbol (htab, "_ovly_buf_table");
  h->root.u.def.value = htab->num_overlays * 16 + 16;
  h->size = htab->num_buf * 4;

  h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
  h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;

  h = define_ovtab_symbol (htab, "_EAR_");
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;

/* Check that all loadable section VMAs lie in the range
   LO .. HI inclusive.  */

spu_elf_check_vma (struct bfd_link_info *info, bfd_vma lo, bfd_vma hi)
  struct elf_segment_map *m;
  bfd *abfd = info->output_bfd;

  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
        if (m->sections[i]->size != 0
            && (m->sections[i]->vma < lo
                || m->sections[i]->vma > hi
                || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
          return m->sections[i];

/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.  */

find_function_stack_adjust (asection *sec, bfd_vma offset)
  memset (reg, 0, sizeof (reg));
  for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
      unsigned char buf[4];

      /* Assume no relocs on stack adjusting insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))

      if (buf[0] == 0x24 /* stqd */)

      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] + imm;

          if (rt == 1 /* sp */)
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

          reg[rt] = reg[ra] + reg[rb];
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
          if (buf[0] >= 0x42 /* ila */)
            imm |= (buf[0] & 1) << 17;

              if (buf[0] == 0x40 /* il */)
                  if ((buf[1] & 0x80) == 0)
                  imm = (imm ^ 0x8000) - 0x8000;
              else if ((buf[1] & 0x80) == 0 /* ilhu */)
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
          reg[rt] |= imm & 0xffff;
      else if (buf[0] == 0x04 /* ori */)
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] | imm;
      else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
               || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
          /* Used in pic reg load.  Say rt is trashed.  */
      else if (is_branch (buf) || is_indirect_branch (buf))
        /* If we hit a branch then we must be out of the prologue.  */
1598 static Elf_Internal_Sym
*sort_syms_syms
;
1599 static asection
**sort_syms_psecs
;
1602 sort_syms (const void *a
, const void *b
)
1604 Elf_Internal_Sym
*const *s1
= a
;
1605 Elf_Internal_Sym
*const *s2
= b
;
1606 asection
*sec1
,*sec2
;
1607 bfd_signed_vma delta
;
1609 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
1610 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
1613 return sec1
->index
- sec2
->index
;
1615 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
1617 return delta
< 0 ? -1 : 1;
1619 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
1621 return delta
< 0 ? -1 : 1;
1623 return *s1
< *s2
? -1 : 1;
1628 struct function_info
*fun
;
1629 struct call_info
*next
;
1630 unsigned int is_tail
: 1;
1633 struct function_info
1635 /* List of functions called. Also branches to hot/cold part of
1637 struct call_info
*call_list
;
1638 /* For hot/cold part of function, point to owner. */
1639 struct function_info
*start
;
1640 /* Symbol at start of function. */
1642 Elf_Internal_Sym
*sym
;
1643 struct elf_link_hash_entry
*h
;
1645 /* Function section. */
1647 /* Address range of (this part of) function. */
1651 /* Set if global symbol. */
1652 unsigned int global
: 1;
1653 /* Set if known to be start of function (as distinct from a hunk
1654 in hot/cold section. */
1655 unsigned int is_func
: 1;
1656 /* Flags used during call tree traversal. */
1657 unsigned int visit1
: 1;
1658 unsigned int non_root
: 1;
1659 unsigned int visit2
: 1;
1660 unsigned int marking
: 1;
1661 unsigned int visit3
: 1;
1664 struct spu_elf_stack_info
1668 /* Variable size array describing functions, one per contiguous
1669 address range belonging to a function. */
1670 struct function_info fun
[1];
1673 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1674 entries for section SEC. */
1676 static struct spu_elf_stack_info
*
1677 alloc_stack_info (asection
*sec
, int max_fun
)
1679 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1682 amt
= sizeof (struct spu_elf_stack_info
);
1683 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
1684 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
1685 if (sec_data
->u
.i
.stack_info
!= NULL
)
1686 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
1687 return sec_data
->u
.i
.stack_info
;
1690 /* Add a new struct function_info describing a (part of a) function
1691 starting at SYM_H. Keep the array sorted by address. */
1693 static struct function_info
*
1694 maybe_insert_function (asection
*sec
,
1697 bfd_boolean is_func
)
1699 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1700 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1706 sinfo
= alloc_stack_info (sec
, 20);
1713 Elf_Internal_Sym
*sym
= sym_h
;
1714 off
= sym
->st_value
;
1715 size
= sym
->st_size
;
1719 struct elf_link_hash_entry
*h
= sym_h
;
1720 off
= h
->root
.u
.def
.value
;
1724 for (i
= sinfo
->num_fun
; --i
>= 0; )
1725 if (sinfo
->fun
[i
].lo
<= off
)
1730 /* Don't add another entry for an alias, but do update some
1732 if (sinfo
->fun
[i
].lo
== off
)
1734 /* Prefer globals over local syms. */
1735 if (global
&& !sinfo
->fun
[i
].global
)
1737 sinfo
->fun
[i
].global
= TRUE
;
1738 sinfo
->fun
[i
].u
.h
= sym_h
;
1741 sinfo
->fun
[i
].is_func
= TRUE
;
1742 return &sinfo
->fun
[i
];
1744 /* Ignore a zero-size symbol inside an existing function. */
1745 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
1746 return &sinfo
->fun
[i
];
1749 if (++i
< sinfo
->num_fun
)
1750 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
1751 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
1752 else if (i
>= sinfo
->max_fun
)
1754 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
1755 bfd_size_type old
= amt
;
1757 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1758 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
1759 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1760 sinfo
= bfd_realloc (sinfo
, amt
);
1763 memset ((char *) sinfo
+ old
, 0, amt
- old
);
1764 sec_data
->u
.i
.stack_info
= sinfo
;
1766 sinfo
->fun
[i
].is_func
= is_func
;
1767 sinfo
->fun
[i
].global
= global
;
1768 sinfo
->fun
[i
].sec
= sec
;
1770 sinfo
->fun
[i
].u
.h
= sym_h
;
1772 sinfo
->fun
[i
].u
.sym
= sym_h
;
1773 sinfo
->fun
[i
].lo
= off
;
1774 sinfo
->fun
[i
].hi
= off
+ size
;
1775 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
);
1776 sinfo
->num_fun
+= 1;
1777 return &sinfo
->fun
[i
];
1780 /* Return the name of FUN. */
1783 func_name (struct function_info
*fun
)
1787 Elf_Internal_Shdr
*symtab_hdr
;
1789 while (fun
->start
!= NULL
)
1793 return fun
->u
.h
->root
.root
.string
;
1796 if (fun
->u
.sym
->st_name
== 0)
1798 size_t len
= strlen (sec
->name
);
1799 char *name
= bfd_malloc (len
+ 10);
1802 sprintf (name
, "%s+%lx", sec
->name
,
1803 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
1807 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1808 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
1811 /* Read the instruction at OFF in SEC. Return true iff the instruction
1812 is a nop, lnop, or stop 0 (all zero insn). */
1815 is_nop (asection
*sec
, bfd_vma off
)
1817 unsigned char insn
[4];
1819 if (off
+ 4 > sec
->size
1820 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
1822 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
1824 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
1829 /* Extend the range of FUN to cover nop padding up to LIMIT.
1830 Return TRUE iff some instruction other than a NOP was found. */
1833 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
1835 bfd_vma off
= (fun
->hi
+ 3) & -4;
1837 while (off
< limit
&& is_nop (fun
->sec
, off
))
1848 /* Check and fix overlapping function ranges. Return TRUE iff there
1849 are gaps in the current info we have about functions in SEC. */
1852 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
1854 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1855 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1857 bfd_boolean gaps
= FALSE
;
1862 for (i
= 1; i
< sinfo
->num_fun
; i
++)
1863 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
1865 /* Fix overlapping symbols. */
1866 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
1867 const char *f2
= func_name (&sinfo
->fun
[i
]);
1869 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
1870 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
1872 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
1875 if (sinfo
->num_fun
== 0)
1879 if (sinfo
->fun
[0].lo
!= 0)
1881 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
1883 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
1885 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
1886 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
1888 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
1894 /* Search current function info for a function that contains address
1895 OFFSET in section SEC. */
1897 static struct function_info
*
1898 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
1900 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1901 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1905 hi
= sinfo
->num_fun
;
1908 mid
= (lo
+ hi
) / 2;
1909 if (offset
< sinfo
->fun
[mid
].lo
)
1911 else if (offset
>= sinfo
->fun
[mid
].hi
)
1914 return &sinfo
->fun
[mid
];
1916 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
1921 /* Add CALLEE to CALLER call list if not already present. */
1924 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
1926 struct call_info
*p
;
1927 for (p
= caller
->call_list
; p
!= NULL
; p
= p
->next
)
1928 if (p
->fun
== callee
->fun
)
1930 /* Tail calls use less stack than normal calls. Retain entry
1931 for normal call over one for tail call. */
1932 p
->is_tail
&= callee
->is_tail
;
1935 p
->fun
->start
= NULL
;
1936 p
->fun
->is_func
= TRUE
;
1940 callee
->next
= caller
->call_list
;
1941 caller
->call_list
= callee
;
1945 /* Rummage through the relocs for SEC, looking for function calls.
1946 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
1947 mark destination symbols on calls as being functions. Also
1948 look at branches, which may be tail calls or go to hot/cold
1949 section part of same function. */
1952 mark_functions_via_relocs (asection
*sec
,
1953 struct bfd_link_info
*info
,
1956 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1957 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
1958 Elf_Internal_Sym
*syms
;
1960 static bfd_boolean warned
;
1962 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
1964 if (internal_relocs
== NULL
)
1967 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
1968 psyms
= &symtab_hdr
->contents
;
1969 syms
= *(Elf_Internal_Sym
**) psyms
;
1970 irela
= internal_relocs
;
1971 irelaend
= irela
+ sec
->reloc_count
;
1972 for (; irela
< irelaend
; irela
++)
1974 enum elf_spu_reloc_type r_type
;
1975 unsigned int r_indx
;
1977 Elf_Internal_Sym
*sym
;
1978 struct elf_link_hash_entry
*h
;
1980 unsigned char insn
[4];
1981 bfd_boolean is_call
;
1982 struct function_info
*caller
;
1983 struct call_info
*callee
;
1985 r_type
= ELF32_R_TYPE (irela
->r_info
);
1986 if (r_type
!= R_SPU_REL16
1987 && r_type
!= R_SPU_ADDR16
)
1990 r_indx
= ELF32_R_SYM (irela
->r_info
);
1991 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
1995 || sym_sec
->output_section
== NULL
1996 || sym_sec
->output_section
->owner
!= sec
->output_section
->owner
)
1999 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2000 irela
->r_offset
, 4))
2002 if (!is_branch (insn
))
2005 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2006 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2010 if (!call_tree
|| !warned
)
2011 info
->callbacks
->einfo (_("%B(%A+0x%v): call to non-code section"
2012 " %B(%A), stack analysis incomplete\n"),
2013 sec
->owner
, sec
, irela
->r_offset
,
2014 sym_sec
->owner
, sym_sec
);
2018 is_call
= (insn
[0] & 0xfd) == 0x31;
2021 val
= h
->root
.u
.def
.value
;
2023 val
= sym
->st_value
;
2024 val
+= irela
->r_addend
;
2028 struct function_info
*fun
;
2030 if (irela
->r_addend
!= 0)
2032 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2035 fake
->st_value
= val
;
2037 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2041 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2043 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2046 if (irela
->r_addend
!= 0
2047 && fun
->u
.sym
!= sym
)
2052 caller
= find_function (sec
, irela
->r_offset
, info
);
2055 callee
= bfd_malloc (sizeof *callee
);
2059 callee
->fun
= find_function (sym_sec
, val
, info
);
2060 if (callee
->fun
== NULL
)
2062 callee
->is_tail
= !is_call
;
2063 if (!insert_callee (caller
, callee
))
2066 && !callee
->fun
->is_func
2067 && callee
->fun
->stack
== 0)
2069 /* This is either a tail call or a branch from one part of
2070 the function to another, ie. hot/cold section. If the
2071 destination has been called by some other function then
2072 it is a separate function. We also assume that functions
2073 are not split across input files. */
2074 if (sec
->owner
!= sym_sec
->owner
)
2076 callee
->fun
->start
= NULL
;
2077 callee
->fun
->is_func
= TRUE
;
2079 else if (callee
->fun
->start
== NULL
)
2080 callee
->fun
->start
= caller
;
2083 struct function_info
*callee_start
;
2084 struct function_info
*caller_start
;
2085 callee_start
= callee
->fun
;
2086 while (callee_start
->start
)
2087 callee_start
= callee_start
->start
;
2088 caller_start
= caller
;
2089 while (caller_start
->start
)
2090 caller_start
= caller_start
->start
;
2091 if (caller_start
!= callee_start
)
2093 callee
->fun
->start
= NULL
;
2094 callee
->fun
->is_func
= TRUE
;
2103 /* Handle something like .init or .fini, which has a piece of a function.
2104 These sections are pasted together to form a single function. */
2107 pasted_function (asection
*sec
, struct bfd_link_info
*info
)
2109 struct bfd_link_order
*l
;
2110 struct _spu_elf_section_data
*sec_data
;
2111 struct spu_elf_stack_info
*sinfo
;
2112 Elf_Internal_Sym
*fake
;
2113 struct function_info
*fun
, *fun_start
;
2115 fake
= bfd_zmalloc (sizeof (*fake
));
2119 fake
->st_size
= sec
->size
;
2121 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2122 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2126 /* Find a function immediately preceding this section. */
2128 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2130 if (l
->u
.indirect
.section
== sec
)
2132 if (fun_start
!= NULL
)
2133 fun
->start
= fun_start
;
2136 if (l
->type
== bfd_indirect_link_order
2137 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2138 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2139 && sinfo
->num_fun
!= 0)
2140 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2143 info
->callbacks
->einfo (_("%A link_order not found\n"), sec
);
2147 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2148 overlay stub sections. */
2151 interesting_section (asection
*s
, bfd
*obfd
)
2153 return (s
->output_section
!= NULL
2154 && s
->output_section
->owner
== obfd
2155 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2156 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2160 /* Map address ranges in code sections to functions. */
2163 discover_functions (struct bfd_link_info
*info
)
2167 Elf_Internal_Sym
***psym_arr
;
2168 asection
***sec_arr
;
2169 bfd_boolean gaps
= FALSE
;
2172 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2175 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2176 if (psym_arr
== NULL
)
2178 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2179 if (sec_arr
== NULL
)
2183 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2185 ibfd
= ibfd
->link_next
, bfd_idx
++)
2187 extern const bfd_target bfd_elf32_spu_vec
;
2188 Elf_Internal_Shdr
*symtab_hdr
;
2191 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2192 asection
**psecs
, **p
;
2194 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2197 /* Read all the symbols. */
2198 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2199 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2203 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2206 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2208 symtab_hdr
->contents
= (void *) syms
;
2213 /* Select defined function symbols that are going to be output. */
2214 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2217 psym_arr
[bfd_idx
] = psyms
;
2218 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2221 sec_arr
[bfd_idx
] = psecs
;
2222 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2223 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2224 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2228 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2229 if (s
!= NULL
&& interesting_section (s
, info
->output_bfd
))
2232 symcount
= psy
- psyms
;
2235 /* Sort them by section and offset within section. */
2236 sort_syms_syms
= syms
;
2237 sort_syms_psecs
= psecs
;
2238 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2240 /* Now inspect the function symbols. */
2241 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2243 asection
*s
= psecs
[*psy
- syms
];
2244 Elf_Internal_Sym
**psy2
;
2246 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2247 if (psecs
[*psy2
- syms
] != s
)
2250 if (!alloc_stack_info (s
, psy2
- psy
))
2255 /* First install info about properly typed and sized functions.
2256 In an ideal world this will cover all code sections, except
2257 when partitioning functions into hot and cold sections,
2258 and the horrible pasted together .init and .fini functions. */
2259 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2262 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2264 asection
*s
= psecs
[sy
- syms
];
2265 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2270 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2271 if (interesting_section (sec
, info
->output_bfd
))
2272 gaps
|= check_function_ranges (sec
, info
);
2277 /* See if we can discover more function symbols by looking at
2279 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2281 ibfd
= ibfd
->link_next
, bfd_idx
++)
2285 if (psym_arr
[bfd_idx
] == NULL
)
2288 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2289 if (interesting_section (sec
, info
->output_bfd
)
2290 && sec
->reloc_count
!= 0)
2292 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2297 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2299 ibfd
= ibfd
->link_next
, bfd_idx
++)
2301 Elf_Internal_Shdr
*symtab_hdr
;
2303 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2306 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
2309 psecs
= sec_arr
[bfd_idx
];
2311 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2312 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2315 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2316 if (interesting_section (sec
, info
->output_bfd
))
2317 gaps
|= check_function_ranges (sec
, info
);
2321 /* Finally, install all globals. */
2322 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
2326 s
= psecs
[sy
- syms
];
2328 /* Global syms might be improperly typed functions. */
2329 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
2330 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
2332 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
2337 /* Some of the symbols we've installed as marking the
2338 beginning of functions may have a size of zero. Extend
2339 the range of such functions to the beginning of the
2340 next symbol of interest. */
2341 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2342 if (interesting_section (sec
, info
->output_bfd
))
2344 struct _spu_elf_section_data
*sec_data
;
2345 struct spu_elf_stack_info
*sinfo
;
2347 sec_data
= spu_elf_section_data (sec
);
2348 sinfo
= sec_data
->u
.i
.stack_info
;
2352 bfd_vma hi
= sec
->size
;
2354 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
2356 sinfo
->fun
[fun_idx
].hi
= hi
;
2357 hi
= sinfo
->fun
[fun_idx
].lo
;
2360 /* No symbols in this section. Must be .init or .fini
2361 or something similar. */
2362 else if (!pasted_function (sec
, info
))
2368 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2370 ibfd
= ibfd
->link_next
, bfd_idx
++)
2372 if (psym_arr
[bfd_idx
] == NULL
)
2375 free (psym_arr
[bfd_idx
]);
2376 free (sec_arr
[bfd_idx
]);
2385 /* Mark nodes in the call graph that are called by some other node. */
2388 mark_non_root (struct function_info
*fun
)
2390 struct call_info
*call
;
2393 for (call
= fun
->call_list
; call
; call
= call
->next
)
2395 call
->fun
->non_root
= TRUE
;
2396 if (!call
->fun
->visit1
)
2397 mark_non_root (call
->fun
);
2401 /* Remove cycles from the call graph. */
2404 call_graph_traverse (struct function_info
*fun
, struct bfd_link_info
*info
)
2406 struct call_info
**callp
, *call
;
2409 fun
->marking
= TRUE
;
2411 callp
= &fun
->call_list
;
2412 while ((call
= *callp
) != NULL
)
2414 if (!call
->fun
->visit2
)
2415 call_graph_traverse (call
->fun
, info
);
2416 else if (call
->fun
->marking
)
2418 const char *f1
= func_name (fun
);
2419 const char *f2
= func_name (call
->fun
);
2421 info
->callbacks
->info (_("Stack analysis will ignore the call "
2424 *callp
= call
->next
;
2427 callp
= &call
->next
;
2429 fun
->marking
= FALSE
;
/* Populate call_list for each function.  */

static bfd_boolean
build_call_tree (struct bfd_link_info *info)
{
  bfd *ibfd;
  int i;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  if (!interesting_section (sec, info->output_bfd)
	      || sec->reloc_count == 0)
	    continue;

	  if (!mark_functions_via_relocs (sec, info, TRUE))
	    return FALSE;
	}

      /* Transfer call info from hot/cold section part of function
	 to main entry.  */
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->u.i.stack_info) != NULL)
	    {
	      for (i = 0; i < sinfo->num_fun; ++i)
		{
		  struct function_info *start = sinfo->fun[i].start;

		  if (start != NULL)
		    {
		      struct call_info *call;

		      while (start->start != NULL)
			start = start->start;
		      call = sinfo->fun[i].call_list;
		      while (call != NULL)
			{
			  struct call_info *call_next = call->next;
			  if (!insert_callee (start, call))
			    free (call);
			  call = call_next;
			}
		      sinfo->fun[i].call_list = NULL;
		      sinfo->fun[i].non_root = TRUE;
		    }
		}
	    }
	}
    }

  /* Find the call graph root(s).  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->u.i.stack_info) != NULL)
	    {
	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].visit1)
		  mark_non_root (&sinfo->fun[i]);
	    }
	}
    }

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->u.i.stack_info) != NULL)
	    {
	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].non_root)
		  call_graph_traverse (&sinfo->fun[i], info);
	    }
	}
    }

  return TRUE;
}
/* Descend the call graph for FUN, accumulating total stack required.  */

static bfd_vma
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   int emit_stack_syms)
{
  struct call_info *call;
  struct function_info *max = NULL;
  bfd_vma max_stack = fun->stack;
  bfd_vma stack;
  const char *f1;

  if (fun->visit3)
    return max_stack;

  for (call = fun->call_list; call; call = call->next)
    {
      stack = sum_stack (call->fun, info, emit_stack_syms);
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail)
	stack += fun->stack;
      if (max_stack < stack)
	{
	  max_stack = stack;
	  max = call->fun;
	}
    }

  f1 = func_name (fun);
  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
			  f1, (bfd_vma) fun->stack, max_stack);

  if (fun->call_list != NULL)
    {
      info->callbacks->minfo (_(" calls:\n"));
      for (call = fun->call_list; call; call = call->next)
	{
	  const char *f2 = func_name (call->fun);
	  const char *ann1 = call->fun == max ? "*" : " ";
	  const char *ann2 = call->is_tail ? "t" : " ";

	  info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
	}
    }

  /* Now fun->stack holds cumulative stack.  */
  fun->stack = max_stack;
  fun->visit3 = TRUE;

  if (emit_stack_syms)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name != NULL)
	{
	  if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	    sprintf (name, "__stack_%s", f1);
	  else
	    sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

	  h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
	  free (name);
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_new
		  || h->root.type == bfd_link_hash_undefined
		  || h->root.type == bfd_link_hash_undefweak))
	    {
	      h->root.type = bfd_link_hash_defined;
	      h->root.u.def.section = bfd_abs_section_ptr;
	      h->root.u.def.value = max_stack;
	      h->ref_regular_nonweak = 1;
	      h->forced_local = 1;
	    }
	}
    }

  return max_stack;
}
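
/* Illustrative note on the symbols emitted above when emit_stack_syms
   is set (the values are made up, not from the original source): a
   global function "foo" whose cumulative requirement works out to
   0x120 bytes ends up with an absolute symbol roughly equivalent to

       __stack_foo = 0x120;

   while a non-global "bar" defined in a section whose id is 0x2a would
   instead be named "__stack_2a_bar", matching the two sprintf formats
   above.  */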
/* Provide an estimate of total stack required.  */

static bfd_boolean
spu_elf_stack_analysis (struct bfd_link_info *info, int emit_stack_syms)
{
  bfd *ibfd;
  bfd_vma max_stack = 0;

  if (!discover_functions (info))
    return FALSE;

  if (!build_call_tree (info))
    return FALSE;

  info->callbacks->info (_("Stack size for call graph root nodes.\n"));
  info->callbacks->minfo (_("\nStack size for functions. "
			    "Annotations: '*' max stack, 't' tail call\n"));
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->u.i.stack_info) != NULL)
	    {
	      int i;

	      for (i = 0; i < sinfo->num_fun; ++i)
		{
		  if (!sinfo->fun[i].non_root)
		    {
		      bfd_vma stack;
		      const char *f1;

		      stack = sum_stack (&sinfo->fun[i], info,
					 emit_stack_syms);
		      f1 = func_name (&sinfo->fun[i]);
		      info->callbacks->info (_(" %s: 0x%v\n"),
					     f1, stack);
		      if (max_stack < stack)
			max_stack = stack;
		    }
		}
	    }
	}
    }

  info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
  return TRUE;
}
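
/* Illustrative note on the reporting above (the sample values are made
   up, not from the original source): each call graph root gets one
   " <name>: 0x<stack>" line via info->callbacks->info, the minfo output
   from sum_stack lists every function as "<name>: <local> <cumulative>"
   followed by its callees annotated with '*' (the callee that
   determines the maximum) and 't' (tail call), and the analysis ends
   with the "Maximum stack required is 0x..." summary line.  */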
/* Perform a final link.  */

static bfd_boolean
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (htab->stack_analysis
      && !spu_elf_stack_analysis (info, htab->emit_stack_syms))
    info->callbacks->einfo ("%X%P: stack analysis error: %E\n");

  return bfd_elf_final_link (output_bfd, info);
}
/* Called when not normally emitting relocs, ie. !info->relocatable
   and !info->emitrelocations.  Returns a count of special relocs
   that need to be emitted.  */

static unsigned int
spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
{
  unsigned int count = 0;
  Elf_Internal_Rela *relend = relocs + sec->reloc_count;

  for (; relocs < relend; relocs++)
    {
      int r_type = ELF32_R_TYPE (relocs->r_info);
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	++count;
    }

  return count;
}
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */

static bfd_boolean
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean stubs;

  htab = spu_hash_table (info);
  stubs = (htab->stub_sec != NULL
	   && maybe_needs_stubs (input_section, output_bfd));
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned long r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  emit_these_relocs = TRUE;
	  continue;
	}

      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      warned = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned);
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && elf_discarded_section (sec))
	{
	  /* For relocs against symbols from removed linkonce sections,
	     or sections discarded by a linker script, we just want the
	     section contents zeroed.  Avoid any special processing.  */
	  _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
	  continue;
	}

      if (info->relocatable)
	continue;

      if (unresolved_reloc)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs)
	{
	  enum _stub_type stub_type;

	  stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
				      contents, info);
	  if (stub_type != no_stub)
	    {
	      unsigned int ovl = 0;
	      struct got_entry *g, **head;

	      if (stub_type != nonovl_stub)
		ovl = (spu_elf_section_data (input_section->output_section)
		       ->u.o.ovl_index);

	      if (h != NULL)
		head = &h->got.glist;
	      else
		head = elf_local_got_ents (input_bfd) + r_symndx;

	      for (g = *head; g != NULL; g = g->next)
		if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
		  break;

	      relocation = g->stub_addr;
	      addend = 0;
	    }
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* Fall through.  */

	    common_error:
	      ret = FALSE;
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  if (ret
      && emit_these_relocs
      && !info->relocatable
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
    }

  return ret;
}
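
/* Illustrative note on the reloc handling above (a reading of the code,
   not a statement from the original source): R_SPU_PPU32/R_SPU_PPU64
   relocs are deliberately not applied in the main loop; they only set
   emit_these_relocs.  The final loop then compacts just those entries
   to the front of the reloc array and shrinks reloc_count and sh_size
   so that they alone are written to the output, consistent with the
   count returned by spu_elf_count_relocs.  */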
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */

static bfd_boolean
spu_elf_output_symbol_hook (struct bfd_link_info *info,
			    const char *sym_name ATTRIBUTE_UNUSED,
			    Elf_Internal_Sym *sym,
			    asection *sym_sec ATTRIBUTE_UNUSED,
			    struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (!info->relocatable
      && htab->stub_sec != NULL
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct got_entry *g;

      for (g = h->got.glist; g != NULL; g = g->next)
	if (g->addend == 0 && g->ovl == 0)
	  {
	    sym->st_shndx = (_bfd_elf_section_from_bfd_section
			     (htab->stub_sec[0]->output_section->owner,
			      htab->stub_sec[0]->output_section));
	    sym->st_value = g->stub_addr;
	    break;
	  }
    }

  return TRUE;
}
static int spu_plugin = 0;

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}

/* Set ELF header e_type for plugins.  */

static void
spu_elf_post_process_headers (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  if (spu_plugin)
    {
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);

      i_ehdrp->e_type = ET_DYN;
    }
}
/* We may add an extra PT_LOAD segment for .toe.  We also need extra
   segments for overlays.  */

static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  int extra = htab->num_overlays;
  asection *sec;

  if (extra)
    ++extra;

  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    ++extra;

  return extra;
}
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }

  return TRUE;
}
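
/* Illustrative note on the splitting above (the section list is made
   up, not from the original source): a PT_LOAD map whose sections are
   { .text, .toe, .data } ends up broken into three maps, { .text },
   { .toe } and { .data }, so that .toe gets a program header of its
   own; overlay sections (ovl_index != 0) are singled out the same
   way.  */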
/* Tweak the section type of .note.spu_name.  */

static bfd_boolean
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
		       Elf_Internal_Shdr *hdr,
		       asection *sec)
{
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
    hdr->sh_type = SHT_NOTE;
  return TRUE;
}
/* Tweak phdrs before writing them out.  */

static bfd_boolean
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0)
	      {
		bfd_byte *p = htab->ovtab->contents;
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned int adjust;

	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned int adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
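
/* Illustrative note on the rounding above (the sample value is made up,
   not from the original source): "-x & 15" is the padding needed to
   round x up to a multiple of 16.  For example, with p_filesz == 0x1234,
   adjust = -0x1234 & 15 = 12 and the rounded size is 0x1240.  The first
   backwards scan only verifies that such padding cannot run into the
   following PT_LOAD segment before the second pass applies it.  */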
#define TARGET_BIG_SYM bfd_elf32_spu_vec
#define TARGET_BIG_NAME "elf32-spu"
#define ELF_ARCH bfd_arch_spu
#define ELF_MACHINE_CODE EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE 0x80
#define elf_backend_rela_normal 1
#define elf_backend_can_gc_sections 1

#define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
#define elf_info_to_howto spu_elf_info_to_howto
#define elf_backend_count_relocs spu_elf_count_relocs
#define elf_backend_relocate_section spu_elf_relocate_section
#define elf_backend_symbol_processing spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
#define bfd_elf32_new_section_hook spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers spu_elf_additional_program_headers
#define elf_backend_modify_segment_map spu_elf_modify_segment_map
#define elf_backend_modify_program_headers spu_elf_modify_program_headers
#define elf_backend_post_process_headers spu_elf_post_process_headers
#define elf_backend_fake_sections spu_elf_fake_sections
#define elf_backend_special_sections spu_elf_special_sections
#define bfd_elf32_bfd_final_link spu_elf_final_link

#include "elf32-target.h"