1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008, 2009, 2010, 2011, 2012
4 Free Software Foundation, Inc.
6 This file is part of BFD, the Binary File Descriptor library.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License along
19 with this program; if not, write to the Free Software Foundation, Inc.,
20 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
23 #include "libiberty.h"
29 #include "elf32-spu.h"
31 /* We use RELA style relocs. Don't define USE_REL. */
/* Forward declaration of the special_function used by the R_SPU_REL9 and
   R_SPU_REL9I howto entries below; defined later in this file.
   NOTE(review): the tail of this prototype (presumably the remaining
   parameter types) appears lost to extraction — confirm against the
   original source.  */
33 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
37 /* Values of type 'enum elf_spu_reloc_type' are used to index this
38 array, so it must be declared in the order of that type. */
/* Table of SPU relocation howtos.  Each HOWTO gives: reloc number,
   right-shift, size, bitsize, pc_relative, bitpos, overflow check,
   special_function, name, partial_inplace, src_mask, dst_mask,
   pcrel_offset.  REL9/REL9I use spu_elf_rel9 above because their
   encoded field is split (see dst_mask 0x0180007f / 0x0000c07f).  */
40 static reloc_howto_type elf_howto_table
[] = {
41 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
42 bfd_elf_generic_reloc
, "SPU_NONE",
43 FALSE
, 0, 0x00000000, FALSE
),
44 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
45 bfd_elf_generic_reloc
, "SPU_ADDR10",
46 FALSE
, 0, 0x00ffc000, FALSE
),
47 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
48 bfd_elf_generic_reloc
, "SPU_ADDR16",
49 FALSE
, 0, 0x007fff80, FALSE
),
50 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
51 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
52 FALSE
, 0, 0x007fff80, FALSE
),
53 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
54 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
55 FALSE
, 0, 0x007fff80, FALSE
),
56 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
57 bfd_elf_generic_reloc
, "SPU_ADDR18",
58 FALSE
, 0, 0x01ffff80, FALSE
),
59 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
60 bfd_elf_generic_reloc
, "SPU_ADDR32",
61 FALSE
, 0, 0xffffffff, FALSE
),
62 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
63 bfd_elf_generic_reloc
, "SPU_REL16",
64 FALSE
, 0, 0x007fff80, TRUE
),
65 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
66 bfd_elf_generic_reloc
, "SPU_ADDR7",
67 FALSE
, 0, 0x001fc000, FALSE
),
68 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
69 spu_elf_rel9
, "SPU_REL9",
70 FALSE
, 0, 0x0180007f, TRUE
),
71 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
72 spu_elf_rel9
, "SPU_REL9I",
73 FALSE
, 0, 0x0000c07f, TRUE
),
74 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
75 bfd_elf_generic_reloc
, "SPU_ADDR10I",
76 FALSE
, 0, 0x00ffc000, FALSE
),
77 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
78 bfd_elf_generic_reloc
, "SPU_ADDR16I",
79 FALSE
, 0, 0x007fff80, FALSE
),
80 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
81 bfd_elf_generic_reloc
, "SPU_REL32",
82 FALSE
, 0, 0xffffffff, TRUE
),
83 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
84 bfd_elf_generic_reloc
, "SPU_ADDR16X",
85 FALSE
, 0, 0x007fff80, FALSE
),
86 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
87 bfd_elf_generic_reloc
, "SPU_PPU32",
88 FALSE
, 0, 0xffffffff, FALSE
),
89 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
90 bfd_elf_generic_reloc
, "SPU_PPU64",
/* NOTE(review): the tail of the R_SPU_PPU64 entry (partial_inplace,
   masks, pcrel_offset) appears lost to extraction here.  */
92 HOWTO (R_SPU_ADD_PIC
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
93 bfd_elf_generic_reloc
, "SPU_ADD_PIC",
94 FALSE
, 0, 0x00000000, FALSE
),
/* NOTE(review): the closing "};" of this array appears to be missing
   from this copy — confirm against the original source.  */
/* SPU-specific special sections: ._ea (effective-address data, writable
   PROGBITS) and .toe (table of effective addresses, allocated NOBITS).
   NOTE(review): the usual NULL sentinel entry and closing "};" appear
   lost to extraction — confirm against the original source.  */
97 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
98 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
99 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
/* Map a generic BFD reloc code to the SPU-specific reloc number used to
   index elf_howto_table.  NOTE(review): the switch statement header,
   several return statements, and the default case appear lost to
   extraction; only some case labels/returns survive below.  */
103 static enum elf_spu_reloc_type
104 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
110 case BFD_RELOC_SPU_IMM10W
:
112 case BFD_RELOC_SPU_IMM16W
:
114 case BFD_RELOC_SPU_LO16
:
115 return R_SPU_ADDR16_LO
;
116 case BFD_RELOC_SPU_HI16
:
117 return R_SPU_ADDR16_HI
;
118 case BFD_RELOC_SPU_IMM18
:
120 case BFD_RELOC_SPU_PCREL16
:
122 case BFD_RELOC_SPU_IMM7
:
124 case BFD_RELOC_SPU_IMM8
:
126 case BFD_RELOC_SPU_PCREL9a
:
128 case BFD_RELOC_SPU_PCREL9b
:
130 case BFD_RELOC_SPU_IMM10
:
131 return R_SPU_ADDR10I
;
132 case BFD_RELOC_SPU_IMM16
:
133 return R_SPU_ADDR16I
;
136 case BFD_RELOC_32_PCREL
:
138 case BFD_RELOC_SPU_PPU32
:
140 case BFD_RELOC_SPU_PPU64
:
142 case BFD_RELOC_SPU_ADD_PIC
:
143 return R_SPU_ADD_PIC
;
/* Fill in the howto pointer of a cached reloc (cache_ptr) from the
   r_info field of the internal rela DST.  Asserts the reloc number is
   in range before indexing elf_howto_table.  NOTE(review): the return
   type line and the cache_ptr parameter declaration appear lost to
   extraction.  */
148 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
150 Elf_Internal_Rela
*dst
)
152 enum elf_spu_reloc_type r_type
;
154 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
155 BFD_ASSERT (r_type
< R_SPU_max
);
156 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
/* BFD backend hook: translate a generic reloc CODE into a pointer into
   elf_howto_table via spu_elf_bfd_to_reloc_type.  NOTE(review): the
   body for the R_SPU_NONE case (presumably "return NULL") appears lost
   to extraction.  */
159 static reloc_howto_type
*
160 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
161 bfd_reloc_code_real_type code
)
163 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
165 if (r_type
== R_SPU_NONE
)
168 return elf_howto_table
+ r_type
;
/* BFD backend hook: look up a howto by its name (case-insensitive scan
   of elf_howto_table).  NOTE(review): the r_name parameter declaration
   and the fall-through "return NULL" appear lost to extraction.  */
171 static reloc_howto_type
*
172 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
177 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
178 if (elf_howto_table
[i
].name
!= NULL
179 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
180 return &elf_howto_table
[i
];
185 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
/* Computes a pc-relative value, checks it fits in a signed 9-bit range
   (-256..255), splits its two high bits into both the REL9 and REL9I
   field positions, and patches the instruction word under the howto's
   dst_mask.  NOTE(review): declarations of `val` and `insn`, the
   initial `val = symbol->value` assignment, and the final
   "return bfd_reloc_ok" appear lost to extraction.  */
187 static bfd_reloc_status_type
188 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
189 void *data
, asection
*input_section
,
190 bfd
*output_bfd
, char **error_message
)
192 bfd_size_type octets
;
196 /* If this is a relocatable link (output_bfd test tells us), just
197 call the generic function. Any adjustment will be done at final
199 if (output_bfd
!= NULL
)
200 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
201 input_section
, output_bfd
, error_message
);
203 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
204 return bfd_reloc_outofrange
;
205 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
207 /* Get symbol value. */
209 if (!bfd_is_com_section (symbol
->section
))
211 if (symbol
->section
->output_section
)
212 val
+= symbol
->section
->output_section
->vma
;
214 val
+= reloc_entry
->addend
;
216 /* Make it pc-relative. */
217 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
/* Signed 9-bit range check: val must be in [-256, 255].  */
220 if (val
+ 256 >= 512)
221 return bfd_reloc_overflow
;
223 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
225 /* Move two high bits of value to REL9I and REL9 position.
226 The mask will take care of selecting the right field. */
227 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
228 insn
&= ~reloc_entry
->howto
->dst_mask
;
229 insn
|= val
& reloc_entry
->howto
->dst_mask
;
230 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
/* BFD new-section hook: attach a zeroed _spu_elf_section_data to SEC
   (via used_by_bfd) if none is present, then chain to the generic ELF
   hook.  NOTE(review): the return type line, braces, and the NULL
   check on the bfd_zalloc result appear lost to extraction.  */
235 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
237 if (!sec
->used_by_bfd
)
239 struct _spu_elf_section_data
*sdata
;
241 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
244 sec
->used_by_bfd
= sdata
;
247 return _bfd_elf_new_section_hook (abfd
, sec
);
250 /* Set up overlay info for executables. */
/* Object_p hook: for executables/shared objects, walk the program
   headers looking for PT_LOAD segments flagged PF_OVERLAY, number the
   overlay buffers/overlays, and record the numbers in each contained
   section's spu_elf_section_data.  The 0x3ffff vaddr comparison groups
   segments sharing the same 256k local-store buffer.  NOTE(review):
   the return type line, several counters' updates, loop braces and the
   final return appear lost to extraction.  */
253 spu_elf_object_p (bfd
*abfd
)
255 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
257 unsigned int i
, num_ovl
, num_buf
;
258 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
259 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
260 Elf_Internal_Phdr
*last_phdr
= NULL
;
262 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
263 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
268 if (last_phdr
== NULL
269 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
272 for (j
= 1; j
< elf_numsections (abfd
); j
++)
274 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
276 if (ELF_SECTION_SIZE (shdr
, phdr
) != 0
277 && ELF_SECTION_IN_SEGMENT (shdr
, phdr
))
279 asection
*sec
= shdr
->bfd_section
;
280 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
281 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
289 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
290 strip --strip-unneeded will not remove them. */
/* NOTE(review): the return type line (presumably "static void") and
   braces appear lost to extraction.  */
293 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
295 if (sym
->name
!= NULL
296 && sym
->section
!= bfd_abs_section_ptr
297 && strncmp (sym
->name
, "_EAR_", 5) == 0)
298 sym
->flags
|= BSF_KEEP
;
301 /* SPU ELF linker hash table. */
/* Extends the generic ELF link hash table with SPU overlay state:
   linker parameters, per-overlay stub counts, overlay-manager entry
   symbols, buffer/overlay counts, soft-icache geometry, and error
   flags.  NOTE(review): several member declarations (e.g. the overlay
   section shortcuts, stub sections, fixup section pointer, and the
   got_entry struct around line 351) appear lost to extraction.  */
303 struct spu_link_hash_table
305 struct elf_link_hash_table elf
;
307 struct spu_elf_params
*params
;
309 /* Shortcuts to overlay sections. */
315 /* Count of stubs in each overlay section. */
316 unsigned int *stub_count
;
318 /* The stub section for each overlay section. */
321 struct elf_link_hash_entry
*ovly_entry
[2];
323 /* Number of overlay buffers. */
324 unsigned int num_buf
;
326 /* Total number of overlays. */
327 unsigned int num_overlays
;
329 /* For soft icache. */
330 unsigned int line_size_log2
;
331 unsigned int num_lines_log2
;
332 unsigned int fromelem_size_log2
;
334 /* How much memory we have. */
335 unsigned int local_store
;
337 /* Count of overlay stubs needed in non-overlay area. */
338 unsigned int non_ovly_stub
;
340 /* Pointer to the fixup section */
344 unsigned int stub_err
: 1;
347 /* Hijack the generic got fields for overlay stub accounting. */
351 struct got_entry
*next
;
/* Retrieve the SPU hash table from a link_info, or NULL if the hash
   table does not carry SPU_ELF_DATA.  */
360 #define spu_hash_table(p) \
361 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
362 == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
/* Call-graph data used by the overlay auto-layout and stack analysis:
   a call edge (callee, flags, priority), per-function info (call list,
   hot/cold owner, defining symbol, liveness offsets, traversal flags),
   and a variable-sized per-section function array.  NOTE(review): the
   struct headers for call_info and function_info, plus several members
   (section, lo/hi, lr_store, sp_adjust, depth), appear lost to
   extraction.  */
366 struct function_info
*fun
;
367 struct call_info
*next
;
369 unsigned int max_depth
;
370 unsigned int is_tail
: 1;
371 unsigned int is_pasted
: 1;
372 unsigned int broken_cycle
: 1;
373 unsigned int priority
: 13;
378 /* List of functions called. Also branches to hot/cold part of
380 struct call_info
*call_list
;
381 /* For hot/cold part of function, point to owner. */
382 struct function_info
*start
;
383 /* Symbol at start of function. */
385 Elf_Internal_Sym
*sym
;
386 struct elf_link_hash_entry
*h
;
388 /* Function section. */
391 /* Where last called from, and number of sections called from. */
392 asection
*last_caller
;
393 unsigned int call_count
;
394 /* Address range of (this part of) function. */
396 /* Offset where we found a store of lr, or -1 if none found. */
398 /* Offset where we found the stack adjustment insn. */
402 /* Distance from root of call tree. Tail and hot/cold branches
403 count as one deeper. We aren't counting stack frames here. */
405 /* Set if global symbol. */
406 unsigned int global
: 1;
407 /* Set if known to be start of function (as distinct from a hunk
408 in hot/cold section. */
409 unsigned int is_func
: 1;
410 /* Set if not a root node. */
411 unsigned int non_root
: 1;
412 /* Flags used during call tree traversal. It's cheaper to replicate
413 the visit flags than have one which needs clearing after a traversal. */
414 unsigned int visit1
: 1;
415 unsigned int visit2
: 1;
416 unsigned int marking
: 1;
417 unsigned int visit3
: 1;
418 unsigned int visit4
: 1;
419 unsigned int visit5
: 1;
420 unsigned int visit6
: 1;
421 unsigned int visit7
: 1;
424 struct spu_elf_stack_info
428 /* Variable size array describing functions, one per contiguous
429 address range belonging to a function. */
430 struct function_info fun
[1];
/* Forward declaration; find_function is defined later in the file.  */
433 static struct function_info
*find_function (asection
*, bfd_vma
,
434 struct bfd_link_info
*);
436 /* Create a spu ELF linker hash table. */
/* Allocates the SPU hash table, initialises the embedded generic ELF
   hash table, zeroes the SPU-specific tail (everything from `ovtab`
   on), and neutralises the generic got refcount/offset initialisers,
   since the got fields are reused for overlay stub accounting.
   NOTE(review): the NULL checks after bfd_malloc and after
   _bfd_elf_link_hash_table_init (with free on failure) appear lost to
   extraction.  */
438 static struct bfd_link_hash_table
*
439 spu_elf_link_hash_table_create (bfd
*abfd
)
441 struct spu_link_hash_table
*htab
;
443 htab
= bfd_malloc (sizeof (*htab
));
447 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
448 _bfd_elf_link_hash_newfunc
,
449 sizeof (struct elf_link_hash_entry
),
456 memset (&htab
->ovtab
, 0,
457 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
459 htab
->elf
.init_got_refcount
.refcount
= 0;
460 htab
->elf
.init_got_refcount
.glist
= NULL
;
461 htab
->elf
.init_got_offset
.offset
= 0;
462 htab
->elf
.init_got_offset
.glist
= NULL
;
463 return &htab
->elf
.root
;
/* Record the linker's SPU parameters in the hash table and precompute
   the log2 soft-icache geometry (line size, line count, and "from"
   element size) used throughout overlay handling.  NOTE(review): the
   return type line and braces appear lost to extraction.  */
467 spu_elf_setup (struct bfd_link_info
*info
, struct spu_elf_params
*params
)
469 bfd_vma max_branch_log2
;
471 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
472 htab
->params
= params
;
473 htab
->line_size_log2
= bfd_log2 (htab
->params
->line_size
);
474 htab
->num_lines_log2
= bfd_log2 (htab
->params
->num_lines
);
476 /* For the software i-cache, we provide a "from" list whose size
477 is a power-of-two number of quadwords, big enough to hold one
478 byte per outgoing branch. Compute this number here. */
479 max_branch_log2
= bfd_log2 (htab
->params
->max_branch
);
480 htab
->fromelem_size_log2
= max_branch_log2
> 4 ? max_branch_log2
- 4 : 0;
483 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
484 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
485 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
/* Globals: follow indirect/warning links, then resolve the defining
   section for defined/weak-defined symbols.  Locals: read (and cache)
   the local symbol table, then map st_shndx to a bfd section.
   NOTE(review): several lines — the return type, the ibfd/symsecp
   parameters, output-pointer assignments, the cache-store of locsyms,
   and the success/failure returns — appear lost to extraction.  */
488 get_sym_h (struct elf_link_hash_entry
**hp
,
489 Elf_Internal_Sym
**symp
,
491 Elf_Internal_Sym
**locsymsp
,
492 unsigned long r_symndx
,
495 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
497 if (r_symndx
>= symtab_hdr
->sh_info
)
499 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
500 struct elf_link_hash_entry
*h
;
502 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
503 while (h
->root
.type
== bfd_link_hash_indirect
504 || h
->root
.type
== bfd_link_hash_warning
)
505 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
515 asection
*symsec
= NULL
;
516 if (h
->root
.type
== bfd_link_hash_defined
517 || h
->root
.type
== bfd_link_hash_defweak
)
518 symsec
= h
->root
.u
.def
.section
;
524 Elf_Internal_Sym
*sym
;
525 Elf_Internal_Sym
*locsyms
= *locsymsp
;
529 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
531 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
533 0, NULL
, NULL
, NULL
);
538 sym
= locsyms
+ r_symndx
;
547 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
553 /* Create the note section if not already present. This is done early so
554 that the linker maps the sections to the right place in the output. */
/* Builds the SPU_PTNOTE_SPUNAME note (namesz, descsz, type 1, plugin
   name, output filename) in the first input bfd unless some input
   already has one, and, when fixups are requested, creates the
   linker-generated ".fixup" section in the dynobj.  NOTE(review):
   local declarations (ibfd, s, flags, size, name_len, data), several
   error returns, and the final return appear lost to extraction.  */
557 spu_elf_create_sections (struct bfd_link_info
*info
)
559 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
562 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
563 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
568 /* Make SPU_PTNOTE_SPUNAME section. */
575 ibfd
= info
->input_bfds
;
576 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
577 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
579 || !bfd_set_section_alignment (ibfd
, s
, 4))
/* Note layout: 12-byte header + padded plugin name + padded filename.  */
582 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
583 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
584 size
+= (name_len
+ 3) & -4;
586 if (!bfd_set_section_size (ibfd
, s
, size
))
589 data
= bfd_zalloc (ibfd
, size
);
593 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
594 bfd_put_32 (ibfd
, name_len
, data
+ 4);
595 bfd_put_32 (ibfd
, 1, data
+ 8);
596 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
597 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
598 bfd_get_filename (info
->output_bfd
), name_len
);
602 if (htab
->params
->emit_fixups
)
607 if (htab
->elf
.dynobj
== NULL
)
608 htab
->elf
.dynobj
= ibfd
;
609 ibfd
= htab
->elf
.dynobj
;
610 flags
= (SEC_LOAD
| SEC_ALLOC
| SEC_READONLY
| SEC_HAS_CONTENTS
611 | SEC_IN_MEMORY
| SEC_LINKER_CREATED
);
612 s
= bfd_make_section_anyway_with_flags (ibfd
, ".fixup", flags
);
613 if (s
== NULL
|| !bfd_set_section_alignment (ibfd
, s
, 2))
621 /* qsort predicate to sort sections by vma. */
/* Compares two asection pointers by vma, falling back to section index
   for a stable order when vmas are equal.  NOTE(review): the return
   type line and the "if (delta != 0)" guard line appear lost to
   extraction.  */
624 sort_sections (const void *a
, const void *b
)
626 const asection
*const *s1
= a
;
627 const asection
*const *s2
= b
;
628 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
631 return delta
< 0 ? -1 : 1;
633 return (*s1
)->index
- (*s2
)->index
;
636 /* Identify overlays in the output bfd, and number them.
637 Returns 0 on error, 1 if no overlays, 2 if overlays. */
/* Collects all allocated output sections sorted by vma, then detects
   overlays either by soft-icache geometry (sections inside the cache
   area, one per cache line) or by overlapping vmas (classic overlays).
   Overlay sections get ovl_index/ovl_buf numbers in their section
   data; the table records counts and looks up the two overlay-manager
   entry symbols appropriate to the chosen flavour.  NOTE(review):
   numerous lines — variable declarations (s, ovl_end, vma_start
   initialisation), loop bodies, error-path returns and the final
   return — appear lost to extraction throughout this function.  */
640 spu_elf_find_overlays (struct bfd_link_info
*info
)
642 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
643 asection
**alloc_sec
;
644 unsigned int i
, n
, ovl_index
, num_buf
;
647 static const char *const entry_names
[2][2] = {
648 { "__ovly_load", "__icache_br_handler" },
649 { "__ovly_return", "__icache_call_handler" }
652 if (info
->output_bfd
->section_count
< 2)
656 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
657 if (alloc_sec
== NULL
)
660 /* Pick out all the alloced sections. */
661 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
662 if ((s
->flags
& SEC_ALLOC
) != 0
663 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
673 /* Sort them by vma. */
674 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
676 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
677 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
679 unsigned int prev_buf
= 0, set_id
= 0;
681 /* Look for an overlapping vma to find the first overlay section. */
682 bfd_vma vma_start
= 0;
684 for (i
= 1; i
< n
; i
++)
687 if (s
->vma
< ovl_end
)
689 asection
*s0
= alloc_sec
[i
- 1];
693 << (htab
->num_lines_log2
+ htab
->line_size_log2
)));
698 ovl_end
= s
->vma
+ s
->size
;
701 /* Now find any sections within the cache area. */
702 for (ovl_index
= 0, num_buf
= 0; i
< n
; i
++)
705 if (s
->vma
>= ovl_end
)
708 /* A section in an overlay area called .ovl.init is not
709 an overlay, in the sense that it might be loaded in
710 by the overlay manager, but rather the initial
711 section contents for the overlay buffer. */
712 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
714 num_buf
= ((s
->vma
- vma_start
) >> htab
->line_size_log2
) + 1;
715 set_id
= (num_buf
== prev_buf
)? set_id
+ 1 : 0;
718 if ((s
->vma
- vma_start
) & (htab
->params
->line_size
- 1))
720 info
->callbacks
->einfo (_("%X%P: overlay section %A "
721 "does not start on a cache line.\n"),
723 bfd_set_error (bfd_error_bad_value
);
726 else if (s
->size
> htab
->params
->line_size
)
728 info
->callbacks
->einfo (_("%X%P: overlay section %A "
729 "is larger than a cache line.\n"),
731 bfd_set_error (bfd_error_bad_value
);
735 alloc_sec
[ovl_index
++] = s
;
736 spu_elf_section_data (s
)->u
.o
.ovl_index
737 = (set_id
<< htab
->num_lines_log2
) + num_buf
;
738 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
742 /* Ensure there are no more overlay sections. */
746 if (s
->vma
< ovl_end
)
748 info
->callbacks
->einfo (_("%X%P: overlay section %A "
749 "is not in cache area.\n"),
751 bfd_set_error (bfd_error_bad_value
);
755 ovl_end
= s
->vma
+ s
->size
;
760 /* Look for overlapping vmas. Any with overlap must be overlays.
761 Count them. Also count the number of overlay regions. */
762 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
765 if (s
->vma
< ovl_end
)
767 asection
*s0
= alloc_sec
[i
- 1];
769 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
772 if (strncmp (s0
->name
, ".ovl.init", 9) != 0)
774 alloc_sec
[ovl_index
] = s0
;
775 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
776 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= num_buf
;
779 ovl_end
= s
->vma
+ s
->size
;
781 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
783 alloc_sec
[ovl_index
] = s
;
784 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
785 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
786 if (s0
->vma
!= s
->vma
)
788 info
->callbacks
->einfo (_("%X%P: overlay sections %A "
789 "and %A do not start at the "
792 bfd_set_error (bfd_error_bad_value
);
795 if (ovl_end
< s
->vma
+ s
->size
)
796 ovl_end
= s
->vma
+ s
->size
;
800 ovl_end
= s
->vma
+ s
->size
;
804 htab
->num_overlays
= ovl_index
;
805 htab
->num_buf
= num_buf
;
806 htab
->ovl_sec
= alloc_sec
;
/* Look up the overlay-manager entry points appropriate to the flavour
   (__ovly_load/__ovly_return or the icache handlers).  */
811 for (i
= 0; i
< 2; i
++)
814 struct elf_link_hash_entry
*h
;
816 name
= entry_names
[i
][htab
->params
->ovly_flavour
];
817 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
821 if (h
->root
.type
== bfd_link_hash_new
)
823 h
->root
.type
= bfd_link_hash_undefined
;
825 h
->ref_regular_nonweak
= 1;
828 htab
->ovly_entry
[i
] = h
;
834 /* Non-zero to use bra in overlay stubs rather than br. */
/* SPU branch/no-op/immediate-load instruction templates (opcode in the
   high bits, operand fields zero) used when emitting overlay stubs.  */
837 #define BRA 0x30000000
838 #define BRASL 0x31000000
839 #define BR 0x32000000
840 #define BRSL 0x33000000
841 #define NOP 0x40200000
842 #define LNOP 0x00200000
843 #define ILA 0x42000000
845 /* Return true for all relative and absolute branch instructions.
853 brhnz 00100011 0.. */
/* Tests the opcode bits of the big-endian instruction at INSN.
   NOTE(review): the return type line and braces appear lost to
   extraction.  */
856 is_branch (const unsigned char *insn
)
858 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
861 /* Return true for all indirect branch instructions.
869 bihnz 00100101 011 */
/* Tests the opcode bits of the big-endian instruction at INSN.
   NOTE(review): the return type line and braces appear lost to
   extraction.  */
872 is_indirect_branch (const unsigned char *insn
)
874 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
877 /* Return true for branch hint instructions.
/* Tests the top opcode bits of the instruction at INSN.  NOTE(review):
   the rest of the header comment, the return type line, and braces
   appear lost to extraction.  */
882 is_hint (const unsigned char *insn
)
884 return (insn
[0] & 0xfc) == 0x10;
887 /* True if INPUT_SECTION might need overlay stubs. */
/* Filters out non-allocated (debug) sections, discarded link-once
   sections, and .eh_frame.  NOTE(review): the return type line, braces
   and the FALSE/TRUE return statements appear lost to extraction.  */
890 maybe_needs_stubs (asection
*input_section
)
892 /* No stubs for debug sections and suchlike. */
893 if ((input_section
->flags
& SEC_ALLOC
) == 0)
896 /* No stubs for link-once sections that will be discarded. */
897 if (input_section
->output_section
== bfd_abs_section_ptr
)
900 /* Don't create stubs for .eh_frame references. */
901 if (strcmp (input_section
->name
, ".eh_frame") == 0)
923 /* Return non-zero if this reloc symbol should go via an overlay stub.
924 Return 2 if the stub must be in non-overlay area. */
/* Decides the stub kind for one relocation: no stub for overlay-manager
   symbols and non-code data references; always a stub for setjmp;
   a call/branch stub when source and destination overlays differ,
   using .brinfo lrlive bits from the instruction when present; and a
   non-overlay stub when a function's address is taken (non-icache
   flavours only).  NOTE(review): several lines — parameter
   declarations (sym_sec, contents), early returns, the branch/call
   special-case bodies, and the final return of `ret` — appear lost to
   extraction.  */
926 static enum _stub_type
927 needs_ovl_stub (struct elf_link_hash_entry
*h
,
928 Elf_Internal_Sym
*sym
,
930 asection
*input_section
,
931 Elf_Internal_Rela
*irela
,
933 struct bfd_link_info
*info
)
935 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
936 enum elf_spu_reloc_type r_type
;
937 unsigned int sym_type
;
938 bfd_boolean branch
, hint
, call
;
939 enum _stub_type ret
= no_stub
;
943 || sym_sec
->output_section
== bfd_abs_section_ptr
944 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
949 /* Ensure no stubs for user supplied overlay manager syms. */
950 if (h
== htab
->ovly_entry
[0] || h
== htab
->ovly_entry
[1])
953 /* setjmp always goes via an overlay stub, because then the return
954 and hence the longjmp goes via __ovly_return. That magically
955 makes setjmp/longjmp between overlays work. */
956 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
957 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
964 sym_type
= ELF_ST_TYPE (sym
->st_info
);
966 r_type
= ELF32_R_TYPE (irela
->r_info
);
970 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
972 if (contents
== NULL
)
975 if (!bfd_get_section_contents (input_section
->owner
,
982 contents
+= irela
->r_offset
;
984 branch
= is_branch (contents
);
985 hint
= is_hint (contents
);
/* brsl/brasl opcodes: a branch that sets the link register.  */
988 call
= (contents
[0] & 0xfd) == 0x31;
990 && sym_type
!= STT_FUNC
993 /* It's common for people to write assembly and forget
994 to give function symbols the right type. Handle
995 calls to such symbols, but warn so that (hopefully)
996 people will fix their code. We need the symbol
997 type to be correct to distinguish function pointer
998 initialisation from other pointer initialisations. */
999 const char *sym_name
;
1002 sym_name
= h
->root
.root
.string
;
1005 Elf_Internal_Shdr
*symtab_hdr
;
1006 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
1007 sym_name
= bfd_elf_sym_name (input_section
->owner
,
1012 (*_bfd_error_handler
) (_("warning: call to non-function"
1013 " symbol %s defined in %B"),
1014 sym_sec
->owner
, sym_name
);
1020 if ((!branch
&& htab
->params
->ovly_flavour
== ovly_soft_icache
)
1021 || (sym_type
!= STT_FUNC
1022 && !(branch
|| hint
)
1023 && (sym_sec
->flags
& SEC_CODE
) == 0))
1026 /* Usually, symbols in non-overlay sections don't need stubs. */
1027 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
1028 && !htab
->params
->non_overlay_stubs
)
1031 /* A reference from some other section to a symbol in an overlay
1032 section needs a stub. */
1033 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
1034 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
1036 unsigned int lrlive
= 0;
1038 lrlive
= (contents
[1] & 0x70) >> 4;
1040 if (!lrlive
&& (call
|| sym_type
== STT_FUNC
))
1041 ret
= call_ovl_stub
;
1043 ret
= br000_ovl_stub
+ lrlive
;
1046 /* If this insn isn't a branch then we are possibly taking the
1047 address of a function and passing it out somehow. Soft-icache code
1048 always generates inline code to do indirect branches. */
1049 if (!(branch
|| hint
)
1050 && sym_type
== STT_FUNC
1051 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
/* Account for one overlay stub: find (or create) the got_entry list for
   the target symbol — hash entry for globals, elf_local_got_ents slot
   for locals — and bump the per-overlay stub count unless an existing
   entry already covers this (addend, overlay) pair.  A non-overlay
   (ovl==0) stub supersedes and zaps per-overlay stubs for the same
   addend.  NOTE(review): the return type line, the isec/ibfd
   parameters, the soft-icache early path, the `addend` declaration,
   got_entry field assignments and list insertion, and the returns,
   appear lost to extraction.  */
1058 count_stub (struct spu_link_hash_table
*htab
,
1061 enum _stub_type stub_type
,
1062 struct elf_link_hash_entry
*h
,
1063 const Elf_Internal_Rela
*irela
)
1065 unsigned int ovl
= 0;
1066 struct got_entry
*g
, **head
;
1069 /* If this instruction is a branch or call, we need a stub
1070 for it. One stub per function per overlay.
1071 If it isn't a branch, then we are taking the address of
1072 this function so need a stub in the non-overlay area
1073 for it. One stub per function. */
1074 if (stub_type
!= nonovl_stub
)
1075 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1078 head
= &h
->got
.glist
;
1081 if (elf_local_got_ents (ibfd
) == NULL
)
1083 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
1084 * sizeof (*elf_local_got_ents (ibfd
)));
1085 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
1086 if (elf_local_got_ents (ibfd
) == NULL
)
1089 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1092 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1094 htab
->stub_count
[ovl
] += 1;
1100 addend
= irela
->r_addend
;
1104 struct got_entry
*gnext
;
1106 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1107 if (g
->addend
== addend
&& g
->ovl
== 0)
1112 /* Need a new non-overlay area stub. Zap other stubs. */
1113 for (g
= *head
; g
!= NULL
; g
= gnext
)
1116 if (g
->addend
== addend
)
1118 htab
->stub_count
[g
->ovl
] -= 1;
1126 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1127 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1133 g
= bfd_malloc (sizeof *g
);
1138 g
->stub_addr
= (bfd_vma
) -1;
1142 htab
->stub_count
[ovl
] += 1;
1148 /* Support two sizes of overlay stubs, a slower more compact stub of two
1149 instructions, and a faster stub of four instructions.
1150 Soft-icache stubs are four or eight words. */
/* Stub size in bytes: 16 doubled for soft-icache, halved for compact
   stubs.  NOTE(review): the return type lines and braces of both
   helpers appear lost to extraction.  */
1153 ovl_stub_size (struct spu_elf_params
*params
)
1155 return 16 << params
->ovly_flavour
>> params
->compact_stub
;
/* log2 of ovl_stub_size, kept in sync with the expression above.  */
1159 ovl_stub_size_log2 (struct spu_elf_params
*params
)
1161 return 4 + params
->ovly_flavour
- params
->compact_stub
;
1164 /* Two instruction overlay stubs look like:
1166 brsl $75,__ovly_load
1167 .word target_ovl_and_address
1169 ovl_and_address is a word with the overlay number in the top 14 bits
1170 and local store address in the bottom 18 bits.
1172 Four instruction overlay stubs look like:
1176 ila $79,target_address
1179 Software icache stubs are:
1183 .word lrlive_branchlocalstoreaddr;
1184 brasl $75,__icache_br_handler
1189 build_stub (struct bfd_link_info
*info
,
1192 enum _stub_type stub_type
,
1193 struct elf_link_hash_entry
*h
,
1194 const Elf_Internal_Rela
*irela
,
1198 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1199 unsigned int ovl
, dest_ovl
, set_id
;
1200 struct got_entry
*g
, **head
;
1202 bfd_vma addend
, from
, to
, br_dest
, patt
;
1203 unsigned int lrlive
;
1206 if (stub_type
!= nonovl_stub
)
1207 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1210 head
= &h
->got
.glist
;
1212 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1216 addend
= irela
->r_addend
;
1218 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1220 g
= bfd_malloc (sizeof *g
);
1226 g
->br_addr
= (irela
->r_offset
1227 + isec
->output_offset
1228 + isec
->output_section
->vma
);
1234 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1235 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1240 if (g
->ovl
== 0 && ovl
!= 0)
1243 if (g
->stub_addr
!= (bfd_vma
) -1)
1247 sec
= htab
->stub_sec
[ovl
];
1248 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
1249 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
1250 g
->stub_addr
= from
;
1251 to
= (htab
->ovly_entry
[0]->root
.u
.def
.value
1252 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_offset
1253 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_section
->vma
);
1255 if (((dest
| to
| from
) & 3) != 0)
1260 dest_ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
1262 if (htab
->params
->ovly_flavour
== ovly_normal
1263 && !htab
->params
->compact_stub
)
1265 bfd_put_32 (sec
->owner
, ILA
+ ((dest_ovl
<< 7) & 0x01ffff80) + 78,
1266 sec
->contents
+ sec
->size
);
1267 bfd_put_32 (sec
->owner
, LNOP
,
1268 sec
->contents
+ sec
->size
+ 4);
1269 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
1270 sec
->contents
+ sec
->size
+ 8);
1272 bfd_put_32 (sec
->owner
, BR
+ (((to
- (from
+ 12)) << 5) & 0x007fff80),
1273 sec
->contents
+ sec
->size
+ 12);
1275 bfd_put_32 (sec
->owner
, BRA
+ ((to
<< 5) & 0x007fff80),
1276 sec
->contents
+ sec
->size
+ 12);
1278 else if (htab
->params
->ovly_flavour
== ovly_normal
1279 && htab
->params
->compact_stub
)
1282 bfd_put_32 (sec
->owner
, BRSL
+ (((to
- from
) << 5) & 0x007fff80) + 75,
1283 sec
->contents
+ sec
->size
);
1285 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1286 sec
->contents
+ sec
->size
);
1287 bfd_put_32 (sec
->owner
, (dest
& 0x3ffff) | (dest_ovl
<< 18),
1288 sec
->contents
+ sec
->size
+ 4);
1290 else if (htab
->params
->ovly_flavour
== ovly_soft_icache
1291 && htab
->params
->compact_stub
)
1294 if (stub_type
== nonovl_stub
)
1296 else if (stub_type
== call_ovl_stub
)
1297 /* A brsl makes lr live and *(*sp+16) is live.
1298 Tail calls have the same liveness. */
1300 else if (!htab
->params
->lrlive_analysis
)
1301 /* Assume stack frame and lr save. */
1303 else if (irela
!= NULL
)
1305 /* Analyse branch instructions. */
1306 struct function_info
*caller
;
1309 caller
= find_function (isec
, irela
->r_offset
, info
);
1310 if (caller
->start
== NULL
)
1311 off
= irela
->r_offset
;
1314 struct function_info
*found
= NULL
;
1316 /* Find the earliest piece of this function that
1317 has frame adjusting instructions. We might
1318 see dynamic frame adjustment (eg. for alloca)
1319 in some later piece, but functions using
1320 alloca always set up a frame earlier. Frame
1321 setup instructions are always in one piece. */
1322 if (caller
->lr_store
!= (bfd_vma
) -1
1323 || caller
->sp_adjust
!= (bfd_vma
) -1)
1325 while (caller
->start
!= NULL
)
1327 caller
= caller
->start
;
1328 if (caller
->lr_store
!= (bfd_vma
) -1
1329 || caller
->sp_adjust
!= (bfd_vma
) -1)
1337 if (off
> caller
->sp_adjust
)
1339 if (off
> caller
->lr_store
)
1340 /* Only *(*sp+16) is live. */
1343 /* If no lr save, then we must be in a
1344 leaf function with a frame.
1345 lr is still live. */
1348 else if (off
> caller
->lr_store
)
1350 /* Between lr save and stack adjust. */
1352 /* This should never happen since prologues won't
1357 /* On entry to function. */
1360 if (stub_type
!= br000_ovl_stub
1361 && lrlive
!= stub_type
- br000_ovl_stub
)
1362 info
->callbacks
->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1363 "from analysis (%u)\n"),
1364 isec
, irela
->r_offset
, lrlive
,
1365 stub_type
- br000_ovl_stub
);
1368 /* If given lrlive info via .brinfo, use it. */
1369 if (stub_type
> br000_ovl_stub
)
1370 lrlive
= stub_type
- br000_ovl_stub
;
1373 to
= (htab
->ovly_entry
[1]->root
.u
.def
.value
1374 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_offset
1375 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_section
->vma
);
1377 /* The branch that uses this stub goes to stub_addr + 4. We'll
1378 set up an xor pattern that can be used by the icache manager
1379 to modify this branch to go directly to its destination. */
1381 br_dest
= g
->stub_addr
;
1384 /* Except in the case of _SPUEAR_ stubs, the branch in
1385 question is the one in the stub itself. */
1386 BFD_ASSERT (stub_type
== nonovl_stub
);
1387 g
->br_addr
= g
->stub_addr
;
1391 set_id
= ((dest_ovl
- 1) >> htab
->num_lines_log2
) + 1;
1392 bfd_put_32 (sec
->owner
, (set_id
<< 18) | (dest
& 0x3ffff),
1393 sec
->contents
+ sec
->size
);
1394 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1395 sec
->contents
+ sec
->size
+ 4);
1396 bfd_put_32 (sec
->owner
, (lrlive
<< 29) | (g
->br_addr
& 0x3ffff),
1397 sec
->contents
+ sec
->size
+ 8);
1398 patt
= dest
^ br_dest
;
1399 if (irela
!= NULL
&& ELF32_R_TYPE (irela
->r_info
) == R_SPU_REL16
)
1400 patt
= (dest
- g
->br_addr
) ^ (br_dest
- g
->br_addr
);
1401 bfd_put_32 (sec
->owner
, (patt
<< 5) & 0x007fff80,
1402 sec
->contents
+ sec
->size
+ 12);
1405 /* Extra space for linked list entries. */
1411 sec
->size
+= ovl_stub_size (htab
->params
);
1413 if (htab
->params
->emit_stub_syms
)
1419 len
= 8 + sizeof (".ovl_call.") - 1;
1421 len
+= strlen (h
->root
.root
.string
);
1426 add
= (int) irela
->r_addend
& 0xffffffff;
1429 name
= bfd_malloc (len
+ 1);
1433 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1435 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1437 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1438 dest_sec
->id
& 0xffffffff,
1439 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1441 sprintf (name
+ len
- 9, "+%x", add
);
1443 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1447 if (h
->root
.type
== bfd_link_hash_new
)
1449 h
->root
.type
= bfd_link_hash_defined
;
1450 h
->root
.u
.def
.section
= sec
;
1451 h
->size
= ovl_stub_size (htab
->params
);
1452 h
->root
.u
.def
.value
= sec
->size
- h
->size
;
1456 h
->ref_regular_nonweak
= 1;
1457 h
->forced_local
= 1;
1465 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1469 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1471 /* Symbols starting with _SPUEAR_ need a stub because they may be
1472 invoked by the PPU. */
1473 struct bfd_link_info
*info
= inf
;
1474 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1477 if ((h
->root
.type
== bfd_link_hash_defined
1478 || h
->root
.type
== bfd_link_hash_defweak
)
1480 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1481 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1482 && sym_sec
->output_section
!= bfd_abs_section_ptr
1483 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1484 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1485 || htab
->params
->non_overlay_stubs
))
1487 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1494 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1496 /* Symbols starting with _SPUEAR_ need a stub because they may be
1497 invoked by the PPU. */
1498 struct bfd_link_info
*info
= inf
;
1499 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1502 if ((h
->root
.type
== bfd_link_hash_defined
1503 || h
->root
.type
== bfd_link_hash_defweak
)
1505 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1506 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1507 && sym_sec
->output_section
!= bfd_abs_section_ptr
1508 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1509 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1510 || htab
->params
->non_overlay_stubs
))
1512 return build_stub (info
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1513 h
->root
.u
.def
.value
, sym_sec
);
1519 /* Size or build stubs. */
1522 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1524 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1527 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1529 extern const bfd_target bfd_elf32_spu_vec
;
1530 Elf_Internal_Shdr
*symtab_hdr
;
1532 Elf_Internal_Sym
*local_syms
= NULL
;
1534 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1537 /* We'll need the symbol table in a second. */
1538 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1539 if (symtab_hdr
->sh_info
== 0)
1542 /* Walk over each section attached to the input bfd. */
1543 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1545 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1547 /* If there aren't any relocs, then there's nothing more to do. */
1548 if ((isec
->flags
& SEC_RELOC
) == 0
1549 || isec
->reloc_count
== 0)
1552 if (!maybe_needs_stubs (isec
))
1555 /* Get the relocs. */
1556 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1558 if (internal_relocs
== NULL
)
1559 goto error_ret_free_local
;
1561 /* Now examine each relocation. */
1562 irela
= internal_relocs
;
1563 irelaend
= irela
+ isec
->reloc_count
;
1564 for (; irela
< irelaend
; irela
++)
1566 enum elf_spu_reloc_type r_type
;
1567 unsigned int r_indx
;
1569 Elf_Internal_Sym
*sym
;
1570 struct elf_link_hash_entry
*h
;
1571 enum _stub_type stub_type
;
1573 r_type
= ELF32_R_TYPE (irela
->r_info
);
1574 r_indx
= ELF32_R_SYM (irela
->r_info
);
1576 if (r_type
>= R_SPU_max
)
1578 bfd_set_error (bfd_error_bad_value
);
1579 error_ret_free_internal
:
1580 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1581 free (internal_relocs
);
1582 error_ret_free_local
:
1583 if (local_syms
!= NULL
1584 && (symtab_hdr
->contents
1585 != (unsigned char *) local_syms
))
1590 /* Determine the reloc target section. */
1591 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1592 goto error_ret_free_internal
;
1594 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1596 if (stub_type
== no_stub
)
1598 else if (stub_type
== stub_error
)
1599 goto error_ret_free_internal
;
1601 if (htab
->stub_count
== NULL
)
1604 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1605 htab
->stub_count
= bfd_zmalloc (amt
);
1606 if (htab
->stub_count
== NULL
)
1607 goto error_ret_free_internal
;
1612 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1613 goto error_ret_free_internal
;
1620 dest
= h
->root
.u
.def
.value
;
1622 dest
= sym
->st_value
;
1623 dest
+= irela
->r_addend
;
1624 if (!build_stub (info
, ibfd
, isec
, stub_type
, h
, irela
,
1626 goto error_ret_free_internal
;
1630 /* We're done with the internal relocs, free them. */
1631 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1632 free (internal_relocs
);
1635 if (local_syms
!= NULL
1636 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1638 if (!info
->keep_memory
)
1641 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1648 /* Allocate space for overlay call and return stubs.
1649 Return 0 on error, 1 if no overlays, 2 otherwise. */
1652 spu_elf_size_stubs (struct bfd_link_info
*info
)
1654 struct spu_link_hash_table
*htab
;
1661 if (!process_stubs (info
, FALSE
))
1664 htab
= spu_hash_table (info
);
1665 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1669 ibfd
= info
->input_bfds
;
1670 if (htab
->stub_count
!= NULL
)
1672 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1673 htab
->stub_sec
= bfd_zmalloc (amt
);
1674 if (htab
->stub_sec
== NULL
)
1677 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1678 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1679 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1680 htab
->stub_sec
[0] = stub
;
1682 || !bfd_set_section_alignment (ibfd
, stub
,
1683 ovl_stub_size_log2 (htab
->params
)))
1685 stub
->size
= htab
->stub_count
[0] * ovl_stub_size (htab
->params
);
1686 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1687 /* Extra space for linked list entries. */
1688 stub
->size
+= htab
->stub_count
[0] * 16;
1690 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1692 asection
*osec
= htab
->ovl_sec
[i
];
1693 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1694 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1695 htab
->stub_sec
[ovl
] = stub
;
1697 || !bfd_set_section_alignment (ibfd
, stub
,
1698 ovl_stub_size_log2 (htab
->params
)))
1700 stub
->size
= htab
->stub_count
[ovl
] * ovl_stub_size (htab
->params
);
1704 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1706 /* Space for icache manager tables.
1707 a) Tag array, one quadword per cache line.
1708 b) Rewrite "to" list, one quadword per cache line.
1709 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1710 a power-of-two number of full quadwords) per cache line. */
1713 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1714 if (htab
->ovtab
== NULL
1715 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1718 htab
->ovtab
->size
= (16 + 16 + (16 << htab
->fromelem_size_log2
))
1719 << htab
->num_lines_log2
;
1721 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1722 htab
->init
= bfd_make_section_anyway_with_flags (ibfd
, ".ovini", flags
);
1723 if (htab
->init
== NULL
1724 || !bfd_set_section_alignment (ibfd
, htab
->init
, 4))
1727 htab
->init
->size
= 16;
1729 else if (htab
->stub_count
== NULL
)
1733 /* htab->ovtab consists of two arrays.
1743 . } _ovly_buf_table[];
1746 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1747 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1748 if (htab
->ovtab
== NULL
1749 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1752 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1755 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1756 if (htab
->toe
== NULL
1757 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1759 htab
->toe
->size
= 16;
1764 /* Called from ld to place overlay manager data sections. This is done
1765 after the overlay manager itself is loaded, mainly so that the
1766 linker's htab->init section is placed after any other .ovl.init
1770 spu_elf_place_overlay_data (struct bfd_link_info
*info
)
1772 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1775 if (htab
->stub_sec
!= NULL
)
1777 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[0], NULL
, ".text");
1779 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1781 asection
*osec
= htab
->ovl_sec
[i
];
1782 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1783 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[ovl
], osec
, NULL
);
1787 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1788 (*htab
->params
->place_spu_section
) (htab
->init
, NULL
, ".ovl.init");
1790 if (htab
->ovtab
!= NULL
)
1792 const char *ovout
= ".data";
1793 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1795 (*htab
->params
->place_spu_section
) (htab
->ovtab
, NULL
, ovout
);
1798 if (htab
->toe
!= NULL
)
1799 (*htab
->params
->place_spu_section
) (htab
->toe
, NULL
, ".toe");
1802 /* Functions to handle embedded spu_ovl.o object. */
1805 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1811 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1817 struct _ovl_stream
*os
;
1821 os
= (struct _ovl_stream
*) stream
;
1822 max
= (const char *) os
->end
- (const char *) os
->start
;
1824 if ((ufile_ptr
) offset
>= max
)
1828 if (count
> max
- offset
)
1829 count
= max
- offset
;
1831 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1836 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1838 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1845 return *ovl_bfd
!= NULL
;
1849 overlay_index (asection
*sec
)
1852 || sec
->output_section
== bfd_abs_section_ptr
)
1854 return spu_elf_section_data (sec
->output_section
)->u
.o
.ovl_index
;
1857 /* Define an STT_OBJECT symbol. */
1859 static struct elf_link_hash_entry
*
1860 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1862 struct elf_link_hash_entry
*h
;
1864 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1868 if (h
->root
.type
!= bfd_link_hash_defined
1871 h
->root
.type
= bfd_link_hash_defined
;
1872 h
->root
.u
.def
.section
= htab
->ovtab
;
1873 h
->type
= STT_OBJECT
;
1876 h
->ref_regular_nonweak
= 1;
1879 else if (h
->root
.u
.def
.section
->owner
!= NULL
)
1881 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1882 h
->root
.u
.def
.section
->owner
,
1883 h
->root
.root
.string
);
1884 bfd_set_error (bfd_error_bad_value
);
1889 (*_bfd_error_handler
) (_("you are not allowed to define %s in a script"),
1890 h
->root
.root
.string
);
1891 bfd_set_error (bfd_error_bad_value
);
1898 /* Fill in all stubs and the overlay tables. */
1901 spu_elf_build_stubs (struct bfd_link_info
*info
)
1903 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1904 struct elf_link_hash_entry
*h
;
1910 if (htab
->num_overlays
!= 0)
1912 for (i
= 0; i
< 2; i
++)
1914 h
= htab
->ovly_entry
[i
];
1916 && (h
->root
.type
== bfd_link_hash_defined
1917 || h
->root
.type
== bfd_link_hash_defweak
)
1920 s
= h
->root
.u
.def
.section
->output_section
;
1921 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1923 (*_bfd_error_handler
) (_("%s in overlay section"),
1924 h
->root
.root
.string
);
1925 bfd_set_error (bfd_error_bad_value
);
1932 if (htab
->stub_sec
!= NULL
)
1934 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1935 if (htab
->stub_sec
[i
]->size
!= 0)
1937 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1938 htab
->stub_sec
[i
]->size
);
1939 if (htab
->stub_sec
[i
]->contents
== NULL
)
1941 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1942 htab
->stub_sec
[i
]->size
= 0;
1945 /* Fill in all the stubs. */
1946 process_stubs (info
, TRUE
);
1947 if (!htab
->stub_err
)
1948 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1952 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1953 bfd_set_error (bfd_error_bad_value
);
1957 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1959 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1961 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1962 bfd_set_error (bfd_error_bad_value
);
1965 htab
->stub_sec
[i
]->rawsize
= 0;
1969 if (htab
->ovtab
== NULL
|| htab
->ovtab
->size
== 0)
1972 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1973 if (htab
->ovtab
->contents
== NULL
)
1976 p
= htab
->ovtab
->contents
;
1977 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1981 h
= define_ovtab_symbol (htab
, "__icache_tag_array");
1984 h
->root
.u
.def
.value
= 0;
1985 h
->size
= 16 << htab
->num_lines_log2
;
1988 h
= define_ovtab_symbol (htab
, "__icache_tag_array_size");
1991 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
1992 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1994 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to");
1997 h
->root
.u
.def
.value
= off
;
1998 h
->size
= 16 << htab
->num_lines_log2
;
2001 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to_size");
2004 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
2005 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2007 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from");
2010 h
->root
.u
.def
.value
= off
;
2011 h
->size
= 16 << (htab
->fromelem_size_log2
+ htab
->num_lines_log2
);
2014 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from_size");
2017 h
->root
.u
.def
.value
= 16 << (htab
->fromelem_size_log2
2018 + htab
->num_lines_log2
);
2019 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2021 h
= define_ovtab_symbol (htab
, "__icache_log2_fromelemsize");
2024 h
->root
.u
.def
.value
= htab
->fromelem_size_log2
;
2025 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2027 h
= define_ovtab_symbol (htab
, "__icache_base");
2030 h
->root
.u
.def
.value
= htab
->ovl_sec
[0]->vma
;
2031 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2032 h
->size
= htab
->num_buf
<< htab
->line_size_log2
;
2034 h
= define_ovtab_symbol (htab
, "__icache_linesize");
2037 h
->root
.u
.def
.value
= 1 << htab
->line_size_log2
;
2038 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2040 h
= define_ovtab_symbol (htab
, "__icache_log2_linesize");
2043 h
->root
.u
.def
.value
= htab
->line_size_log2
;
2044 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2046 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_linesize");
2049 h
->root
.u
.def
.value
= -htab
->line_size_log2
;
2050 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2052 h
= define_ovtab_symbol (htab
, "__icache_cachesize");
2055 h
->root
.u
.def
.value
= 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
);
2056 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2058 h
= define_ovtab_symbol (htab
, "__icache_log2_cachesize");
2061 h
->root
.u
.def
.value
= htab
->num_lines_log2
+ htab
->line_size_log2
;
2062 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2064 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_cachesize");
2067 h
->root
.u
.def
.value
= -(htab
->num_lines_log2
+ htab
->line_size_log2
);
2068 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2070 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
2072 htab
->init
->contents
= bfd_zalloc (htab
->init
->owner
,
2074 if (htab
->init
->contents
== NULL
)
2077 h
= define_ovtab_symbol (htab
, "__icache_fileoff");
2080 h
->root
.u
.def
.value
= 0;
2081 h
->root
.u
.def
.section
= htab
->init
;
2087 /* Write out _ovly_table. */
2088 /* set low bit of .size to mark non-overlay area as present. */
2090 obfd
= htab
->ovtab
->output_section
->owner
;
2091 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
2093 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
2097 unsigned long off
= ovl_index
* 16;
2098 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
2100 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
2101 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16,
2103 /* file_off written later in spu_elf_modify_program_headers. */
2104 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
2108 h
= define_ovtab_symbol (htab
, "_ovly_table");
2111 h
->root
.u
.def
.value
= 16;
2112 h
->size
= htab
->num_overlays
* 16;
2114 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
2117 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2120 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
2123 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2124 h
->size
= htab
->num_buf
* 4;
2126 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
2129 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
2133 h
= define_ovtab_symbol (htab
, "_EAR_");
2136 h
->root
.u
.def
.section
= htab
->toe
;
2137 h
->root
.u
.def
.value
= 0;
2143 /* Check that all loadable section VMAs lie in the range
2144 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2147 spu_elf_check_vma (struct bfd_link_info
*info
)
2149 struct elf_segment_map
*m
;
2151 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2152 bfd
*abfd
= info
->output_bfd
;
2153 bfd_vma hi
= htab
->params
->local_store_hi
;
2154 bfd_vma lo
= htab
->params
->local_store_lo
;
2156 htab
->local_store
= hi
+ 1 - lo
;
2158 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
2159 if (m
->p_type
== PT_LOAD
)
2160 for (i
= 0; i
< m
->count
; i
++)
2161 if (m
->sections
[i
]->size
!= 0
2162 && (m
->sections
[i
]->vma
< lo
2163 || m
->sections
[i
]->vma
> hi
2164 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
2165 return m
->sections
[i
];
2170 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2171 Search for stack adjusting insns, and return the sp delta.
2172 If a store of lr is found save the instruction offset to *LR_STORE.
2173 If a stack adjusting instruction is found, save that offset to
2177 find_function_stack_adjust (asection
*sec
,
2184 memset (reg
, 0, sizeof (reg
));
2185 for ( ; offset
+ 4 <= sec
->size
; offset
+= 4)
2187 unsigned char buf
[4];
2191 /* Assume no relocs on stack adjusing insns. */
2192 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
2196 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
2198 if (buf
[0] == 0x24 /* stqd */)
2200 if (rt
== 0 /* lr */ && ra
== 1 /* sp */)
2205 /* Partly decoded immediate field. */
2206 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
2208 if (buf
[0] == 0x1c /* ai */)
2211 imm
= (imm
^ 0x200) - 0x200;
2212 reg
[rt
] = reg
[ra
] + imm
;
2214 if (rt
== 1 /* sp */)
2218 *sp_adjust
= offset
;
2222 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
2224 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2226 reg
[rt
] = reg
[ra
] + reg
[rb
];
2231 *sp_adjust
= offset
;
2235 else if (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */)
2237 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2239 reg
[rt
] = reg
[rb
] - reg
[ra
];
2244 *sp_adjust
= offset
;
2248 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2250 if (buf
[0] >= 0x42 /* ila */)
2251 imm
|= (buf
[0] & 1) << 17;
2256 if (buf
[0] == 0x40 /* il */)
2258 if ((buf
[1] & 0x80) == 0)
2260 imm
= (imm
^ 0x8000) - 0x8000;
2262 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
2268 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
2270 reg
[rt
] |= imm
& 0xffff;
2273 else if (buf
[0] == 0x04 /* ori */)
2276 imm
= (imm
^ 0x200) - 0x200;
2277 reg
[rt
] = reg
[ra
] | imm
;
2280 else if (buf
[0] == 0x32 && (buf
[1] & 0x80) != 0 /* fsmbi */)
2282 reg
[rt
] = ( ((imm
& 0x8000) ? 0xff000000 : 0)
2283 | ((imm
& 0x4000) ? 0x00ff0000 : 0)
2284 | ((imm
& 0x2000) ? 0x0000ff00 : 0)
2285 | ((imm
& 0x1000) ? 0x000000ff : 0));
2288 else if (buf
[0] == 0x16 /* andbi */)
2294 reg
[rt
] = reg
[ra
] & imm
;
2297 else if (buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
2299 /* Used in pic reg load. Say rt is trashed. Won't be used
2300 in stack adjust, but we need to continue past this branch. */
2304 else if (is_branch (buf
) || is_indirect_branch (buf
))
2305 /* If we hit a branch then we must be out of the prologue. */
2312 /* qsort predicate to sort symbols by section and value. */
2314 static Elf_Internal_Sym
*sort_syms_syms
;
2315 static asection
**sort_syms_psecs
;
2318 sort_syms (const void *a
, const void *b
)
2320 Elf_Internal_Sym
*const *s1
= a
;
2321 Elf_Internal_Sym
*const *s2
= b
;
2322 asection
*sec1
,*sec2
;
2323 bfd_signed_vma delta
;
2325 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
2326 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
2329 return sec1
->index
- sec2
->index
;
2331 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
2333 return delta
< 0 ? -1 : 1;
2335 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
2337 return delta
< 0 ? -1 : 1;
2339 return *s1
< *s2
? -1 : 1;
2342 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2343 entries for section SEC. */
2345 static struct spu_elf_stack_info
*
2346 alloc_stack_info (asection
*sec
, int max_fun
)
2348 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2351 amt
= sizeof (struct spu_elf_stack_info
);
2352 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
2353 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
2354 if (sec_data
->u
.i
.stack_info
!= NULL
)
2355 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
2356 return sec_data
->u
.i
.stack_info
;
2359 /* Add a new struct function_info describing a (part of a) function
2360 starting at SYM_H. Keep the array sorted by address. */
2362 static struct function_info
*
2363 maybe_insert_function (asection
*sec
,
2366 bfd_boolean is_func
)
2368 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2369 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2375 sinfo
= alloc_stack_info (sec
, 20);
2382 Elf_Internal_Sym
*sym
= sym_h
;
2383 off
= sym
->st_value
;
2384 size
= sym
->st_size
;
2388 struct elf_link_hash_entry
*h
= sym_h
;
2389 off
= h
->root
.u
.def
.value
;
2393 for (i
= sinfo
->num_fun
; --i
>= 0; )
2394 if (sinfo
->fun
[i
].lo
<= off
)
2399 /* Don't add another entry for an alias, but do update some
2401 if (sinfo
->fun
[i
].lo
== off
)
2403 /* Prefer globals over local syms. */
2404 if (global
&& !sinfo
->fun
[i
].global
)
2406 sinfo
->fun
[i
].global
= TRUE
;
2407 sinfo
->fun
[i
].u
.h
= sym_h
;
2410 sinfo
->fun
[i
].is_func
= TRUE
;
2411 return &sinfo
->fun
[i
];
2413 /* Ignore a zero-size symbol inside an existing function. */
2414 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
2415 return &sinfo
->fun
[i
];
2418 if (sinfo
->num_fun
>= sinfo
->max_fun
)
2420 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
2421 bfd_size_type old
= amt
;
2423 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2424 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
2425 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2426 sinfo
= bfd_realloc (sinfo
, amt
);
2429 memset ((char *) sinfo
+ old
, 0, amt
- old
);
2430 sec_data
->u
.i
.stack_info
= sinfo
;
2433 if (++i
< sinfo
->num_fun
)
2434 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
2435 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
2436 sinfo
->fun
[i
].is_func
= is_func
;
2437 sinfo
->fun
[i
].global
= global
;
2438 sinfo
->fun
[i
].sec
= sec
;
2440 sinfo
->fun
[i
].u
.h
= sym_h
;
2442 sinfo
->fun
[i
].u
.sym
= sym_h
;
2443 sinfo
->fun
[i
].lo
= off
;
2444 sinfo
->fun
[i
].hi
= off
+ size
;
2445 sinfo
->fun
[i
].lr_store
= -1;
2446 sinfo
->fun
[i
].sp_adjust
= -1;
2447 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
,
2448 &sinfo
->fun
[i
].lr_store
,
2449 &sinfo
->fun
[i
].sp_adjust
);
2450 sinfo
->num_fun
+= 1;
2451 return &sinfo
->fun
[i
];
2454 /* Return the name of FUN. */
2457 func_name (struct function_info
*fun
)
2461 Elf_Internal_Shdr
*symtab_hdr
;
2463 while (fun
->start
!= NULL
)
2467 return fun
->u
.h
->root
.root
.string
;
2470 if (fun
->u
.sym
->st_name
== 0)
2472 size_t len
= strlen (sec
->name
);
2473 char *name
= bfd_malloc (len
+ 10);
2476 sprintf (name
, "%s+%lx", sec
->name
,
2477 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
2481 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2482 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
2485 /* Read the instruction at OFF in SEC. Return true iff the instruction
2486 is a nop, lnop, or stop 0 (all zero insn). */
2489 is_nop (asection
*sec
, bfd_vma off
)
2491 unsigned char insn
[4];
2493 if (off
+ 4 > sec
->size
2494 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
2496 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
2498 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
2503 /* Extend the range of FUN to cover nop padding up to LIMIT.
2504 Return TRUE iff some instruction other than a NOP was found. */
2507 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
2509 bfd_vma off
= (fun
->hi
+ 3) & -4;
2511 while (off
< limit
&& is_nop (fun
->sec
, off
))
2522 /* Check and fix overlapping function ranges. Return TRUE iff there
2523 are gaps in the current info we have about functions in SEC. */
2526 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
2528 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2529 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2531 bfd_boolean gaps
= FALSE
;
2536 for (i
= 1; i
< sinfo
->num_fun
; i
++)
2537 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
2539 /* Fix overlapping symbols. */
2540 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
2541 const char *f2
= func_name (&sinfo
->fun
[i
]);
2543 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
2544 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
2546 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
2549 if (sinfo
->num_fun
== 0)
2553 if (sinfo
->fun
[0].lo
!= 0)
2555 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
2557 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
2559 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
2560 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
2562 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2568 /* Search current function info for a function that contains address
2569 OFFSET in section SEC. */
2571 static struct function_info
*
2572 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2574 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2575 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2579 hi
= sinfo
->num_fun
;
2582 mid
= (lo
+ hi
) / 2;
2583 if (offset
< sinfo
->fun
[mid
].lo
)
2585 else if (offset
>= sinfo
->fun
[mid
].hi
)
2588 return &sinfo
->fun
[mid
];
2590 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2592 bfd_set_error (bfd_error_bad_value
);
2596 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2597 if CALLEE was new. If this function return FALSE, CALLEE should
2601 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2603 struct call_info
**pp
, *p
;
2605 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2606 if (p
->fun
== callee
->fun
)
2608 /* Tail calls use less stack than normal calls. Retain entry
2609 for normal call over one for tail call. */
2610 p
->is_tail
&= callee
->is_tail
;
2613 p
->fun
->start
= NULL
;
2614 p
->fun
->is_func
= TRUE
;
2616 p
->count
+= callee
->count
;
2617 /* Reorder list so most recent call is first. */
2619 p
->next
= caller
->call_list
;
2620 caller
->call_list
= p
;
2623 callee
->next
= caller
->call_list
;
2624 caller
->call_list
= callee
;
2628 /* Copy CALL and insert the copy into CALLER. */
2631 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2633 struct call_info
*callee
;
2634 callee
= bfd_malloc (sizeof (*callee
));
2638 if (!insert_callee (caller
, callee
))
2643 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2644 overlay stub sections. */
2647 interesting_section (asection
*s
)
2649 return (s
->output_section
!= bfd_abs_section_ptr
2650 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2651 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2655 /* Rummage through the relocs for SEC, looking for function calls.
2656 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2657 mark destination symbols on calls as being functions. Also
2658 look at branches, which may be tail calls or go to hot/cold
2659 section part of same function. */
2662 mark_functions_via_relocs (asection
*sec
,
2663 struct bfd_link_info
*info
,
2666 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2667 Elf_Internal_Shdr
*symtab_hdr
;
2669 unsigned int priority
= 0;
2670 static bfd_boolean warned
;
2672 if (!interesting_section (sec
)
2673 || sec
->reloc_count
== 0)
2676 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2678 if (internal_relocs
== NULL
)
2681 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2682 psyms
= &symtab_hdr
->contents
;
2683 irela
= internal_relocs
;
2684 irelaend
= irela
+ sec
->reloc_count
;
2685 for (; irela
< irelaend
; irela
++)
2687 enum elf_spu_reloc_type r_type
;
2688 unsigned int r_indx
;
2690 Elf_Internal_Sym
*sym
;
2691 struct elf_link_hash_entry
*h
;
2693 bfd_boolean nonbranch
, is_call
;
2694 struct function_info
*caller
;
2695 struct call_info
*callee
;
2697 r_type
= ELF32_R_TYPE (irela
->r_info
);
2698 nonbranch
= r_type
!= R_SPU_REL16
&& r_type
!= R_SPU_ADDR16
;
2700 r_indx
= ELF32_R_SYM (irela
->r_info
);
2701 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2705 || sym_sec
->output_section
== bfd_abs_section_ptr
)
2711 unsigned char insn
[4];
2713 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2714 irela
->r_offset
, 4))
2716 if (is_branch (insn
))
2718 is_call
= (insn
[0] & 0xfd) == 0x31;
2719 priority
= insn
[1] & 0x0f;
2721 priority
|= insn
[2];
2723 priority
|= insn
[3];
2725 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2726 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2729 info
->callbacks
->einfo
2730 (_("%B(%A+0x%v): call to non-code section"
2731 " %B(%A), analysis incomplete\n"),
2732 sec
->owner
, sec
, irela
->r_offset
,
2733 sym_sec
->owner
, sym_sec
);
2748 /* For --auto-overlay, count possible stubs we need for
2749 function pointer references. */
2750 unsigned int sym_type
;
2754 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2755 if (sym_type
== STT_FUNC
)
2757 if (call_tree
&& spu_hash_table (info
)->params
->auto_overlay
)
2758 spu_hash_table (info
)->non_ovly_stub
+= 1;
2759 /* If the symbol type is STT_FUNC then this must be a
2760 function pointer initialisation. */
2763 /* Ignore data references. */
2764 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2765 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2767 /* Otherwise we probably have a jump table reloc for
2768 a switch statement or some other reference to a
2773 val
= h
->root
.u
.def
.value
;
2775 val
= sym
->st_value
;
2776 val
+= irela
->r_addend
;
2780 struct function_info
*fun
;
2782 if (irela
->r_addend
!= 0)
2784 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2787 fake
->st_value
= val
;
2789 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2793 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2795 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2798 if (irela
->r_addend
!= 0
2799 && fun
->u
.sym
!= sym
)
2804 caller
= find_function (sec
, irela
->r_offset
, info
);
2807 callee
= bfd_malloc (sizeof *callee
);
2811 callee
->fun
= find_function (sym_sec
, val
, info
);
2812 if (callee
->fun
== NULL
)
2814 callee
->is_tail
= !is_call
;
2815 callee
->is_pasted
= FALSE
;
2816 callee
->broken_cycle
= FALSE
;
2817 callee
->priority
= priority
;
2818 callee
->count
= nonbranch
? 0 : 1;
2819 if (callee
->fun
->last_caller
!= sec
)
2821 callee
->fun
->last_caller
= sec
;
2822 callee
->fun
->call_count
+= 1;
2824 if (!insert_callee (caller
, callee
))
2827 && !callee
->fun
->is_func
2828 && callee
->fun
->stack
== 0)
2830 /* This is either a tail call or a branch from one part of
2831 the function to another, ie. hot/cold section. If the
2832 destination has been called by some other function then
2833 it is a separate function. We also assume that functions
2834 are not split across input files. */
2835 if (sec
->owner
!= sym_sec
->owner
)
2837 callee
->fun
->start
= NULL
;
2838 callee
->fun
->is_func
= TRUE
;
2840 else if (callee
->fun
->start
== NULL
)
2842 struct function_info
*caller_start
= caller
;
2843 while (caller_start
->start
)
2844 caller_start
= caller_start
->start
;
2846 if (caller_start
!= callee
->fun
)
2847 callee
->fun
->start
= caller_start
;
2851 struct function_info
*callee_start
;
2852 struct function_info
*caller_start
;
2853 callee_start
= callee
->fun
;
2854 while (callee_start
->start
)
2855 callee_start
= callee_start
->start
;
2856 caller_start
= caller
;
2857 while (caller_start
->start
)
2858 caller_start
= caller_start
->start
;
2859 if (caller_start
!= callee_start
)
2861 callee
->fun
->start
= NULL
;
2862 callee
->fun
->is_func
= TRUE
;
2871 /* Handle something like .init or .fini, which has a piece of a function.
2872 These sections are pasted together to form a single function. */
2875 pasted_function (asection
*sec
)
2877 struct bfd_link_order
*l
;
2878 struct _spu_elf_section_data
*sec_data
;
2879 struct spu_elf_stack_info
*sinfo
;
2880 Elf_Internal_Sym
*fake
;
2881 struct function_info
*fun
, *fun_start
;
2883 fake
= bfd_zmalloc (sizeof (*fake
));
2887 fake
->st_size
= sec
->size
;
2889 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2890 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2894 /* Find a function immediately preceding this section. */
2896 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2898 if (l
->u
.indirect
.section
== sec
)
2900 if (fun_start
!= NULL
)
2902 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2906 fun
->start
= fun_start
;
2908 callee
->is_tail
= TRUE
;
2909 callee
->is_pasted
= TRUE
;
2910 callee
->broken_cycle
= FALSE
;
2911 callee
->priority
= 0;
2913 if (!insert_callee (fun_start
, callee
))
2919 if (l
->type
== bfd_indirect_link_order
2920 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2921 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2922 && sinfo
->num_fun
!= 0)
2923 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2926 /* Don't return an error if we did not find a function preceding this
2927 section. The section may have incorrect flags. */
2931 /* Map address ranges in code sections to functions. */
2934 discover_functions (struct bfd_link_info
*info
)
2938 Elf_Internal_Sym
***psym_arr
;
2939 asection
***sec_arr
;
2940 bfd_boolean gaps
= FALSE
;
2943 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2946 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2947 if (psym_arr
== NULL
)
2949 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2950 if (sec_arr
== NULL
)
2953 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2955 ibfd
= ibfd
->link_next
, bfd_idx
++)
2957 extern const bfd_target bfd_elf32_spu_vec
;
2958 Elf_Internal_Shdr
*symtab_hdr
;
2961 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2962 asection
**psecs
, **p
;
2964 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2967 /* Read all the symbols. */
2968 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2969 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2973 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2974 if (interesting_section (sec
))
2982 if (symtab_hdr
->contents
!= NULL
)
2984 /* Don't use cached symbols since the generic ELF linker
2985 code only reads local symbols, and we need globals too. */
2986 free (symtab_hdr
->contents
);
2987 symtab_hdr
->contents
= NULL
;
2989 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2991 symtab_hdr
->contents
= (void *) syms
;
2995 /* Select defined function symbols that are going to be output. */
2996 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2999 psym_arr
[bfd_idx
] = psyms
;
3000 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
3003 sec_arr
[bfd_idx
] = psecs
;
3004 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
3005 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
3006 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3010 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
3011 if (s
!= NULL
&& interesting_section (s
))
3014 symcount
= psy
- psyms
;
3017 /* Sort them by section and offset within section. */
3018 sort_syms_syms
= syms
;
3019 sort_syms_psecs
= psecs
;
3020 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
3022 /* Now inspect the function symbols. */
3023 for (psy
= psyms
; psy
< psyms
+ symcount
; )
3025 asection
*s
= psecs
[*psy
- syms
];
3026 Elf_Internal_Sym
**psy2
;
3028 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
3029 if (psecs
[*psy2
- syms
] != s
)
3032 if (!alloc_stack_info (s
, psy2
- psy
))
3037 /* First install info about properly typed and sized functions.
3038 In an ideal world this will cover all code sections, except
3039 when partitioning functions into hot and cold sections,
3040 and the horrible pasted together .init and .fini functions. */
3041 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
3044 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3046 asection
*s
= psecs
[sy
- syms
];
3047 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
3052 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3053 if (interesting_section (sec
))
3054 gaps
|= check_function_ranges (sec
, info
);
3059 /* See if we can discover more function symbols by looking at
3061 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3063 ibfd
= ibfd
->link_next
, bfd_idx
++)
3067 if (psym_arr
[bfd_idx
] == NULL
)
3070 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3071 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
3075 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3077 ibfd
= ibfd
->link_next
, bfd_idx
++)
3079 Elf_Internal_Shdr
*symtab_hdr
;
3081 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
3084 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
3087 psecs
= sec_arr
[bfd_idx
];
3089 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
3090 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
3093 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3094 if (interesting_section (sec
))
3095 gaps
|= check_function_ranges (sec
, info
);
3099 /* Finally, install all globals. */
3100 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
3104 s
= psecs
[sy
- syms
];
3106 /* Global syms might be improperly typed functions. */
3107 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
3108 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
3110 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
3116 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3118 extern const bfd_target bfd_elf32_spu_vec
;
3121 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3124 /* Some of the symbols we've installed as marking the
3125 beginning of functions may have a size of zero. Extend
3126 the range of such functions to the beginning of the
3127 next symbol of interest. */
3128 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3129 if (interesting_section (sec
))
3131 struct _spu_elf_section_data
*sec_data
;
3132 struct spu_elf_stack_info
*sinfo
;
3134 sec_data
= spu_elf_section_data (sec
);
3135 sinfo
= sec_data
->u
.i
.stack_info
;
3136 if (sinfo
!= NULL
&& sinfo
->num_fun
!= 0)
3139 bfd_vma hi
= sec
->size
;
3141 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
3143 sinfo
->fun
[fun_idx
].hi
= hi
;
3144 hi
= sinfo
->fun
[fun_idx
].lo
;
3147 sinfo
->fun
[0].lo
= 0;
3149 /* No symbols in this section. Must be .init or .fini
3150 or something similar. */
3151 else if (!pasted_function (sec
))
3157 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3159 ibfd
= ibfd
->link_next
, bfd_idx
++)
3161 if (psym_arr
[bfd_idx
] == NULL
)
3164 free (psym_arr
[bfd_idx
]);
3165 free (sec_arr
[bfd_idx
]);
3174 /* Iterate over all function_info we have collected, calling DOIT on
3175 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3179 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
3180 struct bfd_link_info
*,
3182 struct bfd_link_info
*info
,
3188 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3190 extern const bfd_target bfd_elf32_spu_vec
;
3193 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3196 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3198 struct _spu_elf_section_data
*sec_data
;
3199 struct spu_elf_stack_info
*sinfo
;
3201 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3202 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3205 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3206 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
3207 if (!doit (&sinfo
->fun
[i
], info
, param
))
3215 /* Transfer call info attached to struct function_info entries for
3216 all of a given function's sections to the first entry. */
3219 transfer_calls (struct function_info
*fun
,
3220 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3221 void *param ATTRIBUTE_UNUSED
)
3223 struct function_info
*start
= fun
->start
;
3227 struct call_info
*call
, *call_next
;
3229 while (start
->start
!= NULL
)
3230 start
= start
->start
;
3231 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
3233 call_next
= call
->next
;
3234 if (!insert_callee (start
, call
))
3237 fun
->call_list
= NULL
;
3242 /* Mark nodes in the call graph that are called by some other node. */
3245 mark_non_root (struct function_info
*fun
,
3246 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3247 void *param ATTRIBUTE_UNUSED
)
3249 struct call_info
*call
;
3254 for (call
= fun
->call_list
; call
; call
= call
->next
)
3256 call
->fun
->non_root
= TRUE
;
3257 mark_non_root (call
->fun
, 0, 0);
3262 /* Remove cycles from the call graph. Set depth of nodes. */
3265 remove_cycles (struct function_info
*fun
,
3266 struct bfd_link_info
*info
,
3269 struct call_info
**callp
, *call
;
3270 unsigned int depth
= *(unsigned int *) param
;
3271 unsigned int max_depth
= depth
;
3275 fun
->marking
= TRUE
;
3277 callp
= &fun
->call_list
;
3278 while ((call
= *callp
) != NULL
)
3280 call
->max_depth
= depth
+ !call
->is_pasted
;
3281 if (!call
->fun
->visit2
)
3283 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
3285 if (max_depth
< call
->max_depth
)
3286 max_depth
= call
->max_depth
;
3288 else if (call
->fun
->marking
)
3290 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3292 if (!htab
->params
->auto_overlay
3293 && htab
->params
->stack_analysis
)
3295 const char *f1
= func_name (fun
);
3296 const char *f2
= func_name (call
->fun
);
3298 info
->callbacks
->info (_("Stack analysis will ignore the call "
3303 call
->broken_cycle
= TRUE
;
3305 callp
= &call
->next
;
3307 fun
->marking
= FALSE
;
3308 *(unsigned int *) param
= max_depth
;
3312 /* Check that we actually visited all nodes in remove_cycles. If we
3313 didn't, then there is some cycle in the call graph not attached to
3314 any root node. Arbitrarily choose a node in the cycle as a new
3315 root and break the cycle. */
3318 mark_detached_root (struct function_info
*fun
,
3319 struct bfd_link_info
*info
,
3324 fun
->non_root
= FALSE
;
3325 *(unsigned int *) param
= 0;
3326 return remove_cycles (fun
, info
, param
);
3329 /* Populate call_list for each function. */
3332 build_call_tree (struct bfd_link_info
*info
)
3337 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3339 extern const bfd_target bfd_elf32_spu_vec
;
3342 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3345 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3346 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
3350 /* Transfer call info from hot/cold section part of function
3352 if (!spu_hash_table (info
)->params
->auto_overlay
3353 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
3356 /* Find the call graph root(s). */
3357 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
3360 /* Remove cycles from the call graph. We start from the root node(s)
3361 so that we break cycles in a reasonable place. */
3363 if (!for_each_node (remove_cycles
, info
, &depth
, TRUE
))
3366 return for_each_node (mark_detached_root
, info
, &depth
, FALSE
);
3369 /* qsort predicate to sort calls by priority, max_depth then count. */
3372 sort_calls (const void *a
, const void *b
)
3374 struct call_info
*const *c1
= a
;
3375 struct call_info
*const *c2
= b
;
3378 delta
= (*c2
)->priority
- (*c1
)->priority
;
3382 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
3386 delta
= (*c2
)->count
- (*c1
)->count
;
3390 return (char *) c1
- (char *) c2
;
3394 unsigned int max_overlay_size
;
3397 /* Set linker_mark and gc_mark on any sections that we will put in
3398 overlays. These flags are used by the generic ELF linker, but we
3399 won't be continuing on to bfd_elf_final_link so it is OK to use
3400 them. linker_mark is clear before we get here. Set segment_mark
3401 on sections that are part of a pasted function (excluding the last
3404 Set up function rodata section if --overlay-rodata. We don't
3405 currently include merged string constant rodata sections since
3407 Sort the call graph so that the deepest nodes will be visited
3411 mark_overlay_section (struct function_info
*fun
,
3412 struct bfd_link_info
*info
,
3415 struct call_info
*call
;
3417 struct _mos_param
*mos_param
= param
;
3418 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3424 if (!fun
->sec
->linker_mark
3425 && (htab
->params
->ovly_flavour
!= ovly_soft_icache
3426 || htab
->params
->non_ia_text
3427 || strncmp (fun
->sec
->name
, ".text.ia.", 9) == 0
3428 || strcmp (fun
->sec
->name
, ".init") == 0
3429 || strcmp (fun
->sec
->name
, ".fini") == 0))
3433 fun
->sec
->linker_mark
= 1;
3434 fun
->sec
->gc_mark
= 1;
3435 fun
->sec
->segment_mark
= 0;
3436 /* Ensure SEC_CODE is set on this text section (it ought to
3437 be!), and SEC_CODE is clear on rodata sections. We use
3438 this flag to differentiate the two overlay section types. */
3439 fun
->sec
->flags
|= SEC_CODE
;
3441 size
= fun
->sec
->size
;
3442 if (htab
->params
->auto_overlay
& OVERLAY_RODATA
)
3446 /* Find the rodata section corresponding to this function's
3448 if (strcmp (fun
->sec
->name
, ".text") == 0)
3450 name
= bfd_malloc (sizeof (".rodata"));
3453 memcpy (name
, ".rodata", sizeof (".rodata"));
3455 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
3457 size_t len
= strlen (fun
->sec
->name
);
3458 name
= bfd_malloc (len
+ 3);
3461 memcpy (name
, ".rodata", sizeof (".rodata"));
3462 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
3464 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
3466 size_t len
= strlen (fun
->sec
->name
) + 1;
3467 name
= bfd_malloc (len
);
3470 memcpy (name
, fun
->sec
->name
, len
);
3476 asection
*rodata
= NULL
;
3477 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
3478 if (group_sec
== NULL
)
3479 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
3481 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
3483 if (strcmp (group_sec
->name
, name
) == 0)
3488 group_sec
= elf_section_data (group_sec
)->next_in_group
;
3490 fun
->rodata
= rodata
;
3493 size
+= fun
->rodata
->size
;
3494 if (htab
->params
->line_size
!= 0
3495 && size
> htab
->params
->line_size
)
3497 size
-= fun
->rodata
->size
;
3502 fun
->rodata
->linker_mark
= 1;
3503 fun
->rodata
->gc_mark
= 1;
3504 fun
->rodata
->flags
&= ~SEC_CODE
;
3510 if (mos_param
->max_overlay_size
< size
)
3511 mos_param
->max_overlay_size
= size
;
3514 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3519 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
3523 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3524 calls
[count
++] = call
;
3526 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
3528 fun
->call_list
= NULL
;
3532 calls
[count
]->next
= fun
->call_list
;
3533 fun
->call_list
= calls
[count
];
3538 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3540 if (call
->is_pasted
)
3542 /* There can only be one is_pasted call per function_info. */
3543 BFD_ASSERT (!fun
->sec
->segment_mark
);
3544 fun
->sec
->segment_mark
= 1;
3546 if (!call
->broken_cycle
3547 && !mark_overlay_section (call
->fun
, info
, param
))
3551 /* Don't put entry code into an overlay. The overlay manager needs
3552 a stack! Also, don't mark .ovl.init as an overlay. */
3553 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
3554 == info
->output_bfd
->start_address
3555 || strncmp (fun
->sec
->output_section
->name
, ".ovl.init", 9) == 0)
3557 fun
->sec
->linker_mark
= 0;
3558 if (fun
->rodata
!= NULL
)
3559 fun
->rodata
->linker_mark
= 0;
3564 /* If non-zero then unmark functions called from those within sections
3565 that we need to unmark. Unfortunately this isn't reliable since the
3566 call graph cannot know the destination of function pointer calls. */
3567 #define RECURSE_UNMARK 0
3570 asection
*exclude_input_section
;
3571 asection
*exclude_output_section
;
3572 unsigned long clearing
;
3575 /* Undo some of mark_overlay_section's work. */
3578 unmark_overlay_section (struct function_info
*fun
,
3579 struct bfd_link_info
*info
,
3582 struct call_info
*call
;
3583 struct _uos_param
*uos_param
= param
;
3584 unsigned int excluded
= 0;
3592 if (fun
->sec
== uos_param
->exclude_input_section
3593 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
3597 uos_param
->clearing
+= excluded
;
3599 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
3601 fun
->sec
->linker_mark
= 0;
3603 fun
->rodata
->linker_mark
= 0;
3606 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3607 if (!call
->broken_cycle
3608 && !unmark_overlay_section (call
->fun
, info
, param
))
3612 uos_param
->clearing
-= excluded
;
3617 unsigned int lib_size
;
3618 asection
**lib_sections
;
3621 /* Add sections we have marked as belonging to overlays to an array
3622 for consideration as non-overlay sections. The array consist of
3623 pairs of sections, (text,rodata), for functions in the call graph. */
3626 collect_lib_sections (struct function_info
*fun
,
3627 struct bfd_link_info
*info
,
3630 struct _cl_param
*lib_param
= param
;
3631 struct call_info
*call
;
3638 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3641 size
= fun
->sec
->size
;
3643 size
+= fun
->rodata
->size
;
3645 if (size
<= lib_param
->lib_size
)
3647 *lib_param
->lib_sections
++ = fun
->sec
;
3648 fun
->sec
->gc_mark
= 0;
3649 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3651 *lib_param
->lib_sections
++ = fun
->rodata
;
3652 fun
->rodata
->gc_mark
= 0;
3655 *lib_param
->lib_sections
++ = NULL
;
3658 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3659 if (!call
->broken_cycle
)
3660 collect_lib_sections (call
->fun
, info
, param
);
3665 /* qsort predicate to sort sections by call count. */
3668 sort_lib (const void *a
, const void *b
)
3670 asection
*const *s1
= a
;
3671 asection
*const *s2
= b
;
3672 struct _spu_elf_section_data
*sec_data
;
3673 struct spu_elf_stack_info
*sinfo
;
3677 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3678 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3681 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3682 delta
-= sinfo
->fun
[i
].call_count
;
3685 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3686 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3689 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3690 delta
+= sinfo
->fun
[i
].call_count
;
3699 /* Remove some sections from those marked to be in overlays. Choose
3700 those that are called from many places, likely library functions. */
3703 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3706 asection
**lib_sections
;
3707 unsigned int i
, lib_count
;
3708 struct _cl_param collect_lib_param
;
3709 struct function_info dummy_caller
;
3710 struct spu_link_hash_table
*htab
;
3712 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3714 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3716 extern const bfd_target bfd_elf32_spu_vec
;
3719 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3722 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3723 if (sec
->linker_mark
3724 && sec
->size
< lib_size
3725 && (sec
->flags
& SEC_CODE
) != 0)
3728 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3729 if (lib_sections
== NULL
)
3730 return (unsigned int) -1;
3731 collect_lib_param
.lib_size
= lib_size
;
3732 collect_lib_param
.lib_sections
= lib_sections
;
3733 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3735 return (unsigned int) -1;
3736 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3738 /* Sort sections so that those with the most calls are first. */
3740 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3742 htab
= spu_hash_table (info
);
3743 for (i
= 0; i
< lib_count
; i
++)
3745 unsigned int tmp
, stub_size
;
3747 struct _spu_elf_section_data
*sec_data
;
3748 struct spu_elf_stack_info
*sinfo
;
3750 sec
= lib_sections
[2 * i
];
3751 /* If this section is OK, its size must be less than lib_size. */
3753 /* If it has a rodata section, then add that too. */
3754 if (lib_sections
[2 * i
+ 1])
3755 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3756 /* Add any new overlay call stubs needed by the section. */
3759 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3760 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3763 struct call_info
*call
;
3765 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3766 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3767 if (call
->fun
->sec
->linker_mark
)
3769 struct call_info
*p
;
3770 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3771 if (p
->fun
== call
->fun
)
3774 stub_size
+= ovl_stub_size (htab
->params
);
3777 if (tmp
+ stub_size
< lib_size
)
3779 struct call_info
**pp
, *p
;
3781 /* This section fits. Mark it as non-overlay. */
3782 lib_sections
[2 * i
]->linker_mark
= 0;
3783 if (lib_sections
[2 * i
+ 1])
3784 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3785 lib_size
-= tmp
+ stub_size
;
3786 /* Call stubs to the section we just added are no longer
3788 pp
= &dummy_caller
.call_list
;
3789 while ((p
= *pp
) != NULL
)
3790 if (!p
->fun
->sec
->linker_mark
)
3792 lib_size
+= ovl_stub_size (htab
->params
);
3798 /* Add new call stubs to dummy_caller. */
3799 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3800 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3803 struct call_info
*call
;
3805 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3806 for (call
= sinfo
->fun
[k
].call_list
;
3809 if (call
->fun
->sec
->linker_mark
)
3811 struct call_info
*callee
;
3812 callee
= bfd_malloc (sizeof (*callee
));
3814 return (unsigned int) -1;
3816 if (!insert_callee (&dummy_caller
, callee
))
3822 while (dummy_caller
.call_list
!= NULL
)
3824 struct call_info
*call
= dummy_caller
.call_list
;
3825 dummy_caller
.call_list
= call
->next
;
3828 for (i
= 0; i
< 2 * lib_count
; i
++)
3829 if (lib_sections
[i
])
3830 lib_sections
[i
]->gc_mark
= 1;
3831 free (lib_sections
);
3835 /* Build an array of overlay sections. The deepest node's section is
3836 added first, then its parent node's section, then everything called
3837 from the parent section. The idea being to group sections to
3838 minimise calls between different overlays. */
3841 collect_overlays (struct function_info
*fun
,
3842 struct bfd_link_info
*info
,
3845 struct call_info
*call
;
3846 bfd_boolean added_fun
;
3847 asection
***ovly_sections
= param
;
3853 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3854 if (!call
->is_pasted
&& !call
->broken_cycle
)
3856 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3862 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3864 fun
->sec
->gc_mark
= 0;
3865 *(*ovly_sections
)++ = fun
->sec
;
3866 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3868 fun
->rodata
->gc_mark
= 0;
3869 *(*ovly_sections
)++ = fun
->rodata
;
3872 *(*ovly_sections
)++ = NULL
;
3875 /* Pasted sections must stay with the first section. We don't
3876 put pasted sections in the array, just the first section.
3877 Mark subsequent sections as already considered. */
3878 if (fun
->sec
->segment_mark
)
3880 struct function_info
*call_fun
= fun
;
3883 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3884 if (call
->is_pasted
)
3886 call_fun
= call
->fun
;
3887 call_fun
->sec
->gc_mark
= 0;
3888 if (call_fun
->rodata
)
3889 call_fun
->rodata
->gc_mark
= 0;
3895 while (call_fun
->sec
->segment_mark
);
3899 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3900 if (!call
->broken_cycle
3901 && !collect_overlays (call
->fun
, info
, ovly_sections
))
3906 struct _spu_elf_section_data
*sec_data
;
3907 struct spu_elf_stack_info
*sinfo
;
3909 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3910 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3913 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3914 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3922 struct _sum_stack_param
{
3924 size_t overall_stack
;
3925 bfd_boolean emit_stack_syms
;
3928 /* Descend the call graph for FUN, accumulating total stack required. */
3931 sum_stack (struct function_info
*fun
,
3932 struct bfd_link_info
*info
,
3935 struct call_info
*call
;
3936 struct function_info
*max
;
3937 size_t stack
, cum_stack
;
3939 bfd_boolean has_call
;
3940 struct _sum_stack_param
*sum_stack_param
= param
;
3941 struct spu_link_hash_table
*htab
;
3943 cum_stack
= fun
->stack
;
3944 sum_stack_param
->cum_stack
= cum_stack
;
3950 for (call
= fun
->call_list
; call
; call
= call
->next
)
3952 if (call
->broken_cycle
)
3954 if (!call
->is_pasted
)
3956 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3958 stack
= sum_stack_param
->cum_stack
;
3959 /* Include caller stack for normal calls, don't do so for
3960 tail calls. fun->stack here is local stack usage for
3962 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3963 stack
+= fun
->stack
;
3964 if (cum_stack
< stack
)
3971 sum_stack_param
->cum_stack
= cum_stack
;
3973 /* Now fun->stack holds cumulative stack. */
3974 fun
->stack
= cum_stack
;
3978 && sum_stack_param
->overall_stack
< cum_stack
)
3979 sum_stack_param
->overall_stack
= cum_stack
;
3981 htab
= spu_hash_table (info
);
3982 if (htab
->params
->auto_overlay
)
3985 f1
= func_name (fun
);
3986 if (htab
->params
->stack_analysis
)
3989 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3990 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3991 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3995 info
->callbacks
->minfo (_(" calls:\n"));
3996 for (call
= fun
->call_list
; call
; call
= call
->next
)
3997 if (!call
->is_pasted
&& !call
->broken_cycle
)
3999 const char *f2
= func_name (call
->fun
);
4000 const char *ann1
= call
->fun
== max
? "*" : " ";
4001 const char *ann2
= call
->is_tail
? "t" : " ";
4003 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
4008 if (sum_stack_param
->emit_stack_syms
)
4010 char *name
= bfd_malloc (18 + strlen (f1
));
4011 struct elf_link_hash_entry
*h
;
4016 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
4017 sprintf (name
, "__stack_%s", f1
);
4019 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
4021 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
4024 && (h
->root
.type
== bfd_link_hash_new
4025 || h
->root
.type
== bfd_link_hash_undefined
4026 || h
->root
.type
== bfd_link_hash_undefweak
))
4028 h
->root
.type
= bfd_link_hash_defined
;
4029 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
4030 h
->root
.u
.def
.value
= cum_stack
;
4035 h
->ref_regular_nonweak
= 1;
4036 h
->forced_local
= 1;
4044 /* SEC is part of a pasted function. Return the call_info for the
4045 next section of this function. */
4047 static struct call_info
*
4048 find_pasted_call (asection
*sec
)
4050 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
4051 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
4052 struct call_info
*call
;
4055 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
4056 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
4057 if (call
->is_pasted
)
4063 /* qsort predicate to sort bfds by file name. */
4066 sort_bfds (const void *a
, const void *b
)
4068 bfd
*const *abfd1
= a
;
4069 bfd
*const *abfd2
= b
;
4071 return filename_cmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
4075 print_one_overlay_section (FILE *script
,
4078 unsigned int ovlynum
,
4079 unsigned int *ovly_map
,
4080 asection
**ovly_sections
,
4081 struct bfd_link_info
*info
)
4085 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4087 asection
*sec
= ovly_sections
[2 * j
];
4089 if (fprintf (script
, " %s%c%s (%s)\n",
4090 (sec
->owner
->my_archive
!= NULL
4091 ? sec
->owner
->my_archive
->filename
: ""),
4092 info
->path_separator
,
4093 sec
->owner
->filename
,
4096 if (sec
->segment_mark
)
4098 struct call_info
*call
= find_pasted_call (sec
);
4099 while (call
!= NULL
)
4101 struct function_info
*call_fun
= call
->fun
;
4102 sec
= call_fun
->sec
;
4103 if (fprintf (script
, " %s%c%s (%s)\n",
4104 (sec
->owner
->my_archive
!= NULL
4105 ? sec
->owner
->my_archive
->filename
: ""),
4106 info
->path_separator
,
4107 sec
->owner
->filename
,
4110 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4111 if (call
->is_pasted
)
4117 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4119 asection
*sec
= ovly_sections
[2 * j
+ 1];
4121 && fprintf (script
, " %s%c%s (%s)\n",
4122 (sec
->owner
->my_archive
!= NULL
4123 ? sec
->owner
->my_archive
->filename
: ""),
4124 info
->path_separator
,
4125 sec
->owner
->filename
,
4129 sec
= ovly_sections
[2 * j
];
4130 if (sec
->segment_mark
)
4132 struct call_info
*call
= find_pasted_call (sec
);
4133 while (call
!= NULL
)
4135 struct function_info
*call_fun
= call
->fun
;
4136 sec
= call_fun
->rodata
;
4138 && fprintf (script
, " %s%c%s (%s)\n",
4139 (sec
->owner
->my_archive
!= NULL
4140 ? sec
->owner
->my_archive
->filename
: ""),
4141 info
->path_separator
,
4142 sec
->owner
->filename
,
4145 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4146 if (call
->is_pasted
)
4155 /* Handle --auto-overlay. */
4158 spu_elf_auto_overlay (struct bfd_link_info
*info
)
4162 struct elf_segment_map
*m
;
4163 unsigned int fixed_size
, lo
, hi
;
4164 unsigned int reserved
;
4165 struct spu_link_hash_table
*htab
;
4166 unsigned int base
, i
, count
, bfd_count
;
4167 unsigned int region
, ovlynum
;
4168 asection
**ovly_sections
, **ovly_p
;
4169 unsigned int *ovly_map
;
4171 unsigned int total_overlay_size
, overlay_size
;
4172 const char *ovly_mgr_entry
;
4173 struct elf_link_hash_entry
*h
;
4174 struct _mos_param mos_param
;
4175 struct _uos_param uos_param
;
4176 struct function_info dummy_caller
;
4178 /* Find the extents of our loadable image. */
4179 lo
= (unsigned int) -1;
4181 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4182 if (m
->p_type
== PT_LOAD
)
4183 for (i
= 0; i
< m
->count
; i
++)
4184 if (m
->sections
[i
]->size
!= 0)
4186 if (m
->sections
[i
]->vma
< lo
)
4187 lo
= m
->sections
[i
]->vma
;
4188 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
4189 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
4191 fixed_size
= hi
+ 1 - lo
;
4193 if (!discover_functions (info
))
4196 if (!build_call_tree (info
))
4199 htab
= spu_hash_table (info
);
4200 reserved
= htab
->params
->auto_overlay_reserved
;
4203 struct _sum_stack_param sum_stack_param
;
4205 sum_stack_param
.emit_stack_syms
= 0;
4206 sum_stack_param
.overall_stack
= 0;
4207 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4209 reserved
= (sum_stack_param
.overall_stack
4210 + htab
->params
->extra_stack_space
);
4213 /* No need for overlays if everything already fits. */
4214 if (fixed_size
+ reserved
<= htab
->local_store
4215 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
4217 htab
->params
->auto_overlay
= 0;
4221 uos_param
.exclude_input_section
= 0;
4222 uos_param
.exclude_output_section
4223 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
4225 ovly_mgr_entry
= "__ovly_load";
4226 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4227 ovly_mgr_entry
= "__icache_br_handler";
4228 h
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
4229 FALSE
, FALSE
, FALSE
);
4231 && (h
->root
.type
== bfd_link_hash_defined
4232 || h
->root
.type
== bfd_link_hash_defweak
)
4235 /* We have a user supplied overlay manager. */
4236 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
4240 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4241 builtin version to .text, and will adjust .text size. */
4242 fixed_size
+= (*htab
->params
->spu_elf_load_ovl_mgr
) ();
4245 /* Mark overlay sections, and find max overlay section size. */
4246 mos_param
.max_overlay_size
= 0;
4247 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
4250 /* We can't put the overlay manager or interrupt routines in
4252 uos_param
.clearing
= 0;
4253 if ((uos_param
.exclude_input_section
4254 || uos_param
.exclude_output_section
)
4255 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
4259 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4261 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
4262 if (bfd_arr
== NULL
)
4265 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4268 total_overlay_size
= 0;
4269 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4271 extern const bfd_target bfd_elf32_spu_vec
;
4273 unsigned int old_count
;
4275 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
4279 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
4280 if (sec
->linker_mark
)
4282 if ((sec
->flags
& SEC_CODE
) != 0)
4284 fixed_size
-= sec
->size
;
4285 total_overlay_size
+= sec
->size
;
4287 else if ((sec
->flags
& (SEC_ALLOC
| SEC_LOAD
)) == (SEC_ALLOC
| SEC_LOAD
)
4288 && sec
->output_section
->owner
== info
->output_bfd
4289 && strncmp (sec
->output_section
->name
, ".ovl.init", 9) == 0)
4290 fixed_size
-= sec
->size
;
4291 if (count
!= old_count
)
4292 bfd_arr
[bfd_count
++] = ibfd
;
4295 /* Since the overlay link script selects sections by file name and
4296 section name, ensure that file names are unique. */
4299 bfd_boolean ok
= TRUE
;
4301 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
4302 for (i
= 1; i
< bfd_count
; ++i
)
4303 if (filename_cmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
4305 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
4307 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
4308 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
4309 bfd_arr
[i
]->filename
,
4310 bfd_arr
[i
]->my_archive
->filename
);
4312 info
->callbacks
->einfo (_("%s duplicated\n"),
4313 bfd_arr
[i
]->filename
);
4319 info
->callbacks
->einfo (_("sorry, no support for duplicate "
4320 "object files in auto-overlay script\n"));
4321 bfd_set_error (bfd_error_bad_value
);
4327 fixed_size
+= reserved
;
4328 fixed_size
+= htab
->non_ovly_stub
* ovl_stub_size (htab
->params
);
4329 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
4331 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4333 /* Stubs in the non-icache area are bigger. */
4334 fixed_size
+= htab
->non_ovly_stub
* 16;
4335 /* Space for icache manager tables.
4336 a) Tag array, one quadword per cache line.
4337 - word 0: ia address of present line, init to zero. */
4338 fixed_size
+= 16 << htab
->num_lines_log2
;
4339 /* b) Rewrite "to" list, one quadword per cache line. */
4340 fixed_size
+= 16 << htab
->num_lines_log2
;
4341 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4342 to a power-of-two number of full quadwords) per cache line. */
4343 fixed_size
+= 16 << (htab
->fromelem_size_log2
4344 + htab
->num_lines_log2
);
4345 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4350 /* Guess number of overlays. Assuming overlay buffer is on
4351 average only half full should be conservative. */
4352 ovlynum
= (total_overlay_size
* 2 * htab
->params
->num_lines
4353 / (htab
->local_store
- fixed_size
));
4354 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4355 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
4359 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
4360 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4361 "size of 0x%v exceeds local store\n"),
4362 (bfd_vma
) fixed_size
,
4363 (bfd_vma
) mos_param
.max_overlay_size
);
4365 /* Now see if we should put some functions in the non-overlay area. */
4366 else if (fixed_size
< htab
->params
->auto_overlay_fixed
)
4368 unsigned int max_fixed
, lib_size
;
4370 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
4371 if (max_fixed
> htab
->params
->auto_overlay_fixed
)
4372 max_fixed
= htab
->params
->auto_overlay_fixed
;
4373 lib_size
= max_fixed
- fixed_size
;
4374 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
4375 if (lib_size
== (unsigned int) -1)
4377 fixed_size
= max_fixed
- lib_size
;
4380 /* Build an array of sections, suitably sorted to place into
4382 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
4383 if (ovly_sections
== NULL
)
4385 ovly_p
= ovly_sections
;
4386 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
4388 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
4389 ovly_map
= bfd_malloc (count
* sizeof (*ovly_map
));
4390 if (ovly_map
== NULL
)
4393 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
4394 overlay_size
= (htab
->local_store
- fixed_size
) / htab
->params
->num_lines
;
4395 if (htab
->params
->line_size
!= 0)
4396 overlay_size
= htab
->params
->line_size
;
4399 while (base
< count
)
4401 unsigned int size
= 0, rosize
= 0, roalign
= 0;
4403 for (i
= base
; i
< count
; i
++)
4405 asection
*sec
, *rosec
;
4406 unsigned int tmp
, rotmp
;
4407 unsigned int num_stubs
;
4408 struct call_info
*call
, *pasty
;
4409 struct _spu_elf_section_data
*sec_data
;
4410 struct spu_elf_stack_info
*sinfo
;
4413 /* See whether we can add this section to the current
4414 overlay without overflowing our overlay buffer. */
4415 sec
= ovly_sections
[2 * i
];
4416 tmp
= align_power (size
, sec
->alignment_power
) + sec
->size
;
4418 rosec
= ovly_sections
[2 * i
+ 1];
4421 rotmp
= align_power (rotmp
, rosec
->alignment_power
) + rosec
->size
;
4422 if (roalign
< rosec
->alignment_power
)
4423 roalign
= rosec
->alignment_power
;
4425 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4427 if (sec
->segment_mark
)
4429 /* Pasted sections must stay together, so add their
4431 pasty
= find_pasted_call (sec
);
4432 while (pasty
!= NULL
)
4434 struct function_info
*call_fun
= pasty
->fun
;
4435 tmp
= (align_power (tmp
, call_fun
->sec
->alignment_power
)
4436 + call_fun
->sec
->size
);
4437 if (call_fun
->rodata
)
4439 rotmp
= (align_power (rotmp
,
4440 call_fun
->rodata
->alignment_power
)
4441 + call_fun
->rodata
->size
);
4442 if (roalign
< rosec
->alignment_power
)
4443 roalign
= rosec
->alignment_power
;
4445 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
4446 if (pasty
->is_pasted
)
4450 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4453 /* If we add this section, we might need new overlay call
4454 stubs. Add any overlay section calls to dummy_call. */
4456 sec_data
= spu_elf_section_data (sec
);
4457 sinfo
= sec_data
->u
.i
.stack_info
;
4458 for (k
= 0; k
< (unsigned) sinfo
->num_fun
; ++k
)
4459 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
4460 if (call
->is_pasted
)
4462 BFD_ASSERT (pasty
== NULL
);
4465 else if (call
->fun
->sec
->linker_mark
)
4467 if (!copy_callee (&dummy_caller
, call
))
4470 while (pasty
!= NULL
)
4472 struct function_info
*call_fun
= pasty
->fun
;
4474 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4475 if (call
->is_pasted
)
4477 BFD_ASSERT (pasty
== NULL
);
4480 else if (!copy_callee (&dummy_caller
, call
))
4484 /* Calculate call stub size. */
4486 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
4488 unsigned int stub_delta
= 1;
4490 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4491 stub_delta
= call
->count
;
4492 num_stubs
+= stub_delta
;
4494 /* If the call is within this overlay, we won't need a
4496 for (k
= base
; k
< i
+ 1; k
++)
4497 if (call
->fun
->sec
== ovly_sections
[2 * k
])
4499 num_stubs
-= stub_delta
;
4503 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4504 && num_stubs
> htab
->params
->max_branch
)
4506 if (align_power (tmp
, roalign
) + rotmp
4507 + num_stubs
* ovl_stub_size (htab
->params
) > overlay_size
)
4515 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
4516 ovly_sections
[2 * i
]->owner
,
4517 ovly_sections
[2 * i
],
4518 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
4519 bfd_set_error (bfd_error_bad_value
);
4523 while (dummy_caller
.call_list
!= NULL
)
4525 struct call_info
*call
= dummy_caller
.call_list
;
4526 dummy_caller
.call_list
= call
->next
;
4532 ovly_map
[base
++] = ovlynum
;
4535 script
= htab
->params
->spu_elf_open_overlay_script ();
4537 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4539 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4542 if (fprintf (script
,
4543 " . = ALIGN (%u);\n"
4544 " .ovl.init : { *(.ovl.init) }\n"
4545 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4546 htab
->params
->line_size
) <= 0)
4551 while (base
< count
)
4553 unsigned int indx
= ovlynum
- 1;
4554 unsigned int vma
, lma
;
4556 vma
= (indx
& (htab
->params
->num_lines
- 1)) << htab
->line_size_log2
;
4557 lma
= vma
+ (((indx
>> htab
->num_lines_log2
) + 1) << 18);
4559 if (fprintf (script
, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4560 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4561 ovlynum
, vma
, lma
) <= 0)
4564 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4565 ovly_map
, ovly_sections
, info
);
4566 if (base
== (unsigned) -1)
4569 if (fprintf (script
, " }\n") <= 0)
4575 if (fprintf (script
, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4576 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
)) <= 0)
4579 if (fprintf (script
, "}\nINSERT AFTER .toe;\n") <= 0)
4584 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4587 if (fprintf (script
,
4588 " . = ALIGN (16);\n"
4589 " .ovl.init : { *(.ovl.init) }\n"
4590 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4593 for (region
= 1; region
<= htab
->params
->num_lines
; region
++)
4597 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4605 /* We need to set lma since we are overlaying .ovl.init. */
4606 if (fprintf (script
,
4607 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4612 if (fprintf (script
, " OVERLAY :\n {\n") <= 0)
4616 while (base
< count
)
4618 if (fprintf (script
, " .ovly%u {\n", ovlynum
) <= 0)
4621 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4622 ovly_map
, ovly_sections
, info
);
4623 if (base
== (unsigned) -1)
4626 if (fprintf (script
, " }\n") <= 0)
4629 ovlynum
+= htab
->params
->num_lines
;
4630 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4634 if (fprintf (script
, " }\n") <= 0)
4638 if (fprintf (script
, "}\nINSERT BEFORE .text;\n") <= 0)
4643 free (ovly_sections
);
4645 if (fclose (script
) != 0)
4648 if (htab
->params
->auto_overlay
& AUTO_RELINK
)
4649 (*htab
->params
->spu_elf_relink
) ();
4654 bfd_set_error (bfd_error_system_call
);
4656 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
4660 /* Provide an estimate of total stack required. */
4663 spu_elf_stack_analysis (struct bfd_link_info
*info
)
4665 struct spu_link_hash_table
*htab
;
4666 struct _sum_stack_param sum_stack_param
;
4668 if (!discover_functions (info
))
4671 if (!build_call_tree (info
))
4674 htab
= spu_hash_table (info
);
4675 if (htab
->params
->stack_analysis
)
4677 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
4678 info
->callbacks
->minfo (_("\nStack size for functions. "
4679 "Annotations: '*' max stack, 't' tail call\n"));
4682 sum_stack_param
.emit_stack_syms
= htab
->params
->emit_stack_syms
;
4683 sum_stack_param
.overall_stack
= 0;
4684 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4687 if (htab
->params
->stack_analysis
)
4688 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
4689 (bfd_vma
) sum_stack_param
.overall_stack
);
4693 /* Perform a final link. */
4696 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
4698 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4700 if (htab
->params
->auto_overlay
)
4701 spu_elf_auto_overlay (info
);
4703 if ((htab
->params
->stack_analysis
4704 || (htab
->params
->ovly_flavour
== ovly_soft_icache
4705 && htab
->params
->lrlive_analysis
))
4706 && !spu_elf_stack_analysis (info
))
4707 info
->callbacks
->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4709 if (!spu_elf_build_stubs (info
))
4710 info
->callbacks
->einfo ("%F%P: can not build overlay stubs: %E\n");
4712 return bfd_elf_final_link (output_bfd
, info
);
4715 /* Called when not normally emitting relocs, ie. !info->relocatable
4716 and !info->emitrelocations. Returns a count of special relocs
4717 that need to be emitted. */
4720 spu_elf_count_relocs (struct bfd_link_info
*info
, asection
*sec
)
4722 Elf_Internal_Rela
*relocs
;
4723 unsigned int count
= 0;
4725 relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
4729 Elf_Internal_Rela
*rel
;
4730 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
4732 for (rel
= relocs
; rel
< relend
; rel
++)
4734 int r_type
= ELF32_R_TYPE (rel
->r_info
);
4735 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4739 if (elf_section_data (sec
)->relocs
!= relocs
)
/* Functions for adding fixup records to .fixup */

/* One fixup record is a 32-bit word: the upper 28 bits address a
   quadword, the low 4 bits flag which words within it need fixing.  */
#define FIXUP_RECORD_SIZE 4

#define FIXUP_PUT(output_bfd,htab,index,addr) \
	  bfd_put_32 (output_bfd, addr, \
		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
#define FIXUP_GET(output_bfd,htab,index) \
	  bfd_get_32 (output_bfd, \
		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4757 /* Store OFFSET in .fixup. This assumes it will be called with an
4758 increasing OFFSET. When this OFFSET fits with the last base offset,
4759 it just sets a bit, otherwise it adds a new fixup record. */
4761 spu_elf_emit_fixup (bfd
* output_bfd
, struct bfd_link_info
*info
,
4764 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4765 asection
*sfixup
= htab
->sfixup
;
4766 bfd_vma qaddr
= offset
& ~(bfd_vma
) 15;
4767 bfd_vma bit
= ((bfd_vma
) 8) >> ((offset
& 15) >> 2);
4768 if (sfixup
->reloc_count
== 0)
4770 FIXUP_PUT (output_bfd
, htab
, 0, qaddr
| bit
);
4771 sfixup
->reloc_count
++;
4775 bfd_vma base
= FIXUP_GET (output_bfd
, htab
, sfixup
->reloc_count
- 1);
4776 if (qaddr
!= (base
& ~(bfd_vma
) 15))
4778 if ((sfixup
->reloc_count
+ 1) * FIXUP_RECORD_SIZE
> sfixup
->size
)
4779 (*_bfd_error_handler
) (_("fatal error while creating .fixup"));
4780 FIXUP_PUT (output_bfd
, htab
, sfixup
->reloc_count
, qaddr
| bit
);
4781 sfixup
->reloc_count
++;
4784 FIXUP_PUT (output_bfd
, htab
, sfixup
->reloc_count
- 1, base
| bit
);
4788 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4791 spu_elf_relocate_section (bfd
*output_bfd
,
4792 struct bfd_link_info
*info
,
4794 asection
*input_section
,
4796 Elf_Internal_Rela
*relocs
,
4797 Elf_Internal_Sym
*local_syms
,
4798 asection
**local_sections
)
4800 Elf_Internal_Shdr
*symtab_hdr
;
4801 struct elf_link_hash_entry
**sym_hashes
;
4802 Elf_Internal_Rela
*rel
, *relend
;
4803 struct spu_link_hash_table
*htab
;
4806 bfd_boolean emit_these_relocs
= FALSE
;
4807 bfd_boolean is_ea_sym
;
4809 unsigned int iovl
= 0;
4811 htab
= spu_hash_table (info
);
4812 stubs
= (htab
->stub_sec
!= NULL
4813 && maybe_needs_stubs (input_section
));
4814 iovl
= overlay_index (input_section
);
4815 ea
= bfd_get_section_by_name (output_bfd
, "._ea");
4816 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
4817 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
4820 relend
= relocs
+ input_section
->reloc_count
;
4821 for (; rel
< relend
; rel
++)
4824 reloc_howto_type
*howto
;
4825 unsigned int r_symndx
;
4826 Elf_Internal_Sym
*sym
;
4828 struct elf_link_hash_entry
*h
;
4829 const char *sym_name
;
4832 bfd_reloc_status_type r
;
4833 bfd_boolean unresolved_reloc
;
4834 enum _stub_type stub_type
;
4836 r_symndx
= ELF32_R_SYM (rel
->r_info
);
4837 r_type
= ELF32_R_TYPE (rel
->r_info
);
4838 howto
= elf_howto_table
+ r_type
;
4839 unresolved_reloc
= FALSE
;
4843 if (r_symndx
< symtab_hdr
->sh_info
)
4845 sym
= local_syms
+ r_symndx
;
4846 sec
= local_sections
[r_symndx
];
4847 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
4848 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4852 if (sym_hashes
== NULL
)
4855 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
4857 while (h
->root
.type
== bfd_link_hash_indirect
4858 || h
->root
.type
== bfd_link_hash_warning
)
4859 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
4862 if (h
->root
.type
== bfd_link_hash_defined
4863 || h
->root
.type
== bfd_link_hash_defweak
)
4865 sec
= h
->root
.u
.def
.section
;
4867 || sec
->output_section
== NULL
)
4868 /* Set a flag that will be cleared later if we find a
4869 relocation value for this symbol. output_section
4870 is typically NULL for symbols satisfied by a shared
4872 unresolved_reloc
= TRUE
;
4874 relocation
= (h
->root
.u
.def
.value
4875 + sec
->output_section
->vma
4876 + sec
->output_offset
);
4878 else if (h
->root
.type
== bfd_link_hash_undefweak
)
4880 else if (info
->unresolved_syms_in_objects
== RM_IGNORE
4881 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
4883 else if (!info
->relocatable
4884 && !(r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
))
4887 err
= (info
->unresolved_syms_in_objects
== RM_GENERATE_ERROR
4888 || ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
);
4889 if (!info
->callbacks
->undefined_symbol (info
,
4890 h
->root
.root
.string
,
4893 rel
->r_offset
, err
))
4896 sym_name
= h
->root
.root
.string
;
4899 if (sec
!= NULL
&& elf_discarded_section (sec
))
4900 RELOC_AGAINST_DISCARDED_SECTION (info
, input_bfd
, input_section
,
4901 rel
, relend
, howto
, contents
);
4903 if (info
->relocatable
)
4906 /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4907 if (r_type
== R_SPU_ADD_PIC
4909 && !(h
->def_regular
|| ELF_COMMON_DEF_P (h
)))
4911 bfd_byte
*loc
= contents
+ rel
->r_offset
;
4917 is_ea_sym
= (ea
!= NULL
4919 && sec
->output_section
== ea
);
4921 /* If this symbol is in an overlay area, we may need to relocate
4922 to the overlay stub. */
4923 addend
= rel
->r_addend
;
4926 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4927 contents
, info
)) != no_stub
)
4929 unsigned int ovl
= 0;
4930 struct got_entry
*g
, **head
;
4932 if (stub_type
!= nonovl_stub
)
4936 head
= &h
->got
.glist
;
4938 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4940 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4941 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4943 && g
->br_addr
== (rel
->r_offset
4944 + input_section
->output_offset
4945 + input_section
->output_section
->vma
))
4946 : g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4951 relocation
= g
->stub_addr
;
4956 /* For soft icache, encode the overlay index into addresses. */
4957 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4958 && (r_type
== R_SPU_ADDR16_HI
4959 || r_type
== R_SPU_ADDR32
|| r_type
== R_SPU_REL32
)
4962 unsigned int ovl
= overlay_index (sec
);
4965 unsigned int set_id
= ((ovl
- 1) >> htab
->num_lines_log2
) + 1;
4966 relocation
+= set_id
<< 18;
4971 if (htab
->params
->emit_fixups
&& !info
->relocatable
4972 && (input_section
->flags
& SEC_ALLOC
) != 0
4973 && r_type
== R_SPU_ADDR32
)
4976 offset
= rel
->r_offset
+ input_section
->output_section
->vma
4977 + input_section
->output_offset
;
4978 spu_elf_emit_fixup (output_bfd
, info
, offset
);
4981 if (unresolved_reloc
)
4983 else if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4987 /* ._ea is a special section that isn't allocated in SPU
4988 memory, but rather occupies space in PPU memory as
4989 part of an embedded ELF image. If this reloc is
4990 against a symbol defined in ._ea, then transform the
4991 reloc into an equivalent one without a symbol
4992 relative to the start of the ELF image. */
4993 rel
->r_addend
+= (relocation
4995 + elf_section_data (ea
)->this_hdr
.sh_offset
);
4996 rel
->r_info
= ELF32_R_INFO (0, r_type
);
4998 emit_these_relocs
= TRUE
;
5002 unresolved_reloc
= TRUE
;
5004 if (unresolved_reloc
5005 && _bfd_elf_section_offset (output_bfd
, info
, input_section
,
5006 rel
->r_offset
) != (bfd_vma
) -1)
5008 (*_bfd_error_handler
)
5009 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
5011 bfd_get_section_name (input_bfd
, input_section
),
5012 (long) rel
->r_offset
,
5018 r
= _bfd_final_link_relocate (howto
,
5022 rel
->r_offset
, relocation
, addend
);
5024 if (r
!= bfd_reloc_ok
)
5026 const char *msg
= (const char *) 0;
5030 case bfd_reloc_overflow
:
5031 if (!((*info
->callbacks
->reloc_overflow
)
5032 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
5033 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
5037 case bfd_reloc_undefined
:
5038 if (!((*info
->callbacks
->undefined_symbol
)
5039 (info
, sym_name
, input_bfd
, input_section
,
5040 rel
->r_offset
, TRUE
)))
5044 case bfd_reloc_outofrange
:
5045 msg
= _("internal error: out of range error");
5048 case bfd_reloc_notsupported
:
5049 msg
= _("internal error: unsupported relocation error");
5052 case bfd_reloc_dangerous
:
5053 msg
= _("internal error: dangerous error");
5057 msg
= _("internal error: unknown error");
5062 if (!((*info
->callbacks
->warning
)
5063 (info
, msg
, sym_name
, input_bfd
, input_section
,
5072 && emit_these_relocs
5073 && !info
->emitrelocations
)
5075 Elf_Internal_Rela
*wrel
;
5076 Elf_Internal_Shdr
*rel_hdr
;
5078 wrel
= rel
= relocs
;
5079 relend
= relocs
+ input_section
->reloc_count
;
5080 for (; rel
< relend
; rel
++)
5084 r_type
= ELF32_R_TYPE (rel
->r_info
);
5085 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
5088 input_section
->reloc_count
= wrel
- relocs
;
5089 /* Backflips for _bfd_elf_link_output_relocs. */
5090 rel_hdr
= _bfd_elf_single_rel_hdr (input_section
);
5091 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
5099 spu_elf_finish_dynamic_sections (bfd
*output_bfd ATTRIBUTE_UNUSED
,
5100 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
5105 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5108 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
5109 const char *sym_name ATTRIBUTE_UNUSED
,
5110 Elf_Internal_Sym
*sym
,
5111 asection
*sym_sec ATTRIBUTE_UNUSED
,
5112 struct elf_link_hash_entry
*h
)
5114 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5116 if (!info
->relocatable
5117 && htab
->stub_sec
!= NULL
5119 && (h
->root
.type
== bfd_link_hash_defined
5120 || h
->root
.type
== bfd_link_hash_defweak
)
5122 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
5124 struct got_entry
*g
;
5126 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
5127 if (htab
->params
->ovly_flavour
== ovly_soft_icache
5128 ? g
->br_addr
== g
->stub_addr
5129 : g
->addend
== 0 && g
->ovl
== 0)
5131 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
5132 (htab
->stub_sec
[0]->output_section
->owner
,
5133 htab
->stub_sec
[0]->output_section
));
5134 sym
->st_value
= g
->stub_addr
;
/* Non-zero when producing a PIC plugin image; checked when writing
   the ELF header.  */
static int spu_plugin = 0;

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
5150 /* Set ELF header e_type for plugins. */
5153 spu_elf_post_process_headers (bfd
*abfd
,
5154 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
5158 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
5160 i_ehdrp
->e_type
= ET_DYN
;
5164 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5165 segments for overlays. */
5168 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5175 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5176 extra
= htab
->num_overlays
;
5182 sec
= bfd_get_section_by_name (abfd
, ".toe");
5183 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
5189 /* Remove .toe section from other PT_LOAD segments and put it in
5190 a segment of its own. Put overlays in separate segments too. */
5193 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
5196 struct elf_segment_map
*m
, *m_overlay
;
5197 struct elf_segment_map
**p
, **p_overlay
;
5203 toe
= bfd_get_section_by_name (abfd
, ".toe");
5204 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
5205 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
5206 for (i
= 0; i
< m
->count
; i
++)
5207 if ((s
= m
->sections
[i
]) == toe
5208 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
5210 struct elf_segment_map
*m2
;
5213 if (i
+ 1 < m
->count
)
5215 amt
= sizeof (struct elf_segment_map
);
5216 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
5217 m2
= bfd_zalloc (abfd
, amt
);
5220 m2
->count
= m
->count
- (i
+ 1);
5221 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
5222 m2
->count
* sizeof (m
->sections
[0]));
5223 m2
->p_type
= PT_LOAD
;
5231 amt
= sizeof (struct elf_segment_map
);
5232 m2
= bfd_zalloc (abfd
, amt
);
5235 m2
->p_type
= PT_LOAD
;
5237 m2
->sections
[0] = s
;
5245 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5246 PT_LOAD segments. This can cause the .ovl.init section to be
5247 overwritten with the contents of some overlay segment. To work
5248 around this issue, we ensure that all PF_OVERLAY segments are
5249 sorted first amongst the program headers; this ensures that even
5250 with a broken loader, the .ovl.init section (which is not marked
5251 as PF_OVERLAY) will be placed into SPU local store on startup. */
5253 /* Move all overlay segments onto a separate list. */
5254 p
= &elf_tdata (abfd
)->segment_map
;
5255 p_overlay
= &m_overlay
;
5258 if ((*p
)->p_type
== PT_LOAD
&& (*p
)->count
== 1
5259 && spu_elf_section_data ((*p
)->sections
[0])->u
.o
.ovl_index
!= 0)
5264 p_overlay
= &m
->next
;
5271 /* Re-insert overlay segments at the head of the segment map. */
5272 *p_overlay
= elf_tdata (abfd
)->segment_map
;
5273 elf_tdata (abfd
)->segment_map
= m_overlay
;
5278 /* Tweak the section type of .note.spu_name. */
5281 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
5282 Elf_Internal_Shdr
*hdr
,
5285 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
5286 hdr
->sh_type
= SHT_NOTE
;
5290 /* Tweak phdrs before writing them out. */
5293 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5295 const struct elf_backend_data
*bed
;
5296 struct elf_obj_tdata
*tdata
;
5297 Elf_Internal_Phdr
*phdr
, *last
;
5298 struct spu_link_hash_table
*htab
;
5305 bed
= get_elf_backend_data (abfd
);
5306 tdata
= elf_tdata (abfd
);
5308 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
5309 htab
= spu_hash_table (info
);
5310 if (htab
->num_overlays
!= 0)
5312 struct elf_segment_map
*m
;
5315 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
5317 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
5319 /* Mark this as an overlay header. */
5320 phdr
[i
].p_flags
|= PF_OVERLAY
;
5322 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0
5323 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
5325 bfd_byte
*p
= htab
->ovtab
->contents
;
5326 unsigned int off
= o
* 16 + 8;
5328 /* Write file_off into _ovly_table. */
5329 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
5332 /* Soft-icache has its file offset put in .ovl.init. */
5333 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
5335 bfd_vma val
= elf_section_data (htab
->ovl_sec
[0])->this_hdr
.sh_offset
;
5337 bfd_put_32 (htab
->init
->owner
, val
, htab
->init
->contents
+ 4);
5341 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5342 of 16. This should always be possible when using the standard
5343 linker scripts, but don't create overlapping segments if
5344 someone is playing games with linker scripts. */
5346 for (i
= count
; i
-- != 0; )
5347 if (phdr
[i
].p_type
== PT_LOAD
)
5351 adjust
= -phdr
[i
].p_filesz
& 15;
5354 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
5357 adjust
= -phdr
[i
].p_memsz
& 15;
5360 && phdr
[i
].p_filesz
!= 0
5361 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
5362 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
5365 if (phdr
[i
].p_filesz
!= 0)
5369 if (i
== (unsigned int) -1)
5370 for (i
= count
; i
-- != 0; )
5371 if (phdr
[i
].p_type
== PT_LOAD
)
5375 adjust
= -phdr
[i
].p_filesz
& 15;
5376 phdr
[i
].p_filesz
+= adjust
;
5378 adjust
= -phdr
[i
].p_memsz
& 15;
5379 phdr
[i
].p_memsz
+= adjust
;
5386 spu_elf_size_sections (bfd
* output_bfd
, struct bfd_link_info
*info
)
5388 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5389 if (htab
->params
->emit_fixups
)
5391 asection
*sfixup
= htab
->sfixup
;
5392 int fixup_count
= 0;
5396 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
5400 if (bfd_get_flavour (ibfd
) != bfd_target_elf_flavour
)
5403 /* Walk over each section attached to the input bfd. */
5404 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
5406 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
5409 /* If there aren't any relocs, then there's nothing more
5411 if ((isec
->flags
& SEC_ALLOC
) == 0
5412 || (isec
->flags
& SEC_RELOC
) == 0
5413 || isec
->reloc_count
== 0)
5416 /* Get the relocs. */
5418 _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
5420 if (internal_relocs
== NULL
)
5423 /* 1 quadword can contain up to 4 R_SPU_ADDR32
5424 relocations. They are stored in a single word by
5425 saving the upper 28 bits of the address and setting the
5426 lower 4 bits to a bit mask of the words that have the
5427 relocation. BASE_END keeps track of the next quadword. */
5428 irela
= internal_relocs
;
5429 irelaend
= irela
+ isec
->reloc_count
;
5431 for (; irela
< irelaend
; irela
++)
5432 if (ELF32_R_TYPE (irela
->r_info
) == R_SPU_ADDR32
5433 && irela
->r_offset
>= base_end
)
5435 base_end
= (irela
->r_offset
& ~(bfd_vma
) 15) + 16;
5441 /* We always have a NULL fixup as a sentinel */
5442 size
= (fixup_count
+ 1) * FIXUP_RECORD_SIZE
;
5443 if (!bfd_set_section_size (output_bfd
, sfixup
, size
))
5445 sfixup
->contents
= (bfd_byte
*) bfd_zalloc (info
->input_bfds
, size
);
5446 if (sfixup
->contents
== NULL
)
5452 #define TARGET_BIG_SYM bfd_elf32_spu_vec
5453 #define TARGET_BIG_NAME "elf32-spu"
5454 #define ELF_ARCH bfd_arch_spu
5455 #define ELF_TARGET_ID SPU_ELF_DATA
5456 #define ELF_MACHINE_CODE EM_SPU
5457 /* This matches the alignment need for DMA. */
5458 #define ELF_MAXPAGESIZE 0x80
5459 #define elf_backend_rela_normal 1
5460 #define elf_backend_can_gc_sections 1
5462 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5463 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5464 #define elf_info_to_howto spu_elf_info_to_howto
5465 #define elf_backend_count_relocs spu_elf_count_relocs
5466 #define elf_backend_relocate_section spu_elf_relocate_section
5467 #define elf_backend_finish_dynamic_sections spu_elf_finish_dynamic_sections
5468 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5469 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5470 #define elf_backend_object_p spu_elf_object_p
5471 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5472 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5474 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5475 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5476 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
5477 #define elf_backend_post_process_headers spu_elf_post_process_headers
5478 #define elf_backend_fake_sections spu_elf_fake_sections
5479 #define elf_backend_special_sections spu_elf_special_sections
5480 #define bfd_elf32_bfd_final_link spu_elf_final_link
5482 #include "elf32-target.h"