1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
91 HOWTO (R_SPU_ADD_PIC
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
92 bfd_elf_generic_reloc
, "SPU_ADD_PIC",
93 FALSE
, 0, 0x00000000, FALSE
),
96 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
97 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
98 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
102 static enum elf_spu_reloc_type
103 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
109 case BFD_RELOC_SPU_IMM10W
:
111 case BFD_RELOC_SPU_IMM16W
:
113 case BFD_RELOC_SPU_LO16
:
114 return R_SPU_ADDR16_LO
;
115 case BFD_RELOC_SPU_HI16
:
116 return R_SPU_ADDR16_HI
;
117 case BFD_RELOC_SPU_IMM18
:
119 case BFD_RELOC_SPU_PCREL16
:
121 case BFD_RELOC_SPU_IMM7
:
123 case BFD_RELOC_SPU_IMM8
:
125 case BFD_RELOC_SPU_PCREL9a
:
127 case BFD_RELOC_SPU_PCREL9b
:
129 case BFD_RELOC_SPU_IMM10
:
130 return R_SPU_ADDR10I
;
131 case BFD_RELOC_SPU_IMM16
:
132 return R_SPU_ADDR16I
;
135 case BFD_RELOC_32_PCREL
:
137 case BFD_RELOC_SPU_PPU32
:
139 case BFD_RELOC_SPU_PPU64
:
141 case BFD_RELOC_SPU_ADD_PIC
:
142 return R_SPU_ADD_PIC
;
147 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
149 Elf_Internal_Rela
*dst
)
151 enum elf_spu_reloc_type r_type
;
153 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
154 BFD_ASSERT (r_type
< R_SPU_max
);
155 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
158 static reloc_howto_type
*
159 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
160 bfd_reloc_code_real_type code
)
162 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
164 if (r_type
== R_SPU_NONE
)
167 return elf_howto_table
+ r_type
;
170 static reloc_howto_type
*
171 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
176 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
177 if (elf_howto_table
[i
].name
!= NULL
178 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
179 return &elf_howto_table
[i
];
184 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
186 static bfd_reloc_status_type
187 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
188 void *data
, asection
*input_section
,
189 bfd
*output_bfd
, char **error_message
)
191 bfd_size_type octets
;
195 /* If this is a relocatable link (output_bfd test tells us), just
196 call the generic function. Any adjustment will be done at final
198 if (output_bfd
!= NULL
)
199 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
200 input_section
, output_bfd
, error_message
);
202 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
203 return bfd_reloc_outofrange
;
204 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
206 /* Get symbol value. */
208 if (!bfd_is_com_section (symbol
->section
))
210 if (symbol
->section
->output_section
)
211 val
+= symbol
->section
->output_section
->vma
;
213 val
+= reloc_entry
->addend
;
215 /* Make it pc-relative. */
216 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
219 if (val
+ 256 >= 512)
220 return bfd_reloc_overflow
;
222 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
224 /* Move two high bits of value to REL9I and REL9 position.
225 The mask will take care of selecting the right field. */
226 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
227 insn
&= ~reloc_entry
->howto
->dst_mask
;
228 insn
|= val
& reloc_entry
->howto
->dst_mask
;
229 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
234 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
236 if (!sec
->used_by_bfd
)
238 struct _spu_elf_section_data
*sdata
;
240 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
243 sec
->used_by_bfd
= sdata
;
246 return _bfd_elf_new_section_hook (abfd
, sec
);
249 /* Set up overlay info for executables. */
252 spu_elf_object_p (bfd
*abfd
)
254 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
256 unsigned int i
, num_ovl
, num_buf
;
257 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
258 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
259 Elf_Internal_Phdr
*last_phdr
= NULL
;
261 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
262 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
267 if (last_phdr
== NULL
268 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
271 for (j
= 1; j
< elf_numsections (abfd
); j
++)
273 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
275 if (ELF_SECTION_SIZE (shdr
, phdr
) != 0
276 && ELF_SECTION_IN_SEGMENT (shdr
, phdr
))
278 asection
*sec
= shdr
->bfd_section
;
279 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
280 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
288 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
289 strip --strip-unneeded will not remove them. */
292 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
294 if (sym
->name
!= NULL
295 && sym
->section
!= bfd_abs_section_ptr
296 && strncmp (sym
->name
, "_EAR_", 5) == 0)
297 sym
->flags
|= BSF_KEEP
;
300 /* SPU ELF linker hash table. */
302 struct spu_link_hash_table
304 struct elf_link_hash_table elf
;
306 struct spu_elf_params
*params
;
308 /* Shortcuts to overlay sections. */
314 /* Count of stubs in each overlay section. */
315 unsigned int *stub_count
;
317 /* The stub section for each overlay section. */
320 struct elf_link_hash_entry
*ovly_entry
[2];
322 /* Number of overlay buffers. */
323 unsigned int num_buf
;
325 /* Total number of overlays. */
326 unsigned int num_overlays
;
328 /* For soft icache. */
329 unsigned int line_size_log2
;
330 unsigned int num_lines_log2
;
331 unsigned int fromelem_size_log2
;
333 /* How much memory we have. */
334 unsigned int local_store
;
336 /* Count of overlay stubs needed in non-overlay area. */
337 unsigned int non_ovly_stub
;
339 /* Pointer to the fixup section */
343 unsigned int stub_err
: 1;
346 /* Hijack the generic got fields for overlay stub accounting. */
350 struct got_entry
*next
;
/* Fetch the SPU-specific linker hash table from link info P, checking
   the hash table id so a non-SPU hash table yields NULL.  */
#define spu_hash_table(p) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
   == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
365 struct function_info
*fun
;
366 struct call_info
*next
;
368 unsigned int max_depth
;
369 unsigned int is_tail
: 1;
370 unsigned int is_pasted
: 1;
371 unsigned int broken_cycle
: 1;
372 unsigned int priority
: 13;
377 /* List of functions called. Also branches to hot/cold part of
379 struct call_info
*call_list
;
380 /* For hot/cold part of function, point to owner. */
381 struct function_info
*start
;
382 /* Symbol at start of function. */
384 Elf_Internal_Sym
*sym
;
385 struct elf_link_hash_entry
*h
;
387 /* Function section. */
390 /* Where last called from, and number of sections called from. */
391 asection
*last_caller
;
392 unsigned int call_count
;
393 /* Address range of (this part of) function. */
395 /* Offset where we found a store of lr, or -1 if none found. */
397 /* Offset where we found the stack adjustment insn. */
401 /* Distance from root of call tree. Tail and hot/cold branches
402 count as one deeper. We aren't counting stack frames here. */
404 /* Set if global symbol. */
405 unsigned int global
: 1;
406 /* Set if known to be start of function (as distinct from a hunk
407 in hot/cold section. */
408 unsigned int is_func
: 1;
409 /* Set if not a root node. */
410 unsigned int non_root
: 1;
411 /* Flags used during call tree traversal. It's cheaper to replicate
412 the visit flags than have one which needs clearing after a traversal. */
413 unsigned int visit1
: 1;
414 unsigned int visit2
: 1;
415 unsigned int marking
: 1;
416 unsigned int visit3
: 1;
417 unsigned int visit4
: 1;
418 unsigned int visit5
: 1;
419 unsigned int visit6
: 1;
420 unsigned int visit7
: 1;
423 struct spu_elf_stack_info
427 /* Variable size array describing functions, one per contiguous
428 address range belonging to a function. */
429 struct function_info fun
[1];
432 static struct function_info
*find_function (asection
*, bfd_vma
,
433 struct bfd_link_info
*);
435 /* Create a spu ELF linker hash table. */
437 static struct bfd_link_hash_table
*
438 spu_elf_link_hash_table_create (bfd
*abfd
)
440 struct spu_link_hash_table
*htab
;
442 htab
= bfd_malloc (sizeof (*htab
));
446 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
447 _bfd_elf_link_hash_newfunc
,
448 sizeof (struct elf_link_hash_entry
),
455 memset (&htab
->ovtab
, 0,
456 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
458 htab
->elf
.init_got_refcount
.refcount
= 0;
459 htab
->elf
.init_got_refcount
.glist
= NULL
;
460 htab
->elf
.init_got_offset
.offset
= 0;
461 htab
->elf
.init_got_offset
.glist
= NULL
;
462 return &htab
->elf
.root
;
466 spu_elf_setup (struct bfd_link_info
*info
, struct spu_elf_params
*params
)
468 bfd_vma max_branch_log2
;
470 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
471 htab
->params
= params
;
472 htab
->line_size_log2
= bfd_log2 (htab
->params
->line_size
);
473 htab
->num_lines_log2
= bfd_log2 (htab
->params
->num_lines
);
475 /* For the software i-cache, we provide a "from" list whose size
476 is a power-of-two number of quadwords, big enough to hold one
477 byte per outgoing branch. Compute this number here. */
478 max_branch_log2
= bfd_log2 (htab
->params
->max_branch
);
479 htab
->fromelem_size_log2
= max_branch_log2
> 4 ? max_branch_log2
- 4 : 0;
482 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
483 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
484 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
487 get_sym_h (struct elf_link_hash_entry
**hp
,
488 Elf_Internal_Sym
**symp
,
490 Elf_Internal_Sym
**locsymsp
,
491 unsigned long r_symndx
,
494 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
496 if (r_symndx
>= symtab_hdr
->sh_info
)
498 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
499 struct elf_link_hash_entry
*h
;
501 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
502 while (h
->root
.type
== bfd_link_hash_indirect
503 || h
->root
.type
== bfd_link_hash_warning
)
504 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
514 asection
*symsec
= NULL
;
515 if (h
->root
.type
== bfd_link_hash_defined
516 || h
->root
.type
== bfd_link_hash_defweak
)
517 symsec
= h
->root
.u
.def
.section
;
523 Elf_Internal_Sym
*sym
;
524 Elf_Internal_Sym
*locsyms
= *locsymsp
;
528 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
530 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
532 0, NULL
, NULL
, NULL
);
537 sym
= locsyms
+ r_symndx
;
546 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
552 /* Create the note section if not already present. This is done early so
553 that the linker maps the sections to the right place in the output. */
556 spu_elf_create_sections (struct bfd_link_info
*info
)
558 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
561 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
562 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
567 /* Make SPU_PTNOTE_SPUNAME section. */
574 ibfd
= info
->input_bfds
;
575 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
576 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
578 || !bfd_set_section_alignment (ibfd
, s
, 4))
581 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
582 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
583 size
+= (name_len
+ 3) & -4;
585 if (!bfd_set_section_size (ibfd
, s
, size
))
588 data
= bfd_zalloc (ibfd
, size
);
592 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
593 bfd_put_32 (ibfd
, name_len
, data
+ 4);
594 bfd_put_32 (ibfd
, 1, data
+ 8);
595 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
596 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
597 bfd_get_filename (info
->output_bfd
), name_len
);
601 if (htab
->params
->emit_fixups
)
606 if (htab
->elf
.dynobj
== NULL
)
607 htab
->elf
.dynobj
= ibfd
;
608 ibfd
= htab
->elf
.dynobj
;
609 flags
= (SEC_LOAD
| SEC_ALLOC
| SEC_READONLY
| SEC_HAS_CONTENTS
610 | SEC_IN_MEMORY
| SEC_LINKER_CREATED
);
611 s
= bfd_make_section_anyway_with_flags (ibfd
, ".fixup", flags
);
612 if (s
== NULL
|| !bfd_set_section_alignment (ibfd
, s
, 2))
620 /* qsort predicate to sort sections by vma. */
623 sort_sections (const void *a
, const void *b
)
625 const asection
*const *s1
= a
;
626 const asection
*const *s2
= b
;
627 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
630 return delta
< 0 ? -1 : 1;
632 return (*s1
)->index
- (*s2
)->index
;
635 /* Identify overlays in the output bfd, and number them.
636 Returns 0 on error, 1 if no overlays, 2 if overlays. */
639 spu_elf_find_overlays (struct bfd_link_info
*info
)
641 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
642 asection
**alloc_sec
;
643 unsigned int i
, n
, ovl_index
, num_buf
;
646 static const char *const entry_names
[2][2] = {
647 { "__ovly_load", "__icache_br_handler" },
648 { "__ovly_return", "__icache_call_handler" }
651 if (info
->output_bfd
->section_count
< 2)
655 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
656 if (alloc_sec
== NULL
)
659 /* Pick out all the alloced sections. */
660 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
661 if ((s
->flags
& SEC_ALLOC
) != 0
662 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
672 /* Sort them by vma. */
673 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
675 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
676 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
678 unsigned int prev_buf
= 0, set_id
= 0;
680 /* Look for an overlapping vma to find the first overlay section. */
681 bfd_vma vma_start
= 0;
683 for (i
= 1; i
< n
; i
++)
686 if (s
->vma
< ovl_end
)
688 asection
*s0
= alloc_sec
[i
- 1];
692 << (htab
->num_lines_log2
+ htab
->line_size_log2
)));
697 ovl_end
= s
->vma
+ s
->size
;
700 /* Now find any sections within the cache area. */
701 for (ovl_index
= 0, num_buf
= 0; i
< n
; i
++)
704 if (s
->vma
>= ovl_end
)
707 /* A section in an overlay area called .ovl.init is not
708 an overlay, in the sense that it might be loaded in
709 by the overlay manager, but rather the initial
710 section contents for the overlay buffer. */
711 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
713 num_buf
= ((s
->vma
- vma_start
) >> htab
->line_size_log2
) + 1;
714 set_id
= (num_buf
== prev_buf
)? set_id
+ 1 : 0;
717 if ((s
->vma
- vma_start
) & (htab
->params
->line_size
- 1))
719 info
->callbacks
->einfo (_("%X%P: overlay section %A "
720 "does not start on a cache line.\n"),
722 bfd_set_error (bfd_error_bad_value
);
725 else if (s
->size
> htab
->params
->line_size
)
727 info
->callbacks
->einfo (_("%X%P: overlay section %A "
728 "is larger than a cache line.\n"),
730 bfd_set_error (bfd_error_bad_value
);
734 alloc_sec
[ovl_index
++] = s
;
735 spu_elf_section_data (s
)->u
.o
.ovl_index
736 = (set_id
<< htab
->num_lines_log2
) + num_buf
;
737 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
741 /* Ensure there are no more overlay sections. */
745 if (s
->vma
< ovl_end
)
747 info
->callbacks
->einfo (_("%X%P: overlay section %A "
748 "is not in cache area.\n"),
750 bfd_set_error (bfd_error_bad_value
);
754 ovl_end
= s
->vma
+ s
->size
;
759 /* Look for overlapping vmas. Any with overlap must be overlays.
760 Count them. Also count the number of overlay regions. */
761 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
764 if (s
->vma
< ovl_end
)
766 asection
*s0
= alloc_sec
[i
- 1];
768 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
771 if (strncmp (s0
->name
, ".ovl.init", 9) != 0)
773 alloc_sec
[ovl_index
] = s0
;
774 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
775 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= num_buf
;
778 ovl_end
= s
->vma
+ s
->size
;
780 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
782 alloc_sec
[ovl_index
] = s
;
783 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
784 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
785 if (s0
->vma
!= s
->vma
)
787 info
->callbacks
->einfo (_("%X%P: overlay sections %A "
788 "and %A do not start at the "
791 bfd_set_error (bfd_error_bad_value
);
794 if (ovl_end
< s
->vma
+ s
->size
)
795 ovl_end
= s
->vma
+ s
->size
;
799 ovl_end
= s
->vma
+ s
->size
;
803 htab
->num_overlays
= ovl_index
;
804 htab
->num_buf
= num_buf
;
805 htab
->ovl_sec
= alloc_sec
;
810 for (i
= 0; i
< 2; i
++)
813 struct elf_link_hash_entry
*h
;
815 name
= entry_names
[i
][htab
->params
->ovly_flavour
];
816 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
820 if (h
->root
.type
== bfd_link_hash_new
)
822 h
->root
.type
= bfd_link_hash_undefined
;
824 h
->ref_regular_nonweak
= 1;
827 htab
->ovly_entry
[i
] = h
;
833 /* Non-zero to use bra in overlay stubs rather than br. */
/* SPU instruction encodings (opcode in the high bits of the 32-bit
   word) used when emitting overlay stub code below.  */
#define BRA	0x30000000	/* branch absolute */
#define BRASL	0x31000000	/* branch absolute and set link ($75 in stubs) */
#define BR	0x32000000	/* branch relative */
#define BRSL	0x33000000	/* branch relative and set link */
#define NOP	0x40200000	/* no-op */
#define LNOP	0x00200000	/* no-op, load/store form */
#define ILA	0x42000000	/* immediate load address */
844 /* Return true for all relative and absolute branch instructions.
852 brhnz 00100011 0.. */
855 is_branch (const unsigned char *insn
)
857 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
860 /* Return true for all indirect branch instructions.
868 bihnz 00100101 011 */
871 is_indirect_branch (const unsigned char *insn
)
873 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
876 /* Return true for branch hint instructions.
881 is_hint (const unsigned char *insn
)
883 return (insn
[0] & 0xfc) == 0x10;
886 /* True if INPUT_SECTION might need overlay stubs. */
889 maybe_needs_stubs (asection
*input_section
)
891 /* No stubs for debug sections and suchlike. */
892 if ((input_section
->flags
& SEC_ALLOC
) == 0)
895 /* No stubs for link-once sections that will be discarded. */
896 if (input_section
->output_section
== bfd_abs_section_ptr
)
899 /* Don't create stubs for .eh_frame references. */
900 if (strcmp (input_section
->name
, ".eh_frame") == 0)
922 /* Return non-zero if this reloc symbol should go via an overlay stub.
923 Return 2 if the stub must be in non-overlay area. */
925 static enum _stub_type
926 needs_ovl_stub (struct elf_link_hash_entry
*h
,
927 Elf_Internal_Sym
*sym
,
929 asection
*input_section
,
930 Elf_Internal_Rela
*irela
,
932 struct bfd_link_info
*info
)
934 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
935 enum elf_spu_reloc_type r_type
;
936 unsigned int sym_type
;
937 bfd_boolean branch
, hint
, call
;
938 enum _stub_type ret
= no_stub
;
942 || sym_sec
->output_section
== bfd_abs_section_ptr
943 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
948 /* Ensure no stubs for user supplied overlay manager syms. */
949 if (h
== htab
->ovly_entry
[0] || h
== htab
->ovly_entry
[1])
952 /* setjmp always goes via an overlay stub, because then the return
953 and hence the longjmp goes via __ovly_return. That magically
954 makes setjmp/longjmp between overlays work. */
955 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
956 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
963 sym_type
= ELF_ST_TYPE (sym
->st_info
);
965 r_type
= ELF32_R_TYPE (irela
->r_info
);
969 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
971 if (contents
== NULL
)
974 if (!bfd_get_section_contents (input_section
->owner
,
981 contents
+= irela
->r_offset
;
983 branch
= is_branch (contents
);
984 hint
= is_hint (contents
);
987 call
= (contents
[0] & 0xfd) == 0x31;
989 && sym_type
!= STT_FUNC
992 /* It's common for people to write assembly and forget
993 to give function symbols the right type. Handle
994 calls to such symbols, but warn so that (hopefully)
995 people will fix their code. We need the symbol
996 type to be correct to distinguish function pointer
997 initialisation from other pointer initialisations. */
998 const char *sym_name
;
1001 sym_name
= h
->root
.root
.string
;
1004 Elf_Internal_Shdr
*symtab_hdr
;
1005 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
1006 sym_name
= bfd_elf_sym_name (input_section
->owner
,
1011 (*_bfd_error_handler
) (_("warning: call to non-function"
1012 " symbol %s defined in %B"),
1013 sym_sec
->owner
, sym_name
);
1019 if ((!branch
&& htab
->params
->ovly_flavour
== ovly_soft_icache
)
1020 || (sym_type
!= STT_FUNC
1021 && !(branch
|| hint
)
1022 && (sym_sec
->flags
& SEC_CODE
) == 0))
1025 /* Usually, symbols in non-overlay sections don't need stubs. */
1026 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
1027 && !htab
->params
->non_overlay_stubs
)
1030 /* A reference from some other section to a symbol in an overlay
1031 section needs a stub. */
1032 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
1033 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
1035 unsigned int lrlive
= 0;
1037 lrlive
= (contents
[1] & 0x70) >> 4;
1039 if (!lrlive
&& (call
|| sym_type
== STT_FUNC
))
1040 ret
= call_ovl_stub
;
1042 ret
= br000_ovl_stub
+ lrlive
;
1045 /* If this insn isn't a branch then we are possibly taking the
1046 address of a function and passing it out somehow. Soft-icache code
1047 always generates inline code to do indirect branches. */
1048 if (!(branch
|| hint
)
1049 && sym_type
== STT_FUNC
1050 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
1057 count_stub (struct spu_link_hash_table
*htab
,
1060 enum _stub_type stub_type
,
1061 struct elf_link_hash_entry
*h
,
1062 const Elf_Internal_Rela
*irela
)
1064 unsigned int ovl
= 0;
1065 struct got_entry
*g
, **head
;
1068 /* If this instruction is a branch or call, we need a stub
1069 for it. One stub per function per overlay.
1070 If it isn't a branch, then we are taking the address of
1071 this function so need a stub in the non-overlay area
1072 for it. One stub per function. */
1073 if (stub_type
!= nonovl_stub
)
1074 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1077 head
= &h
->got
.glist
;
1080 if (elf_local_got_ents (ibfd
) == NULL
)
1082 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
1083 * sizeof (*elf_local_got_ents (ibfd
)));
1084 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
1085 if (elf_local_got_ents (ibfd
) == NULL
)
1088 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1091 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1093 htab
->stub_count
[ovl
] += 1;
1099 addend
= irela
->r_addend
;
1103 struct got_entry
*gnext
;
1105 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1106 if (g
->addend
== addend
&& g
->ovl
== 0)
1111 /* Need a new non-overlay area stub. Zap other stubs. */
1112 for (g
= *head
; g
!= NULL
; g
= gnext
)
1115 if (g
->addend
== addend
)
1117 htab
->stub_count
[g
->ovl
] -= 1;
1125 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1126 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1132 g
= bfd_malloc (sizeof *g
);
1137 g
->stub_addr
= (bfd_vma
) -1;
1141 htab
->stub_count
[ovl
] += 1;
1147 /* Support two sizes of overlay stubs, a slower more compact stub of two
1148 intructions, and a faster stub of four instructions.
1149 Soft-icache stubs are four or eight words. */
1152 ovl_stub_size (struct spu_elf_params
*params
)
1154 return 16 << params
->ovly_flavour
>> params
->compact_stub
;
1158 ovl_stub_size_log2 (struct spu_elf_params
*params
)
1160 return 4 + params
->ovly_flavour
- params
->compact_stub
;
1163 /* Two instruction overlay stubs look like:
1165 brsl $75,__ovly_load
1166 .word target_ovl_and_address
1168 ovl_and_address is a word with the overlay number in the top 14 bits
1169 and local store address in the bottom 18 bits.
1171 Four instruction overlay stubs look like:
1175 ila $79,target_address
1178 Software icache stubs are:
1182 .word lrlive_branchlocalstoreaddr;
1183 brasl $75,__icache_br_handler
1188 build_stub (struct bfd_link_info
*info
,
1191 enum _stub_type stub_type
,
1192 struct elf_link_hash_entry
*h
,
1193 const Elf_Internal_Rela
*irela
,
1197 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1198 unsigned int ovl
, dest_ovl
, set_id
;
1199 struct got_entry
*g
, **head
;
1201 bfd_vma addend
, from
, to
, br_dest
, patt
;
1202 unsigned int lrlive
;
1205 if (stub_type
!= nonovl_stub
)
1206 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1209 head
= &h
->got
.glist
;
1211 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1215 addend
= irela
->r_addend
;
1217 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1219 g
= bfd_malloc (sizeof *g
);
1225 g
->br_addr
= (irela
->r_offset
1226 + isec
->output_offset
1227 + isec
->output_section
->vma
);
1233 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1234 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1239 if (g
->ovl
== 0 && ovl
!= 0)
1242 if (g
->stub_addr
!= (bfd_vma
) -1)
1246 sec
= htab
->stub_sec
[ovl
];
1247 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
1248 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
1249 g
->stub_addr
= from
;
1250 to
= (htab
->ovly_entry
[0]->root
.u
.def
.value
1251 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_offset
1252 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_section
->vma
);
1254 if (((dest
| to
| from
) & 3) != 0)
1259 dest_ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
1261 if (htab
->params
->ovly_flavour
== ovly_normal
1262 && !htab
->params
->compact_stub
)
1264 bfd_put_32 (sec
->owner
, ILA
+ ((dest_ovl
<< 7) & 0x01ffff80) + 78,
1265 sec
->contents
+ sec
->size
);
1266 bfd_put_32 (sec
->owner
, LNOP
,
1267 sec
->contents
+ sec
->size
+ 4);
1268 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
1269 sec
->contents
+ sec
->size
+ 8);
1271 bfd_put_32 (sec
->owner
, BR
+ (((to
- (from
+ 12)) << 5) & 0x007fff80),
1272 sec
->contents
+ sec
->size
+ 12);
1274 bfd_put_32 (sec
->owner
, BRA
+ ((to
<< 5) & 0x007fff80),
1275 sec
->contents
+ sec
->size
+ 12);
1277 else if (htab
->params
->ovly_flavour
== ovly_normal
1278 && htab
->params
->compact_stub
)
1281 bfd_put_32 (sec
->owner
, BRSL
+ (((to
- from
) << 5) & 0x007fff80) + 75,
1282 sec
->contents
+ sec
->size
);
1284 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1285 sec
->contents
+ sec
->size
);
1286 bfd_put_32 (sec
->owner
, (dest
& 0x3ffff) | (dest_ovl
<< 18),
1287 sec
->contents
+ sec
->size
+ 4);
1289 else if (htab
->params
->ovly_flavour
== ovly_soft_icache
1290 && htab
->params
->compact_stub
)
1293 if (stub_type
== nonovl_stub
)
1295 else if (stub_type
== call_ovl_stub
)
1296 /* A brsl makes lr live and *(*sp+16) is live.
1297 Tail calls have the same liveness. */
1299 else if (!htab
->params
->lrlive_analysis
)
1300 /* Assume stack frame and lr save. */
1302 else if (irela
!= NULL
)
1304 /* Analyse branch instructions. */
1305 struct function_info
*caller
;
1308 caller
= find_function (isec
, irela
->r_offset
, info
);
1309 if (caller
->start
== NULL
)
1310 off
= irela
->r_offset
;
1313 struct function_info
*found
= NULL
;
1315 /* Find the earliest piece of this function that
1316 has frame adjusting instructions. We might
1317 see dynamic frame adjustment (eg. for alloca)
1318 in some later piece, but functions using
1319 alloca always set up a frame earlier. Frame
1320 setup instructions are always in one piece. */
1321 if (caller
->lr_store
!= (bfd_vma
) -1
1322 || caller
->sp_adjust
!= (bfd_vma
) -1)
1324 while (caller
->start
!= NULL
)
1326 caller
= caller
->start
;
1327 if (caller
->lr_store
!= (bfd_vma
) -1
1328 || caller
->sp_adjust
!= (bfd_vma
) -1)
1336 if (off
> caller
->sp_adjust
)
1338 if (off
> caller
->lr_store
)
1339 /* Only *(*sp+16) is live. */
1342 /* If no lr save, then we must be in a
1343 leaf function with a frame.
1344 lr is still live. */
1347 else if (off
> caller
->lr_store
)
1349 /* Between lr save and stack adjust. */
1351 /* This should never happen since prologues won't
1356 /* On entry to function. */
1359 if (stub_type
!= br000_ovl_stub
1360 && lrlive
!= stub_type
- br000_ovl_stub
)
1361 info
->callbacks
->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1362 "from analysis (%u)\n"),
1363 isec
, irela
->r_offset
, lrlive
,
1364 stub_type
- br000_ovl_stub
);
1367 /* If given lrlive info via .brinfo, use it. */
1368 if (stub_type
> br000_ovl_stub
)
1369 lrlive
= stub_type
- br000_ovl_stub
;
1372 to
= (htab
->ovly_entry
[1]->root
.u
.def
.value
1373 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_offset
1374 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_section
->vma
);
1376 /* The branch that uses this stub goes to stub_addr + 4. We'll
1377 set up an xor pattern that can be used by the icache manager
1378 to modify this branch to go directly to its destination. */
1380 br_dest
= g
->stub_addr
;
1383 /* Except in the case of _SPUEAR_ stubs, the branch in
1384 question is the one in the stub itself. */
1385 BFD_ASSERT (stub_type
== nonovl_stub
);
1386 g
->br_addr
= g
->stub_addr
;
1390 set_id
= ((dest_ovl
- 1) >> htab
->num_lines_log2
) + 1;
1391 bfd_put_32 (sec
->owner
, (set_id
<< 18) | (dest
& 0x3ffff),
1392 sec
->contents
+ sec
->size
);
1393 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1394 sec
->contents
+ sec
->size
+ 4);
1395 bfd_put_32 (sec
->owner
, (lrlive
<< 29) | (g
->br_addr
& 0x3ffff),
1396 sec
->contents
+ sec
->size
+ 8);
1397 patt
= dest
^ br_dest
;
1398 if (irela
!= NULL
&& ELF32_R_TYPE (irela
->r_info
) == R_SPU_REL16
)
1399 patt
= (dest
- g
->br_addr
) ^ (br_dest
- g
->br_addr
);
1400 bfd_put_32 (sec
->owner
, (patt
<< 5) & 0x007fff80,
1401 sec
->contents
+ sec
->size
+ 12);
1404 /* Extra space for linked list entries. */
1410 sec
->size
+= ovl_stub_size (htab
->params
);
1412 if (htab
->params
->emit_stub_syms
)
1418 len
= 8 + sizeof (".ovl_call.") - 1;
1420 len
+= strlen (h
->root
.root
.string
);
1425 add
= (int) irela
->r_addend
& 0xffffffff;
1428 name
= bfd_malloc (len
);
1432 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1434 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1436 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1437 dest_sec
->id
& 0xffffffff,
1438 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1440 sprintf (name
+ len
- 9, "+%x", add
);
1442 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1446 if (h
->root
.type
== bfd_link_hash_new
)
1448 h
->root
.type
= bfd_link_hash_defined
;
1449 h
->root
.u
.def
.section
= sec
;
1450 h
->size
= ovl_stub_size (htab
->params
);
1451 h
->root
.u
.def
.value
= sec
->size
- h
->size
;
1455 h
->ref_regular_nonweak
= 1;
1456 h
->forced_local
= 1;
1464 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1468 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1470 /* Symbols starting with _SPUEAR_ need a stub because they may be
1471 invoked by the PPU. */
1472 struct bfd_link_info
*info
= inf
;
1473 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1476 if ((h
->root
.type
== bfd_link_hash_defined
1477 || h
->root
.type
== bfd_link_hash_defweak
)
1479 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1480 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1481 && sym_sec
->output_section
!= bfd_abs_section_ptr
1482 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1483 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1484 || htab
->params
->non_overlay_stubs
))
1486 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1493 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1495 /* Symbols starting with _SPUEAR_ need a stub because they may be
1496 invoked by the PPU. */
1497 struct bfd_link_info
*info
= inf
;
1498 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1501 if ((h
->root
.type
== bfd_link_hash_defined
1502 || h
->root
.type
== bfd_link_hash_defweak
)
1504 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1505 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1506 && sym_sec
->output_section
!= bfd_abs_section_ptr
1507 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1508 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1509 || htab
->params
->non_overlay_stubs
))
1511 return build_stub (info
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1512 h
->root
.u
.def
.value
, sym_sec
);
1518 /* Size or build stubs. */
1521 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1523 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1526 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1528 extern const bfd_target bfd_elf32_spu_vec
;
1529 Elf_Internal_Shdr
*symtab_hdr
;
1531 Elf_Internal_Sym
*local_syms
= NULL
;
1533 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1536 /* We'll need the symbol table in a second. */
1537 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1538 if (symtab_hdr
->sh_info
== 0)
1541 /* Walk over each section attached to the input bfd. */
1542 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1544 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1546 /* If there aren't any relocs, then there's nothing more to do. */
1547 if ((isec
->flags
& SEC_RELOC
) == 0
1548 || isec
->reloc_count
== 0)
1551 if (!maybe_needs_stubs (isec
))
1554 /* Get the relocs. */
1555 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1557 if (internal_relocs
== NULL
)
1558 goto error_ret_free_local
;
1560 /* Now examine each relocation. */
1561 irela
= internal_relocs
;
1562 irelaend
= irela
+ isec
->reloc_count
;
1563 for (; irela
< irelaend
; irela
++)
1565 enum elf_spu_reloc_type r_type
;
1566 unsigned int r_indx
;
1568 Elf_Internal_Sym
*sym
;
1569 struct elf_link_hash_entry
*h
;
1570 enum _stub_type stub_type
;
1572 r_type
= ELF32_R_TYPE (irela
->r_info
);
1573 r_indx
= ELF32_R_SYM (irela
->r_info
);
1575 if (r_type
>= R_SPU_max
)
1577 bfd_set_error (bfd_error_bad_value
);
1578 error_ret_free_internal
:
1579 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1580 free (internal_relocs
);
1581 error_ret_free_local
:
1582 if (local_syms
!= NULL
1583 && (symtab_hdr
->contents
1584 != (unsigned char *) local_syms
))
1589 /* Determine the reloc target section. */
1590 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1591 goto error_ret_free_internal
;
1593 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1595 if (stub_type
== no_stub
)
1597 else if (stub_type
== stub_error
)
1598 goto error_ret_free_internal
;
1600 if (htab
->stub_count
== NULL
)
1603 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1604 htab
->stub_count
= bfd_zmalloc (amt
);
1605 if (htab
->stub_count
== NULL
)
1606 goto error_ret_free_internal
;
1611 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1612 goto error_ret_free_internal
;
1619 dest
= h
->root
.u
.def
.value
;
1621 dest
= sym
->st_value
;
1622 dest
+= irela
->r_addend
;
1623 if (!build_stub (info
, ibfd
, isec
, stub_type
, h
, irela
,
1625 goto error_ret_free_internal
;
1629 /* We're done with the internal relocs, free them. */
1630 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1631 free (internal_relocs
);
1634 if (local_syms
!= NULL
1635 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1637 if (!info
->keep_memory
)
1640 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1647 /* Allocate space for overlay call and return stubs.
1648 Return 0 on error, 1 if no overlays, 2 otherwise. */
1651 spu_elf_size_stubs (struct bfd_link_info
*info
)
1653 struct spu_link_hash_table
*htab
;
1660 if (!process_stubs (info
, FALSE
))
1663 htab
= spu_hash_table (info
);
1664 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1668 ibfd
= info
->input_bfds
;
1669 if (htab
->stub_count
!= NULL
)
1671 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1672 htab
->stub_sec
= bfd_zmalloc (amt
);
1673 if (htab
->stub_sec
== NULL
)
1676 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1677 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1678 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1679 htab
->stub_sec
[0] = stub
;
1681 || !bfd_set_section_alignment (ibfd
, stub
,
1682 ovl_stub_size_log2 (htab
->params
)))
1684 stub
->size
= htab
->stub_count
[0] * ovl_stub_size (htab
->params
);
1685 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1686 /* Extra space for linked list entries. */
1687 stub
->size
+= htab
->stub_count
[0] * 16;
1689 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1691 asection
*osec
= htab
->ovl_sec
[i
];
1692 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1693 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1694 htab
->stub_sec
[ovl
] = stub
;
1696 || !bfd_set_section_alignment (ibfd
, stub
,
1697 ovl_stub_size_log2 (htab
->params
)))
1699 stub
->size
= htab
->stub_count
[ovl
] * ovl_stub_size (htab
->params
);
1703 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1705 /* Space for icache manager tables.
1706 a) Tag array, one quadword per cache line.
1707 b) Rewrite "to" list, one quadword per cache line.
1708 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1709 a power-of-two number of full quadwords) per cache line. */
1712 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1713 if (htab
->ovtab
== NULL
1714 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1717 htab
->ovtab
->size
= (16 + 16 + (16 << htab
->fromelem_size_log2
))
1718 << htab
->num_lines_log2
;
1720 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1721 htab
->init
= bfd_make_section_anyway_with_flags (ibfd
, ".ovini", flags
);
1722 if (htab
->init
== NULL
1723 || !bfd_set_section_alignment (ibfd
, htab
->init
, 4))
1726 htab
->init
->size
= 16;
1728 else if (htab
->stub_count
== NULL
)
1732 /* htab->ovtab consists of two arrays.
1742 . } _ovly_buf_table[];
1745 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1746 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1747 if (htab
->ovtab
== NULL
1748 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1751 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1754 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1755 if (htab
->toe
== NULL
1756 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1758 htab
->toe
->size
= 16;
1763 /* Called from ld to place overlay manager data sections. This is done
1764 after the overlay manager itself is loaded, mainly so that the
1765 linker's htab->init section is placed after any other .ovl.init
1769 spu_elf_place_overlay_data (struct bfd_link_info
*info
)
1771 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1774 if (htab
->stub_sec
!= NULL
)
1776 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[0], NULL
, ".text");
1778 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1780 asection
*osec
= htab
->ovl_sec
[i
];
1781 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1782 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[ovl
], osec
, NULL
);
1786 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1787 (*htab
->params
->place_spu_section
) (htab
->init
, NULL
, ".ovl.init");
1789 if (htab
->ovtab
!= NULL
)
1791 const char *ovout
= ".data";
1792 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1794 (*htab
->params
->place_spu_section
) (htab
->ovtab
, NULL
, ovout
);
1797 if (htab
->toe
!= NULL
)
1798 (*htab
->params
->place_spu_section
) (htab
->toe
, NULL
, ".toe");
1801 /* Functions to handle embedded spu_ovl.o object. */
1804 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1810 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1816 struct _ovl_stream
*os
;
1820 os
= (struct _ovl_stream
*) stream
;
1821 max
= (const char *) os
->end
- (const char *) os
->start
;
1823 if ((ufile_ptr
) offset
>= max
)
1827 if (count
> max
- offset
)
1828 count
= max
- offset
;
1830 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1835 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1837 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1844 return *ovl_bfd
!= NULL
;
1848 overlay_index (asection
*sec
)
1851 || sec
->output_section
== bfd_abs_section_ptr
)
1853 return spu_elf_section_data (sec
->output_section
)->u
.o
.ovl_index
;
1856 /* Define an STT_OBJECT symbol. */
1858 static struct elf_link_hash_entry
*
1859 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1861 struct elf_link_hash_entry
*h
;
1863 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1867 if (h
->root
.type
!= bfd_link_hash_defined
1870 h
->root
.type
= bfd_link_hash_defined
;
1871 h
->root
.u
.def
.section
= htab
->ovtab
;
1872 h
->type
= STT_OBJECT
;
1875 h
->ref_regular_nonweak
= 1;
1878 else if (h
->root
.u
.def
.section
->owner
!= NULL
)
1880 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1881 h
->root
.u
.def
.section
->owner
,
1882 h
->root
.root
.string
);
1883 bfd_set_error (bfd_error_bad_value
);
1888 (*_bfd_error_handler
) (_("you are not allowed to define %s in a script"),
1889 h
->root
.root
.string
);
1890 bfd_set_error (bfd_error_bad_value
);
1897 /* Fill in all stubs and the overlay tables. */
1900 spu_elf_build_stubs (struct bfd_link_info
*info
)
1902 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1903 struct elf_link_hash_entry
*h
;
1909 if (htab
->num_overlays
!= 0)
1911 for (i
= 0; i
< 2; i
++)
1913 h
= htab
->ovly_entry
[i
];
1915 && (h
->root
.type
== bfd_link_hash_defined
1916 || h
->root
.type
== bfd_link_hash_defweak
)
1919 s
= h
->root
.u
.def
.section
->output_section
;
1920 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1922 (*_bfd_error_handler
) (_("%s in overlay section"),
1923 h
->root
.root
.string
);
1924 bfd_set_error (bfd_error_bad_value
);
1931 if (htab
->stub_sec
!= NULL
)
1933 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1934 if (htab
->stub_sec
[i
]->size
!= 0)
1936 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1937 htab
->stub_sec
[i
]->size
);
1938 if (htab
->stub_sec
[i
]->contents
== NULL
)
1940 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1941 htab
->stub_sec
[i
]->size
= 0;
1944 /* Fill in all the stubs. */
1945 process_stubs (info
, TRUE
);
1946 if (!htab
->stub_err
)
1947 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1951 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1952 bfd_set_error (bfd_error_bad_value
);
1956 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1958 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1960 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1961 bfd_set_error (bfd_error_bad_value
);
1964 htab
->stub_sec
[i
]->rawsize
= 0;
1968 if (htab
->ovtab
== NULL
|| htab
->ovtab
->size
== 0)
1971 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1972 if (htab
->ovtab
->contents
== NULL
)
1975 p
= htab
->ovtab
->contents
;
1976 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1980 h
= define_ovtab_symbol (htab
, "__icache_tag_array");
1983 h
->root
.u
.def
.value
= 0;
1984 h
->size
= 16 << htab
->num_lines_log2
;
1987 h
= define_ovtab_symbol (htab
, "__icache_tag_array_size");
1990 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
1991 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1993 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to");
1996 h
->root
.u
.def
.value
= off
;
1997 h
->size
= 16 << htab
->num_lines_log2
;
2000 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to_size");
2003 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
2004 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2006 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from");
2009 h
->root
.u
.def
.value
= off
;
2010 h
->size
= 16 << (htab
->fromelem_size_log2
+ htab
->num_lines_log2
);
2013 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from_size");
2016 h
->root
.u
.def
.value
= 16 << (htab
->fromelem_size_log2
2017 + htab
->num_lines_log2
);
2018 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2020 h
= define_ovtab_symbol (htab
, "__icache_log2_fromelemsize");
2023 h
->root
.u
.def
.value
= htab
->fromelem_size_log2
;
2024 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2026 h
= define_ovtab_symbol (htab
, "__icache_base");
2029 h
->root
.u
.def
.value
= htab
->ovl_sec
[0]->vma
;
2030 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2031 h
->size
= htab
->num_buf
<< htab
->line_size_log2
;
2033 h
= define_ovtab_symbol (htab
, "__icache_linesize");
2036 h
->root
.u
.def
.value
= 1 << htab
->line_size_log2
;
2037 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2039 h
= define_ovtab_symbol (htab
, "__icache_log2_linesize");
2042 h
->root
.u
.def
.value
= htab
->line_size_log2
;
2043 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2045 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_linesize");
2048 h
->root
.u
.def
.value
= -htab
->line_size_log2
;
2049 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2051 h
= define_ovtab_symbol (htab
, "__icache_cachesize");
2054 h
->root
.u
.def
.value
= 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
);
2055 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2057 h
= define_ovtab_symbol (htab
, "__icache_log2_cachesize");
2060 h
->root
.u
.def
.value
= htab
->num_lines_log2
+ htab
->line_size_log2
;
2061 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2063 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_cachesize");
2066 h
->root
.u
.def
.value
= -(htab
->num_lines_log2
+ htab
->line_size_log2
);
2067 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2069 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
2071 htab
->init
->contents
= bfd_zalloc (htab
->init
->owner
,
2073 if (htab
->init
->contents
== NULL
)
2076 h
= define_ovtab_symbol (htab
, "__icache_fileoff");
2079 h
->root
.u
.def
.value
= 0;
2080 h
->root
.u
.def
.section
= htab
->init
;
2086 /* Write out _ovly_table. */
2087 /* set low bit of .size to mark non-overlay area as present. */
2089 obfd
= htab
->ovtab
->output_section
->owner
;
2090 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
2092 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
2096 unsigned long off
= ovl_index
* 16;
2097 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
2099 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
2100 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16,
2102 /* file_off written later in spu_elf_modify_program_headers. */
2103 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
2107 h
= define_ovtab_symbol (htab
, "_ovly_table");
2110 h
->root
.u
.def
.value
= 16;
2111 h
->size
= htab
->num_overlays
* 16;
2113 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
2116 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2119 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
2122 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2123 h
->size
= htab
->num_buf
* 4;
2125 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
2128 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
2132 h
= define_ovtab_symbol (htab
, "_EAR_");
2135 h
->root
.u
.def
.section
= htab
->toe
;
2136 h
->root
.u
.def
.value
= 0;
2142 /* Check that all loadable section VMAs lie in the range
2143 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2146 spu_elf_check_vma (struct bfd_link_info
*info
)
2148 struct elf_segment_map
*m
;
2150 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2151 bfd
*abfd
= info
->output_bfd
;
2152 bfd_vma hi
= htab
->params
->local_store_hi
;
2153 bfd_vma lo
= htab
->params
->local_store_lo
;
2155 htab
->local_store
= hi
+ 1 - lo
;
2157 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
2158 if (m
->p_type
== PT_LOAD
)
2159 for (i
= 0; i
< m
->count
; i
++)
2160 if (m
->sections
[i
]->size
!= 0
2161 && (m
->sections
[i
]->vma
< lo
2162 || m
->sections
[i
]->vma
> hi
2163 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
2164 return m
->sections
[i
];
2169 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2170 Search for stack adjusting insns, and return the sp delta.
2171 If a store of lr is found save the instruction offset to *LR_STORE.
2172 If a stack adjusting instruction is found, save that offset to
2176 find_function_stack_adjust (asection
*sec
,
2183 memset (reg
, 0, sizeof (reg
));
2184 for ( ; offset
+ 4 <= sec
->size
; offset
+= 4)
2186 unsigned char buf
[4];
2190 /* Assume no relocs on stack adjusing insns. */
2191 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
2195 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
2197 if (buf
[0] == 0x24 /* stqd */)
2199 if (rt
== 0 /* lr */ && ra
== 1 /* sp */)
2204 /* Partly decoded immediate field. */
2205 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
2207 if (buf
[0] == 0x1c /* ai */)
2210 imm
= (imm
^ 0x200) - 0x200;
2211 reg
[rt
] = reg
[ra
] + imm
;
2213 if (rt
== 1 /* sp */)
2217 *sp_adjust
= offset
;
2221 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
2223 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2225 reg
[rt
] = reg
[ra
] + reg
[rb
];
2230 *sp_adjust
= offset
;
2234 else if (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */)
2236 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2238 reg
[rt
] = reg
[rb
] - reg
[ra
];
2243 *sp_adjust
= offset
;
2247 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2249 if (buf
[0] >= 0x42 /* ila */)
2250 imm
|= (buf
[0] & 1) << 17;
2255 if (buf
[0] == 0x40 /* il */)
2257 if ((buf
[1] & 0x80) == 0)
2259 imm
= (imm
^ 0x8000) - 0x8000;
2261 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
2267 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
2269 reg
[rt
] |= imm
& 0xffff;
2272 else if (buf
[0] == 0x04 /* ori */)
2275 imm
= (imm
^ 0x200) - 0x200;
2276 reg
[rt
] = reg
[ra
] | imm
;
2279 else if (buf
[0] == 0x32 && (buf
[1] & 0x80) != 0 /* fsmbi */)
2281 reg
[rt
] = ( ((imm
& 0x8000) ? 0xff000000 : 0)
2282 | ((imm
& 0x4000) ? 0x00ff0000 : 0)
2283 | ((imm
& 0x2000) ? 0x0000ff00 : 0)
2284 | ((imm
& 0x1000) ? 0x000000ff : 0));
2287 else if (buf
[0] == 0x16 /* andbi */)
2293 reg
[rt
] = reg
[ra
] & imm
;
2296 else if (buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
2298 /* Used in pic reg load. Say rt is trashed. Won't be used
2299 in stack adjust, but we need to continue past this branch. */
2303 else if (is_branch (buf
) || is_indirect_branch (buf
))
2304 /* If we hit a branch then we must be out of the prologue. */
2311 /* qsort predicate to sort symbols by section and value. */
2313 static Elf_Internal_Sym
*sort_syms_syms
;
2314 static asection
**sort_syms_psecs
;
2317 sort_syms (const void *a
, const void *b
)
2319 Elf_Internal_Sym
*const *s1
= a
;
2320 Elf_Internal_Sym
*const *s2
= b
;
2321 asection
*sec1
,*sec2
;
2322 bfd_signed_vma delta
;
2324 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
2325 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
2328 return sec1
->index
- sec2
->index
;
2330 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
2332 return delta
< 0 ? -1 : 1;
2334 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
2336 return delta
< 0 ? -1 : 1;
2338 return *s1
< *s2
? -1 : 1;
2341 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2342 entries for section SEC. */
2344 static struct spu_elf_stack_info
*
2345 alloc_stack_info (asection
*sec
, int max_fun
)
2347 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2350 amt
= sizeof (struct spu_elf_stack_info
);
2351 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
2352 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
2353 if (sec_data
->u
.i
.stack_info
!= NULL
)
2354 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
2355 return sec_data
->u
.i
.stack_info
;
2358 /* Add a new struct function_info describing a (part of a) function
2359 starting at SYM_H. Keep the array sorted by address. */
2361 static struct function_info
*
2362 maybe_insert_function (asection
*sec
,
2365 bfd_boolean is_func
)
2367 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2368 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2374 sinfo
= alloc_stack_info (sec
, 20);
2381 Elf_Internal_Sym
*sym
= sym_h
;
2382 off
= sym
->st_value
;
2383 size
= sym
->st_size
;
2387 struct elf_link_hash_entry
*h
= sym_h
;
2388 off
= h
->root
.u
.def
.value
;
2392 for (i
= sinfo
->num_fun
; --i
>= 0; )
2393 if (sinfo
->fun
[i
].lo
<= off
)
2398 /* Don't add another entry for an alias, but do update some
2400 if (sinfo
->fun
[i
].lo
== off
)
2402 /* Prefer globals over local syms. */
2403 if (global
&& !sinfo
->fun
[i
].global
)
2405 sinfo
->fun
[i
].global
= TRUE
;
2406 sinfo
->fun
[i
].u
.h
= sym_h
;
2409 sinfo
->fun
[i
].is_func
= TRUE
;
2410 return &sinfo
->fun
[i
];
2412 /* Ignore a zero-size symbol inside an existing function. */
2413 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
2414 return &sinfo
->fun
[i
];
2417 if (sinfo
->num_fun
>= sinfo
->max_fun
)
2419 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
2420 bfd_size_type old
= amt
;
2422 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2423 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
2424 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2425 sinfo
= bfd_realloc (sinfo
, amt
);
2428 memset ((char *) sinfo
+ old
, 0, amt
- old
);
2429 sec_data
->u
.i
.stack_info
= sinfo
;
2432 if (++i
< sinfo
->num_fun
)
2433 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
2434 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
2435 sinfo
->fun
[i
].is_func
= is_func
;
2436 sinfo
->fun
[i
].global
= global
;
2437 sinfo
->fun
[i
].sec
= sec
;
2439 sinfo
->fun
[i
].u
.h
= sym_h
;
2441 sinfo
->fun
[i
].u
.sym
= sym_h
;
2442 sinfo
->fun
[i
].lo
= off
;
2443 sinfo
->fun
[i
].hi
= off
+ size
;
2444 sinfo
->fun
[i
].lr_store
= -1;
2445 sinfo
->fun
[i
].sp_adjust
= -1;
2446 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
,
2447 &sinfo
->fun
[i
].lr_store
,
2448 &sinfo
->fun
[i
].sp_adjust
);
2449 sinfo
->num_fun
+= 1;
2450 return &sinfo
->fun
[i
];
2453 /* Return the name of FUN. */
2456 func_name (struct function_info
*fun
)
2460 Elf_Internal_Shdr
*symtab_hdr
;
2462 while (fun
->start
!= NULL
)
2466 return fun
->u
.h
->root
.root
.string
;
2469 if (fun
->u
.sym
->st_name
== 0)
2471 size_t len
= strlen (sec
->name
);
2472 char *name
= bfd_malloc (len
+ 10);
2475 sprintf (name
, "%s+%lx", sec
->name
,
2476 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
2480 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2481 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
2484 /* Read the instruction at OFF in SEC. Return true iff the instruction
2485 is a nop, lnop, or stop 0 (all zero insn). */
2488 is_nop (asection
*sec
, bfd_vma off
)
2490 unsigned char insn
[4];
2492 if (off
+ 4 > sec
->size
2493 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
2495 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
2497 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
2502 /* Extend the range of FUN to cover nop padding up to LIMIT.
2503 Return TRUE iff some instruction other than a NOP was found. */
2506 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
2508 bfd_vma off
= (fun
->hi
+ 3) & -4;
2510 while (off
< limit
&& is_nop (fun
->sec
, off
))
2521 /* Check and fix overlapping function ranges. Return TRUE iff there
2522 are gaps in the current info we have about functions in SEC. */
2525 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
2527 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2528 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2530 bfd_boolean gaps
= FALSE
;
2535 for (i
= 1; i
< sinfo
->num_fun
; i
++)
2536 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
2538 /* Fix overlapping symbols. */
2539 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
2540 const char *f2
= func_name (&sinfo
->fun
[i
]);
2542 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
2543 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
2545 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
2548 if (sinfo
->num_fun
== 0)
2552 if (sinfo
->fun
[0].lo
!= 0)
2554 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
2556 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
2558 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
2559 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
2561 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2567 /* Search current function info for a function that contains address
2568 OFFSET in section SEC. */
2570 static struct function_info
*
2571 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2573 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2574 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2578 hi
= sinfo
->num_fun
;
2581 mid
= (lo
+ hi
) / 2;
2582 if (offset
< sinfo
->fun
[mid
].lo
)
2584 else if (offset
>= sinfo
->fun
[mid
].hi
)
2587 return &sinfo
->fun
[mid
];
2589 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2591 bfd_set_error (bfd_error_bad_value
);
2595 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2596 if CALLEE was new. If this function return FALSE, CALLEE should
2600 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2602 struct call_info
**pp
, *p
;
2604 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2605 if (p
->fun
== callee
->fun
)
2607 /* Tail calls use less stack than normal calls. Retain entry
2608 for normal call over one for tail call. */
2609 p
->is_tail
&= callee
->is_tail
;
2612 p
->fun
->start
= NULL
;
2613 p
->fun
->is_func
= TRUE
;
2615 p
->count
+= callee
->count
;
2616 /* Reorder list so most recent call is first. */
2618 p
->next
= caller
->call_list
;
2619 caller
->call_list
= p
;
2622 callee
->next
= caller
->call_list
;
2623 caller
->call_list
= callee
;
2627 /* Copy CALL and insert the copy into CALLER. */
2630 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2632 struct call_info
*callee
;
2633 callee
= bfd_malloc (sizeof (*callee
));
2637 if (!insert_callee (caller
, callee
))
2642 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2643 overlay stub sections. */
2646 interesting_section (asection
*s
)
2648 return (s
->output_section
!= bfd_abs_section_ptr
2649 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2650 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2654 /* Rummage through the relocs for SEC, looking for function calls.
2655 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2656 mark destination symbols on calls as being functions. Also
2657 look at branches, which may be tail calls or go to hot/cold
2658 section part of same function. */
2661 mark_functions_via_relocs (asection
*sec
,
2662 struct bfd_link_info
*info
,
2665 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2666 Elf_Internal_Shdr
*symtab_hdr
;
2668 unsigned int priority
= 0;
2669 static bfd_boolean warned
;
2671 if (!interesting_section (sec
)
2672 || sec
->reloc_count
== 0)
2675 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2677 if (internal_relocs
== NULL
)
2680 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2681 psyms
= &symtab_hdr
->contents
;
2682 irela
= internal_relocs
;
2683 irelaend
= irela
+ sec
->reloc_count
;
2684 for (; irela
< irelaend
; irela
++)
2686 enum elf_spu_reloc_type r_type
;
2687 unsigned int r_indx
;
2689 Elf_Internal_Sym
*sym
;
2690 struct elf_link_hash_entry
*h
;
2692 bfd_boolean nonbranch
, is_call
;
2693 struct function_info
*caller
;
2694 struct call_info
*callee
;
2696 r_type
= ELF32_R_TYPE (irela
->r_info
);
2697 nonbranch
= r_type
!= R_SPU_REL16
&& r_type
!= R_SPU_ADDR16
;
2699 r_indx
= ELF32_R_SYM (irela
->r_info
);
2700 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2704 || sym_sec
->output_section
== bfd_abs_section_ptr
)
2710 unsigned char insn
[4];
2712 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2713 irela
->r_offset
, 4))
2715 if (is_branch (insn
))
2717 is_call
= (insn
[0] & 0xfd) == 0x31;
2718 priority
= insn
[1] & 0x0f;
2720 priority
|= insn
[2];
2722 priority
|= insn
[3];
2724 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2725 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2728 info
->callbacks
->einfo
2729 (_("%B(%A+0x%v): call to non-code section"
2730 " %B(%A), analysis incomplete\n"),
2731 sec
->owner
, sec
, irela
->r_offset
,
2732 sym_sec
->owner
, sym_sec
);
2747 /* For --auto-overlay, count possible stubs we need for
2748 function pointer references. */
2749 unsigned int sym_type
;
2753 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2754 if (sym_type
== STT_FUNC
)
2756 if (call_tree
&& spu_hash_table (info
)->params
->auto_overlay
)
2757 spu_hash_table (info
)->non_ovly_stub
+= 1;
2758 /* If the symbol type is STT_FUNC then this must be a
2759 function pointer initialisation. */
2762 /* Ignore data references. */
2763 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2764 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2766 /* Otherwise we probably have a jump table reloc for
2767 a switch statement or some other reference to a
2772 val
= h
->root
.u
.def
.value
;
2774 val
= sym
->st_value
;
2775 val
+= irela
->r_addend
;
2779 struct function_info
*fun
;
2781 if (irela
->r_addend
!= 0)
2783 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2786 fake
->st_value
= val
;
2788 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2792 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2794 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2797 if (irela
->r_addend
!= 0
2798 && fun
->u
.sym
!= sym
)
2803 caller
= find_function (sec
, irela
->r_offset
, info
);
2806 callee
= bfd_malloc (sizeof *callee
);
2810 callee
->fun
= find_function (sym_sec
, val
, info
);
2811 if (callee
->fun
== NULL
)
2813 callee
->is_tail
= !is_call
;
2814 callee
->is_pasted
= FALSE
;
2815 callee
->broken_cycle
= FALSE
;
2816 callee
->priority
= priority
;
2817 callee
->count
= nonbranch
? 0 : 1;
2818 if (callee
->fun
->last_caller
!= sec
)
2820 callee
->fun
->last_caller
= sec
;
2821 callee
->fun
->call_count
+= 1;
2823 if (!insert_callee (caller
, callee
))
2826 && !callee
->fun
->is_func
2827 && callee
->fun
->stack
== 0)
2829 /* This is either a tail call or a branch from one part of
2830 the function to another, ie. hot/cold section. If the
2831 destination has been called by some other function then
2832 it is a separate function. We also assume that functions
2833 are not split across input files. */
2834 if (sec
->owner
!= sym_sec
->owner
)
2836 callee
->fun
->start
= NULL
;
2837 callee
->fun
->is_func
= TRUE
;
2839 else if (callee
->fun
->start
== NULL
)
2841 struct function_info
*caller_start
= caller
;
2842 while (caller_start
->start
)
2843 caller_start
= caller_start
->start
;
2845 if (caller_start
!= callee
->fun
)
2846 callee
->fun
->start
= caller_start
;
2850 struct function_info
*callee_start
;
2851 struct function_info
*caller_start
;
2852 callee_start
= callee
->fun
;
2853 while (callee_start
->start
)
2854 callee_start
= callee_start
->start
;
2855 caller_start
= caller
;
2856 while (caller_start
->start
)
2857 caller_start
= caller_start
->start
;
2858 if (caller_start
!= callee_start
)
2860 callee
->fun
->start
= NULL
;
2861 callee
->fun
->is_func
= TRUE
;
2870 /* Handle something like .init or .fini, which has a piece of a function.
2871 These sections are pasted together to form a single function. */
2874 pasted_function (asection
*sec
)
2876 struct bfd_link_order
*l
;
2877 struct _spu_elf_section_data
*sec_data
;
2878 struct spu_elf_stack_info
*sinfo
;
2879 Elf_Internal_Sym
*fake
;
2880 struct function_info
*fun
, *fun_start
;
2882 fake
= bfd_zmalloc (sizeof (*fake
));
2886 fake
->st_size
= sec
->size
;
2888 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2889 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2893 /* Find a function immediately preceding this section. */
2895 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2897 if (l
->u
.indirect
.section
== sec
)
2899 if (fun_start
!= NULL
)
2901 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2905 fun
->start
= fun_start
;
2907 callee
->is_tail
= TRUE
;
2908 callee
->is_pasted
= TRUE
;
2909 callee
->broken_cycle
= FALSE
;
2910 callee
->priority
= 0;
2912 if (!insert_callee (fun_start
, callee
))
2918 if (l
->type
== bfd_indirect_link_order
2919 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2920 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2921 && sinfo
->num_fun
!= 0)
2922 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2925 /* Don't return an error if we did not find a function preceding this
2926 section. The section may have incorrect flags. */
2930 /* Map address ranges in code sections to functions. */
2933 discover_functions (struct bfd_link_info
*info
)
2937 Elf_Internal_Sym
***psym_arr
;
2938 asection
***sec_arr
;
2939 bfd_boolean gaps
= FALSE
;
2942 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2945 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2946 if (psym_arr
== NULL
)
2948 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2949 if (sec_arr
== NULL
)
2952 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2954 ibfd
= ibfd
->link_next
, bfd_idx
++)
2956 extern const bfd_target bfd_elf32_spu_vec
;
2957 Elf_Internal_Shdr
*symtab_hdr
;
2960 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2961 asection
**psecs
, **p
;
2963 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2966 /* Read all the symbols. */
2967 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2968 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2972 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2973 if (interesting_section (sec
))
2981 if (symtab_hdr
->contents
!= NULL
)
2983 /* Don't use cached symbols since the generic ELF linker
2984 code only reads local symbols, and we need globals too. */
2985 free (symtab_hdr
->contents
);
2986 symtab_hdr
->contents
= NULL
;
2988 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2990 symtab_hdr
->contents
= (void *) syms
;
2994 /* Select defined function symbols that are going to be output. */
2995 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2998 psym_arr
[bfd_idx
] = psyms
;
2999 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
3002 sec_arr
[bfd_idx
] = psecs
;
3003 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
3004 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
3005 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3009 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
3010 if (s
!= NULL
&& interesting_section (s
))
3013 symcount
= psy
- psyms
;
3016 /* Sort them by section and offset within section. */
3017 sort_syms_syms
= syms
;
3018 sort_syms_psecs
= psecs
;
3019 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
3021 /* Now inspect the function symbols. */
3022 for (psy
= psyms
; psy
< psyms
+ symcount
; )
3024 asection
*s
= psecs
[*psy
- syms
];
3025 Elf_Internal_Sym
**psy2
;
3027 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
3028 if (psecs
[*psy2
- syms
] != s
)
3031 if (!alloc_stack_info (s
, psy2
- psy
))
3036 /* First install info about properly typed and sized functions.
3037 In an ideal world this will cover all code sections, except
3038 when partitioning functions into hot and cold sections,
3039 and the horrible pasted together .init and .fini functions. */
3040 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
3043 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3045 asection
*s
= psecs
[sy
- syms
];
3046 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
3051 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3052 if (interesting_section (sec
))
3053 gaps
|= check_function_ranges (sec
, info
);
3058 /* See if we can discover more function symbols by looking at
3060 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3062 ibfd
= ibfd
->link_next
, bfd_idx
++)
3066 if (psym_arr
[bfd_idx
] == NULL
)
3069 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3070 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
3074 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3076 ibfd
= ibfd
->link_next
, bfd_idx
++)
3078 Elf_Internal_Shdr
*symtab_hdr
;
3080 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
3083 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
3086 psecs
= sec_arr
[bfd_idx
];
3088 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
3089 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
3092 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3093 if (interesting_section (sec
))
3094 gaps
|= check_function_ranges (sec
, info
);
3098 /* Finally, install all globals. */
3099 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
3103 s
= psecs
[sy
- syms
];
3105 /* Global syms might be improperly typed functions. */
3106 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
3107 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
3109 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
3115 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3117 extern const bfd_target bfd_elf32_spu_vec
;
3120 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3123 /* Some of the symbols we've installed as marking the
3124 beginning of functions may have a size of zero. Extend
3125 the range of such functions to the beginning of the
3126 next symbol of interest. */
3127 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3128 if (interesting_section (sec
))
3130 struct _spu_elf_section_data
*sec_data
;
3131 struct spu_elf_stack_info
*sinfo
;
3133 sec_data
= spu_elf_section_data (sec
);
3134 sinfo
= sec_data
->u
.i
.stack_info
;
3135 if (sinfo
!= NULL
&& sinfo
->num_fun
!= 0)
3138 bfd_vma hi
= sec
->size
;
3140 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
3142 sinfo
->fun
[fun_idx
].hi
= hi
;
3143 hi
= sinfo
->fun
[fun_idx
].lo
;
3146 sinfo
->fun
[0].lo
= 0;
3148 /* No symbols in this section. Must be .init or .fini
3149 or something similar. */
3150 else if (!pasted_function (sec
))
3156 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3158 ibfd
= ibfd
->link_next
, bfd_idx
++)
3160 if (psym_arr
[bfd_idx
] == NULL
)
3163 free (psym_arr
[bfd_idx
]);
3164 free (sec_arr
[bfd_idx
]);
3173 /* Iterate over all function_info we have collected, calling DOIT on
3174 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3178 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
3179 struct bfd_link_info
*,
3181 struct bfd_link_info
*info
,
3187 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3189 extern const bfd_target bfd_elf32_spu_vec
;
3192 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3195 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3197 struct _spu_elf_section_data
*sec_data
;
3198 struct spu_elf_stack_info
*sinfo
;
3200 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3201 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3204 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3205 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
3206 if (!doit (&sinfo
->fun
[i
], info
, param
))
3214 /* Transfer call info attached to struct function_info entries for
3215 all of a given function's sections to the first entry. */
3218 transfer_calls (struct function_info
*fun
,
3219 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3220 void *param ATTRIBUTE_UNUSED
)
3222 struct function_info
*start
= fun
->start
;
3226 struct call_info
*call
, *call_next
;
3228 while (start
->start
!= NULL
)
3229 start
= start
->start
;
3230 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
3232 call_next
= call
->next
;
3233 if (!insert_callee (start
, call
))
3236 fun
->call_list
= NULL
;
3241 /* Mark nodes in the call graph that are called by some other node. */
3244 mark_non_root (struct function_info
*fun
,
3245 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3246 void *param ATTRIBUTE_UNUSED
)
3248 struct call_info
*call
;
3253 for (call
= fun
->call_list
; call
; call
= call
->next
)
3255 call
->fun
->non_root
= TRUE
;
3256 mark_non_root (call
->fun
, 0, 0);
3261 /* Remove cycles from the call graph. Set depth of nodes. */
3264 remove_cycles (struct function_info
*fun
,
3265 struct bfd_link_info
*info
,
3268 struct call_info
**callp
, *call
;
3269 unsigned int depth
= *(unsigned int *) param
;
3270 unsigned int max_depth
= depth
;
3274 fun
->marking
= TRUE
;
3276 callp
= &fun
->call_list
;
3277 while ((call
= *callp
) != NULL
)
3279 call
->max_depth
= depth
+ !call
->is_pasted
;
3280 if (!call
->fun
->visit2
)
3282 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
3284 if (max_depth
< call
->max_depth
)
3285 max_depth
= call
->max_depth
;
3287 else if (call
->fun
->marking
)
3289 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3291 if (!htab
->params
->auto_overlay
3292 && htab
->params
->stack_analysis
)
3294 const char *f1
= func_name (fun
);
3295 const char *f2
= func_name (call
->fun
);
3297 info
->callbacks
->info (_("Stack analysis will ignore the call "
3302 call
->broken_cycle
= TRUE
;
3304 callp
= &call
->next
;
3306 fun
->marking
= FALSE
;
3307 *(unsigned int *) param
= max_depth
;
3311 /* Check that we actually visited all nodes in remove_cycles. If we
3312 didn't, then there is some cycle in the call graph not attached to
3313 any root node. Arbitrarily choose a node in the cycle as a new
3314 root and break the cycle. */
3317 mark_detached_root (struct function_info
*fun
,
3318 struct bfd_link_info
*info
,
3323 fun
->non_root
= FALSE
;
3324 *(unsigned int *) param
= 0;
3325 return remove_cycles (fun
, info
, param
);
3328 /* Populate call_list for each function. */
3331 build_call_tree (struct bfd_link_info
*info
)
3336 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3338 extern const bfd_target bfd_elf32_spu_vec
;
3341 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3344 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3345 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
3349 /* Transfer call info from hot/cold section part of function
3351 if (!spu_hash_table (info
)->params
->auto_overlay
3352 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
3355 /* Find the call graph root(s). */
3356 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
3359 /* Remove cycles from the call graph. We start from the root node(s)
3360 so that we break cycles in a reasonable place. */
3362 if (!for_each_node (remove_cycles
, info
, &depth
, TRUE
))
3365 return for_each_node (mark_detached_root
, info
, &depth
, FALSE
);
3368 /* qsort predicate to sort calls by priority, max_depth then count. */
3371 sort_calls (const void *a
, const void *b
)
3373 struct call_info
*const *c1
= a
;
3374 struct call_info
*const *c2
= b
;
3377 delta
= (*c2
)->priority
- (*c1
)->priority
;
3381 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
3385 delta
= (*c2
)->count
- (*c1
)->count
;
3389 return (char *) c1
- (char *) c2
;
3393 unsigned int max_overlay_size
;
3396 /* Set linker_mark and gc_mark on any sections that we will put in
3397 overlays. These flags are used by the generic ELF linker, but we
3398 won't be continuing on to bfd_elf_final_link so it is OK to use
3399 them. linker_mark is clear before we get here. Set segment_mark
3400 on sections that are part of a pasted function (excluding the last
3403 Set up function rodata section if --overlay-rodata. We don't
3404 currently include merged string constant rodata sections since
3406 Sort the call graph so that the deepest nodes will be visited
3410 mark_overlay_section (struct function_info
*fun
,
3411 struct bfd_link_info
*info
,
3414 struct call_info
*call
;
3416 struct _mos_param
*mos_param
= param
;
3417 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3423 if (!fun
->sec
->linker_mark
3424 && (htab
->params
->ovly_flavour
!= ovly_soft_icache
3425 || htab
->params
->non_ia_text
3426 || strncmp (fun
->sec
->name
, ".text.ia.", 9) == 0
3427 || strcmp (fun
->sec
->name
, ".init") == 0
3428 || strcmp (fun
->sec
->name
, ".fini") == 0))
3432 fun
->sec
->linker_mark
= 1;
3433 fun
->sec
->gc_mark
= 1;
3434 fun
->sec
->segment_mark
= 0;
3435 /* Ensure SEC_CODE is set on this text section (it ought to
3436 be!), and SEC_CODE is clear on rodata sections. We use
3437 this flag to differentiate the two overlay section types. */
3438 fun
->sec
->flags
|= SEC_CODE
;
3440 size
= fun
->sec
->size
;
3441 if (htab
->params
->auto_overlay
& OVERLAY_RODATA
)
3445 /* Find the rodata section corresponding to this function's
3447 if (strcmp (fun
->sec
->name
, ".text") == 0)
3449 name
= bfd_malloc (sizeof (".rodata"));
3452 memcpy (name
, ".rodata", sizeof (".rodata"));
3454 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
3456 size_t len
= strlen (fun
->sec
->name
);
3457 name
= bfd_malloc (len
+ 3);
3460 memcpy (name
, ".rodata", sizeof (".rodata"));
3461 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
3463 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
3465 size_t len
= strlen (fun
->sec
->name
) + 1;
3466 name
= bfd_malloc (len
);
3469 memcpy (name
, fun
->sec
->name
, len
);
3475 asection
*rodata
= NULL
;
3476 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
3477 if (group_sec
== NULL
)
3478 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
3480 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
3482 if (strcmp (group_sec
->name
, name
) == 0)
3487 group_sec
= elf_section_data (group_sec
)->next_in_group
;
3489 fun
->rodata
= rodata
;
3492 size
+= fun
->rodata
->size
;
3493 if (htab
->params
->line_size
!= 0
3494 && size
> htab
->params
->line_size
)
3496 size
-= fun
->rodata
->size
;
3501 fun
->rodata
->linker_mark
= 1;
3502 fun
->rodata
->gc_mark
= 1;
3503 fun
->rodata
->flags
&= ~SEC_CODE
;
3509 if (mos_param
->max_overlay_size
< size
)
3510 mos_param
->max_overlay_size
= size
;
3513 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3518 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
3522 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3523 calls
[count
++] = call
;
3525 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
3527 fun
->call_list
= NULL
;
3531 calls
[count
]->next
= fun
->call_list
;
3532 fun
->call_list
= calls
[count
];
3537 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3539 if (call
->is_pasted
)
3541 /* There can only be one is_pasted call per function_info. */
3542 BFD_ASSERT (!fun
->sec
->segment_mark
);
3543 fun
->sec
->segment_mark
= 1;
3545 if (!call
->broken_cycle
3546 && !mark_overlay_section (call
->fun
, info
, param
))
3550 /* Don't put entry code into an overlay. The overlay manager needs
3551 a stack! Also, don't mark .ovl.init as an overlay. */
3552 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
3553 == info
->output_bfd
->start_address
3554 || strncmp (fun
->sec
->output_section
->name
, ".ovl.init", 9) == 0)
3556 fun
->sec
->linker_mark
= 0;
3557 if (fun
->rodata
!= NULL
)
3558 fun
->rodata
->linker_mark
= 0;
3563 /* If non-zero then unmark functions called from those within sections
3564 that we need to unmark. Unfortunately this isn't reliable since the
3565 call graph cannot know the destination of function pointer calls. */
3566 #define RECURSE_UNMARK 0
3569 asection
*exclude_input_section
;
3570 asection
*exclude_output_section
;
3571 unsigned long clearing
;
3574 /* Undo some of mark_overlay_section's work. */
3577 unmark_overlay_section (struct function_info
*fun
,
3578 struct bfd_link_info
*info
,
3581 struct call_info
*call
;
3582 struct _uos_param
*uos_param
= param
;
3583 unsigned int excluded
= 0;
3591 if (fun
->sec
== uos_param
->exclude_input_section
3592 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
3596 uos_param
->clearing
+= excluded
;
3598 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
3600 fun
->sec
->linker_mark
= 0;
3602 fun
->rodata
->linker_mark
= 0;
3605 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3606 if (!call
->broken_cycle
3607 && !unmark_overlay_section (call
->fun
, info
, param
))
3611 uos_param
->clearing
-= excluded
;
3616 unsigned int lib_size
;
3617 asection
**lib_sections
;
3620 /* Add sections we have marked as belonging to overlays to an array
3621 for consideration as non-overlay sections. The array consist of
3622 pairs of sections, (text,rodata), for functions in the call graph. */
3625 collect_lib_sections (struct function_info
*fun
,
3626 struct bfd_link_info
*info
,
3629 struct _cl_param
*lib_param
= param
;
3630 struct call_info
*call
;
3637 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3640 size
= fun
->sec
->size
;
3642 size
+= fun
->rodata
->size
;
3644 if (size
<= lib_param
->lib_size
)
3646 *lib_param
->lib_sections
++ = fun
->sec
;
3647 fun
->sec
->gc_mark
= 0;
3648 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3650 *lib_param
->lib_sections
++ = fun
->rodata
;
3651 fun
->rodata
->gc_mark
= 0;
3654 *lib_param
->lib_sections
++ = NULL
;
3657 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3658 if (!call
->broken_cycle
)
3659 collect_lib_sections (call
->fun
, info
, param
);
3664 /* qsort predicate to sort sections by call count. */
3667 sort_lib (const void *a
, const void *b
)
3669 asection
*const *s1
= a
;
3670 asection
*const *s2
= b
;
3671 struct _spu_elf_section_data
*sec_data
;
3672 struct spu_elf_stack_info
*sinfo
;
3676 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3677 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3680 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3681 delta
-= sinfo
->fun
[i
].call_count
;
3684 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3685 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3688 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3689 delta
+= sinfo
->fun
[i
].call_count
;
3698 /* Remove some sections from those marked to be in overlays. Choose
3699 those that are called from many places, likely library functions. */
3702 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3705 asection
**lib_sections
;
3706 unsigned int i
, lib_count
;
3707 struct _cl_param collect_lib_param
;
3708 struct function_info dummy_caller
;
3709 struct spu_link_hash_table
*htab
;
3711 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3713 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3715 extern const bfd_target bfd_elf32_spu_vec
;
3718 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3721 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3722 if (sec
->linker_mark
3723 && sec
->size
< lib_size
3724 && (sec
->flags
& SEC_CODE
) != 0)
3727 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3728 if (lib_sections
== NULL
)
3729 return (unsigned int) -1;
3730 collect_lib_param
.lib_size
= lib_size
;
3731 collect_lib_param
.lib_sections
= lib_sections
;
3732 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3734 return (unsigned int) -1;
3735 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3737 /* Sort sections so that those with the most calls are first. */
3739 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3741 htab
= spu_hash_table (info
);
3742 for (i
= 0; i
< lib_count
; i
++)
3744 unsigned int tmp
, stub_size
;
3746 struct _spu_elf_section_data
*sec_data
;
3747 struct spu_elf_stack_info
*sinfo
;
3749 sec
= lib_sections
[2 * i
];
3750 /* If this section is OK, its size must be less than lib_size. */
3752 /* If it has a rodata section, then add that too. */
3753 if (lib_sections
[2 * i
+ 1])
3754 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3755 /* Add any new overlay call stubs needed by the section. */
3758 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3759 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3762 struct call_info
*call
;
3764 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3765 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3766 if (call
->fun
->sec
->linker_mark
)
3768 struct call_info
*p
;
3769 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3770 if (p
->fun
== call
->fun
)
3773 stub_size
+= ovl_stub_size (htab
->params
);
3776 if (tmp
+ stub_size
< lib_size
)
3778 struct call_info
**pp
, *p
;
3780 /* This section fits. Mark it as non-overlay. */
3781 lib_sections
[2 * i
]->linker_mark
= 0;
3782 if (lib_sections
[2 * i
+ 1])
3783 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3784 lib_size
-= tmp
+ stub_size
;
3785 /* Call stubs to the section we just added are no longer
3787 pp
= &dummy_caller
.call_list
;
3788 while ((p
= *pp
) != NULL
)
3789 if (!p
->fun
->sec
->linker_mark
)
3791 lib_size
+= ovl_stub_size (htab
->params
);
3797 /* Add new call stubs to dummy_caller. */
3798 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3799 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3802 struct call_info
*call
;
3804 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3805 for (call
= sinfo
->fun
[k
].call_list
;
3808 if (call
->fun
->sec
->linker_mark
)
3810 struct call_info
*callee
;
3811 callee
= bfd_malloc (sizeof (*callee
));
3813 return (unsigned int) -1;
3815 if (!insert_callee (&dummy_caller
, callee
))
3821 while (dummy_caller
.call_list
!= NULL
)
3823 struct call_info
*call
= dummy_caller
.call_list
;
3824 dummy_caller
.call_list
= call
->next
;
3827 for (i
= 0; i
< 2 * lib_count
; i
++)
3828 if (lib_sections
[i
])
3829 lib_sections
[i
]->gc_mark
= 1;
3830 free (lib_sections
);
3834 /* Build an array of overlay sections. The deepest node's section is
3835 added first, then its parent node's section, then everything called
3836 from the parent section. The idea being to group sections to
3837 minimise calls between different overlays. */
3840 collect_overlays (struct function_info
*fun
,
3841 struct bfd_link_info
*info
,
3844 struct call_info
*call
;
3845 bfd_boolean added_fun
;
3846 asection
***ovly_sections
= param
;
3852 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3853 if (!call
->is_pasted
&& !call
->broken_cycle
)
3855 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3861 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3863 fun
->sec
->gc_mark
= 0;
3864 *(*ovly_sections
)++ = fun
->sec
;
3865 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3867 fun
->rodata
->gc_mark
= 0;
3868 *(*ovly_sections
)++ = fun
->rodata
;
3871 *(*ovly_sections
)++ = NULL
;
3874 /* Pasted sections must stay with the first section. We don't
3875 put pasted sections in the array, just the first section.
3876 Mark subsequent sections as already considered. */
3877 if (fun
->sec
->segment_mark
)
3879 struct function_info
*call_fun
= fun
;
3882 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3883 if (call
->is_pasted
)
3885 call_fun
= call
->fun
;
3886 call_fun
->sec
->gc_mark
= 0;
3887 if (call_fun
->rodata
)
3888 call_fun
->rodata
->gc_mark
= 0;
3894 while (call_fun
->sec
->segment_mark
);
3898 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3899 if (!call
->broken_cycle
3900 && !collect_overlays (call
->fun
, info
, ovly_sections
))
3905 struct _spu_elf_section_data
*sec_data
;
3906 struct spu_elf_stack_info
*sinfo
;
3908 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3909 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3912 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3913 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3921 struct _sum_stack_param
{
3923 size_t overall_stack
;
3924 bfd_boolean emit_stack_syms
;
3927 /* Descend the call graph for FUN, accumulating total stack required. */
3930 sum_stack (struct function_info
*fun
,
3931 struct bfd_link_info
*info
,
3934 struct call_info
*call
;
3935 struct function_info
*max
;
3936 size_t stack
, cum_stack
;
3938 bfd_boolean has_call
;
3939 struct _sum_stack_param
*sum_stack_param
= param
;
3940 struct spu_link_hash_table
*htab
;
3942 cum_stack
= fun
->stack
;
3943 sum_stack_param
->cum_stack
= cum_stack
;
3949 for (call
= fun
->call_list
; call
; call
= call
->next
)
3951 if (call
->broken_cycle
)
3953 if (!call
->is_pasted
)
3955 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3957 stack
= sum_stack_param
->cum_stack
;
3958 /* Include caller stack for normal calls, don't do so for
3959 tail calls. fun->stack here is local stack usage for
3961 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3962 stack
+= fun
->stack
;
3963 if (cum_stack
< stack
)
3970 sum_stack_param
->cum_stack
= cum_stack
;
3972 /* Now fun->stack holds cumulative stack. */
3973 fun
->stack
= cum_stack
;
3977 && sum_stack_param
->overall_stack
< cum_stack
)
3978 sum_stack_param
->overall_stack
= cum_stack
;
3980 htab
= spu_hash_table (info
);
3981 if (htab
->params
->auto_overlay
)
3984 f1
= func_name (fun
);
3985 if (htab
->params
->stack_analysis
)
3988 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3989 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3990 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3994 info
->callbacks
->minfo (_(" calls:\n"));
3995 for (call
= fun
->call_list
; call
; call
= call
->next
)
3996 if (!call
->is_pasted
&& !call
->broken_cycle
)
3998 const char *f2
= func_name (call
->fun
);
3999 const char *ann1
= call
->fun
== max
? "*" : " ";
4000 const char *ann2
= call
->is_tail
? "t" : " ";
4002 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
4007 if (sum_stack_param
->emit_stack_syms
)
4009 char *name
= bfd_malloc (18 + strlen (f1
));
4010 struct elf_link_hash_entry
*h
;
4015 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
4016 sprintf (name
, "__stack_%s", f1
);
4018 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
4020 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
4023 && (h
->root
.type
== bfd_link_hash_new
4024 || h
->root
.type
== bfd_link_hash_undefined
4025 || h
->root
.type
== bfd_link_hash_undefweak
))
4027 h
->root
.type
= bfd_link_hash_defined
;
4028 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
4029 h
->root
.u
.def
.value
= cum_stack
;
4034 h
->ref_regular_nonweak
= 1;
4035 h
->forced_local
= 1;
4043 /* SEC is part of a pasted function. Return the call_info for the
4044 next section of this function. */
4046 static struct call_info
*
4047 find_pasted_call (asection
*sec
)
4049 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
4050 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
4051 struct call_info
*call
;
4054 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
4055 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
4056 if (call
->is_pasted
)
4062 /* qsort predicate to sort bfds by file name. */
4065 sort_bfds (const void *a
, const void *b
)
4067 bfd
*const *abfd1
= a
;
4068 bfd
*const *abfd2
= b
;
4070 return filename_cmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
4074 print_one_overlay_section (FILE *script
,
4077 unsigned int ovlynum
,
4078 unsigned int *ovly_map
,
4079 asection
**ovly_sections
,
4080 struct bfd_link_info
*info
)
4084 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4086 asection
*sec
= ovly_sections
[2 * j
];
4088 if (fprintf (script
, " %s%c%s (%s)\n",
4089 (sec
->owner
->my_archive
!= NULL
4090 ? sec
->owner
->my_archive
->filename
: ""),
4091 info
->path_separator
,
4092 sec
->owner
->filename
,
4095 if (sec
->segment_mark
)
4097 struct call_info
*call
= find_pasted_call (sec
);
4098 while (call
!= NULL
)
4100 struct function_info
*call_fun
= call
->fun
;
4101 sec
= call_fun
->sec
;
4102 if (fprintf (script
, " %s%c%s (%s)\n",
4103 (sec
->owner
->my_archive
!= NULL
4104 ? sec
->owner
->my_archive
->filename
: ""),
4105 info
->path_separator
,
4106 sec
->owner
->filename
,
4109 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4110 if (call
->is_pasted
)
4116 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4118 asection
*sec
= ovly_sections
[2 * j
+ 1];
4120 && fprintf (script
, " %s%c%s (%s)\n",
4121 (sec
->owner
->my_archive
!= NULL
4122 ? sec
->owner
->my_archive
->filename
: ""),
4123 info
->path_separator
,
4124 sec
->owner
->filename
,
4128 sec
= ovly_sections
[2 * j
];
4129 if (sec
->segment_mark
)
4131 struct call_info
*call
= find_pasted_call (sec
);
4132 while (call
!= NULL
)
4134 struct function_info
*call_fun
= call
->fun
;
4135 sec
= call_fun
->rodata
;
4137 && fprintf (script
, " %s%c%s (%s)\n",
4138 (sec
->owner
->my_archive
!= NULL
4139 ? sec
->owner
->my_archive
->filename
: ""),
4140 info
->path_separator
,
4141 sec
->owner
->filename
,
4144 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4145 if (call
->is_pasted
)
4154 /* Handle --auto-overlay. */
4157 spu_elf_auto_overlay (struct bfd_link_info
*info
)
4161 struct elf_segment_map
*m
;
4162 unsigned int fixed_size
, lo
, hi
;
4163 unsigned int reserved
;
4164 struct spu_link_hash_table
*htab
;
4165 unsigned int base
, i
, count
, bfd_count
;
4166 unsigned int region
, ovlynum
;
4167 asection
**ovly_sections
, **ovly_p
;
4168 unsigned int *ovly_map
;
4170 unsigned int total_overlay_size
, overlay_size
;
4171 const char *ovly_mgr_entry
;
4172 struct elf_link_hash_entry
*h
;
4173 struct _mos_param mos_param
;
4174 struct _uos_param uos_param
;
4175 struct function_info dummy_caller
;
4177 /* Find the extents of our loadable image. */
4178 lo
= (unsigned int) -1;
4180 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4181 if (m
->p_type
== PT_LOAD
)
4182 for (i
= 0; i
< m
->count
; i
++)
4183 if (m
->sections
[i
]->size
!= 0)
4185 if (m
->sections
[i
]->vma
< lo
)
4186 lo
= m
->sections
[i
]->vma
;
4187 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
4188 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
4190 fixed_size
= hi
+ 1 - lo
;
4192 if (!discover_functions (info
))
4195 if (!build_call_tree (info
))
4198 htab
= spu_hash_table (info
);
4199 reserved
= htab
->params
->auto_overlay_reserved
;
4202 struct _sum_stack_param sum_stack_param
;
4204 sum_stack_param
.emit_stack_syms
= 0;
4205 sum_stack_param
.overall_stack
= 0;
4206 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4208 reserved
= (sum_stack_param
.overall_stack
4209 + htab
->params
->extra_stack_space
);
4212 /* No need for overlays if everything already fits. */
4213 if (fixed_size
+ reserved
<= htab
->local_store
4214 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
4216 htab
->params
->auto_overlay
= 0;
4220 uos_param
.exclude_input_section
= 0;
4221 uos_param
.exclude_output_section
4222 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
4224 ovly_mgr_entry
= "__ovly_load";
4225 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4226 ovly_mgr_entry
= "__icache_br_handler";
4227 h
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
4228 FALSE
, FALSE
, FALSE
);
4230 && (h
->root
.type
== bfd_link_hash_defined
4231 || h
->root
.type
== bfd_link_hash_defweak
)
4234 /* We have a user supplied overlay manager. */
4235 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
4239 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4240 builtin version to .text, and will adjust .text size. */
4241 fixed_size
+= (*htab
->params
->spu_elf_load_ovl_mgr
) ();
4244 /* Mark overlay sections, and find max overlay section size. */
4245 mos_param
.max_overlay_size
= 0;
4246 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
4249 /* We can't put the overlay manager or interrupt routines in
4251 uos_param
.clearing
= 0;
4252 if ((uos_param
.exclude_input_section
4253 || uos_param
.exclude_output_section
)
4254 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
4258 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4260 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
4261 if (bfd_arr
== NULL
)
4264 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4267 total_overlay_size
= 0;
4268 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4270 extern const bfd_target bfd_elf32_spu_vec
;
4272 unsigned int old_count
;
4274 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
4278 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
4279 if (sec
->linker_mark
)
4281 if ((sec
->flags
& SEC_CODE
) != 0)
4283 fixed_size
-= sec
->size
;
4284 total_overlay_size
+= sec
->size
;
4286 else if ((sec
->flags
& (SEC_ALLOC
| SEC_LOAD
)) == (SEC_ALLOC
| SEC_LOAD
)
4287 && sec
->output_section
->owner
== info
->output_bfd
4288 && strncmp (sec
->output_section
->name
, ".ovl.init", 9) == 0)
4289 fixed_size
-= sec
->size
;
4290 if (count
!= old_count
)
4291 bfd_arr
[bfd_count
++] = ibfd
;
4294 /* Since the overlay link script selects sections by file name and
4295 section name, ensure that file names are unique. */
4298 bfd_boolean ok
= TRUE
;
4300 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
4301 for (i
= 1; i
< bfd_count
; ++i
)
4302 if (filename_cmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
4304 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
4306 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
4307 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
4308 bfd_arr
[i
]->filename
,
4309 bfd_arr
[i
]->my_archive
->filename
);
4311 info
->callbacks
->einfo (_("%s duplicated\n"),
4312 bfd_arr
[i
]->filename
);
4318 info
->callbacks
->einfo (_("sorry, no support for duplicate "
4319 "object files in auto-overlay script\n"));
4320 bfd_set_error (bfd_error_bad_value
);
4326 fixed_size
+= reserved
;
4327 fixed_size
+= htab
->non_ovly_stub
* ovl_stub_size (htab
->params
);
4328 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
4330 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4332 /* Stubs in the non-icache area are bigger. */
4333 fixed_size
+= htab
->non_ovly_stub
* 16;
4334 /* Space for icache manager tables.
4335 a) Tag array, one quadword per cache line.
4336 - word 0: ia address of present line, init to zero. */
4337 fixed_size
+= 16 << htab
->num_lines_log2
;
4338 /* b) Rewrite "to" list, one quadword per cache line. */
4339 fixed_size
+= 16 << htab
->num_lines_log2
;
4340 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4341 to a power-of-two number of full quadwords) per cache line. */
4342 fixed_size
+= 16 << (htab
->fromelem_size_log2
4343 + htab
->num_lines_log2
);
4344 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4349 /* Guess number of overlays. Assuming overlay buffer is on
4350 average only half full should be conservative. */
4351 ovlynum
= (total_overlay_size
* 2 * htab
->params
->num_lines
4352 / (htab
->local_store
- fixed_size
));
4353 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4354 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
4358 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
4359 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4360 "size of 0x%v exceeds local store\n"),
4361 (bfd_vma
) fixed_size
,
4362 (bfd_vma
) mos_param
.max_overlay_size
);
4364 /* Now see if we should put some functions in the non-overlay area. */
4365 else if (fixed_size
< htab
->params
->auto_overlay_fixed
)
4367 unsigned int max_fixed
, lib_size
;
4369 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
4370 if (max_fixed
> htab
->params
->auto_overlay_fixed
)
4371 max_fixed
= htab
->params
->auto_overlay_fixed
;
4372 lib_size
= max_fixed
- fixed_size
;
4373 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
4374 if (lib_size
== (unsigned int) -1)
4376 fixed_size
= max_fixed
- lib_size
;
4379 /* Build an array of sections, suitably sorted to place into
4381 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
4382 if (ovly_sections
== NULL
)
4384 ovly_p
= ovly_sections
;
4385 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
4387 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
4388 ovly_map
= bfd_malloc (count
* sizeof (*ovly_map
));
4389 if (ovly_map
== NULL
)
4392 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
4393 overlay_size
= (htab
->local_store
- fixed_size
) / htab
->params
->num_lines
;
4394 if (htab
->params
->line_size
!= 0)
4395 overlay_size
= htab
->params
->line_size
;
4398 while (base
< count
)
4400 unsigned int size
= 0, rosize
= 0, roalign
= 0;
4402 for (i
= base
; i
< count
; i
++)
4404 asection
*sec
, *rosec
;
4405 unsigned int tmp
, rotmp
;
4406 unsigned int num_stubs
;
4407 struct call_info
*call
, *pasty
;
4408 struct _spu_elf_section_data
*sec_data
;
4409 struct spu_elf_stack_info
*sinfo
;
4412 /* See whether we can add this section to the current
4413 overlay without overflowing our overlay buffer. */
4414 sec
= ovly_sections
[2 * i
];
4415 tmp
= align_power (size
, sec
->alignment_power
) + sec
->size
;
4417 rosec
= ovly_sections
[2 * i
+ 1];
4420 rotmp
= align_power (rotmp
, rosec
->alignment_power
) + rosec
->size
;
4421 if (roalign
< rosec
->alignment_power
)
4422 roalign
= rosec
->alignment_power
;
4424 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4426 if (sec
->segment_mark
)
4428 /* Pasted sections must stay together, so add their
4430 pasty
= find_pasted_call (sec
);
4431 while (pasty
!= NULL
)
4433 struct function_info
*call_fun
= pasty
->fun
;
4434 tmp
= (align_power (tmp
, call_fun
->sec
->alignment_power
)
4435 + call_fun
->sec
->size
);
4436 if (call_fun
->rodata
)
4438 rotmp
= (align_power (rotmp
,
4439 call_fun
->rodata
->alignment_power
)
4440 + call_fun
->rodata
->size
);
4441 if (roalign
< rosec
->alignment_power
)
4442 roalign
= rosec
->alignment_power
;
4444 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
4445 if (pasty
->is_pasted
)
4449 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4452 /* If we add this section, we might need new overlay call
4453 stubs. Add any overlay section calls to dummy_call. */
4455 sec_data
= spu_elf_section_data (sec
);
4456 sinfo
= sec_data
->u
.i
.stack_info
;
4457 for (k
= 0; k
< (unsigned) sinfo
->num_fun
; ++k
)
4458 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
4459 if (call
->is_pasted
)
4461 BFD_ASSERT (pasty
== NULL
);
4464 else if (call
->fun
->sec
->linker_mark
)
4466 if (!copy_callee (&dummy_caller
, call
))
4469 while (pasty
!= NULL
)
4471 struct function_info
*call_fun
= pasty
->fun
;
4473 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4474 if (call
->is_pasted
)
4476 BFD_ASSERT (pasty
== NULL
);
4479 else if (!copy_callee (&dummy_caller
, call
))
4483 /* Calculate call stub size. */
4485 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
4487 unsigned int stub_delta
= 1;
4489 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4490 stub_delta
= call
->count
;
4491 num_stubs
+= stub_delta
;
4493 /* If the call is within this overlay, we won't need a
4495 for (k
= base
; k
< i
+ 1; k
++)
4496 if (call
->fun
->sec
== ovly_sections
[2 * k
])
4498 num_stubs
-= stub_delta
;
4502 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4503 && num_stubs
> htab
->params
->max_branch
)
4505 if (align_power (tmp
, roalign
) + rotmp
4506 + num_stubs
* ovl_stub_size (htab
->params
) > overlay_size
)
4514 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
4515 ovly_sections
[2 * i
]->owner
,
4516 ovly_sections
[2 * i
],
4517 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
4518 bfd_set_error (bfd_error_bad_value
);
4522 while (dummy_caller
.call_list
!= NULL
)
4524 struct call_info
*call
= dummy_caller
.call_list
;
4525 dummy_caller
.call_list
= call
->next
;
4531 ovly_map
[base
++] = ovlynum
;
4534 script
= htab
->params
->spu_elf_open_overlay_script ();
4536 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4538 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4541 if (fprintf (script
,
4542 " . = ALIGN (%u);\n"
4543 " .ovl.init : { *(.ovl.init) }\n"
4544 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4545 htab
->params
->line_size
) <= 0)
4550 while (base
< count
)
4552 unsigned int indx
= ovlynum
- 1;
4553 unsigned int vma
, lma
;
4555 vma
= (indx
& (htab
->params
->num_lines
- 1)) << htab
->line_size_log2
;
4556 lma
= vma
+ (((indx
>> htab
->num_lines_log2
) + 1) << 18);
4558 if (fprintf (script
, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4559 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4560 ovlynum
, vma
, lma
) <= 0)
4563 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4564 ovly_map
, ovly_sections
, info
);
4565 if (base
== (unsigned) -1)
4568 if (fprintf (script
, " }\n") <= 0)
4574 if (fprintf (script
, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4575 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
)) <= 0)
4578 if (fprintf (script
, "}\nINSERT AFTER .toe;\n") <= 0)
4583 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4586 if (fprintf (script
,
4587 " . = ALIGN (16);\n"
4588 " .ovl.init : { *(.ovl.init) }\n"
4589 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4592 for (region
= 1; region
<= htab
->params
->num_lines
; region
++)
4596 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4604 /* We need to set lma since we are overlaying .ovl.init. */
4605 if (fprintf (script
,
4606 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4611 if (fprintf (script
, " OVERLAY :\n {\n") <= 0)
4615 while (base
< count
)
4617 if (fprintf (script
, " .ovly%u {\n", ovlynum
) <= 0)
4620 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4621 ovly_map
, ovly_sections
, info
);
4622 if (base
== (unsigned) -1)
4625 if (fprintf (script
, " }\n") <= 0)
4628 ovlynum
+= htab
->params
->num_lines
;
4629 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4633 if (fprintf (script
, " }\n") <= 0)
4637 if (fprintf (script
, "}\nINSERT BEFORE .text;\n") <= 0)
4642 free (ovly_sections
);
4644 if (fclose (script
) != 0)
4647 if (htab
->params
->auto_overlay
& AUTO_RELINK
)
4648 (*htab
->params
->spu_elf_relink
) ();
4653 bfd_set_error (bfd_error_system_call
);
4655 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
4659 /* Provide an estimate of total stack required. */
4662 spu_elf_stack_analysis (struct bfd_link_info
*info
)
4664 struct spu_link_hash_table
*htab
;
4665 struct _sum_stack_param sum_stack_param
;
4667 if (!discover_functions (info
))
4670 if (!build_call_tree (info
))
4673 htab
= spu_hash_table (info
);
4674 if (htab
->params
->stack_analysis
)
4676 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
4677 info
->callbacks
->minfo (_("\nStack size for functions. "
4678 "Annotations: '*' max stack, 't' tail call\n"));
4681 sum_stack_param
.emit_stack_syms
= htab
->params
->emit_stack_syms
;
4682 sum_stack_param
.overall_stack
= 0;
4683 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4686 if (htab
->params
->stack_analysis
)
4687 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
4688 (bfd_vma
) sum_stack_param
.overall_stack
);
4692 /* Perform a final link. */
4695 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
4697 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4699 if (htab
->params
->auto_overlay
)
4700 spu_elf_auto_overlay (info
);
4702 if ((htab
->params
->stack_analysis
4703 || (htab
->params
->ovly_flavour
== ovly_soft_icache
4704 && htab
->params
->lrlive_analysis
))
4705 && !spu_elf_stack_analysis (info
))
4706 info
->callbacks
->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4708 if (!spu_elf_build_stubs (info
))
4709 info
->callbacks
->einfo ("%F%P: can not build overlay stubs: %E\n");
4711 return bfd_elf_final_link (output_bfd
, info
);
4714 /* Called when not normally emitting relocs, ie. !info->relocatable
4715 and !info->emitrelocations. Returns a count of special relocs
4716 that need to be emitted. */
4719 spu_elf_count_relocs (struct bfd_link_info
*info
, asection
*sec
)
4721 Elf_Internal_Rela
*relocs
;
4722 unsigned int count
= 0;
4724 relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
4728 Elf_Internal_Rela
*rel
;
4729 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
4731 for (rel
= relocs
; rel
< relend
; rel
++)
4733 int r_type
= ELF32_R_TYPE (rel
->r_info
);
4734 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4738 if (elf_section_data (sec
)->relocs
!= relocs
)
4745 /* Functions for adding fixup records to .fixup */
4747 #define FIXUP_RECORD_SIZE 4
4749 #define FIXUP_PUT(output_bfd,htab,index,addr) \
4750 bfd_put_32 (output_bfd, addr, \
4751 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4752 #define FIXUP_GET(output_bfd,htab,index) \
4753 bfd_get_32 (output_bfd, \
4754 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4756 /* Store OFFSET in .fixup. This assumes it will be called with an
4757 increasing OFFSET. When this OFFSET fits with the last base offset,
4758 it just sets a bit, otherwise it adds a new fixup record. */
4760 spu_elf_emit_fixup (bfd
* output_bfd
, struct bfd_link_info
*info
,
4763 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4764 asection
*sfixup
= htab
->sfixup
;
4765 bfd_vma qaddr
= offset
& ~(bfd_vma
) 15;
4766 bfd_vma bit
= ((bfd_vma
) 8) >> ((offset
& 15) >> 2);
4767 if (sfixup
->reloc_count
== 0)
4769 FIXUP_PUT (output_bfd
, htab
, 0, qaddr
| bit
);
4770 sfixup
->reloc_count
++;
4774 bfd_vma base
= FIXUP_GET (output_bfd
, htab
, sfixup
->reloc_count
- 1);
4775 if (qaddr
!= (base
& ~(bfd_vma
) 15))
4777 if ((sfixup
->reloc_count
+ 1) * FIXUP_RECORD_SIZE
> sfixup
->size
)
4778 (*_bfd_error_handler
) (_("fatal error while creating .fixup"));
4779 FIXUP_PUT (output_bfd
, htab
, sfixup
->reloc_count
, qaddr
| bit
);
4780 sfixup
->reloc_count
++;
4783 FIXUP_PUT (output_bfd
, htab
, sfixup
->reloc_count
- 1, base
| bit
);
4787 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4790 spu_elf_relocate_section (bfd
*output_bfd
,
4791 struct bfd_link_info
*info
,
4793 asection
*input_section
,
4795 Elf_Internal_Rela
*relocs
,
4796 Elf_Internal_Sym
*local_syms
,
4797 asection
**local_sections
)
4799 Elf_Internal_Shdr
*symtab_hdr
;
4800 struct elf_link_hash_entry
**sym_hashes
;
4801 Elf_Internal_Rela
*rel
, *relend
;
4802 struct spu_link_hash_table
*htab
;
4805 bfd_boolean emit_these_relocs
= FALSE
;
4806 bfd_boolean is_ea_sym
;
4808 unsigned int iovl
= 0;
4810 htab
= spu_hash_table (info
);
4811 stubs
= (htab
->stub_sec
!= NULL
4812 && maybe_needs_stubs (input_section
));
4813 iovl
= overlay_index (input_section
);
4814 ea
= bfd_get_section_by_name (output_bfd
, "._ea");
4815 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
4816 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
4819 relend
= relocs
+ input_section
->reloc_count
;
4820 for (; rel
< relend
; rel
++)
4823 reloc_howto_type
*howto
;
4824 unsigned int r_symndx
;
4825 Elf_Internal_Sym
*sym
;
4827 struct elf_link_hash_entry
*h
;
4828 const char *sym_name
;
4831 bfd_reloc_status_type r
;
4832 bfd_boolean unresolved_reloc
;
4833 enum _stub_type stub_type
;
4835 r_symndx
= ELF32_R_SYM (rel
->r_info
);
4836 r_type
= ELF32_R_TYPE (rel
->r_info
);
4837 howto
= elf_howto_table
+ r_type
;
4838 unresolved_reloc
= FALSE
;
4842 if (r_symndx
< symtab_hdr
->sh_info
)
4844 sym
= local_syms
+ r_symndx
;
4845 sec
= local_sections
[r_symndx
];
4846 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
4847 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4851 if (sym_hashes
== NULL
)
4854 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
4856 while (h
->root
.type
== bfd_link_hash_indirect
4857 || h
->root
.type
== bfd_link_hash_warning
)
4858 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
4861 if (h
->root
.type
== bfd_link_hash_defined
4862 || h
->root
.type
== bfd_link_hash_defweak
)
4864 sec
= h
->root
.u
.def
.section
;
4866 || sec
->output_section
== NULL
)
4867 /* Set a flag that will be cleared later if we find a
4868 relocation value for this symbol. output_section
4869 is typically NULL for symbols satisfied by a shared
4871 unresolved_reloc
= TRUE
;
4873 relocation
= (h
->root
.u
.def
.value
4874 + sec
->output_section
->vma
4875 + sec
->output_offset
);
4877 else if (h
->root
.type
== bfd_link_hash_undefweak
)
4879 else if (info
->unresolved_syms_in_objects
== RM_IGNORE
4880 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
4882 else if (!info
->relocatable
4883 && !(r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
))
4886 err
= (info
->unresolved_syms_in_objects
== RM_GENERATE_ERROR
4887 || ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
);
4888 if (!info
->callbacks
->undefined_symbol (info
,
4889 h
->root
.root
.string
,
4892 rel
->r_offset
, err
))
4895 sym_name
= h
->root
.root
.string
;
4898 if (sec
!= NULL
&& elf_discarded_section (sec
))
4899 RELOC_AGAINST_DISCARDED_SECTION (info
, input_bfd
, input_section
,
4900 rel
, relend
, howto
, contents
);
4902 if (info
->relocatable
)
4905 /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4906 if (r_type
== R_SPU_ADD_PIC
4908 && !(h
->def_regular
|| ELF_COMMON_DEF_P (h
)))
4910 bfd_byte
*loc
= contents
+ rel
->r_offset
;
4916 is_ea_sym
= (ea
!= NULL
4918 && sec
->output_section
== ea
);
4920 /* If this symbol is in an overlay area, we may need to relocate
4921 to the overlay stub. */
4922 addend
= rel
->r_addend
;
4925 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4926 contents
, info
)) != no_stub
)
4928 unsigned int ovl
= 0;
4929 struct got_entry
*g
, **head
;
4931 if (stub_type
!= nonovl_stub
)
4935 head
= &h
->got
.glist
;
4937 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4939 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4940 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4942 && g
->br_addr
== (rel
->r_offset
4943 + input_section
->output_offset
4944 + input_section
->output_section
->vma
))
4945 : g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4950 relocation
= g
->stub_addr
;
4955 /* For soft icache, encode the overlay index into addresses. */
4956 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4957 && (r_type
== R_SPU_ADDR16_HI
4958 || r_type
== R_SPU_ADDR32
|| r_type
== R_SPU_REL32
)
4961 unsigned int ovl
= overlay_index (sec
);
4964 unsigned int set_id
= ((ovl
- 1) >> htab
->num_lines_log2
) + 1;
4965 relocation
+= set_id
<< 18;
4970 if (htab
->params
->emit_fixups
&& !info
->relocatable
4971 && (input_section
->flags
& SEC_ALLOC
) != 0
4972 && r_type
== R_SPU_ADDR32
)
4975 offset
= rel
->r_offset
+ input_section
->output_section
->vma
4976 + input_section
->output_offset
;
4977 spu_elf_emit_fixup (output_bfd
, info
, offset
);
4980 if (unresolved_reloc
)
4982 else if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4986 /* ._ea is a special section that isn't allocated in SPU
4987 memory, but rather occupies space in PPU memory as
4988 part of an embedded ELF image. If this reloc is
4989 against a symbol defined in ._ea, then transform the
4990 reloc into an equivalent one without a symbol
4991 relative to the start of the ELF image. */
4992 rel
->r_addend
+= (relocation
4994 + elf_section_data (ea
)->this_hdr
.sh_offset
);
4995 rel
->r_info
= ELF32_R_INFO (0, r_type
);
4997 emit_these_relocs
= TRUE
;
5001 unresolved_reloc
= TRUE
;
5003 if (unresolved_reloc
5004 && _bfd_elf_section_offset (output_bfd
, info
, input_section
,
5005 rel
->r_offset
) != (bfd_vma
) -1)
5007 (*_bfd_error_handler
)
5008 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
5010 bfd_get_section_name (input_bfd
, input_section
),
5011 (long) rel
->r_offset
,
5017 r
= _bfd_final_link_relocate (howto
,
5021 rel
->r_offset
, relocation
, addend
);
5023 if (r
!= bfd_reloc_ok
)
5025 const char *msg
= (const char *) 0;
5029 case bfd_reloc_overflow
:
5030 if (!((*info
->callbacks
->reloc_overflow
)
5031 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
5032 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
5036 case bfd_reloc_undefined
:
5037 if (!((*info
->callbacks
->undefined_symbol
)
5038 (info
, sym_name
, input_bfd
, input_section
,
5039 rel
->r_offset
, TRUE
)))
5043 case bfd_reloc_outofrange
:
5044 msg
= _("internal error: out of range error");
5047 case bfd_reloc_notsupported
:
5048 msg
= _("internal error: unsupported relocation error");
5051 case bfd_reloc_dangerous
:
5052 msg
= _("internal error: dangerous error");
5056 msg
= _("internal error: unknown error");
5061 if (!((*info
->callbacks
->warning
)
5062 (info
, msg
, sym_name
, input_bfd
, input_section
,
5071 && emit_these_relocs
5072 && !info
->emitrelocations
)
5074 Elf_Internal_Rela
*wrel
;
5075 Elf_Internal_Shdr
*rel_hdr
;
5077 wrel
= rel
= relocs
;
5078 relend
= relocs
+ input_section
->reloc_count
;
5079 for (; rel
< relend
; rel
++)
5083 r_type
= ELF32_R_TYPE (rel
->r_info
);
5084 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
5087 input_section
->reloc_count
= wrel
- relocs
;
5088 /* Backflips for _bfd_elf_link_output_relocs. */
5089 rel_hdr
= _bfd_elf_single_rel_hdr (input_section
);
5090 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
5098 spu_elf_finish_dynamic_sections (bfd
*output_bfd ATTRIBUTE_UNUSED
,
5099 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
5104 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5107 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
5108 const char *sym_name ATTRIBUTE_UNUSED
,
5109 Elf_Internal_Sym
*sym
,
5110 asection
*sym_sec ATTRIBUTE_UNUSED
,
5111 struct elf_link_hash_entry
*h
)
5113 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5115 if (!info
->relocatable
5116 && htab
->stub_sec
!= NULL
5118 && (h
->root
.type
== bfd_link_hash_defined
5119 || h
->root
.type
== bfd_link_hash_defweak
)
5121 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
5123 struct got_entry
*g
;
5125 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
5126 if (htab
->params
->ovly_flavour
== ovly_soft_icache
5127 ? g
->br_addr
== g
->stub_addr
5128 : g
->addend
== 0 && g
->ovl
== 0)
5130 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
5131 (htab
->stub_sec
[0]->output_section
->owner
,
5132 htab
->stub_sec
[0]->output_section
));
5133 sym
->st_value
= g
->stub_addr
;
5141 static int spu_plugin
= 0;
5144 spu_elf_plugin (int val
)
5149 /* Set ELF header e_type for plugins. */
5152 spu_elf_post_process_headers (bfd
*abfd
,
5153 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
5157 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
5159 i_ehdrp
->e_type
= ET_DYN
;
5163 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5164 segments for overlays. */
5167 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5174 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5175 extra
= htab
->num_overlays
;
5181 sec
= bfd_get_section_by_name (abfd
, ".toe");
5182 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
5188 /* Remove .toe section from other PT_LOAD segments and put it in
5189 a segment of its own. Put overlays in separate segments too. */
5192 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
5195 struct elf_segment_map
*m
, *m_overlay
;
5196 struct elf_segment_map
**p
, **p_overlay
;
5202 toe
= bfd_get_section_by_name (abfd
, ".toe");
5203 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
5204 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
5205 for (i
= 0; i
< m
->count
; i
++)
5206 if ((s
= m
->sections
[i
]) == toe
5207 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
5209 struct elf_segment_map
*m2
;
5212 if (i
+ 1 < m
->count
)
5214 amt
= sizeof (struct elf_segment_map
);
5215 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
5216 m2
= bfd_zalloc (abfd
, amt
);
5219 m2
->count
= m
->count
- (i
+ 1);
5220 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
5221 m2
->count
* sizeof (m
->sections
[0]));
5222 m2
->p_type
= PT_LOAD
;
5230 amt
= sizeof (struct elf_segment_map
);
5231 m2
= bfd_zalloc (abfd
, amt
);
5234 m2
->p_type
= PT_LOAD
;
5236 m2
->sections
[0] = s
;
5244 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5245 PT_LOAD segments. This can cause the .ovl.init section to be
5246 overwritten with the contents of some overlay segment. To work
5247 around this issue, we ensure that all PF_OVERLAY segments are
5248 sorted first amongst the program headers; this ensures that even
5249 with a broken loader, the .ovl.init section (which is not marked
5250 as PF_OVERLAY) will be placed into SPU local store on startup. */
5252 /* Move all overlay segments onto a separate list. */
5253 p
= &elf_tdata (abfd
)->segment_map
;
5254 p_overlay
= &m_overlay
;
5257 if ((*p
)->p_type
== PT_LOAD
&& (*p
)->count
== 1
5258 && spu_elf_section_data ((*p
)->sections
[0])->u
.o
.ovl_index
!= 0)
5263 p_overlay
= &m
->next
;
5270 /* Re-insert overlay segments at the head of the segment map. */
5271 *p_overlay
= elf_tdata (abfd
)->segment_map
;
5272 elf_tdata (abfd
)->segment_map
= m_overlay
;
5277 /* Tweak the section type of .note.spu_name. */
5280 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
5281 Elf_Internal_Shdr
*hdr
,
5284 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
5285 hdr
->sh_type
= SHT_NOTE
;
5289 /* Tweak phdrs before writing them out. */
5292 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5294 const struct elf_backend_data
*bed
;
5295 struct elf_obj_tdata
*tdata
;
5296 Elf_Internal_Phdr
*phdr
, *last
;
5297 struct spu_link_hash_table
*htab
;
5304 bed
= get_elf_backend_data (abfd
);
5305 tdata
= elf_tdata (abfd
);
5307 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
5308 htab
= spu_hash_table (info
);
5309 if (htab
->num_overlays
!= 0)
5311 struct elf_segment_map
*m
;
5314 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
5316 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
5318 /* Mark this as an overlay header. */
5319 phdr
[i
].p_flags
|= PF_OVERLAY
;
5321 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0
5322 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
5324 bfd_byte
*p
= htab
->ovtab
->contents
;
5325 unsigned int off
= o
* 16 + 8;
5327 /* Write file_off into _ovly_table. */
5328 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
5331 /* Soft-icache has its file offset put in .ovl.init. */
5332 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
5334 bfd_vma val
= elf_section_data (htab
->ovl_sec
[0])->this_hdr
.sh_offset
;
5336 bfd_put_32 (htab
->init
->owner
, val
, htab
->init
->contents
+ 4);
5340 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5341 of 16. This should always be possible when using the standard
5342 linker scripts, but don't create overlapping segments if
5343 someone is playing games with linker scripts. */
5345 for (i
= count
; i
-- != 0; )
5346 if (phdr
[i
].p_type
== PT_LOAD
)
5350 adjust
= -phdr
[i
].p_filesz
& 15;
5353 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
5356 adjust
= -phdr
[i
].p_memsz
& 15;
5359 && phdr
[i
].p_filesz
!= 0
5360 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
5361 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
5364 if (phdr
[i
].p_filesz
!= 0)
5368 if (i
== (unsigned int) -1)
5369 for (i
= count
; i
-- != 0; )
5370 if (phdr
[i
].p_type
== PT_LOAD
)
5374 adjust
= -phdr
[i
].p_filesz
& 15;
5375 phdr
[i
].p_filesz
+= adjust
;
5377 adjust
= -phdr
[i
].p_memsz
& 15;
5378 phdr
[i
].p_memsz
+= adjust
;
5385 spu_elf_size_sections (bfd
* output_bfd
, struct bfd_link_info
*info
)
5387 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5388 if (htab
->params
->emit_fixups
)
5390 asection
*sfixup
= htab
->sfixup
;
5391 int fixup_count
= 0;
5395 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
5399 if (bfd_get_flavour (ibfd
) != bfd_target_elf_flavour
)
5402 /* Walk over each section attached to the input bfd. */
5403 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
5405 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
5408 /* If there aren't any relocs, then there's nothing more
5410 if ((isec
->flags
& SEC_ALLOC
) == 0
5411 || (isec
->flags
& SEC_RELOC
) == 0
5412 || isec
->reloc_count
== 0)
5415 /* Get the relocs. */
5417 _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
5419 if (internal_relocs
== NULL
)
5422 /* 1 quadword can contain up to 4 R_SPU_ADDR32
5423 relocations. They are stored in a single word by
5424 saving the upper 28 bits of the address and setting the
5425 lower 4 bits to a bit mask of the words that have the
5426 relocation. BASE_END keeps track of the next quadword. */
5427 irela
= internal_relocs
;
5428 irelaend
= irela
+ isec
->reloc_count
;
5430 for (; irela
< irelaend
; irela
++)
5431 if (ELF32_R_TYPE (irela
->r_info
) == R_SPU_ADDR32
5432 && irela
->r_offset
>= base_end
)
5434 base_end
= (irela
->r_offset
& ~(bfd_vma
) 15) + 16;
5440 /* We always have a NULL fixup as a sentinel */
5441 size
= (fixup_count
+ 1) * FIXUP_RECORD_SIZE
;
5442 if (!bfd_set_section_size (output_bfd
, sfixup
, size
))
5444 sfixup
->contents
= (bfd_byte
*) bfd_zalloc (info
->input_bfds
, size
);
5445 if (sfixup
->contents
== NULL
)
/* Target vector and ELF backend hook definitions for the 32-bit SPU
   backend.  The fused source-line numbers left behind by the broken
   extraction (e.g. "5451 #define ...") have been stripped so these are
   valid preprocessor directives again; "elf32-target.h" (included at
   the end of the file) consumes them to instantiate the target.  */

#define TARGET_BIG_SYM		bfd_elf32_spu_vec
#define TARGET_BIG_NAME		"elf32-spu"
#define ELF_ARCH		bfd_arch_spu
#define ELF_TARGET_ID		SPU_ELF_DATA
#define ELF_MACHINE_CODE	EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE		0x80
#define elf_backend_rela_normal		1
#define elf_backend_can_gc_sections	1

#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
#define elf_info_to_howto			spu_elf_info_to_howto
#define elf_backend_count_relocs		spu_elf_count_relocs
#define elf_backend_relocate_section		spu_elf_relocate_section
#define elf_backend_finish_dynamic_sections	spu_elf_finish_dynamic_sections
#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
#define elf_backend_object_p			spu_elf_object_p
#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
#define elf_backend_modify_program_headers	spu_elf_modify_program_headers
#define elf_backend_post_process_headers	spu_elf_post_process_headers
#define elf_backend_fake_sections		spu_elf_fake_sections
#define elf_backend_special_sections		spu_elf_special_sections
#define bfd_elf32_bfd_final_link		spu_elf_final_link
5481 #include "elf32-target.h"