/* SPU specific support for 32-bit ELF

   Copyright (C) 2006-2021 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
#include "sysdep.h"
#include "libiberty.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"

/* All users of this file have bfd_octets_per_byte (abfd, sec) == 1.  */
#define OCTETS_PER_BYTE(ABFD, SEC) 1

/* We use RELA style relocs.  Don't define USE_REL.  */
static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
					   void *, asection *, bfd *, char **);

/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */
static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 3,  0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16, TRUE,   7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9,       2, 2,  9, TRUE,   0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9, TRUE,   0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32, TRUE,   0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
  HOWTO (R_SPU_ADD_PIC,    0, 0,  0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADD_PIC",
	 FALSE, 0, 0x00000000, FALSE),
};
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return (enum elf_spu_reloc_type) -1;
    case BFD_RELOC_NONE:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    case BFD_RELOC_SPU_ADD_PIC:
      return R_SPU_ADD_PIC;
    }
}
static bfd_boolean
spu_elf_info_to_howto (bfd *abfd,
		       arelent *cache_ptr,
		       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  /* PR 17512: file: 90c2a92e.  */
  if (r_type >= R_SPU_max)
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
			  abfd, r_type);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }
  cache_ptr->howto = &elf_howto_table[(int) r_type];
  return TRUE;
}
static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			   bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == (enum elf_spu_reloc_type) -1)
    return NULL;

  return elf_howto_table + r_type;
}
static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			   const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
	&& strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}
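/* Illustrative usage sketch, not part of the original source: consumers
   normally reach the howtos through the two lookup hooks above, e.g.

     reloc_howto_type *h1
       = spu_elf_reloc_type_lookup (abfd, BFD_RELOC_SPU_IMM16W);
     reloc_howto_type *h2
       = spu_elf_reloc_name_lookup (abfd, "SPU_ADDR16");

   Both calls should resolve to the R_SPU_ADDR16 entry of
   elf_howto_table, the first via spu_elf_bfd_to_reloc_type and the
   second via the case-insensitive name match.  */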
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * OCTETS_PER_BYTE (abfd, input_section);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
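/* Worked example, not from the original source: a forward displacement
   of 0x3c0 bytes gives val = 0xf0 after the "val >>= 2" scaling.  The
   splitting expression keeps the low seven bits (0x70) in place and
   copies bit 7 (0x80) to bit 14 and to bit 23, giving 0x00804070.
   ANDing with dst_mask then selects 0x00004070 for R_SPU_REL9I
   (mask 0x0000c07f) or 0x00800070 for R_SPU_REL9 (mask 0x0180007f),
   which is OR'd into the instruction word.  */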
static bfd_boolean
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
	return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}
/* Set up overlay info for executables.  */
266 spu_elf_object_p (bfd
*abfd
)
268 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
270 unsigned int i
, num_ovl
, num_buf
;
271 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
272 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
273 Elf_Internal_Phdr
*last_phdr
= NULL
;
275 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
276 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
281 if (last_phdr
== NULL
282 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
285 for (j
= 1; j
< elf_numsections (abfd
); j
++)
287 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
289 if (ELF_SECTION_SIZE (shdr
, phdr
) != 0
290 && ELF_SECTION_IN_SEGMENT (shdr
, phdr
))
292 asection
*sec
= shdr
->bfd_section
;
293 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
294 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;
}
/* SPU ELF linker hash table.  */
316 struct spu_link_hash_table
318 struct elf_link_hash_table elf
;
320 struct spu_elf_params
*params
;
322 /* Shortcuts to overlay sections. */
328 /* Count of stubs in each overlay section. */
329 unsigned int *stub_count
;
331 /* The stub section for each overlay section. */
334 struct elf_link_hash_entry
*ovly_entry
[2];
336 /* Number of overlay buffers. */
337 unsigned int num_buf
;
339 /* Total number of overlays. */
340 unsigned int num_overlays
;
342 /* For soft icache. */
343 unsigned int line_size_log2
;
344 unsigned int num_lines_log2
;
345 unsigned int fromelem_size_log2
;
347 /* How much memory we have. */
348 unsigned int local_store
;
350 /* Count of overlay stubs needed in non-overlay area. */
351 unsigned int non_ovly_stub
;
353 /* Pointer to the fixup section */
357 unsigned int stub_err
: 1;
360 /* Hijack the generic got fields for overlay stub accounting. */
364 struct got_entry
*next
;
373 #define spu_hash_table(p) \
374 ((is_elf_hash_table ((p)->hash) \
375 && elf_hash_table_id (elf_hash_table (p)) == SPU_ELF_DATA) \
376 ? (struct spu_link_hash_table *) (p)->hash : NULL)
380 struct function_info
*fun
;
381 struct call_info
*next
;
383 unsigned int max_depth
;
384 unsigned int is_tail
: 1;
385 unsigned int is_pasted
: 1;
386 unsigned int broken_cycle
: 1;
387 unsigned int priority
: 13;
392 /* List of functions called. Also branches to hot/cold part of
394 struct call_info
*call_list
;
395 /* For hot/cold part of function, point to owner. */
396 struct function_info
*start
;
397 /* Symbol at start of function. */
399 Elf_Internal_Sym
*sym
;
400 struct elf_link_hash_entry
*h
;
402 /* Function section. */
405 /* Where last called from, and number of sections called from. */
406 asection
*last_caller
;
407 unsigned int call_count
;
408 /* Address range of (this part of) function. */
410 /* Offset where we found a store of lr, or -1 if none found. */
412 /* Offset where we found the stack adjustment insn. */
416 /* Distance from root of call tree. Tail and hot/cold branches
417 count as one deeper. We aren't counting stack frames here. */
419 /* Set if global symbol. */
420 unsigned int global
: 1;
421 /* Set if known to be start of function (as distinct from a hunk
422 in hot/cold section. */
423 unsigned int is_func
: 1;
424 /* Set if not a root node. */
425 unsigned int non_root
: 1;
426 /* Flags used during call tree traversal. It's cheaper to replicate
427 the visit flags than have one which needs clearing after a traversal. */
428 unsigned int visit1
: 1;
429 unsigned int visit2
: 1;
430 unsigned int marking
: 1;
431 unsigned int visit3
: 1;
432 unsigned int visit4
: 1;
433 unsigned int visit5
: 1;
434 unsigned int visit6
: 1;
435 unsigned int visit7
: 1;
438 struct spu_elf_stack_info
442 /* Variable size array describing functions, one per contiguous
443 address range belonging to a function. */
444 struct function_info fun
[1];
447 static struct function_info
*find_function (asection
*, bfd_vma
,
448 struct bfd_link_info
*);
450 /* Create a spu ELF linker hash table. */
452 static struct bfd_link_hash_table
*
453 spu_elf_link_hash_table_create (bfd
*abfd
)
455 struct spu_link_hash_table
*htab
;
457 htab
= bfd_zmalloc (sizeof (*htab
));
461 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
462 _bfd_elf_link_hash_newfunc
,
463 sizeof (struct elf_link_hash_entry
),
470 htab
->elf
.init_got_refcount
.refcount
= 0;
471 htab
->elf
.init_got_refcount
.glist
= NULL
;
472 htab
->elf
.init_got_offset
.offset
= 0;
473 htab
->elf
.init_got_offset
.glist
= NULL
;
474 return &htab
->elf
.root
;
void
spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
{
  bfd_vma max_branch_log2;

  struct spu_link_hash_table *htab = spu_hash_table (info);
  htab->params = params;
  htab->line_size_log2 = bfd_log2 (htab->params->line_size);
  htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);

  /* For the software i-cache, we provide a "from" list whose size
     is a power-of-two number of quadwords, big enough to hold one
     byte per outgoing branch.  Compute this number here.  */
  max_branch_log2 = bfd_log2 (htab->params->max_branch);
  htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
}
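/* Worked example, not from the original source, using assumed parameter
   values line_size = 1024, num_lines = 32 and max_branch = 256: the
   code above gives line_size_log2 = 10, num_lines_log2 = 5,
   max_branch_log2 = 8 and fromelem_size_log2 = 4, i.e. a "from" list
   element of 16 << 4 = 256 bytes per cache line, one byte for each of
   the 256 possible outgoing branches.  */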
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */
499 get_sym_h (struct elf_link_hash_entry
**hp
,
500 Elf_Internal_Sym
**symp
,
502 Elf_Internal_Sym
**locsymsp
,
503 unsigned long r_symndx
,
506 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
508 if (r_symndx
>= symtab_hdr
->sh_info
)
510 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
511 struct elf_link_hash_entry
*h
;
513 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
514 while (h
->root
.type
== bfd_link_hash_indirect
515 || h
->root
.type
== bfd_link_hash_warning
)
516 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
526 asection
*symsec
= NULL
;
527 if (h
->root
.type
== bfd_link_hash_defined
528 || h
->root
.type
== bfd_link_hash_defweak
)
529 symsec
= h
->root
.u
.def
.section
;
535 Elf_Internal_Sym
*sym
;
536 Elf_Internal_Sym
*locsyms
= *locsymsp
;
540 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
542 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
544 0, NULL
, NULL
, NULL
);
549 sym
= locsyms
+ r_symndx
;
558 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */
568 spu_elf_create_sections (struct bfd_link_info
*info
)
570 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
573 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
574 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
579 /* Make SPU_PTNOTE_SPUNAME section. */
586 ibfd
= info
->input_bfds
;
587 /* This should really be SEC_LINKER_CREATED, but then we'd need
588 to write out the section ourselves. */
589 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
590 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
592 || !bfd_set_section_alignment (s
, 4))
594 /* Because we didn't set SEC_LINKER_CREATED we need to set the
595 proper section type. */
596 elf_section_type (s
) = SHT_NOTE
;
598 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
599 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
600 size
+= (name_len
+ 3) & -4;
602 if (!bfd_set_section_size (s
, size
))
605 data
= bfd_zalloc (ibfd
, size
);
609 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
610 bfd_put_32 (ibfd
, name_len
, data
+ 4);
611 bfd_put_32 (ibfd
, 1, data
+ 8);
612 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
613 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
614 bfd_get_filename (info
->output_bfd
), name_len
);
618 if (htab
->params
->emit_fixups
)
623 if (htab
->elf
.dynobj
== NULL
)
624 htab
->elf
.dynobj
= ibfd
;
625 ibfd
= htab
->elf
.dynobj
;
626 flags
= (SEC_LOAD
| SEC_ALLOC
| SEC_READONLY
| SEC_HAS_CONTENTS
627 | SEC_IN_MEMORY
| SEC_LINKER_CREATED
);
628 s
= bfd_make_section_anyway_with_flags (ibfd
, ".fixup", flags
);
629 if (s
== NULL
|| !bfd_set_section_alignment (s
, 2))
/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}
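/* Usage sketch, not part of the original source: spu_elf_find_overlays
   below sorts its array of allocated output sections with

     qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

   so that sections with overlapping vmas end up adjacent and can be
   recognised as overlays.  */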
/* Identify overlays in the output bfd, and number them.
   Returns 0 on error, 1 if no overlays, 2 if overlays.  */
656 spu_elf_find_overlays (struct bfd_link_info
*info
)
658 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
659 asection
**alloc_sec
;
660 unsigned int i
, n
, ovl_index
, num_buf
;
663 static const char *const entry_names
[2][2] = {
664 { "__ovly_load", "__icache_br_handler" },
665 { "__ovly_return", "__icache_call_handler" }
668 if (info
->output_bfd
->section_count
< 2)
672 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
673 if (alloc_sec
== NULL
)
676 /* Pick out all the alloced sections. */
677 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
678 if ((s
->flags
& SEC_ALLOC
) != 0
679 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
689 /* Sort them by vma. */
690 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
692 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
693 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
695 unsigned int prev_buf
= 0, set_id
= 0;
697 /* Look for an overlapping vma to find the first overlay section. */
698 bfd_vma vma_start
= 0;
700 for (i
= 1; i
< n
; i
++)
703 if (s
->vma
< ovl_end
)
705 asection
*s0
= alloc_sec
[i
- 1];
709 << (htab
->num_lines_log2
+ htab
->line_size_log2
)));
714 ovl_end
= s
->vma
+ s
->size
;
717 /* Now find any sections within the cache area. */
718 for (ovl_index
= 0, num_buf
= 0; i
< n
; i
++)
721 if (s
->vma
>= ovl_end
)
	  /* A section in an overlay area called .ovl.init is not
	     an overlay, in the sense that it might be loaded in
	     by the overlay manager, but rather the initial
	     section contents for the overlay buffer.  */
728 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
730 num_buf
= ((s
->vma
- vma_start
) >> htab
->line_size_log2
) + 1;
731 set_id
= (num_buf
== prev_buf
)? set_id
+ 1 : 0;
734 if ((s
->vma
- vma_start
) & (htab
->params
->line_size
- 1))
736 info
->callbacks
->einfo (_("%X%P: overlay section %pA "
737 "does not start on a cache line\n"),
739 bfd_set_error (bfd_error_bad_value
);
742 else if (s
->size
> htab
->params
->line_size
)
744 info
->callbacks
->einfo (_("%X%P: overlay section %pA "
745 "is larger than a cache line\n"),
747 bfd_set_error (bfd_error_bad_value
);
751 alloc_sec
[ovl_index
++] = s
;
752 spu_elf_section_data (s
)->u
.o
.ovl_index
753 = (set_id
<< htab
->num_lines_log2
) + num_buf
;
754 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
758 /* Ensure there are no more overlay sections. */
762 if (s
->vma
< ovl_end
)
764 info
->callbacks
->einfo (_("%X%P: overlay section %pA "
765 "is not in cache area\n"),
767 bfd_set_error (bfd_error_bad_value
);
771 ovl_end
= s
->vma
+ s
->size
;
      /* Look for overlapping vmas.  Any with overlap must be overlays.
	 Count them.  Also count the number of overlay regions.  */
778 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
781 if (s
->vma
< ovl_end
)
783 asection
*s0
= alloc_sec
[i
- 1];
785 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
788 if (strncmp (s0
->name
, ".ovl.init", 9) != 0)
790 alloc_sec
[ovl_index
] = s0
;
791 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
792 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= num_buf
;
795 ovl_end
= s
->vma
+ s
->size
;
797 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
799 alloc_sec
[ovl_index
] = s
;
800 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
801 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
802 if (s0
->vma
!= s
->vma
)
804 /* xgettext:c-format */
805 info
->callbacks
->einfo (_("%X%P: overlay sections %pA "
806 "and %pA do not start at the "
809 bfd_set_error (bfd_error_bad_value
);
812 if (ovl_end
< s
->vma
+ s
->size
)
813 ovl_end
= s
->vma
+ s
->size
;
817 ovl_end
= s
->vma
+ s
->size
;
821 htab
->num_overlays
= ovl_index
;
822 htab
->num_buf
= num_buf
;
823 htab
->ovl_sec
= alloc_sec
;
828 for (i
= 0; i
< 2; i
++)
831 struct elf_link_hash_entry
*h
;
833 name
= entry_names
[i
][htab
->params
->ovly_flavour
];
834 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
838 if (h
->root
.type
== bfd_link_hash_new
)
840 h
->root
.type
= bfd_link_hash_undefined
;
842 h
->ref_regular_nonweak
= 1;
845 htab
->ovly_entry
[i
] = h
;
/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

#define BRA	0x30000000
#define BRASL	0x31000000
#define BR	0x32000000
#define BRSL	0x33000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000

/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bfd_boolean
is_branch (const unsigned char *insn)
{
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}

/* Return true for all indirect branch instructions.
   bihnz 00100101 011  */

static bfd_boolean
is_indirect_branch (const unsigned char *insn)
{
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.  */

static bfd_boolean
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}
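/* Illustrative example, not from the original source: the first byte of
   a big-endian SPU instruction word selects the major opcode, so a
   "brsl" word (BRSL = 0x33000000 plus its operand fields) starts with
   byte 0x33 and satisfies (0x33 & 0xec) == 0x20 in is_branch (together
   with the (insn[1] & 0x80) == 0 test on the second byte), while
   is_hint rejects it because (0x33 & 0xfc) == 0x30 and
   is_indirect_branch rejects it because (0x33 & 0xef) == 0x23.  */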
/* True if INPUT_SECTION might need overlay stubs.  */
907 maybe_needs_stubs (asection
*input_section
)
909 /* No stubs for debug sections and suchlike. */
910 if ((input_section
->flags
& SEC_ALLOC
) == 0)
913 /* No stubs for link-once sections that will be discarded. */
914 if (input_section
->output_section
== bfd_abs_section_ptr
)
917 /* Don't create stubs for .eh_frame references. */
918 if (strcmp (input_section
->name
, ".eh_frame") == 0)
/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */
943 static enum _stub_type
944 needs_ovl_stub (struct elf_link_hash_entry
*h
,
945 Elf_Internal_Sym
*sym
,
947 asection
*input_section
,
948 Elf_Internal_Rela
*irela
,
950 struct bfd_link_info
*info
)
952 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
953 enum elf_spu_reloc_type r_type
;
954 unsigned int sym_type
;
955 bfd_boolean branch
, hint
, call
;
956 enum _stub_type ret
= no_stub
;
960 || sym_sec
->output_section
== bfd_abs_section_ptr
961 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
966 /* Ensure no stubs for user supplied overlay manager syms. */
967 if (h
== htab
->ovly_entry
[0] || h
== htab
->ovly_entry
[1])
      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
973 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
974 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
981 sym_type
= ELF_ST_TYPE (sym
->st_info
);
983 r_type
= ELF32_R_TYPE (irela
->r_info
);
987 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
989 if (contents
== NULL
)
992 if (!bfd_get_section_contents (input_section
->owner
,
999 contents
+= irela
->r_offset
;
1001 branch
= is_branch (contents
);
1002 hint
= is_hint (contents
);
1005 call
= (contents
[0] & 0xfd) == 0x31;
1007 && sym_type
!= STT_FUNC
1008 && contents
!= insn
)
	  /* It's common for people to write assembly and forget
	     to give function symbols the right type.  Handle
	     calls to such symbols, but warn so that (hopefully)
	     people will fix their code.  We need the symbol
	     type to be correct to distinguish function pointer
	     initialisation from other pointer initialisations.  */
1016 const char *sym_name
;
1019 sym_name
= h
->root
.root
.string
;
1022 Elf_Internal_Shdr
*symtab_hdr
;
1023 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
1024 sym_name
= bfd_elf_sym_name (input_section
->owner
,
1030 /* xgettext:c-format */
1031 (_("warning: call to non-function symbol %s defined in %pB"),
1032 sym_name
, sym_sec
->owner
);
1038 if ((!branch
&& htab
->params
->ovly_flavour
== ovly_soft_icache
)
1039 || (sym_type
!= STT_FUNC
1040 && !(branch
|| hint
)
1041 && (sym_sec
->flags
& SEC_CODE
) == 0))
1044 /* Usually, symbols in non-overlay sections don't need stubs. */
1045 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
1046 && !htab
->params
->non_overlay_stubs
)
1049 /* A reference from some other section to a symbol in an overlay
1050 section needs a stub. */
1051 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
1052 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
1054 unsigned int lrlive
= 0;
1056 lrlive
= (contents
[1] & 0x70) >> 4;
1058 if (!lrlive
&& (call
|| sym_type
== STT_FUNC
))
1059 ret
= call_ovl_stub
;
1061 ret
= br000_ovl_stub
+ lrlive
;
  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
1067 if (!(branch
|| hint
)
1068 && sym_type
== STT_FUNC
1069 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
1076 count_stub (struct spu_link_hash_table
*htab
,
1079 enum _stub_type stub_type
,
1080 struct elf_link_hash_entry
*h
,
1081 const Elf_Internal_Rela
*irela
)
1083 unsigned int ovl
= 0;
1084 struct got_entry
*g
, **head
;
  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
1092 if (stub_type
!= nonovl_stub
)
1093 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1096 head
= &h
->got
.glist
;
1099 if (elf_local_got_ents (ibfd
) == NULL
)
1101 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
1102 * sizeof (*elf_local_got_ents (ibfd
)));
1103 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
1104 if (elf_local_got_ents (ibfd
) == NULL
)
1107 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1110 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1112 htab
->stub_count
[ovl
] += 1;
1118 addend
= irela
->r_addend
;
1122 struct got_entry
*gnext
;
1124 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1125 if (g
->addend
== addend
&& g
->ovl
== 0)
1130 /* Need a new non-overlay area stub. Zap other stubs. */
1131 for (g
= *head
; g
!= NULL
; g
= gnext
)
1134 if (g
->addend
== addend
)
1136 htab
->stub_count
[g
->ovl
] -= 1;
1144 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1145 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1151 g
= bfd_malloc (sizeof *g
);
1156 g
->stub_addr
= (bfd_vma
) -1;
1160 htab
->stub_count
[ovl
] += 1;
/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.
   Soft-icache stubs are four or eight words.  */

static unsigned int
ovl_stub_size (struct spu_elf_params *params)
{
  return 16 << params->ovly_flavour >> params->compact_stub;
}

static unsigned int
ovl_stub_size_log2 (struct spu_elf_params *params)
{
  return 4 + params->ovly_flavour - params->compact_stub;
}
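/* Worked example, not from the original source, assuming the enum
   values ovly_normal == 0 and ovly_soft_icache == 1 implied by the
   arithmetic above: a normal full-size stub is 16 << 0 >> 0 = 16 bytes
   (four instructions), a normal compact stub is 16 >> 1 = 8 bytes (two
   instructions), and a soft-icache stub is 16 << 1 >> 1 = 16 bytes when
   compact or 32 bytes otherwise, matching the "four or eight words" in
   the comment above.  */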
/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load

   Software icache stubs are:

   .word lrlive_branchlocalstoreaddr;
   brasl $75,__icache_br_handler  */
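/* Worked example, not from the original source: for a compact stub
   branching to local-store address 0x12345 in overlay number 3, the
   target_ovl_and_address word is (0x12345 & 0x3ffff) | (3 << 18),
   i.e. 0x000d2345, which is what build_stub below stores with
   bfd_put_32 after the brsl.  */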
1207 build_stub (struct bfd_link_info
*info
,
1210 enum _stub_type stub_type
,
1211 struct elf_link_hash_entry
*h
,
1212 const Elf_Internal_Rela
*irela
,
1216 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1217 unsigned int ovl
, dest_ovl
, set_id
;
1218 struct got_entry
*g
, **head
;
1220 bfd_vma addend
, from
, to
, br_dest
, patt
;
1221 unsigned int lrlive
;
1224 if (stub_type
!= nonovl_stub
)
1225 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1228 head
= &h
->got
.glist
;
1230 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1234 addend
= irela
->r_addend
;
1236 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1238 g
= bfd_malloc (sizeof *g
);
1244 g
->br_addr
= (irela
->r_offset
1245 + isec
->output_offset
1246 + isec
->output_section
->vma
);
1252 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1253 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1258 if (g
->ovl
== 0 && ovl
!= 0)
1261 if (g
->stub_addr
!= (bfd_vma
) -1)
1265 sec
= htab
->stub_sec
[ovl
];
1266 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
1267 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
1268 g
->stub_addr
= from
;
1269 to
= (htab
->ovly_entry
[0]->root
.u
.def
.value
1270 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_offset
1271 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_section
->vma
);
1273 if (((dest
| to
| from
) & 3) != 0)
1278 dest_ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
1280 if (htab
->params
->ovly_flavour
== ovly_normal
1281 && !htab
->params
->compact_stub
)
1283 bfd_put_32 (sec
->owner
, ILA
+ ((dest_ovl
<< 7) & 0x01ffff80) + 78,
1284 sec
->contents
+ sec
->size
);
1285 bfd_put_32 (sec
->owner
, LNOP
,
1286 sec
->contents
+ sec
->size
+ 4);
1287 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
1288 sec
->contents
+ sec
->size
+ 8);
1290 bfd_put_32 (sec
->owner
, BR
+ (((to
- (from
+ 12)) << 5) & 0x007fff80),
1291 sec
->contents
+ sec
->size
+ 12);
1293 bfd_put_32 (sec
->owner
, BRA
+ ((to
<< 5) & 0x007fff80),
1294 sec
->contents
+ sec
->size
+ 12);
1296 else if (htab
->params
->ovly_flavour
== ovly_normal
1297 && htab
->params
->compact_stub
)
1300 bfd_put_32 (sec
->owner
, BRSL
+ (((to
- from
) << 5) & 0x007fff80) + 75,
1301 sec
->contents
+ sec
->size
);
1303 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1304 sec
->contents
+ sec
->size
);
1305 bfd_put_32 (sec
->owner
, (dest
& 0x3ffff) | (dest_ovl
<< 18),
1306 sec
->contents
+ sec
->size
+ 4);
1308 else if (htab
->params
->ovly_flavour
== ovly_soft_icache
1309 && htab
->params
->compact_stub
)
1312 if (stub_type
== nonovl_stub
)
1314 else if (stub_type
== call_ovl_stub
)
1315 /* A brsl makes lr live and *(*sp+16) is live.
1316 Tail calls have the same liveness. */
1318 else if (!htab
->params
->lrlive_analysis
)
1319 /* Assume stack frame and lr save. */
1321 else if (irela
!= NULL
)
1323 /* Analyse branch instructions. */
1324 struct function_info
*caller
;
1327 caller
= find_function (isec
, irela
->r_offset
, info
);
1328 if (caller
->start
== NULL
)
1329 off
= irela
->r_offset
;
1332 struct function_info
*found
= NULL
;
1334 /* Find the earliest piece of this function that
1335 has frame adjusting instructions. We might
1336 see dynamic frame adjustment (eg. for alloca)
1337 in some later piece, but functions using
1338 alloca always set up a frame earlier. Frame
1339 setup instructions are always in one piece. */
1340 if (caller
->lr_store
!= (bfd_vma
) -1
1341 || caller
->sp_adjust
!= (bfd_vma
) -1)
1343 while (caller
->start
!= NULL
)
1345 caller
= caller
->start
;
1346 if (caller
->lr_store
!= (bfd_vma
) -1
1347 || caller
->sp_adjust
!= (bfd_vma
) -1)
1355 if (off
> caller
->sp_adjust
)
1357 if (off
> caller
->lr_store
)
1358 /* Only *(*sp+16) is live. */
1361 /* If no lr save, then we must be in a
1362 leaf function with a frame.
1363 lr is still live. */
1366 else if (off
> caller
->lr_store
)
1368 /* Between lr save and stack adjust. */
1370 /* This should never happen since prologues won't
1375 /* On entry to function. */
1378 if (stub_type
!= br000_ovl_stub
1379 && lrlive
!= stub_type
- br000_ovl_stub
)
1380 /* xgettext:c-format */
1381 info
->callbacks
->einfo (_("%pA:0x%v lrlive .brinfo (%u) differs "
1382 "from analysis (%u)\n"),
1383 isec
, irela
->r_offset
, lrlive
,
1384 stub_type
- br000_ovl_stub
);
1387 /* If given lrlive info via .brinfo, use it. */
1388 if (stub_type
> br000_ovl_stub
)
1389 lrlive
= stub_type
- br000_ovl_stub
;
1392 to
= (htab
->ovly_entry
[1]->root
.u
.def
.value
1393 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_offset
1394 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_section
->vma
);
1396 /* The branch that uses this stub goes to stub_addr + 4. We'll
1397 set up an xor pattern that can be used by the icache manager
1398 to modify this branch to go directly to its destination. */
1400 br_dest
= g
->stub_addr
;
1403 /* Except in the case of _SPUEAR_ stubs, the branch in
1404 question is the one in the stub itself. */
1405 BFD_ASSERT (stub_type
== nonovl_stub
);
1406 g
->br_addr
= g
->stub_addr
;
1410 set_id
= ((dest_ovl
- 1) >> htab
->num_lines_log2
) + 1;
1411 bfd_put_32 (sec
->owner
, (set_id
<< 18) | (dest
& 0x3ffff),
1412 sec
->contents
+ sec
->size
);
1413 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1414 sec
->contents
+ sec
->size
+ 4);
1415 bfd_put_32 (sec
->owner
, (lrlive
<< 29) | (g
->br_addr
& 0x3ffff),
1416 sec
->contents
+ sec
->size
+ 8);
1417 patt
= dest
^ br_dest
;
1418 if (irela
!= NULL
&& ELF32_R_TYPE (irela
->r_info
) == R_SPU_REL16
)
1419 patt
= (dest
- g
->br_addr
) ^ (br_dest
- g
->br_addr
);
1420 bfd_put_32 (sec
->owner
, (patt
<< 5) & 0x007fff80,
1421 sec
->contents
+ sec
->size
+ 12);
1424 /* Extra space for linked list entries. */
1430 sec
->size
+= ovl_stub_size (htab
->params
);
1432 if (htab
->params
->emit_stub_syms
)
1438 len
= 8 + sizeof (".ovl_call.") - 1;
1440 len
+= strlen (h
->root
.root
.string
);
1445 add
= (int) irela
->r_addend
& 0xffffffff;
1448 name
= bfd_malloc (len
+ 1);
1452 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1454 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1456 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1457 dest_sec
->id
& 0xffffffff,
1458 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1460 sprintf (name
+ len
- 9, "+%x", add
);
1462 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1466 if (h
->root
.type
== bfd_link_hash_new
)
1468 h
->root
.type
= bfd_link_hash_defined
;
1469 h
->root
.u
.def
.section
= sec
;
1470 h
->size
= ovl_stub_size (htab
->params
);
1471 h
->root
.u
.def
.value
= sec
->size
- h
->size
;
1475 h
->ref_regular_nonweak
= 1;
1476 h
->forced_local
= 1;
1484 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1488 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1490 /* Symbols starting with _SPUEAR_ need a stub because they may be
1491 invoked by the PPU. */
1492 struct bfd_link_info
*info
= inf
;
1493 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1496 if ((h
->root
.type
== bfd_link_hash_defined
1497 || h
->root
.type
== bfd_link_hash_defweak
)
1499 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1500 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1501 && sym_sec
->output_section
!= bfd_abs_section_ptr
1502 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1503 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1504 || htab
->params
->non_overlay_stubs
))
1506 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1513 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1515 /* Symbols starting with _SPUEAR_ need a stub because they may be
1516 invoked by the PPU. */
1517 struct bfd_link_info
*info
= inf
;
1518 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1521 if ((h
->root
.type
== bfd_link_hash_defined
1522 || h
->root
.type
== bfd_link_hash_defweak
)
1524 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1525 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1526 && sym_sec
->output_section
!= bfd_abs_section_ptr
1527 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1528 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1529 || htab
->params
->non_overlay_stubs
))
1531 return build_stub (info
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1532 h
->root
.u
.def
.value
, sym_sec
);
1538 /* Size or build stubs. */
1541 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1543 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1546 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
1548 extern const bfd_target spu_elf32_vec
;
1549 Elf_Internal_Shdr
*symtab_hdr
;
1551 Elf_Internal_Sym
*local_syms
= NULL
;
1553 if (ibfd
->xvec
!= &spu_elf32_vec
)
1556 /* We'll need the symbol table in a second. */
1557 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1558 if (symtab_hdr
->sh_info
== 0)
1561 /* Walk over each section attached to the input bfd. */
1562 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1564 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1566 /* If there aren't any relocs, then there's nothing more to do. */
1567 if ((isec
->flags
& SEC_RELOC
) == 0
1568 || isec
->reloc_count
== 0)
1571 if (!maybe_needs_stubs (isec
))
1574 /* Get the relocs. */
1575 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1577 if (internal_relocs
== NULL
)
1578 goto error_ret_free_local
;
1580 /* Now examine each relocation. */
1581 irela
= internal_relocs
;
1582 irelaend
= irela
+ isec
->reloc_count
;
1583 for (; irela
< irelaend
; irela
++)
1585 enum elf_spu_reloc_type r_type
;
1586 unsigned int r_indx
;
1588 Elf_Internal_Sym
*sym
;
1589 struct elf_link_hash_entry
*h
;
1590 enum _stub_type stub_type
;
1592 r_type
= ELF32_R_TYPE (irela
->r_info
);
1593 r_indx
= ELF32_R_SYM (irela
->r_info
);
1595 if (r_type
>= R_SPU_max
)
1597 bfd_set_error (bfd_error_bad_value
);
1598 error_ret_free_internal
:
1599 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1600 free (internal_relocs
);
1601 error_ret_free_local
:
1602 if (symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1607 /* Determine the reloc target section. */
1608 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1609 goto error_ret_free_internal
;
1611 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1613 if (stub_type
== no_stub
)
1615 else if (stub_type
== stub_error
)
1616 goto error_ret_free_internal
;
1618 if (htab
->stub_count
== NULL
)
1621 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1622 htab
->stub_count
= bfd_zmalloc (amt
);
1623 if (htab
->stub_count
== NULL
)
1624 goto error_ret_free_internal
;
1629 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1630 goto error_ret_free_internal
;
1637 dest
= h
->root
.u
.def
.value
;
1639 dest
= sym
->st_value
;
1640 dest
+= irela
->r_addend
;
1641 if (!build_stub (info
, ibfd
, isec
, stub_type
, h
, irela
,
1643 goto error_ret_free_internal
;
1647 /* We're done with the internal relocs, free them. */
1648 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1649 free (internal_relocs
);
1652 if (local_syms
!= NULL
1653 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1655 if (!info
->keep_memory
)
1658 symtab_hdr
->contents
= (unsigned char *) local_syms
;
/* Allocate space for overlay call and return stubs.
   Return 0 on error, 1 if no overlays, 2 otherwise.  */
1669 spu_elf_size_stubs (struct bfd_link_info
*info
)
1671 struct spu_link_hash_table
*htab
;
1678 if (!process_stubs (info
, FALSE
))
1681 htab
= spu_hash_table (info
);
1682 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1686 ibfd
= info
->input_bfds
;
1687 if (htab
->stub_count
!= NULL
)
1689 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1690 htab
->stub_sec
= bfd_zmalloc (amt
);
1691 if (htab
->stub_sec
== NULL
)
1694 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1695 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1696 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1697 htab
->stub_sec
[0] = stub
;
1699 || !bfd_set_section_alignment (stub
,
1700 ovl_stub_size_log2 (htab
->params
)))
1702 stub
->size
= htab
->stub_count
[0] * ovl_stub_size (htab
->params
);
1703 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1704 /* Extra space for linked list entries. */
1705 stub
->size
+= htab
->stub_count
[0] * 16;
1707 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1709 asection
*osec
= htab
->ovl_sec
[i
];
1710 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1711 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1712 htab
->stub_sec
[ovl
] = stub
;
1714 || !bfd_set_section_alignment (stub
,
1715 ovl_stub_size_log2 (htab
->params
)))
1717 stub
->size
= htab
->stub_count
[ovl
] * ovl_stub_size (htab
->params
);
1721 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
      /* Space for icache manager tables.
	 a) Tag array, one quadword per cache line.
	 b) Rewrite "to" list, one quadword per cache line.
	 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
	    a power-of-two number of full quadwords) per cache line.  */
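      /* Worked example, not from the original source: with assumed
	 values num_lines_log2 == 5 and fromelem_size_log2 == 4, the
	 .ovtab size computed below is
	 (16 + 16 + (16 << 4)) << 5 == 288 * 32 == 9216 bytes.  */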
1730 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1731 if (htab
->ovtab
== NULL
1732 || !bfd_set_section_alignment (htab
->ovtab
, 4))
1735 htab
->ovtab
->size
= (16 + 16 + (16 << htab
->fromelem_size_log2
))
1736 << htab
->num_lines_log2
;
1738 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1739 htab
->init
= bfd_make_section_anyway_with_flags (ibfd
, ".ovini", flags
);
1740 if (htab
->init
== NULL
1741 || !bfd_set_section_alignment (htab
->init
, 4))
1744 htab
->init
->size
= 16;
1746 else if (htab
->stub_count
== NULL
)
1750 /* htab->ovtab consists of two arrays.
1760 . } _ovly_buf_table[];
1763 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1764 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1765 if (htab
->ovtab
== NULL
1766 || !bfd_set_section_alignment (htab
->ovtab
, 4))
1769 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1772 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1773 if (htab
->toe
== NULL
1774 || !bfd_set_section_alignment (htab
->toe
, 4))
1776 htab
->toe
->size
= 16;
/* Called from ld to place overlay manager data sections.  This is done
   after the overlay manager itself is loaded, mainly so that the
   linker's htab->init section is placed after any other .ovl.init
   sections.  */
1787 spu_elf_place_overlay_data (struct bfd_link_info
*info
)
1789 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1792 if (htab
->stub_sec
!= NULL
)
1794 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[0], NULL
, ".text");
1796 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1798 asection
*osec
= htab
->ovl_sec
[i
];
1799 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1800 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[ovl
], osec
, NULL
);
1804 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1805 (*htab
->params
->place_spu_section
) (htab
->init
, NULL
, ".ovl.init");
1807 if (htab
->ovtab
!= NULL
)
1809 const char *ovout
= ".data";
1810 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1812 (*htab
->params
->place_spu_section
) (htab
->ovtab
, NULL
, ovout
);
1815 if (htab
->toe
!= NULL
)
1816 (*htab
->params
->place_spu_section
) (htab
->toe
, NULL
, ".toe");
1819 /* Functions to handle embedded spu_ovl.o object. */
1822 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1828 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1834 struct _ovl_stream
*os
;
1838 os
= (struct _ovl_stream
*) stream
;
1839 max
= (const char *) os
->end
- (const char *) os
->start
;
1841 if ((ufile_ptr
) offset
>= max
)
1845 if (count
> max
- offset
)
1846 count
= max
- offset
;
1848 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1853 ovl_mgr_stat (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1857 struct _ovl_stream
*os
= (struct _ovl_stream
*) stream
;
1859 memset (sb
, 0, sizeof (*sb
));
1860 sb
->st_size
= (const char *) os
->end
- (const char *) os
->start
;
1865 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1867 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1874 return *ovl_bfd
!= NULL
;
1878 overlay_index (asection
*sec
)
1881 || sec
->output_section
== bfd_abs_section_ptr
)
1883 return spu_elf_section_data (sec
->output_section
)->u
.o
.ovl_index
;
1886 /* Define an STT_OBJECT symbol. */
1888 static struct elf_link_hash_entry
*
1889 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1891 struct elf_link_hash_entry
*h
;
1893 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1897 if (h
->root
.type
!= bfd_link_hash_defined
1900 h
->root
.type
= bfd_link_hash_defined
;
1901 h
->root
.u
.def
.section
= htab
->ovtab
;
1902 h
->type
= STT_OBJECT
;
1905 h
->ref_regular_nonweak
= 1;
1908 else if (h
->root
.u
.def
.section
->owner
!= NULL
)
1910 /* xgettext:c-format */
1911 _bfd_error_handler (_("%pB is not allowed to define %s"),
1912 h
->root
.u
.def
.section
->owner
,
1913 h
->root
.root
.string
);
1914 bfd_set_error (bfd_error_bad_value
);
1919 _bfd_error_handler (_("you are not allowed to define %s in a script"),
1920 h
->root
.root
.string
);
1921 bfd_set_error (bfd_error_bad_value
);
1928 /* Fill in all stubs and the overlay tables. */
1931 spu_elf_build_stubs (struct bfd_link_info
*info
)
1933 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1934 struct elf_link_hash_entry
*h
;
1940 if (htab
->num_overlays
!= 0)
1942 for (i
= 0; i
< 2; i
++)
1944 h
= htab
->ovly_entry
[i
];
1946 && (h
->root
.type
== bfd_link_hash_defined
1947 || h
->root
.type
== bfd_link_hash_defweak
)
1950 s
= h
->root
.u
.def
.section
->output_section
;
1951 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1953 _bfd_error_handler (_("%s in overlay section"),
1954 h
->root
.root
.string
);
1955 bfd_set_error (bfd_error_bad_value
);
1962 if (htab
->stub_sec
!= NULL
)
1964 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1965 if (htab
->stub_sec
[i
]->size
!= 0)
1967 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1968 htab
->stub_sec
[i
]->size
);
1969 if (htab
->stub_sec
[i
]->contents
== NULL
)
1971 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1972 htab
->stub_sec
[i
]->size
= 0;
1975 /* Fill in all the stubs. */
1976 process_stubs (info
, TRUE
);
1977 if (!htab
->stub_err
)
1978 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1982 _bfd_error_handler (_("overlay stub relocation overflow"));
1983 bfd_set_error (bfd_error_bad_value
);
1987 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1989 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1991 _bfd_error_handler (_("stubs don't match calculated size"));
1992 bfd_set_error (bfd_error_bad_value
);
1995 htab
->stub_sec
[i
]->rawsize
= 0;
1999 if (htab
->ovtab
== NULL
|| htab
->ovtab
->size
== 0)
2002 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
2003 if (htab
->ovtab
->contents
== NULL
)
2006 p
= htab
->ovtab
->contents
;
2007 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
2011 h
= define_ovtab_symbol (htab
, "__icache_tag_array");
2014 h
->root
.u
.def
.value
= 0;
2015 h
->size
= 16 << htab
->num_lines_log2
;
2018 h
= define_ovtab_symbol (htab
, "__icache_tag_array_size");
2021 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
2022 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2024 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to");
2027 h
->root
.u
.def
.value
= off
;
2028 h
->size
= 16 << htab
->num_lines_log2
;
2031 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to_size");
2034 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
2035 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2037 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from");
2040 h
->root
.u
.def
.value
= off
;
2041 h
->size
= 16 << (htab
->fromelem_size_log2
+ htab
->num_lines_log2
);
2044 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from_size");
2047 h
->root
.u
.def
.value
= 16 << (htab
->fromelem_size_log2
2048 + htab
->num_lines_log2
);
2049 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2051 h
= define_ovtab_symbol (htab
, "__icache_log2_fromelemsize");
2054 h
->root
.u
.def
.value
= htab
->fromelem_size_log2
;
2055 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2057 h
= define_ovtab_symbol (htab
, "__icache_base");
2060 h
->root
.u
.def
.value
= htab
->ovl_sec
[0]->vma
;
2061 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2062 h
->size
= htab
->num_buf
<< htab
->line_size_log2
;
2064 h
= define_ovtab_symbol (htab
, "__icache_linesize");
2067 h
->root
.u
.def
.value
= 1 << htab
->line_size_log2
;
2068 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2070 h
= define_ovtab_symbol (htab
, "__icache_log2_linesize");
2073 h
->root
.u
.def
.value
= htab
->line_size_log2
;
2074 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2076 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_linesize");
2079 h
->root
.u
.def
.value
= -htab
->line_size_log2
;
2080 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2082 h
= define_ovtab_symbol (htab
, "__icache_cachesize");
2085 h
->root
.u
.def
.value
= 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
);
2086 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2088 h
= define_ovtab_symbol (htab
, "__icache_log2_cachesize");
2091 h
->root
.u
.def
.value
= htab
->num_lines_log2
+ htab
->line_size_log2
;
2092 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2094 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_cachesize");
2097 h
->root
.u
.def
.value
= -(htab
->num_lines_log2
+ htab
->line_size_log2
);
2098 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2100 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
2102 htab
->init
->contents
= bfd_zalloc (htab
->init
->owner
,
2104 if (htab
->init
->contents
== NULL
)
2107 h
= define_ovtab_symbol (htab
, "__icache_fileoff");
2110 h
->root
.u
.def
.value
= 0;
2111 h
->root
.u
.def
.section
= htab
->init
;
      /* Write out _ovly_table.  */
      /* set low bit of .size to mark non-overlay area as present.  */
2120 obfd
= htab
->ovtab
->output_section
->owner
;
2121 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
2123 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
2127 unsigned long off
= ovl_index
* 16;
2128 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
2130 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
2131 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16,
2133 /* file_off written later in spu_elf_modify_headers. */
2134 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
2138 h
= define_ovtab_symbol (htab
, "_ovly_table");
2141 h
->root
.u
.def
.value
= 16;
2142 h
->size
= htab
->num_overlays
* 16;
2144 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
2147 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2150 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
2153 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2154 h
->size
= htab
->num_buf
* 4;
2156 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
2159 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
2163 h
= define_ovtab_symbol (htab
, "_EAR_");
2166 h
->root
.u
.def
.section
= htab
->toe
;
2167 h
->root
.u
.def
.value
= 0;
/* Check that all loadable section VMAs lie in the range
   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */
2177 spu_elf_check_vma (struct bfd_link_info
*info
)
2179 struct elf_segment_map
*m
;
2181 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2182 bfd
*abfd
= info
->output_bfd
;
2183 bfd_vma hi
= htab
->params
->local_store_hi
;
2184 bfd_vma lo
= htab
->params
->local_store_lo
;
2186 htab
->local_store
= hi
+ 1 - lo
;
2188 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
2189 if (m
->p_type
== PT_LOAD
)
2190 for (i
= 0; i
< m
->count
; i
++)
2191 if (m
->sections
[i
]->size
!= 0
2192 && (m
->sections
[i
]->vma
< lo
2193 || m
->sections
[i
]->vma
> hi
2194 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
2195 return m
->sections
[i
];
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.
   If a store of lr is found save the instruction offset to *LR_STORE.
   If a stack adjusting instruction is found, save that offset to
   *SP_ADJUST.  */
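/* Illustrative sketch, not from the original source: a typical prologue
   recognised by this scan is

	stqd	$lr,16($sp)	; offset recorded in *LR_STORE
	ai	$sp,$sp,-80	; offset recorded in *SP_ADJUST

   for which the scan returns -80; maybe_insert_function negates that to
   record a stack frame of 80 bytes.  */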
2207 find_function_stack_adjust (asection
*sec
,
2214 memset (reg
, 0, sizeof (reg
));
2215 for ( ; offset
+ 4 <= sec
->size
; offset
+= 4)
2217 unsigned char buf
[4];
2221 /* Assume no relocs on stack adjusing insns. */
2222 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
2226 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
2228 if (buf
[0] == 0x24 /* stqd */)
2230 if (rt
== 0 /* lr */ && ra
== 1 /* sp */)
2235 /* Partly decoded immediate field. */
2236 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
2238 if (buf
[0] == 0x1c /* ai */)
2241 imm
= (imm
^ 0x200) - 0x200;
2242 reg
[rt
] = reg
[ra
] + imm
;
2244 if (rt
== 1 /* sp */)
2248 *sp_adjust
= offset
;
2252 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
2254 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2256 reg
[rt
] = reg
[ra
] + reg
[rb
];
2261 *sp_adjust
= offset
;
2265 else if (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */)
2267 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2269 reg
[rt
] = reg
[rb
] - reg
[ra
];
2274 *sp_adjust
= offset
;
2278 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2280 if (buf
[0] >= 0x42 /* ila */)
2281 imm
|= (buf
[0] & 1) << 17;
2286 if (buf
[0] == 0x40 /* il */)
2288 if ((buf
[1] & 0x80) == 0)
2290 imm
= (imm
^ 0x8000) - 0x8000;
2292 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
2298 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
2300 reg
[rt
] |= imm
& 0xffff;
2303 else if (buf
[0] == 0x04 /* ori */)
2306 imm
= (imm
^ 0x200) - 0x200;
2307 reg
[rt
] = reg
[ra
] | imm
;
2310 else if (buf
[0] == 0x32 && (buf
[1] & 0x80) != 0 /* fsmbi */)
2312 reg
[rt
] = ( ((imm
& 0x8000) ? 0xff000000 : 0)
2313 | ((imm
& 0x4000) ? 0x00ff0000 : 0)
2314 | ((imm
& 0x2000) ? 0x0000ff00 : 0)
2315 | ((imm
& 0x1000) ? 0x000000ff : 0));
2318 else if (buf
[0] == 0x16 /* andbi */)
2324 reg
[rt
] = reg
[ra
] & imm
;
2327 else if (buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
2329 /* Used in pic reg load. Say rt is trashed. Won't be used
2330 in stack adjust, but we need to continue past this branch. */
2334 else if (is_branch (buf
) || is_indirect_branch (buf
))
2335 /* If we hit a branch then we must be out of the prologue. */
2342 /* qsort predicate to sort symbols by section and value. */
2344 static Elf_Internal_Sym
*sort_syms_syms
;
2345 static asection
**sort_syms_psecs
;
2348 sort_syms (const void *a
, const void *b
)
2350 Elf_Internal_Sym
*const *s1
= a
;
2351 Elf_Internal_Sym
*const *s2
= b
;
2352 asection
*sec1
,*sec2
;
2353 bfd_signed_vma delta
;
2355 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
2356 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
2359 return sec1
->index
- sec2
->index
;
2361 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
2363 return delta
< 0 ? -1 : 1;
2365 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
2367 return delta
< 0 ? -1 : 1;
2369 return *s1
< *s2
? -1 : 1;
/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
   entries for section SEC.  */
2375 static struct spu_elf_stack_info
*
2376 alloc_stack_info (asection
*sec
, int max_fun
)
2378 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2381 amt
= sizeof (struct spu_elf_stack_info
);
2382 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
2383 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
2384 if (sec_data
->u
.i
.stack_info
!= NULL
)
2385 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
2386 return sec_data
->u
.i
.stack_info
;
/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.  */
2392 static struct function_info
*
2393 maybe_insert_function (asection
*sec
,
2396 bfd_boolean is_func
)
2398 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2399 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2405 sinfo
= alloc_stack_info (sec
, 20);
2412 Elf_Internal_Sym
*sym
= sym_h
;
2413 off
= sym
->st_value
;
2414 size
= sym
->st_size
;
2418 struct elf_link_hash_entry
*h
= sym_h
;
2419 off
= h
->root
.u
.def
.value
;
2423 for (i
= sinfo
->num_fun
; --i
>= 0; )
2424 if (sinfo
->fun
[i
].lo
<= off
)
2429 /* Don't add another entry for an alias, but do update some
2431 if (sinfo
->fun
[i
].lo
== off
)
2433 /* Prefer globals over local syms. */
2434 if (global
&& !sinfo
->fun
[i
].global
)
2436 sinfo
->fun
[i
].global
= TRUE
;
2437 sinfo
->fun
[i
].u
.h
= sym_h
;
2440 sinfo
->fun
[i
].is_func
= TRUE
;
2441 return &sinfo
->fun
[i
];
2443 /* Ignore a zero-size symbol inside an existing function. */
2444 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
2445 return &sinfo
->fun
[i
];
2448 if (sinfo
->num_fun
>= sinfo
->max_fun
)
2450 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
2451 bfd_size_type old
= amt
;
2453 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2454 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
2455 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2456 sinfo
= bfd_realloc (sinfo
, amt
);
2459 memset ((char *) sinfo
+ old
, 0, amt
- old
);
2460 sec_data
->u
.i
.stack_info
= sinfo
;
2463 if (++i
< sinfo
->num_fun
)
2464 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
2465 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
2466 sinfo
->fun
[i
].is_func
= is_func
;
2467 sinfo
->fun
[i
].global
= global
;
2468 sinfo
->fun
[i
].sec
= sec
;
2470 sinfo
->fun
[i
].u
.h
= sym_h
;
2472 sinfo
->fun
[i
].u
.sym
= sym_h
;
2473 sinfo
->fun
[i
].lo
= off
;
2474 sinfo
->fun
[i
].hi
= off
+ size
;
2475 sinfo
->fun
[i
].lr_store
= -1;
2476 sinfo
->fun
[i
].sp_adjust
= -1;
2477 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
,
2478 &sinfo
->fun
[i
].lr_store
,
2479 &sinfo
->fun
[i
].sp_adjust
);
2480 sinfo
->num_fun
+= 1;
2481 return &sinfo
->fun
[i
];
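/* Editorial addition, not part of the original source: the insertion step
   above in isolation.  Scan the sorted array from the end for the slot,
   shift the tail up one place with memmove, and store the new element.
   Plain ints stand in for struct function_info; the names are invented.  */

static ATTRIBUTE_UNUSED void
spu_example_sorted_insert (int *arr, int *num, int value)
{
  int i;

  /* ARR holds *NUM entries in ascending order and has room for one more.  */
  for (i = *num; --i >= 0; )
    if (arr[i] <= value)
      break;

  if (++i < *num)
    memmove (&arr[i + 1], &arr[i], (*num - i) * sizeof (arr[i]));
  arr[i] = value;
  *num += 1;
}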
/* Return the name of FUN.  */

static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
	return "(null)";
      sprintf (name, "%s+%lx", sec->name,
	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}
/* Read the instruction at OFF in SEC.  Return true iff the instruction
   is a nop, lnop, or stop 0 (all zero insn).  */

static bfd_boolean
is_nop (asection *sec, bfd_vma off)
{
  unsigned char insn[4];

  if (off + 4 > sec->size
      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
    return FALSE;
  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
    return TRUE;
  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
    return TRUE;
  return FALSE;
}

/* Extend the range of FUN to cover nop padding up to LIMIT.
   Return TRUE iff some instruction other than a NOP was found.  */

static bfd_boolean
insns_at_end (struct function_info *fun, bfd_vma limit)
{
  bfd_vma off = (fun->hi + 3) & -4;

  while (off < limit && is_nop (fun->sec, off))
    off += 4;
  if (off < limit)
    {
      fun->hi = limit;
      return TRUE;
    }
  fun->hi = off;
  return FALSE;
}
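/* Editorial addition, not part of the original source: a small sanity
   check of the byte masks used by is_nop above.  To the best of my
   knowledge 0x40200000 and 0x00200000 are the SPU "nop" and "lnop"
   encodings; both clear every bit tested by insn[0] & 0xbf and leave
   0x20 in the bits tested by insn[1] & 0xe0.  The helper is invented.  */

static ATTRIBUTE_UNUSED void
spu_example_check_nop_masks (void)
{
  static const unsigned char nop[4]  = { 0x40, 0x20, 0x00, 0x00 };
  static const unsigned char lnop[4] = { 0x00, 0x20, 0x00, 0x00 };

  BFD_ASSERT ((nop[0] & 0xbf) == 0 && (nop[1] & 0xe0) == 0x20);
  BFD_ASSERT ((lnop[0] & 0xbf) == 0 && (lnop[1] & 0xe0) == 0x20);
}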
/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC.  */

static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  if (sinfo == NULL)
    return FALSE;

  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	/* xgettext:c-format */
	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = TRUE;

  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      if (sinfo->fun[0].lo != 0)
	gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  /* xgettext:c-format */
	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = TRUE;
    }
  return gaps;
}
/* Search current function info for a function that contains address
   OFFSET in section SEC.  */

static struct function_info *
find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int lo, hi, mid;

  lo = 0;
  hi = sinfo->num_fun;
  while (lo < hi)
    {
      mid = (lo + hi) / 2;
      if (offset < sinfo->fun[mid].lo)
	hi = mid;
      else if (offset >= sinfo->fun[mid].hi)
	lo = mid + 1;
      else
	return &sinfo->fun[mid];
    }
  /* xgettext:c-format */
  info->callbacks->einfo (_("%pA:0x%v not found in function table\n"),
			  sec, offset);
  bfd_set_error (bfd_error_bad_value);
  return NULL;
}
/* Add CALLEE to CALLER call list if not already present.  Return TRUE
   if CALLEE was new.  If this function returns FALSE, CALLEE should
   be deallocated.  */

static bfd_boolean
insert_callee (struct function_info *caller, struct call_info *callee)
{
  struct call_info **pp, *p;

  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
    if (p->fun == callee->fun)
      {
	/* Tail calls use less stack than normal calls.  Retain entry
	   for normal call over one for tail call.  */
	p->is_tail &= callee->is_tail;
	if (!p->is_tail)
	  {
	    p->fun->start = NULL;
	    p->fun->is_func = TRUE;
	  }
	p->count += callee->count;
	/* Reorder list so most recent call is first.  */
	*pp = p->next;
	p->next = caller->call_list;
	caller->call_list = p;
	return FALSE;
      }
  callee->next = caller->call_list;
  caller->call_list = callee;
  return TRUE;
}

/* Copy CALL and insert the copy into CALLER.  */

static bfd_boolean
copy_callee (struct function_info *caller, const struct call_info *call)
{
  struct call_info *callee;

  callee = bfd_malloc (sizeof (*callee));
  if (callee == NULL)
    return FALSE;
  *callee = *call;
  if (!insert_callee (caller, callee))
    free (callee);
  return TRUE;
}
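/* Editorial addition, not part of the original source: the list idiom used
   by insert_callee above, shown on an invented node type.  Walking with a
   pointer-to-pointer means *PP always names the link that points at the
   current node, so an existing entry can be unlinked and pushed back on
   the front without special-casing the list head.  */

struct spu_example_node
{
  struct spu_example_node *next;
  int key;
};

static ATTRIBUTE_UNUSED bfd_boolean
spu_example_move_to_front (struct spu_example_node **head, int key)
{
  struct spu_example_node **pp, *p;

  for (pp = head; (p = *pp) != NULL; pp = &p->next)
    if (p->key == key)
      {
	*pp = p->next;		/* Unlink the match.  */
	p->next = *head;	/* Push it on the front.  */
	*head = p;
	return TRUE;		/* Found and moved.  */
      }
  return FALSE;			/* Not present; caller would append.  */
}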
/* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
   overlay stub sections.  */

static bfd_boolean
interesting_section (asection *s)
{
  return (s->output_section != bfd_abs_section_ptr
	  && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
	      == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	  && s->size != 0);
}
2687 /* Rummage through the relocs for SEC, looking for function calls.
2688 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2689 mark destination symbols on calls as being functions. Also
2690 look at branches, which may be tail calls or go to hot/cold
2691 section part of same function. */
2694 mark_functions_via_relocs (asection
*sec
,
2695 struct bfd_link_info
*info
,
2698 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2699 Elf_Internal_Shdr
*symtab_hdr
;
2701 unsigned int priority
= 0;
2702 static bfd_boolean warned
;
2704 if (!interesting_section (sec
)
2705 || sec
->reloc_count
== 0)
2708 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2710 if (internal_relocs
== NULL
)
2713 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2714 psyms
= &symtab_hdr
->contents
;
2715 irela
= internal_relocs
;
2716 irelaend
= irela
+ sec
->reloc_count
;
2717 for (; irela
< irelaend
; irela
++)
2719 enum elf_spu_reloc_type r_type
;
2720 unsigned int r_indx
;
2722 Elf_Internal_Sym
*sym
;
2723 struct elf_link_hash_entry
*h
;
2725 bfd_boolean nonbranch
, is_call
;
2726 struct function_info
*caller
;
2727 struct call_info
*callee
;
2729 r_type
= ELF32_R_TYPE (irela
->r_info
);
2730 nonbranch
= r_type
!= R_SPU_REL16
&& r_type
!= R_SPU_ADDR16
;
2732 r_indx
= ELF32_R_SYM (irela
->r_info
);
2733 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2737 || sym_sec
->output_section
== bfd_abs_section_ptr
)
2743 unsigned char insn
[4];
2745 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2746 irela
->r_offset
, 4))
2748 if (is_branch (insn
))
2750 is_call
= (insn
[0] & 0xfd) == 0x31;
2751 priority
= insn
[1] & 0x0f;
2753 priority
|= insn
[2];
2755 priority
|= insn
[3];
2757 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2758 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2761 info
->callbacks
->einfo
2762 /* xgettext:c-format */
2763 (_("%pB(%pA+0x%v): call to non-code section"
2764 " %pB(%pA), analysis incomplete\n"),
2765 sec
->owner
, sec
, irela
->r_offset
,
2766 sym_sec
->owner
, sym_sec
);
2781 /* For --auto-overlay, count possible stubs we need for
2782 function pointer references. */
2783 unsigned int sym_type
;
2787 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2788 if (sym_type
== STT_FUNC
)
2790 if (call_tree
&& spu_hash_table (info
)->params
->auto_overlay
)
2791 spu_hash_table (info
)->non_ovly_stub
+= 1;
2792 /* If the symbol type is STT_FUNC then this must be a
2793 function pointer initialisation. */
2796 /* Ignore data references. */
2797 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2798 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
	  /* Otherwise we probably have a jump table reloc for
	     a switch statement or some other reference to a
	     code symbol.  */
2806 val
= h
->root
.u
.def
.value
;
2808 val
= sym
->st_value
;
2809 val
+= irela
->r_addend
;
2813 struct function_info
*fun
;
2815 if (irela
->r_addend
!= 0)
2817 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2820 fake
->st_value
= val
;
2822 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2826 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2828 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2831 if (irela
->r_addend
!= 0
2832 && fun
->u
.sym
!= sym
)
2837 caller
= find_function (sec
, irela
->r_offset
, info
);
2840 callee
= bfd_malloc (sizeof *callee
);
2844 callee
->fun
= find_function (sym_sec
, val
, info
);
2845 if (callee
->fun
== NULL
)
2847 callee
->is_tail
= !is_call
;
2848 callee
->is_pasted
= FALSE
;
2849 callee
->broken_cycle
= FALSE
;
2850 callee
->priority
= priority
;
2851 callee
->count
= nonbranch
? 0 : 1;
2852 if (callee
->fun
->last_caller
!= sec
)
2854 callee
->fun
->last_caller
= sec
;
2855 callee
->fun
->call_count
+= 1;
2857 if (!insert_callee (caller
, callee
))
2860 && !callee
->fun
->is_func
2861 && callee
->fun
->stack
== 0)
2863 /* This is either a tail call or a branch from one part of
2864 the function to another, ie. hot/cold section. If the
2865 destination has been called by some other function then
2866 it is a separate function. We also assume that functions
2867 are not split across input files. */
2868 if (sec
->owner
!= sym_sec
->owner
)
2870 callee
->fun
->start
= NULL
;
2871 callee
->fun
->is_func
= TRUE
;
2873 else if (callee
->fun
->start
== NULL
)
2875 struct function_info
*caller_start
= caller
;
2876 while (caller_start
->start
)
2877 caller_start
= caller_start
->start
;
2879 if (caller_start
!= callee
->fun
)
2880 callee
->fun
->start
= caller_start
;
2884 struct function_info
*callee_start
;
2885 struct function_info
*caller_start
;
2886 callee_start
= callee
->fun
;
2887 while (callee_start
->start
)
2888 callee_start
= callee_start
->start
;
2889 caller_start
= caller
;
2890 while (caller_start
->start
)
2891 caller_start
= caller_start
->start
;
2892 if (caller_start
!= callee_start
)
2894 callee
->fun
->start
= NULL
;
2895 callee
->fun
->is_func
= TRUE
;
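/* Editorial addition, not part of the original source: the opcode test
   used by mark_functions_via_relocs above, (insn[0] & 0xfd) == 0x31,
   accepts first bytes 0x31 and 0x33.  As far as I can tell these are the
   SPU branch-and-set-link forms brasl and brsl, i.e. calls; branches that
   do not set the link register are treated as possible tail calls or
   hot/cold section jumps instead.  The helper below is invented.  */

static ATTRIBUTE_UNUSED bfd_boolean
spu_example_branch_sets_link (const unsigned char insn[4])
{
  /* 0x31.. = brasl (absolute), 0x33.. = brsl (pc-relative).  */
  return (insn[0] & 0xfd) == 0x31;
}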
2904 /* Handle something like .init or .fini, which has a piece of a function.
2905 These sections are pasted together to form a single function. */
2908 pasted_function (asection
*sec
)
2910 struct bfd_link_order
*l
;
2911 struct _spu_elf_section_data
*sec_data
;
2912 struct spu_elf_stack_info
*sinfo
;
2913 Elf_Internal_Sym
*fake
;
2914 struct function_info
*fun
, *fun_start
;
2916 fake
= bfd_zmalloc (sizeof (*fake
));
2920 fake
->st_size
= sec
->size
;
2922 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2923 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2927 /* Find a function immediately preceding this section. */
2929 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2931 if (l
->u
.indirect
.section
== sec
)
2933 if (fun_start
!= NULL
)
2935 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2939 fun
->start
= fun_start
;
2941 callee
->is_tail
= TRUE
;
2942 callee
->is_pasted
= TRUE
;
2943 callee
->broken_cycle
= FALSE
;
2944 callee
->priority
= 0;
2946 if (!insert_callee (fun_start
, callee
))
2952 if (l
->type
== bfd_indirect_link_order
2953 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2954 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2955 && sinfo
->num_fun
!= 0)
2956 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2959 /* Don't return an error if we did not find a function preceding this
2960 section. The section may have incorrect flags. */
2964 /* Map address ranges in code sections to functions. */
2967 discover_functions (struct bfd_link_info
*info
)
2971 Elf_Internal_Sym
***psym_arr
;
2972 asection
***sec_arr
;
2973 bfd_boolean gaps
= FALSE
;
2976 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
2979 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2980 if (psym_arr
== NULL
)
2982 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2983 if (sec_arr
== NULL
)
2986 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2988 ibfd
= ibfd
->link
.next
, bfd_idx
++)
2990 extern const bfd_target spu_elf32_vec
;
2991 Elf_Internal_Shdr
*symtab_hdr
;
2994 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2995 asection
**psecs
, **p
;
2997 if (ibfd
->xvec
!= &spu_elf32_vec
)
3000 /* Read all the symbols. */
3001 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
3002 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
3006 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3007 if (interesting_section (sec
))
3015 /* Don't use cached symbols since the generic ELF linker
3016 code only reads local symbols, and we need globals too. */
3017 free (symtab_hdr
->contents
);
3018 symtab_hdr
->contents
= NULL
;
3019 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
3021 symtab_hdr
->contents
= (void *) syms
;
3025 /* Select defined function symbols that are going to be output. */
3026 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
3029 psym_arr
[bfd_idx
] = psyms
;
3030 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
3033 sec_arr
[bfd_idx
] = psecs
;
3034 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
3035 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
3036 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3040 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
3041 if (s
!= NULL
&& interesting_section (s
))
3044 symcount
= psy
- psyms
;
3047 /* Sort them by section and offset within section. */
3048 sort_syms_syms
= syms
;
3049 sort_syms_psecs
= psecs
;
3050 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
3052 /* Now inspect the function symbols. */
3053 for (psy
= psyms
; psy
< psyms
+ symcount
; )
3055 asection
*s
= psecs
[*psy
- syms
];
3056 Elf_Internal_Sym
**psy2
;
3058 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
3059 if (psecs
[*psy2
- syms
] != s
)
3062 if (!alloc_stack_info (s
, psy2
- psy
))
3067 /* First install info about properly typed and sized functions.
3068 In an ideal world this will cover all code sections, except
3069 when partitioning functions into hot and cold sections,
3070 and the horrible pasted together .init and .fini functions. */
3071 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
3074 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3076 asection
*s
= psecs
[sy
- syms
];
3077 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
3082 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3083 if (interesting_section (sec
))
3084 gaps
|= check_function_ranges (sec
, info
);
3089 /* See if we can discover more function symbols by looking at
3091 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3093 ibfd
= ibfd
->link
.next
, bfd_idx
++)
3097 if (psym_arr
[bfd_idx
] == NULL
)
3100 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3101 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
3105 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3107 ibfd
= ibfd
->link
.next
, bfd_idx
++)
3109 Elf_Internal_Shdr
*symtab_hdr
;
3111 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
3114 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
3117 psecs
= sec_arr
[bfd_idx
];
3119 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
3120 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
3123 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3124 if (interesting_section (sec
))
3125 gaps
|= check_function_ranges (sec
, info
);
3129 /* Finally, install all globals. */
3130 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
3134 s
= psecs
[sy
- syms
];
3136 /* Global syms might be improperly typed functions. */
3137 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
3138 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
3140 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
3146 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3148 extern const bfd_target spu_elf32_vec
;
3151 if (ibfd
->xvec
!= &spu_elf32_vec
)
3154 /* Some of the symbols we've installed as marking the
3155 beginning of functions may have a size of zero. Extend
3156 the range of such functions to the beginning of the
3157 next symbol of interest. */
3158 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3159 if (interesting_section (sec
))
3161 struct _spu_elf_section_data
*sec_data
;
3162 struct spu_elf_stack_info
*sinfo
;
3164 sec_data
= spu_elf_section_data (sec
);
3165 sinfo
= sec_data
->u
.i
.stack_info
;
3166 if (sinfo
!= NULL
&& sinfo
->num_fun
!= 0)
3169 bfd_vma hi
= sec
->size
;
3171 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
3173 sinfo
->fun
[fun_idx
].hi
= hi
;
3174 hi
= sinfo
->fun
[fun_idx
].lo
;
3177 sinfo
->fun
[0].lo
= 0;
3179 /* No symbols in this section. Must be .init or .fini
3180 or something similar. */
3181 else if (!pasted_function (sec
))
3187 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3189 ibfd
= ibfd
->link
.next
, bfd_idx
++)
3191 if (psym_arr
[bfd_idx
] == NULL
)
3194 free (psym_arr
[bfd_idx
]);
3195 free (sec_arr
[bfd_idx
]);
3204 /* Iterate over all function_info we have collected, calling DOIT on
3205 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3209 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
3210 struct bfd_link_info
*,
3212 struct bfd_link_info
*info
,
3218 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3220 extern const bfd_target spu_elf32_vec
;
3223 if (ibfd
->xvec
!= &spu_elf32_vec
)
3226 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3228 struct _spu_elf_section_data
*sec_data
;
3229 struct spu_elf_stack_info
*sinfo
;
3231 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3232 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3235 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3236 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
3237 if (!doit (&sinfo
->fun
[i
], info
, param
))
3245 /* Transfer call info attached to struct function_info entries for
3246 all of a given function's sections to the first entry. */
3249 transfer_calls (struct function_info
*fun
,
3250 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3251 void *param ATTRIBUTE_UNUSED
)
3253 struct function_info
*start
= fun
->start
;
3257 struct call_info
*call
, *call_next
;
3259 while (start
->start
!= NULL
)
3260 start
= start
->start
;
3261 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
3263 call_next
= call
->next
;
3264 if (!insert_callee (start
, call
))
3267 fun
->call_list
= NULL
;
3272 /* Mark nodes in the call graph that are called by some other node. */
3275 mark_non_root (struct function_info
*fun
,
3276 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3277 void *param ATTRIBUTE_UNUSED
)
3279 struct call_info
*call
;
3284 for (call
= fun
->call_list
; call
; call
= call
->next
)
3286 call
->fun
->non_root
= TRUE
;
3287 mark_non_root (call
->fun
, 0, 0);
3292 /* Remove cycles from the call graph. Set depth of nodes. */
3295 remove_cycles (struct function_info
*fun
,
3296 struct bfd_link_info
*info
,
3299 struct call_info
**callp
, *call
;
3300 unsigned int depth
= *(unsigned int *) param
;
3301 unsigned int max_depth
= depth
;
3305 fun
->marking
= TRUE
;
3307 callp
= &fun
->call_list
;
3308 while ((call
= *callp
) != NULL
)
3310 call
->max_depth
= depth
+ !call
->is_pasted
;
3311 if (!call
->fun
->visit2
)
3313 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
3315 if (max_depth
< call
->max_depth
)
3316 max_depth
= call
->max_depth
;
3318 else if (call
->fun
->marking
)
3320 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3322 if (!htab
->params
->auto_overlay
3323 && htab
->params
->stack_analysis
)
3325 const char *f1
= func_name (fun
);
3326 const char *f2
= func_name (call
->fun
);
3328 /* xgettext:c-format */
3329 info
->callbacks
->info (_("stack analysis will ignore the call "
3334 call
->broken_cycle
= TRUE
;
3336 callp
= &call
->next
;
3338 fun
->marking
= FALSE
;
3339 *(unsigned int *) param
= max_depth
;
3343 /* Check that we actually visited all nodes in remove_cycles. If we
3344 didn't, then there is some cycle in the call graph not attached to
3345 any root node. Arbitrarily choose a node in the cycle as a new
3346 root and break the cycle. */
3349 mark_detached_root (struct function_info
*fun
,
3350 struct bfd_link_info
*info
,
3355 fun
->non_root
= FALSE
;
3356 *(unsigned int *) param
= 0;
3357 return remove_cycles (fun
, info
, param
);
3360 /* Populate call_list for each function. */
3363 build_call_tree (struct bfd_link_info
*info
)
3368 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3370 extern const bfd_target spu_elf32_vec
;
3373 if (ibfd
->xvec
!= &spu_elf32_vec
)
3376 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3377 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
3381 /* Transfer call info from hot/cold section part of function
3383 if (!spu_hash_table (info
)->params
->auto_overlay
3384 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
3387 /* Find the call graph root(s). */
3388 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
3391 /* Remove cycles from the call graph. We start from the root node(s)
3392 so that we break cycles in a reasonable place. */
3394 if (!for_each_node (remove_cycles
, info
, &depth
, TRUE
))
3397 return for_each_node (mark_detached_root
, info
, &depth
, FALSE
);
/* qsort predicate to sort calls by priority, max_depth then count.  */

static int
sort_calls (const void *a, const void *b)
{
  struct call_info *const *c1 = a;
  struct call_info *const *c2 = b;
  int delta;

  delta = (*c2)->priority - (*c1)->priority;
  if (delta != 0)
    return delta;

  delta = (*c2)->max_depth - (*c1)->max_depth;
  if (delta != 0)
    return delta;

  delta = (*c2)->count - (*c1)->count;
  if (delta != 0)
    return delta;

  return (char *) c1 - (char *) c2;
}

struct _mos_param {
  unsigned int max_overlay_size;
};
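/* Editorial addition, not part of the original source: the "b minus a"
   comparator pattern that sort_calls above uses to obtain a descending
   qsort order, demonstrated on an invented two-field record.  */

struct spu_example_rec
{
  int priority;
  int count;
};

static int
spu_example_cmp_desc (const void *a, const void *b)
{
  const struct spu_example_rec *r1 = a;
  const struct spu_example_rec *r2 = b;
  int delta;

  delta = r2->priority - r1->priority;	/* Higher priority first.  */
  if (delta != 0)
    return delta;
  return r2->count - r1->count;		/* Then higher count first.  */
}

static ATTRIBUTE_UNUSED void
spu_example_sort_desc (struct spu_example_rec *recs, size_t n)
{
  qsort (recs, n, sizeof (*recs), spu_example_cmp_desc);
}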
3428 /* Set linker_mark and gc_mark on any sections that we will put in
3429 overlays. These flags are used by the generic ELF linker, but we
3430 won't be continuing on to bfd_elf_final_link so it is OK to use
3431 them. linker_mark is clear before we get here. Set segment_mark
3432 on sections that are part of a pasted function (excluding the last
3435 Set up function rodata section if --overlay-rodata. We don't
3436 currently include merged string constant rodata sections since
3438 Sort the call graph so that the deepest nodes will be visited
3442 mark_overlay_section (struct function_info
*fun
,
3443 struct bfd_link_info
*info
,
3446 struct call_info
*call
;
3448 struct _mos_param
*mos_param
= param
;
3449 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3455 if (!fun
->sec
->linker_mark
3456 && (htab
->params
->ovly_flavour
!= ovly_soft_icache
3457 || htab
->params
->non_ia_text
3458 || strncmp (fun
->sec
->name
, ".text.ia.", 9) == 0
3459 || strcmp (fun
->sec
->name
, ".init") == 0
3460 || strcmp (fun
->sec
->name
, ".fini") == 0))
3464 fun
->sec
->linker_mark
= 1;
3465 fun
->sec
->gc_mark
= 1;
3466 fun
->sec
->segment_mark
= 0;
3467 /* Ensure SEC_CODE is set on this text section (it ought to
3468 be!), and SEC_CODE is clear on rodata sections. We use
3469 this flag to differentiate the two overlay section types. */
3470 fun
->sec
->flags
|= SEC_CODE
;
3472 size
= fun
->sec
->size
;
3473 if (htab
->params
->auto_overlay
& OVERLAY_RODATA
)
3477 /* Find the rodata section corresponding to this function's
3479 if (strcmp (fun
->sec
->name
, ".text") == 0)
3481 name
= bfd_malloc (sizeof (".rodata"));
3484 memcpy (name
, ".rodata", sizeof (".rodata"));
3486 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
3488 size_t len
= strlen (fun
->sec
->name
);
3489 name
= bfd_malloc (len
+ 3);
3492 memcpy (name
, ".rodata", sizeof (".rodata"));
3493 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
3495 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
3497 size_t len
= strlen (fun
->sec
->name
) + 1;
3498 name
= bfd_malloc (len
);
3501 memcpy (name
, fun
->sec
->name
, len
);
3507 asection
*rodata
= NULL
;
3508 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
3509 if (group_sec
== NULL
)
3510 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
3512 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
3514 if (strcmp (group_sec
->name
, name
) == 0)
3519 group_sec
= elf_section_data (group_sec
)->next_in_group
;
3521 fun
->rodata
= rodata
;
3524 size
+= fun
->rodata
->size
;
3525 if (htab
->params
->line_size
!= 0
3526 && size
> htab
->params
->line_size
)
3528 size
-= fun
->rodata
->size
;
3533 fun
->rodata
->linker_mark
= 1;
3534 fun
->rodata
->gc_mark
= 1;
3535 fun
->rodata
->flags
&= ~SEC_CODE
;
3541 if (mos_param
->max_overlay_size
< size
)
3542 mos_param
->max_overlay_size
= size
;
3545 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3550 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
3554 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3555 calls
[count
++] = call
;
3557 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
3559 fun
->call_list
= NULL
;
3563 calls
[count
]->next
= fun
->call_list
;
3564 fun
->call_list
= calls
[count
];
3569 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3571 if (call
->is_pasted
)
3573 /* There can only be one is_pasted call per function_info. */
3574 BFD_ASSERT (!fun
->sec
->segment_mark
);
3575 fun
->sec
->segment_mark
= 1;
3577 if (!call
->broken_cycle
3578 && !mark_overlay_section (call
->fun
, info
, param
))
3582 /* Don't put entry code into an overlay. The overlay manager needs
3583 a stack! Also, don't mark .ovl.init as an overlay. */
3584 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
3585 == info
->output_bfd
->start_address
3586 || strncmp (fun
->sec
->output_section
->name
, ".ovl.init", 9) == 0)
3588 fun
->sec
->linker_mark
= 0;
3589 if (fun
->rodata
!= NULL
)
3590 fun
->rodata
->linker_mark
= 0;
3595 /* If non-zero then unmark functions called from those within sections
3596 that we need to unmark. Unfortunately this isn't reliable since the
3597 call graph cannot know the destination of function pointer calls. */
3598 #define RECURSE_UNMARK 0
3601 asection
*exclude_input_section
;
3602 asection
*exclude_output_section
;
3603 unsigned long clearing
;
3606 /* Undo some of mark_overlay_section's work. */
3609 unmark_overlay_section (struct function_info
*fun
,
3610 struct bfd_link_info
*info
,
3613 struct call_info
*call
;
3614 struct _uos_param
*uos_param
= param
;
3615 unsigned int excluded
= 0;
3623 if (fun
->sec
== uos_param
->exclude_input_section
3624 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
3628 uos_param
->clearing
+= excluded
;
3630 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
3632 fun
->sec
->linker_mark
= 0;
3634 fun
->rodata
->linker_mark
= 0;
3637 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3638 if (!call
->broken_cycle
3639 && !unmark_overlay_section (call
->fun
, info
, param
))
3643 uos_param
->clearing
-= excluded
;
3648 unsigned int lib_size
;
3649 asection
**lib_sections
;
/* Add sections we have marked as belonging to overlays to an array
   for consideration as non-overlay sections.  The array consists of
   pairs of sections, (text,rodata), for functions in the call graph.  */
3657 collect_lib_sections (struct function_info
*fun
,
3658 struct bfd_link_info
*info
,
3661 struct _cl_param
*lib_param
= param
;
3662 struct call_info
*call
;
3669 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3672 size
= fun
->sec
->size
;
3674 size
+= fun
->rodata
->size
;
3676 if (size
<= lib_param
->lib_size
)
3678 *lib_param
->lib_sections
++ = fun
->sec
;
3679 fun
->sec
->gc_mark
= 0;
3680 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3682 *lib_param
->lib_sections
++ = fun
->rodata
;
3683 fun
->rodata
->gc_mark
= 0;
3686 *lib_param
->lib_sections
++ = NULL
;
3689 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3690 if (!call
->broken_cycle
)
3691 collect_lib_sections (call
->fun
, info
, param
);
/* qsort predicate to sort sections by call count.  */

static int
sort_lib (const void *a, const void *b)
{
  asection *const *s1 = a;
  asection *const *s2 = b;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  int i;
  int delta;

  delta = 0;
  if ((sec_data = spu_elf_section_data (*s1)) != NULL
      && (sinfo = sec_data->u.i.stack_info) != NULL)
    for (i = 0; i < sinfo->num_fun; ++i)
      delta -= sinfo->fun[i].call_count;

  if ((sec_data = spu_elf_section_data (*s2)) != NULL
      && (sinfo = sec_data->u.i.stack_info) != NULL)
    for (i = 0; i < sinfo->num_fun; ++i)
      delta += sinfo->fun[i].call_count;

  if (delta != 0)
    return delta;

  return s1 - s2;
}
3730 /* Remove some sections from those marked to be in overlays. Choose
3731 those that are called from many places, likely library functions. */
3734 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3737 asection
**lib_sections
;
3738 unsigned int i
, lib_count
;
3739 struct _cl_param collect_lib_param
;
3740 struct function_info dummy_caller
;
3741 struct spu_link_hash_table
*htab
;
3743 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3745 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3747 extern const bfd_target spu_elf32_vec
;
3750 if (ibfd
->xvec
!= &spu_elf32_vec
)
3753 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3754 if (sec
->linker_mark
3755 && sec
->size
< lib_size
3756 && (sec
->flags
& SEC_CODE
) != 0)
3759 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3760 if (lib_sections
== NULL
)
3761 return (unsigned int) -1;
3762 collect_lib_param
.lib_size
= lib_size
;
3763 collect_lib_param
.lib_sections
= lib_sections
;
3764 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3766 return (unsigned int) -1;
3767 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3769 /* Sort sections so that those with the most calls are first. */
3771 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3773 htab
= spu_hash_table (info
);
3774 for (i
= 0; i
< lib_count
; i
++)
3776 unsigned int tmp
, stub_size
;
3778 struct _spu_elf_section_data
*sec_data
;
3779 struct spu_elf_stack_info
*sinfo
;
3781 sec
= lib_sections
[2 * i
];
3782 /* If this section is OK, its size must be less than lib_size. */
3784 /* If it has a rodata section, then add that too. */
3785 if (lib_sections
[2 * i
+ 1])
3786 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3787 /* Add any new overlay call stubs needed by the section. */
3790 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3791 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3794 struct call_info
*call
;
3796 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3797 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3798 if (call
->fun
->sec
->linker_mark
)
3800 struct call_info
*p
;
3801 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3802 if (p
->fun
== call
->fun
)
3805 stub_size
+= ovl_stub_size (htab
->params
);
3808 if (tmp
+ stub_size
< lib_size
)
3810 struct call_info
**pp
, *p
;
3812 /* This section fits. Mark it as non-overlay. */
3813 lib_sections
[2 * i
]->linker_mark
= 0;
3814 if (lib_sections
[2 * i
+ 1])
3815 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3816 lib_size
-= tmp
+ stub_size
;
3817 /* Call stubs to the section we just added are no longer
3819 pp
= &dummy_caller
.call_list
;
3820 while ((p
= *pp
) != NULL
)
3821 if (!p
->fun
->sec
->linker_mark
)
3823 lib_size
+= ovl_stub_size (htab
->params
);
3829 /* Add new call stubs to dummy_caller. */
3830 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3831 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3834 struct call_info
*call
;
3836 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3837 for (call
= sinfo
->fun
[k
].call_list
;
3840 if (call
->fun
->sec
->linker_mark
)
3842 struct call_info
*callee
;
3843 callee
= bfd_malloc (sizeof (*callee
));
3845 return (unsigned int) -1;
3847 if (!insert_callee (&dummy_caller
, callee
))
3853 while (dummy_caller
.call_list
!= NULL
)
3855 struct call_info
*call
= dummy_caller
.call_list
;
3856 dummy_caller
.call_list
= call
->next
;
3859 for (i
= 0; i
< 2 * lib_count
; i
++)
3860 if (lib_sections
[i
])
3861 lib_sections
[i
]->gc_mark
= 1;
3862 free (lib_sections
);
3866 /* Build an array of overlay sections. The deepest node's section is
3867 added first, then its parent node's section, then everything called
3868 from the parent section. The idea being to group sections to
3869 minimise calls between different overlays. */
3872 collect_overlays (struct function_info
*fun
,
3873 struct bfd_link_info
*info
,
3876 struct call_info
*call
;
3877 bfd_boolean added_fun
;
3878 asection
***ovly_sections
= param
;
3884 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3885 if (!call
->is_pasted
&& !call
->broken_cycle
)
3887 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3893 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3895 fun
->sec
->gc_mark
= 0;
3896 *(*ovly_sections
)++ = fun
->sec
;
3897 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3899 fun
->rodata
->gc_mark
= 0;
3900 *(*ovly_sections
)++ = fun
->rodata
;
3903 *(*ovly_sections
)++ = NULL
;
3906 /* Pasted sections must stay with the first section. We don't
3907 put pasted sections in the array, just the first section.
3908 Mark subsequent sections as already considered. */
3909 if (fun
->sec
->segment_mark
)
3911 struct function_info
*call_fun
= fun
;
3914 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3915 if (call
->is_pasted
)
3917 call_fun
= call
->fun
;
3918 call_fun
->sec
->gc_mark
= 0;
3919 if (call_fun
->rodata
)
3920 call_fun
->rodata
->gc_mark
= 0;
3926 while (call_fun
->sec
->segment_mark
);
3930 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3931 if (!call
->broken_cycle
3932 && !collect_overlays (call
->fun
, info
, ovly_sections
))
3937 struct _spu_elf_section_data
*sec_data
;
3938 struct spu_elf_stack_info
*sinfo
;
3940 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3941 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3944 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3945 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3953 struct _sum_stack_param
{
3955 size_t overall_stack
;
3956 bfd_boolean emit_stack_syms
;
3959 /* Descend the call graph for FUN, accumulating total stack required. */
3962 sum_stack (struct function_info
*fun
,
3963 struct bfd_link_info
*info
,
3966 struct call_info
*call
;
3967 struct function_info
*max
;
3968 size_t stack
, cum_stack
;
3970 bfd_boolean has_call
;
3971 struct _sum_stack_param
*sum_stack_param
= param
;
3972 struct spu_link_hash_table
*htab
;
3974 cum_stack
= fun
->stack
;
3975 sum_stack_param
->cum_stack
= cum_stack
;
3981 for (call
= fun
->call_list
; call
; call
= call
->next
)
3983 if (call
->broken_cycle
)
3985 if (!call
->is_pasted
)
3987 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3989 stack
= sum_stack_param
->cum_stack
;
3990 /* Include caller stack for normal calls, don't do so for
3991 tail calls. fun->stack here is local stack usage for
3993 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3994 stack
+= fun
->stack
;
3995 if (cum_stack
< stack
)
4002 sum_stack_param
->cum_stack
= cum_stack
;
4004 /* Now fun->stack holds cumulative stack. */
4005 fun
->stack
= cum_stack
;
4009 && sum_stack_param
->overall_stack
< cum_stack
)
4010 sum_stack_param
->overall_stack
= cum_stack
;
4012 htab
= spu_hash_table (info
);
4013 if (htab
->params
->auto_overlay
)
4016 f1
= func_name (fun
);
4017 if (htab
->params
->stack_analysis
)
4020 info
->callbacks
->info (" %s: 0x%v\n", f1
, (bfd_vma
) cum_stack
);
4021 info
->callbacks
->minfo ("%s: 0x%v 0x%v\n",
4022 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
4026 info
->callbacks
->minfo (_(" calls:\n"));
4027 for (call
= fun
->call_list
; call
; call
= call
->next
)
4028 if (!call
->is_pasted
&& !call
->broken_cycle
)
4030 const char *f2
= func_name (call
->fun
);
4031 const char *ann1
= call
->fun
== max
? "*" : " ";
4032 const char *ann2
= call
->is_tail
? "t" : " ";
4034 info
->callbacks
->minfo (" %s%s %s\n", ann1
, ann2
, f2
);
4039 if (sum_stack_param
->emit_stack_syms
)
4041 char *name
= bfd_malloc (18 + strlen (f1
));
4042 struct elf_link_hash_entry
*h
;
4047 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
4048 sprintf (name
, "__stack_%s", f1
);
4050 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
4052 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
4055 && (h
->root
.type
== bfd_link_hash_new
4056 || h
->root
.type
== bfd_link_hash_undefined
4057 || h
->root
.type
== bfd_link_hash_undefweak
))
4059 h
->root
.type
= bfd_link_hash_defined
;
4060 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
4061 h
->root
.u
.def
.value
= cum_stack
;
4066 h
->ref_regular_nonweak
= 1;
4067 h
->forced_local
= 1;
/* SEC is part of a pasted function.  Return the call_info for the
   next section of this function.  */

static struct call_info *
find_pasted_call (asection *sec)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  struct call_info *call;
  int k;

  for (k = 0; k < sinfo->num_fun; ++k)
    for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
      if (call->is_pasted)
	return call;
  abort ();
  return 0;
}
/* qsort predicate to sort bfds by file name.  */

static int
sort_bfds (const void *a, const void *b)
{
  bfd *const *abfd1 = a;
  bfd *const *abfd2 = b;

  return filename_cmp (bfd_get_filename (*abfd1), bfd_get_filename (*abfd2));
}
4106 print_one_overlay_section (FILE *script
,
4109 unsigned int ovlynum
,
4110 unsigned int *ovly_map
,
4111 asection
**ovly_sections
,
4112 struct bfd_link_info
*info
)
4116 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4118 asection
*sec
= ovly_sections
[2 * j
];
4120 if (fprintf (script
, " %s%c%s (%s)\n",
4121 (sec
->owner
->my_archive
!= NULL
4122 ? bfd_get_filename (sec
->owner
->my_archive
) : ""),
4123 info
->path_separator
,
4124 bfd_get_filename (sec
->owner
),
4127 if (sec
->segment_mark
)
4129 struct call_info
*call
= find_pasted_call (sec
);
4130 while (call
!= NULL
)
4132 struct function_info
*call_fun
= call
->fun
;
4133 sec
= call_fun
->sec
;
4134 if (fprintf (script
, " %s%c%s (%s)\n",
4135 (sec
->owner
->my_archive
!= NULL
4136 ? bfd_get_filename (sec
->owner
->my_archive
) : ""),
4137 info
->path_separator
,
4138 bfd_get_filename (sec
->owner
),
4141 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4142 if (call
->is_pasted
)
4148 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4150 asection
*sec
= ovly_sections
[2 * j
+ 1];
4152 && fprintf (script
, " %s%c%s (%s)\n",
4153 (sec
->owner
->my_archive
!= NULL
4154 ? bfd_get_filename (sec
->owner
->my_archive
) : ""),
4155 info
->path_separator
,
4156 bfd_get_filename (sec
->owner
),
4160 sec
= ovly_sections
[2 * j
];
4161 if (sec
->segment_mark
)
4163 struct call_info
*call
= find_pasted_call (sec
);
4164 while (call
!= NULL
)
4166 struct function_info
*call_fun
= call
->fun
;
4167 sec
= call_fun
->rodata
;
4169 && fprintf (script
, " %s%c%s (%s)\n",
4170 (sec
->owner
->my_archive
!= NULL
4171 ? bfd_get_filename (sec
->owner
->my_archive
) : ""),
4172 info
->path_separator
,
4173 bfd_get_filename (sec
->owner
),
4176 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4177 if (call
->is_pasted
)
4186 /* Handle --auto-overlay. */
4189 spu_elf_auto_overlay (struct bfd_link_info
*info
)
4193 struct elf_segment_map
*m
;
4194 unsigned int fixed_size
, lo
, hi
;
4195 unsigned int reserved
;
4196 struct spu_link_hash_table
*htab
;
4197 unsigned int base
, i
, count
, bfd_count
;
4198 unsigned int region
, ovlynum
;
4199 asection
**ovly_sections
, **ovly_p
;
4200 unsigned int *ovly_map
;
4202 unsigned int total_overlay_size
, overlay_size
;
4203 const char *ovly_mgr_entry
;
4204 struct elf_link_hash_entry
*h
;
4205 struct _mos_param mos_param
;
4206 struct _uos_param uos_param
;
4207 struct function_info dummy_caller
;
4209 /* Find the extents of our loadable image. */
4210 lo
= (unsigned int) -1;
4212 for (m
= elf_seg_map (info
->output_bfd
); m
!= NULL
; m
= m
->next
)
4213 if (m
->p_type
== PT_LOAD
)
4214 for (i
= 0; i
< m
->count
; i
++)
4215 if (m
->sections
[i
]->size
!= 0)
4217 if (m
->sections
[i
]->vma
< lo
)
4218 lo
= m
->sections
[i
]->vma
;
4219 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
4220 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
4222 fixed_size
= hi
+ 1 - lo
;
4224 if (!discover_functions (info
))
4227 if (!build_call_tree (info
))
4230 htab
= spu_hash_table (info
);
4231 reserved
= htab
->params
->auto_overlay_reserved
;
4234 struct _sum_stack_param sum_stack_param
;
4236 sum_stack_param
.emit_stack_syms
= 0;
4237 sum_stack_param
.overall_stack
= 0;
4238 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4240 reserved
= (sum_stack_param
.overall_stack
4241 + htab
->params
->extra_stack_space
);
4244 /* No need for overlays if everything already fits. */
4245 if (fixed_size
+ reserved
<= htab
->local_store
4246 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
4248 htab
->params
->auto_overlay
= 0;
4252 uos_param
.exclude_input_section
= 0;
4253 uos_param
.exclude_output_section
4254 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
4256 ovly_mgr_entry
= "__ovly_load";
4257 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4258 ovly_mgr_entry
= "__icache_br_handler";
4259 h
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
4260 FALSE
, FALSE
, FALSE
);
4262 && (h
->root
.type
== bfd_link_hash_defined
4263 || h
->root
.type
== bfd_link_hash_defweak
)
4266 /* We have a user supplied overlay manager. */
4267 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
4271 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4272 builtin version to .text, and will adjust .text size. */
4273 fixed_size
+= (*htab
->params
->spu_elf_load_ovl_mgr
) ();
4276 /* Mark overlay sections, and find max overlay section size. */
4277 mos_param
.max_overlay_size
= 0;
4278 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
4281 /* We can't put the overlay manager or interrupt routines in
4283 uos_param
.clearing
= 0;
4284 if ((uos_param
.exclude_input_section
4285 || uos_param
.exclude_output_section
)
4286 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
4290 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
4292 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
4293 if (bfd_arr
== NULL
)
4296 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4299 total_overlay_size
= 0;
4300 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
4302 extern const bfd_target spu_elf32_vec
;
4304 unsigned int old_count
;
4306 if (ibfd
->xvec
!= &spu_elf32_vec
)
4310 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
4311 if (sec
->linker_mark
)
4313 if ((sec
->flags
& SEC_CODE
) != 0)
4315 fixed_size
-= sec
->size
;
4316 total_overlay_size
+= sec
->size
;
4318 else if ((sec
->flags
& (SEC_ALLOC
| SEC_LOAD
)) == (SEC_ALLOC
| SEC_LOAD
)
4319 && sec
->output_section
->owner
== info
->output_bfd
4320 && strncmp (sec
->output_section
->name
, ".ovl.init", 9) == 0)
4321 fixed_size
-= sec
->size
;
4322 if (count
!= old_count
)
4323 bfd_arr
[bfd_count
++] = ibfd
;
4326 /* Since the overlay link script selects sections by file name and
4327 section name, ensure that file names are unique. */
4330 bfd_boolean ok
= TRUE
;
4332 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
4333 for (i
= 1; i
< bfd_count
; ++i
)
4334 if (filename_cmp (bfd_get_filename (bfd_arr
[i
- 1]),
4335 bfd_get_filename (bfd_arr
[i
])) == 0)
4337 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
4339 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
4340 /* xgettext:c-format */
4341 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
4342 bfd_get_filename (bfd_arr
[i
]),
4343 bfd_get_filename (bfd_arr
[i
]->my_archive
));
4345 info
->callbacks
->einfo (_("%s duplicated\n"),
4346 bfd_get_filename (bfd_arr
[i
]));
4352 info
->callbacks
->einfo (_("sorry, no support for duplicate "
4353 "object files in auto-overlay script\n"));
4354 bfd_set_error (bfd_error_bad_value
);
4360 fixed_size
+= reserved
;
4361 fixed_size
+= htab
->non_ovly_stub
* ovl_stub_size (htab
->params
);
4362 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
4364 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4366 /* Stubs in the non-icache area are bigger. */
4367 fixed_size
+= htab
->non_ovly_stub
* 16;
4368 /* Space for icache manager tables.
4369 a) Tag array, one quadword per cache line.
4370 - word 0: ia address of present line, init to zero. */
4371 fixed_size
+= 16 << htab
->num_lines_log2
;
4372 /* b) Rewrite "to" list, one quadword per cache line. */
4373 fixed_size
+= 16 << htab
->num_lines_log2
;
4374 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4375 to a power-of-two number of full quadwords) per cache line. */
4376 fixed_size
+= 16 << (htab
->fromelem_size_log2
4377 + htab
->num_lines_log2
);
4378 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4383 /* Guess number of overlays. Assuming overlay buffer is on
4384 average only half full should be conservative. */
4385 ovlynum
= (total_overlay_size
* 2 * htab
->params
->num_lines
4386 / (htab
->local_store
- fixed_size
));
4387 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4388 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
4392 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
4393 /* xgettext:c-format */
4394 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4395 "size of 0x%v exceeds local store\n"),
4396 (bfd_vma
) fixed_size
,
4397 (bfd_vma
) mos_param
.max_overlay_size
);
4399 /* Now see if we should put some functions in the non-overlay area. */
4400 else if (fixed_size
< htab
->params
->auto_overlay_fixed
)
4402 unsigned int max_fixed
, lib_size
;
4404 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
4405 if (max_fixed
> htab
->params
->auto_overlay_fixed
)
4406 max_fixed
= htab
->params
->auto_overlay_fixed
;
4407 lib_size
= max_fixed
- fixed_size
;
4408 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
4409 if (lib_size
== (unsigned int) -1)
4411 fixed_size
= max_fixed
- lib_size
;
4414 /* Build an array of sections, suitably sorted to place into
4416 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
4417 if (ovly_sections
== NULL
)
4419 ovly_p
= ovly_sections
;
4420 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
4422 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
4423 ovly_map
= bfd_malloc (count
* sizeof (*ovly_map
));
4424 if (ovly_map
== NULL
)
4427 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
4428 overlay_size
= (htab
->local_store
- fixed_size
) / htab
->params
->num_lines
;
4429 if (htab
->params
->line_size
!= 0)
4430 overlay_size
= htab
->params
->line_size
;
4433 while (base
< count
)
4435 unsigned int size
= 0, rosize
= 0, roalign
= 0;
4437 for (i
= base
; i
< count
; i
++)
4439 asection
*sec
, *rosec
;
4440 unsigned int tmp
, rotmp
;
4441 unsigned int num_stubs
;
4442 struct call_info
*call
, *pasty
;
4443 struct _spu_elf_section_data
*sec_data
;
4444 struct spu_elf_stack_info
*sinfo
;
4447 /* See whether we can add this section to the current
4448 overlay without overflowing our overlay buffer. */
4449 sec
= ovly_sections
[2 * i
];
4450 tmp
= align_power (size
, sec
->alignment_power
) + sec
->size
;
4452 rosec
= ovly_sections
[2 * i
+ 1];
4455 rotmp
= align_power (rotmp
, rosec
->alignment_power
) + rosec
->size
;
4456 if (roalign
< rosec
->alignment_power
)
4457 roalign
= rosec
->alignment_power
;
4459 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4461 if (sec
->segment_mark
)
4463 /* Pasted sections must stay together, so add their
4465 pasty
= find_pasted_call (sec
);
4466 while (pasty
!= NULL
)
4468 struct function_info
*call_fun
= pasty
->fun
;
4469 tmp
= (align_power (tmp
, call_fun
->sec
->alignment_power
)
4470 + call_fun
->sec
->size
);
4471 if (call_fun
->rodata
)
4473 rotmp
= (align_power (rotmp
,
4474 call_fun
->rodata
->alignment_power
)
4475 + call_fun
->rodata
->size
);
4476 if (roalign
< rosec
->alignment_power
)
4477 roalign
= rosec
->alignment_power
;
4479 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
4480 if (pasty
->is_pasted
)
4484 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4487 /* If we add this section, we might need new overlay call
4488 stubs. Add any overlay section calls to dummy_call. */
4490 sec_data
= spu_elf_section_data (sec
);
4491 sinfo
= sec_data
->u
.i
.stack_info
;
4492 for (k
= 0; k
< (unsigned) sinfo
->num_fun
; ++k
)
4493 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
4494 if (call
->is_pasted
)
4496 BFD_ASSERT (pasty
== NULL
);
4499 else if (call
->fun
->sec
->linker_mark
)
4501 if (!copy_callee (&dummy_caller
, call
))
4504 while (pasty
!= NULL
)
4506 struct function_info
*call_fun
= pasty
->fun
;
4508 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4509 if (call
->is_pasted
)
4511 BFD_ASSERT (pasty
== NULL
);
4514 else if (!copy_callee (&dummy_caller
, call
))
4518 /* Calculate call stub size. */
4520 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
4522 unsigned int stub_delta
= 1;
4524 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4525 stub_delta
= call
->count
;
4526 num_stubs
+= stub_delta
;
4528 /* If the call is within this overlay, we won't need a
4530 for (k
= base
; k
< i
+ 1; k
++)
4531 if (call
->fun
->sec
== ovly_sections
[2 * k
])
4533 num_stubs
-= stub_delta
;
4537 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4538 && num_stubs
> htab
->params
->max_branch
)
4540 if (align_power (tmp
, roalign
) + rotmp
4541 + num_stubs
* ovl_stub_size (htab
->params
) > overlay_size
)
4549 /* xgettext:c-format */
4550 info
->callbacks
->einfo (_("%pB:%pA%s exceeds overlay size\n"),
4551 ovly_sections
[2 * i
]->owner
,
4552 ovly_sections
[2 * i
],
4553 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
4554 bfd_set_error (bfd_error_bad_value
);
4558 while (dummy_caller
.call_list
!= NULL
)
4560 struct call_info
*call
= dummy_caller
.call_list
;
4561 dummy_caller
.call_list
= call
->next
;
4567 ovly_map
[base
++] = ovlynum
;
4570 script
= htab
->params
->spu_elf_open_overlay_script ();
4572 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4574 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4577 if (fprintf (script
,
4578 " . = ALIGN (%u);\n"
4579 " .ovl.init : { *(.ovl.init) }\n"
4580 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4581 htab
->params
->line_size
) <= 0)
4586 while (base
< count
)
4588 unsigned int indx
= ovlynum
- 1;
4589 unsigned int vma
, lma
;
4591 vma
= (indx
& (htab
->params
->num_lines
- 1)) << htab
->line_size_log2
;
4592 lma
= vma
+ (((indx
>> htab
->num_lines_log2
) + 1) << 18);
4594 if (fprintf (script
, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4595 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4596 ovlynum
, vma
, lma
) <= 0)
4599 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4600 ovly_map
, ovly_sections
, info
);
4601 if (base
== (unsigned) -1)
4604 if (fprintf (script
, " }\n") <= 0)
4610 if (fprintf (script
, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4611 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
)) <= 0)
4614 if (fprintf (script
, "}\nINSERT AFTER .toe;\n") <= 0)
4619 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4622 if (fprintf (script
,
4623 " . = ALIGN (16);\n"
4624 " .ovl.init : { *(.ovl.init) }\n"
4625 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4628 for (region
= 1; region
<= htab
->params
->num_lines
; region
++)
4632 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4640 /* We need to set lma since we are overlaying .ovl.init. */
4641 if (fprintf (script
,
4642 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4647 if (fprintf (script
, " OVERLAY :\n {\n") <= 0)
4651 while (base
< count
)
4653 if (fprintf (script
, " .ovly%u {\n", ovlynum
) <= 0)
4656 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4657 ovly_map
, ovly_sections
, info
);
4658 if (base
== (unsigned) -1)
4661 if (fprintf (script
, " }\n") <= 0)
4664 ovlynum
+= htab
->params
->num_lines
;
4665 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4669 if (fprintf (script
, " }\n") <= 0)
4673 if (fprintf (script
, "}\nINSERT BEFORE .text;\n") <= 0)
4678 free (ovly_sections
);
4680 if (fclose (script
) != 0)
4683 if (htab
->params
->auto_overlay
& AUTO_RELINK
)
4684 (*htab
->params
->spu_elf_relink
) ();
4689 bfd_set_error (bfd_error_system_call
);
4691 info
->callbacks
->einfo (_("%F%P: auto overlay error: %E\n"));
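/* Editorial addition, not part of the original source: a restatement of
   the soft-icache table sizing done inside spu_elf_auto_overlay above.
   Per the comments there, the manager needs one quadword per cache line
   for the tag array, one per line for the rewrite "to" list, a
   power-of-two number of quadwords per line for the rewrite "from" list,
   and one quadword for the pointer to the __ea backing store.  The helper
   is hypothetical and only mirrors those additions to fixed_size.  */

static ATTRIBUTE_UNUSED unsigned int
spu_example_icache_table_bytes (unsigned int num_lines_log2,
				unsigned int fromelem_size_log2)
{
  unsigned int bytes = 0;

  bytes += 16 << num_lines_log2;	/* a) Tag array.  */
  bytes += 16 << num_lines_log2;	/* b) Rewrite "to" list.  */
  bytes += 16 << (fromelem_size_log2 + num_lines_log2); /* c) "from" list.  */
  bytes += 16;				/* d) Pointer to __ea backing store.  */
  return bytes;
}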
/* Provide an estimate of total stack required.  */

static bfd_boolean
spu_elf_stack_analysis (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  struct _sum_stack_param sum_stack_param;

  if (!discover_functions (info))
    return FALSE;

  if (!build_call_tree (info))
    return FALSE;

  htab = spu_hash_table (info);
  if (htab->params->stack_analysis)
    {
      info->callbacks->info (_("Stack size for call graph root nodes.\n"));
      info->callbacks->minfo (_("\nStack size for functions. "
				"Annotations: '*' max stack, 't' tail call\n"));
    }

  sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
  sum_stack_param.overall_stack = 0;
  if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
    return FALSE;

  if (htab->params->stack_analysis)
    info->callbacks->info (_("Maximum stack required is 0x%v\n"),
			   (bfd_vma) sum_stack_param.overall_stack);
  return TRUE;
}
/* Perform a final link.  */

static bfd_boolean
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (htab->params->auto_overlay)
    spu_elf_auto_overlay (info);

  if ((htab->params->stack_analysis
       || (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->lrlive_analysis))
      && !spu_elf_stack_analysis (info))
    info->callbacks->einfo (_("%X%P: stack/lrlive analysis error: %E\n"));

  if (!spu_elf_build_stubs (info))
    info->callbacks->einfo (_("%F%P: can not build overlay stubs: %E\n"));

  return bfd_elf_final_link (output_bfd, info);
}
/* Called when not normally emitting relocs, ie. !bfd_link_relocatable (info)
   and !info->emitrelocations.  Returns a count of special relocs
   that need to be emitted.  */

static unsigned int
spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
{
  Elf_Internal_Rela *relocs;
  unsigned int count = 0;

  relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
				      info->keep_memory);
  if (relocs != NULL)
    {
      Elf_Internal_Rela *rel;
      Elf_Internal_Rela *relend = relocs + sec->reloc_count;

      for (rel = relocs; rel < relend; rel++)
	{
	  int r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    ++count;
	}

      if (elf_section_data (sec)->relocs != relocs)
	free (relocs);
    }

  return count;
}
/* Functions for adding fixup records to .fixup */

#define FIXUP_RECORD_SIZE 4

#define FIXUP_PUT(output_bfd,htab,index,addr) \
  bfd_put_32 (output_bfd, addr, \
              htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
#define FIXUP_GET(output_bfd,htab,index) \
  bfd_get_32 (output_bfd, \
              htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))

/* Store OFFSET in .fixup.  This assumes it will be called with an
   increasing OFFSET.  When this OFFSET fits with the last base offset,
   it just sets a bit, otherwise it adds a new fixup record.  */

static void
spu_elf_emit_fixup (bfd *output_bfd, struct bfd_link_info *info,
                    bfd_vma offset)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sfixup = htab->sfixup;
  bfd_vma qaddr = offset & ~(bfd_vma) 15;
  bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);

  if (sfixup->reloc_count == 0)
    {
      FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
      sfixup->reloc_count++;
    }
  else
    {
      bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
      if (qaddr != (base & ~(bfd_vma) 15))
        {
          if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
            _bfd_error_handler (_("fatal error while creating .fixup"));
          FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
          sfixup->reloc_count++;
        }
      else
        FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
    }
}
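/* Illustrative note, not from the original source: each fixup record is
   one 32-bit word holding the 16-byte aligned quadword address in its
   upper 28 bits and, in the low 4 bits, a mask of the words within that
   quadword carrying an R_SPU_ADDR32 reloc.  E.g. relocs at offsets
   0x100 and 0x108 share a single record: 0x100 | 8 | 2 == 0x10a.  */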
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */

static int
spu_elf_relocate_section (bfd *output_bfd,
                          struct bfd_link_info *info,
                          bfd *input_bfd,
                          asection *input_section,
                          bfd_byte *contents,
                          Elf_Internal_Rela *relocs,
                          Elf_Internal_Sym *local_syms,
                          asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea;
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean is_ea_sym;
  bfd_boolean stubs;
  unsigned int iovl = 0;

  htab = spu_hash_table (info);
  stubs = (htab->stub_sec != NULL
           && maybe_needs_stubs (input_section));
  iovl = overlay_index (input_section);
  ea = bfd_get_section_by_name (output_bfd, "._ea");
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
        {
          sym = local_syms + r_symndx;
          sec = local_sections[r_symndx];
          sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
          relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
        }
      else
        {
          if (sym_hashes == NULL)
            return FALSE;

          h = sym_hashes[r_symndx - symtab_hdr->sh_info];

          if (info->wrap_hash != NULL
              && (input_section->flags & SEC_DEBUGGING) != 0)
            h = ((struct elf_link_hash_entry *)
                 unwrap_hash_lookup (info, input_bfd, &h->root));

          while (h->root.type == bfd_link_hash_indirect
                 || h->root.type == bfd_link_hash_warning)
            h = (struct elf_link_hash_entry *) h->root.u.i.link;

          relocation = 0;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            {
              sec = h->root.u.def.section;
              if (sec == NULL
                  || sec->output_section == NULL)
                /* Set a flag that will be cleared later if we find a
                   relocation value for this symbol.  output_section
                   is typically NULL for symbols satisfied by a shared
                   library.  */
                unresolved_reloc = TRUE;
              else
                relocation = (h->root.u.def.value
                              + sec->output_section->vma
                              + sec->output_offset);
            }
          else if (h->root.type == bfd_link_hash_undefweak)
            ;
          else if (info->unresolved_syms_in_objects == RM_IGNORE
                   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
            ;
          else if (!bfd_link_relocatable (info)
                   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
            {
              bfd_boolean err;

              err = (info->unresolved_syms_in_objects == RM_DIAGNOSE
                     && !info->warn_unresolved_syms)
                || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT;
              info->callbacks->undefined_symbol
                (info, h->root.root.string, input_bfd,
                 input_section, rel->r_offset, err);
            }
          sym_name = h->root.root.string;
        }

      if (sec != NULL && discarded_section (sec))
        RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
                                         rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
        continue;

      /* Change "a rt,ra,rb" to "ai rt,ra,0".  */
      if (r_type == R_SPU_ADD_PIC
          && h != NULL
          && !(h->def_regular || ELF_COMMON_DEF_P (h)))
        {
          bfd_byte *loc = contents + rel->r_offset;
          loc[0] = 0x1c;
          loc[1] = 0x00;
          loc[2] &= 0x3f;
        }

      is_ea_sym = (ea != NULL
                   && sec != NULL
                   && sec->output_section == ea);

      /* If this symbol is in an overlay area, we may need to relocate
         to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
          && !is_ea_sym
          && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
                                          contents, info)) != no_stub)
        {
          unsigned int ovl = 0;
          struct got_entry *g, **head;

          if (stub_type != nonovl_stub)
            ovl = iovl;

          if (h != NULL)
            head = &h->got.glist;
          else
            head = elf_local_got_ents (input_bfd) + r_symndx;

          for (g = *head; g != NULL; g = g->next)
            if (htab->params->ovly_flavour == ovly_soft_icache
                ? (g->ovl == ovl
                   && g->br_addr == (rel->r_offset
                                     + input_section->output_offset
                                     + input_section->output_section->vma))
                : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
              break;
          if (g == NULL)
            abort ();

          relocation = g->stub_addr;
          addend = 0;
        }

      /* For soft icache, encode the overlay index into addresses.  */
      if (htab->params->ovly_flavour == ovly_soft_icache
          && (r_type == R_SPU_ADDR16_HI
              || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
          && !is_ea_sym)
        {
          unsigned int ovl = overlay_index (sec);
          if (ovl != 0)
            {
              unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
              relocation += set_id << 18;
            }
        }
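      /* Illustrative note, not from the original source: SPU local store
         addresses fit in 18 bits, so the icache set number rides in the
         bits above them.  E.g. with num_lines_log2 == 5 (32 cache lines,
         hypothetical), overlay 33 is in set ((33 - 1) >> 5) + 1 == 2 and
         2 << 18 is added to the address.  */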
      if (htab->params->emit_fixups && !bfd_link_relocatable (info)
          && (input_section->flags & SEC_ALLOC) != 0
          && r_type == R_SPU_ADDR32)
        {
          bfd_vma offset;

          offset = rel->r_offset + input_section->output_section->vma
                   + input_section->output_offset;
          spu_elf_emit_fixup (output_bfd, info, offset);
        }

      if (unresolved_reloc)
        ;
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
        {
          if (is_ea_sym)
            {
              /* ._ea is a special section that isn't allocated in SPU
                 memory, but rather occupies space in PPU memory as
                 part of an embedded ELF image.  If this reloc is
                 against a symbol defined in ._ea, then transform the
                 reloc into an equivalent one without a symbol
                 relative to the start of the ELF image.  */
              rel->r_addend += (relocation
                                - ea->vma
                                + elf_section_data (ea)->this_hdr.sh_offset);
              rel->r_info = ELF32_R_INFO (0, r_type);
            }
          emit_these_relocs = TRUE;
          continue;
        }
      else if (is_ea_sym)
        unresolved_reloc = TRUE;

      if (unresolved_reloc
          && _bfd_elf_section_offset (output_bfd, info, input_section,
                                      rel->r_offset) != (bfd_vma) -1)
        {
          _bfd_error_handler
            /* xgettext:c-format */
            (_("%pB(%s+%#" PRIx64 "): "
               "unresolvable %s relocation against symbol `%s'"),
             input_bfd,
             bfd_section_name (input_section),
             (uint64_t) rel->r_offset,
             howto->name,
             sym_name);
          ret = FALSE;
        }

      r = _bfd_final_link_relocate (howto,
                                    input_bfd,
                                    input_section,
                                    contents,
                                    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
        {
          const char *msg = (const char *) 0;

          switch (r)
            {
            case bfd_reloc_overflow:
              (*info->callbacks->reloc_overflow)
                (info, (h ? &h->root : NULL), sym_name, howto->name,
                 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
              break;

            case bfd_reloc_undefined:
              (*info->callbacks->undefined_symbol)
                (info, sym_name, input_bfd, input_section, rel->r_offset, TRUE);
              break;

            case bfd_reloc_outofrange:
              msg = _("internal error: out of range error");
              goto common_error;

            case bfd_reloc_notsupported:
              msg = _("internal error: unsupported relocation error");
              goto common_error;

            case bfd_reloc_dangerous:
              msg = _("internal error: dangerous error");
              goto common_error;

            default:
              msg = _("internal error: unknown error");
              /* Fall through.  */

            common_error:
              ret = FALSE;
              (*info->callbacks->warning) (info, msg, sym_name, input_bfd,
                                           input_section, rel->r_offset);
              break;
            }
        }
    }

  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
        {
          int r_type;

          r_type = ELF32_R_TYPE (rel->r_info);
          if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
            *wrel++ = *rel;
        }
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = _bfd_elf_single_rel_hdr (input_section);
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}

static bfd_boolean
spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
                                 struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  return TRUE;
}
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */

static int
spu_elf_output_symbol_hook (struct bfd_link_info *info,
                            const char *sym_name ATTRIBUTE_UNUSED,
                            Elf_Internal_Sym *sym,
                            asection *sym_sec ATTRIBUTE_UNUSED,
                            struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (!bfd_link_relocatable (info)
      && htab->stub_sec != NULL
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
          || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct got_entry *g;

      for (g = h->got.glist; g != NULL; g = g->next)
        if (htab->params->ovly_flavour == ovly_soft_icache
            ? g->br_addr == g->stub_addr
            : g->addend == 0 && g->ovl == 0)
          {
            sym->st_shndx = (_bfd_elf_section_from_bfd_section
                             (htab->stub_sec[0]->output_section->owner,
                              htab->stub_sec[0]->output_section));
            sym->st_value = g->stub_addr;
            break;
          }
    }

  return 1;
}
static int spu_plugin = 0;

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}

/* Set ELF header e_type for plugins.  */

static bfd_boolean
spu_elf_init_file_header (bfd *abfd, struct bfd_link_info *info)
{
  if (!_bfd_elf_init_file_header (abfd, info))
    return FALSE;

  if (spu_plugin)
    {
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);

      i_ehdrp->e_type = ET_DYN;
    }
  return TRUE;
}
/* We may add an extra PT_LOAD segment for .toe.  We also need extra
   segments for overlays.  */

static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  int extra = 0;
  asection *sec;

  if (info != NULL)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      extra = htab->num_overlays;
    }

  if (extra)
    ++extra;

  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    ++extra;

  return extra;
}
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay, **first_load;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
        if ((s = m->sections[i]) == toe
            || spu_elf_section_data (s)->u.o.ovl_index != 0)
          {
            struct elf_segment_map *m2;
            size_t amt;

            if (i + 1 < m->count)
              {
                amt = sizeof (struct elf_segment_map);
                amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
                m2 = bfd_zalloc (abfd, amt);
                if (m2 == NULL)
                  return FALSE;
                m2->count = m->count - (i + 1);
                memcpy (m2->sections, m->sections + i + 1,
                        m2->count * sizeof (m->sections[0]));
                m2->p_type = PT_LOAD;
                m2->next = m->next;
                m->next = m2;
              }
            m->count = 1;
            if (i != 0)
              {
                m->count = i;
                amt = sizeof (struct elf_segment_map);
                m2 = bfd_zalloc (abfd, amt);
                if (m2 == NULL)
                  return FALSE;
                m2->p_type = PT_LOAD;
                m2->count = 1;
                m2->sections[0] = s;
                m2->next = m->next;
                m->next = m2;
              }
            break;
          }

  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_seg_map (abfd);
  p_overlay = &m_overlay;
  m_overlay = NULL;
  first_load = NULL;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD)
        {
          if (!first_load)
            first_load = p;
          if ((*p)->count == 1
              && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
            {
              m = *p;
              m->no_sort_lma = 1;
              *p = m->next;
              *p_overlay = m;
              p_overlay = &m->next;
              continue;
            }
        }
      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.  */
  if (m_overlay != NULL)
    {
      p = first_load;
      if (*p != NULL && (*p)->p_type == PT_LOAD && (*p)->includes_filehdr)
        /* It doesn't really make sense for someone to include the ELF
           file header into an spu image, but if they do the code that
           assigns p_offset needs to see the segment containing the
           file header first.  */
        p = &(*p)->next;
      *p_overlay = *p;
      *p = m_overlay;
    }

  return TRUE;
}
/* Tweak the section type of .note.spu_name.  */

static bfd_boolean
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
                       Elf_Internal_Shdr *hdr,
                       asection *sec)
{
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
    hdr->sh_type = SHT_NOTE;
  return TRUE;
}
/* Tweak phdrs before writing them out.  */

static bfd_boolean
spu_elf_modify_headers (bfd *abfd, struct bfd_link_info *info)
{
  if (info != NULL)
    {
      const struct elf_backend_data *bed;
      struct elf_obj_tdata *tdata;
      Elf_Internal_Phdr *phdr, *last;
      struct spu_link_hash_table *htab;
      unsigned int count;
      unsigned int i;

      bed = get_elf_backend_data (abfd);
      tdata = elf_tdata (abfd);
      phdr = tdata->phdr;
      count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
      htab = spu_hash_table (info);
      if (htab->num_overlays != 0)
        {
          struct elf_segment_map *m;
          unsigned int o;

          for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
            if (m->count != 0
                && ((o = spu_elf_section_data (m->sections[0])->u.o.ovl_index)
                    != 0))
              {
                /* Mark this as an overlay header.  */
                phdr[i].p_flags |= PF_OVERLAY;

                if (htab->ovtab != NULL && htab->ovtab->size != 0
                    && htab->params->ovly_flavour != ovly_soft_icache)
                  {
                    bfd_byte *p = htab->ovtab->contents;
                    unsigned int off = o * 16 + 8;

                    /* Write file_off into _ovly_table.  */
                    bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
                  }
              }
          /* Soft-icache has its file offset put in .ovl.init.  */
          if (htab->init != NULL && htab->init->size != 0)
            {
              bfd_vma val
                = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

              bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
            }
        }

      /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
         of 16.  This should always be possible when using the standard
         linker scripts, but don't create overlapping segments if
         someone is playing games with linker scripts.  */
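      /* Illustrative note, not from the original source: "-x & 15" is
         the amount needed to round x up to a multiple of 16, e.g. a
         p_filesz of 0x123 gives an adjust of 0xd and a padded size of
         0x130.  */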
      last = NULL;
      for (i = count; i-- != 0; )
        if (phdr[i].p_type == PT_LOAD)
          {
            unsigned adjust;

            adjust = -phdr[i].p_filesz & 15;
            if (adjust != 0
                && last != NULL
                && (phdr[i].p_offset + phdr[i].p_filesz
                    > last->p_offset - adjust))
              break;

            adjust = -phdr[i].p_memsz & 15;
            if (adjust != 0
                && last != NULL
                && phdr[i].p_filesz != 0
                && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
                && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
              break;

            if (phdr[i].p_filesz != 0)
              last = &phdr[i];
          }

      if (i == (unsigned int) -1)
        for (i = count; i-- != 0; )
          if (phdr[i].p_type == PT_LOAD)
            {
              unsigned adjust;

              adjust = -phdr[i].p_filesz & 15;
              phdr[i].p_filesz += adjust;

              adjust = -phdr[i].p_memsz & 15;
              phdr[i].p_memsz += adjust;
            }
    }

  return _bfd_elf_modify_headers (abfd, info);
}
bfd_boolean
spu_elf_size_sections (bfd *obfd ATTRIBUTE_UNUSED, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  if (htab->params->emit_fixups)
    {
      asection *sfixup = htab->sfixup;
      int fixup_count = 0;
      bfd *ibfd;
      size_t size;

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
        {
          asection *isec;

          if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
            continue;

          /* Walk over each section attached to the input bfd.  */
          for (isec = ibfd->sections; isec != NULL; isec = isec->next)
            {
              Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
              bfd_vma base_end;

              /* If there aren't any relocs, then there's nothing more
                 to do.  */
              if ((isec->flags & SEC_ALLOC) == 0
                  || (isec->flags & SEC_RELOC) == 0
                  || isec->reloc_count == 0)
                continue;

              /* Get the relocs.  */
              internal_relocs =
                _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
                                           info->keep_memory);
              if (internal_relocs == NULL)
                return FALSE;

              /* 1 quadword can contain up to 4 R_SPU_ADDR32
                 relocations.  They are stored in a single word by
                 saving the upper 28 bits of the address and setting the
                 lower 4 bits to a bit mask of the words that have the
                 relocation.  BASE_END keeps track of the next quadword.  */
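              /* Illustrative note, not from the original source:
                 R_SPU_ADDR32 relocs at offsets 0x40, 0x44 and 0x4c all
                 fall in the quadword at 0x40 and so add just one fixup
                 record; a reloc at 0x50 starts the next one.  */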
              irela = internal_relocs;
              irelaend = irela + isec->reloc_count;
              base_end = 0;
              for (; irela < irelaend; irela++)
                if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
                    && irela->r_offset >= base_end)
                  {
                    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
                    fixup_count++;
                  }
            }
        }

      /* We always have a NULL fixup as a sentinel */
      size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
      if (!bfd_set_section_size (sfixup, size))
        return FALSE;
      sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
      if (sfixup->contents == NULL)
        return FALSE;
    }
  return TRUE;
}
#define TARGET_BIG_SYM		spu_elf32_vec
#define TARGET_BIG_NAME		"elf32-spu"
#define ELF_ARCH		bfd_arch_spu
#define ELF_TARGET_ID		SPU_ELF_DATA
#define ELF_MACHINE_CODE	EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE		0x80
#define elf_backend_rela_normal		1
#define elf_backend_can_gc_sections	1

#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
#define elf_info_to_howto			spu_elf_info_to_howto
#define elf_backend_count_relocs		spu_elf_count_relocs
#define elf_backend_relocate_section		spu_elf_relocate_section
#define elf_backend_finish_dynamic_sections	spu_elf_finish_dynamic_sections
#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
#define elf_backend_object_p			spu_elf_object_p
#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
#define elf_backend_modify_headers		spu_elf_modify_headers
#define elf_backend_init_file_header		spu_elf_init_file_header
#define elf_backend_fake_sections		spu_elf_fake_sections
#define elf_backend_special_sections		spu_elf_special_sections
#define bfd_elf32_bfd_final_link		spu_elf_final_link

#include "elf32-target.h"