/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
#include "libiberty.h"
#include "elf32-spu.h"

/* We use RELA style relocs.  Don't define USE_REL.  */
static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
					   void *, asection *,
					   bfd *, char **);
/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 0,  0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16, TRUE,   7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9,       2, 2,  9, TRUE,   0, complain_overflow_signed,
	 spu_elf_rel9,          "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9, TRUE,   0, complain_overflow_signed,
	 spu_elf_rel9,          "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32, TRUE,   0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
};
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    }
}
static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
		       arelent *cache_ptr,
		       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}
static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			   bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == R_SPU_NONE)
    return NULL;

  return elf_howto_table + r_type;
}
static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			   const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
	&& strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
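/* Worked example (illustrative only, not from the original source): the
   statement above splits the 9-bit value into a low 7-bit field and a
   high 2-bit field.  With val = 0x1ff (-1 as a 9-bit value):

     (0x1ff & 0x7f)          = 0x0000007f
     ((0x1ff & 0x180) << 7)  = 0x0000c000   selected by REL9I mask 0x0000c07f
     ((0x1ff & 0x180) << 16) = 0x01800000   selected by REL9  mask 0x0180007f

   so the same packed word works for either howto's dst_mask.  */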
static bfd_boolean
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
	return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}
/* Set up overlay info for executables.  */

static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
	if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
	  {
	    unsigned int j;

	    ++num_ovl;
	    if (last_phdr == NULL
		|| ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
	      ++num_buf;
	    last_phdr = phdr;
	    for (j = 1; j < elf_numsections (abfd); j++)
	      {
		Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

		if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
		  {
		    asection *sec = shdr->bfd_section;
		    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
		    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
		  }
	      }
	  }
    }
  return TRUE;
}
/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;
}
/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  struct elf_link_hash_entry *ovly_load;
  struct elf_link_hash_entry *ovly_return;
  unsigned long ovly_load_r_symndx;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;

  /* How much memory we have.  */
  unsigned int local_store;
  /* Local store --auto-overlay should reserve for non-overlay
     functions and data.  */
  unsigned int overlay_fixed;
  /* Local store --auto-overlay should reserve for stack and heap.  */
  unsigned int reserved;
  /* If reserved is not specified, stack analysis will calculate a value
     for the stack.  This parameter adjusts that value to allow for
     negative sp access (the ABI says 2000 bytes below sp are valid,
     and the overlay manager uses some of this area).  */
  int extra_stack_space;
  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Set on error.  */
  unsigned int stub_err : 1;
};

/* Hijack the generic got fields for overlay stub accounting.  */

struct got_entry
{
  struct got_entry *next;
  unsigned int ovl;
  union
    {
      bfd_vma addend;
      bfd_vma br_addr;
    };
  bfd_vma stub_addr;
};

#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))
struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int priority : 13;
};

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union
    {
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
    } u;
  /* Function section.  */
  asection *sec;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section.  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};

struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};
static struct function_info *find_function (asection *, bfd_vma,
					     struct bfd_link_info *);
/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
				      _bfd_elf_link_hash_newfunc,
				      sizeof (struct elf_link_hash_entry)))
    {
      free (htab);
      return NULL;
    }

  memset (&htab->ovtab, 0,
	  sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));

  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}
void
spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  htab->params = params;
  htab->line_size_log2 = bfd_log2 (htab->params->line_size);
  htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
}
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
					    symtab_hdr->sh_info,
					    0, NULL, NULL, NULL);
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}
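/* Illustrative usage note (not part of the original sources): a reloc
   scanning loop typically calls the helper above as

       if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_symndx, ibfd))
	 return FALSE;

   after which at most one of H (global symbol) or SYM (local symbol) is
   non-NULL, and SYM_SEC is the section the symbol is defined in, or
   NULL if it has none.  */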
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}
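/* Illustrative note (not from the original source): the buffer built
   above follows the usual ELF note layout, so for an output file named
   "app" it would contain roughly

     offset 0:  namesz = sizeof (SPU_PLUGIN_NAME)
     offset 4:  descsz = strlen ("app") + 1
     offset 8:  type   = 1
     offset 12: name   = SPU_PLUGIN_NAME, padded to a multiple of 4
     then:      desc   = "app", padded to a multiple of 4  */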
/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}
/* Identify overlays in the output bfd, and number them.  */

bfd_boolean
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  const char *ovly_mgr_entry;

  if (info->output_bfd->section_count < 2)
    return FALSE;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return FALSE;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
	&& (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
	&& s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return FALSE;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;
      bfd_vma lma_start = 0;

      for (i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];
	      vma_start = s0->vma;
	      lma_start = s0->lma;
	      ovl_end = (s0->vma
			 + ((bfd_vma) 1
			    << (htab->num_lines_log2 + htab->line_size_log2)));
	      --i;
	      break;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma >= ovl_end)
	    break;

	  /* A section in an overlay area called .ovl.init is not
	     an overlay, in the sense that it might be loaded in
	     by the overlay manager, but rather the initial
	     section contents for the overlay buffer.  */
	  if (strncmp (s->name, ".ovl.init", 9) != 0)
	    {
	      num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
	      if (((s->vma - vma_start) & (htab->params->line_size - 1))
		  || ((s->lma - lma_start) & (htab->params->line_size - 1)))
		info->callbacks->einfo (_("%X%P: overlay section %A "
					  "does not start on a cache line.\n"),
					s);
	      else if (s->size > htab->params->line_size)
		info->callbacks->einfo (_("%X%P: overlay section %A "
					  "is larger than a cache line.\n"),
					s);

	      alloc_sec[ovl_index++] = s;
	      spu_elf_section_data (s)->u.o.ovl_index
		= ((s->lma - lma_start) >> htab->line_size_log2) + 1;
	      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
	    }
	}

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    info->callbacks->einfo (_("%X%P: overlay section %A "
				      "is not in cache area.\n"),
				    s);
	  else
	    ovl_end = s->vma + s->size;
	}
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
	 Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];

	      if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
		{
		  ++num_buf;
		  if (strncmp (s0->name, ".ovl.init", 9) != 0)
		    {
		      alloc_sec[ovl_index] = s0;
		      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
		      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
		    }
		}
	      if (strncmp (s->name, ".ovl.init", 9) != 0)
		{
		  alloc_sec[ovl_index] = s;
		  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
		  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
		  if (s0->vma != s->vma)
		    info->callbacks->einfo (_("%X%P: overlay sections %A "
					      "and %A do not start at the "
					      "same address.\n"),
					    s0, s);
		  if (ovl_end < s->vma + s->size)
		    ovl_end = s->vma + s->size;
		}
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;
  ovly_mgr_entry = "__ovly_load";
  if (htab->params->ovly_flavour == ovly_soft_icache)
    ovly_mgr_entry = "__icache_br_handler";
  htab->ovly_load = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
					  FALSE, FALSE, FALSE);
  if (htab->params->ovly_flavour != ovly_soft_icache)
    htab->ovly_return = elf_link_hash_lookup (&htab->elf, "__ovly_return",
					      FALSE, FALSE, FALSE);
  return ovl_index != 0;
}
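/* Illustrative note (not from the original source): overlays are
   recognised purely by overlapping output VMAs, so two sections placed
   at the same address in different LMA regions become overlays 1 and 2
   in buffer 1, while a section named ".ovl.init" at that address is
   treated as initial buffer contents rather than an overlay.  */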
/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

#define BRA	0x30000000
#define BRASL	0x31000000
#define BR	0x32000000
#define BRSL	0x33000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bfd_boolean
is_branch (const unsigned char *insn)
{
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}

/* Return true for all indirect branch instructions.
   bi     00110101 000
   bisl   00110101 001
   iret   00110101 010
   bisled 00110101 011
   biz    00100101 000
   binz   00100101 001
   bihz   00100101 010
   bihnz  00100101 011  */

static bfd_boolean
is_indirect_branch (const unsigned char *insn)
{
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.
   hbra  0001000..
   hbrr  0001001..  */

static bfd_boolean
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}
/* True if INPUT_SECTION might need overlay stubs.  */

static bfd_boolean
maybe_needs_stubs (asection *input_section)
{
  /* No stubs for debug sections and suchlike.  */
  if ((input_section->flags & SEC_ALLOC) == 0)
    return FALSE;

  /* No stubs for link-once sections that will be discarded.  */
  if (input_section->output_section == bfd_abs_section_ptr)
    return FALSE;

  /* Don't create stubs for .eh_frame references.  */
  if (strcmp (input_section->name, ".eh_frame") == 0)
    return FALSE;

  return TRUE;
}
/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		const unsigned char *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  unsigned char insn[4];

  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_load || h == htab->ovly_return)
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      if (contents == NULL)
	{
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section, insn,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
	{
	  call = (contents[0] & 0xfd) == 0x31;
	  if (call
	      && sym_type != STT_FUNC)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr, sym, sym_sec);
		}
	      (*_bfd_error_handler) (_("warning: call to non-function"
				       " symbol %s defined in %B"),
				     sym_sec->owner, sym_name);
	    }
	}
    }

  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
      != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      if (call || sym_type == STT_FUNC)
	ret = call_ovl_stub;
      else
	{
	  ret = br000_ovl_stub;

	  if (branch)
	    {
	      unsigned int lrlive = (contents[1] & 0x70) >> 4;
	      ret += lrlive;
	    }
	}
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}
static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      if (elf_local_got_ents (ibfd) == NULL)
	{
	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
			       * sizeof (*elf_local_got_ents (ibfd)));
	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
	  if (elf_local_got_ents (ibfd) == NULL)
	    return FALSE;
	}
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      htab->stub_count[ovl] += 1;
      return TRUE;
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && g->ovl == 0)
	  break;

      if (g == NULL)
	{
	  /* Need a new non-overlay area stub.  Zap other stubs.  */
	  for (g = *head; g != NULL; g = gnext)
	    {
	      gnext = g->next;
	      if (g->addend == addend)
		{
		  htab->stub_count[g->ovl] -= 1;
		  free (g);
		}
	    }
	}
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}
/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.  */

static unsigned int
ovl_stub_size (enum _ovly_flavour ovly_flavour)
{
  return 8 << ovly_flavour;
}
/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load

   Software icache stubs are:

   .word target_index
   .word target_ia;
   .word lrlive_branchlocalstoreaddr;
   brasl $75,__icache_br_handler
   .quad xor_pattern
*/
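/* Worked example (illustrative, not from the original source): for a
   two-instruction stub whose target lives in overlay 3 at local store
   address 0x12345, the trailing data word would be

       (3 << 18) | 0x12345  ==  0x000d2345

   i.e. overlay number in the top 14 bits, address in the low 18 bits,
   matching the "(dest & 0x3ffff) | (dest_ovl << 18)" store in
   build_stub below.  */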
static bfd_boolean
build_stub (struct bfd_link_info *info,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
	    bfd_vma dest,
	    asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
	g->br_addr = (irela->r_offset
		      + isec->output_offset
		      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
      if (g == NULL)
	return FALSE;

      if (g->ovl == 0 && ovl != 0)
	return TRUE;

      if (g->stub_addr != (bfd_vma) -1)
	return TRUE;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_load->root.u.def.value
	+ htab->ovly_load->root.u.def.section->output_offset
	+ htab->ovly_load->root.u.def.section->output_section->vma);

  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  switch (htab->params->ovly_flavour)
    {
      /* The four-instruction stub.  */
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
      else
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
      break;

      /* The two-instruction stub.  */
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      else
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
      break;

    case ovly_soft_icache:
      lrlive = 0;
      if (stub_type == nonovl_stub)
	;
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
	lrlive = 5;
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
	lrlive = 1;
      else if (irela != NULL)
	{
	  /* Analyse branch instructions.  */
	  struct function_info *caller;
	  bfd_vma off;

	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	  else
	    {
	      struct function_info *found = NULL;

	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
		found = caller;
	      while (caller->start != NULL)
		{
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)
		    found = caller;
		}
	      if (found != NULL)
		caller = found;
	      off = (bfd_vma) -1;
	    }

	  if (off > caller->sp_adjust)
	    {
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		lrlive = 1;
	      else
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
		lrlive = 4;
	    }
	  else if (off > caller->lr_store)
	    /* Between lr save and stack adjust.  */
	    lrlive = 3;
	  else
	    /* On entry to function.  */
	    lrlive = 5;

	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);
	}

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;

      /* The branch that uses this stub goes to stub_addr + 12.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      g->stub_addr += 12;
      br_dest = g->stub_addr;
      if (irela == NULL)
	{
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;
	  br_dest = to;
	}

      bfd_put_32 (sec->owner, dest_ovl - 1,
		  sec->contents + sec->size + 0);
      set_id = (dest_ovl - 1) >> htab->num_lines_log2;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 12);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 16 + (g->br_addr & 0xf));
      if (ovl == 0)
	/* Extra space for linked list entries.  */
	sec->size += 16;
      break;
    }

  sec->size += ovl_stub_size (htab->params->ovly_flavour);

  if (htab->params->emit_stub_syms)
    {
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
	len += strlen (h->root.root.string);
      else
	len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
	add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
	len += 1 + 8;
      name = bfd_malloc (len);
      if (name == NULL)
	return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params->ovly_flavour);
	  h->root.u.def.value = sec->size - h->size;
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	}
    }

  return TRUE;
}
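/* Illustrative note (not from the original source): with
   --emit-stub-syms the code above names each stub after its overlay
   and target, e.g. "00000002.ovl_call.foo+10" for a stub in overlay 2
   reaching symbol foo with addend 0x10; local symbols get a
   "section-id:symbol-index" suffix instead of a name.  */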
/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  */

static bfd_boolean
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
	  || htab->params->non_overlay_stubs))
    return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);

  return TRUE;
}

static bfd_boolean
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
	  || htab->params->non_overlay_stubs))
    return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
		       h->root.u.def.value, sym_sec);

  return TRUE;
}
1460 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1462 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1465 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1467 extern const bfd_target bfd_elf32_spu_vec
;
1468 Elf_Internal_Shdr
*symtab_hdr
;
1470 Elf_Internal_Sym
*local_syms
= NULL
;
1472 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1475 /* We'll need the symbol table in a second. */
1476 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1477 if (symtab_hdr
->sh_info
== 0)
1480 /* Walk over each section attached to the input bfd. */
1481 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1483 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1485 /* If there aren't any relocs, then there's nothing more to do. */
1486 if ((isec
->flags
& SEC_RELOC
) == 0
1487 || isec
->reloc_count
== 0)
1490 if (!maybe_needs_stubs (isec
))
1493 /* Get the relocs. */
1494 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1496 if (internal_relocs
== NULL
)
1497 goto error_ret_free_local
;
1499 /* Now examine each relocation. */
1500 irela
= internal_relocs
;
1501 irelaend
= irela
+ isec
->reloc_count
;
1502 for (; irela
< irelaend
; irela
++)
1504 enum elf_spu_reloc_type r_type
;
1505 unsigned int r_indx
;
1507 Elf_Internal_Sym
*sym
;
1508 struct elf_link_hash_entry
*h
;
1509 enum _stub_type stub_type
;
1511 r_type
= ELF32_R_TYPE (irela
->r_info
);
1512 r_indx
= ELF32_R_SYM (irela
->r_info
);
1514 if (r_type
>= R_SPU_max
)
1516 bfd_set_error (bfd_error_bad_value
);
1517 error_ret_free_internal
:
1518 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1519 free (internal_relocs
);
1520 error_ret_free_local
:
1521 if (local_syms
!= NULL
1522 && (symtab_hdr
->contents
1523 != (unsigned char *) local_syms
))
1528 /* Determine the reloc target section. */
1529 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1530 goto error_ret_free_internal
;
1532 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1534 if (stub_type
== no_stub
)
1536 else if (stub_type
== stub_error
)
1537 goto error_ret_free_internal
;
1539 if (htab
->stub_count
== NULL
)
1542 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1543 htab
->stub_count
= bfd_zmalloc (amt
);
1544 if (htab
->stub_count
== NULL
)
1545 goto error_ret_free_internal
;
1550 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1551 goto error_ret_free_internal
;
1558 dest
= h
->root
.u
.def
.value
;
1560 dest
= sym
->st_value
;
1561 dest
+= irela
->r_addend
;
1562 if (!build_stub (info
, ibfd
, isec
, stub_type
, h
, irela
,
1564 goto error_ret_free_internal
;
1568 /* We're done with the internal relocs, free them. */
1569 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1570 free (internal_relocs
);
1573 if (local_syms
!= NULL
1574 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1576 if (!info
->keep_memory
)
1579 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1586 /* Allocate space for overlay call and return stubs. */
1589 spu_elf_size_stubs (struct bfd_link_info
*info
)
1591 struct spu_link_hash_table
*htab
;
1599 if (!process_stubs (info
, FALSE
))
1602 htab
= spu_hash_table (info
);
1603 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1607 if (htab
->stub_count
== NULL
)
1610 ibfd
= info
->input_bfds
;
1611 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1612 htab
->stub_sec
= bfd_zmalloc (amt
);
1613 if (htab
->stub_sec
== NULL
)
1616 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1617 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1618 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1619 htab
->stub_sec
[0] = stub
;
1621 || !bfd_set_section_alignment (ibfd
, stub
,
1622 htab
->params
->ovly_flavour
+ 3))
1624 stub
->size
= htab
->stub_count
[0] * ovl_stub_size (htab
->params
->ovly_flavour
);
1625 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1626 /* Extra space for linked list entries. */
1627 stub
->size
+= htab
->stub_count
[0] * 16;
1628 (*htab
->params
->place_spu_section
) (stub
, NULL
, ".text");
1630 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1632 asection
*osec
= htab
->ovl_sec
[i
];
1633 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1634 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1635 htab
->stub_sec
[ovl
] = stub
;
1637 || !bfd_set_section_alignment (ibfd
, stub
,
1638 htab
->params
->ovly_flavour
+ 3))
1640 stub
->size
= htab
->stub_count
[ovl
] * ovl_stub_size (htab
->params
->ovly_flavour
);
1641 (*htab
->params
->place_spu_section
) (stub
, osec
, NULL
);
1644 flags
= (SEC_ALLOC
| SEC_LOAD
1645 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1646 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1647 if (htab
->ovtab
== NULL
1648 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1651 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1653 /* Space for icache manager tables.
1654 a) Tag array, one quadword per cache line.
1655 b) Linked list elements, max_branch per line quadwords.
1656 c) Indirect branch descriptors, 8 quadwords. */
1657 htab
->ovtab
->size
= 16 * (((1 + htab
->params
->max_branch
)
1658 << htab
->num_lines_log2
)
1661 htab
->init
= bfd_make_section_anyway_with_flags (ibfd
, ".ovini", flags
);
1662 if (htab
->init
== NULL
1663 || !bfd_set_section_alignment (ibfd
, htab
->init
, 4))
1666 htab
->init
->size
= 16;
1667 (*htab
->params
->place_spu_section
) (htab
->init
, NULL
, ".ovl.init");
1671 /* htab->ovtab consists of two arrays.
1681 . } _ovly_buf_table[];
1684 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1687 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1688 ovout
= ".data.icache";
1689 (*htab
->params
->place_spu_section
) (htab
->ovtab
, NULL
, ovout
);
1691 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1692 if (htab
->toe
== NULL
1693 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1695 htab
->toe
->size
= htab
->params
->ovly_flavour
== ovly_soft_icache
? 256 : 16;
1696 (*htab
->params
->place_spu_section
) (htab
->toe
, NULL
, ".toe");
1701 /* Functions to handle embedded spu_ovl.o object. */
1704 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1710 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1716 struct _ovl_stream
*os
;
1720 os
= (struct _ovl_stream
*) stream
;
1721 max
= (const char *) os
->end
- (const char *) os
->start
;
1723 if ((ufile_ptr
) offset
>= max
)
1727 if (count
> max
- offset
)
1728 count
= max
- offset
;
1730 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1735 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1737 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1744 return *ovl_bfd
!= NULL
;
1748 overlay_index (asection
*sec
)
1751 || sec
->output_section
== bfd_abs_section_ptr
)
1753 return spu_elf_section_data (sec
->output_section
)->u
.o
.ovl_index
;
1756 /* Define an STT_OBJECT symbol. */
1758 static struct elf_link_hash_entry
*
1759 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1761 struct elf_link_hash_entry
*h
;
1763 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1767 if (h
->root
.type
!= bfd_link_hash_defined
1770 h
->root
.type
= bfd_link_hash_defined
;
1771 h
->root
.u
.def
.section
= htab
->ovtab
;
1772 h
->type
= STT_OBJECT
;
1775 h
->ref_regular_nonweak
= 1;
1778 else if (h
->root
.u
.def
.section
->owner
!= NULL
)
1780 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1781 h
->root
.u
.def
.section
->owner
,
1782 h
->root
.root
.string
);
1783 bfd_set_error (bfd_error_bad_value
);
1788 (*_bfd_error_handler
) (_("you are not allowed to define %s in a script"),
1789 h
->root
.root
.string
);
1790 bfd_set_error (bfd_error_bad_value
);
1797 /* Fill in all stubs and the overlay tables. */
1800 spu_elf_build_stubs (struct bfd_link_info
*info
)
1802 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1803 struct elf_link_hash_entry
*h
;
1809 if (htab
->stub_count
== NULL
)
1812 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1813 if (htab
->stub_sec
[i
]->size
!= 0)
1815 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1816 htab
->stub_sec
[i
]->size
);
1817 if (htab
->stub_sec
[i
]->contents
== NULL
)
1819 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1820 htab
->stub_sec
[i
]->size
= 0;
1823 h
= htab
->ovly_load
;
1826 const char *ovly_mgr_entry
= "__ovly_load";
1828 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1829 ovly_mgr_entry
= "__icache_br_handler";
1830 h
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
1831 FALSE
, FALSE
, FALSE
);
1832 htab
->ovly_load
= h
;
1834 BFD_ASSERT (h
!= NULL
1835 && (h
->root
.type
== bfd_link_hash_defined
1836 || h
->root
.type
== bfd_link_hash_defweak
)
1839 s
= h
->root
.u
.def
.section
->output_section
;
1840 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1842 (*_bfd_error_handler
) (_("%s in overlay section"),
1843 h
->root
.root
.string
);
1844 bfd_set_error (bfd_error_bad_value
);
1848 h
= htab
->ovly_return
;
1849 if (h
== NULL
&& htab
->params
->ovly_flavour
!= ovly_soft_icache
)
1851 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return",
1852 FALSE
, FALSE
, FALSE
);
1853 htab
->ovly_return
= h
;
1856 /* Fill in all the stubs. */
1857 process_stubs (info
, TRUE
);
1858 if (!htab
->stub_err
)
1859 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1863 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1864 bfd_set_error (bfd_error_bad_value
);
1868 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1870 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1872 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1873 bfd_set_error (bfd_error_bad_value
);
1876 htab
->stub_sec
[i
]->rawsize
= 0;
1879 if (htab
->ovtab
== NULL
|| htab
->ovtab
->size
== 0)
1882 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1883 if (htab
->ovtab
->contents
== NULL
)
1886 p
= htab
->ovtab
->contents
;
1887 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1889 #define BI_HANDLER "__icache_ptr_handler0"
1890 char name
[sizeof (BI_HANDLER
)];
1891 bfd_vma off
, icache_base
, linklist
, bihand
;
1893 h
= define_ovtab_symbol (htab
, "__icache_tagbase");
1896 h
->root
.u
.def
.value
= 0;
1897 h
->size
= 16 << htab
->num_lines_log2
;
1899 icache_base
= htab
->ovl_sec
[0]->vma
;
1900 linklist
= (htab
->ovtab
->output_section
->vma
1901 + htab
->ovtab
->output_offset
1903 for (i
= 0; i
< htab
->params
->num_lines
; i
++)
1905 bfd_vma line_end
= icache_base
+ ((i
+ 1) << htab
->line_size_log2
);
1906 bfd_vma stub_base
= line_end
- htab
->params
->max_branch
* 32;
1907 bfd_vma link_elem
= linklist
+ i
* htab
->params
->max_branch
* 16;
1908 bfd_vma locator
= link_elem
- stub_base
/ 2;
1910 bfd_put_32 (htab
->ovtab
->owner
, locator
, p
+ 4);
1911 bfd_put_16 (htab
->ovtab
->owner
, link_elem
, p
+ 8);
1912 bfd_put_16 (htab
->ovtab
->owner
, link_elem
, p
+ 10);
1913 bfd_put_16 (htab
->ovtab
->owner
, link_elem
, p
+ 12);
1914 bfd_put_16 (htab
->ovtab
->owner
, link_elem
, p
+ 14);
1918 h
= define_ovtab_symbol (htab
, "__icache_linked_list");
1921 h
->root
.u
.def
.value
= off
;
1922 h
->size
= htab
->params
->max_branch
<< (htab
->num_lines_log2
+ 4);
1926 h
= elf_link_hash_lookup (&htab
->elf
, "__icache_bi_handler",
1927 FALSE
, FALSE
, FALSE
);
1930 && (h
->root
.type
== bfd_link_hash_defined
1931 || h
->root
.type
== bfd_link_hash_defweak
)
1933 bihand
= (h
->root
.u
.def
.value
1934 + h
->root
.u
.def
.section
->output_offset
1935 + h
->root
.u
.def
.section
->output_section
->vma
);
1936 memcpy (name
, BI_HANDLER
, sizeof (BI_HANDLER
));
1937 for (i
= 0; i
< 8; i
++)
1939 name
[sizeof (BI_HANDLER
) - 2] = '0' + i
;
1940 h
= define_ovtab_symbol (htab
, name
);
1943 h
->root
.u
.def
.value
= off
;
1945 bfd_put_32 (htab
->ovtab
->owner
, bihand
, p
);
1946 bfd_put_32 (htab
->ovtab
->owner
, i
<< 28, p
+ 8);
1951 h
= define_ovtab_symbol (htab
, "__icache_base");
1954 h
->root
.u
.def
.value
= htab
->ovl_sec
[0]->vma
;
1955 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1956 h
->size
= htab
->num_buf
<< htab
->line_size_log2
;
1958 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_linesize");
1961 h
->root
.u
.def
.value
= -htab
->line_size_log2
;
1962 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1964 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
1966 htab
->init
->contents
= bfd_zalloc (htab
->init
->owner
,
1968 if (htab
->init
->contents
== NULL
)
1971 h
= define_ovtab_symbol (htab
, "__icache_fileoff");
1974 h
->root
.u
.def
.value
= 0;
1975 h
->root
.u
.def
.section
= htab
->init
;
1981 /* Write out _ovly_table. */
1982 /* set low bit of .size to mark non-overlay area as present. */
1984 obfd
= htab
->ovtab
->output_section
->owner
;
1985 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
1987 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
1991 unsigned long off
= ovl_index
* 16;
1992 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
1994 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
1995 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16,
1997 /* file_off written later in spu_elf_modify_program_headers. */
1998 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
2002 h
= define_ovtab_symbol (htab
, "_ovly_table");
2005 h
->root
.u
.def
.value
= 16;
2006 h
->size
= htab
->num_overlays
* 16;
2008 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
2011 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2014 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
2017 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2018 h
->size
= htab
->num_buf
* 4;
2020 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
2023 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
2027 h
= define_ovtab_symbol (htab
, "_EAR_");
2030 h
->root
.u
.def
.section
= htab
->toe
;
2031 h
->root
.u
.def
.value
= 0;
2032 h
->size
= htab
->params
->ovly_flavour
== ovly_soft_icache
? 16 * 16 : 16;
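/* Illustrative note (not from the original source): each _ovly_table
   entry written above is four 32-bit words { vma, size, file_off, buf },
   so an overlay section at vma 0x4000 of size 0x234 in buffer 1 gets
   the entry { 0x4000, 0x240, <file_off filled in later>, 1 }, the size
   having been rounded up to a 16-byte multiple.  */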
/* Check that all loadable section VMAs lie in the range
   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */

asection *
spu_elf_check_vma (struct bfd_link_info *info)
{
  struct elf_segment_map *m;
  unsigned int i;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *abfd = info->output_bfd;
  bfd_vma hi = htab->params->local_store_hi;
  bfd_vma lo = htab->params->local_store_lo;

  htab->local_store = hi + 1 - lo;

  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
	if (m->sections[i]->size != 0
	    && (m->sections[i]->vma < lo
		|| m->sections[i]->vma > hi
		|| m->sections[i]->vma + m->sections[i]->size - 1 > hi))
	  return m->sections[i];

  /* No need for overlays if it all fits.  */
  if (htab->params->ovly_flavour != ovly_soft_icache)
    htab->params->auto_overlay = 0;
  return NULL;
}
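/* Illustrative note (not from the original source): for the usual 256k
   SPU local store the caller would pass local_store_lo = 0 and
   local_store_hi = 0x3ffff, giving htab->local_store = 0x40000, and the
   function returns the first PT_LOAD section falling outside that
   range, or NULL when everything fits.  */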
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.
   If a store of lr is found save the instruction offset to *LR_STORE.
   If a stack adjusting instruction is found, save that offset to
   *SP_ADJUST.  */

static int
find_function_stack_adjust (asection *sec,
			    bfd_vma offset,
			    bfd_vma *lr_store,
			    bfd_vma *sp_adjust)
{
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusing insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);

      if (buf[0] == 0x24 /* stqd */)
	{
	  if (rt == 0 /* lr */ && ra == 1 /* sp */)
	    *lr_store = offset;
	  continue;
	}

      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1 /* sp */)
	    {
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    continue;
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  reg[rt] |= imm & 0xffff;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	}
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
	{
	  reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
		     | ((imm & 0x4000) ? 0x00ff0000 : 0)
		     | ((imm & 0x2000) ? 0x0000ff00 : 0)
		     | ((imm & 0x1000) ? 0x000000ff : 0));
	}
      else if (buf[0] == 0x16 /* andbi */)
	{
	  imm >>= 7;
	  imm &= 0xff;
	  imm |= imm << 8;
	  imm |= imm << 16;
	  reg[rt] = reg[ra] & imm;
	}
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	{
	  /* Used in pic reg load.  Say rt is trashed.  Won't be used
	     in stack adjust, but we need to continue past this branch.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    }

  return 0;
}
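/* Worked example (illustrative, not from the original source): the
   prologue instruction "ai $sp,$sp,-80" has opcode byte 0x1c and a
   10-bit immediate of -80; after the partial decode and the
   "(imm ^ 0x200) - 0x200" sign extension above, reg[1] becomes -80, so
   the caller records an 80-byte stack frame for the function.  */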
/* qsort predicate to sort symbols by section and value.  */

static Elf_Internal_Sym *sort_syms_syms;
static asection **sort_syms_psecs;

static int
sort_syms (const void *a, const void *b)
{
  Elf_Internal_Sym *const *s1 = a;
  Elf_Internal_Sym *const *s2 = b;
  asection *sec1,*sec2;
  bfd_signed_vma delta;

  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];

  if (sec1 != sec2)
    return sec1->index - sec2->index;

  delta = (*s1)->st_value - (*s2)->st_value;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  delta = (*s2)->st_size - (*s1)->st_size;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return *s1 < *s2 ? -1 : 1;
}
/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
   entries for section SEC.  */

static struct spu_elf_stack_info *
alloc_stack_info (asection *sec, int max_fun)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  bfd_size_type amt;

  amt = sizeof (struct spu_elf_stack_info);
  amt += (max_fun - 1) * sizeof (struct function_info);
  sec_data->u.i.stack_info = bfd_zmalloc (amt);
  if (sec_data->u.i.stack_info != NULL)
    sec_data->u.i.stack_info->max_fun = max_fun;
  return sec_data->u.i.stack_info;
}
/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.  */

static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
						     &sinfo->fun[i].lr_store,
						     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
/* Return the name of FUN.  */

static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
	return "(null)";
      sprintf (name, "%s+%lx", sec->name,
	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}
/* Read the instruction at OFF in SEC.  Return true iff the instruction
   is a nop, lnop, or stop 0 (all zero insn).  */

static bfd_boolean
is_nop (asection *sec, bfd_vma off)
{
  unsigned char insn[4];

  if (off + 4 > sec->size
      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
    return FALSE;
  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
    return TRUE;
  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
    return TRUE;
  return FALSE;
}
/* Extend the range of FUN to cover nop padding up to LIMIT.
   Return TRUE iff some instruction other than a NOP was found.  */

static bfd_boolean
insns_at_end (struct function_info *fun, bfd_vma limit)
{
  bfd_vma off = (fun->hi + 3) & -4;

  while (off < limit && is_nop (fun->sec, off))
    off += 4;
  if (off < limit)
    {
      fun->hi = off;
      return TRUE;
    }
  fun->hi = limit;
  return FALSE;
}
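
/* Note that the scan above starts at FUN->hi rounded up to a 4-byte
   instruction boundary, so padding after a symbol whose size does not
   end on an instruction boundary is still examined.  */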
/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC.  */

static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  if (sinfo == NULL)
    return FALSE;

  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
        /* Fix overlapping symbols.  */
        const char *f1 = func_name (&sinfo->fun[i - 1]);
        const char *f2 = func_name (&sinfo->fun[i]);

        info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
        sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = TRUE;

  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      if (sinfo->fun[0].lo != 0)
        gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
        {
          const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

          info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
          sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
        }
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
        gaps = TRUE;
    }
  return gaps;
}
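
/* A TRUE result here is what triggers the extra work in
   discover_functions: when gaps remain, branch targets found via
   relocations and untyped global symbols are also used to map code
   addresses to functions.  */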
/* Search current function info for a function that contains address
   OFFSET in section SEC.  */

static struct function_info *
find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int lo, hi, mid;

  lo = 0;
  hi = sinfo->num_fun;
  while (lo < hi)
    {
      mid = (lo + hi) / 2;
      if (offset < sinfo->fun[mid].lo)
        hi = mid;
      else if (offset >= sinfo->fun[mid].hi)
        lo = mid + 1;
      else
        return &sinfo->fun[mid];
    }
  info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
                          sec, offset);
  bfd_set_error (bfd_error_bad_value);
  return NULL;
}
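
/* The binary search above relies on sinfo->fun[] being sorted by lo
   with non-overlapping [lo, hi) ranges; maybe_insert_function keeps
   the array sorted and check_function_ranges trims any overlaps.  */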
/* Add CALLEE to CALLER call list if not already present.  Return TRUE
   if CALLEE was new.  If this function returns FALSE, CALLEE should
   be freed by the caller.  */

static bfd_boolean
insert_callee (struct function_info *caller, struct call_info *callee)
{
  struct call_info **pp, *p;

  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
    if (p->fun == callee->fun)
      {
        /* Tail calls use less stack than normal calls.  Retain entry
           for normal call over one for tail call.  */
        p->is_tail &= callee->is_tail;
        if (!p->is_tail)
          {
            p->fun->start = NULL;
            p->fun->is_func = TRUE;
          }
        p->count += callee->count;
        /* Reorder list so most recent call is first.  */
        *pp = p->next;
        p->next = caller->call_list;
        caller->call_list = p;
        return FALSE;
      }
  callee->next = caller->call_list;
  caller->call_list = callee;
  return TRUE;
}
/* Copy CALL and insert the copy into CALLER.  */

static bfd_boolean
copy_callee (struct function_info *caller, const struct call_info *call)
{
  struct call_info *callee;

  callee = bfd_malloc (sizeof (*callee));
  if (callee == NULL)
    return FALSE;
  *callee = *call;
  if (!insert_callee (caller, callee))
    free (callee);
  return TRUE;
}
/* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
   overlay stub sections.  */

static bfd_boolean
interesting_section (asection *s)
{
  return (s->output_section != bfd_abs_section_ptr
          && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
              == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
          && s->size != 0);
}
2539 /* Rummage through the relocs for SEC, looking for function calls.
2540 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2541 mark destination symbols on calls as being functions. Also
2542 look at branches, which may be tail calls or go to hot/cold
2543 section part of same function. */
2546 mark_functions_via_relocs (asection
*sec
,
2547 struct bfd_link_info
*info
,
2550 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2551 Elf_Internal_Shdr
*symtab_hdr
;
2553 unsigned int priority
= 0;
2554 static bfd_boolean warned
;
2556 if (!interesting_section (sec
)
2557 || sec
->reloc_count
== 0)
2560 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2562 if (internal_relocs
== NULL
)
2565 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2566 psyms
= &symtab_hdr
->contents
;
2567 irela
= internal_relocs
;
2568 irelaend
= irela
+ sec
->reloc_count
;
2569 for (; irela
< irelaend
; irela
++)
2571 enum elf_spu_reloc_type r_type
;
2572 unsigned int r_indx
;
2574 Elf_Internal_Sym
*sym
;
2575 struct elf_link_hash_entry
*h
;
2577 bfd_boolean reject
, is_call
;
2578 struct function_info
*caller
;
2579 struct call_info
*callee
;
2582 r_type
= ELF32_R_TYPE (irela
->r_info
);
2583 if (r_type
!= R_SPU_REL16
2584 && r_type
!= R_SPU_ADDR16
)
2587 if (!(call_tree
&& spu_hash_table (info
)->params
->auto_overlay
))
2591 r_indx
= ELF32_R_SYM (irela
->r_info
);
2592 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2596 || sym_sec
->output_section
== bfd_abs_section_ptr
)
2602 unsigned char insn
[4];
2604 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2605 irela
->r_offset
, 4))
2607 if (is_branch (insn
))
2609 is_call
= (insn
[0] & 0xfd) == 0x31;
2610 priority
= insn
[1] & 0x0f;
2612 priority
|= insn
[2];
2614 priority
|= insn
[3];
2616 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2617 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2620 info
->callbacks
->einfo
2621 (_("%B(%A+0x%v): call to non-code section"
2622 " %B(%A), analysis incomplete\n"),
2623 sec
->owner
, sec
, irela
->r_offset
,
2624 sym_sec
->owner
, sym_sec
);
2632 if (!(call_tree
&& spu_hash_table (info
)->params
->auto_overlay
)
2640 /* For --auto-overlay, count possible stubs we need for
2641 function pointer references. */
2642 unsigned int sym_type
;
2646 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2647 if (sym_type
== STT_FUNC
)
2648 spu_hash_table (info
)->non_ovly_stub
+= 1;
2653 val
= h
->root
.u
.def
.value
;
2655 val
= sym
->st_value
;
2656 val
+= irela
->r_addend
;
2660 struct function_info
*fun
;
2662 if (irela
->r_addend
!= 0)
2664 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2667 fake
->st_value
= val
;
2669 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2673 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2675 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2678 if (irela
->r_addend
!= 0
2679 && fun
->u
.sym
!= sym
)
2684 caller
= find_function (sec
, irela
->r_offset
, info
);
2687 callee
= bfd_malloc (sizeof *callee
);
2691 callee
->fun
= find_function (sym_sec
, val
, info
);
2692 if (callee
->fun
== NULL
)
2694 callee
->is_tail
= !is_call
;
2695 callee
->is_pasted
= FALSE
;
2696 callee
->priority
= priority
;
2698 if (callee
->fun
->last_caller
!= sec
)
2700 callee
->fun
->last_caller
= sec
;
2701 callee
->fun
->call_count
+= 1;
2703 if (!insert_callee (caller
, callee
))
2706 && !callee
->fun
->is_func
2707 && callee
->fun
->stack
== 0)
2709 /* This is either a tail call or a branch from one part of
2710 the function to another, ie. hot/cold section. If the
2711 destination has been called by some other function then
2712 it is a separate function. We also assume that functions
2713 are not split across input files. */
2714 if (sec
->owner
!= sym_sec
->owner
)
2716 callee
->fun
->start
= NULL
;
2717 callee
->fun
->is_func
= TRUE
;
2719 else if (callee
->fun
->start
== NULL
)
2720 callee
->fun
->start
= caller
;
2723 struct function_info
*callee_start
;
2724 struct function_info
*caller_start
;
2725 callee_start
= callee
->fun
;
2726 while (callee_start
->start
)
2727 callee_start
= callee_start
->start
;
2728 caller_start
= caller
;
2729 while (caller_start
->start
)
2730 caller_start
= caller_start
->start
;
2731 if (caller_start
!= callee_start
)
2733 callee
->fun
->start
= NULL
;
2734 callee
->fun
->is_func
= TRUE
;
/* Handle something like .init or .fini, which has a piece of a function.
   These sections are pasted together to form a single function.  */

static bfd_boolean
pasted_function (asection *sec, struct bfd_link_info *info)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (fun == NULL)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
        {
          if (fun_start != NULL)
            {
              struct call_info *callee = bfd_malloc (sizeof *callee);
              if (callee == NULL)
                return FALSE;

              fun->start = fun_start;
              callee->fun = fun;
              callee->is_tail = TRUE;
              callee->is_pasted = TRUE;
              callee->count = 1;
              if (!insert_callee (fun_start, callee))
                free (callee);
              return TRUE;
            }
          break;
        }
      if (l->type == bfd_indirect_link_order
          && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
          && (sinfo = sec_data->u.i.stack_info) != NULL
          && sinfo->num_fun != 0)
        fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  info->callbacks->einfo (_("%A link_order not found\n"), sec);
  return FALSE;
}
2800 /* Map address ranges in code sections to functions. */
2803 discover_functions (struct bfd_link_info
*info
)
2807 Elf_Internal_Sym
***psym_arr
;
2808 asection
***sec_arr
;
2809 bfd_boolean gaps
= FALSE
;
2812 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2815 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2816 if (psym_arr
== NULL
)
2818 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2819 if (sec_arr
== NULL
)
2823 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2825 ibfd
= ibfd
->link_next
, bfd_idx
++)
2827 extern const bfd_target bfd_elf32_spu_vec
;
2828 Elf_Internal_Shdr
*symtab_hdr
;
2831 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2832 asection
**psecs
, **p
;
2834 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2837 /* Read all the symbols. */
2838 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2839 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2843 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2844 if (interesting_section (sec
))
2852 if (symtab_hdr
->contents
!= NULL
)
2854 /* Don't use cached symbols since the generic ELF linker
2855 code only reads local symbols, and we need globals too. */
2856 free (symtab_hdr
->contents
);
2857 symtab_hdr
->contents
= NULL
;
2859 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2861 symtab_hdr
->contents
= (void *) syms
;
2865 /* Select defined function symbols that are going to be output. */
2866 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2869 psym_arr
[bfd_idx
] = psyms
;
2870 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2873 sec_arr
[bfd_idx
] = psecs
;
2874 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2875 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2876 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
2877 || ELF_ST_TYPE (sy
->st_info
) == STT_SECTION
)
2881 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2882 if (s
!= NULL
&& interesting_section (s
))
2885 symcount
= psy
- psyms
;
2888 /* Sort them by section and offset within section. */
2889 sort_syms_syms
= syms
;
2890 sort_syms_psecs
= psecs
;
2891 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2893 /* Now inspect the function symbols. */
2894 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2896 asection
*s
= psecs
[*psy
- syms
];
2897 Elf_Internal_Sym
**psy2
;
2899 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2900 if (psecs
[*psy2
- syms
] != s
)
2903 if (!alloc_stack_info (s
, psy2
- psy
))
2908 /* First install info about properly typed and sized functions.
2909 In an ideal world this will cover all code sections, except
2910 when partitioning functions into hot and cold sections,
2911 and the horrible pasted together .init and .fini functions. */
2912 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2915 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2917 asection
*s
= psecs
[sy
- syms
];
2918 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2923 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2924 if (interesting_section (sec
))
2925 gaps
|= check_function_ranges (sec
, info
);
2930 /* See if we can discover more function symbols by looking at
2932 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2934 ibfd
= ibfd
->link_next
, bfd_idx
++)
2938 if (psym_arr
[bfd_idx
] == NULL
)
2941 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2942 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2946 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2948 ibfd
= ibfd
->link_next
, bfd_idx
++)
2950 Elf_Internal_Shdr
*symtab_hdr
;
2952 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2955 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
2958 psecs
= sec_arr
[bfd_idx
];
2960 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2961 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2964 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2965 if (interesting_section (sec
))
2966 gaps
|= check_function_ranges (sec
, info
);
2970 /* Finally, install all globals. */
2971 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
2975 s
= psecs
[sy
- syms
];
2977 /* Global syms might be improperly typed functions. */
2978 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
2979 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
2981 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
2987 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2989 extern const bfd_target bfd_elf32_spu_vec
;
2992 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2995 /* Some of the symbols we've installed as marking the
2996 beginning of functions may have a size of zero. Extend
2997 the range of such functions to the beginning of the
2998 next symbol of interest. */
2999 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3000 if (interesting_section (sec
))
3002 struct _spu_elf_section_data
*sec_data
;
3003 struct spu_elf_stack_info
*sinfo
;
3005 sec_data
= spu_elf_section_data (sec
);
3006 sinfo
= sec_data
->u
.i
.stack_info
;
3010 bfd_vma hi
= sec
->size
;
3012 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
3014 sinfo
->fun
[fun_idx
].hi
= hi
;
3015 hi
= sinfo
->fun
[fun_idx
].lo
;
3018 /* No symbols in this section. Must be .init or .fini
3019 or something similar. */
3020 else if (!pasted_function (sec
, info
))
3026 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3028 ibfd
= ibfd
->link_next
, bfd_idx
++)
3030 if (psym_arr
[bfd_idx
] == NULL
)
3033 free (psym_arr
[bfd_idx
]);
3034 free (sec_arr
[bfd_idx
]);
/* Iterate over all function_info we have collected, calling DOIT on
   each node if ROOT_ONLY is false.  Only call DOIT on root nodes
   if ROOT_ONLY.  */

static bfd_boolean
for_each_node (bfd_boolean (*doit) (struct function_info *,
                                    struct bfd_link_info *,
                                    void *),
               struct bfd_link_info *info,
               void *param,
               int root_only)
{
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
        {
          struct _spu_elf_section_data *sec_data;
          struct spu_elf_stack_info *sinfo;

          if ((sec_data = spu_elf_section_data (sec)) != NULL
              && (sinfo = sec_data->u.i.stack_info) != NULL)
            {
              int i;
              for (i = 0; i < sinfo->num_fun; ++i)
                if (!root_only || !sinfo->fun[i].non_root)
                  if (!doit (&sinfo->fun[i], info, param))
                    return FALSE;
            }
        }
    }
  return TRUE;
}
/* Transfer call info attached to struct function_info entries for
   all of a given function's sections to the first entry.  */

static bfd_boolean
transfer_calls (struct function_info *fun,
                struct bfd_link_info *info ATTRIBUTE_UNUSED,
                void *param ATTRIBUTE_UNUSED)
{
  struct function_info *start = fun->start;

  if (start != NULL)
    {
      struct call_info *call, *call_next;

      while (start->start != NULL)
        start = start->start;
      for (call = fun->call_list; call != NULL; call = call_next)
        {
          call_next = call->next;
          if (!insert_callee (start, call))
            free (call);
        }
      fun->call_list = NULL;
    }
  return TRUE;
}
/* Mark nodes in the call graph that are called by some other node.  */

static bfd_boolean
mark_non_root (struct function_info *fun,
               struct bfd_link_info *info ATTRIBUTE_UNUSED,
               void *param ATTRIBUTE_UNUSED)
{
  struct call_info *call;

  if (fun->visit1)
    return TRUE;
  fun->visit1 = TRUE;
  for (call = fun->call_list; call; call = call->next)
    {
      call->fun->non_root = TRUE;
      mark_non_root (call->fun, 0, 0);
    }
  return TRUE;
}
/* Remove cycles from the call graph.  Set depth of nodes.  */

static bfd_boolean
remove_cycles (struct function_info *fun,
               struct bfd_link_info *info,
               void *param)
{
  struct call_info **callp, *call;
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->visit2 = TRUE;
  fun->marking = TRUE;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      call->max_depth = depth + !call->is_pasted;
      if (!call->fun->visit2)
        {
          if (!remove_cycles (call->fun, info, &call->max_depth))
            return FALSE;
          if (max_depth < call->max_depth)
            max_depth = call->max_depth;
        }
      else if (call->fun->marking)
        {
          struct spu_link_hash_table *htab = spu_hash_table (info);

          if (!htab->params->auto_overlay
              && htab->params->stack_analysis)
            {
              const char *f1 = func_name (fun);
              const char *f2 = func_name (call->fun);

              info->callbacks->info (_("Stack analysis will ignore the call "
                                       "from %s to %s\n"),
                                     f1, f2);
            }
          *callp = call->next;
          free (call);
          continue;
        }
      callp = &call->next;
    }
  fun->marking = FALSE;
  *(unsigned int *) param = max_depth;
  return TRUE;
}
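
/* Two flags drive the walk above: visit2 marks nodes that have ever
   been entered, while marking is set only while a node is on the
   current recursion stack.  A call edge back to a node with marking
   set closes a cycle and is removed, leaving an acyclic graph for the
   later depth and stack accumulation passes.  */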
/* Check that we actually visited all nodes in remove_cycles.  If we
   didn't, then there is some cycle in the call graph not attached to
   any root node.  Arbitrarily choose a node in the cycle as a new
   root and break the cycle.  */

static bfd_boolean
mark_detached_root (struct function_info *fun,
                    struct bfd_link_info *info,
                    void *param)
{
  if (fun->visit2)
    return TRUE;
  fun->non_root = FALSE;
  *(unsigned int *) param = 0;
  return remove_cycles (fun, info, param);
}
/* Populate call_list for each function.  */

static bfd_boolean
build_call_tree (struct bfd_link_info *info)
{
  bfd *ibfd;
  unsigned int depth;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
        if (!mark_functions_via_relocs (sec, info, TRUE))
          return FALSE;
    }

  /* Transfer call info from hot/cold section part of function
     to main entry.  */
  if (!spu_hash_table (info)->params->auto_overlay
      && !for_each_node (transfer_calls, info, 0, FALSE))
    return FALSE;

  /* Find the call graph root(s).  */
  if (!for_each_node (mark_non_root, info, 0, FALSE))
    return FALSE;

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  depth = 0;
  if (!for_each_node (remove_cycles, info, &depth, TRUE))
    return FALSE;

  return for_each_node (mark_detached_root, info, &depth, FALSE);
}
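
/* The call graph is thus built in a fixed order: discover_functions
   maps addresses to function_info entries, mark_functions_via_relocs
   records caller/callee edges, transfer_calls moves edges from
   hot/cold parts to the main entry, mark_non_root flags called nodes,
   and remove_cycles/mark_detached_root leave an acyclic graph rooted
   at the nodes that nothing else calls.  */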
/* qsort predicate to sort calls by priority, max_depth then count.  */

static int
sort_calls (const void *a, const void *b)
{
  struct call_info *const *c1 = a;
  struct call_info *const *c2 = b;
  int delta;

  delta = (*c2)->priority - (*c1)->priority;
  if (delta != 0)
    return delta;

  delta = (*c2)->max_depth - (*c1)->max_depth;
  if (delta != 0)
    return delta;

  delta = (*c2)->count - (*c1)->count;
  if (delta != 0)
    return delta;

  return (char *) c1 - (char *) c2;
}
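
/* The final address comparison gives qsort a deterministic answer
   when two calls are otherwise equal, since qsort is not a stable
   sort.  */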
3264 unsigned int max_overlay_size
;
3267 /* Set linker_mark and gc_mark on any sections that we will put in
3268 overlays. These flags are used by the generic ELF linker, but we
3269 won't be continuing on to bfd_elf_final_link so it is OK to use
3270 them. linker_mark is clear before we get here. Set segment_mark
3271 on sections that are part of a pasted function (excluding the last
3274 Set up function rodata section if --overlay-rodata. We don't
3275 currently include merged string constant rodata sections since
3277 Sort the call graph so that the deepest nodes will be visited
3281 mark_overlay_section (struct function_info
*fun
,
3282 struct bfd_link_info
*info
,
3285 struct call_info
*call
;
3287 struct _mos_param
*mos_param
= param
;
3288 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3294 if (!fun
->sec
->linker_mark
3295 && (htab
->params
->ovly_flavour
!= ovly_soft_icache
3296 || htab
->params
->non_ia_text
3297 || strncmp (fun
->sec
->name
, ".text.ia.", 9) == 0))
3301 fun
->sec
->linker_mark
= 1;
3302 fun
->sec
->gc_mark
= 1;
3303 fun
->sec
->segment_mark
= 0;
3304 /* Ensure SEC_CODE is set on this text section (it ought to
3305 be!), and SEC_CODE is clear on rodata sections. We use
3306 this flag to differentiate the two overlay section types. */
3307 fun
->sec
->flags
|= SEC_CODE
;
3309 size
= fun
->sec
->size
;
3310 if (htab
->params
->auto_overlay
& OVERLAY_RODATA
)
3314 /* Find the rodata section corresponding to this function's
3316 if (strcmp (fun
->sec
->name
, ".text") == 0)
3318 name
= bfd_malloc (sizeof (".rodata"));
3321 memcpy (name
, ".rodata", sizeof (".rodata"));
3323 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
3325 size_t len
= strlen (fun
->sec
->name
);
3326 name
= bfd_malloc (len
+ 3);
3329 memcpy (name
, ".rodata", sizeof (".rodata"));
3330 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
3332 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
3334 size_t len
= strlen (fun
->sec
->name
) + 1;
3335 name
= bfd_malloc (len
);
3338 memcpy (name
, fun
->sec
->name
, len
);
3344 asection
*rodata
= NULL
;
3345 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
3346 if (group_sec
== NULL
)
3347 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
3349 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
3351 if (strcmp (group_sec
->name
, name
) == 0)
3356 group_sec
= elf_section_data (group_sec
)->next_in_group
;
3358 fun
->rodata
= rodata
;
3361 size
+= fun
->rodata
->size
;
3362 if (htab
->params
->line_size
!= 0
3363 && size
> htab
->params
->line_size
)
3365 size
-= fun
->rodata
->size
;
3370 fun
->rodata
->linker_mark
= 1;
3371 fun
->rodata
->gc_mark
= 1;
3372 fun
->rodata
->flags
&= ~SEC_CODE
;
3378 if (mos_param
->max_overlay_size
< size
)
3379 mos_param
->max_overlay_size
= size
;
3382 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3387 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
3391 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3392 calls
[count
++] = call
;
3394 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
3396 fun
->call_list
= NULL
;
3400 calls
[count
]->next
= fun
->call_list
;
3401 fun
->call_list
= calls
[count
];
3406 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3408 if (call
->is_pasted
)
3410 /* There can only be one is_pasted call per function_info. */
3411 BFD_ASSERT (!fun
->sec
->segment_mark
);
3412 fun
->sec
->segment_mark
= 1;
3414 if (!mark_overlay_section (call
->fun
, info
, param
))
3418 /* Don't put entry code into an overlay. The overlay manager needs
3419 a stack! Also, don't mark .ovl.init as an overlay. */
3420 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
3421 == info
->output_bfd
->start_address
3422 || strncmp (fun
->sec
->output_section
->name
, ".ovl.init", 9) == 0)
3424 fun
->sec
->linker_mark
= 0;
3425 if (fun
->rodata
!= NULL
)
3426 fun
->rodata
->linker_mark
= 0;
3431 /* If non-zero then unmark functions called from those within sections
3432 that we need to unmark. Unfortunately this isn't reliable since the
3433 call graph cannot know the destination of function pointer calls. */
3434 #define RECURSE_UNMARK 0
3437 asection
*exclude_input_section
;
3438 asection
*exclude_output_section
;
3439 unsigned long clearing
;
3442 /* Undo some of mark_overlay_section's work. */
3445 unmark_overlay_section (struct function_info
*fun
,
3446 struct bfd_link_info
*info
,
3449 struct call_info
*call
;
3450 struct _uos_param
*uos_param
= param
;
3451 unsigned int excluded
= 0;
3459 if (fun
->sec
== uos_param
->exclude_input_section
3460 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
3464 uos_param
->clearing
+= excluded
;
3466 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
3468 fun
->sec
->linker_mark
= 0;
3470 fun
->rodata
->linker_mark
= 0;
3473 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3474 if (!unmark_overlay_section (call
->fun
, info
, param
))
3478 uos_param
->clearing
-= excluded
;
3483 unsigned int lib_size
;
3484 asection
**lib_sections
;
3487 /* Add sections we have marked as belonging to overlays to an array
3488 for consideration as non-overlay sections. The array consist of
3489 pairs of sections, (text,rodata), for functions in the call graph. */
3492 collect_lib_sections (struct function_info
*fun
,
3493 struct bfd_link_info
*info
,
3496 struct _cl_param
*lib_param
= param
;
3497 struct call_info
*call
;
3504 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3507 size
= fun
->sec
->size
;
3509 size
+= fun
->rodata
->size
;
3511 if (size
<= lib_param
->lib_size
)
3513 *lib_param
->lib_sections
++ = fun
->sec
;
3514 fun
->sec
->gc_mark
= 0;
3515 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3517 *lib_param
->lib_sections
++ = fun
->rodata
;
3518 fun
->rodata
->gc_mark
= 0;
3521 *lib_param
->lib_sections
++ = NULL
;
3524 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3525 collect_lib_sections (call
->fun
, info
, param
);
/* qsort predicate to sort sections by call count.  */

static int
sort_lib (const void *a, const void *b)
{
  asection *const *s1 = a;
  asection *const *s2 = b;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  int delta;

  delta = 0;
  if ((sec_data = spu_elf_section_data (*s1)) != NULL
      && (sinfo = sec_data->u.i.stack_info) != NULL)
    {
      int i;
      for (i = 0; i < sinfo->num_fun; ++i)
        delta -= sinfo->fun[i].call_count;
    }

  if ((sec_data = spu_elf_section_data (*s2)) != NULL
      && (sinfo = sec_data->u.i.stack_info) != NULL)
    {
      int i;
      for (i = 0; i < sinfo->num_fun; ++i)
        delta += sinfo->fun[i].call_count;
    }

  if (delta != 0)
    return delta;

  return s1 - s2;
}
3564 /* Remove some sections from those marked to be in overlays. Choose
3565 those that are called from many places, likely library functions. */
3568 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3571 asection
**lib_sections
;
3572 unsigned int i
, lib_count
;
3573 struct _cl_param collect_lib_param
;
3574 struct function_info dummy_caller
;
3575 struct spu_link_hash_table
*htab
;
3577 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3579 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3581 extern const bfd_target bfd_elf32_spu_vec
;
3584 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3587 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3588 if (sec
->linker_mark
3589 && sec
->size
< lib_size
3590 && (sec
->flags
& SEC_CODE
) != 0)
3593 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3594 if (lib_sections
== NULL
)
3595 return (unsigned int) -1;
3596 collect_lib_param
.lib_size
= lib_size
;
3597 collect_lib_param
.lib_sections
= lib_sections
;
3598 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3600 return (unsigned int) -1;
3601 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3603 /* Sort sections so that those with the most calls are first. */
3605 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3607 htab
= spu_hash_table (info
);
3608 for (i
= 0; i
< lib_count
; i
++)
3610 unsigned int tmp
, stub_size
;
3612 struct _spu_elf_section_data
*sec_data
;
3613 struct spu_elf_stack_info
*sinfo
;
3615 sec
= lib_sections
[2 * i
];
3616 /* If this section is OK, its size must be less than lib_size. */
3618 /* If it has a rodata section, then add that too. */
3619 if (lib_sections
[2 * i
+ 1])
3620 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3621 /* Add any new overlay call stubs needed by the section. */
3624 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3625 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3628 struct call_info
*call
;
3630 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3631 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3632 if (call
->fun
->sec
->linker_mark
)
3634 struct call_info
*p
;
3635 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3636 if (p
->fun
== call
->fun
)
3639 stub_size
+= ovl_stub_size (htab
->params
->ovly_flavour
);
3642 if (tmp
+ stub_size
< lib_size
)
3644 struct call_info
**pp
, *p
;
3646 /* This section fits. Mark it as non-overlay. */
3647 lib_sections
[2 * i
]->linker_mark
= 0;
3648 if (lib_sections
[2 * i
+ 1])
3649 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3650 lib_size
-= tmp
+ stub_size
;
3651 /* Call stubs to the section we just added are no longer
3653 pp
= &dummy_caller
.call_list
;
3654 while ((p
= *pp
) != NULL
)
3655 if (!p
->fun
->sec
->linker_mark
)
3657 lib_size
+= ovl_stub_size (htab
->params
->ovly_flavour
);
3663 /* Add new call stubs to dummy_caller. */
3664 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3665 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3668 struct call_info
*call
;
3670 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3671 for (call
= sinfo
->fun
[k
].call_list
;
3674 if (call
->fun
->sec
->linker_mark
)
3676 struct call_info
*callee
;
3677 callee
= bfd_malloc (sizeof (*callee
));
3679 return (unsigned int) -1;
3681 if (!insert_callee (&dummy_caller
, callee
))
3687 while (dummy_caller
.call_list
!= NULL
)
3689 struct call_info
*call
= dummy_caller
.call_list
;
3690 dummy_caller
.call_list
= call
->next
;
3693 for (i
= 0; i
< 2 * lib_count
; i
++)
3694 if (lib_sections
[i
])
3695 lib_sections
[i
]->gc_mark
= 1;
3696 free (lib_sections
);
3700 /* Build an array of overlay sections. The deepest node's section is
3701 added first, then its parent node's section, then everything called
3702 from the parent section. The idea being to group sections to
3703 minimise calls between different overlays. */
3706 collect_overlays (struct function_info
*fun
,
3707 struct bfd_link_info
*info
,
3710 struct call_info
*call
;
3711 bfd_boolean added_fun
;
3712 asection
***ovly_sections
= param
;
3718 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3719 if (!call
->is_pasted
)
3721 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3727 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3729 fun
->sec
->gc_mark
= 0;
3730 *(*ovly_sections
)++ = fun
->sec
;
3731 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3733 fun
->rodata
->gc_mark
= 0;
3734 *(*ovly_sections
)++ = fun
->rodata
;
3737 *(*ovly_sections
)++ = NULL
;
3740 /* Pasted sections must stay with the first section. We don't
3741 put pasted sections in the array, just the first section.
3742 Mark subsequent sections as already considered. */
3743 if (fun
->sec
->segment_mark
)
3745 struct function_info
*call_fun
= fun
;
3748 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3749 if (call
->is_pasted
)
3751 call_fun
= call
->fun
;
3752 call_fun
->sec
->gc_mark
= 0;
3753 if (call_fun
->rodata
)
3754 call_fun
->rodata
->gc_mark
= 0;
3760 while (call_fun
->sec
->segment_mark
);
3764 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3765 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3770 struct _spu_elf_section_data
*sec_data
;
3771 struct spu_elf_stack_info
*sinfo
;
3773 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3774 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3777 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3778 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3786 struct _sum_stack_param
{
3788 size_t overall_stack
;
3789 bfd_boolean emit_stack_syms
;
3792 /* Descend the call graph for FUN, accumulating total stack required. */
3795 sum_stack (struct function_info
*fun
,
3796 struct bfd_link_info
*info
,
3799 struct call_info
*call
;
3800 struct function_info
*max
;
3801 size_t stack
, cum_stack
;
3803 bfd_boolean has_call
;
3804 struct _sum_stack_param
*sum_stack_param
= param
;
3805 struct spu_link_hash_table
*htab
;
3807 cum_stack
= fun
->stack
;
3808 sum_stack_param
->cum_stack
= cum_stack
;
3814 for (call
= fun
->call_list
; call
; call
= call
->next
)
3816 if (!call
->is_pasted
)
3818 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3820 stack
= sum_stack_param
->cum_stack
;
3821 /* Include caller stack for normal calls, don't do so for
3822 tail calls. fun->stack here is local stack usage for
3824 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3825 stack
+= fun
->stack
;
3826 if (cum_stack
< stack
)
3833 sum_stack_param
->cum_stack
= cum_stack
;
3835 /* Now fun->stack holds cumulative stack. */
3836 fun
->stack
= cum_stack
;
3840 && sum_stack_param
->overall_stack
< cum_stack
)
3841 sum_stack_param
->overall_stack
= cum_stack
;
3843 htab
= spu_hash_table (info
);
3844 if (htab
->params
->auto_overlay
)
3847 f1
= func_name (fun
);
3848 if (htab
->params
->stack_analysis
)
3851 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3852 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3853 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3857 info
->callbacks
->minfo (_(" calls:\n"));
3858 for (call
= fun
->call_list
; call
; call
= call
->next
)
3859 if (!call
->is_pasted
)
3861 const char *f2
= func_name (call
->fun
);
3862 const char *ann1
= call
->fun
== max
? "*" : " ";
3863 const char *ann2
= call
->is_tail
? "t" : " ";
3865 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
3870 if (sum_stack_param
->emit_stack_syms
)
3872 char *name
= bfd_malloc (18 + strlen (f1
));
3873 struct elf_link_hash_entry
*h
;
3878 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
3879 sprintf (name
, "__stack_%s", f1
);
3881 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
3883 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
3886 && (h
->root
.type
== bfd_link_hash_new
3887 || h
->root
.type
== bfd_link_hash_undefined
3888 || h
->root
.type
== bfd_link_hash_undefweak
))
3890 h
->root
.type
= bfd_link_hash_defined
;
3891 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
3892 h
->root
.u
.def
.value
= cum_stack
;
3897 h
->ref_regular_nonweak
= 1;
3898 h
->forced_local
= 1;
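
/* So the cumulative figure for a function is its own frame plus the
   largest cumulative requirement among its non-pasted callees, except
   that the caller frame is not added again for true tail calls.  Root
   nodes feed the overall maximum reported by spu_elf_stack_analysis.  */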
/* SEC is part of a pasted function.  Return the call_info for the
   next section of this function.  */

static struct call_info *
find_pasted_call (asection *sec)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  struct call_info *call;
  int k;

  for (k = 0; k < sinfo->num_fun; ++k)
    for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
      if (call->is_pasted)
        return call;
  abort ();
  return 0;
}

/* qsort predicate to sort bfds by file name.  */

static int
sort_bfds (const void *a, const void *b)
{
  bfd *const *abfd1 = a;
  bfd *const *abfd2 = b;

  return strcmp ((*abfd1)->filename, (*abfd2)->filename);
}
3937 print_one_overlay_section (FILE *script
,
3940 unsigned int ovlynum
,
3941 unsigned int *ovly_map
,
3942 asection
**ovly_sections
,
3943 struct bfd_link_info
*info
)
3947 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
3949 asection
*sec
= ovly_sections
[2 * j
];
3951 if (fprintf (script
, " %s%c%s (%s)\n",
3952 (sec
->owner
->my_archive
!= NULL
3953 ? sec
->owner
->my_archive
->filename
: ""),
3954 info
->path_separator
,
3955 sec
->owner
->filename
,
3958 if (sec
->segment_mark
)
3960 struct call_info
*call
= find_pasted_call (sec
);
3961 while (call
!= NULL
)
3963 struct function_info
*call_fun
= call
->fun
;
3964 sec
= call_fun
->sec
;
3965 if (fprintf (script
, " %s%c%s (%s)\n",
3966 (sec
->owner
->my_archive
!= NULL
3967 ? sec
->owner
->my_archive
->filename
: ""),
3968 info
->path_separator
,
3969 sec
->owner
->filename
,
3972 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3973 if (call
->is_pasted
)
3979 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
3981 asection
*sec
= ovly_sections
[2 * j
+ 1];
3983 && fprintf (script
, " %s%c%s (%s)\n",
3984 (sec
->owner
->my_archive
!= NULL
3985 ? sec
->owner
->my_archive
->filename
: ""),
3986 info
->path_separator
,
3987 sec
->owner
->filename
,
3991 sec
= ovly_sections
[2 * j
];
3992 if (sec
->segment_mark
)
3994 struct call_info
*call
= find_pasted_call (sec
);
3995 while (call
!= NULL
)
3997 struct function_info
*call_fun
= call
->fun
;
3998 sec
= call_fun
->rodata
;
4000 && fprintf (script
, " %s%c%s (%s)\n",
4001 (sec
->owner
->my_archive
!= NULL
4002 ? sec
->owner
->my_archive
->filename
: ""),
4003 info
->path_separator
,
4004 sec
->owner
->filename
,
4007 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4008 if (call
->is_pasted
)
4017 /* Handle --auto-overlay. */
4019 static void spu_elf_auto_overlay (struct bfd_link_info
*)
4023 spu_elf_auto_overlay (struct bfd_link_info
*info
)
4027 struct elf_segment_map
*m
;
4028 unsigned int fixed_size
, lo
, hi
;
4029 struct spu_link_hash_table
*htab
;
4030 unsigned int base
, i
, count
, bfd_count
;
4031 unsigned int region
, ovlynum
;
4032 asection
**ovly_sections
, **ovly_p
;
4033 unsigned int *ovly_map
;
4035 unsigned int total_overlay_size
, overlay_size
;
4036 const char *ovly_mgr_entry
;
4037 struct elf_link_hash_entry
*h
;
4038 struct _mos_param mos_param
;
4039 struct _uos_param uos_param
;
4040 struct function_info dummy_caller
;
4042 /* Find the extents of our loadable image. */
4043 lo
= (unsigned int) -1;
4045 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4046 if (m
->p_type
== PT_LOAD
)
4047 for (i
= 0; i
< m
->count
; i
++)
4048 if (m
->sections
[i
]->size
!= 0)
4050 if (m
->sections
[i
]->vma
< lo
)
4051 lo
= m
->sections
[i
]->vma
;
4052 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
4053 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
4055 fixed_size
= hi
+ 1 - lo
;
4057 if (!discover_functions (info
))
4060 if (!build_call_tree (info
))
4063 uos_param
.exclude_input_section
= 0;
4064 uos_param
.exclude_output_section
4065 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
4067 htab
= spu_hash_table (info
);
4068 ovly_mgr_entry
= "__ovly_load";
4069 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4070 ovly_mgr_entry
= "__icache_br_handler";
4071 h
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
4072 FALSE
, FALSE
, FALSE
);
4074 && (h
->root
.type
== bfd_link_hash_defined
4075 || h
->root
.type
== bfd_link_hash_defweak
)
4078 /* We have a user supplied overlay manager. */
4079 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
4083 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4084 builtin version to .text, and will adjust .text size. */
4085 fixed_size
+= (*htab
->params
->spu_elf_load_ovl_mgr
) ();
4088 /* Mark overlay sections, and find max overlay section size. */
4089 mos_param
.max_overlay_size
= 0;
4090 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
4093 /* We can't put the overlay manager or interrupt routines in
4095 uos_param
.clearing
= 0;
4096 if ((uos_param
.exclude_input_section
4097 || uos_param
.exclude_output_section
)
4098 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
4102 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4104 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
4105 if (bfd_arr
== NULL
)
4108 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4111 total_overlay_size
= 0;
4112 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4114 extern const bfd_target bfd_elf32_spu_vec
;
4116 unsigned int old_count
;
4118 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
4122 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
4123 if (sec
->linker_mark
)
4125 if ((sec
->flags
& SEC_CODE
) != 0)
4127 fixed_size
-= sec
->size
;
4128 total_overlay_size
+= sec
->size
;
4130 else if ((sec
->flags
& (SEC_ALLOC
| SEC_LOAD
)) == (SEC_ALLOC
| SEC_LOAD
)
4131 && sec
->output_section
->owner
== info
->output_bfd
4132 && strncmp (sec
->output_section
->name
, ".ovl.init", 9) == 0)
4133 fixed_size
-= sec
->size
;
4134 if (count
!= old_count
)
4135 bfd_arr
[bfd_count
++] = ibfd
;
4138 /* Since the overlay link script selects sections by file name and
4139 section name, ensure that file names are unique. */
4142 bfd_boolean ok
= TRUE
;
4144 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
4145 for (i
= 1; i
< bfd_count
; ++i
)
4146 if (strcmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
4148 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
4150 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
4151 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
4152 bfd_arr
[i
]->filename
,
4153 bfd_arr
[i
]->my_archive
->filename
);
4155 info
->callbacks
->einfo (_("%s duplicated\n"),
4156 bfd_arr
[i
]->filename
);
4162 info
->callbacks
->einfo (_("sorry, no support for duplicate "
4163 "object files in auto-overlay script\n"));
4164 bfd_set_error (bfd_error_bad_value
);
4170 if (htab
->reserved
== 0)
4172 struct _sum_stack_param sum_stack_param
;
4174 sum_stack_param
.emit_stack_syms
= 0;
4175 sum_stack_param
.overall_stack
= 0;
4176 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4178 htab
->reserved
= sum_stack_param
.overall_stack
+ htab
->extra_stack_space
;
4180 fixed_size
+= htab
->reserved
;
4181 fixed_size
+= htab
->non_ovly_stub
* ovl_stub_size (htab
->params
->ovly_flavour
);
4182 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
4184 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4186 /* Stubs in the non-icache area are bigger. */
4187 fixed_size
+= htab
->non_ovly_stub
* 16;
4188 /* Space for icache manager tables.
4189 a) Tag array, one quadword per cache line.
4190 - word 0: ia address of present line, init to zero.
4191 - word 1: link locator. link_elem=stub_addr/2+locator
4192 - halfwords 4-7: head/tail pointers for linked lists. */
4193 fixed_size
+= 16 << htab
->num_lines_log2
;
4194 /* b) Linked list elements, max_branch per line. */
4195 fixed_size
+= htab
->params
->max_branch
<< (htab
->num_lines_log2
+ 4);
4196 /* c) Indirect branch descriptors, 8 quadwords. */
4197 fixed_size
+= 8 * 16;
4198 /* d) Pointers to __ea backing store, 16 quadwords. */
4199 fixed_size
+= 16 * 16;
4203 /* Guess number of overlays. Assuming overlay buffer is on
4204 average only half full should be conservative. */
4205 ovlynum
= (total_overlay_size
* 2 * htab
->params
->num_lines
4206 / (htab
->local_store
- fixed_size
));
4207 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4208 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
4212 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
4213 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4214 "size of 0x%v exceeds local store\n"),
4215 (bfd_vma
) fixed_size
,
4216 (bfd_vma
) mos_param
.max_overlay_size
);
4218 /* Now see if we should put some functions in the non-overlay area. */
4219 else if (fixed_size
< htab
->overlay_fixed
)
4221 unsigned int max_fixed
, lib_size
;
4223 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
4224 if (max_fixed
> htab
->overlay_fixed
)
4225 max_fixed
= htab
->overlay_fixed
;
4226 lib_size
= max_fixed
- fixed_size
;
4227 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
4228 if (lib_size
== (unsigned int) -1)
4230 fixed_size
= max_fixed
- lib_size
;
4233 /* Build an array of sections, suitably sorted to place into
4235 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
4236 if (ovly_sections
== NULL
)
4238 ovly_p
= ovly_sections
;
4239 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
4241 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
4242 ovly_map
= bfd_malloc (count
* sizeof (*ovly_map
));
4243 if (ovly_map
== NULL
)
4246 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
4247 overlay_size
= (htab
->local_store
- fixed_size
) / htab
->params
->num_lines
;
4248 if (htab
->params
->line_size
!= 0)
4249 overlay_size
= htab
->params
->line_size
;
4252 while (base
< count
)
4254 unsigned int size
= 0;
4256 for (i
= base
; i
< count
; i
++)
4260 unsigned int num_stubs
;
4261 struct call_info
*call
, *pasty
;
4262 struct _spu_elf_section_data
*sec_data
;
4263 struct spu_elf_stack_info
*sinfo
;
4266 /* See whether we can add this section to the current
4267 overlay without overflowing our overlay buffer. */
4268 sec
= ovly_sections
[2 * i
];
4269 tmp
= size
+ sec
->size
;
4270 if (ovly_sections
[2 * i
+ 1])
4271 tmp
+= ovly_sections
[2 * i
+ 1]->size
;
4272 if (tmp
> overlay_size
)
4274 if (sec
->segment_mark
)
4276 /* Pasted sections must stay together, so add their
4278 struct call_info
*pasty
= find_pasted_call (sec
);
4279 while (pasty
!= NULL
)
4281 struct function_info
*call_fun
= pasty
->fun
;
4282 tmp
+= call_fun
->sec
->size
;
4283 if (call_fun
->rodata
)
4284 tmp
+= call_fun
->rodata
->size
;
4285 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
4286 if (pasty
->is_pasted
)
4290 if (tmp
> overlay_size
)
4293 /* If we add this section, we might need new overlay call
4294 stubs. Add any overlay section calls to dummy_call. */
4296 sec_data
= spu_elf_section_data (sec
);
4297 sinfo
= sec_data
->u
.i
.stack_info
;
4298 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
4299 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
4300 if (call
->is_pasted
)
4302 BFD_ASSERT (pasty
== NULL
);
4305 else if (call
->fun
->sec
->linker_mark
)
4307 if (!copy_callee (&dummy_caller
, call
))
4310 while (pasty
!= NULL
)
4312 struct function_info
*call_fun
= pasty
->fun
;
4314 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4315 if (call
->is_pasted
)
4317 BFD_ASSERT (pasty
== NULL
);
4320 else if (!copy_callee (&dummy_caller
, call
))
4324 /* Calculate call stub size. */
4326 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
4331 /* If the call is within this overlay, we won't need a
4333 for (k
= base
; k
< i
+ 1; k
++)
4334 if (call
->fun
->sec
== ovly_sections
[2 * k
])
4340 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4341 && num_stubs
> htab
->params
->max_branch
)
4343 if (tmp
+ num_stubs
* ovl_stub_size (htab
->params
->ovly_flavour
)
4351 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
4352 ovly_sections
[2 * i
]->owner
,
4353 ovly_sections
[2 * i
],
4354 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
4355 bfd_set_error (bfd_error_bad_value
);
4359 while (dummy_caller
.call_list
!= NULL
)
4361 struct call_info
*call
= dummy_caller
.call_list
;
4362 dummy_caller
.call_list
= call
->next
;
4368 ovly_map
[base
++] = ovlynum
;
4371 script
= htab
->params
->spu_elf_open_overlay_script ();
4373 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4376 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4378 if (fprintf (script
,
4379 " .data.icache ALIGN (16) : { *(.ovtab) *(.data.icache) }\n"
4380 " . = ALIGN (%u);\n"
4381 " .ovl.init : { *(.ovl.init) }\n"
4382 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4383 htab
->params
->line_size
) <= 0)
4388 while (base
< count
)
4390 unsigned int indx
= ovlynum
- 1;
4391 unsigned int vma
, lma
;
4393 vma
= (indx
& (htab
->params
->num_lines
- 1)) << htab
->line_size_log2
;
4394 lma
= indx
<< htab
->line_size_log2
;
4396 if (fprintf (script
, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4397 ": AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16) + %u) {\n",
4398 ovlynum
, vma
, lma
) <= 0)
4401 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4402 ovly_map
, ovly_sections
, info
);
4403 if (base
== (unsigned) -1)
4406 if (fprintf (script
, " }\n") <= 0)
4412 if (fprintf (script
, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4413 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
)) <= 0)
4418 if (fprintf (script
,
4419 " . = ALIGN (16);\n"
4420 " .ovl.init : { *(.ovl.init) }\n"
4421 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4424 for (region
= 1; region
<= htab
->params
->num_lines
; region
++)
4428 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4436 /* We need to set lma since we are overlaying .ovl.init. */
4437 if (fprintf (script
,
4438 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4443 if (fprintf (script
, " OVERLAY :\n {\n") <= 0)
4447 while (base
< count
)
4449 if (fprintf (script
, " .ovly%u {\n", ovlynum
) <= 0)
4452 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4453 ovly_map
, ovly_sections
, info
);
4454 if (base
== (unsigned) -1)
4457 if (fprintf (script
, " }\n") <= 0)
4460 ovlynum
+= htab
->params
->num_lines
;
4461 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4465 if (fprintf (script
, " }\n") <= 0)
4472 free (ovly_sections
);
4474 if (fprintf (script
, "}\nINSERT BEFORE .text;\n") <= 0)
4476 if (fclose (script
) != 0)
4479 if (htab
->params
->auto_overlay
& AUTO_RELINK
)
4480 (*htab
->params
->spu_elf_relink
) ();
4485 bfd_set_error (bfd_error_system_call
);
4487 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
/* Provide an estimate of total stack required.  */

static bfd_boolean
spu_elf_stack_analysis (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  struct _sum_stack_param sum_stack_param;

  if (!discover_functions (info))
    return FALSE;

  if (!build_call_tree (info))
    return FALSE;

  htab = spu_hash_table (info);
  if (htab->params->stack_analysis)
    {
      info->callbacks->info (_("Stack size for call graph root nodes.\n"));
      info->callbacks->minfo (_("\nStack size for functions. "
                                "Annotations: '*' max stack, 't' tail call\n"));
    }

  sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
  sum_stack_param.overall_stack = 0;
  if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
    return FALSE;

  if (htab->params->stack_analysis)
    info->callbacks->info (_("Maximum stack required is 0x%v\n"),
                           (bfd_vma) sum_stack_param.overall_stack);
  return TRUE;
}

/* Perform a final link.  */

static bfd_boolean
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (htab->params->auto_overlay)
    spu_elf_auto_overlay (info);

  if ((htab->params->stack_analysis
       || (htab->params->ovly_flavour == ovly_soft_icache
           && htab->params->lrlive_analysis))
      && !spu_elf_stack_analysis (info))
    info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");

  if (!spu_elf_build_stubs (info))
    info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");

  return bfd_elf_final_link (output_bfd, info);
}

/* Called when not normally emitting relocs, ie. !info->relocatable
   and !info->emitrelocations.  Returns a count of special relocs
   that need to be emitted.  */

static unsigned int
spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
{
  Elf_Internal_Rela *relocs;
  unsigned int count = 0;

  relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
                                      info->keep_memory);
  if (relocs != NULL)
    {
      Elf_Internal_Rela *rel;
      Elf_Internal_Rela *relend = relocs + sec->reloc_count;

      for (rel = relocs; rel < relend; rel++)
        {
          int r_type = ELF32_R_TYPE (rel->r_info);
          if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
            ++count;
        }

      if (elf_section_data (sec)->relocs != relocs)
        free (relocs);
    }

  return count;
}
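
/* Only R_SPU_PPU32 and R_SPU_PPU64 are counted because they are the
   relocs spu_elf_relocate_section rewrites and emits for the PPU side
   of an embedded image even when the link is not otherwise emitting
   relocations.  */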
4577 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4580 spu_elf_relocate_section (bfd
*output_bfd
,
4581 struct bfd_link_info
*info
,
4583 asection
*input_section
,
4585 Elf_Internal_Rela
*relocs
,
4586 Elf_Internal_Sym
*local_syms
,
4587 asection
**local_sections
)
4589 Elf_Internal_Shdr
*symtab_hdr
;
4590 struct elf_link_hash_entry
**sym_hashes
;
4591 Elf_Internal_Rela
*rel
, *relend
;
4592 struct spu_link_hash_table
*htab
;
4595 bfd_boolean emit_these_relocs
= FALSE
;
4596 bfd_boolean is_ea_sym
;
4598 unsigned int iovl
= 0;
4600 htab
= spu_hash_table (info
);
4601 stubs
= (htab
->stub_sec
!= NULL
4602 && maybe_needs_stubs (input_section
));
4603 iovl
= overlay_index (input_section
);
4604 ea
= bfd_get_section_by_name (output_bfd
, "._ea");
4605 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
4606 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
4609 relend
= relocs
+ input_section
->reloc_count
;
4610 for (; rel
< relend
; rel
++)
4613 reloc_howto_type
*howto
;
4614 unsigned int r_symndx
;
4615 Elf_Internal_Sym
*sym
;
4617 struct elf_link_hash_entry
*h
;
4618 const char *sym_name
;
4621 bfd_reloc_status_type r
;
4622 bfd_boolean unresolved_reloc
;
4624 bfd_boolean overlay_encoded
;
4625 enum _stub_type stub_type
;
4627 r_symndx
= ELF32_R_SYM (rel
->r_info
);
4628 r_type
= ELF32_R_TYPE (rel
->r_info
);
4629 howto
= elf_howto_table
+ r_type
;
4630 unresolved_reloc
= FALSE
;
4635 if (r_symndx
< symtab_hdr
->sh_info
)
4637 sym
= local_syms
+ r_symndx
;
4638 sec
= local_sections
[r_symndx
];
4639 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
4640 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4644 if (sym_hashes
== NULL
)
4647 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
4649 while (h
->root
.type
== bfd_link_hash_indirect
4650 || h
->root
.type
== bfd_link_hash_warning
)
4651 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
4654 if (h
->root
.type
== bfd_link_hash_defined
4655 || h
->root
.type
== bfd_link_hash_defweak
)
4657 sec
= h
->root
.u
.def
.section
;
4659 || sec
->output_section
== NULL
)
4660 /* Set a flag that will be cleared later if we find a
4661 relocation value for this symbol. output_section
4662 is typically NULL for symbols satisfied by a shared
4664 unresolved_reloc
= TRUE
;
4666 relocation
= (h
->root
.u
.def
.value
4667 + sec
->output_section
->vma
4668 + sec
->output_offset
);
4670 else if (h
->root
.type
== bfd_link_hash_undefweak
)
4672 else if (info
->unresolved_syms_in_objects
== RM_IGNORE
4673 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
4675 else if (!info
->relocatable
4676 && !(r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
))
4679 err
= (info
->unresolved_syms_in_objects
== RM_GENERATE_ERROR
4680 || ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
);
4681 if (!info
->callbacks
->undefined_symbol (info
,
4682 h
->root
.root
.string
,
4685 rel
->r_offset
, err
))
4689 sym_name
= h
->root
.root
.string
;
4692 if (sec
!= NULL
&& elf_discarded_section (sec
))
4694 /* For relocs against symbols from removed linkonce sections,
4695 or sections discarded by a linker script, we just want the
4696 section contents zeroed. Avoid any special processing. */
4697 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
4703 if (info
->relocatable
)
4706 is_ea_sym
= (ea
!= NULL
4708 && sec
->output_section
== ea
);
4709 overlay_encoded
= FALSE
;
4711 /* If this symbol is in an overlay area, we may need to relocate
4712 to the overlay stub. */
4713 addend
= rel
->r_addend
;
4716 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4717 contents
, info
)) != no_stub
)
4719 unsigned int ovl
= 0;
4720 struct got_entry
*g
, **head
;
4722 if (stub_type
!= nonovl_stub
)
4726 head
= &h
->got
.glist
;
4728 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4730 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4731 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4732 ? g
->br_addr
== (rel
->r_offset
4733 + input_section
->output_offset
4734 + input_section
->output_section
->vma
)
4735 : g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4740 relocation
= g
->stub_addr
;
4745 /* For soft icache, encode the overlay index into addresses. */
4746 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4749 unsigned int ovl
= overlay_index (sec
);
4752 unsigned int set_id
= (ovl
- 1) >> htab
->num_lines_log2
;
4753 relocation
+= set_id
<< 18;
4754 overlay_encoded
= set_id
!= 0;
4759 if (unresolved_reloc
)
4761 else if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4765 /* ._ea is a special section that isn't allocated in SPU
4766 memory, but rather occupies space in PPU memory as
4767 part of an embedded ELF image. If this reloc is
4768 against a symbol defined in ._ea, then transform the
4769 reloc into an equivalent one without a symbol
4770 relative to the start of the ELF image. */
4771 rel
->r_addend
+= (relocation
4773 + elf_section_data (ea
)->this_hdr
.sh_offset
);
4774 rel
->r_info
= ELF32_R_INFO (0, r_type
);
4776 emit_these_relocs
= TRUE
;
4780 unresolved_reloc
= TRUE
;
      if (unresolved_reloc)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);
      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* FIXME: We don't want to warn on most references
		 within an overlay to itself, but this may silence a
		 warning that should be reported.  */
	      if (overlay_encoded && sec == input_section)
		break;
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      ret = FALSE;
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
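      /* That function emits sh_size / sh_entsize relocations, so after
	 squashing the array down to just the R_SPU_PPU* relocs the
	 header size must be made to agree with the new reloc_count.  */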
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */

static int
spu_elf_output_symbol_hook (struct bfd_link_info *info,
			    const char *sym_name ATTRIBUTE_UNUSED,
			    Elf_Internal_Sym *sym,
			    asection *sym_sec ATTRIBUTE_UNUSED,
			    struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (!info->relocatable
      && htab->stub_sec != NULL
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct got_entry *g;

      for (g = h->got.glist; g != NULL; g = g->next)
	if (htab->params->ovly_flavour == ovly_soft_icache
	    ? g->br_addr == g->stub_addr
	    : g->addend == 0 && g->ovl == 0)
	  {
	    sym->st_shndx = (_bfd_elf_section_from_bfd_section
			     (htab->stub_sec[0]->output_section->owner,
			      htab->stub_sec[0]->output_section));
	    sym->st_value = g->stub_addr;
	    break;
	  }
    }

  return 1;
}
static int spu_plugin = 0;

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}

/* Set ELF header e_type for plugins.  */

static void
spu_elf_post_process_headers (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  if (spu_plugin)
    {
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);

      i_ehdrp->e_type = ET_DYN;
    }
}
/* We may add an extra PT_LOAD segment for .toe.  We also need extra
   segments for overlays.  */
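/* The count below is num_overlays extra headers, plus one more when
   there are overlays at all, plus one for a loadable .toe; e.g. an
   image with four overlays and a loadable .toe reserves
   4 + 1 + 1 = 6 extra program headers.  */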
static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  asection *sec;
  int extra = 0;

  if (info != NULL)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      extra = htab->num_overlays;
    }

  if (extra)
    ++extra;

  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    ++extra;

  return extra;
}
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */
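/* For instance, if one PT_LOAD maps [.text, .rodata, .ov1, .data, .bss]
   and .ov1 is an overlay (i == 2 below), the code first splits off a
   new PT_LOAD for the trailing [.data, .bss], then trims the original
   map to [.text, .rodata] and inserts a single-section PT_LOAD for
   .ov1 between the two, giving three segments in place of one.  */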
static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }

  return TRUE;
}
/* Tweak the section type of .note.spu_name.  */

static bfd_boolean
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
		       Elf_Internal_Shdr *hdr,
		       asection *sec)
{
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
    hdr->sh_type = SHT_NOTE;
  return TRUE;
}
/* Tweak phdrs before writing them out.  */

static bfd_boolean
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0
		&& htab->params->ovly_flavour != ovly_soft_icache)
	      {
		bfd_byte *p = htab->ovtab->contents;
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }

	    /* Soft-icache has its file offset put in .ovl.init.  */
	    if (htab->init != NULL && htab->init->size != 0)
	      {
		bfd_vma val
		  = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

		bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
	      }
	  }
    }
  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
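  /* The expression -X & 15 used below is the number of bytes needed
     to pad X up to a multiple of 16; e.g. a p_filesz of 0x1234 needs
     12 bytes of padding and becomes 0x1240, while 0x1240 itself needs
     none.  */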
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
#define TARGET_BIG_SYM		bfd_elf32_spu_vec
#define TARGET_BIG_NAME		"elf32-spu"
#define ELF_ARCH		bfd_arch_spu
#define ELF_MACHINE_CODE	EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE		0x80
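/* (0x80 is 128 bytes: DMA between main storage and SPU local store on
   the Cell/B.E. is most efficient when transfers are 128-byte aligned,
   the cache line size, hence the small "page" size here.)  */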
#define elf_backend_rela_normal		1
#define elf_backend_can_gc_sections	1

#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
#define elf_info_to_howto			spu_elf_info_to_howto
#define elf_backend_count_relocs		spu_elf_count_relocs
#define elf_backend_relocate_section		spu_elf_relocate_section
#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
#define elf_backend_object_p			spu_elf_object_p
#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
#define elf_backend_modify_program_headers	spu_elf_modify_program_headers
#define elf_backend_post_process_headers	spu_elf_post_process_headers
#define elf_backend_fake_sections		spu_elf_fake_sections
#define elf_backend_special_sections		spu_elf_special_sections
#define bfd_elf32_bfd_final_link		spu_elf_final_link

#include "elf32-target.h"