* elfcpp.h (NT_VERSION, NT_ARCH): Define as enum constants.
[binutils.git] / bfd / elf32-spu.c
blobdb9f205868496ab5aa4f5b3418e72fd03a4bd4ec
1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
21 #include "sysdep.h"
22 #include "bfd.h"
23 #include "bfdlink.h"
24 #include "libbfd.h"
25 #include "elf-bfd.h"
26 #include "elf/spu.h"
27 #include "elf32-spu.h"
29 /* We use RELA style relocs. Don't define USE_REL. */
31 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
32 void *, asection *,
33 bfd *, char **);
35 /* Values of type 'enum elf_spu_reloc_type' are used to index this
36 array, so it must be declared in the order of that type. */
38 static reloc_howto_type elf_howto_table[] = {
39 HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
40 bfd_elf_generic_reloc, "SPU_NONE",
41 FALSE, 0, 0x00000000, FALSE),
42 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
43 bfd_elf_generic_reloc, "SPU_ADDR10",
44 FALSE, 0, 0x00ffc000, FALSE),
45 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
46 bfd_elf_generic_reloc, "SPU_ADDR16",
47 FALSE, 0, 0x007fff80, FALSE),
48 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
49 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
50 FALSE, 0, 0x007fff80, FALSE),
51 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
52 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
53 FALSE, 0, 0x007fff80, FALSE),
54 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
55 bfd_elf_generic_reloc, "SPU_ADDR18",
56 FALSE, 0, 0x01ffff80, FALSE),
57 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "SPU_ADDR32",
59 FALSE, 0, 0xffffffff, FALSE),
60 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "SPU_REL16",
62 FALSE, 0, 0x007fff80, TRUE),
63 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
64 bfd_elf_generic_reloc, "SPU_ADDR7",
65 FALSE, 0, 0x001fc000, FALSE),
66 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
67 spu_elf_rel9, "SPU_REL9",
68 FALSE, 0, 0x0180007f, TRUE),
69 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
70 spu_elf_rel9, "SPU_REL9I",
71 FALSE, 0, 0x0000c07f, TRUE),
72 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
73 bfd_elf_generic_reloc, "SPU_ADDR10I",
74 FALSE, 0, 0x00ffc000, FALSE),
75 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
76 bfd_elf_generic_reloc, "SPU_ADDR16I",
77 FALSE, 0, 0x007fff80, FALSE),
78 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
79 bfd_elf_generic_reloc, "SPU_REL32",
80 FALSE, 0, 0xffffffff, TRUE),
81 HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "SPU_ADDR16X",
83 FALSE, 0, 0x007fff80, FALSE),
84 HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
85 bfd_elf_generic_reloc, "SPU_PPU32",
86 FALSE, 0, 0xffffffff, FALSE),
87 HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
88 bfd_elf_generic_reloc, "SPU_PPU64",
89 FALSE, 0, -1, FALSE),
92 static struct bfd_elf_special_section const spu_elf_special_sections[] = {
93 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
94 { NULL, 0, 0, 0, 0 }
97 static enum elf_spu_reloc_type
98 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
100 switch (code)
102 default:
103 return R_SPU_NONE;
104 case BFD_RELOC_SPU_IMM10W:
105 return R_SPU_ADDR10;
106 case BFD_RELOC_SPU_IMM16W:
107 return R_SPU_ADDR16;
108 case BFD_RELOC_SPU_LO16:
109 return R_SPU_ADDR16_LO;
110 case BFD_RELOC_SPU_HI16:
111 return R_SPU_ADDR16_HI;
112 case BFD_RELOC_SPU_IMM18:
113 return R_SPU_ADDR18;
114 case BFD_RELOC_SPU_PCREL16:
115 return R_SPU_REL16;
116 case BFD_RELOC_SPU_IMM7:
117 return R_SPU_ADDR7;
118 case BFD_RELOC_SPU_IMM8:
119 return R_SPU_NONE;
120 case BFD_RELOC_SPU_PCREL9a:
121 return R_SPU_REL9;
122 case BFD_RELOC_SPU_PCREL9b:
123 return R_SPU_REL9I;
124 case BFD_RELOC_SPU_IMM10:
125 return R_SPU_ADDR10I;
126 case BFD_RELOC_SPU_IMM16:
127 return R_SPU_ADDR16I;
128 case BFD_RELOC_32:
129 return R_SPU_ADDR32;
130 case BFD_RELOC_32_PCREL:
131 return R_SPU_REL32;
132 case BFD_RELOC_SPU_PPU32:
133 return R_SPU_PPU32;
134 case BFD_RELOC_SPU_PPU64:
135 return R_SPU_PPU64;
139 static void
140 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
141 arelent *cache_ptr,
142 Elf_Internal_Rela *dst)
144 enum elf_spu_reloc_type r_type;
146 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
147 BFD_ASSERT (r_type < R_SPU_max);
148 cache_ptr->howto = &elf_howto_table[(int) r_type];
151 static reloc_howto_type *
152 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
153 bfd_reloc_code_real_type code)
155 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
157 if (r_type == R_SPU_NONE)
158 return NULL;
160 return elf_howto_table + r_type;
163 static reloc_howto_type *
164 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
165 const char *r_name)
167 unsigned int i;
169 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
170 if (elf_howto_table[i].name != NULL
171 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
172 return &elf_howto_table[i];
174 return NULL;
177 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
179 static bfd_reloc_status_type
180 spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
181 void *data, asection *input_section,
182 bfd *output_bfd, char **error_message)
184 bfd_size_type octets;
185 bfd_vma val;
186 long insn;
188 /* If this is a relocatable link (output_bfd test tells us), just
189 call the generic function. Any adjustment will be done at final
190 link time. */
191 if (output_bfd != NULL)
192 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
193 input_section, output_bfd, error_message);
195 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
196 return bfd_reloc_outofrange;
197 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
199 /* Get symbol value. */
200 val = 0;
201 if (!bfd_is_com_section (symbol->section))
202 val = symbol->value;
203 if (symbol->section->output_section)
204 val += symbol->section->output_section->vma;
206 val += reloc_entry->addend;
208 /* Make it pc-relative. */
209 val -= input_section->output_section->vma + input_section->output_offset;
211 val >>= 2;
212 if (val + 256 >= 512)
213 return bfd_reloc_overflow;
215 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
217 /* Move two high bits of value to REL9I and REL9 position.
218 The mask will take care of selecting the right field. */
219 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
220 insn &= ~reloc_entry->howto->dst_mask;
221 insn |= val & reloc_entry->howto->dst_mask;
222 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
223 return bfd_reloc_ok;
226 static bfd_boolean
227 spu_elf_new_section_hook (bfd *abfd, asection *sec)
229 if (!sec->used_by_bfd)
231 struct _spu_elf_section_data *sdata;
233 sdata = bfd_zalloc (abfd, sizeof (*sdata));
234 if (sdata == NULL)
235 return FALSE;
236 sec->used_by_bfd = sdata;
239 return _bfd_elf_new_section_hook (abfd, sec);
242 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
243 strip --strip-unneeded will not remove them. */
245 static void
246 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
248 if (sym->name != NULL
249 && sym->section != bfd_abs_section_ptr
250 && strncmp (sym->name, "_EAR_", 5) == 0)
251 sym->flags |= BSF_KEEP;
254 /* SPU ELF linker hash table. */
256 struct spu_link_hash_table
258 struct elf_link_hash_table elf;
260 /* Shortcuts to overlay sections. */
261 asection *ovtab;
262 asection *toe;
263 asection **ovl_sec;
265 /* Count of stubs in each overlay section. */
266 unsigned int *stub_count;
268 /* The stub section for each overlay section. */
269 asection **stub_sec;
271 struct elf_link_hash_entry *ovly_load;
272 struct elf_link_hash_entry *ovly_return;
273 unsigned long ovly_load_r_symndx;
275 /* Number of overlay buffers. */
276 unsigned int num_buf;
278 /* Total number of overlays. */
279 unsigned int num_overlays;
281 /* Set if we should emit symbols for stubs. */
282 unsigned int emit_stub_syms:1;
284 /* Set if we want stubs on calls out of overlay regions to
285 non-overlay regions. */
286 unsigned int non_overlay_stubs : 1;
288 /* Set on error. */
289 unsigned int stub_err : 1;
291 /* Set if stack size analysis should be done. */
292 unsigned int stack_analysis : 1;
294 /* Set if __stack_* syms will be emitted. */
295 unsigned int emit_stack_syms : 1;
298 /* Hijack the generic got fields for overlay stub accounting. */
300 struct got_entry
302 struct got_entry *next;
303 unsigned int ovl;
304 bfd_vma addend;
305 bfd_vma stub_addr;
308 #define spu_hash_table(p) \
309 ((struct spu_link_hash_table *) ((p)->hash))
311 /* Create a spu ELF linker hash table. */
313 static struct bfd_link_hash_table *
314 spu_elf_link_hash_table_create (bfd *abfd)
316 struct spu_link_hash_table *htab;
318 htab = bfd_malloc (sizeof (*htab));
319 if (htab == NULL)
320 return NULL;
322 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
323 _bfd_elf_link_hash_newfunc,
324 sizeof (struct elf_link_hash_entry)))
326 free (htab);
327 return NULL;
330 memset (&htab->ovtab, 0,
331 sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));
333 htab->elf.init_got_refcount.refcount = 0;
334 htab->elf.init_got_refcount.glist = NULL;
335 htab->elf.init_got_offset.offset = 0;
336 htab->elf.init_got_offset.glist = NULL;
337 return &htab->elf.root;
340 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
341 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
342 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
344 static bfd_boolean
345 get_sym_h (struct elf_link_hash_entry **hp,
346 Elf_Internal_Sym **symp,
347 asection **symsecp,
348 Elf_Internal_Sym **locsymsp,
349 unsigned long r_symndx,
350 bfd *ibfd)
352 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
354 if (r_symndx >= symtab_hdr->sh_info)
356 struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
357 struct elf_link_hash_entry *h;
359 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
360 while (h->root.type == bfd_link_hash_indirect
361 || h->root.type == bfd_link_hash_warning)
362 h = (struct elf_link_hash_entry *) h->root.u.i.link;
364 if (hp != NULL)
365 *hp = h;
367 if (symp != NULL)
368 *symp = NULL;
370 if (symsecp != NULL)
372 asection *symsec = NULL;
373 if (h->root.type == bfd_link_hash_defined
374 || h->root.type == bfd_link_hash_defweak)
375 symsec = h->root.u.def.section;
376 *symsecp = symsec;
379 else
381 Elf_Internal_Sym *sym;
382 Elf_Internal_Sym *locsyms = *locsymsp;
384 if (locsyms == NULL)
386 locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
387 if (locsyms == NULL)
389 size_t symcount = symtab_hdr->sh_info;
391 /* If we are reading symbols into the contents, then
392 read the global syms too. This is done to cache
393 syms for later stack analysis. */
394 if ((unsigned char **) locsymsp == &symtab_hdr->contents)
395 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
396 locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
397 NULL, NULL, NULL);
399 if (locsyms == NULL)
400 return FALSE;
401 *locsymsp = locsyms;
403 sym = locsyms + r_symndx;
405 if (hp != NULL)
406 *hp = NULL;
408 if (symp != NULL)
409 *symp = sym;
411 if (symsecp != NULL)
412 *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
415 return TRUE;
418 /* Create the note section if not already present. This is done early so
419 that the linker maps the sections to the right place in the output. */
421 bfd_boolean
422 spu_elf_create_sections (struct bfd_link_info *info,
423 int stack_analysis,
424 int emit_stack_syms)
426 bfd *ibfd;
427 struct spu_link_hash_table *htab = spu_hash_table (info);
429 /* Stash some options away where we can get at them later. */
430 htab->stack_analysis = stack_analysis;
431 htab->emit_stack_syms = emit_stack_syms;
433 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
434 if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
435 break;
437 if (ibfd == NULL)
439 /* Make SPU_PTNOTE_SPUNAME section. */
440 asection *s;
441 size_t name_len;
442 size_t size;
443 bfd_byte *data;
444 flagword flags;
446 ibfd = info->input_bfds;
447 flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
448 s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
449 if (s == NULL
450 || !bfd_set_section_alignment (ibfd, s, 4))
451 return FALSE;
453 name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
454 size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
455 size += (name_len + 3) & -4;
457 if (!bfd_set_section_size (ibfd, s, size))
458 return FALSE;
460 data = bfd_zalloc (ibfd, size);
461 if (data == NULL)
462 return FALSE;
464 bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
465 bfd_put_32 (ibfd, name_len, data + 4);
466 bfd_put_32 (ibfd, 1, data + 8);
467 memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
468 memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
469 bfd_get_filename (info->output_bfd), name_len);
470 s->contents = data;
473 return TRUE;
476 /* qsort predicate to sort sections by vma. */
478 static int
479 sort_sections (const void *a, const void *b)
481 const asection *const *s1 = a;
482 const asection *const *s2 = b;
483 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
485 if (delta != 0)
486 return delta < 0 ? -1 : 1;
488 return (*s1)->index - (*s2)->index;
491 /* Identify overlays in the output bfd, and number them. */
493 bfd_boolean
494 spu_elf_find_overlays (struct bfd_link_info *info)
496 struct spu_link_hash_table *htab = spu_hash_table (info);
497 asection **alloc_sec;
498 unsigned int i, n, ovl_index, num_buf;
499 asection *s;
500 bfd_vma ovl_end;
502 if (info->output_bfd->section_count < 2)
503 return FALSE;
505 alloc_sec
506 = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
507 if (alloc_sec == NULL)
508 return FALSE;
510 /* Pick out all the alloced sections. */
511 for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
512 if ((s->flags & SEC_ALLOC) != 0
513 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
514 && s->size != 0)
515 alloc_sec[n++] = s;
517 if (n == 0)
519 free (alloc_sec);
520 return FALSE;
523 /* Sort them by vma. */
524 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
526 /* Look for overlapping vmas. Any with overlap must be overlays.
527 Count them. Also count the number of overlay regions. */
528 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
529 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
531 s = alloc_sec[i];
532 if (s->vma < ovl_end)
534 asection *s0 = alloc_sec[i - 1];
536 if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
538 alloc_sec[ovl_index] = s0;
539 spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
540 spu_elf_section_data (s0)->u.o.ovl_buf = ++num_buf;
542 alloc_sec[ovl_index] = s;
543 spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
544 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
545 if (s0->vma != s->vma)
547 info->callbacks->einfo (_("%X%P: overlay sections %A and %A "
548 "do not start at the same address.\n"),
549 s0, s);
550 return FALSE;
552 if (ovl_end < s->vma + s->size)
553 ovl_end = s->vma + s->size;
555 else
556 ovl_end = s->vma + s->size;
559 htab->num_overlays = ovl_index;
560 htab->num_buf = num_buf;
561 htab->ovl_sec = alloc_sec;
562 htab->ovly_load = elf_link_hash_lookup (&htab->elf, "__ovly_load",
563 FALSE, FALSE, FALSE);
564 htab->ovly_return = elf_link_hash_lookup (&htab->elf, "__ovly_return",
565 FALSE, FALSE, FALSE);
566 return ovl_index != 0;
569 /* Support two sizes of overlay stubs, a slower more compact stub of two
570 intructions, and a faster stub of four instructions. */
571 #ifndef OVL_STUB_SIZE
572 /* Default to faster. */
573 #define OVL_STUB_SIZE 16
574 /* #define OVL_STUB_SIZE 8 */
575 #endif
576 #define BRSL 0x33000000
577 #define BR 0x32000000
578 #define NOP 0x40200000
579 #define LNOP 0x00200000
580 #define ILA 0x42000000
582 /* Return true for all relative and absolute branch instructions.
583 bra 00110000 0..
584 brasl 00110001 0..
585 br 00110010 0..
586 brsl 00110011 0..
587 brz 00100000 0..
588 brnz 00100001 0..
589 brhz 00100010 0..
590 brhnz 00100011 0.. */
592 static bfd_boolean
593 is_branch (const unsigned char *insn)
595 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
598 /* Return true for all indirect branch instructions.
599 bi 00110101 000
600 bisl 00110101 001
601 iret 00110101 010
602 bisled 00110101 011
603 biz 00100101 000
604 binz 00100101 001
605 bihz 00100101 010
606 bihnz 00100101 011 */
608 static bfd_boolean
609 is_indirect_branch (const unsigned char *insn)
611 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
614 /* Return true for branch hint instructions.
615 hbra 0001000..
616 hbrr 0001001.. */
618 static bfd_boolean
619 is_hint (const unsigned char *insn)
621 return (insn[0] & 0xfc) == 0x10;
624 /* True if INPUT_SECTION might need overlay stubs. */
626 static bfd_boolean
627 maybe_needs_stubs (asection *input_section, bfd *output_bfd)
629 /* No stubs for debug sections and suchlike. */
630 if ((input_section->flags & SEC_ALLOC) == 0)
631 return FALSE;
633 /* No stubs for link-once sections that will be discarded. */
634 if (input_section->output_section == NULL
635 || input_section->output_section->owner != output_bfd)
636 return FALSE;
638 /* Don't create stubs for .eh_frame references. */
639 if (strcmp (input_section->name, ".eh_frame") == 0)
640 return FALSE;
642 return TRUE;
645 enum _stub_type
647 no_stub,
648 ovl_stub,
649 nonovl_stub,
650 stub_error
653 /* Return non-zero if this reloc symbol should go via an overlay stub.
654 Return 2 if the stub must be in non-overlay area. */
656 static enum _stub_type
657 needs_ovl_stub (struct elf_link_hash_entry *h,
658 Elf_Internal_Sym *sym,
659 asection *sym_sec,
660 asection *input_section,
661 Elf_Internal_Rela *irela,
662 bfd_byte *contents,
663 struct bfd_link_info *info)
665 struct spu_link_hash_table *htab = spu_hash_table (info);
666 enum elf_spu_reloc_type r_type;
667 unsigned int sym_type;
668 bfd_boolean branch;
669 enum _stub_type ret = no_stub;
671 if (sym_sec == NULL
672 || sym_sec->output_section == NULL
673 || sym_sec->output_section->owner != info->output_bfd
674 || spu_elf_section_data (sym_sec->output_section) == NULL)
675 return ret;
677 if (h != NULL)
679 /* Ensure no stubs for user supplied overlay manager syms. */
680 if (h == htab->ovly_load || h == htab->ovly_return)
681 return ret;
683 /* setjmp always goes via an overlay stub, because then the return
684 and hence the longjmp goes via __ovly_return. That magically
685 makes setjmp/longjmp between overlays work. */
686 if (strncmp (h->root.root.string, "setjmp", 6) == 0
687 && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
688 ret = ovl_stub;
691 /* Usually, symbols in non-overlay sections don't need stubs. */
692 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
693 && !htab->non_overlay_stubs)
694 return ret;
696 if (h != NULL)
697 sym_type = h->type;
698 else
699 sym_type = ELF_ST_TYPE (sym->st_info);
701 r_type = ELF32_R_TYPE (irela->r_info);
702 branch = FALSE;
703 if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
705 bfd_byte insn[4];
707 if (contents == NULL)
709 contents = insn;
710 if (!bfd_get_section_contents (input_section->owner,
711 input_section,
712 contents,
713 irela->r_offset, 4))
714 return stub_error;
716 else
717 contents += irela->r_offset;
719 if (is_branch (contents) || is_hint (contents))
721 branch = TRUE;
722 if ((contents[0] & 0xfd) == 0x31
723 && sym_type != STT_FUNC
724 && contents == insn)
726 /* It's common for people to write assembly and forget
727 to give function symbols the right type. Handle
728 calls to such symbols, but warn so that (hopefully)
729 people will fix their code. We need the symbol
730 type to be correct to distinguish function pointer
731 initialisation from other pointer initialisations. */
732 const char *sym_name;
734 if (h != NULL)
735 sym_name = h->root.root.string;
736 else
738 Elf_Internal_Shdr *symtab_hdr;
739 symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
740 sym_name = bfd_elf_sym_name (input_section->owner,
741 symtab_hdr,
742 sym,
743 sym_sec);
745 (*_bfd_error_handler) (_("warning: call to non-function"
746 " symbol %s defined in %B"),
747 sym_sec->owner, sym_name);
753 if (sym_type != STT_FUNC
754 && !branch
755 && (sym_sec->flags & SEC_CODE) == 0)
756 return ret;
758 /* A reference from some other section to a symbol in an overlay
759 section needs a stub. */
760 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
761 != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
762 return ovl_stub;
764 /* If this insn isn't a branch then we are possibly taking the
765 address of a function and passing it out somehow. */
766 return !branch && sym_type == STT_FUNC ? nonovl_stub : ret;
769 static bfd_boolean
770 count_stub (struct spu_link_hash_table *htab,
771 bfd *ibfd,
772 asection *isec,
773 enum _stub_type stub_type,
774 struct elf_link_hash_entry *h,
775 const Elf_Internal_Rela *irela)
777 unsigned int ovl = 0;
778 struct got_entry *g, **head;
779 bfd_vma addend;
781 /* If this instruction is a branch or call, we need a stub
782 for it. One stub per function per overlay.
783 If it isn't a branch, then we are taking the address of
784 this function so need a stub in the non-overlay area
785 for it. One stub per function. */
786 if (stub_type != nonovl_stub)
787 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
789 if (h != NULL)
790 head = &h->got.glist;
791 else
793 if (elf_local_got_ents (ibfd) == NULL)
795 bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
796 * sizeof (*elf_local_got_ents (ibfd)));
797 elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
798 if (elf_local_got_ents (ibfd) == NULL)
799 return FALSE;
801 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
804 addend = 0;
805 if (irela != NULL)
806 addend = irela->r_addend;
808 if (ovl == 0)
810 struct got_entry *gnext;
812 for (g = *head; g != NULL; g = g->next)
813 if (g->addend == addend && g->ovl == 0)
814 break;
816 if (g == NULL)
818 /* Need a new non-overlay area stub. Zap other stubs. */
819 for (g = *head; g != NULL; g = gnext)
821 gnext = g->next;
822 if (g->addend == addend)
824 htab->stub_count[g->ovl] -= 1;
825 free (g);
830 else
832 for (g = *head; g != NULL; g = g->next)
833 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
834 break;
837 if (g == NULL)
839 g = bfd_malloc (sizeof *g);
840 if (g == NULL)
841 return FALSE;
842 g->ovl = ovl;
843 g->addend = addend;
844 g->stub_addr = (bfd_vma) -1;
845 g->next = *head;
846 *head = g;
848 htab->stub_count[ovl] += 1;
851 return TRUE;
854 /* Two instruction overlay stubs look like:
856 brsl $75,__ovly_load
857 .word target_ovl_and_address
859 ovl_and_address is a word with the overlay number in the top 14 bits
860 and local store address in the bottom 18 bits.
862 Four instruction overlay stubs look like:
864 ila $78,ovl_number
865 lnop
866 ila $79,target_address
867 br __ovly_load */
869 static bfd_boolean
870 build_stub (struct spu_link_hash_table *htab,
871 bfd *ibfd,
872 asection *isec,
873 enum _stub_type stub_type,
874 struct elf_link_hash_entry *h,
875 const Elf_Internal_Rela *irela,
876 bfd_vma dest,
877 asection *dest_sec)
879 unsigned int ovl;
880 struct got_entry *g, **head;
881 asection *sec;
882 bfd_vma addend, val, from, to;
884 ovl = 0;
885 if (stub_type != nonovl_stub)
886 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
888 if (h != NULL)
889 head = &h->got.glist;
890 else
891 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
893 addend = 0;
894 if (irela != NULL)
895 addend = irela->r_addend;
897 for (g = *head; g != NULL; g = g->next)
898 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
899 break;
900 if (g == NULL)
901 abort ();
903 if (g->ovl == 0 && ovl != 0)
904 return TRUE;
906 if (g->stub_addr != (bfd_vma) -1)
907 return TRUE;
909 sec = htab->stub_sec[ovl];
910 dest += dest_sec->output_offset + dest_sec->output_section->vma;
911 from = sec->size + sec->output_offset + sec->output_section->vma;
912 g->stub_addr = from;
913 to = (htab->ovly_load->root.u.def.value
914 + htab->ovly_load->root.u.def.section->output_offset
915 + htab->ovly_load->root.u.def.section->output_section->vma);
916 val = to - from;
917 if (OVL_STUB_SIZE == 16)
918 val -= 12;
919 if (((dest | to | from) & 3) != 0
920 || val + 0x20000 >= 0x40000)
922 htab->stub_err = 1;
923 return FALSE;
925 ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
927 if (OVL_STUB_SIZE == 16)
929 bfd_put_32 (sec->owner, ILA + ((ovl << 7) & 0x01ffff80) + 78,
930 sec->contents + sec->size);
931 bfd_put_32 (sec->owner, LNOP,
932 sec->contents + sec->size + 4);
933 bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
934 sec->contents + sec->size + 8);
935 bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
936 sec->contents + sec->size + 12);
938 else if (OVL_STUB_SIZE == 8)
940 bfd_put_32 (sec->owner, BRSL + ((val << 5) & 0x007fff80) + 75,
941 sec->contents + sec->size);
943 val = (dest & 0x3ffff) | (ovl << 14);
944 bfd_put_32 (sec->owner, val,
945 sec->contents + sec->size + 4);
947 else
948 abort ();
949 sec->size += OVL_STUB_SIZE;
951 if (htab->emit_stub_syms)
953 size_t len;
954 char *name;
955 int add;
957 len = 8 + sizeof (".ovl_call.") - 1;
958 if (h != NULL)
959 len += strlen (h->root.root.string);
960 else
961 len += 8 + 1 + 8;
962 add = 0;
963 if (irela != NULL)
964 add = (int) irela->r_addend & 0xffffffff;
965 if (add != 0)
966 len += 1 + 8;
967 name = bfd_malloc (len);
968 if (name == NULL)
969 return FALSE;
971 sprintf (name, "%08x.ovl_call.", g->ovl);
972 if (h != NULL)
973 strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
974 else
975 sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
976 dest_sec->id & 0xffffffff,
977 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
978 if (add != 0)
979 sprintf (name + len - 9, "+%x", add);
981 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
982 free (name);
983 if (h == NULL)
984 return FALSE;
985 if (h->root.type == bfd_link_hash_new)
987 h->root.type = bfd_link_hash_defined;
988 h->root.u.def.section = sec;
989 h->root.u.def.value = sec->size - OVL_STUB_SIZE;
990 h->size = OVL_STUB_SIZE;
991 h->type = STT_FUNC;
992 h->ref_regular = 1;
993 h->def_regular = 1;
994 h->ref_regular_nonweak = 1;
995 h->forced_local = 1;
996 h->non_elf = 0;
1000 return TRUE;
1003 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1004 symbols. */
1006 static bfd_boolean
1007 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1009 /* Symbols starting with _SPUEAR_ need a stub because they may be
1010 invoked by the PPU. */
1011 if ((h->root.type == bfd_link_hash_defined
1012 || h->root.type == bfd_link_hash_defweak)
1013 && h->def_regular
1014 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
1016 struct spu_link_hash_table *htab = inf;
1018 count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1021 return TRUE;
1024 static bfd_boolean
1025 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1027 /* Symbols starting with _SPUEAR_ need a stub because they may be
1028 invoked by the PPU. */
1029 if ((h->root.type == bfd_link_hash_defined
1030 || h->root.type == bfd_link_hash_defweak)
1031 && h->def_regular
1032 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
1034 struct spu_link_hash_table *htab = inf;
1036 build_stub (htab, NULL, NULL, nonovl_stub, h, NULL,
1037 h->root.u.def.value, h->root.u.def.section);
1040 return TRUE;
1043 /* Size or build stubs. */
1045 static bfd_boolean
1046 process_stubs (struct bfd_link_info *info, bfd_boolean build)
1048 struct spu_link_hash_table *htab = spu_hash_table (info);
1049 bfd *ibfd;
1051 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
1053 extern const bfd_target bfd_elf32_spu_vec;
1054 Elf_Internal_Shdr *symtab_hdr;
1055 asection *isec;
1056 Elf_Internal_Sym *local_syms = NULL;
1057 void *psyms;
1059 if (ibfd->xvec != &bfd_elf32_spu_vec)
1060 continue;
1062 /* We'll need the symbol table in a second. */
1063 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1064 if (symtab_hdr->sh_info == 0)
1065 continue;
1067 /* Arrange to read and keep global syms for later stack analysis. */
1068 psyms = &local_syms;
1069 if (htab->stack_analysis)
1070 psyms = &symtab_hdr->contents;
1072 /* Walk over each section attached to the input bfd. */
1073 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
1075 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1077 /* If there aren't any relocs, then there's nothing more to do. */
1078 if ((isec->flags & SEC_RELOC) == 0
1079 || isec->reloc_count == 0)
1080 continue;
1082 if (!maybe_needs_stubs (isec, info->output_bfd))
1083 continue;
1085 /* Get the relocs. */
1086 internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
1087 info->keep_memory);
1088 if (internal_relocs == NULL)
1089 goto error_ret_free_local;
1091 /* Now examine each relocation. */
1092 irela = internal_relocs;
1093 irelaend = irela + isec->reloc_count;
1094 for (; irela < irelaend; irela++)
1096 enum elf_spu_reloc_type r_type;
1097 unsigned int r_indx;
1098 asection *sym_sec;
1099 Elf_Internal_Sym *sym;
1100 struct elf_link_hash_entry *h;
1101 enum _stub_type stub_type;
1103 r_type = ELF32_R_TYPE (irela->r_info);
1104 r_indx = ELF32_R_SYM (irela->r_info);
1106 if (r_type >= R_SPU_max)
1108 bfd_set_error (bfd_error_bad_value);
1109 error_ret_free_internal:
1110 if (elf_section_data (isec)->relocs != internal_relocs)
1111 free (internal_relocs);
1112 error_ret_free_local:
1113 if (local_syms != NULL
1114 && (symtab_hdr->contents
1115 != (unsigned char *) local_syms))
1116 free (local_syms);
1117 return FALSE;
1120 /* Determine the reloc target section. */
1121 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
1122 goto error_ret_free_internal;
1124 stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
1125 NULL, info);
1126 if (stub_type == no_stub)
1127 continue;
1128 else if (stub_type == stub_error)
1129 goto error_ret_free_internal;
1131 if (htab->stub_count == NULL)
1133 bfd_size_type amt;
1134 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1135 htab->stub_count = bfd_zmalloc (amt);
1136 if (htab->stub_count == NULL)
1137 goto error_ret_free_internal;
1140 if (!build)
1142 if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
1143 goto error_ret_free_internal;
1145 else
1147 bfd_vma dest;
1149 if (h != NULL)
1150 dest = h->root.u.def.value;
1151 else
1152 dest = sym->st_value;
1153 dest += irela->r_addend;
1154 if (!build_stub (htab, ibfd, isec, stub_type, h, irela,
1155 dest, sym_sec))
1156 goto error_ret_free_internal;
1160 /* We're done with the internal relocs, free them. */
1161 if (elf_section_data (isec)->relocs != internal_relocs)
1162 free (internal_relocs);
1165 if (local_syms != NULL
1166 && symtab_hdr->contents != (unsigned char *) local_syms)
1168 if (!info->keep_memory)
1169 free (local_syms);
1170 else
1171 symtab_hdr->contents = (unsigned char *) local_syms;
1175 return TRUE;
1178 /* Allocate space for overlay call and return stubs. */
1181 spu_elf_size_stubs (struct bfd_link_info *info,
1182 void (*place_spu_section) (asection *, asection *,
1183 const char *),
1184 int non_overlay_stubs)
1186 struct spu_link_hash_table *htab = spu_hash_table (info);
1187 bfd *ibfd;
1188 bfd_size_type amt;
1189 flagword flags;
1190 unsigned int i;
1191 asection *stub;
1193 htab->non_overlay_stubs = non_overlay_stubs;
1194 if (!process_stubs (info, FALSE))
1195 return 0;
1197 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, htab);
1198 if (htab->stub_err)
1199 return 0;
1201 if (htab->stub_count == NULL)
1202 return 1;
1204 ibfd = info->input_bfds;
1205 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1206 htab->stub_sec = bfd_zmalloc (amt);
1207 if (htab->stub_sec == NULL)
1208 return 0;
1210 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1211 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1212 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1213 htab->stub_sec[0] = stub;
1214 if (stub == NULL
1215 || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
1216 return 0;
1217 stub->size = htab->stub_count[0] * OVL_STUB_SIZE;
1218 (*place_spu_section) (stub, NULL, ".text");
1220 for (i = 0; i < htab->num_overlays; ++i)
1222 asection *osec = htab->ovl_sec[i];
1223 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1224 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1225 htab->stub_sec[ovl] = stub;
1226 if (stub == NULL
1227 || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
1228 return 0;
1229 stub->size = htab->stub_count[ovl] * OVL_STUB_SIZE;
1230 (*place_spu_section) (stub, osec, NULL);
1233 /* htab->ovtab consists of two arrays.
1234 . struct {
1235 . u32 vma;
1236 . u32 size;
1237 . u32 file_off;
1238 . u32 buf;
1239 . } _ovly_table[];
1241 . struct {
1242 . u32 mapped;
1243 . } _ovly_buf_table[];
1244 . */
1246 flags = (SEC_ALLOC | SEC_LOAD
1247 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1248 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1249 if (htab->ovtab == NULL
1250 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1251 return 0;
1253 htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1254 (*place_spu_section) (htab->ovtab, NULL, ".data");
1256 htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1257 if (htab->toe == NULL
1258 || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1259 return 0;
1260 htab->toe->size = 16;
1261 (*place_spu_section) (htab->toe, NULL, ".toe");
1263 return 2;
1266 /* Functions to handle embedded spu_ovl.o object. */
1268 static void *
1269 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1271 return stream;
1274 static file_ptr
1275 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1276 void *stream,
1277 void *buf,
1278 file_ptr nbytes,
1279 file_ptr offset)
1281 struct _ovl_stream *os;
1282 size_t count;
1283 size_t max;
1285 os = (struct _ovl_stream *) stream;
1286 max = (const char *) os->end - (const char *) os->start;
1288 if ((ufile_ptr) offset >= max)
1289 return 0;
1291 count = nbytes;
1292 if (count > max - offset)
1293 count = max - offset;
1295 memcpy (buf, (const char *) os->start + offset, count);
1296 return count;
1299 bfd_boolean
1300 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1302 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1303 "elf32-spu",
1304 ovl_mgr_open,
1305 (void *) stream,
1306 ovl_mgr_pread,
1307 NULL,
1308 NULL);
1309 return *ovl_bfd != NULL;
1312 /* Define an STT_OBJECT symbol. */
1314 static struct elf_link_hash_entry *
1315 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1317 struct elf_link_hash_entry *h;
1319 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1320 if (h == NULL)
1321 return NULL;
1323 if (h->root.type != bfd_link_hash_defined
1324 || !h->def_regular)
1326 h->root.type = bfd_link_hash_defined;
1327 h->root.u.def.section = htab->ovtab;
1328 h->type = STT_OBJECT;
1329 h->ref_regular = 1;
1330 h->def_regular = 1;
1331 h->ref_regular_nonweak = 1;
1332 h->non_elf = 0;
1334 else
1336 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1337 h->root.u.def.section->owner,
1338 h->root.root.string);
1339 bfd_set_error (bfd_error_bad_value);
1340 return NULL;
1343 return h;
1346 /* Fill in all stubs and the overlay tables. */
1348 bfd_boolean
1349 spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms)
1351 struct spu_link_hash_table *htab = spu_hash_table (info);
1352 struct elf_link_hash_entry *h;
1353 bfd_byte *p;
1354 asection *s;
1355 bfd *obfd;
1356 unsigned int i;
1358 htab->emit_stub_syms = emit_syms;
1359 if (htab->stub_count == NULL)
1360 return TRUE;
1362 for (i = 0; i <= htab->num_overlays; i++)
1363 if (htab->stub_sec[i]->size != 0)
1365 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1366 htab->stub_sec[i]->size);
1367 if (htab->stub_sec[i]->contents == NULL)
1368 return FALSE;
1369 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1370 htab->stub_sec[i]->size = 0;
1373 h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
1374 htab->ovly_load = h;
1375 BFD_ASSERT (h != NULL
1376 && (h->root.type == bfd_link_hash_defined
1377 || h->root.type == bfd_link_hash_defweak)
1378 && h->def_regular);
1380 s = h->root.u.def.section->output_section;
1381 if (spu_elf_section_data (s)->u.o.ovl_index)
1383 (*_bfd_error_handler) (_("%s in overlay section"),
1384 h->root.u.def.section->owner);
1385 bfd_set_error (bfd_error_bad_value);
1386 return FALSE;
1389 h = elf_link_hash_lookup (&htab->elf, "__ovly_return", FALSE, FALSE, FALSE);
1390 htab->ovly_return = h;
1392 /* Fill in all the stubs. */
1393 process_stubs (info, TRUE);
1395 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, htab);
1396 if (htab->stub_err)
1397 return FALSE;
1399 for (i = 0; i <= htab->num_overlays; i++)
1401 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1403 (*_bfd_error_handler) (_("stubs don't match calculated size"));
1404 bfd_set_error (bfd_error_bad_value);
1405 return FALSE;
1407 htab->stub_sec[i]->rawsize = 0;
1410 if (htab->stub_err)
1412 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1413 bfd_set_error (bfd_error_bad_value);
1414 return FALSE;
1417 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1418 if (htab->ovtab->contents == NULL)
1419 return FALSE;
1421 /* Write out _ovly_table. */
1422 p = htab->ovtab->contents;
1423 /* set low bit of .size to mark non-overlay area as present. */
1424 p[7] = 1;
1425 obfd = htab->ovtab->output_section->owner;
1426 for (s = obfd->sections; s != NULL; s = s->next)
1428 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
1430 if (ovl_index != 0)
1432 unsigned long off = ovl_index * 16;
1433 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
1435 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1436 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
1437 /* file_off written later in spu_elf_modify_program_headers. */
1438 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
1442 h = define_ovtab_symbol (htab, "_ovly_table");
1443 if (h == NULL)
1444 return FALSE;
1445 h->root.u.def.value = 16;
1446 h->size = htab->num_overlays * 16;
1448 h = define_ovtab_symbol (htab, "_ovly_table_end");
1449 if (h == NULL)
1450 return FALSE;
1451 h->root.u.def.value = htab->num_overlays * 16 + 16;
1452 h->size = 0;
1454 h = define_ovtab_symbol (htab, "_ovly_buf_table");
1455 if (h == NULL)
1456 return FALSE;
1457 h->root.u.def.value = htab->num_overlays * 16 + 16;
1458 h->size = htab->num_buf * 4;
1460 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
1461 if (h == NULL)
1462 return FALSE;
1463 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1464 h->size = 0;
1466 h = define_ovtab_symbol (htab, "_EAR_");
1467 if (h == NULL)
1468 return FALSE;
1469 h->root.u.def.section = htab->toe;
1470 h->root.u.def.value = 0;
1471 h->size = 16;
1473 return TRUE;
1476 /* Check that all loadable section VMAs lie in the range
1477 LO .. HI inclusive. */
1479 asection *
1480 spu_elf_check_vma (struct bfd_link_info *info, bfd_vma lo, bfd_vma hi)
1482 struct elf_segment_map *m;
1483 unsigned int i;
1484 bfd *abfd = info->output_bfd;
1486 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
1487 if (m->p_type == PT_LOAD)
1488 for (i = 0; i < m->count; i++)
1489 if (m->sections[i]->size != 0
1490 && (m->sections[i]->vma < lo
1491 || m->sections[i]->vma > hi
1492 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
1493 return m->sections[i];
1495 return NULL;
1498 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1499 Search for stack adjusting insns, and return the sp delta. */
1501 static int
1502 find_function_stack_adjust (asection *sec, bfd_vma offset)
1504 int unrecog;
1505 int reg[128];
1507 memset (reg, 0, sizeof (reg));
1508 for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
1510 unsigned char buf[4];
1511 int rt, ra;
1512 int imm;
1514 /* Assume no relocs on stack adjusing insns. */
1515 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
1516 break;
1518 if (buf[0] == 0x24 /* stqd */)
1519 continue;
1521 rt = buf[3] & 0x7f;
1522 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
1523 /* Partly decoded immediate field. */
1524 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
1526 if (buf[0] == 0x1c /* ai */)
1528 imm >>= 7;
1529 imm = (imm ^ 0x200) - 0x200;
1530 reg[rt] = reg[ra] + imm;
1532 if (rt == 1 /* sp */)
1534 if (imm > 0)
1535 break;
1536 return reg[rt];
1539 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
1541 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
1543 reg[rt] = reg[ra] + reg[rb];
1544 if (rt == 1)
1545 return reg[rt];
1547 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1549 if (buf[0] >= 0x42 /* ila */)
1550 imm |= (buf[0] & 1) << 17;
1551 else
1553 imm &= 0xffff;
1555 if (buf[0] == 0x40 /* il */)
1557 if ((buf[1] & 0x80) == 0)
1558 goto unknown_insn;
1559 imm = (imm ^ 0x8000) - 0x8000;
1561 else if ((buf[1] & 0x80) == 0 /* ilhu */)
1562 imm <<= 16;
1564 reg[rt] = imm;
1565 continue;
1567 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
1569 reg[rt] |= imm & 0xffff;
1570 continue;
1572 else if (buf[0] == 0x04 /* ori */)
1574 imm >>= 7;
1575 imm = (imm ^ 0x200) - 0x200;
1576 reg[rt] = reg[ra] | imm;
1577 continue;
1579 else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
1580 || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
1582 /* Used in pic reg load. Say rt is trashed. */
1583 reg[rt] = 0;
1584 continue;
1586 else if (is_branch (buf) || is_indirect_branch (buf))
1587 /* If we hit a branch then we must be out of the prologue. */
1588 break;
1589 unknown_insn:
1590 ++unrecog;
1593 return 0;
1596 /* qsort predicate to sort symbols by section and value. */
1598 static Elf_Internal_Sym *sort_syms_syms;
1599 static asection **sort_syms_psecs;
1601 static int
1602 sort_syms (const void *a, const void *b)
1604 Elf_Internal_Sym *const *s1 = a;
1605 Elf_Internal_Sym *const *s2 = b;
1606 asection *sec1,*sec2;
1607 bfd_signed_vma delta;
1609 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1610 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
1612 if (sec1 != sec2)
1613 return sec1->index - sec2->index;
1615 delta = (*s1)->st_value - (*s2)->st_value;
1616 if (delta != 0)
1617 return delta < 0 ? -1 : 1;
1619 delta = (*s2)->st_size - (*s1)->st_size;
1620 if (delta != 0)
1621 return delta < 0 ? -1 : 1;
1623 return *s1 < *s2 ? -1 : 1;
1626 struct call_info
1628 struct function_info *fun;
1629 struct call_info *next;
1630 unsigned int is_tail : 1;
1633 struct function_info
1635 /* List of functions called. Also branches to hot/cold part of
1636 function. */
1637 struct call_info *call_list;
1638 /* For hot/cold part of function, point to owner. */
1639 struct function_info *start;
1640 /* Symbol at start of function. */
1641 union {
1642 Elf_Internal_Sym *sym;
1643 struct elf_link_hash_entry *h;
1644 } u;
1645 /* Function section. */
1646 asection *sec;
1647 /* Address range of (this part of) function. */
1648 bfd_vma lo, hi;
1649 /* Stack usage. */
1650 int stack;
1651 /* Set if global symbol. */
1652 unsigned int global : 1;
1653 /* Set if known to be start of function (as distinct from a hunk
1654 in hot/cold section. */
1655 unsigned int is_func : 1;
1656 /* Flags used during call tree traversal. */
1657 unsigned int visit1 : 1;
1658 unsigned int non_root : 1;
1659 unsigned int visit2 : 1;
1660 unsigned int marking : 1;
1661 unsigned int visit3 : 1;
1664 struct spu_elf_stack_info
1666 int num_fun;
1667 int max_fun;
1668 /* Variable size array describing functions, one per contiguous
1669 address range belonging to a function. */
1670 struct function_info fun[1];
1673 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1674 entries for section SEC. */
1676 static struct spu_elf_stack_info *
1677 alloc_stack_info (asection *sec, int max_fun)
1679 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1680 bfd_size_type amt;
1682 amt = sizeof (struct spu_elf_stack_info);
1683 amt += (max_fun - 1) * sizeof (struct function_info);
1684 sec_data->u.i.stack_info = bfd_zmalloc (amt);
1685 if (sec_data->u.i.stack_info != NULL)
1686 sec_data->u.i.stack_info->max_fun = max_fun;
1687 return sec_data->u.i.stack_info;
1690 /* Add a new struct function_info describing a (part of a) function
1691 starting at SYM_H. Keep the array sorted by address. */
1693 static struct function_info *
1694 maybe_insert_function (asection *sec,
1695 void *sym_h,
1696 bfd_boolean global,
1697 bfd_boolean is_func)
1699 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1700 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1701 int i;
1702 bfd_vma off, size;
1704 if (sinfo == NULL)
1706 sinfo = alloc_stack_info (sec, 20);
1707 if (sinfo == NULL)
1708 return NULL;
1711 if (!global)
1713 Elf_Internal_Sym *sym = sym_h;
1714 off = sym->st_value;
1715 size = sym->st_size;
1717 else
1719 struct elf_link_hash_entry *h = sym_h;
1720 off = h->root.u.def.value;
1721 size = h->size;
1724 for (i = sinfo->num_fun; --i >= 0; )
1725 if (sinfo->fun[i].lo <= off)
1726 break;
1728 if (i >= 0)
1730 /* Don't add another entry for an alias, but do update some
1731 info. */
1732 if (sinfo->fun[i].lo == off)
1734 /* Prefer globals over local syms. */
1735 if (global && !sinfo->fun[i].global)
1737 sinfo->fun[i].global = TRUE;
1738 sinfo->fun[i].u.h = sym_h;
1740 if (is_func)
1741 sinfo->fun[i].is_func = TRUE;
1742 return &sinfo->fun[i];
1744 /* Ignore a zero-size symbol inside an existing function. */
1745 else if (sinfo->fun[i].hi > off && size == 0)
1746 return &sinfo->fun[i];
1749 if (++i < sinfo->num_fun)
1750 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
1751 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
1752 else if (i >= sinfo->max_fun)
1754 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
1755 bfd_size_type old = amt;
1757 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
1758 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
1759 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
1760 sinfo = bfd_realloc (sinfo, amt);
1761 if (sinfo == NULL)
1762 return NULL;
1763 memset ((char *) sinfo + old, 0, amt - old);
1764 sec_data->u.i.stack_info = sinfo;
1766 sinfo->fun[i].is_func = is_func;
1767 sinfo->fun[i].global = global;
1768 sinfo->fun[i].sec = sec;
1769 if (global)
1770 sinfo->fun[i].u.h = sym_h;
1771 else
1772 sinfo->fun[i].u.sym = sym_h;
1773 sinfo->fun[i].lo = off;
1774 sinfo->fun[i].hi = off + size;
1775 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
1776 sinfo->num_fun += 1;
1777 return &sinfo->fun[i];
1780 /* Return the name of FUN. */
1782 static const char *
1783 func_name (struct function_info *fun)
1785 asection *sec;
1786 bfd *ibfd;
1787 Elf_Internal_Shdr *symtab_hdr;
1789 while (fun->start != NULL)
1790 fun = fun->start;
1792 if (fun->global)
1793 return fun->u.h->root.root.string;
1795 sec = fun->sec;
1796 if (fun->u.sym->st_name == 0)
1798 size_t len = strlen (sec->name);
1799 char *name = bfd_malloc (len + 10);
1800 if (name == NULL)
1801 return "(null)";
1802 sprintf (name, "%s+%lx", sec->name,
1803 (unsigned long) fun->u.sym->st_value & 0xffffffff);
1804 return name;
1806 ibfd = sec->owner;
1807 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1808 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
1811 /* Read the instruction at OFF in SEC. Return true iff the instruction
1812 is a nop, lnop, or stop 0 (all zero insn). */
1814 static bfd_boolean
1815 is_nop (asection *sec, bfd_vma off)
1817 unsigned char insn[4];
1819 if (off + 4 > sec->size
1820 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1821 return FALSE;
1822 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1823 return TRUE;
1824 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1825 return TRUE;
1826 return FALSE;
1829 /* Extend the range of FUN to cover nop padding up to LIMIT.
1830 Return TRUE iff some instruction other than a NOP was found. */
1832 static bfd_boolean
1833 insns_at_end (struct function_info *fun, bfd_vma limit)
1835 bfd_vma off = (fun->hi + 3) & -4;
1837 while (off < limit && is_nop (fun->sec, off))
1838 off += 4;
1839 if (off < limit)
1841 fun->hi = off;
1842 return TRUE;
1844 fun->hi = limit;
1845 return FALSE;
1848 /* Check and fix overlapping function ranges. Return TRUE iff there
1849 are gaps in the current info we have about functions in SEC. */
1851 static bfd_boolean
1852 check_function_ranges (asection *sec, struct bfd_link_info *info)
1854 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1855 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1856 int i;
1857 bfd_boolean gaps = FALSE;
1859 if (sinfo == NULL)
1860 return FALSE;
1862 for (i = 1; i < sinfo->num_fun; i++)
1863 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
1865 /* Fix overlapping symbols. */
1866 const char *f1 = func_name (&sinfo->fun[i - 1]);
1867 const char *f2 = func_name (&sinfo->fun[i]);
1869 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
1870 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
1872 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
1873 gaps = TRUE;
1875 if (sinfo->num_fun == 0)
1876 gaps = TRUE;
1877 else
1879 if (sinfo->fun[0].lo != 0)
1880 gaps = TRUE;
1881 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
1883 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
1885 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
1886 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
1888 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
1889 gaps = TRUE;
1891 return gaps;
1894 /* Search current function info for a function that contains address
1895 OFFSET in section SEC. */
1897 static struct function_info *
1898 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
1900 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1901 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1902 int lo, hi, mid;
1904 lo = 0;
1905 hi = sinfo->num_fun;
1906 while (lo < hi)
1908 mid = (lo + hi) / 2;
1909 if (offset < sinfo->fun[mid].lo)
1910 hi = mid;
1911 else if (offset >= sinfo->fun[mid].hi)
1912 lo = mid + 1;
1913 else
1914 return &sinfo->fun[mid];
1916 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
1917 sec, offset);
1918 return NULL;
1921 /* Add CALLEE to CALLER call list if not already present. */
1923 static bfd_boolean
1924 insert_callee (struct function_info *caller, struct call_info *callee)
1926 struct call_info *p;
1927 for (p = caller->call_list; p != NULL; p = p->next)
1928 if (p->fun == callee->fun)
1930 /* Tail calls use less stack than normal calls. Retain entry
1931 for normal call over one for tail call. */
1932 p->is_tail &= callee->is_tail;
1933 if (!p->is_tail)
1935 p->fun->start = NULL;
1936 p->fun->is_func = TRUE;
1938 return FALSE;
1940 callee->next = caller->call_list;
1941 caller->call_list = callee;
1942 return TRUE;
1945 /* Rummage through the relocs for SEC, looking for function calls.
1946 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
1947 mark destination symbols on calls as being functions. Also
1948 look at branches, which may be tail calls or go to hot/cold
1949 section part of same function. */
1951 static bfd_boolean
1952 mark_functions_via_relocs (asection *sec,
1953 struct bfd_link_info *info,
1954 int call_tree)
1956 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1957 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
1958 Elf_Internal_Sym *syms;
1959 void *psyms;
1960 static bfd_boolean warned;
1962 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
1963 info->keep_memory);
1964 if (internal_relocs == NULL)
1965 return FALSE;
1967 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
1968 psyms = &symtab_hdr->contents;
1969 syms = *(Elf_Internal_Sym **) psyms;
1970 irela = internal_relocs;
1971 irelaend = irela + sec->reloc_count;
1972 for (; irela < irelaend; irela++)
1974 enum elf_spu_reloc_type r_type;
1975 unsigned int r_indx;
1976 asection *sym_sec;
1977 Elf_Internal_Sym *sym;
1978 struct elf_link_hash_entry *h;
1979 bfd_vma val;
1980 unsigned char insn[4];
1981 bfd_boolean is_call;
1982 struct function_info *caller;
1983 struct call_info *callee;
1985 r_type = ELF32_R_TYPE (irela->r_info);
1986 if (r_type != R_SPU_REL16
1987 && r_type != R_SPU_ADDR16)
1988 continue;
1990 r_indx = ELF32_R_SYM (irela->r_info);
1991 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
1992 return FALSE;
1994 if (sym_sec == NULL
1995 || sym_sec->output_section == NULL
1996 || sym_sec->output_section->owner != sec->output_section->owner)
1997 continue;
1999 if (!bfd_get_section_contents (sec->owner, sec, insn,
2000 irela->r_offset, 4))
2001 return FALSE;
2002 if (!is_branch (insn))
2003 continue;
2005 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2006 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2008 if (!call_tree)
2009 warned = TRUE;
2010 if (!call_tree || !warned)
2011 info->callbacks->einfo (_("%B(%A+0x%v): call to non-code section"
2012 " %B(%A), stack analysis incomplete\n"),
2013 sec->owner, sec, irela->r_offset,
2014 sym_sec->owner, sym_sec);
2015 continue;
2018 is_call = (insn[0] & 0xfd) == 0x31;
2020 if (h)
2021 val = h->root.u.def.value;
2022 else
2023 val = sym->st_value;
2024 val += irela->r_addend;
2026 if (!call_tree)
2028 struct function_info *fun;
2030 if (irela->r_addend != 0)
2032 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2033 if (fake == NULL)
2034 return FALSE;
2035 fake->st_value = val;
2036 fake->st_shndx
2037 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2038 sym = fake;
2040 if (sym)
2041 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2042 else
2043 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2044 if (fun == NULL)
2045 return FALSE;
2046 if (irela->r_addend != 0
2047 && fun->u.sym != sym)
2048 free (sym);
2049 continue;
2052 caller = find_function (sec, irela->r_offset, info);
2053 if (caller == NULL)
2054 return FALSE;
2055 callee = bfd_malloc (sizeof *callee);
2056 if (callee == NULL)
2057 return FALSE;
2059 callee->fun = find_function (sym_sec, val, info);
2060 if (callee->fun == NULL)
2061 return FALSE;
2062 callee->is_tail = !is_call;
2063 if (!insert_callee (caller, callee))
2064 free (callee);
2065 else if (!is_call
2066 && !callee->fun->is_func
2067 && callee->fun->stack == 0)
2069 /* This is either a tail call or a branch from one part of
2070 the function to another, ie. hot/cold section. If the
2071 destination has been called by some other function then
2072 it is a separate function. We also assume that functions
2073 are not split across input files. */
2074 if (sec->owner != sym_sec->owner)
2076 callee->fun->start = NULL;
2077 callee->fun->is_func = TRUE;
2079 else if (callee->fun->start == NULL)
2080 callee->fun->start = caller;
2081 else
2083 struct function_info *callee_start;
2084 struct function_info *caller_start;
2085 callee_start = callee->fun;
2086 while (callee_start->start)
2087 callee_start = callee_start->start;
2088 caller_start = caller;
2089 while (caller_start->start)
2090 caller_start = caller_start->start;
2091 if (caller_start != callee_start)
2093 callee->fun->start = NULL;
2094 callee->fun->is_func = TRUE;
2100 return TRUE;
2103 /* Handle something like .init or .fini, which has a piece of a function.
2104 These sections are pasted together to form a single function. */
2106 static bfd_boolean
2107 pasted_function (asection *sec, struct bfd_link_info *info)
2109 struct bfd_link_order *l;
2110 struct _spu_elf_section_data *sec_data;
2111 struct spu_elf_stack_info *sinfo;
2112 Elf_Internal_Sym *fake;
2113 struct function_info *fun, *fun_start;
2115 fake = bfd_zmalloc (sizeof (*fake));
2116 if (fake == NULL)
2117 return FALSE;
2118 fake->st_value = 0;
2119 fake->st_size = sec->size;
2120 fake->st_shndx
2121 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2122 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2123 if (!fun)
2124 return FALSE;
2126 /* Find a function immediately preceding this section. */
2127 fun_start = NULL;
2128 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2130 if (l->u.indirect.section == sec)
2132 if (fun_start != NULL)
2133 fun->start = fun_start;
2134 return TRUE;
2136 if (l->type == bfd_indirect_link_order
2137 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2138 && (sinfo = sec_data->u.i.stack_info) != NULL
2139 && sinfo->num_fun != 0)
2140 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2143 info->callbacks->einfo (_("%A link_order not found\n"), sec);
2144 return FALSE;
2147 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2148 overlay stub sections. */
2150 static bfd_boolean
2151 interesting_section (asection *s, bfd *obfd)
2153 return (s->output_section != NULL
2154 && s->output_section->owner == obfd
2155 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2156 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2157 && s->size != 0);
2160 /* Map address ranges in code sections to functions. */
2162 static bfd_boolean
2163 discover_functions (struct bfd_link_info *info)
2165 bfd *ibfd;
2166 int bfd_idx;
2167 Elf_Internal_Sym ***psym_arr;
2168 asection ***sec_arr;
2169 bfd_boolean gaps = FALSE;
2171 bfd_idx = 0;
2172 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2173 bfd_idx++;
2175 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2176 if (psym_arr == NULL)
2177 return FALSE;
2178 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2179 if (sec_arr == NULL)
2180 return FALSE;
2183 for (ibfd = info->input_bfds, bfd_idx = 0;
2184 ibfd != NULL;
2185 ibfd = ibfd->link_next, bfd_idx++)
2187 extern const bfd_target bfd_elf32_spu_vec;
2188 Elf_Internal_Shdr *symtab_hdr;
2189 asection *sec;
2190 size_t symcount;
2191 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2192 asection **psecs, **p;
2194 if (ibfd->xvec != &bfd_elf32_spu_vec)
2195 continue;
2197 /* Read all the symbols. */
2198 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2199 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2200 if (symcount == 0)
2201 continue;
2203 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2204 if (syms == NULL)
2206 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2207 NULL, NULL, NULL);
2208 symtab_hdr->contents = (void *) syms;
2209 if (syms == NULL)
2210 return FALSE;
2213 /* Select defined function symbols that are going to be output. */
2214 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2215 if (psyms == NULL)
2216 return FALSE;
2217 psym_arr[bfd_idx] = psyms;
2218 psecs = bfd_malloc (symcount * sizeof (*psecs));
2219 if (psecs == NULL)
2220 return FALSE;
2221 sec_arr[bfd_idx] = psecs;
2222 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2223 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2224 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2226 asection *s;
2228 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2229 if (s != NULL && interesting_section (s, info->output_bfd))
2230 *psy++ = sy;
2232 symcount = psy - psyms;
2233 *psy = NULL;
2235 /* Sort them by section and offset within section. */
2236 sort_syms_syms = syms;
2237 sort_syms_psecs = psecs;
2238 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2240 /* Now inspect the function symbols. */
2241 for (psy = psyms; psy < psyms + symcount; )
2243 asection *s = psecs[*psy - syms];
2244 Elf_Internal_Sym **psy2;
2246 for (psy2 = psy; ++psy2 < psyms + symcount; )
2247 if (psecs[*psy2 - syms] != s)
2248 break;
2250 if (!alloc_stack_info (s, psy2 - psy))
2251 return FALSE;
2252 psy = psy2;
2255 /* First install info about properly typed and sized functions.
2256 In an ideal world this will cover all code sections, except
2257 when partitioning functions into hot and cold sections,
2258 and the horrible pasted together .init and .fini functions. */
2259 for (psy = psyms; psy < psyms + symcount; ++psy)
2261 sy = *psy;
2262 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2264 asection *s = psecs[sy - syms];
2265 if (!maybe_insert_function (s, sy, FALSE, TRUE))
2266 return FALSE;
2270 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2271 if (interesting_section (sec, info->output_bfd))
2272 gaps |= check_function_ranges (sec, info);
2275 if (gaps)
2277 /* See if we can discover more function symbols by looking at
2278 relocations. */
2279 for (ibfd = info->input_bfds, bfd_idx = 0;
2280 ibfd != NULL;
2281 ibfd = ibfd->link_next, bfd_idx++)
2283 asection *sec;
2285 if (psym_arr[bfd_idx] == NULL)
2286 continue;
2288 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2289 if (interesting_section (sec, info->output_bfd)
2290 && sec->reloc_count != 0)
2292 if (!mark_functions_via_relocs (sec, info, FALSE))
2293 return FALSE;
2297 for (ibfd = info->input_bfds, bfd_idx = 0;
2298 ibfd != NULL;
2299 ibfd = ibfd->link_next, bfd_idx++)
2301 Elf_Internal_Shdr *symtab_hdr;
2302 asection *sec;
2303 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2304 asection **psecs;
2306 if ((psyms = psym_arr[bfd_idx]) == NULL)
2307 continue;
2309 psecs = sec_arr[bfd_idx];
2311 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2312 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2314 gaps = FALSE;
2315 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2316 if (interesting_section (sec, info->output_bfd))
2317 gaps |= check_function_ranges (sec, info);
2318 if (!gaps)
2319 continue;
2321 /* Finally, install all globals. */
2322 for (psy = psyms; (sy = *psy) != NULL; ++psy)
2324 asection *s;
2326 s = psecs[sy - syms];
2328 /* Global syms might be improperly typed functions. */
2329 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
2330 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
2332 if (!maybe_insert_function (s, sy, FALSE, FALSE))
2333 return FALSE;
2337 /* Some of the symbols we've installed as marking the
2338 beginning of functions may have a size of zero. Extend
2339 the range of such functions to the beginning of the
2340 next symbol of interest. */
2341 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2342 if (interesting_section (sec, info->output_bfd))
2344 struct _spu_elf_section_data *sec_data;
2345 struct spu_elf_stack_info *sinfo;
2347 sec_data = spu_elf_section_data (sec);
2348 sinfo = sec_data->u.i.stack_info;
2349 if (sinfo != NULL)
2351 int fun_idx;
2352 bfd_vma hi = sec->size;
2354 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
2356 sinfo->fun[fun_idx].hi = hi;
2357 hi = sinfo->fun[fun_idx].lo;
2360 /* No symbols in this section. Must be .init or .fini
2361 or something similar. */
2362 else if (!pasted_function (sec, info))
2363 return FALSE;
2368 for (ibfd = info->input_bfds, bfd_idx = 0;
2369 ibfd != NULL;
2370 ibfd = ibfd->link_next, bfd_idx++)
2372 if (psym_arr[bfd_idx] == NULL)
2373 continue;
2375 free (psym_arr[bfd_idx]);
2376 free (sec_arr[bfd_idx]);
2379 free (psym_arr);
2380 free (sec_arr);
2382 return TRUE;
2385 /* Mark nodes in the call graph that are called by some other node. */
2387 static void
2388 mark_non_root (struct function_info *fun)
2390 struct call_info *call;
2392 fun->visit1 = TRUE;
2393 for (call = fun->call_list; call; call = call->next)
2395 call->fun->non_root = TRUE;
2396 if (!call->fun->visit1)
2397 mark_non_root (call->fun);
2401 /* Remove cycles from the call graph. */
2403 static void
2404 call_graph_traverse (struct function_info *fun, struct bfd_link_info *info)
2406 struct call_info **callp, *call;
2408 fun->visit2 = TRUE;
2409 fun->marking = TRUE;
2411 callp = &fun->call_list;
2412 while ((call = *callp) != NULL)
2414 if (!call->fun->visit2)
2415 call_graph_traverse (call->fun, info);
2416 else if (call->fun->marking)
2418 const char *f1 = func_name (fun);
2419 const char *f2 = func_name (call->fun);
2421 info->callbacks->info (_("Stack analysis will ignore the call "
2422 "from %s to %s\n"),
2423 f1, f2);
2424 *callp = call->next;
2425 continue;
2427 callp = &call->next;
2429 fun->marking = FALSE;
2432 /* Populate call_list for each function. */
2434 static bfd_boolean
2435 build_call_tree (struct bfd_link_info *info)
2437 bfd *ibfd;
2439 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2441 extern const bfd_target bfd_elf32_spu_vec;
2442 asection *sec;
2444 if (ibfd->xvec != &bfd_elf32_spu_vec)
2445 continue;
2447 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2449 if (!interesting_section (sec, info->output_bfd)
2450 || sec->reloc_count == 0)
2451 continue;
2453 if (!mark_functions_via_relocs (sec, info, TRUE))
2454 return FALSE;
2457 /* Transfer call info from hot/cold section part of function
2458 to main entry. */
2459 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2461 struct _spu_elf_section_data *sec_data;
2462 struct spu_elf_stack_info *sinfo;
2464 if ((sec_data = spu_elf_section_data (sec)) != NULL
2465 && (sinfo = sec_data->u.i.stack_info) != NULL)
2467 int i;
2468 for (i = 0; i < sinfo->num_fun; ++i)
2470 struct function_info *start = sinfo->fun[i].start;
2472 if (start != NULL)
2474 struct call_info *call;
2476 while (start->start != NULL)
2477 start = start->start;
2478 call = sinfo->fun[i].call_list;
2479 while (call != NULL)
2481 struct call_info *call_next = call->next;
2482 if (!insert_callee (start, call))
2483 free (call);
2484 call = call_next;
2486 sinfo->fun[i].call_list = NULL;
2487 sinfo->fun[i].non_root = TRUE;
2494 /* Find the call graph root(s). */
2495 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2497 extern const bfd_target bfd_elf32_spu_vec;
2498 asection *sec;
2500 if (ibfd->xvec != &bfd_elf32_spu_vec)
2501 continue;
2503 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2505 struct _spu_elf_section_data *sec_data;
2506 struct spu_elf_stack_info *sinfo;
2508 if ((sec_data = spu_elf_section_data (sec)) != NULL
2509 && (sinfo = sec_data->u.i.stack_info) != NULL)
2511 int i;
2512 for (i = 0; i < sinfo->num_fun; ++i)
2513 if (!sinfo->fun[i].visit1)
2514 mark_non_root (&sinfo->fun[i]);
2519 /* Remove cycles from the call graph. We start from the root node(s)
2520 so that we break cycles in a reasonable place. */
2521 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2523 extern const bfd_target bfd_elf32_spu_vec;
2524 asection *sec;
2526 if (ibfd->xvec != &bfd_elf32_spu_vec)
2527 continue;
2529 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2531 struct _spu_elf_section_data *sec_data;
2532 struct spu_elf_stack_info *sinfo;
2534 if ((sec_data = spu_elf_section_data (sec)) != NULL
2535 && (sinfo = sec_data->u.i.stack_info) != NULL)
2537 int i;
2538 for (i = 0; i < sinfo->num_fun; ++i)
2539 if (!sinfo->fun[i].non_root)
2540 call_graph_traverse (&sinfo->fun[i], info);
2545 return TRUE;
2548 /* Descend the call graph for FUN, accumulating total stack required. */
2550 static bfd_vma
2551 sum_stack (struct function_info *fun,
2552 struct bfd_link_info *info,
2553 int emit_stack_syms)
2555 struct call_info *call;
2556 struct function_info *max = NULL;
2557 bfd_vma max_stack = fun->stack;
2558 bfd_vma stack;
2559 const char *f1;
2561 if (fun->visit3)
2562 return max_stack;
2564 for (call = fun->call_list; call; call = call->next)
2566 stack = sum_stack (call->fun, info, emit_stack_syms);
2567 /* Include caller stack for normal calls, don't do so for
2568 tail calls. fun->stack here is local stack usage for
2569 this function. */
2570 if (!call->is_tail)
2571 stack += fun->stack;
2572 if (max_stack < stack)
2574 max_stack = stack;
2575 max = call->fun;
2579 f1 = func_name (fun);
2580 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
2581 f1, (bfd_vma) fun->stack, max_stack);
2583 if (fun->call_list)
2585 info->callbacks->minfo (_(" calls:\n"));
2586 for (call = fun->call_list; call; call = call->next)
2588 const char *f2 = func_name (call->fun);
2589 const char *ann1 = call->fun == max ? "*" : " ";
2590 const char *ann2 = call->is_tail ? "t" : " ";
2592 info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
2596 /* Now fun->stack holds cumulative stack. */
2597 fun->stack = max_stack;
2598 fun->visit3 = TRUE;
2600 if (emit_stack_syms)
2602 struct spu_link_hash_table *htab = spu_hash_table (info);
2603 char *name = bfd_malloc (18 + strlen (f1));
2604 struct elf_link_hash_entry *h;
2606 if (name != NULL)
2608 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
2609 sprintf (name, "__stack_%s", f1);
2610 else
2611 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
2613 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
2614 free (name);
2615 if (h != NULL
2616 && (h->root.type == bfd_link_hash_new
2617 || h->root.type == bfd_link_hash_undefined
2618 || h->root.type == bfd_link_hash_undefweak))
2620 h->root.type = bfd_link_hash_defined;
2621 h->root.u.def.section = bfd_abs_section_ptr;
2622 h->root.u.def.value = max_stack;
2623 h->size = 0;
2624 h->type = 0;
2625 h->ref_regular = 1;
2626 h->def_regular = 1;
2627 h->ref_regular_nonweak = 1;
2628 h->forced_local = 1;
2629 h->non_elf = 0;
2634 return max_stack;
2637 /* Provide an estimate of total stack required. */
2639 static bfd_boolean
2640 spu_elf_stack_analysis (struct bfd_link_info *info, int emit_stack_syms)
2642 bfd *ibfd;
2643 bfd_vma max_stack = 0;
2645 if (!discover_functions (info))
2646 return FALSE;
2648 if (!build_call_tree (info))
2649 return FALSE;
2651 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
2652 info->callbacks->minfo (_("\nStack size for functions. "
2653 "Annotations: '*' max stack, 't' tail call\n"));
2654 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2656 extern const bfd_target bfd_elf32_spu_vec;
2657 asection *sec;
2659 if (ibfd->xvec != &bfd_elf32_spu_vec)
2660 continue;
2662 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2664 struct _spu_elf_section_data *sec_data;
2665 struct spu_elf_stack_info *sinfo;
2667 if ((sec_data = spu_elf_section_data (sec)) != NULL
2668 && (sinfo = sec_data->u.i.stack_info) != NULL)
2670 int i;
2671 for (i = 0; i < sinfo->num_fun; ++i)
2673 if (!sinfo->fun[i].non_root)
2675 bfd_vma stack;
2676 const char *f1;
2678 stack = sum_stack (&sinfo->fun[i], info,
2679 emit_stack_syms);
2680 f1 = func_name (&sinfo->fun[i]);
2681 info->callbacks->info (_(" %s: 0x%v\n"),
2682 f1, stack);
2683 if (max_stack < stack)
2684 max_stack = stack;
2691 info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
2692 return TRUE;
2695 /* Perform a final link. */
2697 static bfd_boolean
2698 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
2700 struct spu_link_hash_table *htab = spu_hash_table (info);
2702 if (htab->stack_analysis
2703 && !spu_elf_stack_analysis (info, htab->emit_stack_syms))
2704 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
2706 return bfd_elf_final_link (output_bfd, info);
2709 /* Called when not normally emitting relocs, ie. !info->relocatable
2710 and !info->emitrelocations. Returns a count of special relocs
2711 that need to be emitted. */
2713 static unsigned int
2714 spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
2716 unsigned int count = 0;
2717 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
2719 for (; relocs < relend; relocs++)
2721 int r_type = ELF32_R_TYPE (relocs->r_info);
2722 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2723 ++count;
2726 return count;
2729 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
2731 static int
2732 spu_elf_relocate_section (bfd *output_bfd,
2733 struct bfd_link_info *info,
2734 bfd *input_bfd,
2735 asection *input_section,
2736 bfd_byte *contents,
2737 Elf_Internal_Rela *relocs,
2738 Elf_Internal_Sym *local_syms,
2739 asection **local_sections)
2741 Elf_Internal_Shdr *symtab_hdr;
2742 struct elf_link_hash_entry **sym_hashes;
2743 Elf_Internal_Rela *rel, *relend;
2744 struct spu_link_hash_table *htab;
2745 int ret = TRUE;
2746 bfd_boolean emit_these_relocs = FALSE;
2747 bfd_boolean stubs;
2749 htab = spu_hash_table (info);
2750 stubs = (htab->stub_sec != NULL
2751 && maybe_needs_stubs (input_section, output_bfd));
2752 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2753 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
2755 rel = relocs;
2756 relend = relocs + input_section->reloc_count;
2757 for (; rel < relend; rel++)
2759 int r_type;
2760 reloc_howto_type *howto;
2761 unsigned long r_symndx;
2762 Elf_Internal_Sym *sym;
2763 asection *sec;
2764 struct elf_link_hash_entry *h;
2765 const char *sym_name;
2766 bfd_vma relocation;
2767 bfd_vma addend;
2768 bfd_reloc_status_type r;
2769 bfd_boolean unresolved_reloc;
2770 bfd_boolean warned;
2772 r_symndx = ELF32_R_SYM (rel->r_info);
2773 r_type = ELF32_R_TYPE (rel->r_info);
2774 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2776 emit_these_relocs = TRUE;
2777 continue;
2780 howto = elf_howto_table + r_type;
2781 unresolved_reloc = FALSE;
2782 warned = FALSE;
2783 h = NULL;
2784 sym = NULL;
2785 sec = NULL;
2786 if (r_symndx < symtab_hdr->sh_info)
2788 sym = local_syms + r_symndx;
2789 sec = local_sections[r_symndx];
2790 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
2791 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
2793 else
2795 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2796 r_symndx, symtab_hdr, sym_hashes,
2797 h, sec, relocation,
2798 unresolved_reloc, warned);
2799 sym_name = h->root.root.string;
2802 if (sec != NULL && elf_discarded_section (sec))
2804 /* For relocs against symbols from removed linkonce sections,
2805 or sections discarded by a linker script, we just want the
2806 section contents zeroed. Avoid any special processing. */
2807 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
2808 rel->r_info = 0;
2809 rel->r_addend = 0;
2810 continue;
2813 if (info->relocatable)
2814 continue;
2816 if (unresolved_reloc)
2818 (*_bfd_error_handler)
2819 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
2820 input_bfd,
2821 bfd_get_section_name (input_bfd, input_section),
2822 (long) rel->r_offset,
2823 howto->name,
2824 sym_name);
2825 ret = FALSE;
2828 /* If this symbol is in an overlay area, we may need to relocate
2829 to the overlay stub. */
2830 addend = rel->r_addend;
2831 if (stubs)
2833 enum _stub_type stub_type;
2835 stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
2836 contents, info);
2837 if (stub_type != no_stub)
2839 unsigned int ovl = 0;
2840 struct got_entry *g, **head;
2842 if (stub_type != nonovl_stub)
2843 ovl = (spu_elf_section_data (input_section->output_section)
2844 ->u.o.ovl_index);
2846 if (h != NULL)
2847 head = &h->got.glist;
2848 else
2849 head = elf_local_got_ents (input_bfd) + r_symndx;
2851 for (g = *head; g != NULL; g = g->next)
2852 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
2853 break;
2854 if (g == NULL)
2855 abort ();
2857 relocation = g->stub_addr;
2858 addend = 0;
2862 r = _bfd_final_link_relocate (howto,
2863 input_bfd,
2864 input_section,
2865 contents,
2866 rel->r_offset, relocation, addend);
2868 if (r != bfd_reloc_ok)
2870 const char *msg = (const char *) 0;
2872 switch (r)
2874 case bfd_reloc_overflow:
2875 if (!((*info->callbacks->reloc_overflow)
2876 (info, (h ? &h->root : NULL), sym_name, howto->name,
2877 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
2878 return FALSE;
2879 break;
2881 case bfd_reloc_undefined:
2882 if (!((*info->callbacks->undefined_symbol)
2883 (info, sym_name, input_bfd, input_section,
2884 rel->r_offset, TRUE)))
2885 return FALSE;
2886 break;
2888 case bfd_reloc_outofrange:
2889 msg = _("internal error: out of range error");
2890 goto common_error;
2892 case bfd_reloc_notsupported:
2893 msg = _("internal error: unsupported relocation error");
2894 goto common_error;
2896 case bfd_reloc_dangerous:
2897 msg = _("internal error: dangerous error");
2898 goto common_error;
2900 default:
2901 msg = _("internal error: unknown error");
2902 /* fall through */
2904 common_error:
2905 ret = FALSE;
2906 if (!((*info->callbacks->warning)
2907 (info, msg, sym_name, input_bfd, input_section,
2908 rel->r_offset)))
2909 return FALSE;
2910 break;
2915 if (ret
2916 && emit_these_relocs
2917 && !info->relocatable
2918 && !info->emitrelocations)
2920 Elf_Internal_Rela *wrel;
2921 Elf_Internal_Shdr *rel_hdr;
2923 wrel = rel = relocs;
2924 relend = relocs + input_section->reloc_count;
2925 for (; rel < relend; rel++)
2927 int r_type;
2929 r_type = ELF32_R_TYPE (rel->r_info);
2930 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2931 *wrel++ = *rel;
2933 input_section->reloc_count = wrel - relocs;
2934 /* Backflips for _bfd_elf_link_output_relocs. */
2935 rel_hdr = &elf_section_data (input_section)->rel_hdr;
2936 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
2937 ret = 2;
2940 return ret;
2943 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
2945 static bfd_boolean
2946 spu_elf_output_symbol_hook (struct bfd_link_info *info,
2947 const char *sym_name ATTRIBUTE_UNUSED,
2948 Elf_Internal_Sym *sym,
2949 asection *sym_sec ATTRIBUTE_UNUSED,
2950 struct elf_link_hash_entry *h)
2952 struct spu_link_hash_table *htab = spu_hash_table (info);
2954 if (!info->relocatable
2955 && htab->stub_sec != NULL
2956 && h != NULL
2957 && (h->root.type == bfd_link_hash_defined
2958 || h->root.type == bfd_link_hash_defweak)
2959 && h->def_regular
2960 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
2962 struct got_entry *g;
2964 for (g = h->got.glist; g != NULL; g = g->next)
2965 if (g->addend == 0 && g->ovl == 0)
2967 sym->st_shndx = (_bfd_elf_section_from_bfd_section
2968 (htab->stub_sec[0]->output_section->owner,
2969 htab->stub_sec[0]->output_section));
2970 sym->st_value = g->stub_addr;
2971 break;
2975 return TRUE;
2978 static int spu_plugin = 0;
2980 void
2981 spu_elf_plugin (int val)
2983 spu_plugin = val;
2986 /* Set ELF header e_type for plugins. */
2988 static void
2989 spu_elf_post_process_headers (bfd *abfd,
2990 struct bfd_link_info *info ATTRIBUTE_UNUSED)
2992 if (spu_plugin)
2994 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
2996 i_ehdrp->e_type = ET_DYN;
3000 /* We may add an extra PT_LOAD segment for .toe. We also need extra
3001 segments for overlays. */
3003 static int
3004 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
3006 struct spu_link_hash_table *htab = spu_hash_table (info);
3007 int extra = htab->num_overlays;
3008 asection *sec;
3010 if (extra)
3011 ++extra;
3013 sec = bfd_get_section_by_name (abfd, ".toe");
3014 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
3015 ++extra;
3017 return extra;
3020 /* Remove .toe section from other PT_LOAD segments and put it in
3021 a segment of its own. Put overlays in separate segments too. */
3023 static bfd_boolean
3024 spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
3026 asection *toe, *s;
3027 struct elf_segment_map *m;
3028 unsigned int i;
3030 if (info == NULL)
3031 return TRUE;
3033 toe = bfd_get_section_by_name (abfd, ".toe");
3034 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
3035 if (m->p_type == PT_LOAD && m->count > 1)
3036 for (i = 0; i < m->count; i++)
3037 if ((s = m->sections[i]) == toe
3038 || spu_elf_section_data (s)->u.o.ovl_index != 0)
3040 struct elf_segment_map *m2;
3041 bfd_vma amt;
3043 if (i + 1 < m->count)
3045 amt = sizeof (struct elf_segment_map);
3046 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
3047 m2 = bfd_zalloc (abfd, amt);
3048 if (m2 == NULL)
3049 return FALSE;
3050 m2->count = m->count - (i + 1);
3051 memcpy (m2->sections, m->sections + i + 1,
3052 m2->count * sizeof (m->sections[0]));
3053 m2->p_type = PT_LOAD;
3054 m2->next = m->next;
3055 m->next = m2;
3057 m->count = 1;
3058 if (i != 0)
3060 m->count = i;
3061 amt = sizeof (struct elf_segment_map);
3062 m2 = bfd_zalloc (abfd, amt);
3063 if (m2 == NULL)
3064 return FALSE;
3065 m2->p_type = PT_LOAD;
3066 m2->count = 1;
3067 m2->sections[0] = s;
3068 m2->next = m->next;
3069 m->next = m2;
3071 break;
3074 return TRUE;
3077 /* Tweak the section type of .note.spu_name. */
3079 static bfd_boolean
3080 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
3081 Elf_Internal_Shdr *hdr,
3082 asection *sec)
3084 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
3085 hdr->sh_type = SHT_NOTE;
3086 return TRUE;
3089 /* Tweak phdrs before writing them out. */
3091 static int
3092 spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
3094 const struct elf_backend_data *bed;
3095 struct elf_obj_tdata *tdata;
3096 Elf_Internal_Phdr *phdr, *last;
3097 struct spu_link_hash_table *htab;
3098 unsigned int count;
3099 unsigned int i;
3101 if (info == NULL)
3102 return TRUE;
3104 bed = get_elf_backend_data (abfd);
3105 tdata = elf_tdata (abfd);
3106 phdr = tdata->phdr;
3107 count = tdata->program_header_size / bed->s->sizeof_phdr;
3108 htab = spu_hash_table (info);
3109 if (htab->num_overlays != 0)
3111 struct elf_segment_map *m;
3112 unsigned int o;
3114 for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
3115 if (m->count != 0
3116 && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
3118 /* Mark this as an overlay header. */
3119 phdr[i].p_flags |= PF_OVERLAY;
3121 if (htab->ovtab != NULL && htab->ovtab->size != 0)
3123 bfd_byte *p = htab->ovtab->contents;
3124 unsigned int off = o * 16 + 8;
3126 /* Write file_off into _ovly_table. */
3127 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
3132 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
3133 of 16. This should always be possible when using the standard
3134 linker scripts, but don't create overlapping segments if
3135 someone is playing games with linker scripts. */
3136 last = NULL;
3137 for (i = count; i-- != 0; )
3138 if (phdr[i].p_type == PT_LOAD)
3140 unsigned adjust;
3142 adjust = -phdr[i].p_filesz & 15;
3143 if (adjust != 0
3144 && last != NULL
3145 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
3146 break;
3148 adjust = -phdr[i].p_memsz & 15;
3149 if (adjust != 0
3150 && last != NULL
3151 && phdr[i].p_filesz != 0
3152 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
3153 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
3154 break;
3156 if (phdr[i].p_filesz != 0)
3157 last = &phdr[i];
3160 if (i == (unsigned int) -1)
3161 for (i = count; i-- != 0; )
3162 if (phdr[i].p_type == PT_LOAD)
3164 unsigned adjust;
3166 adjust = -phdr[i].p_filesz & 15;
3167 phdr[i].p_filesz += adjust;
3169 adjust = -phdr[i].p_memsz & 15;
3170 phdr[i].p_memsz += adjust;
3173 return TRUE;
3176 #define TARGET_BIG_SYM bfd_elf32_spu_vec
3177 #define TARGET_BIG_NAME "elf32-spu"
3178 #define ELF_ARCH bfd_arch_spu
3179 #define ELF_MACHINE_CODE EM_SPU
3180 /* This matches the alignment need for DMA. */
3181 #define ELF_MAXPAGESIZE 0x80
3182 #define elf_backend_rela_normal 1
3183 #define elf_backend_can_gc_sections 1
3185 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
3186 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
3187 #define elf_info_to_howto spu_elf_info_to_howto
3188 #define elf_backend_count_relocs spu_elf_count_relocs
3189 #define elf_backend_relocate_section spu_elf_relocate_section
3190 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
3191 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
3192 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
3193 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
3195 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
3196 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
3197 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
3198 #define elf_backend_post_process_headers spu_elf_post_process_headers
3199 #define elf_backend_fake_sections spu_elf_fake_sections
3200 #define elf_backend_special_sections spu_elf_special_sections
3201 #define bfd_elf32_bfd_final_link spu_elf_final_link
3203 #include "elf32-target.h"