/* Renesas RX specific support for 32-bit ELF.
   Copyright (C) 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
#include "sysdep.h"
#include "bfd.h"
#include "bfd_stdint.h"
#include "elf-bfd.h"
#include "elf/rx.h"
#include "libiberty.h"
#define RX_OPCODE_BIG_ENDIAN 0

/* This is a meta-target that's used only with objcopy, to avoid the
   endian-swap we would otherwise get.  We check for this in
   rx_set_section_contents().  */
const bfd_target bfd_elf32_rx_be_ns_vec;
const bfd_target bfd_elf32_rx_be_vec;

char * rx_get_reloc (long);
void rx_dump_symtab (bfd *, void *, void *);

#define RXREL(n,sz,bit,shift,complain,pcrel) \
  HOWTO (R_RX_##n, shift, sz, bit, pcrel, 0, complain_overflow_ ## complain, \
         bfd_elf_generic_reloc, "R_RX_" #n, FALSE, 0, ~0, FALSE)
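
/* A minimal illustration of the macro above (an illustrative sketch,
   not part of the original source): the DIR32 entry in the table below
   expands to roughly the hand-written HOWTO initializer shown here.
   It is kept under "#if 0" so it is never compiled.  */
#if 0
static reloc_howto_type example_dir32_howto =
  HOWTO (R_RX_DIR32,               /* type */
         0,                        /* rightshift */
         2,                        /* size (2 == four bytes) */
         32,                       /* bitsize */
         FALSE,                    /* pc_relative */
         0,                        /* bitpos */
         complain_overflow_signed, /* complain_on_overflow */
         bfd_elf_generic_reloc,    /* special_function */
         "R_RX_DIR32",             /* name */
         FALSE,                    /* partial_inplace */
         0,                        /* src_mask */
         ~0,                       /* dst_mask */
         FALSE);                   /* pcrel_offset */
#endif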
/* Note that the relocations around 0x7f are internal to this file;
   feel free to move them as needed to avoid conflicts with published
   relocation numbers.  */

static reloc_howto_type rx_elf_howto_table [] =
{
  RXREL (NONE,         0,  0, 0, dont,     FALSE),
  RXREL (DIR32,        2, 32, 0, signed,   FALSE),
  RXREL (DIR24S,       2, 24, 0, signed,   FALSE),
  RXREL (DIR16,        1, 16, 0, dont,     FALSE),
  RXREL (DIR16U,       1, 16, 0, unsigned, FALSE),
  RXREL (DIR16S,       1, 16, 0, signed,   FALSE),
  RXREL (DIR8,         0,  8, 0, dont,     FALSE),
  RXREL (DIR8U,        0,  8, 0, unsigned, FALSE),
  RXREL (DIR8S,        0,  8, 0, signed,   FALSE),
  RXREL (DIR24S_PCREL, 2, 24, 0, signed,   TRUE),
  RXREL (DIR16S_PCREL, 1, 16, 0, signed,   TRUE),
  RXREL (DIR8S_PCREL,  0,  8, 0, signed,   TRUE),
  RXREL (DIR16UL,      1, 16, 2, unsigned, FALSE),
  RXREL (DIR16UW,      1, 16, 1, unsigned, FALSE),
  RXREL (DIR8UL,       0,  8, 2, unsigned, FALSE),
  RXREL (DIR8UW,       0,  8, 1, unsigned, FALSE),
  RXREL (DIR32_REV,    1, 16, 0, dont,     FALSE),
  RXREL (DIR16_REV,    1, 16, 0, dont,     FALSE),
  RXREL (DIR3U_PCREL,  0,  3, 0, dont,     TRUE),

  RXREL (RH_3_PCREL,   0,  3, 0, signed,   TRUE),
  RXREL (RH_16_OP,     1, 16, 0, signed,   FALSE),
  RXREL (RH_24_OP,     2, 24, 0, signed,   FALSE),
  RXREL (RH_32_OP,     2, 32, 0, signed,   FALSE),
  RXREL (RH_24_UNS,    2, 24, 0, unsigned, FALSE),
  RXREL (RH_8_NEG,     0,  8, 0, signed,   FALSE),
  RXREL (RH_16_NEG,    1, 16, 0, signed,   FALSE),
  RXREL (RH_24_NEG,    2, 24, 0, signed,   FALSE),
  RXREL (RH_32_NEG,    2, 32, 0, signed,   FALSE),
  RXREL (RH_DIFF,      2, 32, 0, signed,   FALSE),
  RXREL (RH_GPRELB,    1, 16, 0, unsigned, FALSE),
  RXREL (RH_GPRELW,    1, 16, 0, unsigned, FALSE),
  RXREL (RH_GPRELL,    1, 16, 0, unsigned, FALSE),
  RXREL (RH_RELAX,     0,  0, 0, dont,     FALSE),

  RXREL (ABS32,        2, 32, 0, dont,     FALSE),
  RXREL (ABS24S,       2, 24, 0, signed,   FALSE),
  RXREL (ABS16,        1, 16, 0, dont,     FALSE),
  RXREL (ABS16U,       1, 16, 0, unsigned, FALSE),
  RXREL (ABS16S,       1, 16, 0, signed,   FALSE),
  RXREL (ABS8,         0,  8, 0, dont,     FALSE),
  RXREL (ABS8U,        0,  8, 0, unsigned, FALSE),
  RXREL (ABS8S,        0,  8, 0, signed,   FALSE),
  RXREL (ABS24S_PCREL, 2, 24, 0, signed,   TRUE),
  RXREL (ABS16S_PCREL, 1, 16, 0, signed,   TRUE),
  RXREL (ABS8S_PCREL,  0,  8, 0, signed,   TRUE),
  RXREL (ABS16UL,      1, 16, 0, unsigned, FALSE),
  RXREL (ABS16UW,      1, 16, 0, unsigned, FALSE),
  RXREL (ABS8UL,       0,  8, 0, unsigned, FALSE),
  RXREL (ABS8UW,       0,  8, 0, unsigned, FALSE),
  RXREL (ABS32_REV,    2, 32, 0, dont,     FALSE),
  RXREL (ABS16_REV,    1, 16, 0, dont,     FALSE),

#define STACK_REL_P(x) ((x) <= R_RX_ABS16_REV && (x) >= R_RX_ABS32)

  /* These are internal.  */
  /* A 5-bit unsigned displacement to a B/W/L address, at bit position 8/12.  */
  /* ---- ---- 4--- 3210.  */
#define R_RX_RH_ABS5p8B 0x78
  RXREL (RH_ABS5p8B,   0,  0, 0, dont,     FALSE),
#define R_RX_RH_ABS5p8W 0x79
  RXREL (RH_ABS5p8W,   0,  0, 0, dont,     FALSE),
#define R_RX_RH_ABS5p8L 0x7a
  RXREL (RH_ABS5p8L,   0,  0, 0, dont,     FALSE),
  /* A 5-bit unsigned displacement to a B/W/L address, at bit position 5/12.  */
  /* ---- -432 1--- 0---.  */
#define R_RX_RH_ABS5p5B 0x7b
  RXREL (RH_ABS5p5B,   0,  0, 0, dont,     FALSE),
#define R_RX_RH_ABS5p5W 0x7c
  RXREL (RH_ABS5p5W,   0,  0, 0, dont,     FALSE),
#define R_RX_RH_ABS5p5L 0x7d
  RXREL (RH_ABS5p5L,   0,  0, 0, dont,     FALSE),
  /* A 4-bit unsigned immediate at bit position 8.  */
#define R_RX_RH_UIMM4p8 0x7e
  RXREL (RH_UIMM4p8,   0,  0, 0, dont,     FALSE),
  /* A 4-bit negative unsigned immediate at bit position 8.  */
#define R_RX_RH_UNEG4p8 0x7f
  RXREL (RH_UNEG4p8,   0,  0, 0, dont,     FALSE),
  /* End of internal relocs.  */

  RXREL (SYM,       2, 32, 0, dont, FALSE),
  RXREL (OPneg,     2, 32, 0, dont, FALSE),
  RXREL (OPadd,     2, 32, 0, dont, FALSE),
  RXREL (OPsub,     2, 32, 0, dont, FALSE),
  RXREL (OPmul,     2, 32, 0, dont, FALSE),
  RXREL (OPdiv,     2, 32, 0, dont, FALSE),
  RXREL (OPshla,    2, 32, 0, dont, FALSE),
  RXREL (OPshra,    2, 32, 0, dont, FALSE),
  RXREL (OPsctsize, 2, 32, 0, dont, FALSE),
  RXREL (OPscttop,  2, 32, 0, dont, FALSE),
  RXREL (OPand,     2, 32, 0, dont, FALSE),
  RXREL (OPor,      2, 32, 0, dont, FALSE),
  RXREL (OPxor,     2, 32, 0, dont, FALSE),
  RXREL (OPnot,     2, 32, 0, dont, FALSE),
  RXREL (OPmod,     2, 32, 0, dont, FALSE),
  RXREL (OPromtop,  2, 32, 0, dont, FALSE),
  RXREL (OPramtop,  2, 32, 0, dont, FALSE)
};
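
/* Illustrative note (not part of the original source): STACK_REL_P
   above picks out the R_RX_ABS* relocation numbers, i.e. the relocs
   whose values come off the internal expression stack defined further
   down, as opposed to the R_RX_DIR* relocs that take their value
   directly from a symbol.  The relaxation code uses it roughly as in
   this disabled sketch, where srel stands for the operand's reloc:  */
#if 0
  unsigned int narrowed_type;

  if (STACK_REL_P (ELF32_R_TYPE (srel->r_info)))
    narrowed_type = R_RX_ABS8U;   /* Stack-computed operand.  */
  else
    narrowed_type = R_RX_DIR8U;   /* Plain symbol-valued operand.  */
#endif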
/* Map BFD reloc types to RX ELF reloc types.  */

struct rx_reloc_map
{
  bfd_reloc_code_real_type  bfd_reloc_val;
  unsigned int              rx_reloc_val;
};

static const struct rx_reloc_map rx_reloc_map [] =
{
  { BFD_RELOC_NONE,             R_RX_NONE },
  { BFD_RELOC_8,                R_RX_DIR8S },
  { BFD_RELOC_16,               R_RX_DIR16S },
  { BFD_RELOC_24,               R_RX_DIR24S },
  { BFD_RELOC_32,               R_RX_DIR32 },
  { BFD_RELOC_RX_16_OP,         R_RX_DIR16 },
  { BFD_RELOC_RX_DIR3U_PCREL,   R_RX_DIR3U_PCREL },
  { BFD_RELOC_8_PCREL,          R_RX_DIR8S_PCREL },
  { BFD_RELOC_16_PCREL,         R_RX_DIR16S_PCREL },
  { BFD_RELOC_24_PCREL,         R_RX_DIR24S_PCREL },
  { BFD_RELOC_RX_8U,            R_RX_DIR8U },
  { BFD_RELOC_RX_16U,           R_RX_DIR16U },
  { BFD_RELOC_RX_24U,           R_RX_RH_24_UNS },
  { BFD_RELOC_RX_NEG8,          R_RX_RH_8_NEG },
  { BFD_RELOC_RX_NEG16,         R_RX_RH_16_NEG },
  { BFD_RELOC_RX_NEG24,         R_RX_RH_24_NEG },
  { BFD_RELOC_RX_NEG32,         R_RX_RH_32_NEG },
  { BFD_RELOC_RX_DIFF,          R_RX_RH_DIFF },
  { BFD_RELOC_RX_GPRELB,        R_RX_RH_GPRELB },
  { BFD_RELOC_RX_GPRELW,        R_RX_RH_GPRELW },
  { BFD_RELOC_RX_GPRELL,        R_RX_RH_GPRELL },
  { BFD_RELOC_RX_RELAX,         R_RX_RH_RELAX },
  { BFD_RELOC_RX_SYM,           R_RX_SYM },
  { BFD_RELOC_RX_OP_SUBTRACT,   R_RX_OPsub },
  { BFD_RELOC_RX_OP_NEG,        R_RX_OPneg },
  { BFD_RELOC_RX_ABS8,          R_RX_ABS8 },
  { BFD_RELOC_RX_ABS16,         R_RX_ABS16 },
  { BFD_RELOC_RX_ABS16_REV,     R_RX_ABS16_REV },
  { BFD_RELOC_RX_ABS32,         R_RX_ABS32 },
  { BFD_RELOC_RX_ABS32_REV,     R_RX_ABS32_REV },
  { BFD_RELOC_RX_ABS16UL,       R_RX_ABS16UL },
  { BFD_RELOC_RX_ABS16UW,       R_RX_ABS16UW },
  { BFD_RELOC_RX_ABS16U,        R_RX_ABS16U }
};
#define BIGE(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG)

static reloc_howto_type *
rx_reloc_type_lookup (bfd * abfd ATTRIBUTE_UNUSED,
		      bfd_reloc_code_real_type code)
{
  size_t i;

  if (code == BFD_RELOC_RX_32_OP)
    return rx_elf_howto_table + R_RX_DIR32;

  for (i = ARRAY_SIZE (rx_reloc_map); --i;)
    if (rx_reloc_map [i].bfd_reloc_val == code)
      return rx_elf_howto_table + rx_reloc_map [i].rx_reloc_val;

  return NULL;
}
static reloc_howto_type *
rx_reloc_name_lookup (bfd * abfd ATTRIBUTE_UNUSED, const char * r_name)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (rx_elf_howto_table); i++)
    if (rx_elf_howto_table[i].name != NULL
	&& strcasecmp (rx_elf_howto_table[i].name, r_name) == 0)
      return rx_elf_howto_table + i;

  return NULL;
}
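
/* Illustrative sketch (not part of the original source): these two
   routines are what the generic bfd_reloc_type_lookup and
   bfd_reloc_name_lookup entry points are expected to resolve to for
   this target, e.g.:  */
#if 0
  reloc_howto_type * howto;

  /* The generic 32-bit reloc maps onto this target's R_RX_DIR32 howto.  */
  howto = rx_reloc_type_lookup (abfd, BFD_RELOC_32);

  /* Case-insensitive lookup by ELF relocation name.  */
  howto = rx_reloc_name_lookup (abfd, "R_RX_DIR32");
#endif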
/* Set the howto pointer for an RX ELF reloc.  */

static void
rx_info_to_howto_rela (bfd *               abfd ATTRIBUTE_UNUSED,
		       arelent *           cache_ptr,
		       Elf_Internal_Rela * dst)
{
  unsigned int r_type;

  r_type = ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < (unsigned int) R_RX_max);
  cache_ptr->howto = rx_elf_howto_table + r_type;
}
static bfd_vma
get_symbol_value (const char *            name,
		  bfd_reloc_status_type * status,
		  struct bfd_link_info *  info,
		  bfd *                   input_bfd,
		  asection *              input_section,
		  int                     offset)
{
  bfd_vma value = 0;
  struct bfd_link_hash_entry * h;

  h = bfd_link_hash_lookup (info->hash, name, FALSE, FALSE, TRUE);

  if (h == NULL
      || (h->type != bfd_link_hash_defined
	  && h->type != bfd_link_hash_defweak))
    * status = info->callbacks->undefined_symbol
      (info, name, input_bfd, input_section, offset, TRUE);
  else
    value = (h->u.def.value
	     + h->u.def.section->output_section->vma
	     + h->u.def.section->output_offset);

  return value;
}
static bfd_vma
get_gp (bfd_reloc_status_type * status,
	struct bfd_link_info *  info,
	bfd *                   abfd,
	asection *              sec,
	int                     offset)
{
  static bfd_boolean cached = FALSE;
  static bfd_vma cached_value = 0;

  if (!cached)
    {
      cached_value = get_symbol_value ("__gp", status, info, abfd, sec, offset);
      cached = TRUE;
    }
  return cached_value;
}
static bfd_vma
get_romstart (bfd_reloc_status_type * status,
	      struct bfd_link_info *  info,
	      bfd *                   abfd,
	      asection *              sec,
	      int                     offset)
{
  static bfd_boolean cached = FALSE;
  static bfd_vma cached_value = 0;

  if (!cached)
    {
      cached_value = get_symbol_value ("_start", status, info, abfd, sec, offset);
      cached = TRUE;
    }
  return cached_value;
}
static bfd_vma
get_ramstart (bfd_reloc_status_type * status,
	      struct bfd_link_info *  info,
	      bfd *                   abfd,
	      asection *              sec,
	      int                     offset)
{
  static bfd_boolean cached = FALSE;
  static bfd_vma cached_value = 0;

  if (!cached)
    {
      cached_value = get_symbol_value ("__datastart", status, info, abfd, sec, offset);
      cached = TRUE;
    }
  return cached_value;
}
#define NUM_STACK_ENTRIES 16
static int32_t rx_stack [ NUM_STACK_ENTRIES ];
static unsigned int rx_stack_top;

#define RX_STACK_PUSH(val)			\
  do						\
    {						\
      if (rx_stack_top < NUM_STACK_ENTRIES)	\
	rx_stack [rx_stack_top ++] = (val);	\
      else					\
	r = bfd_reloc_dangerous;		\
    }						\
  while (0)

#define RX_STACK_POP(dest)			\
  do						\
    {						\
      if (rx_stack_top > 0)			\
	(dest) = rx_stack [-- rx_stack_top];	\
      else					\
	(dest) = 0, r = bfd_reloc_dangerous;	\
    }						\
  while (0)
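
/* Illustrative sketch (not part of the original source): an expression
   such as "symA - symB" reaches the linker as a chain of relocations,
   roughly R_RX_SYM (symA), R_RX_SYM (symB), R_RX_OPsub, followed by an
   R_RX_ABS* reloc that stores the result.  The handlers further down
   evaluate that chain with the macros above, in the spirit of this
   disabled fragment (symA_value, symB_value and relocation are
   stand-ins for the real local variables):  */
#if 0
  {
    bfd_reloc_status_type r = bfd_reloc_ok;
    int32_t tmp1, tmp2;
    bfd_vma relocation;

    RX_STACK_PUSH (symA_value);	 /* R_RX_SYM for symA.  */
    RX_STACK_PUSH (symB_value);	 /* R_RX_SYM for symB.  */

    RX_STACK_POP (tmp1);	 /* R_RX_OPsub pops the most recent  */
    RX_STACK_POP (tmp2);	 /* value first, so tmp2 is symA.    */
    tmp2 -= tmp1;
    RX_STACK_PUSH (tmp2);	 /* symA - symB is left on the stack.  */

    RX_STACK_POP (relocation);	 /* The final R_RX_ABS* reloc stores this.  */
  }
#endif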
/* Relocate an RX ELF section.
   There is some attempt to make this function usable for many architectures,
   both USE_REL and USE_RELA ['twould be nice if such a critter existed],
   if only to serve as a learning tool.

   The RELOCATE_SECTION function is called by the new ELF backend linker
   to handle the relocations for a section.

   The relocs are always passed as Rela structures; if the section
   actually uses Rel structures, the r_addend field will always be
   zero.

   This function is responsible for adjusting the section contents as
   necessary, and (if using Rela relocs and generating a relocatable
   output file) adjusting the reloc addend as necessary.

   This function does not have to worry about setting the reloc
   address or the reloc symbol index.

   LOCAL_SYMS is a pointer to the swapped in local symbols.

   LOCAL_SECTIONS is an array giving the section in the input file
   corresponding to the st_shndx field of each local symbol.

   The global hash table entry for the global symbols can be found
   via elf_sym_hashes (input_bfd).

   When generating relocatable output, this function must handle
   STB_LOCAL/STT_SECTION symbols specially.  The output symbol is
   going to be the section symbol corresponding to the output
   section, which means that the addend must be adjusted
   accordingly.  */
static bfd_boolean
rx_elf_relocate_section
    (bfd *                   output_bfd,
     struct bfd_link_info *  info,
     bfd *                   input_bfd,
     asection *              input_section,
     bfd_byte *              contents,
     Elf_Internal_Rela *     relocs,
     Elf_Internal_Sym *      local_syms,
     asection **             local_sections)
{
  Elf_Internal_Shdr *           symtab_hdr;
  struct elf_link_hash_entry ** sym_hashes;
  Elf_Internal_Rela *           rel;
  Elf_Internal_Rela *           relend;
  bfd_boolean                   pid_mode;
  bfd_boolean                   saw_subtract = FALSE;

  if (elf_elfheader (output_bfd)->e_flags & E_FLAG_RX_PID)
    pid_mode = TRUE;
  else
    pid_mode = FALSE;
  symtab_hdr = & elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = elf_sym_hashes (input_bfd);
  relend     = relocs + input_section->reloc_count;
  for (rel = relocs; rel < relend; rel ++)
    {
      reloc_howto_type *           howto;
      unsigned long                r_symndx;
      Elf_Internal_Sym *           sym;
      asection *                   sec;
      struct elf_link_hash_entry * h;
      bfd_vma                      relocation;
      bfd_reloc_status_type        r;
      const char *                 name = NULL;
      bfd_boolean                  unresolved_reloc = TRUE;
      int                          r_type;

      r_type = ELF32_R_TYPE (rel->r_info);
      r_symndx = ELF32_R_SYM (rel->r_info);

      howto  = rx_elf_howto_table + ELF32_R_TYPE (rel->r_info);
      h      = NULL;
      sym    = NULL;
      sec    = NULL;
      relocation = 0;

      if (rx_stack_top == 0)
	saw_subtract = FALSE;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sec = local_sections [r_symndx];
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, & sec, rel);

	  name = bfd_elf_string_from_elf_section
	    (input_bfd, symtab_hdr->sh_link, sym->st_name);
	  name = (sym->st_name == 0) ? bfd_section_name (input_bfd, sec) : name;
	}
      else
	{
	  bfd_boolean warned ATTRIBUTE_UNUSED;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes, h,
				   sec, relocation, unresolved_reloc,
				   warned);

	  name = h->root.root.string;
	}
      if (sec != NULL && elf_discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, relend, howto, contents);
      if (info->relocatable)
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    rel->r_addend += sec->output_offset;
	  continue;
	}
      if (h != NULL && h->root.type == bfd_link_hash_undefweak)
	/* If the symbol is undefined and weak
	   then the relocation resolves to zero.  */
	relocation = 0;

      if (howto->pc_relative)
	{
	  relocation -= (input_section->output_section->vma
			 + input_section->output_offset
			 + rel->r_offset);
	  if (r_type != R_RX_RH_3_PCREL
	      && r_type != R_RX_DIR3U_PCREL)
	    relocation ++;
	}

      relocation += rel->r_addend;
#define RANGE(a,b) if (a > (long) relocation || (long) relocation > b) r = bfd_reloc_overflow
#define ALIGN(m)   if (relocation & m) r = bfd_reloc_other;
#define OP(i)      (contents[rel->r_offset + (i)])
#define WARN_REDHAT(type) \
      _bfd_error_handler (_("%B:%A: Warning: deprecated Red Hat reloc " type " detected against: %s."), \
      input_bfd, input_section, name)

      /* Check for unsafe relocs in PID mode.  These are any relocs where
	 an absolute address is being computed.  There are special cases
	 for relocs against symbols that are known to be referenced in
	 crt0.o before the PID base address register has been initialised.  */
#define UNSAFE_FOR_PID							\
  do									\
    {									\
      if (pid_mode							\
	  && sec != NULL						\
	  && sec->flags & SEC_READONLY					\
	  && !(input_section->flags & SEC_DEBUGGING)			\
	  && strcmp (name, "__pid_base") != 0				\
	  && strcmp (name, "__gp") != 0					\
	  && strcmp (name, "__romdatastart") != 0			\
	  && !saw_subtract)						\
	_bfd_error_handler (_("%B(%A): unsafe PID relocation %s at 0x%08lx (against %s in %s)"), \
			    input_bfd, input_section, howto->name,	\
			    input_section->output_section->vma + input_section->output_offset + rel->r_offset, \
			    name, sec->name);				\
    }									\
  while (0)
589 /* Opcode relocs are always big endian. Data relocs are bi-endian. */
598 case R_RX_RH_3_PCREL
:
599 WARN_REDHAT ("RX_RH_3_PCREL");
602 OP (0) |= relocation
& 0x07;
606 WARN_REDHAT ("RX_RH_8_NEG");
607 relocation
= - relocation
;
608 case R_RX_DIR8S_PCREL
:
627 WARN_REDHAT ("RX_RH_16_NEG");
628 relocation
= - relocation
;
629 case R_RX_DIR16S_PCREL
:
631 RANGE (-32768, 32767);
632 #if RX_OPCODE_BIG_ENDIAN
635 OP (1) = relocation
>> 8;
640 WARN_REDHAT ("RX_RH_16_OP");
642 RANGE (-32768, 32767);
643 #if RX_OPCODE_BIG_ENDIAN
645 OP (0) = relocation
>> 8;
648 OP (1) = relocation
>> 8;
654 RANGE (-32768, 65535);
655 if (BIGE (output_bfd
) && !(input_section
->flags
& SEC_CODE
))
658 OP (0) = relocation
>> 8;
663 OP (1) = relocation
>> 8;
670 #if RX_OPCODE_BIG_ENDIAN
672 OP (0) = relocation
>> 8;
675 OP (1) = relocation
>> 8;
681 RANGE (-32768, 65536);
682 #if RX_OPCODE_BIG_ENDIAN
684 OP (0) = relocation
>> 8;
687 OP (1) = relocation
>> 8;
693 RANGE (-32768, 65536);
694 #if RX_OPCODE_BIG_ENDIAN
696 OP (1) = relocation
>> 8;
699 OP (0) = relocation
>> 8;
703 case R_RX_DIR3U_PCREL
:
706 OP (0) |= relocation
& 0x07;
711 WARN_REDHAT ("RX_RH_24_NEG");
712 relocation
= - relocation
;
713 case R_RX_DIR24S_PCREL
:
714 RANGE (-0x800000, 0x7fffff);
715 #if RX_OPCODE_BIG_ENDIAN
717 OP (1) = relocation
>> 8;
718 OP (0) = relocation
>> 16;
721 OP (1) = relocation
>> 8;
722 OP (2) = relocation
>> 16;
728 WARN_REDHAT ("RX_RH_24_OP");
729 RANGE (-0x800000, 0x7fffff);
730 #if RX_OPCODE_BIG_ENDIAN
732 OP (1) = relocation
>> 8;
733 OP (0) = relocation
>> 16;
736 OP (1) = relocation
>> 8;
737 OP (2) = relocation
>> 16;
743 RANGE (-0x800000, 0x7fffff);
744 if (BIGE (output_bfd
) && !(input_section
->flags
& SEC_CODE
))
747 OP (1) = relocation
>> 8;
748 OP (0) = relocation
>> 16;
753 OP (1) = relocation
>> 8;
754 OP (2) = relocation
>> 16;
760 WARN_REDHAT ("RX_RH_24_UNS");
762 #if RX_OPCODE_BIG_ENDIAN
764 OP (1) = relocation
>> 8;
765 OP (0) = relocation
>> 16;
768 OP (1) = relocation
>> 8;
769 OP (2) = relocation
>> 16;
775 WARN_REDHAT ("RX_RH_32_NEG");
776 relocation
= - relocation
;
777 #if RX_OPCODE_BIG_ENDIAN
779 OP (2) = relocation
>> 8;
780 OP (1) = relocation
>> 16;
781 OP (0) = relocation
>> 24;
784 OP (1) = relocation
>> 8;
785 OP (2) = relocation
>> 16;
786 OP (3) = relocation
>> 24;
792 WARN_REDHAT ("RX_RH_32_OP");
793 #if RX_OPCODE_BIG_ENDIAN
795 OP (2) = relocation
>> 8;
796 OP (1) = relocation
>> 16;
797 OP (0) = relocation
>> 24;
800 OP (1) = relocation
>> 8;
801 OP (2) = relocation
>> 16;
802 OP (3) = relocation
>> 24;
807 if (BIGE (output_bfd
) && !(input_section
->flags
& SEC_CODE
))
810 OP (2) = relocation
>> 8;
811 OP (1) = relocation
>> 16;
812 OP (0) = relocation
>> 24;
817 OP (1) = relocation
>> 8;
818 OP (2) = relocation
>> 16;
819 OP (3) = relocation
>> 24;
824 if (BIGE (output_bfd
))
827 OP (1) = relocation
>> 8;
828 OP (2) = relocation
>> 16;
829 OP (3) = relocation
>> 24;
834 OP (2) = relocation
>> 8;
835 OP (1) = relocation
>> 16;
836 OP (0) = relocation
>> 24;
843 WARN_REDHAT ("RX_RH_DIFF");
844 val
= bfd_get_32 (output_bfd
, & OP (0));
846 bfd_put_32 (output_bfd
, val
, & OP (0));
851 WARN_REDHAT ("RX_RH_GPRELB");
852 relocation
-= get_gp (&r
, info
, input_bfd
, input_section
, rel
->r_offset
);
854 #if RX_OPCODE_BIG_ENDIAN
856 OP (0) = relocation
>> 8;
859 OP (1) = relocation
>> 8;
864 WARN_REDHAT ("RX_RH_GPRELW");
865 relocation
-= get_gp (&r
, info
, input_bfd
, input_section
, rel
->r_offset
);
869 #if RX_OPCODE_BIG_ENDIAN
871 OP (0) = relocation
>> 8;
874 OP (1) = relocation
>> 8;
879 WARN_REDHAT ("RX_RH_GPRELL");
880 relocation
-= get_gp (&r
, info
, input_bfd
, input_section
, rel
->r_offset
);
884 #if RX_OPCODE_BIG_ENDIAN
886 OP (0) = relocation
>> 8;
889 OP (1) = relocation
>> 8;
893 /* Internal relocations just for relaxation: */
894 case R_RX_RH_ABS5p5B
:
895 RX_STACK_POP (relocation
);
898 OP (0) |= relocation
>> 2;
900 OP (1) |= (relocation
<< 6) & 0x80;
901 OP (1) |= (relocation
<< 3) & 0x08;
904 case R_RX_RH_ABS5p5W
:
905 RX_STACK_POP (relocation
);
910 OP (0) |= relocation
>> 2;
912 OP (1) |= (relocation
<< 6) & 0x80;
913 OP (1) |= (relocation
<< 3) & 0x08;
916 case R_RX_RH_ABS5p5L
:
917 RX_STACK_POP (relocation
);
922 OP (0) |= relocation
>> 2;
924 OP (1) |= (relocation
<< 6) & 0x80;
925 OP (1) |= (relocation
<< 3) & 0x08;
928 case R_RX_RH_ABS5p8B
:
929 RX_STACK_POP (relocation
);
932 OP (0) |= (relocation
<< 3) & 0x80;
933 OP (0) |= relocation
& 0x0f;
936 case R_RX_RH_ABS5p8W
:
937 RX_STACK_POP (relocation
);
942 OP (0) |= (relocation
<< 3) & 0x80;
943 OP (0) |= relocation
& 0x0f;
946 case R_RX_RH_ABS5p8L
:
947 RX_STACK_POP (relocation
);
952 OP (0) |= (relocation
<< 3) & 0x80;
953 OP (0) |= relocation
& 0x0f;
956 case R_RX_RH_UIMM4p8
:
959 OP (0) |= relocation
<< 4;
962 case R_RX_RH_UNEG4p8
:
965 OP (0) |= (-relocation
) << 4;
968 /* Complex reloc handling: */
972 RX_STACK_POP (relocation
);
973 #if RX_OPCODE_BIG_ENDIAN
975 OP (2) = relocation
>> 8;
976 OP (1) = relocation
>> 16;
977 OP (0) = relocation
>> 24;
980 OP (1) = relocation
>> 8;
981 OP (2) = relocation
>> 16;
982 OP (3) = relocation
>> 24;
988 RX_STACK_POP (relocation
);
989 #if RX_OPCODE_BIG_ENDIAN
991 OP (1) = relocation
>> 8;
992 OP (2) = relocation
>> 16;
993 OP (3) = relocation
>> 24;
996 OP (2) = relocation
>> 8;
997 OP (1) = relocation
>> 16;
998 OP (0) = relocation
>> 24;
1002 case R_RX_ABS24S_PCREL
:
1005 RX_STACK_POP (relocation
);
1006 RANGE (-0x800000, 0x7fffff);
1007 if (BIGE (output_bfd
) && !(input_section
->flags
& SEC_CODE
))
1009 OP (2) = relocation
;
1010 OP (1) = relocation
>> 8;
1011 OP (0) = relocation
>> 16;
1015 OP (0) = relocation
;
1016 OP (1) = relocation
>> 8;
1017 OP (2) = relocation
>> 16;
1023 RX_STACK_POP (relocation
);
1024 RANGE (-32768, 65535);
1025 #if RX_OPCODE_BIG_ENDIAN
1026 OP (1) = relocation
;
1027 OP (0) = relocation
>> 8;
1029 OP (0) = relocation
;
1030 OP (1) = relocation
>> 8;
1034 case R_RX_ABS16_REV
:
1036 RX_STACK_POP (relocation
);
1037 RANGE (-32768, 65535);
1038 #if RX_OPCODE_BIG_ENDIAN
1039 OP (0) = relocation
;
1040 OP (1) = relocation
>> 8;
1042 OP (1) = relocation
;
1043 OP (0) = relocation
>> 8;
1047 case R_RX_ABS16S_PCREL
:
1049 RX_STACK_POP (relocation
);
1050 RANGE (-32768, 32767);
1051 if (BIGE (output_bfd
) && !(input_section
->flags
& SEC_CODE
))
1053 OP (1) = relocation
;
1054 OP (0) = relocation
>> 8;
1058 OP (0) = relocation
;
1059 OP (1) = relocation
>> 8;
1065 RX_STACK_POP (relocation
);
1067 #if RX_OPCODE_BIG_ENDIAN
1068 OP (1) = relocation
;
1069 OP (0) = relocation
>> 8;
1071 OP (0) = relocation
;
1072 OP (1) = relocation
>> 8;
1078 RX_STACK_POP (relocation
);
1081 #if RX_OPCODE_BIG_ENDIAN
1082 OP (1) = relocation
;
1083 OP (0) = relocation
>> 8;
1085 OP (0) = relocation
;
1086 OP (1) = relocation
>> 8;
1092 RX_STACK_POP (relocation
);
1095 #if RX_OPCODE_BIG_ENDIAN
1096 OP (1) = relocation
;
1097 OP (0) = relocation
>> 8;
1099 OP (0) = relocation
;
1100 OP (1) = relocation
>> 8;
1106 RX_STACK_POP (relocation
);
1108 OP (0) = relocation
;
1113 RX_STACK_POP (relocation
);
1115 OP (0) = relocation
;
1120 RX_STACK_POP (relocation
);
1123 OP (0) = relocation
;
1128 RX_STACK_POP (relocation
);
1131 OP (0) = relocation
;
1136 case R_RX_ABS8S_PCREL
:
1137 RX_STACK_POP (relocation
);
1139 OP (0) = relocation
;
1143 if (r_symndx
< symtab_hdr
->sh_info
)
1144 RX_STACK_PUSH (sec
->output_section
->vma
1145 + sec
->output_offset
1151 && (h
->root
.type
== bfd_link_hash_defined
1152 || h
->root
.type
== bfd_link_hash_defweak
))
1153 RX_STACK_PUSH (h
->root
.u
.def
.value
1154 + sec
->output_section
->vma
1155 + sec
->output_offset
1158 _bfd_error_handler (_("Warning: RX_SYM reloc with an unknown symbol"));
1168 RX_STACK_PUSH (tmp
);
1176 RX_STACK_POP (tmp1
);
1177 RX_STACK_POP (tmp2
);
1179 RX_STACK_PUSH (tmp1
);
1187 saw_subtract
= TRUE
;
1188 RX_STACK_POP (tmp1
);
1189 RX_STACK_POP (tmp2
);
1191 RX_STACK_PUSH (tmp2
);
1199 RX_STACK_POP (tmp1
);
1200 RX_STACK_POP (tmp2
);
1202 RX_STACK_PUSH (tmp1
);
1210 saw_subtract
= TRUE
;
1211 RX_STACK_POP (tmp1
);
1212 RX_STACK_POP (tmp2
);
1214 RX_STACK_PUSH (tmp1
);
1222 RX_STACK_POP (tmp1
);
1223 RX_STACK_POP (tmp2
);
1225 RX_STACK_PUSH (tmp1
);
1233 RX_STACK_POP (tmp1
);
1234 RX_STACK_POP (tmp2
);
1236 RX_STACK_PUSH (tmp1
);
1240 case R_RX_OPsctsize
:
1241 RX_STACK_PUSH (input_section
->size
);
1245 RX_STACK_PUSH (input_section
->output_section
->vma
);
1252 RX_STACK_POP (tmp1
);
1253 RX_STACK_POP (tmp2
);
1255 RX_STACK_PUSH (tmp1
);
1263 RX_STACK_POP (tmp1
);
1264 RX_STACK_POP (tmp2
);
1266 RX_STACK_PUSH (tmp1
);
1274 RX_STACK_POP (tmp1
);
1275 RX_STACK_POP (tmp2
);
1277 RX_STACK_PUSH (tmp1
);
1287 RX_STACK_PUSH (tmp
);
1295 RX_STACK_POP (tmp1
);
1296 RX_STACK_POP (tmp2
);
1298 RX_STACK_PUSH (tmp1
);
1303 RX_STACK_PUSH (get_romstart (&r
, info
, input_bfd
, input_section
, rel
->r_offset
));
1307 RX_STACK_PUSH (get_ramstart (&r
, info
, input_bfd
, input_section
, rel
->r_offset
));
1311 r
= bfd_reloc_notsupported
;
1315 if (r
!= bfd_reloc_ok
)
1317 const char * msg
= NULL
;
1321 case bfd_reloc_overflow
:
1322 /* Catch the case of a missing function declaration
1323 and emit a more helpful error message. */
1324 if (r_type
== R_RX_DIR24S_PCREL
)
1325 msg
= _("%B(%A): error: call to undefined function '%s'");
1327 r
= info
->callbacks
->reloc_overflow
1328 (info
, (h
? &h
->root
: NULL
), name
, howto
->name
, (bfd_vma
) 0,
1329 input_bfd
, input_section
, rel
->r_offset
);
1332 case bfd_reloc_undefined
:
1333 r
= info
->callbacks
->undefined_symbol
1334 (info
, name
, input_bfd
, input_section
, rel
->r_offset
,
1338 case bfd_reloc_other
:
1339 msg
= _("%B(%A): warning: unaligned access to symbol '%s' in the small data area");
1342 case bfd_reloc_outofrange
:
1343 msg
= _("%B(%A): internal error: out of range error");
1346 case bfd_reloc_notsupported
:
1347 msg
= _("%B(%A): internal error: unsupported relocation error");
1350 case bfd_reloc_dangerous
:
1351 msg
= _("%B(%A): internal error: dangerous relocation");
1355 msg
= _("%B(%A): internal error: unknown error");
1360 _bfd_error_handler (msg
, input_bfd
, input_section
, name
);
/* Relaxation Support.  */

/* Progression of relocations from largest operand size to smallest.  */

static int
next_smaller_reloc (int r)
{
  switch (r)
    {
    case R_RX_DIR32:		return R_RX_DIR24S;
    case R_RX_DIR24S:		return R_RX_DIR16S;
    case R_RX_DIR16S:		return R_RX_DIR8S;
    case R_RX_DIR8S:		return R_RX_NONE;

    case R_RX_DIR16:		return R_RX_DIR8;
    case R_RX_DIR8:		return R_RX_NONE;

    case R_RX_DIR16U:		return R_RX_DIR8U;
    case R_RX_DIR8U:		return R_RX_NONE;

    case R_RX_DIR24S_PCREL:	return R_RX_DIR16S_PCREL;
    case R_RX_DIR16S_PCREL:	return R_RX_DIR8S_PCREL;
    case R_RX_DIR8S_PCREL:	return R_RX_DIR3U_PCREL;

    case R_RX_DIR16UL:		return R_RX_DIR8UL;
    case R_RX_DIR8UL:		return R_RX_NONE;
    case R_RX_DIR16UW:		return R_RX_DIR8UW;
    case R_RX_DIR8UW:		return R_RX_NONE;

    case R_RX_RH_32_OP:		return R_RX_RH_24_OP;
    case R_RX_RH_24_OP:		return R_RX_RH_16_OP;
    case R_RX_RH_16_OP:		return R_RX_DIR8;

    case R_RX_ABS32:		return R_RX_ABS24S;
    case R_RX_ABS24S:		return R_RX_ABS16S;
    case R_RX_ABS16:		return R_RX_ABS8;
    case R_RX_ABS16U:		return R_RX_ABS8U;
    case R_RX_ABS16S:		return R_RX_ABS8S;
    case R_RX_ABS8:		return R_RX_NONE;
    case R_RX_ABS8U:		return R_RX_NONE;
    case R_RX_ABS8S:		return R_RX_NONE;
    case R_RX_ABS24S_PCREL:	return R_RX_ABS16S_PCREL;
    case R_RX_ABS16S_PCREL:	return R_RX_ABS8S_PCREL;
    case R_RX_ABS8S_PCREL:	return R_RX_NONE;
    case R_RX_ABS16UL:		return R_RX_ABS8UL;
    case R_RX_ABS16UW:		return R_RX_ABS8UW;
    case R_RX_ABS8UL:		return R_RX_NONE;
    case R_RX_ABS8UW:		return R_RX_NONE;
    }
  return r;
}
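
/* Illustrative note (not part of the original source): the relaxation
   pass below narrows an operand one step per call, e.g. R_RX_DIR32 ->
   R_RX_DIR24S -> R_RX_DIR16S -> R_RX_DIR8S, and gets R_RX_NONE back
   once no smaller encoding exists.  A disabled sketch of that use:  */
#if 0
  int newrel = next_smaller_reloc (R_RX_DIR32);	/* Yields R_RX_DIR24S.  */

  if (newrel != R_RX_DIR32)
    {
      /* The caller would retype the reloc to NEWREL and delete the
	 operand byte that is no longer needed.  */
    }
#endif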
1423 /* Delete some bytes from a section while relaxing. */
1426 elf32_rx_relax_delete_bytes (bfd
*abfd
, asection
*sec
, bfd_vma addr
, int count
,
1427 Elf_Internal_Rela
*alignment_rel
, int force_snip
)
1429 Elf_Internal_Shdr
* symtab_hdr
;
1430 unsigned int sec_shndx
;
1431 bfd_byte
* contents
;
1432 Elf_Internal_Rela
* irel
;
1433 Elf_Internal_Rela
* irelend
;
1434 Elf_Internal_Sym
* isym
;
1435 Elf_Internal_Sym
* isymend
;
1437 unsigned int symcount
;
1438 struct elf_link_hash_entry
** sym_hashes
;
1439 struct elf_link_hash_entry
** end_hashes
;
1444 sec_shndx
= _bfd_elf_section_from_bfd_section (abfd
, sec
);
1446 contents
= elf_section_data (sec
)->this_hdr
.contents
;
1448 /* The deletion must stop at the next alignment boundary, if
1449 ALIGNMENT_REL is non-NULL. */
1452 toaddr
= alignment_rel
->r_offset
;
1454 irel
= elf_section_data (sec
)->relocs
;
1455 irelend
= irel
+ sec
->reloc_count
;
1457 /* Actually delete the bytes. */
1458 memmove (contents
+ addr
, contents
+ addr
+ count
,
1459 (size_t) (toaddr
- addr
- count
));
1461 /* If we don't have an alignment marker to worry about, we can just
1462 shrink the section. Otherwise, we have to fill in the newly
1463 created gap with NOP insns (0x03). */
1467 memset (contents
+ toaddr
- count
, 0x03, count
);
1469 /* Adjust all the relocs. */
1470 for (irel
= elf_section_data (sec
)->relocs
; irel
< irelend
; irel
++)
1472 /* Get the new reloc address. */
1473 if (irel
->r_offset
> addr
1474 && (irel
->r_offset
< toaddr
1475 || (force_snip
&& irel
->r_offset
== toaddr
)))
1476 irel
->r_offset
-= count
;
1478 /* If we see an ALIGN marker at the end of the gap, we move it
1479 to the beginning of the gap, since marking these gaps is what
1481 if (irel
->r_offset
== toaddr
1482 && ELF32_R_TYPE (irel
->r_info
) == R_RX_RH_RELAX
1483 && irel
->r_addend
& RX_RELAXA_ALIGN
)
1484 irel
->r_offset
-= count
;
1487 /* Adjust the local symbols defined in this section. */
1488 symtab_hdr
= &elf_tdata (abfd
)->symtab_hdr
;
1489 isym
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
1490 isymend
= isym
+ symtab_hdr
->sh_info
;
1492 for (; isym
< isymend
; isym
++)
1494 /* If the symbol is in the range of memory we just moved, we
1495 have to adjust its value. */
1496 if (isym
->st_shndx
== sec_shndx
1497 && isym
->st_value
> addr
1498 && isym
->st_value
< toaddr
)
1499 isym
->st_value
-= count
;
      /* If the symbol *spans* the bytes we just deleted (i.e. its
	 *end* is in the moved bytes but its *start* isn't), then we
	 must adjust its size.  */
1504 if (isym
->st_shndx
== sec_shndx
1505 && isym
->st_value
< addr
1506 && isym
->st_value
+ isym
->st_size
> addr
1507 && isym
->st_value
+ isym
->st_size
< toaddr
)
1508 isym
->st_size
-= count
;
1511 /* Now adjust the global symbols defined in this section. */
1512 symcount
= (symtab_hdr
->sh_size
/ sizeof (Elf32_External_Sym
)
1513 - symtab_hdr
->sh_info
);
1514 sym_hashes
= elf_sym_hashes (abfd
);
1515 end_hashes
= sym_hashes
+ symcount
;
1517 for (; sym_hashes
< end_hashes
; sym_hashes
++)
1519 struct elf_link_hash_entry
*sym_hash
= *sym_hashes
;
1521 if ((sym_hash
->root
.type
== bfd_link_hash_defined
1522 || sym_hash
->root
.type
== bfd_link_hash_defweak
)
1523 && sym_hash
->root
.u
.def
.section
== sec
)
1525 /* As above, adjust the value if needed. */
1526 if (sym_hash
->root
.u
.def
.value
> addr
1527 && sym_hash
->root
.u
.def
.value
< toaddr
)
1528 sym_hash
->root
.u
.def
.value
-= count
;
1530 /* As above, adjust the size if needed. */
1531 if (sym_hash
->root
.u
.def
.value
< addr
1532 && sym_hash
->root
.u
.def
.value
+ sym_hash
->size
> addr
1533 && sym_hash
->root
.u
.def
.value
+ sym_hash
->size
< toaddr
)
1534 sym_hash
->size
-= count
;
1541 /* Used to sort relocs by address. If relocs have the same address,
1542 we maintain their relative order, except that R_RX_RH_RELAX
1543 alignment relocs must be the first reloc for any given address. */
1546 reloc_bubblesort (Elf_Internal_Rela
* r
, int count
)
1550 bfd_boolean swappit
;
1552 /* This is almost a classic bubblesort. It's the slowest sort, but
1553 we're taking advantage of the fact that the relocations are
1554 mostly in order already (the assembler emits them that way) and
1555 we need relocs with the same address to remain in the same
1561 for (i
= 0; i
< count
- 1; i
++)
1563 if (r
[i
].r_offset
> r
[i
+ 1].r_offset
)
1565 else if (r
[i
].r_offset
< r
[i
+ 1].r_offset
)
1567 else if (ELF32_R_TYPE (r
[i
+ 1].r_info
) == R_RX_RH_RELAX
1568 && (r
[i
+ 1].r_addend
& RX_RELAXA_ALIGN
))
1570 else if (ELF32_R_TYPE (r
[i
+ 1].r_info
) == R_RX_RH_RELAX
1571 && (r
[i
+ 1].r_addend
& RX_RELAXA_ELIGN
)
1572 && !(ELF32_R_TYPE (r
[i
].r_info
) == R_RX_RH_RELAX
1573 && (r
[i
].r_addend
& RX_RELAXA_ALIGN
)))
1580 Elf_Internal_Rela tmp
;
1585 /* If we do move a reloc back, re-scan to see if it
1586 needs to be moved even further back. This avoids
1587 most of the O(n^2) behavior for our cases. */
1597 #define OFFSET_FOR_RELOC(rel, lrel, scale) \
1598 rx_offset_for_reloc (abfd, rel + 1, symtab_hdr, shndx_buf, intsyms, \
1599 lrel, abfd, sec, link_info, scale)
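
/* Note added for clarity (not part of the original source): the macro
   above simply forwards the local state of elf32_rx_relax_section to
   rx_offset_for_reloc below, which evaluates the reloc chain starting
   at REL + 1, reports the last reloc it consumed through LREL, and
   reports the operand's scale factor (1, 2 or 4) through SCALE.  */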
1602 rx_offset_for_reloc (bfd
* abfd
,
1603 Elf_Internal_Rela
* rel
,
1604 Elf_Internal_Shdr
* symtab_hdr
,
1605 Elf_External_Sym_Shndx
* shndx_buf ATTRIBUTE_UNUSED
,
1606 Elf_Internal_Sym
* intsyms
,
1607 Elf_Internal_Rela
** lrel
,
1609 asection
* input_section
,
1610 struct bfd_link_info
* info
,
1614 bfd_reloc_status_type r
;
1618 /* REL is the first of 1..N relocations. We compute the symbol
1619 value for each relocation, then combine them if needed. LREL
1620 gets a pointer to the last relocation used. */
1625 /* Get the value of the symbol referred to by the reloc. */
1626 if (ELF32_R_SYM (rel
->r_info
) < symtab_hdr
->sh_info
)
1628 /* A local symbol. */
1629 Elf_Internal_Sym
*isym
;
1632 isym
= intsyms
+ ELF32_R_SYM (rel
->r_info
);
1634 if (isym
->st_shndx
== SHN_UNDEF
)
1635 ssec
= bfd_und_section_ptr
;
1636 else if (isym
->st_shndx
== SHN_ABS
)
1637 ssec
= bfd_abs_section_ptr
;
1638 else if (isym
->st_shndx
== SHN_COMMON
)
1639 ssec
= bfd_com_section_ptr
;
1641 ssec
= bfd_section_from_elf_index (abfd
,
1644 /* Initial symbol value. */
1645 symval
= isym
->st_value
;
1647 /* GAS may have made this symbol relative to a section, in
1648 which case, we have to add the addend to find the
1650 if (ELF_ST_TYPE (isym
->st_info
) == STT_SECTION
)
1651 symval
+= rel
->r_addend
;
1655 if ((ssec
->flags
& SEC_MERGE
)
1656 && ssec
->sec_info_type
== ELF_INFO_TYPE_MERGE
)
1657 symval
= _bfd_merged_section_offset (abfd
, & ssec
,
1658 elf_section_data (ssec
)->sec_info
,
1662 /* Now make the offset relative to where the linker is putting it. */
1665 ssec
->output_section
->vma
+ ssec
->output_offset
;
1667 symval
+= rel
->r_addend
;
1672 struct elf_link_hash_entry
* h
;
1674 /* An external symbol. */
1675 indx
= ELF32_R_SYM (rel
->r_info
) - symtab_hdr
->sh_info
;
1676 h
= elf_sym_hashes (abfd
)[indx
];
1677 BFD_ASSERT (h
!= NULL
);
1679 if (h
->root
.type
!= bfd_link_hash_defined
1680 && h
->root
.type
!= bfd_link_hash_defweak
)
1682 /* This appears to be a reference to an undefined
1683 symbol. Just ignore it--it will be caught by the
1684 regular reloc processing. */
1690 symval
= (h
->root
.u
.def
.value
1691 + h
->root
.u
.def
.section
->output_section
->vma
1692 + h
->root
.u
.def
.section
->output_offset
);
1694 symval
+= rel
->r_addend
;
1697 switch (ELF32_R_TYPE (rel
->r_info
))
1700 RX_STACK_PUSH (symval
);
1704 RX_STACK_POP (tmp1
);
1706 RX_STACK_PUSH (tmp1
);
1710 RX_STACK_POP (tmp1
);
1711 RX_STACK_POP (tmp2
);
1713 RX_STACK_PUSH (tmp1
);
1717 RX_STACK_POP (tmp1
);
1718 RX_STACK_POP (tmp2
);
1720 RX_STACK_PUSH (tmp2
);
1724 RX_STACK_POP (tmp1
);
1725 RX_STACK_POP (tmp2
);
1727 RX_STACK_PUSH (tmp1
);
1731 RX_STACK_POP (tmp1
);
1732 RX_STACK_POP (tmp2
);
1734 RX_STACK_PUSH (tmp1
);
1738 RX_STACK_POP (tmp1
);
1739 RX_STACK_POP (tmp2
);
1741 RX_STACK_PUSH (tmp1
);
1745 RX_STACK_POP (tmp1
);
1746 RX_STACK_POP (tmp2
);
1748 RX_STACK_PUSH (tmp1
);
1751 case R_RX_OPsctsize
:
1752 RX_STACK_PUSH (input_section
->size
);
1756 RX_STACK_PUSH (input_section
->output_section
->vma
);
1760 RX_STACK_POP (tmp1
);
1761 RX_STACK_POP (tmp2
);
1763 RX_STACK_PUSH (tmp1
);
1767 RX_STACK_POP (tmp1
);
1768 RX_STACK_POP (tmp2
);
1770 RX_STACK_PUSH (tmp1
);
1774 RX_STACK_POP (tmp1
);
1775 RX_STACK_POP (tmp2
);
1777 RX_STACK_PUSH (tmp1
);
1781 RX_STACK_POP (tmp1
);
1783 RX_STACK_PUSH (tmp1
);
1787 RX_STACK_POP (tmp1
);
1788 RX_STACK_POP (tmp2
);
1790 RX_STACK_PUSH (tmp1
);
1794 RX_STACK_PUSH (get_romstart (&r
, info
, input_bfd
, input_section
, rel
->r_offset
));
1798 RX_STACK_PUSH (get_ramstart (&r
, info
, input_bfd
, input_section
, rel
->r_offset
));
1806 RX_STACK_POP (symval
);
1817 RX_STACK_POP (symval
);
1825 RX_STACK_POP (symval
);
1836 move_reloc (Elf_Internal_Rela
* irel
, Elf_Internal_Rela
* srel
, int delta
)
1838 bfd_vma old_offset
= srel
->r_offset
;
1841 while (irel
<= srel
)
1843 if (irel
->r_offset
== old_offset
)
1844 irel
->r_offset
+= delta
;
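
/* Note added for clarity (not part of the original source): move_reloc
   shifts, by DELTA bytes, the relocations up to and including SREL
   that still carry SREL's old offset, so that when an opcode is
   rewritten to a shorter form its relocs keep pointing at the operand
   field; the callers below use e.g. move_reloc (irel, srel, -1) after
   dropping one leading opcode byte.  */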
1849 /* Relax one section. */
1852 elf32_rx_relax_section (bfd
* abfd
,
1854 struct bfd_link_info
* link_info
,
1855 bfd_boolean
* again
,
1856 bfd_boolean allow_pcrel3
)
1858 Elf_Internal_Shdr
* symtab_hdr
;
1859 Elf_Internal_Shdr
* shndx_hdr
;
1860 Elf_Internal_Rela
* internal_relocs
;
1861 Elf_Internal_Rela
* free_relocs
= NULL
;
1862 Elf_Internal_Rela
* irel
;
1863 Elf_Internal_Rela
* srel
;
1864 Elf_Internal_Rela
* irelend
;
1865 Elf_Internal_Rela
* next_alignment
;
1866 Elf_Internal_Rela
* prev_alignment
;
1867 bfd_byte
* contents
= NULL
;
1868 bfd_byte
* free_contents
= NULL
;
1869 Elf_Internal_Sym
* intsyms
= NULL
;
1870 Elf_Internal_Sym
* free_intsyms
= NULL
;
1871 Elf_External_Sym_Shndx
* shndx_buf
= NULL
;
1877 int section_alignment_glue
;
1878 /* how much to scale the relocation by - 1, 2, or 4. */
1881 /* Assume nothing changes. */
1884 /* We don't have to do anything for a relocatable link, if
1885 this section does not have relocs, or if this is not a
1887 if (link_info
->relocatable
1888 || (sec
->flags
& SEC_RELOC
) == 0
1889 || sec
->reloc_count
== 0
1890 || (sec
->flags
& SEC_CODE
) == 0)
1893 symtab_hdr
= &elf_tdata (abfd
)->symtab_hdr
;
1894 shndx_hdr
= &elf_tdata (abfd
)->symtab_shndx_hdr
;
1896 sec_start
= sec
->output_section
->vma
+ sec
->output_offset
;
1898 /* Get the section contents. */
1899 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
1900 contents
= elf_section_data (sec
)->this_hdr
.contents
;
1901 /* Go get them off disk. */
1904 if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
1906 elf_section_data (sec
)->this_hdr
.contents
= contents
;
1909 /* Read this BFD's symbols. */
1910 /* Get cached copy if it exists. */
1911 if (symtab_hdr
->contents
!= NULL
)
1912 intsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
1915 intsyms
= bfd_elf_get_elf_syms (abfd
, symtab_hdr
, symtab_hdr
->sh_info
, 0, NULL
, NULL
, NULL
);
1916 symtab_hdr
->contents
= (bfd_byte
*) intsyms
;
1919 if (shndx_hdr
->sh_size
!= 0)
1923 amt
= symtab_hdr
->sh_info
;
1924 amt
*= sizeof (Elf_External_Sym_Shndx
);
1925 shndx_buf
= (Elf_External_Sym_Shndx
*) bfd_malloc (amt
);
1926 if (shndx_buf
== NULL
)
1928 if (bfd_seek (abfd
, shndx_hdr
->sh_offset
, SEEK_SET
) != 0
1929 || bfd_bread ((PTR
) shndx_buf
, amt
, abfd
) != amt
)
1931 shndx_hdr
->contents
= (bfd_byte
*) shndx_buf
;
1934 /* Get a copy of the native relocations. */
1935 internal_relocs
= (_bfd_elf_link_read_relocs
1936 (abfd
, sec
, (PTR
) NULL
, (Elf_Internal_Rela
*) NULL
,
1937 link_info
->keep_memory
));
1938 if (internal_relocs
== NULL
)
1940 if (! link_info
->keep_memory
)
1941 free_relocs
= internal_relocs
;
1943 /* The RL_ relocs must be just before the operand relocs they go
1944 with, so we must sort them to guarantee this. We use bubblesort
1945 instead of qsort so we can guarantee that relocs with the same
1946 address remain in the same relative order. */
1947 reloc_bubblesort (internal_relocs
, sec
->reloc_count
);
1949 /* Walk through them looking for relaxing opportunities. */
1950 irelend
= internal_relocs
+ sec
->reloc_count
;
1952 /* This will either be NULL or a pointer to the next alignment
1954 next_alignment
= internal_relocs
;
1955 /* This will be the previous alignment, although at first it points
1956 to the first real relocation. */
1957 prev_alignment
= internal_relocs
;
  /* We calculate worst case shrinkage caused by alignment directives.
     Not fool-proof, but better than either ignoring the problem or
     doing heavy duty analysis of all the alignment markers in all
     input sections.  */
1963 section_alignment_glue
= 0;
1964 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
1965 if (ELF32_R_TYPE (irel
->r_info
) == R_RX_RH_RELAX
1966 && irel
->r_addend
& RX_RELAXA_ALIGN
)
1968 int this_glue
= 1 << (irel
->r_addend
& RX_RELAXA_ANUM
);
1970 if (section_alignment_glue
< this_glue
)
1971 section_alignment_glue
= this_glue
;
1973 /* Worst case is all 0..N alignments, in order, causing 2*N-1 byte
1975 section_alignment_glue
*= 2;
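
  /* Worked example added for clarity (not part of the original
     source): if the largest this_glue found in the loop above is 8,
     section_alignment_glue becomes 16 after the doubling, and the
     branch relaxations below keep that much slack when they compare
     displacements against the encodable ranges.  */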
1977 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
1979 unsigned char *insn
;
1982 /* The insns we care about are all marked with one of these. */
1983 if (ELF32_R_TYPE (irel
->r_info
) != R_RX_RH_RELAX
)
1986 if (irel
->r_addend
& RX_RELAXA_ALIGN
1987 || next_alignment
== internal_relocs
)
1989 /* When we delete bytes, we need to maintain all the alignments
1990 indicated. In addition, we need to be careful about relaxing
1991 jumps across alignment boundaries - these displacements
1992 *grow* when we delete bytes. For now, don't shrink
1993 displacements across an alignment boundary, just in case.
1994 Note that this only affects relocations to the same
1996 prev_alignment
= next_alignment
;
1997 next_alignment
+= 2;
1998 while (next_alignment
< irelend
1999 && (ELF32_R_TYPE (next_alignment
->r_info
) != R_RX_RH_RELAX
2000 || !(next_alignment
->r_addend
& RX_RELAXA_ELIGN
)))
2002 if (next_alignment
>= irelend
|| next_alignment
->r_offset
== 0)
2003 next_alignment
= NULL
;
2006 /* When we hit alignment markers, see if we've shrunk enough
2007 before them to reduce the gap without violating the alignment
2009 if (irel
->r_addend
& RX_RELAXA_ALIGN
)
2011 /* At this point, the next relocation *should* be the ELIGN
2013 Elf_Internal_Rela
*erel
= irel
+ 1;
2014 unsigned int alignment
, nbytes
;
2016 if (ELF32_R_TYPE (erel
->r_info
) != R_RX_RH_RELAX
)
2018 if (!(erel
->r_addend
& RX_RELAXA_ELIGN
))
2021 alignment
= 1 << (irel
->r_addend
& RX_RELAXA_ANUM
);
2023 if (erel
->r_offset
- irel
->r_offset
< alignment
)
2026 nbytes
= erel
->r_offset
- irel
->r_offset
;
2027 nbytes
/= alignment
;
2028 nbytes
*= alignment
;
2030 elf32_rx_relax_delete_bytes (abfd
, sec
, erel
->r_offset
-nbytes
, nbytes
, next_alignment
,
2031 erel
->r_offset
== sec
->size
);
2037 if (irel
->r_addend
& RX_RELAXA_ELIGN
)
2040 insn
= contents
+ irel
->r_offset
;
2042 nrelocs
= irel
->r_addend
& RX_RELAXA_RNUM
;
2044 /* At this point, we have an insn that is a candidate for linker
2045 relaxation. There are NRELOCS relocs following that may be
2046 relaxed, although each reloc may be made of more than one
2047 reloc entry (such as gp-rel symbols). */
2049 /* Get the value of the symbol referred to by the reloc. Just
2050 in case this is the last reloc in the list, use the RL's
2051 addend to choose between this reloc (no addend) or the next
2052 (yes addend, which means at least one following reloc). */
      /* srel points to the "current" relocation for this insn -
	 actually the last reloc for a given operand, which is the one
	 we need to update.  We check the relaxations in the same
	 order that the relocations happen, so we'll just push it
2061 pc
= sec
->output_section
->vma
+ sec
->output_offset
2065 symval = OFFSET_FOR_RELOC (srel, &srel, &scale); \
2066 pcrel = symval - pc + srel->r_addend; \
2069 #define SNIPNR(offset, nbytes) \
2070 elf32_rx_relax_delete_bytes (abfd, sec, (insn - contents) + offset, nbytes, next_alignment, 0);
2071 #define SNIP(offset, nbytes, newtype) \
2072 SNIPNR (offset, nbytes); \
2073 srel->r_info = ELF32_R_INFO (ELF32_R_SYM (srel->r_info), newtype)
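
/* Note added for clarity (not part of the original source): SNIPNR
   deletes NBYTES bytes located OFFSET bytes into the insn being
   relaxed, and SNIP additionally retypes the operand's reloc to
   NEWTYPE; e.g. SNIP (3, 1, newrel) drops one operand byte and narrows
   srel to the next smaller relocation, as in the DSP relaxations
   below.  */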
2075 /* The order of these bit tests must match the order that the
2076 relocs appear in. Since we sorted those by offset, we can
2079 /* Note that the numbers in, say, DSP6 are the bit offsets of
2080 the code fields that describe the operand. Bits number 0 for
2081 the MSB of insn[0]. */
2088 if (irel
->r_addend
& RX_RELAXA_DSP6
)
2093 if (code
== 2 && symval
/scale
<= 255)
2095 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2098 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2099 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2101 SNIP (3, 1, newrel
);
2106 else if (code
== 1 && symval
== 0)
2109 SNIP (2, 1, R_RX_NONE
);
2113 /* Special case DSP:5 format: MOV.bwl dsp:5[Rsrc],Rdst. */
2114 else if (code
== 1 && symval
/scale
<= 31
2115 /* Decodable bits. */
2116 && (insn
[0] & 0xcc) == 0xcc
2118 && (insn
[0] & 0x30) != 3
2119 /* Register MSBs. */
2120 && (insn
[1] & 0x88) == 0x00)
2124 insn
[0] = 0x88 | (insn
[0] & 0x30);
2125 /* The register fields are in the right place already. */
2127 /* We can't relax this new opcode. */
2130 switch ((insn
[0] & 0x30) >> 4)
2133 newrel
= R_RX_RH_ABS5p5B
;
2136 newrel
= R_RX_RH_ABS5p5W
;
2139 newrel
= R_RX_RH_ABS5p5L
;
2143 move_reloc (irel
, srel
, -2);
2144 SNIP (2, 1, newrel
);
2147 /* Special case DSP:5 format: MOVU.bw dsp:5[Rsrc],Rdst. */
2148 else if (code
== 1 && symval
/scale
<= 31
2149 /* Decodable bits. */
2150 && (insn
[0] & 0xf8) == 0x58
2151 /* Register MSBs. */
2152 && (insn
[1] & 0x88) == 0x00)
2156 insn
[0] = 0xb0 | ((insn
[0] & 0x04) << 1);
2157 /* The register fields are in the right place already. */
2159 /* We can't relax this new opcode. */
2162 switch ((insn
[0] & 0x08) >> 3)
2165 newrel
= R_RX_RH_ABS5p5B
;
2168 newrel
= R_RX_RH_ABS5p5W
;
2172 move_reloc (irel
, srel
, -2);
2173 SNIP (2, 1, newrel
);
2177 /* A DSP4 operand always follows a DSP6 operand, even if there's
2178 no relocation for it. We have to read the code out of the
2179 opcode to calculate the offset of the operand. */
2180 if (irel
->r_addend
& RX_RELAXA_DSP4
)
2182 int code6
, offset
= 0;
2186 code6
= insn
[0] & 0x03;
2189 case 0: offset
= 2; break;
2190 case 1: offset
= 3; break;
2191 case 2: offset
= 4; break;
2192 case 3: offset
= 2; break;
2195 code
= (insn
[0] & 0x0c) >> 2;
2197 if (code
== 2 && symval
/ scale
<= 255)
2199 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2203 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2204 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2206 SNIP (offset
+1, 1, newrel
);
2211 else if (code
== 1 && symval
== 0)
2214 SNIP (offset
, 1, R_RX_NONE
);
2217 /* Special case DSP:5 format: MOV.bwl Rsrc,dsp:5[Rdst] */
2218 else if (code
== 1 && symval
/scale
<= 31
2219 /* Decodable bits. */
2220 && (insn
[0] & 0xc3) == 0xc3
2222 && (insn
[0] & 0x30) != 3
2223 /* Register MSBs. */
2224 && (insn
[1] & 0x88) == 0x00)
2228 insn
[0] = 0x80 | (insn
[0] & 0x30);
2229 /* The register fields are in the right place already. */
2231 /* We can't relax this new opcode. */
2234 switch ((insn
[0] & 0x30) >> 4)
2237 newrel
= R_RX_RH_ABS5p5B
;
2240 newrel
= R_RX_RH_ABS5p5W
;
2243 newrel
= R_RX_RH_ABS5p5L
;
2247 move_reloc (irel
, srel
, -2);
2248 SNIP (2, 1, newrel
);
2252 /* These always occur alone, but the offset depends on whether
2253 it's a MEMEX opcode (0x06) or not. */
2254 if (irel
->r_addend
& RX_RELAXA_DSP14
)
2259 if (insn
[0] == 0x06)
2266 if (code
== 2 && symval
/ scale
<= 255)
2268 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2272 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2273 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2275 SNIP (offset
, 1, newrel
);
2279 else if (code
== 1 && symval
== 0)
2282 SNIP (offset
, 1, R_RX_NONE
);
2293 /* These always occur alone. */
2294 if (irel
->r_addend
& RX_RELAXA_IMM6
)
2300 /* These relocations sign-extend, so we must do signed compares. */
2301 ssymval
= (long) symval
;
2303 code
= insn
[0] & 0x03;
2305 if (code
== 0 && ssymval
<= 8388607 && ssymval
>= -8388608)
2307 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2311 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2312 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2314 SNIP (2, 1, newrel
);
2319 else if (code
== 3 && ssymval
<= 32767 && ssymval
>= -32768)
2321 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2325 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2326 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2328 SNIP (2, 1, newrel
);
2333 /* Special case UIMM8 format: CMP #uimm8,Rdst. */
2334 else if (code
== 2 && ssymval
<= 255 && ssymval
>= 16
2335 /* Decodable bits. */
2336 && (insn
[0] & 0xfc) == 0x74
2337 /* Decodable bits. */
2338 && ((insn
[1] & 0xf0) == 0x00))
2343 insn
[1] = 0x50 | (insn
[1] & 0x0f);
2345 /* We can't relax this new opcode. */
2348 if (STACK_REL_P (ELF32_R_TYPE (srel
->r_info
)))
2349 newrel
= R_RX_ABS8U
;
2351 newrel
= R_RX_DIR8U
;
2353 SNIP (2, 1, newrel
);
2357 else if (code
== 2 && ssymval
<= 127 && ssymval
>= -128)
2359 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2363 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2364 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2366 SNIP (2, 1, newrel
);
2371 /* Special case UIMM4 format: CMP, MUL, AND, OR. */
2372 else if (code
== 1 && ssymval
<= 15 && ssymval
>= 0
2373 /* Decodable bits and immediate type. */
2375 /* Decodable bits. */
2376 && (insn
[1] & 0xc0) == 0x00)
2378 static const int newop
[4] = { 1, 3, 4, 5 };
2380 insn
[0] = 0x60 | newop
[insn
[1] >> 4];
2381 /* The register number doesn't move. */
2383 /* We can't relax this new opcode. */
2386 move_reloc (irel
, srel
, -1);
2388 SNIP (2, 1, R_RX_RH_UIMM4p8
);
2392 /* Special case UIMM4 format: ADD -> ADD/SUB. */
2393 else if (code
== 1 && ssymval
<= 15 && ssymval
>= -15
2394 /* Decodable bits and immediate type. */
2396 /* Same register for source and destination. */
2397 && ((insn
[1] >> 4) == (insn
[1] & 0x0f)))
2401 /* Note that we can't turn "add $0,Rs" into a NOP
2402 because the flags need to be set right. */
2406 insn
[0] = 0x60; /* Subtract. */
2407 newrel
= R_RX_RH_UNEG4p8
;
2411 insn
[0] = 0x62; /* Add. */
2412 newrel
= R_RX_RH_UIMM4p8
;
2415 /* The register number is in the right place. */
2417 /* We can't relax this new opcode. */
2420 move_reloc (irel
, srel
, -1);
2422 SNIP (2, 1, newrel
);
2427 /* These are either matched with a DSP6 (2-byte base) or an id24
2429 if (irel
->r_addend
& RX_RELAXA_IMM12
)
2431 int dspcode
, offset
= 0;
2436 if ((insn
[0] & 0xfc) == 0xfc)
2437 dspcode
= 1; /* Just something with one byte operand. */
2439 dspcode
= insn
[0] & 3;
2442 case 0: offset
= 2; break;
2443 case 1: offset
= 3; break;
2444 case 2: offset
= 4; break;
2445 case 3: offset
= 2; break;
2448 /* These relocations sign-extend, so we must do signed compares. */
2449 ssymval
= (long) symval
;
2451 code
= (insn
[1] >> 2) & 3;
2452 if (code
== 0 && ssymval
<= 8388607 && ssymval
>= -8388608)
2454 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2458 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2459 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2461 SNIP (offset
, 1, newrel
);
2466 else if (code
== 3 && ssymval
<= 32767 && ssymval
>= -32768)
2468 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2472 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2473 if (newrel
!= ELF32_R_TYPE (srel
->r_info
))
2475 SNIP (offset
, 1, newrel
);
2480 /* Special case UIMM8 format: MOV #uimm8,Rdst. */
2481 else if (code
== 2 && ssymval
<= 255 && ssymval
>= 16
2482 /* Decodable bits. */
2484 /* Decodable bits. */
2485 && ((insn
[1] & 0x03) == 0x02))
2490 insn
[1] = 0x40 | (insn
[1] >> 4);
2492 /* We can't relax this new opcode. */
2495 if (STACK_REL_P (ELF32_R_TYPE (srel
->r_info
)))
2496 newrel
= R_RX_ABS8U
;
2498 newrel
= R_RX_DIR8U
;
2500 SNIP (2, 1, newrel
);
2504 else if (code
== 2 && ssymval
<= 127 && ssymval
>= -128)
2506 unsigned int newrel
= ELF32_R_TYPE(srel
->r_info
);
2510 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2511 if (newrel
!= ELF32_R_TYPE(srel
->r_info
))
2513 SNIP (offset
, 1, newrel
);
2518 /* Special case UIMM4 format: MOV #uimm4,Rdst. */
2519 else if (code
== 1 && ssymval
<= 15 && ssymval
>= 0
2520 /* Decodable bits. */
2522 /* Decodable bits. */
2523 && ((insn
[1] & 0x03) == 0x02))
2526 insn
[1] = insn
[1] >> 4;
2528 /* We can't relax this new opcode. */
2531 move_reloc (irel
, srel
, -1);
2533 SNIP (2, 1, R_RX_RH_UIMM4p8
);
2538 if (irel
->r_addend
& RX_RELAXA_BRA
)
2540 unsigned int newrel
= ELF32_R_TYPE (srel
->r_info
);
2542 int alignment_glue
= 0;
	  /* Branches over alignment chunks are problematic, as
	     deleting bytes here makes the branch *further* away.  We
	     can be aggressive with branches within this alignment
	     block, but not branches outside it.  */
2550 if ((prev_alignment
== NULL
2551 || symval
< (bfd_vma
)(sec_start
+ prev_alignment
->r_offset
))
2552 && (next_alignment
== NULL
2553 || symval
> (bfd_vma
)(sec_start
+ next_alignment
->r_offset
)))
2554 alignment_glue
= section_alignment_glue
;
2556 if (ELF32_R_TYPE(srel
[1].r_info
) == R_RX_RH_RELAX
2557 && srel
[1].r_addend
& RX_RELAXA_BRA
2558 && srel
[1].r_offset
< irel
->r_offset
+ pcrel
)
2561 newrel
= next_smaller_reloc (ELF32_R_TYPE (srel
->r_info
));
2563 /* The values we compare PCREL with are not what you'd
2564 expect; they're off by a little to compensate for (1)
2565 where the reloc is relative to the insn, and (2) how much
2566 the insn is going to change when we relax it. */
2568 /* These we have to decode. */
2571 case 0x04: /* BRA pcdsp:24 */
2572 if (-32768 + alignment_glue
<= pcrel
2573 && pcrel
<= 32765 - alignment_glue
)
2576 SNIP (3, 1, newrel
);
2581 case 0x38: /* BRA pcdsp:16 */
2582 if (-128 + alignment_glue
<= pcrel
2583 && pcrel
<= 127 - alignment_glue
)
2586 SNIP (2, 1, newrel
);
2591 case 0x2e: /* BRA pcdsp:8 */
2592 /* Note that there's a risk here of shortening things so
2593 much that we no longer fit this reloc; it *should*
2594 only happen when you branch across a branch, and that
2595 branch also devolves into BRA.S. "Real" code should
2597 if (max_pcrel3
+ alignment_glue
<= pcrel
2598 && pcrel
<= 10 - alignment_glue
2602 SNIP (1, 1, newrel
);
2603 move_reloc (irel
, srel
, -1);
2608 case 0x05: /* BSR pcdsp:24 */
2609 if (-32768 + alignment_glue
<= pcrel
2610 && pcrel
<= 32765 - alignment_glue
)
2613 SNIP (1, 1, newrel
);
2618 case 0x3a: /* BEQ.W pcdsp:16 */
2619 case 0x3b: /* BNE.W pcdsp:16 */
2620 if (-128 + alignment_glue
<= pcrel
2621 && pcrel
<= 127 - alignment_glue
)
2623 insn
[0] = 0x20 | (insn
[0] & 1);
2624 SNIP (1, 1, newrel
);
2629 case 0x20: /* BEQ.B pcdsp:8 */
2630 case 0x21: /* BNE.B pcdsp:8 */
2631 if (max_pcrel3
+ alignment_glue
<= pcrel
2632 && pcrel
- alignment_glue
<= 10
2635 insn
[0] = 0x10 | ((insn
[0] & 1) << 3);
2636 SNIP (1, 1, newrel
);
2637 move_reloc (irel
, srel
, -1);
2642 case 0x16: /* synthetic BNE dsp24 */
2643 case 0x1e: /* synthetic BEQ dsp24 */
2644 if (-32767 + alignment_glue
<= pcrel
2645 && pcrel
<= 32766 - alignment_glue
2648 if (insn
[0] == 0x16)
2652 /* We snip out the bytes at the end else the reloc
2653 will get moved too, and too much. */
2654 SNIP (3, 2, newrel
);
2655 move_reloc (irel
, srel
, -1);
2661 /* Special case - synthetic conditional branches, pcrel24.
2662 Note that EQ and NE have been handled above. */
2663 if ((insn
[0] & 0xf0) == 0x20
2666 && srel
->r_offset
!= irel
->r_offset
+ 1
2667 && -32767 + alignment_glue
<= pcrel
2668 && pcrel
<= 32766 - alignment_glue
)
2672 SNIP (5, 1, newrel
);
2676 /* Special case - synthetic conditional branches, pcrel16 */
2677 if ((insn
[0] & 0xf0) == 0x20
2680 && srel
->r_offset
!= irel
->r_offset
+ 1
2681 && -127 + alignment_glue
<= pcrel
2682 && pcrel
<= 126 - alignment_glue
)
2684 int cond
= (insn
[0] & 0x0f) ^ 0x01;
2686 insn
[0] = 0x20 | cond
;
2687 /* By moving the reloc first, we avoid having
2688 delete_bytes move it also. */
2689 move_reloc (irel
, srel
, -2);
2690 SNIP (2, 3, newrel
);
2695 BFD_ASSERT (nrelocs
== 0);
2697 /* Special case - check MOV.bwl #IMM, dsp[reg] and see if we can
2698 use MOV.bwl #uimm:8, dsp:5[r7] format. This is tricky
2699 because it may have one or two relocations. */
2700 if ((insn
[0] & 0xfc) == 0xf8
2701 && (insn
[1] & 0x80) == 0x00
2702 && (insn
[0] & 0x03) != 0x03)
2704 int dcode
, icode
, reg
, ioff
, dscale
, ilen
;
2705 bfd_vma disp_val
= 0;
2707 Elf_Internal_Rela
* disp_rel
= 0;
2708 Elf_Internal_Rela
* imm_rel
= 0;
2713 dcode
= insn
[0] & 0x03;
2714 icode
= (insn
[1] >> 2) & 0x03;
2715 reg
= (insn
[1] >> 4) & 0x0f;
2717 ioff
= dcode
== 1 ? 3 : dcode
== 2 ? 4 : 2;
	  /* Figure out what the displacement is.  */
2720 if (dcode
== 1 || dcode
== 2)
2722 /* There's a displacement. See if there's a reloc for it. */
2723 if (srel
[1].r_offset
== irel
->r_offset
+ 2)
2735 #if RX_OPCODE_BIG_ENDIAN
2736 disp_val
= insn
[2] * 256 + insn
[3];
2738 disp_val
= insn
[2] + insn
[3] * 256;
2741 switch (insn
[1] & 3)
      /* Figure out what the immediate is.  */
      if (srel[1].r_offset == irel->r_offset + ioff)
        {
          imm_rel = srel + 1;
          imm_val = (long) symval;
        }
      else
        {
          unsigned char * ip = insn + ioff;

          /* For byte writes, we don't sign extend.  Makes the math easier later.  */
          switch (icode)
            {
            case 1:
              imm_val = (char) ip[0];
              break;
            case 2:
#if RX_OPCODE_BIG_ENDIAN
              imm_val = ((char) ip[0] << 8) | ip[1];
#else
              imm_val = ((char) ip[1] << 8) | ip[0];
#endif
              break;
            case 3:
#if RX_OPCODE_BIG_ENDIAN
              imm_val = ((char) ip[0] << 16) | (ip[1] << 8) | ip[2];
#else
              imm_val = ((char) ip[2] << 16) | (ip[1] << 8) | ip[0];
#endif
              break;
            case 0:
#if RX_OPCODE_BIG_ENDIAN
              imm_val = (ip[0] << 24) | (ip[1] << 16) | (ip[2] << 8) | ip[3];
#else
              imm_val = (ip[3] << 24) | (ip[2] << 16) | (ip[1] << 8) | ip[0];
#endif
              break;
            }
        }
      /* The shortcut happens when the immediate is 0..255,
         register r0 to r7, and displacement (scaled) 0..31.  */
      if (0 <= imm_val && imm_val <= 255
          && 0 <= reg && reg <= 7
          && disp_val / dscale <= 31)
        {
          insn[0] = 0x3c | (insn[1] & 0x03);
          insn[1] = (((disp_val / dscale) << 3) & 0x80) | (reg << 4) | ((disp_val / dscale) & 0x0f);
          insn[2] = imm_val;

          if (disp_rel)
            {
              int newrel = R_RX_NONE;

              switch (insn[0] & 3)
                {
                case 0:
                  newrel = R_RX_RH_ABS5p8B;
                  break;
                case 1:
                  newrel = R_RX_RH_ABS5p8W;
                  break;
                case 2:
                  newrel = R_RX_RH_ABS5p8L;
                  break;
                }

              disp_rel->r_info = ELF32_R_INFO (ELF32_R_SYM (disp_rel->r_info), newrel);
              move_reloc (irel, disp_rel, -1);
            }

          if (imm_rel)
            {
              imm_rel->r_info = ELF32_R_INFO (ELF32_R_SYM (imm_rel->r_info), R_RX_DIR8U);
              move_reloc (disp_rel ? disp_rel : irel,
                          imm_rel,
                          irel->r_offset - imm_rel->r_offset + 2);
            }

          SNIPNR (3, ilen - 3);

          /* We can't relax this new opcode.  */
        }
    }
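  /* To give a feel for the bounds above: assuming dscale is 4 for a
     .L operand, a byte displacement of up to 124 still scales into
     the 5-bit field (124 / 4 == 31); 0..255 is the unsigned 8-bit
     immediate; and only r0..r7 fit the short register field.  */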
  /* We can't reliably relax branches to DIR3U_PCREL unless we know
     whatever they're branching over won't shrink any more.  If we're
     basically done here, do one more pass just for branches - but
     don't request a pass after that one!  */
  if (!*again && !allow_pcrel3)
    {
      bfd_boolean ignored;

      elf32_rx_relax_section (abfd, sec, link_info, &ignored, TRUE);
    }
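  /* That extra call passes TRUE for allow_pcrel3, so the 3-bit
     pc-relative forms may now be used, and it passes &ignored for
     "again", so whatever it finds cannot schedule yet another global
     relaxation pass.  */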
  if (free_relocs != NULL)
    free (free_relocs);

  if (free_contents != NULL)
    free (free_contents);

  if (shndx_buf != NULL)
    {
      shndx_hdr->contents = NULL;
      free (shndx_buf);
    }

  if (free_intsyms != NULL)
    free (free_intsyms);

  return TRUE;
}

static bfd_boolean
elf32_rx_relax_section_wrapper (bfd * abfd,
                                asection * sec,
                                struct bfd_link_info * link_info,
                                bfd_boolean * again)
{
  return elf32_rx_relax_section (abfd, sec, link_info, again, FALSE);
}

/* Function to set the ELF flag bits.  */

static bfd_boolean
rx_elf_set_private_flags (bfd * abfd, flagword flags)
{
  elf_elfheader (abfd)->e_flags = flags;
  elf_flags_init (abfd) = TRUE;
  return TRUE;
}

static bfd_boolean no_warn_mismatch = FALSE;
static bfd_boolean ignore_lma = TRUE;

void bfd_elf32_rx_set_target_flags (bfd_boolean, bfd_boolean);

void
bfd_elf32_rx_set_target_flags (bfd_boolean user_no_warn_mismatch,
                               bfd_boolean user_ignore_lma)
{
  no_warn_mismatch = user_no_warn_mismatch;
  ignore_lma = user_ignore_lma;
}
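/* Note that these are global toggles rather than per-BFD state; the
   linker front end is expected to call bfd_elf32_rx_set_target_flags
   once, before any merging happens, so that the checks below see a
   consistent setting.  */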
/* Merge backend specific data from an object file to the output
   object file when linking.  */

static bfd_boolean
rx_elf_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
{
  flagword old_flags;
  flagword new_flags;
  bfd_boolean error = FALSE;

  new_flags = elf_elfheader (ibfd)->e_flags;
  old_flags = elf_elfheader (obfd)->e_flags;

  if (!elf_flags_init (obfd))
    {
      /* First call, no flags set.  */
      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = new_flags;
    }
  else if (old_flags != new_flags)
    {
      flagword known_flags = E_FLAG_RX_64BIT_DOUBLES | E_FLAG_RX_DSP | E_FLAG_RX_PID;

      if ((old_flags ^ new_flags) & known_flags)
        {
          /* Only complain if flag bits we care about do not match.
             Other bits may be set, since older binaries did use some
             deprecated flags.  */
          if (no_warn_mismatch)
            {
              elf_elfheader (obfd)->e_flags = (new_flags | old_flags) & known_flags;
            }
          else
            {
              (*_bfd_error_handler)
                ("ELF header flags mismatch: old_flags = 0x%.8lx, new_flags = 0x%.8lx, filename = %s",
                 old_flags, new_flags, bfd_get_filename (ibfd));
              error = TRUE;
            }
        }
      else
        elf_elfheader (obfd)->e_flags = new_flags & known_flags;
    }

  if (error)
    bfd_set_error (bfd_error_bad_value);

  return !error;
}
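/* For example, an object built for 64-bit doubles linked against one
   built for 32-bit doubles differs in E_FLAG_RX_64BIT_DOUBLES: by
   default that is reported as an error, while with no_warn_mismatch
   the known flag bits are silently merged instead.  */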
static bfd_boolean
rx_elf_print_private_bfd_data (bfd * abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  flagword flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  fprintf (file, _("private flags = 0x%lx:"), (long) flags);

  if (flags & E_FLAG_RX_64BIT_DOUBLES)
    fprintf (file, _(" [64-bit doubles]"));
  if (flags & E_FLAG_RX_DSP)
    fprintf (file, _(" [dsp]"));

  fprintf (file, "\n");
  return TRUE;
}

/* Return the MACH for an e_flags value.  */

static int
elf32_rx_machine (bfd * abfd)
{
  if ((elf_elfheader (abfd)->e_flags & EF_RX_CPU_MASK) == EF_RX_CPU_RX)
    return bfd_mach_rx;

  return 0;
}

static bfd_boolean
rx_elf_object_p (bfd * abfd)
{
  int i;
  unsigned int u;
  Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
  int nphdrs = elf_elfheader (abfd)->e_phnum;
  asection *bsec;
  static int saw_be = FALSE;

  /* We never want to automatically choose the non-swapping big-endian
     target.  The user can only get that explicitly, such as with
     objcopy's -I elf32-rx-be-ns.  */
  if (abfd->xvec == &bfd_elf32_rx_be_ns_vec
      && abfd->target_defaulted)
    return FALSE;

  /* BFD->target_defaulted is not set to TRUE when a target is chosen
     as a fallback, so we check for "scanning" to know when to stop
     using the non-swapping target.  */
  if (abfd->xvec == &bfd_elf32_rx_be_ns_vec
      && saw_be)
    return FALSE;
  if (abfd->xvec == &bfd_elf32_rx_be_vec)
    saw_be = TRUE;

  bfd_default_set_arch_mach (abfd, bfd_arch_rx,
                             elf32_rx_machine (abfd));

  /* For each PHDR in the object, we must find some section that
     corresponds (based on matching file offsets) and use its VMA
     information to reconstruct the p_vaddr field we clobbered when we
     wrote it out.  */
  for (i = 0; i < nphdrs; i++)
    {
      for (u = 0; u < elf_tdata (abfd)->num_elf_sections; u++)
        {
          Elf_Internal_Shdr *sec = elf_tdata (abfd)->elf_sect_ptr[u];

          if (phdr[i].p_offset <= (bfd_vma) sec->sh_offset
              && (bfd_vma) sec->sh_offset <= phdr[i].p_offset + (phdr[i].p_filesz - 1))
            {
              /* Found one!  The difference between the two addresses,
                 plus the difference between the two file offsets, is
                 enough information to reconstruct the lma.  */

              /* Example where they aren't:
                 PHDR[1] = lma fffc0100 offset 00002010 size 00000100
                 SEC[6]  = vma 00000050 offset 00002050 size 00000040

                 The correct LMA for the section is fffc0140 + (2050-2010).  */

              phdr[i].p_vaddr = sec->sh_addr + (sec->sh_offset - phdr[i].p_offset);
              break;
            }
        }

      /* We must update the bfd sections as well, so we don't stop
         at the program headers.  */
      bsec = abfd->sections;
      while (bsec)
        {
          if (phdr[i].p_vaddr <= bsec->vma
              && bsec->vma <= phdr[i].p_vaddr + (phdr[i].p_filesz - 1))
            bsec->lma = phdr[i].p_paddr + (bsec->vma - phdr[i].p_vaddr);

          bsec = bsec->next;
        }
    }

  return TRUE;
}

void
rx_dump_symtab (bfd * abfd, void * internal_syms, void * external_syms)
{
  size_t locsymcount;
  Elf_Internal_Sym * isymbuf;
  Elf_Internal_Sym * isymend;
  Elf_Internal_Sym * isym;
  Elf_Internal_Shdr * symtab_hdr;
  bfd_boolean free_internal = FALSE, free_external = FALSE;
  char * st_info_str;
  char * st_info_stb_str;
  char * st_other_str;
  char * st_shndx_str;

  if (! internal_syms)
    {
      internal_syms = bfd_malloc (1000);
      free_internal = TRUE;
    }
  if (! external_syms)
    {
      external_syms = bfd_malloc (1000);
      free_external = TRUE;
    }

  symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
  locsymcount = symtab_hdr->sh_size / get_elf_backend_data (abfd)->s->sizeof_sym;
  if (free_internal)
    isymbuf = bfd_elf_get_elf_syms (abfd, symtab_hdr,
                                    symtab_hdr->sh_info, 0,
                                    internal_syms, external_syms, NULL);
  else
    isymbuf = internal_syms;
  isymend = isymbuf + locsymcount;

  for (isym = isymbuf; isym < isymend; isym++)
    {
      switch (ELF_ST_TYPE (isym->st_info))
        {
        case STT_FUNC:    st_info_str = "STT_FUNC"; break;
        case STT_SECTION: st_info_str = "STT_SECTION"; break;
        case STT_FILE:    st_info_str = "STT_FILE"; break;
        case STT_OBJECT:  st_info_str = "STT_OBJECT"; break;
        case STT_TLS:     st_info_str = "STT_TLS"; break;
        default:          st_info_str = "";
        }
      switch (ELF_ST_BIND (isym->st_info))
        {
        case STB_LOCAL:  st_info_stb_str = "STB_LOCAL"; break;
        case STB_GLOBAL: st_info_stb_str = "STB_GLOBAL"; break;
        default:         st_info_stb_str = "";
        }
      switch (ELF_ST_VISIBILITY (isym->st_other))
        {
        case STV_DEFAULT:   st_other_str = "STV_DEFAULT"; break;
        case STV_INTERNAL:  st_other_str = "STV_INTERNAL"; break;
        case STV_PROTECTED: st_other_str = "STV_PROTECTED"; break;
        default:            st_other_str = "";
        }
      switch (isym->st_shndx)
        {
        case SHN_ABS:    st_shndx_str = "SHN_ABS"; break;
        case SHN_COMMON: st_shndx_str = "SHN_COMMON"; break;
        case SHN_UNDEF:  st_shndx_str = "SHN_UNDEF"; break;
        default:         st_shndx_str = "";
        }

      printf ("isym = %p st_value = %lx st_size = %lx st_name = (%lu) %s "
              "st_info = (%d) %s %s st_other = (%d) %s st_shndx = (%d) %s\n",
              isym,
              (unsigned long) isym->st_value,
              (unsigned long) isym->st_size,
              (unsigned long) isym->st_name,
              bfd_elf_string_from_elf_section (abfd, symtab_hdr->sh_link,
                                               isym->st_name),
              isym->st_info, st_info_str, st_info_stb_str,
              isym->st_other, st_other_str,
              isym->st_shndx, st_shndx_str);
    }

  if (free_internal)
    free (internal_syms);
  if (free_external)
    free (external_syms);
}

char *
rx_get_reloc (long reloc)
{
  if (0 <= reloc && reloc < R_RX_max)
    return rx_elf_howto_table[reloc].name;
  return "";
}

/* We must take care to keep the on-disk copy of any code sections
   that are fully linked swapped if the target is big endian, to match
   the Renesas tools.  */

/* The rule is: big endian objects that are final-link executables
   have code sections stored with 32-bit words swapped relative to
   what you'd get by default.  */
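/* Concretely, the byte sequence 01 02 03 04 in the file is presented
   to the rest of BFD as 04 03 02 01; the bfd_putb32 (bfd_getl32 (...))
   pairs below implement exactly that swap, one aligned 32-bit word at
   a time.  */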
static bfd_boolean
rx_get_section_contents (bfd *         abfd,
                         asection *    section,
                         void *        location,
                         file_ptr      offset,
                         bfd_size_type count)
{
  int exec = (abfd->flags & EXEC_P) ? 1 : 0;
  int s_code = (section->flags & SEC_CODE) ? 1 : 0;
  bfd_boolean rv;

  fprintf (stderr, "dj: get %ld %ld from %s %s e%d sc%d %08lx:%08lx\n",
           (long) offset, (long) count, section->name,
           bfd_big_endian (abfd) ? "be" : "le",
           exec, s_code, (long unsigned) section->filepos,
           (long unsigned) offset);

  if (exec && s_code && bfd_big_endian (abfd))
    {
      char * cloc = (char *) location;
      bfd_size_type cnt, end_cnt;

      rv = TRUE;

      /* Fetch and swap unaligned bytes at the beginning.  */
      if (offset % 4)
        {
          char buf[4];

          rv = _bfd_generic_get_section_contents (abfd, section, buf,
                                                  offset - (offset % 4), 4);
          if (! rv)
            return FALSE;

          bfd_putb32 (bfd_getl32 (buf), buf);

          cnt = 4 - (offset % 4);
          if (cnt > count)
            cnt = count;

          memcpy (location, buf + (offset % 4), cnt);

          count -= cnt;
          offset += cnt;
          cloc += cnt;
        }

      end_cnt = count % 4;

      /* Fetch and swap the middle bytes.  */
      if (count >= 4)
        {
          rv = _bfd_generic_get_section_contents (abfd, section, cloc, offset,
                                                  count - end_cnt);
          if (! rv)
            return FALSE;

          for (cnt = count; cnt >= 4; cnt -= 4, cloc += 4)
            bfd_putb32 (bfd_getl32 (cloc), cloc);
        }

      /* Fetch and swap the end bytes.  */
      if (end_cnt > 0)
        {
          char buf[4];

          /* Fetch the end bytes.  */
          rv = _bfd_generic_get_section_contents (abfd, section, buf,
                                                  offset + count - end_cnt, 4);
          if (! rv)
            return FALSE;

          bfd_putb32 (bfd_getl32 (buf), buf);
          memcpy (cloc, buf, end_cnt);
        }
    }
  else
    rv = _bfd_generic_get_section_contents (abfd, section, location, offset, count);

  return rv;
}

static bfd_boolean
rx2_set_section_contents (bfd *         abfd,
                          asection *    section,
                          const void *  location,
                          file_ptr      offset,
                          bfd_size_type count)
{
  bfd_size_type i;

  fprintf (stderr, "   set sec %s %08x loc %p offset %#x count %#x\n",
           section->name, (unsigned) section->vma, location, (int) offset, (int) count);

  for (i = 0; i < count; i++)
    {
      if (i % 16 == 0 && i > 0)
        fprintf (stderr, "\n");

      if (i % 16 && i % 4 == 0)
        fprintf (stderr, " ");

      if (i % 16 == 0)
        fprintf (stderr, " %08x:", (int) (section->vma + offset + i));

      fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
    }
  fprintf (stderr, "\n");

  return _bfd_elf_set_section_contents (abfd, section, location, offset, count);
}
#define _bfd_elf_set_section_contents rx2_set_section_contents
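/* From here on, every reference to _bfd_elf_set_section_contents in
   this file resolves to the hex-dumping wrapper above, so the writes
   done by rx_set_section_contents below are traced as well.  */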
static bfd_boolean
rx_set_section_contents (bfd *         abfd,
                         asection *    section,
                         const void *  location,
                         file_ptr      offset,
                         bfd_size_type count)
{
  bfd_boolean exec = (abfd->flags & EXEC_P) ? TRUE : FALSE;
  bfd_boolean s_code = (section->flags & SEC_CODE) ? TRUE : FALSE;
  bfd_boolean rv;
  char * swapped_data = NULL;
  bfd_size_type i;
  bfd_vma caddr = section->vma + offset;
  file_ptr faddr = 0;
  bfd_size_type scount;

  fprintf (stderr, "\ndj: set %ld %ld to %s %s e%d sc%d\n",
           (long) offset, (long) count, section->name,
           bfd_big_endian (abfd) ? "be" : "le",
           exec, s_code);

  for (i = 0; i < count; i++)
    {
      int a = section->vma + offset + i;

      if (a % 16 == 0 && a > 0)
        fprintf (stderr, "\n");

      if (a % 16 && a % 4 == 0)
        fprintf (stderr, " ");

      if (a % 16 == 0 || i == 0)
        fprintf (stderr, " %08x:", (int) (section->vma + offset + i));

      fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
    }
  fprintf (stderr, "\n");

  if (! exec || ! s_code || ! bfd_big_endian (abfd))
    return _bfd_elf_set_section_contents (abfd, section, location, offset, count);

  /* Write any unaligned head bytes one at a time.  Within each aligned
     32-bit word the bytes are stored mirrored, so a byte whose address
     is k mod 4 lands 3 - 2*k bytes away from its nominal offset.  */
  while (count > 0 && caddr > 0 && caddr % 4)
    {
      switch (caddr % 4)
        {
        case 0: faddr = offset + 3; break;
        case 1: faddr = offset + 1; break;
        case 2: faddr = offset - 1; break;
        case 3: faddr = offset - 3; break;
        }

      rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
      if (! rv)
        return rv;

      location = (const char *) location + 1;
      offset ++;
      count --;
      caddr ++;
    }

  scount = (int)(count / 4) * 4;
  if (scount > 0)
    {
      char * cloc = (char *) location;

      swapped_data = (char *) bfd_alloc (abfd, count);

      for (i = 0; i < count; i += 4)
        {
          bfd_vma v = bfd_getl32 (cloc + i);
          bfd_putb32 (v, swapped_data + i);
        }

      rv = _bfd_elf_set_section_contents (abfd, section, swapped_data, offset, scount);
      if (! rv)
        return rv;
    }

  count -= scount;
  location = (const char *) location + scount;
  offset += scount;

  /* Write any unaligned tail bytes, again mirrored within their word.  */
  if (count > 0)
    {
      caddr = section->vma + offset;
      while (count > 0)
        {
          switch (caddr % 4)
            {
            case 0: faddr = offset + 3; break;
            case 1: faddr = offset + 1; break;
            case 2: faddr = offset - 1; break;
            case 3: faddr = offset - 3; break;
            }

          rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
          if (! rv)
            return rv;

          location = (const char *) location + 1;
          offset ++;
          count --;
          caddr ++;
        }
    }

  return TRUE;
}

static bfd_boolean
rx_final_link (bfd * abfd, struct bfd_link_info * info)
{
  asection * o;

  for (o = abfd->sections; o != NULL; o = o->next)
    {
      fprintf (stderr, "sec %s fl %x vma %lx lma %lx size %lx raw %lx\n",
               o->name, o->flags, o->vma, o->lma, o->size, o->rawsize);

      if (o->flags & SEC_CODE
          && bfd_big_endian (abfd)
          && o->size % 4)
        {
          fprintf (stderr, "adjusting...\n");
          o->size += 4 - (o->size % 4);
        }
    }

  return bfd_elf_final_link (abfd, info);
}
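/* That adjustment keeps final-linked big-endian code sections a whole
   number of 32-bit words: for example, a 10-byte code section is
   rounded up to 12 bytes before bfd_elf_final_link writes it out,
   which is what the word-swapping get/set hooks above expect.  */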
static bfd_boolean
elf32_rx_modify_program_headers (bfd * abfd ATTRIBUTE_UNUSED,
                                 struct bfd_link_info * info ATTRIBUTE_UNUSED)
{
  const struct elf_backend_data * bed;
  struct elf_obj_tdata * tdata;
  Elf_Internal_Phdr * phdr;
  unsigned int count;
  unsigned int i;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;

  if (ignore_lma)
    for (i = count; i-- != 0;)
      if (phdr[i].p_type == PT_LOAD)
        {
          /* The Renesas tools expect p_paddr to be zero.  However,
             there is no other way to store the writable data in ROM for
             startup initialization.  So, we let the linker *think*
             we're using paddr and vaddr the "usual" way, but at the
             last minute we move the paddr into the vaddr (which is what
             the simulator uses) and zero out paddr.  Note that this
             does not affect the section headers, just the program
             headers.  We hope.  */
          phdr[i].p_vaddr = phdr[i].p_paddr;
#if 0     /* If we zero out p_paddr, then the LMA in the section table
             becomes wrong.  */
          phdr[i].p_paddr = 0;
#endif
        }

  return TRUE;
}
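/* The net effect: for a loadable segment placed in ROM (LMA) but
   relocated to run from RAM (VMA), the emitted program header ends up
   with the ROM address in both p_vaddr and p_paddr, while the section
   headers keep the RAM addresses - which is what rx_elf_object_p
   above has to undo when the file is read back in.  */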
#define ELF_ARCH                bfd_arch_rx
#define ELF_MACHINE_CODE        EM_RX
#define ELF_MAXPAGESIZE         0x1000

#define TARGET_BIG_SYM          bfd_elf32_rx_be_vec
#define TARGET_BIG_NAME         "elf32-rx-be"

#define TARGET_LITTLE_SYM       bfd_elf32_rx_le_vec
#define TARGET_LITTLE_NAME      "elf32-rx-le"

#define elf_info_to_howto_rel                   NULL
#define elf_info_to_howto                       rx_info_to_howto_rela
#define elf_backend_object_p                    rx_elf_object_p
#define elf_backend_relocate_section            rx_elf_relocate_section
#define elf_symbol_leading_char                 ('_')
#define elf_backend_can_gc_sections             1
#define elf_backend_modify_program_headers      elf32_rx_modify_program_headers

#define bfd_elf32_bfd_reloc_type_lookup         rx_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup         rx_reloc_name_lookup
#define bfd_elf32_bfd_set_private_flags         rx_elf_set_private_flags
#define bfd_elf32_bfd_merge_private_bfd_data    rx_elf_merge_private_bfd_data
#define bfd_elf32_bfd_print_private_bfd_data    rx_elf_print_private_bfd_data
#define bfd_elf32_get_section_contents          rx_get_section_contents
#define bfd_elf32_set_section_contents          rx_set_section_contents
#define bfd_elf32_bfd_final_link                rx_final_link
#define bfd_elf32_bfd_relax_section             elf32_rx_relax_section_wrapper

#include "elf32-target.h"

/* We define a second big-endian target that doesn't have the custom
   section get/set hooks, for times when we want to preserve the
   pre-swapped .text sections (like objcopy).  */

#undef  TARGET_BIG_SYM
#define TARGET_BIG_SYM          bfd_elf32_rx_be_ns_vec
#undef  TARGET_BIG_NAME
#define TARGET_BIG_NAME         "elf32-rx-be-ns"
#undef  TARGET_LITTLE_SYM

#undef bfd_elf32_get_section_contents
#undef bfd_elf32_set_section_contents

#undef  elf32_bed
#define elf32_bed               elf32_rx_be_ns_bed

#include "elf32-target.h"