/* BFD back-end for Renesas Super-H COFF binaries.
   Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
   2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011, 2012
   Free Software Foundation, Inc.
   Contributed by Cygnus Support.
   Written by Steve Chamberlain, <sac@cygnus.com>.
   Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */
#include "sysdep.h"
#include "bfd.h"
#include "libiberty.h"
#include "libbfd.h"
#include "bfdlink.h"
#include "coff/sh.h"
#include "coff/internal.h"

#undef bfd_pe_print_pdata

#ifdef COFF_WITH_PE
#include "coff/pe.h"

#ifndef COFF_IMAGE_WITH_PE
static bfd_boolean sh_align_load_span
  (bfd *, asection *, bfd_byte *,
   bfd_boolean (*) (bfd *, asection *, void *, bfd_byte *, bfd_vma),
   void *, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bfd_boolean *);

#define _bfd_sh_align_load_span sh_align_load_span
#endif

#define bfd_pe_print_pdata   _bfd_pe_print_ce_compressed_pdata

#else

#define bfd_pe_print_pdata   NULL

#endif /* COFF_WITH_PE.  */

#include "libcoff.h"
/* Internal functions.  */

#ifdef COFF_WITH_PE
/* Can't build import tables with 2**4 alignment.  */
#define COFF_DEFAULT_SECTION_ALIGNMENT_POWER  2
#else
/* Default section alignment to 2**4.  */
#define COFF_DEFAULT_SECTION_ALIGNMENT_POWER  4
#endif

#ifdef COFF_IMAGE_WITH_PE
/* Align PE executables.  */
#define COFF_PAGE_SIZE 0x1000
#endif

/* Generate long file names.  */
#define COFF_LONG_FILENAMES

#ifdef COFF_WITH_PE
/* Return TRUE if this relocation should
   appear in the output .reloc section.  */

static bfd_boolean
in_reloc_p (bfd * abfd ATTRIBUTE_UNUSED,
            reloc_howto_type * howto)
{
  return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
}
#endif

static bfd_reloc_status_type
sh_reloc (bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
static bfd_boolean
sh_relocate_section (bfd *, struct bfd_link_info *, bfd *, asection *,
                     bfd_byte *, struct internal_reloc *,
                     struct internal_syment *, asection **);
static bfd_boolean
sh_align_loads (bfd *, asection *, struct internal_reloc *,
                bfd_byte *, bfd_boolean *);

/* The supported relocations.  There are a lot of relocations defined
   in coff/internal.h which we do not expect to ever see.  */
static reloc_howto_type sh_coff_howtos[] =
{
  EMPTY_HOWTO (0),
  EMPTY_HOWTO (1),
#ifdef COFF_WITH_PE
  /* Windows CE */
  HOWTO (R_SH_IMM32CE,          /* type */
         0,                     /* rightshift */
         2,                     /* size (0 = byte, 1 = short, 2 = long) */
         32,                    /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_imm32ce",           /* name */
         TRUE,                  /* partial_inplace */
         0xffffffff,            /* src_mask */
         0xffffffff,            /* dst_mask */
         FALSE),                /* pcrel_offset */
#else
  EMPTY_HOWTO (2),
#endif
  EMPTY_HOWTO (3),              /* R_SH_PCREL8 */
  EMPTY_HOWTO (4),              /* R_SH_PCREL16 */
  EMPTY_HOWTO (5),              /* R_SH_HIGH8 */
  EMPTY_HOWTO (6),              /* R_SH_IMM24 */
  EMPTY_HOWTO (7),              /* R_SH_LOW16 */
  EMPTY_HOWTO (8),
  EMPTY_HOWTO (9),              /* R_SH_PCDISP8BY4 */

  HOWTO (R_SH_PCDISP8BY2,       /* type */
         1,                     /* rightshift */
         1,                     /* size (0 = byte, 1 = short, 2 = long) */
         8,                     /* bitsize */
         TRUE,                  /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_signed, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_pcdisp8by2",        /* name */
         TRUE,                  /* partial_inplace */
         0xff,                  /* src_mask */
         0xff,                  /* dst_mask */
         TRUE),                 /* pcrel_offset */

  EMPTY_HOWTO (11),             /* R_SH_PCDISP8 */

  HOWTO (R_SH_PCDISP,           /* type */
         1,                     /* rightshift */
         1,                     /* size (0 = byte, 1 = short, 2 = long) */
         12,                    /* bitsize */
         TRUE,                  /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_signed, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_pcdisp12by2",       /* name */
         TRUE,                  /* partial_inplace */
         0xfff,                 /* src_mask */
         0xfff,                 /* dst_mask */
         TRUE),                 /* pcrel_offset */

  EMPTY_HOWTO (13),

  HOWTO (R_SH_IMM32,            /* type */
         0,                     /* rightshift */
         2,                     /* size (0 = byte, 1 = short, 2 = long) */
         32,                    /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_imm32",             /* name */
         TRUE,                  /* partial_inplace */
         0xffffffff,            /* src_mask */
         0xffffffff,            /* dst_mask */
         FALSE),                /* pcrel_offset */

  EMPTY_HOWTO (15),
#ifdef COFF_WITH_PE
  HOWTO (R_SH_IMAGEBASE,        /* type */
         0,                     /* rightshift */
         2,                     /* size (0 = byte, 1 = short, 2 = long) */
         32,                    /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "rva32",               /* name */
         TRUE,                  /* partial_inplace */
         0xffffffff,            /* src_mask */
         0xffffffff,            /* dst_mask */
         FALSE),                /* pcrel_offset */
#else
  EMPTY_HOWTO (16),             /* R_SH_IMM8 */
#endif
  EMPTY_HOWTO (17),             /* R_SH_IMM8BY2 */
  EMPTY_HOWTO (18),             /* R_SH_IMM8BY4 */
  EMPTY_HOWTO (19),             /* R_SH_IMM4 */
  EMPTY_HOWTO (20),             /* R_SH_IMM4BY2 */
  EMPTY_HOWTO (21),             /* R_SH_IMM4BY4 */

  HOWTO (R_SH_PCRELIMM8BY2,     /* type */
         1,                     /* rightshift */
         1,                     /* size (0 = byte, 1 = short, 2 = long) */
         8,                     /* bitsize */
         TRUE,                  /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_unsigned, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_pcrelimm8by2",      /* name */
         TRUE,                  /* partial_inplace */
         0xff,                  /* src_mask */
         0xff,                  /* dst_mask */
         TRUE),                 /* pcrel_offset */

  HOWTO (R_SH_PCRELIMM8BY4,     /* type */
         2,                     /* rightshift */
         1,                     /* size (0 = byte, 1 = short, 2 = long) */
         8,                     /* bitsize */
         TRUE,                  /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_unsigned, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_pcrelimm8by4",      /* name */
         TRUE,                  /* partial_inplace */
         0xff,                  /* src_mask */
         0xff,                  /* dst_mask */
         TRUE),                 /* pcrel_offset */

  HOWTO (R_SH_IMM16,            /* type */
         0,                     /* rightshift */
         1,                     /* size (0 = byte, 1 = short, 2 = long) */
         16,                    /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_imm16",             /* name */
         TRUE,                  /* partial_inplace */
         0xffff,                /* src_mask */
         0xffff,                /* dst_mask */
         FALSE),                /* pcrel_offset */

  HOWTO (R_SH_SWITCH16,         /* type */
         0,                     /* rightshift */
         1,                     /* size (0 = byte, 1 = short, 2 = long) */
         16,                    /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_switch16",          /* name */
         TRUE,                  /* partial_inplace */
         0xffff,                /* src_mask */
         0xffff,                /* dst_mask */
         FALSE),                /* pcrel_offset */

  HOWTO (R_SH_SWITCH32,         /* type */
         0,                     /* rightshift */
         2,                     /* size (0 = byte, 1 = short, 2 = long) */
         32,                    /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_switch32",          /* name */
         TRUE,                  /* partial_inplace */
         0xffffffff,            /* src_mask */
         0xffffffff,            /* dst_mask */
         FALSE),                /* pcrel_offset */

  HOWTO (R_SH_USES,             /* type */
         0,                     /* rightshift */
         1,                     /* size (0 = byte, 1 = short, 2 = long) */
         16,                    /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_uses",              /* name */
         TRUE,                  /* partial_inplace */
         0xffff,                /* src_mask */
         0xffff,                /* dst_mask */
         FALSE),                /* pcrel_offset */

  HOWTO (R_SH_COUNT,            /* type */
         0,                     /* rightshift */
         2,                     /* size (0 = byte, 1 = short, 2 = long) */
         32,                    /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_count",             /* name */
         TRUE,                  /* partial_inplace */
         0xffffffff,            /* src_mask */
         0xffffffff,            /* dst_mask */
         FALSE),                /* pcrel_offset */

  HOWTO (R_SH_ALIGN,            /* type */
         0,                     /* rightshift */
         2,                     /* size (0 = byte, 1 = short, 2 = long) */
         32,                    /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_align",             /* name */
         TRUE,                  /* partial_inplace */
         0xffffffff,            /* src_mask */
         0xffffffff,            /* dst_mask */
         FALSE),                /* pcrel_offset */

  HOWTO (R_SH_CODE,             /* type */
         0,                     /* rightshift */
         2,                     /* size (0 = byte, 1 = short, 2 = long) */
         32,                    /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_code",              /* name */
         TRUE,                  /* partial_inplace */
         0xffffffff,            /* src_mask */
         0xffffffff,            /* dst_mask */
         FALSE),                /* pcrel_offset */

  HOWTO (R_SH_DATA,             /* type */
         0,                     /* rightshift */
         2,                     /* size (0 = byte, 1 = short, 2 = long) */
         32,                    /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_data",              /* name */
         TRUE,                  /* partial_inplace */
         0xffffffff,            /* src_mask */
         0xffffffff,            /* dst_mask */
         FALSE),                /* pcrel_offset */

  HOWTO (R_SH_LABEL,            /* type */
         0,                     /* rightshift */
         2,                     /* size (0 = byte, 1 = short, 2 = long) */
         32,                    /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_label",             /* name */
         TRUE,                  /* partial_inplace */
         0xffffffff,            /* src_mask */
         0xffffffff,            /* dst_mask */
         FALSE),                /* pcrel_offset */

  HOWTO (R_SH_SWITCH8,          /* type */
         0,                     /* rightshift */
         0,                     /* size (0 = byte, 1 = short, 2 = long) */
         8,                     /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield, /* complain_on_overflow */
         sh_reloc,              /* special_function */
         "r_switch8",           /* name */
         TRUE,                  /* partial_inplace */
         0xff,                  /* src_mask */
         0xff,                  /* dst_mask */
         FALSE)                 /* pcrel_offset */
};
#define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])

/* Check for a bad magic number.  */
#define BADMAG(x) SHBADMAG(x)

/* Customize coffcode.h (this is not currently used).  */
#define SH 1

/* FIXME: This should not be set here.  */
#define __A_MAGIC_SET__

#ifndef COFF_WITH_PE
/* Swap the r_offset field in and out.  */
#define SWAP_IN_RELOC_OFFSET  H_GET_32
#define SWAP_OUT_RELOC_OFFSET H_PUT_32

/* Swap out extra information in the reloc structure.  */
#define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
  do \
    { \
      dst->r_stuff[0] = 'S'; \
      dst->r_stuff[1] = 'C'; \
    } \
  while (0)
#endif

/* Get the value of a symbol, when performing a relocation.  */

static long
get_symbol_value (asymbol *symbol)
{
  bfd_vma relocation;

  if (bfd_is_com_section (symbol->section))
    relocation = 0;
  else
    relocation = (symbol->value +
                  symbol->section->output_section->vma +
                  symbol->section->output_offset);

  return relocation;
}
#ifdef COFF_WITH_PE
/* Convert an rtype to howto for the COFF backend linker.
   Copied from coff-i386.  */
#define coff_rtype_to_howto coff_sh_rtype_to_howto

static reloc_howto_type *
coff_sh_rtype_to_howto (bfd * abfd ATTRIBUTE_UNUSED,
                        asection * sec,
                        struct internal_reloc * rel,
                        struct coff_link_hash_entry * h,
                        struct internal_syment * sym,
                        bfd_vma * addendp)
{
  reloc_howto_type * howto;

  howto = sh_coff_howtos + rel->r_type;

  *addendp = 0;

  if (howto->pc_relative)
    *addendp += sec->vma;

  if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
    {
      /* This is a common symbol.  The section contents include the
         size (sym->n_value) as an addend.  The relocate_section
         function will be adding in the final value of the symbol.  We
         need to subtract out the current size in order to get the
         correct result.  */
      BFD_ASSERT (h != NULL);
    }

  if (howto->pc_relative)
    {
      *addendp -= 4;

      /* If the symbol is defined, then the generic code is going to
         add back the symbol value in order to cancel out an
         adjustment it made to the addend.  However, we set the addend
         to 0 at the start of this function.  We need to adjust here,
         to avoid the adjustment the generic code will make.  FIXME:
         This is getting a bit hackish.  */
      if (sym != NULL && sym->n_scnum != 0)
        *addendp -= sym->n_value;
    }

  if (rel->r_type == R_SH_IMAGEBASE)
    *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;

  return howto;
}

#endif /* COFF_WITH_PE */
/* This structure is used to map BFD reloc codes to SH PE relocs.  */
struct shcoff_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;
  unsigned char shcoff_reloc_val;
};

#ifdef COFF_WITH_PE
/* An array mapping BFD reloc codes to SH PE relocs.  */
static const struct shcoff_reloc_map sh_reloc_map[] =
{
  { BFD_RELOC_32, R_SH_IMM32CE },
  { BFD_RELOC_RVA, R_SH_IMAGEBASE },
  { BFD_RELOC_CTOR, R_SH_IMM32CE },
};
#else
/* An array mapping BFD reloc codes to SH PE relocs.  */
static const struct shcoff_reloc_map sh_reloc_map[] =
{
  { BFD_RELOC_32, R_SH_IMM32 },
  { BFD_RELOC_CTOR, R_SH_IMM32 },
};
#endif

/* Given a BFD reloc code, return the howto structure for the
   corresponding SH PE reloc.  */
#define coff_bfd_reloc_type_lookup sh_coff_reloc_type_lookup
#define coff_bfd_reloc_name_lookup sh_coff_reloc_name_lookup

static reloc_howto_type *
sh_coff_reloc_type_lookup (bfd * abfd ATTRIBUTE_UNUSED,
                           bfd_reloc_code_real_type code)
{
  unsigned int i;

  for (i = ARRAY_SIZE (sh_reloc_map); i--;)
    if (sh_reloc_map[i].bfd_reloc_val == code)
      return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];

  (*_bfd_error_handler) (_("SH Error: unknown reloc type %d"), code);
  return NULL;
}
static reloc_howto_type *
sh_coff_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (sh_coff_howtos) / sizeof (sh_coff_howtos[0]); i++)
    if (sh_coff_howtos[i].name != NULL
        && strcasecmp (sh_coff_howtos[i].name, r_name) == 0)
      return &sh_coff_howtos[i];

  return NULL;
}

/* This macro is used in coffcode.h to get the howto corresponding to
   an internal reloc.  */

#define RTYPE2HOWTO(relent, internal) \
  ((relent)->howto = \
   ((internal)->r_type < SH_COFF_HOWTO_COUNT \
    ? &sh_coff_howtos[(internal)->r_type] \
    : (reloc_howto_type *) NULL))

/* This is the same as the macro in coffcode.h, except that it copies
   r_offset into reloc_entry->addend for some relocs.  */
#define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
  { \
    coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
    if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
      coffsym = (obj_symbols (abfd) \
                 + (cache_ptr->sym_ptr_ptr - symbols)); \
    else if (ptr) \
      coffsym = coff_symbol_from (abfd, ptr); \
    if (coffsym != (coff_symbol_type *) NULL \
        && coffsym->native->u.syment.n_scnum == 0) \
      cache_ptr->addend = 0; \
    else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
             && ptr->section != (asection *) NULL) \
      cache_ptr->addend = - (ptr->section->vma + ptr->value); \
    else \
      cache_ptr->addend = 0; \
    if ((reloc).r_type == R_SH_SWITCH8 \
        || (reloc).r_type == R_SH_SWITCH16 \
        || (reloc).r_type == R_SH_SWITCH32 \
        || (reloc).r_type == R_SH_USES \
        || (reloc).r_type == R_SH_COUNT \
        || (reloc).r_type == R_SH_ALIGN) \
      cache_ptr->addend = (reloc).r_offset; \
  }
/* This is the howto function for the SH relocations.  */

static bfd_reloc_status_type
sh_reloc (bfd * abfd,
          arelent * reloc_entry,
          asymbol * symbol_in,
          void * data,
          asection * input_section,
          bfd * output_bfd,
          char ** error_message ATTRIBUTE_UNUSED)
{
  unsigned long insn;
  bfd_vma sym_value;
  unsigned short r_type;
  bfd_vma addr = reloc_entry->address;
  bfd_byte *hit_data = addr + (bfd_byte *) data;

  r_type = reloc_entry->howto->type;

  if (output_bfd != NULL)
    {
      /* Partial linking--do nothing.  */
      reloc_entry->address += input_section->output_offset;
      return bfd_reloc_ok;
    }

  /* Almost all relocs have to do with relaxing.  If any work must be
     done for them, it has been done in sh_relax_section.  */
  if (r_type != R_SH_IMM32
#ifdef COFF_WITH_PE
      && r_type != R_SH_IMM32CE
      && r_type != R_SH_IMAGEBASE
#endif
      && (r_type != R_SH_PCDISP
          || (symbol_in->flags & BSF_LOCAL) != 0))
    return bfd_reloc_ok;

  if (symbol_in != NULL
      && bfd_is_und_section (symbol_in->section))
    return bfd_reloc_undefined;

  sym_value = get_symbol_value (symbol_in);

  switch (r_type)
    {
    case R_SH_IMM32:
#ifdef COFF_WITH_PE
    case R_SH_IMM32CE:
#endif
      insn = bfd_get_32 (abfd, hit_data);
      insn += sym_value + reloc_entry->addend;
      bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
      break;
#ifdef COFF_WITH_PE
    case R_SH_IMAGEBASE:
      insn = bfd_get_32 (abfd, hit_data);
      insn += sym_value + reloc_entry->addend;
      insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
      bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
      break;
#endif
    case R_SH_PCDISP:
      insn = bfd_get_16 (abfd, hit_data);
      sym_value += reloc_entry->addend;
      sym_value -= (input_section->output_section->vma
                    + input_section->output_offset
                    + addr
                    + 4);
      sym_value += (insn & 0xfff) << 1;
      if (insn & 0x800)
        sym_value -= 0x1000;
      insn = (insn & 0xf000) | (sym_value & 0xfff);
      bfd_put_16 (abfd, (bfd_vma) insn, hit_data);
      if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
        return bfd_reloc_overflow;
      break;
    default:
      abort ();
      break;
    }

  return bfd_reloc_ok;
}
#define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match

/* We can do relaxing.  */
#define coff_bfd_relax_section sh_relax_section

/* We use the special COFF backend linker.  */
#define coff_relocate_section sh_relocate_section

/* When relaxing, we need to use special code to get the relocated
   section contents.  */
#define coff_bfd_get_relocated_section_contents \
  sh_coff_get_relocated_section_contents

#include "coffcode.h"

static bfd_boolean
sh_relax_delete_bytes (bfd *, asection *, bfd_vma, int);
/* This function handles relaxing on the SH.

   Function calls on the SH look like this:

       movl  L1,r0
       ...
       jsr   @r0
       ...
     L1:
       .long function

   The compiler and assembler will cooperate to create R_SH_USES
   relocs on the jsr instructions.  The r_offset field of the
   R_SH_USES reloc is the PC relative offset to the instruction which
   loads the register (the r_offset field is computed as though it
   were a jump instruction, so the offset value is actually from four
   bytes past the instruction).  The linker can use this reloc to
   determine just which function is being called, and thus decide
   whether it is possible to replace the jsr with a bsr.

   If multiple function calls are all based on a single register load
   (i.e., the same function is called multiple times), the compiler
   guarantees that each function call will have an R_SH_USES reloc.
   Therefore, if the linker is able to convert each R_SH_USES reloc
   which refers to that address, it can safely eliminate the register
   load.

   When the assembler creates an R_SH_USES reloc, it examines it to
   determine which address is being loaded (L1 in the above example).
   It then counts the number of references to that address, and
   creates an R_SH_COUNT reloc at that address.  The r_offset field of
   the R_SH_COUNT reloc will be the number of references.  If the
   linker is able to eliminate a register load, it can use the
   R_SH_COUNT reloc to see whether it can also eliminate the function
   address.

   SH relaxing also handles another, unrelated, matter.  On the SH, if
   a load or store instruction is not aligned on a four byte boundary,
   the memory cycle interferes with the 32 bit instruction fetch,
   causing a one cycle bubble in the pipeline.  Therefore, we try to
   align load and store instructions on four byte boundaries if we
   can, by swapping them with one of the adjacent instructions.  */
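/* For illustration, when the callee is close enough the sequence above
   is rewritten roughly as

       bsr   function
       ...

   the register load is deleted, and, once the use count kept in the
   R_SH_COUNT reloc drops to zero, the stored address at L1 is deleted
   as well.  */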
static bfd_boolean
sh_relax_section (bfd *abfd,
                  asection *sec,
                  struct bfd_link_info *link_info,
                  bfd_boolean *again)
{
  struct internal_reloc *internal_relocs;
  bfd_boolean have_code;
  struct internal_reloc *irel, *irelend;
  bfd_byte *contents = NULL;

  *again = FALSE;

  if (link_info->relocatable
      || (sec->flags & SEC_RELOC) == 0
      || sec->reloc_count == 0)
    return TRUE;

  if (coff_section_data (abfd, sec) == NULL)
    {
      bfd_size_type amt = sizeof (struct coff_section_tdata);
      sec->used_by_bfd = bfd_zalloc (abfd, amt);
      if (sec->used_by_bfd == NULL)
        return FALSE;
    }

  internal_relocs = (_bfd_coff_read_internal_relocs
                     (abfd, sec, link_info->keep_memory,
                      (bfd_byte *) NULL, FALSE,
                      (struct internal_reloc *) NULL));
  if (internal_relocs == NULL)
    goto error_return;

  have_code = FALSE;

  irelend = internal_relocs + sec->reloc_count;
  for (irel = internal_relocs; irel < irelend; irel++)
    {
      bfd_vma laddr, paddr, symval;
      unsigned short insn;
      struct internal_reloc *irelfn, *irelscan, *irelcount;
      struct internal_syment sym;
      bfd_signed_vma foff;

      if (irel->r_type == R_SH_CODE)
        have_code = TRUE;

      if (irel->r_type != R_SH_USES)
        continue;

      /* Get the section contents.  */
      if (contents == NULL)
        {
          if (coff_section_data (abfd, sec)->contents != NULL)
            contents = coff_section_data (abfd, sec)->contents;
          else
            {
              if (!bfd_malloc_and_get_section (abfd, sec, &contents))
                goto error_return;
            }
        }

      /* The r_offset field of the R_SH_USES reloc will point us to
         the register load.  The 4 is because the r_offset field is
         computed as though it were a jump offset, which is based
         from 4 bytes after the jump instruction.  */
      laddr = irel->r_vaddr - sec->vma + 4;
      /* Careful to sign extend the 32-bit offset.  */
      laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
      if (laddr >= sec->size)
        {
          (*_bfd_error_handler) ("%B: 0x%lx: warning: bad R_SH_USES offset",
                                 abfd, (unsigned long) irel->r_vaddr);
          continue;
        }

      insn = bfd_get_16 (abfd, contents + laddr);

      /* If the instruction is not mov.l NN,rN, we don't know what to do.  */
      if ((insn & 0xf000) != 0xd000)
        {
          ((*_bfd_error_handler)
           ("%B: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
            abfd, (unsigned long) irel->r_vaddr, insn));
          continue;
        }

      /* Get the address from which the register is being loaded.  The
         displacement in the mov.l instruction is quadrupled.  It is a
         displacement from four bytes after the movl instruction, but,
         before adding in the PC address, two least significant bits
         of the PC are cleared.  We assume that the section is aligned
         on a four byte boundary.  */
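      /* Purely illustrative numbers: a mov.l with displacement 3 at
         laddr == 0x1000 loads its constant from
         ((0x1000 + 4) & ~3) + 3 * 4 == 0x1010, which is the paddr
         computed below.  */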
      paddr = insn & 0xff;
      paddr *= 4;
      paddr += (laddr + 4) &~ (bfd_vma) 3;
      if (paddr >= sec->size)
        {
          ((*_bfd_error_handler)
           ("%B: 0x%lx: warning: bad R_SH_USES load offset",
            abfd, (unsigned long) irel->r_vaddr));
          continue;
        }

      /* Get the reloc for the address from which the register is
         being loaded.  This reloc will tell us which function is
         actually being called.  */
      paddr += sec->vma;
      for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
        if (irelfn->r_vaddr == paddr
#ifdef COFF_WITH_PE
            && (irelfn->r_type == R_SH_IMM32
                || irelfn->r_type == R_SH_IMM32CE
                || irelfn->r_type == R_SH_IMAGEBASE)
#else
            && irelfn->r_type == R_SH_IMM32
#endif
            )
          break;
      if (irelfn >= irelend)
        {
          ((*_bfd_error_handler)
           ("%B: 0x%lx: warning: could not find expected reloc",
            abfd, (unsigned long) paddr));
          continue;
        }

      /* Get the value of the symbol referred to by the reloc.  */
      if (! _bfd_coff_get_external_symbols (abfd))
        goto error_return;
      bfd_coff_swap_sym_in (abfd,
                            ((bfd_byte *) obj_coff_external_syms (abfd)
                             + (irelfn->r_symndx
                                * bfd_coff_symesz (abfd))),
                            &sym);
      if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
        {
          ((*_bfd_error_handler)
           ("%B: 0x%lx: warning: symbol in unexpected section",
            abfd, (unsigned long) paddr));
          continue;
        }

      if (sym.n_sclass != C_EXT)
        {
          symval = (sym.n_value
                    - sec->vma
                    + sec->output_section->vma
                    + sec->output_offset);
        }
      else
        {
          struct coff_link_hash_entry *h;

          h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
          BFD_ASSERT (h != NULL);
          if (h->root.type != bfd_link_hash_defined
              && h->root.type != bfd_link_hash_defweak)
            {
              /* This appears to be a reference to an undefined
                 symbol.  Just ignore it--it will be caught by the
                 regular reloc processing.  */
              continue;
            }

          symval = (h->root.u.def.value
                    + h->root.u.def.section->output_section->vma
                    + h->root.u.def.section->output_offset);
        }

      symval += bfd_get_32 (abfd, contents + paddr - sec->vma);

      /* See if this function call can be shortened.  */
      foff = (symval
              - (irel->r_vaddr
                 - sec->vma
                 + sec->output_section->vma
                 + sec->output_offset
                 + 4));
      if (foff < -0x1000 || foff >= 0x1000)
        {
          /* After all that work, we can't shorten this function call.  */
          continue;
        }

      /* Shorten the function call.  */

      /* For simplicity of coding, we are going to modify the section
         contents, the section relocs, and the BFD symbol table.  We
         must tell the rest of the code not to free up this
         information.  It would be possible to instead create a table
         of changes which have to be made, as is done in coff-mips.c;
         that would be more work, but would require less memory when
         the linker is run.  */

      coff_section_data (abfd, sec)->relocs = internal_relocs;
      coff_section_data (abfd, sec)->keep_relocs = TRUE;

      coff_section_data (abfd, sec)->contents = contents;
      coff_section_data (abfd, sec)->keep_contents = TRUE;

      obj_coff_keep_syms (abfd) = TRUE;

      /* Replace the jsr with a bsr.  */

      /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
         replace the jsr with a bsr.  */
      irel->r_type = R_SH_PCDISP;
      irel->r_symndx = irelfn->r_symndx;
      if (sym.n_sclass != C_EXT)
        {
          /* If this needs to be changed because of future relaxing,
             it will be handled here like other internal PCDISP
             relocs.  */
          bfd_put_16 (abfd,
                      (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
                      contents + irel->r_vaddr - sec->vma);
        }
      else
        {
          /* We can't fully resolve this yet, because the external
             symbol value may be changed by future relaxing.  We let
             the final link phase handle it.  */
          bfd_put_16 (abfd, (bfd_vma) 0xb000,
                      contents + irel->r_vaddr - sec->vma);
        }

      /* See if there is another R_SH_USES reloc referring to the same
         register load.  */
      for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
        if (irelscan->r_type == R_SH_USES
            && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
          break;
      if (irelscan < irelend)
        {
          /* Some other function call depends upon this register load,
             and we have not yet converted that function call.
             Indeed, we may never be able to convert it.  There is
             nothing else we can do at this point.  */
          continue;
        }

      /* Look for a R_SH_COUNT reloc on the location where the
         function address is stored.  Do this before deleting any
         bytes, to avoid confusion about the address.  */
      for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
        if (irelcount->r_vaddr == paddr
            && irelcount->r_type == R_SH_COUNT)
          break;

      /* Delete the register load.  */
      if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
        goto error_return;

      /* That will change things, so, just in case it permits some
         other function call to come within range, we should relax
         again.  Note that this is not required, and it may be slow.  */
      *again = TRUE;

      /* Now check whether we got a COUNT reloc.  */
      if (irelcount >= irelend)
        {
          ((*_bfd_error_handler)
           ("%B: 0x%lx: warning: could not find expected COUNT reloc",
            abfd, (unsigned long) paddr));
          continue;
        }

      /* The number of uses is stored in the r_offset field.  We've
         just deleted one.  */
      if (irelcount->r_offset == 0)
        {
          ((*_bfd_error_handler) ("%B: 0x%lx: warning: bad count",
                                  abfd, (unsigned long) paddr));
          continue;
        }

      --irelcount->r_offset;

      /* If there are no more uses, we can delete the address.  Reload
         the address from irelfn, in case it was changed by the
         previous call to sh_relax_delete_bytes.  */
      if (irelcount->r_offset == 0)
        {
          if (! sh_relax_delete_bytes (abfd, sec,
                                       irelfn->r_vaddr - sec->vma, 4))
            goto error_return;
        }

      /* We've done all we can with that function call.  */
    }

  /* Look for load and store instructions that we can align on four
     byte boundaries.  */
  if (have_code)
    {
      bfd_boolean swapped;

      /* Get the section contents.  */
      if (contents == NULL)
        {
          if (coff_section_data (abfd, sec)->contents != NULL)
            contents = coff_section_data (abfd, sec)->contents;
          else
            {
              if (!bfd_malloc_and_get_section (abfd, sec, &contents))
                goto error_return;
            }
        }

      if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
        goto error_return;

      if (swapped)
        {
          coff_section_data (abfd, sec)->relocs = internal_relocs;
          coff_section_data (abfd, sec)->keep_relocs = TRUE;

          coff_section_data (abfd, sec)->contents = contents;
          coff_section_data (abfd, sec)->keep_contents = TRUE;

          obj_coff_keep_syms (abfd) = TRUE;
        }
    }

  if (internal_relocs != NULL
      && internal_relocs != coff_section_data (abfd, sec)->relocs)
    {
      if (! link_info->keep_memory)
        free (internal_relocs);
      else
        coff_section_data (abfd, sec)->relocs = internal_relocs;
    }

  if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
    {
      if (! link_info->keep_memory)
        free (contents);
      else
        /* Cache the section contents for coff_link_input_bfd.  */
        coff_section_data (abfd, sec)->contents = contents;
    }

  return TRUE;

 error_return:
  if (internal_relocs != NULL
      && internal_relocs != coff_section_data (abfd, sec)->relocs)
    free (internal_relocs);
  if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
    free (contents);
  return FALSE;
}
/* Delete some bytes from a section while relaxing.  */

static bfd_boolean
sh_relax_delete_bytes (bfd *abfd,
                       asection *sec,
                       bfd_vma addr,
                       int count)
{
  bfd_byte *contents;
  struct internal_reloc *irel, *irelend;
  struct internal_reloc *irelalign;
  bfd_vma toaddr;
  bfd_byte *esym, *esymend;
  bfd_size_type symesz;
  struct coff_link_hash_entry **sym_hash;
  asection *o;

  contents = coff_section_data (abfd, sec)->contents;

  /* The deletion must stop at the next ALIGN reloc for an alignment
     power larger than the number of bytes we are deleting.  */

  irelalign = NULL;
  toaddr = sec->size;

  irel = coff_section_data (abfd, sec)->relocs;
  irelend = irel + sec->reloc_count;
  for (; irel < irelend; irel++)
    {
      if (irel->r_type == R_SH_ALIGN
          && irel->r_vaddr - sec->vma > addr
          && count < (1 << irel->r_offset))
        {
          irelalign = irel;
          toaddr = irel->r_vaddr - sec->vma;
          break;
        }
    }

  /* Actually delete the bytes.  */
  memmove (contents + addr, contents + addr + count,
           (size_t) (toaddr - addr - count));
  if (irelalign == NULL)
    sec->size -= count;
  else
    {
      int i;

#define NOP_OPCODE (0x0009)

      BFD_ASSERT ((count & 1) == 0);
      for (i = 0; i < count; i += 2)
        bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
    }

  /* Adjust all the relocs.  */
  for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
    {
      bfd_vma nraddr, stop;
      bfd_vma start = 0;
      int insn = 0;
      struct internal_syment sym;
      int off, adjust, oinsn;
      bfd_signed_vma voff = 0;
      bfd_boolean overflow;

      /* Get the new reloc address.  */
      nraddr = irel->r_vaddr - sec->vma;
      if ((irel->r_vaddr - sec->vma > addr
           && irel->r_vaddr - sec->vma < toaddr)
          || (irel->r_type == R_SH_ALIGN
              && irel->r_vaddr - sec->vma == toaddr))
        nraddr -= count;

      /* See if this reloc was for the bytes we have deleted, in which
         case we no longer care about it.  Don't delete relocs which
         represent addresses, though.  */
      if (irel->r_vaddr - sec->vma >= addr
          && irel->r_vaddr - sec->vma < addr + count
          && irel->r_type != R_SH_ALIGN
          && irel->r_type != R_SH_CODE
          && irel->r_type != R_SH_DATA
          && irel->r_type != R_SH_LABEL)
        irel->r_type = R_SH_UNUSED;

      /* If this is a PC relative reloc, see if the range it covers
         includes the bytes we have deleted.  */
      switch (irel->r_type)
        {
        default:
          break;

        case R_SH_PCDISP8BY2:
        case R_SH_PCDISP:
        case R_SH_PCRELIMM8BY2:
        case R_SH_PCRELIMM8BY4:
          start = irel->r_vaddr - sec->vma;
          insn = bfd_get_16 (abfd, contents + nraddr);
          break;
        }
      switch (irel->r_type)
        {
        default:
          start = stop = addr;
          break;

        case R_SH_IMM32:
#ifdef COFF_WITH_PE
        case R_SH_IMM32CE:
        case R_SH_IMAGEBASE:
#endif
          /* If this reloc is against a symbol defined in this
             section, and the symbol will not be adjusted below, we
             must check the addend to see if it will put the value in
             range to be adjusted, and hence must be changed.  */
          bfd_coff_swap_sym_in (abfd,
                                ((bfd_byte *) obj_coff_external_syms (abfd)
                                 + (irel->r_symndx
                                    * bfd_coff_symesz (abfd))),
                                &sym);
          if (sym.n_sclass != C_EXT
              && sym.n_scnum == sec->target_index
              && ((bfd_vma) sym.n_value <= addr
                  || (bfd_vma) sym.n_value >= toaddr))
            {
              bfd_vma val;

              val = bfd_get_32 (abfd, contents + nraddr);
              val += sym.n_value;
              if (val > addr && val < toaddr)
                bfd_put_32 (abfd, val - count, contents + nraddr);
            }
          start = stop = addr;
          break;

        case R_SH_PCDISP8BY2:
          off = insn & 0xff;
          if (off & 0x80)
            off -= 0x100;
          stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
          break;

        case R_SH_PCDISP:
          bfd_coff_swap_sym_in (abfd,
                                ((bfd_byte *) obj_coff_external_syms (abfd)
                                 + (irel->r_symndx
                                    * bfd_coff_symesz (abfd))),
                                &sym);
          if (sym.n_sclass == C_EXT)
            start = stop = addr;
          else
            {
              off = insn & 0xfff;
              if (off & 0x800)
                off -= 0x1000;
              stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
            }
          break;

        case R_SH_PCRELIMM8BY2:
          off = insn & 0xff;
          stop = start + 4 + off * 2;
          break;

        case R_SH_PCRELIMM8BY4:
          off = insn & 0xff;
          stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
          break;

        case R_SH_SWITCH8:
        case R_SH_SWITCH16:
        case R_SH_SWITCH32:
          /* These reloc types represent
               .word L2-L1
             The r_offset field holds the difference between the reloc
             address and L1.  That is the start of the reloc, and
             adding in the contents gives us the top.  We must adjust
             both the r_offset field and the section contents.  */
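          /* Illustrative values only: with L1 at 0x100, the .word at
             0x108 and L2 at 0x120, r_offset is 0x8 and the section
             contents at the reloc hold 0x20; the code below recovers
             L1 as "start" and L2 as "stop".  */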
          start = irel->r_vaddr - sec->vma;
          stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);

          if (start > addr
              && start < toaddr
              && (stop <= addr || stop >= toaddr))
            irel->r_offset += count;
          else if (stop > addr
                   && stop < toaddr
                   && (start <= addr || start >= toaddr))
            irel->r_offset -= count;

          start = stop;

          if (irel->r_type == R_SH_SWITCH16)
            voff = bfd_get_signed_16 (abfd, contents + nraddr);
          else if (irel->r_type == R_SH_SWITCH8)
            voff = bfd_get_8 (abfd, contents + nraddr);
          else
            voff = bfd_get_signed_32 (abfd, contents + nraddr);
          stop = (bfd_vma) ((bfd_signed_vma) start + voff);

          break;

        case R_SH_USES:
          start = irel->r_vaddr - sec->vma;
          stop = (bfd_vma) ((bfd_signed_vma) start
                            + (long) irel->r_offset
                            + 4);
          break;
        }

      if (start > addr
          && start < toaddr
          && (stop <= addr || stop >= toaddr))
        adjust = count;
      else if (stop > addr
               && stop < toaddr
               && (start <= addr || start >= toaddr))
        adjust = - count;
      else
        adjust = 0;

      if (adjust != 0)
        {
          oinsn = insn;
          overflow = FALSE;
          switch (irel->r_type)
            {
            default:
              abort ();
              break;

            case R_SH_PCDISP8BY2:
            case R_SH_PCRELIMM8BY2:
              insn += adjust / 2;
              if ((oinsn & 0xff00) != (insn & 0xff00))
                overflow = TRUE;
              bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
              break;

            case R_SH_PCDISP:
              insn += adjust / 2;
              if ((oinsn & 0xf000) != (insn & 0xf000))
                overflow = TRUE;
              bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
              break;

            case R_SH_PCRELIMM8BY4:
              BFD_ASSERT (adjust == count || count >= 4);
              if (count >= 4)
                insn += adjust / 4;
              else
                {
                  if ((irel->r_vaddr & 3) == 0)
                    ++insn;
                }
              if ((oinsn & 0xff00) != (insn & 0xff00))
                overflow = TRUE;
              bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
              break;

            case R_SH_SWITCH8:
              voff += adjust;
              if (voff < 0 || voff >= 0xff)
                overflow = TRUE;
              bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
              break;

            case R_SH_SWITCH16:
              voff += adjust;
              if (voff < - 0x8000 || voff >= 0x8000)
                overflow = TRUE;
              bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
              break;

            case R_SH_SWITCH32:
              voff += adjust;
              bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
              break;

            case R_SH_USES:
              irel->r_offset += adjust;
              break;
            }

          if (overflow)
            {
              ((*_bfd_error_handler)
               ("%B: 0x%lx: fatal: reloc overflow while relaxing",
                abfd, (unsigned long) irel->r_vaddr));
              bfd_set_error (bfd_error_bad_value);
              return FALSE;
            }
        }

      irel->r_vaddr = nraddr + sec->vma;
    }
  /* Look through all the other sections.  If they contain any IMM32
     relocs against internal symbols which we are not going to adjust
     below, we may need to adjust the addends.  */
  for (o = abfd->sections; o != NULL; o = o->next)
    {
      struct internal_reloc *internal_relocs;
      struct internal_reloc *irelscan, *irelscanend;
      bfd_byte *ocontents;

      if (o == sec
          || (o->flags & SEC_RELOC) == 0
          || o->reloc_count == 0)
        continue;

      /* We always cache the relocs.  Perhaps, if info->keep_memory is
         FALSE, we should free them, if we are permitted to, when we
         leave sh_coff_relax_section.  */
      internal_relocs = (_bfd_coff_read_internal_relocs
                         (abfd, o, TRUE, (bfd_byte *) NULL, FALSE,
                          (struct internal_reloc *) NULL));
      if (internal_relocs == NULL)
        return FALSE;

      ocontents = NULL;
      irelscanend = internal_relocs + o->reloc_count;
      for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
        {
          struct internal_syment sym;

#ifdef COFF_WITH_PE
          if (irelscan->r_type != R_SH_IMM32
              && irelscan->r_type != R_SH_IMAGEBASE
              && irelscan->r_type != R_SH_IMM32CE)
#else
          if (irelscan->r_type != R_SH_IMM32)
#endif
            continue;

          bfd_coff_swap_sym_in (abfd,
                                ((bfd_byte *) obj_coff_external_syms (abfd)
                                 + (irelscan->r_symndx
                                    * bfd_coff_symesz (abfd))),
                                &sym);
          if (sym.n_sclass != C_EXT
              && sym.n_scnum == sec->target_index
              && ((bfd_vma) sym.n_value <= addr
                  || (bfd_vma) sym.n_value >= toaddr))
            {
              bfd_vma val;

              if (ocontents == NULL)
                {
                  if (coff_section_data (abfd, o)->contents != NULL)
                    ocontents = coff_section_data (abfd, o)->contents;
                  else
                    {
                      if (!bfd_malloc_and_get_section (abfd, o, &ocontents))
                        return FALSE;
                      /* We always cache the section contents.
                         Perhaps, if info->keep_memory is FALSE, we
                         should free them, if we are permitted to,
                         when we leave sh_coff_relax_section.  */
                      coff_section_data (abfd, o)->contents = ocontents;
                    }
                }

              val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
              val += sym.n_value;
              if (val > addr && val < toaddr)
                bfd_put_32 (abfd, val - count,
                            ocontents + irelscan->r_vaddr - o->vma);

              coff_section_data (abfd, o)->keep_contents = TRUE;
            }
        }
    }

  /* Adjusting the internal symbols will not work if something has
     already retrieved the generic symbols.  It would be possible to
     make this work by adjusting the generic symbols at the same time.
     However, this case should not arise in normal usage.  */
  if (obj_symbols (abfd) != NULL
      || obj_raw_syments (abfd) != NULL)
    {
      ((*_bfd_error_handler)
       ("%B: fatal: generic symbols retrieved before relaxing", abfd));
      bfd_set_error (bfd_error_invalid_operation);
      return FALSE;
    }

  /* Adjust all the symbols.  */
  sym_hash = obj_coff_sym_hashes (abfd);
  symesz = bfd_coff_symesz (abfd);
  esym = (bfd_byte *) obj_coff_external_syms (abfd);
  esymend = esym + obj_raw_syment_count (abfd) * symesz;
  while (esym < esymend)
    {
      struct internal_syment isym;

      bfd_coff_swap_sym_in (abfd, esym, &isym);

      if (isym.n_scnum == sec->target_index
          && (bfd_vma) isym.n_value > addr
          && (bfd_vma) isym.n_value < toaddr)
        {
          isym.n_value -= count;

          bfd_coff_swap_sym_out (abfd, &isym, esym);

          if (*sym_hash != NULL)
            {
              BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
                          || (*sym_hash)->root.type == bfd_link_hash_defweak);
              BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
                          && (*sym_hash)->root.u.def.value < toaddr);
              (*sym_hash)->root.u.def.value -= count;
            }
        }

      esym += (isym.n_numaux + 1) * symesz;
      sym_hash += isym.n_numaux + 1;
    }

  /* See if we can move the ALIGN reloc forward.  We have adjusted
     r_vaddr for it already.  */
  if (irelalign != NULL)
    {
      bfd_vma alignto, alignaddr;

      alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
      alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
                             1 << irelalign->r_offset);
      if (alignto != alignaddr)
        {
          /* Tail recursion.  */
          return sh_relax_delete_bytes (abfd, sec, alignaddr,
                                        (int) (alignto - alignaddr));
        }
    }

  return TRUE;
}
/* This is yet another version of the SH opcode table, used to rapidly
   get information about a particular instruction.  */

/* The opcode map is represented by an array of these structures.  The
   array is indexed by the high order four bits in the instruction.  */

struct sh_major_opcode
{
  /* A pointer to the instruction list.  This is an array which
     contains all the instructions with this major opcode.  */
  const struct sh_minor_opcode *minor_opcodes;
  /* The number of elements in minor_opcodes.  */
  unsigned short count;
};

/* This structure holds information for a set of SH opcodes.  The
   instruction code is anded with the mask value, and the resulting
   value is used to search the sorted opcode list.  */

struct sh_minor_opcode
{
  /* The sorted opcode list.  */
  const struct sh_opcode *opcodes;
  /* The number of elements in opcodes.  */
  unsigned short count;
  /* The mask value to use when searching the opcode list.  */
  unsigned short mask;
};

/* This structure holds information for an SH instruction.  An array
   of these structures is sorted in order by opcode.  */

struct sh_opcode
{
  /* The code for this instruction, after it has been anded with the
     mask value in the sh_major_opcode structure.  */
  unsigned short opcode;
  /* Flags for this instruction.  */
  unsigned long flags;
};

/* Flags which appear in the sh_opcode structure.  */

/* This instruction loads a value from memory.  */
#define LOAD (0x1)

/* This instruction stores a value to memory.  */
#define STORE (0x2)

/* This instruction is a branch.  */
#define BRANCH (0x4)

/* This instruction has a delay slot.  */
#define DELAY (0x8)

/* This instruction uses the value in the register in the field at
   mask 0x0f00 of the instruction.  */
#define USES1 (0x10)
#define USES1_REG(x) ((x & 0x0f00) >> 8)

/* This instruction uses the value in the register in the field at
   mask 0x00f0 of the instruction.  */
#define USES2 (0x20)
#define USES2_REG(x) ((x & 0x00f0) >> 4)

/* This instruction uses the value in register 0.  */
#define USESR0 (0x40)

/* This instruction sets the value in the register in the field at
   mask 0x0f00 of the instruction.  */
#define SETS1 (0x80)
#define SETS1_REG(x) ((x & 0x0f00) >> 8)

/* This instruction sets the value in the register in the field at
   mask 0x00f0 of the instruction.  */
#define SETS2 (0x100)
#define SETS2_REG(x) ((x & 0x00f0) >> 4)

/* This instruction sets register 0.  */
#define SETSR0 (0x200)

/* This instruction sets a special register.  */
#define SETSSP (0x400)

/* This instruction uses a special register.  */
#define USESSP (0x800)

/* This instruction uses the floating point register in the field at
   mask 0x0f00 of the instruction.  */
#define USESF1 (0x1000)
#define USESF1_REG(x) ((x & 0x0f00) >> 8)

/* This instruction uses the floating point register in the field at
   mask 0x00f0 of the instruction.  */
#define USESF2 (0x2000)
#define USESF2_REG(x) ((x & 0x00f0) >> 4)

/* This instruction uses floating point register 0.  */
#define USESF0 (0x4000)

/* This instruction sets the floating point register in the field at
   mask 0x0f00 of the instruction.  */
#define SETSF1 (0x8000)
#define SETSF1_REG(x) ((x & 0x0f00) >> 8)

#define USESAS (0x10000)
#define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
#define USESR8 (0x20000)
#define SETSAS (0x40000)
#define SETSAS_REG(x) USESAS_REG (x)

#define MAP(a) a, sizeof a / sizeof a[0]
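/* As a purely illustrative example of how these tables are used: the
   instruction 0x400b (jsr @rn) has major opcode 4, so it is looked up
   in sh_opcode4 below; its first minor entry masks the instruction
   with 0xf0ff, giving 0x400b, which matches the sh_opcode40 entry
   flagged BRANCH | DELAY | USES1.  */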
1614 #ifndef COFF_IMAGE_WITH_PE
1616 /* The opcode maps. */
1618 static const struct sh_opcode sh_opcode00[] =
1620 { 0x0008, SETSSP }, /* clrt */
1621 { 0x0009, 0 }, /* nop */
1622 { 0x000b, BRANCH | DELAY | USESSP }, /* rts */
1623 { 0x0018, SETSSP }, /* sett */
1624 { 0x0019, SETSSP }, /* div0u */
1625 { 0x001b, 0 }, /* sleep */
1626 { 0x0028, SETSSP }, /* clrmac */
1627 { 0x002b, BRANCH | DELAY | SETSSP }, /* rte */
1628 { 0x0038, USESSP | SETSSP }, /* ldtlb */
1629 { 0x0048, SETSSP }, /* clrs */
1630 { 0x0058, SETSSP } /* sets */
1633 static const struct sh_opcode sh_opcode01[] =
1635 { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
1636 { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
1637 { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
1638 { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
1639 { 0x0029, SETS1 | USESSP }, /* movt rn */
1640 { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
1641 { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
1642 { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn / sts dsr,rn */
1643 { 0x0083, LOAD | USES1 }, /* pref @rn */
1644 { 0x007a, SETS1 | USESSP }, /* sts a0,rn */
1645 { 0x008a, SETS1 | USESSP }, /* sts x0,rn */
1646 { 0x009a, SETS1 | USESSP }, /* sts x1,rn */
1647 { 0x00aa, SETS1 | USESSP }, /* sts y0,rn */
1648 { 0x00ba, SETS1 | USESSP } /* sts y1,rn */
1651 static const struct sh_opcode sh_opcode02[] =
1653 { 0x0002, SETS1 | USESSP }, /* stc <special_reg>,rn */
1654 { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
1655 { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
1656 { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
1657 { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
1658 { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
1659 { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
1660 { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
1661 { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1664 static const struct sh_minor_opcode sh_opcode0[] =
1666 { MAP (sh_opcode00), 0xffff },
1667 { MAP (sh_opcode01), 0xf0ff },
1668 { MAP (sh_opcode02), 0xf00f }
1671 static const struct sh_opcode sh_opcode10[] =
1673 { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
1676 static const struct sh_minor_opcode sh_opcode1[] =
1678 { MAP (sh_opcode10), 0xf000 }
1681 static const struct sh_opcode sh_opcode20[] =
1683 { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
1684 { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
1685 { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
1686 { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
1687 { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
1688 { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
1689 { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
1690 { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
1691 { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
1692 { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
1693 { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
1694 { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
1695 { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
1696 { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
1697 { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
1700 static const struct sh_minor_opcode sh_opcode2[] =
1702 { MAP (sh_opcode20), 0xf00f }
1705 static const struct sh_opcode sh_opcode30[] =
1707 { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
1708 { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
1709 { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
1710 { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
1711 { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
1712 { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
1713 { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
1714 { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
1715 { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1716 { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
1717 { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
1718 { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
1719 { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1720 { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
1723 static const struct sh_minor_opcode sh_opcode3[] =
1725 { MAP (sh_opcode30), 0xf00f }
1728 static const struct sh_opcode sh_opcode40[] =
1730 { 0x4000, SETS1 | SETSSP | USES1 }, /* shll rn */
1731 { 0x4001, SETS1 | SETSSP | USES1 }, /* shlr rn */
1732 { 0x4002, STORE | SETS1 | USES1 | USESSP }, /* sts.l mach,@-rn */
1733 { 0x4004, SETS1 | SETSSP | USES1 }, /* rotl rn */
1734 { 0x4005, SETS1 | SETSSP | USES1 }, /* rotr rn */
1735 { 0x4006, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,mach */
1736 { 0x4008, SETS1 | USES1 }, /* shll2 rn */
1737 { 0x4009, SETS1 | USES1 }, /* shlr2 rn */
1738 { 0x400a, SETSSP | USES1 }, /* lds rm,mach */
1739 { 0x400b, BRANCH | DELAY | USES1 }, /* jsr @rn */
1740 { 0x4010, SETS1 | SETSSP | USES1 }, /* dt rn */
1741 { 0x4011, SETSSP | USES1 }, /* cmp/pz rn */
1742 { 0x4012, STORE | SETS1 | USES1 | USESSP }, /* sts.l macl,@-rn */
1743 { 0x4014, SETSSP | USES1 }, /* setrc rm */
1744 { 0x4015, SETSSP | USES1 }, /* cmp/pl rn */
1745 { 0x4016, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,macl */
1746 { 0x4018, SETS1 | USES1 }, /* shll8 rn */
1747 { 0x4019, SETS1 | USES1 }, /* shlr8 rn */
1748 { 0x401a, SETSSP | USES1 }, /* lds rm,macl */
1749 { 0x401b, LOAD | SETSSP | USES1 }, /* tas.b @rn */
1750 { 0x4020, SETS1 | SETSSP | USES1 }, /* shal rn */
1751 { 0x4021, SETS1 | SETSSP | USES1 }, /* shar rn */
1752 { 0x4022, STORE | SETS1 | USES1 | USESSP }, /* sts.l pr,@-rn */
1753 { 0x4024, SETS1 | SETSSP | USES1 | USESSP }, /* rotcl rn */
1754 { 0x4025, SETS1 | SETSSP | USES1 | USESSP }, /* rotcr rn */
1755 { 0x4026, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,pr */
1756 { 0x4028, SETS1 | USES1 }, /* shll16 rn */
1757 { 0x4029, SETS1 | USES1 }, /* shlr16 rn */
1758 { 0x402a, SETSSP | USES1 }, /* lds rm,pr */
1759 { 0x402b, BRANCH | DELAY | USES1 }, /* jmp @rn */
1760 { 0x4052, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpul,@-rn */
1761 { 0x4056, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpul */
1762 { 0x405a, SETSSP | USES1 }, /* lds.l rm,fpul */
1763 { 0x4062, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpscr / dsr,@-rn */
1764 { 0x4066, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpscr / dsr */
1765 { 0x406a, SETSSP | USES1 }, /* lds rm,fpscr / lds rm,dsr */
1766 { 0x4072, STORE | SETS1 | USES1 | USESSP }, /* sts.l a0,@-rn */
1767 { 0x4076, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,a0 */
1768 { 0x407a, SETSSP | USES1 }, /* lds.l rm,a0 */
1769 { 0x4082, STORE | SETS1 | USES1 | USESSP }, /* sts.l x0,@-rn */
1770 { 0x4086, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x0 */
1771 { 0x408a, SETSSP | USES1 }, /* lds.l rm,x0 */
1772 { 0x4092, STORE | SETS1 | USES1 | USESSP }, /* sts.l x1,@-rn */
1773 { 0x4096, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x1 */
1774 { 0x409a, SETSSP | USES1 }, /* lds.l rm,x1 */
1775 { 0x40a2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y0,@-rn */
1776 { 0x40a6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y0 */
1777 { 0x40aa, SETSSP | USES1 }, /* lds.l rm,y0 */
1778 { 0x40b2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y1,@-rn */
1779 { 0x40b6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y1 */
1780 { 0x40ba, SETSSP | USES1 } /* lds.l rm,y1 */
1783 static const struct sh_opcode sh_opcode41[] =
1785 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l <special_reg>,@-rn */
1786 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,<special_reg> */
1787 { 0x400c, SETS1 | USES1 | USES2 }, /* shad rm,rn */
1788 { 0x400d, SETS1 | USES1 | USES2 }, /* shld rm,rn */
1789 { 0x400e, SETSSP | USES1 }, /* ldc rm,<special_reg> */
1790 { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1793 static const struct sh_minor_opcode sh_opcode4[] =
1795 { MAP (sh_opcode40), 0xf0ff },
1796 { MAP (sh_opcode41), 0xf00f }
1799 static const struct sh_opcode sh_opcode50[] =
1801 { 0x5000, LOAD | SETS1 | USES2 } /* mov.l @(disp,rm),rn */
1804 static const struct sh_minor_opcode sh_opcode5[] =
1806 { MAP (sh_opcode50), 0xf000 }
1809 static const struct sh_opcode sh_opcode60[] =
1811 { 0x6000, LOAD | SETS1 | USES2 }, /* mov.b @rm,rn */
1812 { 0x6001, LOAD | SETS1 | USES2 }, /* mov.w @rm,rn */
1813 { 0x6002, LOAD | SETS1 | USES2 }, /* mov.l @rm,rn */
1814 { 0x6003, SETS1 | USES2 }, /* mov rm,rn */
1815 { 0x6004, LOAD | SETS1 | SETS2 | USES2 }, /* mov.b @rm+,rn */
1816 { 0x6005, LOAD | SETS1 | SETS2 | USES2 }, /* mov.w @rm+,rn */
1817 { 0x6006, LOAD | SETS1 | SETS2 | USES2 }, /* mov.l @rm+,rn */
1818 { 0x6007, SETS1 | USES2 }, /* not rm,rn */
1819 { 0x6008, SETS1 | USES2 }, /* swap.b rm,rn */
1820 { 0x6009, SETS1 | USES2 }, /* swap.w rm,rn */
1821 { 0x600a, SETS1 | SETSSP | USES2 | USESSP }, /* negc rm,rn */
1822 { 0x600b, SETS1 | USES2 }, /* neg rm,rn */
1823 { 0x600c, SETS1 | USES2 }, /* extu.b rm,rn */
1824 { 0x600d, SETS1 | USES2 }, /* extu.w rm,rn */
1825 { 0x600e, SETS1 | USES2 }, /* exts.b rm,rn */
1826 { 0x600f, SETS1 | USES2 } /* exts.w rm,rn */
1829 static const struct sh_minor_opcode sh_opcode6[] =
1831 { MAP (sh_opcode60), 0xf00f }
1834 static const struct sh_opcode sh_opcode70[] =
1836 { 0x7000, SETS1 | USES1 } /* add #imm,rn */
1839 static const struct sh_minor_opcode sh_opcode7[] =
1841 { MAP (sh_opcode70), 0xf000 }
1844 static const struct sh_opcode sh_opcode80[] =
1846 { 0x8000, STORE | USES2 | USESR0 }, /* mov.b r0,@(disp,rn) */
1847 { 0x8100, STORE | USES2 | USESR0 }, /* mov.w r0,@(disp,rn) */
1848 { 0x8200, SETSSP }, /* setrc #imm */
1849 { 0x8400, LOAD | SETSR0 | USES2 }, /* mov.b @(disp,rm),r0 */
1850 { 0x8500, LOAD | SETSR0 | USES2 }, /* mov.w @(disp,rm),r0 */
1851 { 0x8800, SETSSP | USESR0 }, /* cmp/eq #imm,r0 */
1852 { 0x8900, BRANCH | USESSP }, /* bt label */
1853 { 0x8b00, BRANCH | USESSP }, /* bf label */
1854 { 0x8c00, SETSSP }, /* ldrs @(disp,pc) */
1855 { 0x8d00, BRANCH | DELAY | USESSP }, /* bt/s label */
1856 { 0x8e00, SETSSP }, /* ldre @(disp,pc) */
1857 { 0x8f00, BRANCH | DELAY | USESSP } /* bf/s label */
1860 static const struct sh_minor_opcode sh_opcode8[] =
1862 { MAP (sh_opcode80), 0xff00 }
1865 static const struct sh_opcode sh_opcode90[] =
1867 { 0x9000, LOAD | SETS1 } /* mov.w @(disp,pc),rn */
1870 static const struct sh_minor_opcode sh_opcode9[] =
1872 { MAP (sh_opcode90), 0xf000 }
1875 static const struct sh_opcode sh_opcodea0[] =
1877 { 0xa000, BRANCH | DELAY } /* bra label */
1880 static const struct sh_minor_opcode sh_opcodea[] =
1882 { MAP (sh_opcodea0), 0xf000 }
1885 static const struct sh_opcode sh_opcodeb0[] =
1887 { 0xb000, BRANCH | DELAY } /* bsr label */
1890 static const struct sh_minor_opcode sh_opcodeb[] =
1892 { MAP (sh_opcodeb0), 0xf000 }
1895 static const struct sh_opcode sh_opcodec0[] =
1897 { 0xc000, STORE | USESR0 | USESSP }, /* mov.b r0,@(disp,gbr) */
1898 { 0xc100, STORE | USESR0 | USESSP }, /* mov.w r0,@(disp,gbr) */
1899 { 0xc200, STORE | USESR0 | USESSP }, /* mov.l r0,@(disp,gbr) */
1900 { 0xc300, BRANCH | USESSP }, /* trapa #imm */
1901 { 0xc400, LOAD | SETSR0 | USESSP }, /* mov.b @(disp,gbr),r0 */
1902 { 0xc500, LOAD | SETSR0 | USESSP }, /* mov.w @(disp,gbr),r0 */
1903 { 0xc600, LOAD | SETSR0 | USESSP }, /* mov.l @(disp,gbr),r0 */
1904 { 0xc700, SETSR0 }, /* mova @(disp,pc),r0 */
1905 { 0xc800, SETSSP | USESR0 }, /* tst #imm,r0 */
1906 { 0xc900, SETSR0 | USESR0 }, /* and #imm,r0 */
1907 { 0xca00, SETSR0 | USESR0 }, /* xor #imm,r0 */
1908 { 0xcb00, SETSR0 | USESR0 }, /* or #imm,r0 */
1909 { 0xcc00, LOAD | SETSSP | USESR0 | USESSP }, /* tst.b #imm,@(r0,gbr) */
1910 { 0xcd00, LOAD | STORE | USESR0 | USESSP }, /* and.b #imm,@(r0,gbr) */
1911 { 0xce00, LOAD | STORE | USESR0 | USESSP }, /* xor.b #imm,@(r0,gbr) */
1912 { 0xcf00, LOAD | STORE | USESR0 | USESSP } /* or.b #imm,@(r0,gbr) */
1915 static const struct sh_minor_opcode sh_opcodec[] =
1917 { MAP (sh_opcodec0), 0xff00 }
1920 static const struct sh_opcode sh_opcoded0[] =
1922 { 0xd000, LOAD | SETS1 } /* mov.l @(disp,pc),rn */
1925 static const struct sh_minor_opcode sh_opcoded[] =
1927 { MAP (sh_opcoded0), 0xf000 }
1930 static const struct sh_opcode sh_opcodee0[] =
1932 { 0xe000, SETS1 } /* mov #imm,rn */
1935 static const struct sh_minor_opcode sh_opcodee[] =
1937 { MAP (sh_opcodee0), 0xf000 }
1940 static const struct sh_opcode sh_opcodef0[] =
1942 { 0xf000, SETSF1 | USESF1 | USESF2 }, /* fadd fm,fn */
1943 { 0xf001, SETSF1 | USESF1 | USESF2 }, /* fsub fm,fn */
1944 { 0xf002, SETSF1 | USESF1 | USESF2 }, /* fmul fm,fn */
1945 { 0xf003, SETSF1 | USESF1 | USESF2 }, /* fdiv fm,fn */
1946 { 0xf004, SETSSP | USESF1 | USESF2 }, /* fcmp/eq fm,fn */
1947 { 0xf005, SETSSP | USESF1 | USESF2 }, /* fcmp/gt fm,fn */
1948 { 0xf006, LOAD | SETSF1 | USES2 | USESR0 }, /* fmov.s @(r0,rm),fn */
1949 { 0xf007, STORE | USES1 | USESF2 | USESR0 }, /* fmov.s fm,@(r0,rn) */
1950 { 0xf008, LOAD | SETSF1 | USES2 }, /* fmov.s @rm,fn */
1951 { 0xf009, LOAD | SETS2 | SETSF1 | USES2 }, /* fmov.s @rm+,fn */
1952 { 0xf00a, STORE | USES1 | USESF2 }, /* fmov.s fm,@rn */
1953 { 0xf00b, STORE | SETS1 | USES1 | USESF2 }, /* fmov.s fm,@-rn */
1954 { 0xf00c, SETSF1 | USESF2 }, /* fmov fm,fn */
1955 { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 } /* fmac f0,fm,fn */
1958 static const struct sh_opcode sh_opcodef1[] =
1960 { 0xf00d, SETSF1 | USESSP }, /* fsts fpul,fn */
1961 { 0xf01d, SETSSP | USESF1 }, /* flds fn,fpul */
1962 { 0xf02d, SETSF1 | USESSP }, /* float fpul,fn */
1963 { 0xf03d, SETSSP | USESF1 }, /* ftrc fn,fpul */
1964 { 0xf04d, SETSF1 | USESF1 }, /* fneg fn */
1965 { 0xf05d, SETSF1 | USESF1 }, /* fabs fn */
1966 { 0xf06d, SETSF1 | USESF1 }, /* fsqrt fn */
1967 { 0xf07d, SETSSP | USESF1 }, /* ftst/nan fn */
1968 { 0xf08d, SETSF1 }, /* fldi0 fn */
1969 { 0xf09d, SETSF1 } /* fldi1 fn */
1972 static const struct sh_minor_opcode sh_opcodef[] =
1974 { MAP (sh_opcodef0), 0xf00f },
1975 { MAP (sh_opcodef1), 0xf0ff }
1978 static struct sh_major_opcode sh_opcodes[] =
1980 { MAP (sh_opcode0) },
1981 { MAP (sh_opcode1) },
1982 { MAP (sh_opcode2) },
1983 { MAP (sh_opcode3) },
1984 { MAP (sh_opcode4) },
1985 { MAP (sh_opcode5) },
1986 { MAP (sh_opcode6) },
1987 { MAP (sh_opcode7) },
1988 { MAP (sh_opcode8) },
1989 { MAP (sh_opcode9) },
1990 { MAP (sh_opcodea) },
1991 { MAP (sh_opcodeb) },
1992 { MAP (sh_opcodec) },
1993 { MAP (sh_opcoded) },
1994 { MAP (sh_opcodee) },
1995 { MAP (sh_opcodef) }
1998 /* The double data transfer / parallel processing insns are not
1999 described here. This will cause sh_align_load_span to leave them alone. */
2001 static const struct sh_opcode sh_dsp_opcodef0[] =
2003 { 0xf400, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @-as,ds */
2004 { 0xf401, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@-as */
2005 { 0xf404, USESAS | LOAD | SETSSP }, /* movs.x @as,ds */
2006 { 0xf405, USESAS | STORE | USESSP }, /* movs.x ds,@as */
2007 { 0xf408, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @as+,ds */
2008 { 0xf409, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@as+ */
2009 { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 }, /* movs.x @as+r8,ds */
2010 { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 } /* movs.x ds,@as+r8 */
2013 static const struct sh_minor_opcode sh_dsp_opcodef[] =
2015 { MAP (sh_dsp_opcodef0), 0xfc0d }
2018 /* Given an instruction, return a pointer to the corresponding
2019 sh_opcode structure. Return NULL if the instruction is not
2020 recognized. */
2022 static const struct sh_opcode *
2023 sh_insn_info (unsigned int insn)
2025 const struct sh_major_opcode *maj;
2026 const struct sh_minor_opcode *min, *minend;
2028 maj = &sh_opcodes[(insn & 0xf000) >> 12];
2029 min = maj->minor_opcodes;
2030 minend = min + maj->count;
2031 for (; min < minend; min++)
2033 unsigned int l;
2034 const struct sh_opcode *op, *opend;
2036 l = insn & min->mask;
2037 op = min->opcodes;
2038 opend = op + min->count;
2040 /* Since the opcodes tables are sorted, we could use a binary
2041 search here if the count were above some cutoff value. */
2042 for (; op < opend; op++)
2043 if (op->opcode == l)
2044 return op;
2047 return NULL;
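/* A minimal sketch (guarded out, illustration only) of how the
   two-level tables above are consulted: the top nibble of the insn
   picks the major entry, then each minor table is scanned under its
   mask.  0x6103 is "mov r0,r1", matched by the 0x6003 line of
   sh_opcode60, which is neither a load nor a store.  */
#if 0
static void
sh_insn_info_example (void)
{
  const struct sh_opcode *op = sh_insn_info (0x6103);

  if (op != NULL && (op->flags & (LOAD | STORE)) == 0)
    {
      /* 0x6103 only moves a register, so _bfd_sh_align_load_span
         would not try to realign it.  */
    }
}
#endif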
2050 /* See whether an instruction uses a general purpose register. */
2052 static bfd_boolean
2053 sh_insn_uses_reg (unsigned int insn,
2054 const struct sh_opcode *op,
2055 unsigned int reg)
2057 unsigned int f;
2059 f = op->flags;
2061 if ((f & USES1) != 0
2062 && USES1_REG (insn) == reg)
2063 return TRUE;
2064 if ((f & USES2) != 0
2065 && USES2_REG (insn) == reg)
2066 return TRUE;
2067 if ((f & USESR0) != 0
2068 && reg == 0)
2069 return TRUE;
2070 if ((f & USESAS) && reg == USESAS_REG (insn))
2071 return TRUE;
2072 if ((f & USESR8) && reg == 8)
2073 return TRUE;
2075 return FALSE;
2078 /* See whether an instruction sets a general purpose register. */
2080 static bfd_boolean
2081 sh_insn_sets_reg (unsigned int insn,
2082 const struct sh_opcode *op,
2083 unsigned int reg)
2085 unsigned int f;
2087 f = op->flags;
2089 if ((f & SETS1) != 0
2090 && SETS1_REG (insn) == reg)
2091 return TRUE;
2092 if ((f & SETS2) != 0
2093 && SETS2_REG (insn) == reg)
2094 return TRUE;
2095 if ((f & SETSR0) != 0
2096 && reg == 0)
2097 return TRUE;
2098 if ((f & SETSAS) && reg == SETSAS_REG (insn))
2099 return TRUE;
2101 return FALSE;
2104 /* See whether an instruction uses or sets a general purpose register */
2106 static bfd_boolean
2107 sh_insn_uses_or_sets_reg (unsigned int insn,
2108 const struct sh_opcode *op,
2109 unsigned int reg)
2111 if (sh_insn_uses_reg (insn, op, reg))
2112 return TRUE;
2114 return sh_insn_sets_reg (insn, op, reg);
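/* A minimal sketch (guarded out) of the register queries above, again
   using 0x6103 ("mov r0,r1"): the insn reads r0 via the rm field and
   writes r1 via the rn field, so it may not be swapped past an insn
   that writes r0 or touches r1.  The expected results assume the
   *_REG extraction macros defined earlier in this file.  */
#if 0
static void
sh_reg_query_example (void)
{
  unsigned int insn = 0x6103;
  const struct sh_opcode *op = sh_insn_info (insn);

  if (op != NULL)
    {
      bfd_boolean uses_r0 = sh_insn_uses_reg (insn, op, 0);           /* TRUE */
      bfd_boolean sets_r1 = sh_insn_sets_reg (insn, op, 1);           /* TRUE */
      bfd_boolean hits_r2 = sh_insn_uses_or_sets_reg (insn, op, 2);   /* FALSE */

      (void) uses_r0;
      (void) sets_r1;
      (void) hits_r2;
    }
}
#endif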
2117 /* See whether an instruction uses a floating point register. */
2119 static bfd_boolean
2120 sh_insn_uses_freg (unsigned int insn,
2121 const struct sh_opcode *op,
2122 unsigned int freg)
2124 unsigned int f;
2126 f = op->flags;
2128 /* We can't tell if this is a double-precision insn, so just play safe
2129 and assume that it might be. That means we must test not only FREG
2130 against itself, but also an even FREG against FREG+1 (in case the
2131 using insn reads just the low part of a double precision value) and
2132 an odd FREG against FREG-1 (in case the setting insn writes just the
2133 low part of a double precision value).
2134 What this all boils down to is that we have to ignore the lowest
2135 bit of the register number. */
2137 if ((f & USESF1) != 0
2138 && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
2139 return TRUE;
2140 if ((f & USESF2) != 0
2141 && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
2142 return TRUE;
2143 if ((f & USESF0) != 0
2144 && freg == 0)
2145 return TRUE;
2147 return FALSE;
2150 /* See whether an instruction sets a floating point register. */
2152 static bfd_boolean
2153 sh_insn_sets_freg (unsigned int insn,
2154 const struct sh_opcode *op,
2155 unsigned int freg)
2157 unsigned int f;
2159 f = op->flags;
2161 /* We can't tell if this is a double-precision insn, so just play safe
2162 and assume that it might be. That means we must test not only FREG
2163 against itself, but also an even FREG against FREG+1 (in case the
2164 using insn reads just the low part of a double precision value) and
2165 an odd FREG against FREG-1 (in case the setting insn writes just the
2166 low part of a double precision value).
2167 What this all boils down to is that we have to ignore the lowest
2168 bit of the register number. */
2170 if ((f & SETSF1) != 0
2171 && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2172 return TRUE;
2174 return FALSE;
2177 /* See whether an instruction uses or sets a floating point register */
2179 static bfd_boolean
2180 sh_insn_uses_or_sets_freg (unsigned int insn,
2181 const struct sh_opcode *op,
2182 unsigned int reg)
2184 if (sh_insn_uses_freg (insn, op, reg))
2185 return TRUE;
2187 return sh_insn_sets_freg (insn, op, reg);
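/* A minimal sketch (guarded out) of the pairing rule described in the
   comments above: for "fadd fr5,fr7" (0xf750) a query about fr4 still
   answers TRUE, since fr5 could be the low half of the dr4 pair, while
   fr2 is safely unrelated.  Assumes the default (non-DSP) tables.  */
#if 0
static void
sh_freg_query_example (void)
{
  unsigned int insn = 0xf750;
  const struct sh_opcode *op = sh_insn_info (insn);

  if (op != NULL)
    {
      bfd_boolean maybe_fr4 = sh_insn_uses_or_sets_freg (insn, op, 4); /* TRUE */
      bfd_boolean not_fr2 = sh_insn_uses_or_sets_freg (insn, op, 2);   /* FALSE */

      (void) maybe_fr4;
      (void) not_fr2;
    }
}
#endif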
2190 /* See whether instructions I1 and I2 conflict, assuming I1 comes
2191 before I2. OP1 and OP2 are the corresponding sh_opcode structures.
2192 This should return TRUE if there is a conflict, or FALSE if the
2193 instructions can be swapped safely. */
2195 static bfd_boolean
2196 sh_insns_conflict (unsigned int i1,
2197 const struct sh_opcode *op1,
2198 unsigned int i2,
2199 const struct sh_opcode *op2)
2201 unsigned int f1, f2;
2203 f1 = op1->flags;
2204 f2 = op2->flags;
2206 /* Load of fpscr conflicts with floating point operations.
2207 FIXME: shouldn't test raw opcodes here. */
2208 if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2209 || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2210 return TRUE;
2212 if ((f1 & (BRANCH | DELAY)) != 0
2213 || (f2 & (BRANCH | DELAY)) != 0)
2214 return TRUE;
2216 if (((f1 | f2) & SETSSP)
2217 && (f1 & (SETSSP | USESSP))
2218 && (f2 & (SETSSP | USESSP)))
2219 return TRUE;
2221 if ((f1 & SETS1) != 0
2222 && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2223 return TRUE;
2224 if ((f1 & SETS2) != 0
2225 && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2226 return TRUE;
2227 if ((f1 & SETSR0) != 0
2228 && sh_insn_uses_or_sets_reg (i2, op2, 0))
2229 return TRUE;
2230 if ((f1 & SETSAS)
2231 && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
2232 return TRUE;
2233 if ((f1 & SETSF1) != 0
2234 && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2235 return TRUE;
2237 if ((f2 & SETS1) != 0
2238 && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2239 return TRUE;
2240 if ((f2 & SETS2) != 0
2241 && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2242 return TRUE;
2243 if ((f2 & SETSR0) != 0
2244 && sh_insn_uses_or_sets_reg (i1, op1, 0))
2245 return TRUE;
2246 if ((f2 & SETSAS)
2247 && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
2248 return TRUE;
2249 if ((f2 & SETSF1) != 0
2250 && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2251 return TRUE;
2253 /* The instructions do not conflict. */
2254 return FALSE;
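/* A minimal sketch (guarded out) of the conflict test above.
   "mov.l @r1,r2" (0x6212) followed by "not r2,r3" (0x6327) conflicts,
   because the first insn sets r2 and the second reads it; pairing the
   same load with "mov r4,r5" (0x6543) does not.  */
#if 0
static void
sh_insns_conflict_example (void)
{
  unsigned int load = 0x6212;   /* mov.l @r1,r2 */
  unsigned int reader = 0x6327; /* not r2,r3 */
  unsigned int mover = 0x6543;  /* mov r4,r5 */
  const struct sh_opcode *load_op = sh_insn_info (load);
  const struct sh_opcode *reader_op = sh_insn_info (reader);
  const struct sh_opcode *mover_op = sh_insn_info (mover);

  if (load_op != NULL && reader_op != NULL && mover_op != NULL)
    {
      bfd_boolean c1 = sh_insns_conflict (load, load_op, reader, reader_op); /* TRUE */
      bfd_boolean c2 = sh_insns_conflict (load, load_op, mover, mover_op);   /* FALSE */

      (void) c1;
      (void) c2;
    }
}
#endif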
2257 /* I1 is a load instruction, and I2 is some other instruction. Return
2258 TRUE if I1 loads a register which I2 uses. */
2260 static bfd_boolean
2261 sh_load_use (unsigned int i1,
2262 const struct sh_opcode *op1,
2263 unsigned int i2,
2264 const struct sh_opcode *op2)
2266 unsigned int f1;
2268 f1 = op1->flags;
2270 if ((f1 & LOAD) == 0)
2271 return FALSE;
2273 /* If both SETS1 and SETSSP are set, that means a load to a special
2274 register using postincrement addressing mode, which we don't care
2275 about here. */
2276 if ((f1 & SETS1) != 0
2277 && (f1 & SETSSP) == 0
2278 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2279 return TRUE;
2281 if ((f1 & SETSR0) != 0
2282 && sh_insn_uses_reg (i2, op2, 0))
2283 return TRUE;
2285 if ((f1 & SETSF1) != 0
2286 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2287 return TRUE;
2289 return FALSE;
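/* A minimal sketch (guarded out) of the load-use test above: the load
   "mov.l @r1,r2" (0x6212) sets r2, and "not r2,r3" (0x6327) reads it,
   so placing the two back to back would stall the pipeline.  */
#if 0
static void
sh_load_use_example (void)
{
  unsigned int load = 0x6212; /* mov.l @r1,r2 */
  unsigned int user = 0x6327; /* not r2,r3 */
  const struct sh_opcode *load_op = sh_insn_info (load);
  const struct sh_opcode *user_op = sh_insn_info (user);

  if (load_op != NULL && user_op != NULL
      && sh_load_use (load, load_op, user, user_op))
    {
      /* TRUE here: the code below avoids creating this pattern.  */
    }
}
#endif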
2292 /* Try to align loads and stores within a span of memory. This is
2293 called by both the ELF and the COFF sh targets. ABFD and SEC are
2294 the BFD and section we are examining. CONTENTS is the contents of
2295 the section. SWAP is the routine to call to swap two instructions.
2296 RELOCS is a pointer to the internal relocation information, to be
2297 passed to SWAP. PLABEL is a pointer to the current label in a
2298 sorted list of labels; LABEL_END is the end of the list. START and
2299 STOP are the range of memory to examine. If a swap is made,
2300 *PSWAPPED is set to TRUE. */
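/* For instance (a sketch; addresses are offsets within the span, and
   the usual swap preconditions are assumed to hold):

       0x0: add  #1,r3                    0x0: mov.l @r1,r2
       0x2: mov.l @r1,r2       becomes    0x2: add  #1,r3

   which places the longword load on a four byte boundary.  The swap is
   only made when the two insns do not conflict, neither sits in a
   delay slot, and no label falls between them.  */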
2302 #ifdef COFF_WITH_PE
2303 static
2304 #endif
2305 bfd_boolean
2306 _bfd_sh_align_load_span (bfd *abfd,
2307 asection *sec,
2308 bfd_byte *contents,
2309 bfd_boolean (*swap) (bfd *, asection *, void *, bfd_byte *, bfd_vma),
2310 void * relocs,
2311 bfd_vma **plabel,
2312 bfd_vma *label_end,
2313 bfd_vma start,
2314 bfd_vma stop,
2315 bfd_boolean *pswapped)
2317 int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2318 || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2319 bfd_vma i;
2321 /* The SH4 has a Harvard architecture, hence aligning loads is not
2322 desirable. In fact, it is counter-productive, since it interferes
2323 with the schedules generated by the compiler. */
2324 if (abfd->arch_info->mach == bfd_mach_sh4)
2325 return TRUE;
2327 /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
2328 instructions. */
2329 if (dsp)
2331 sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
2332 sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef[0];
2335 /* Instructions should be aligned on 2 byte boundaries. */
2336 if ((start & 1) == 1)
2337 ++start;
2339 /* Now look through the unaligned addresses. */
2340 i = start;
2341 if ((i & 2) == 0)
2342 i += 2;
2343 for (; i < stop; i += 4)
2345 unsigned int insn;
2346 const struct sh_opcode *op;
2347 unsigned int prev_insn = 0;
2348 const struct sh_opcode *prev_op = NULL;
2350 insn = bfd_get_16 (abfd, contents + i);
2351 op = sh_insn_info (insn);
2352 if (op == NULL
2353 || (op->flags & (LOAD | STORE)) == 0)
2354 continue;
2356 /* This is a load or store which is not on a four byte boundary. */
2358 while (*plabel < label_end && **plabel < i)
2359 ++*plabel;
2361 if (i > start)
2363 prev_insn = bfd_get_16 (abfd, contents + i - 2);
2364 /* If INSN is the field b of a parallel processing insn, it is not
2365 a load / store after all. Note that the test here might mistake
2366 the field_b of a pcopy insn for the starting code of a parallel
2367 processing insn; this might miss a swapping opportunity, but at
2368 least we're on the safe side. */
2369 if (dsp && (prev_insn & 0xfc00) == 0xf800)
2370 continue;
2372 /* Check if prev_insn is actually the field b of a parallel
2373 processing insn. Again, this can give a spurious match
2374 after a pcopy. */
2375 if (dsp && i - 2 > start)
2377 unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2379 if ((pprev_insn & 0xfc00) == 0xf800)
2380 prev_op = NULL;
2381 else
2382 prev_op = sh_insn_info (prev_insn);
2384 else
2385 prev_op = sh_insn_info (prev_insn);
2387 /* If the load/store instruction is in a delay slot, we
2388 can't swap. */
2389 if (prev_op == NULL
2390 || (prev_op->flags & DELAY) != 0)
2391 continue;
2393 if (i > start
2394 && (*plabel >= label_end || **plabel != i)
2395 && prev_op != NULL
2396 && (prev_op->flags & (LOAD | STORE)) == 0
2397 && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2399 bfd_boolean ok;
2401 /* The load/store instruction does not have a label, and
2402 there is a previous instruction; PREV_INSN is not
2403 itself a load/store instruction, and PREV_INSN and
2404 INSN do not conflict. */
2406 ok = TRUE;
2408 if (i >= start + 4)
2410 unsigned int prev2_insn;
2411 const struct sh_opcode *prev2_op;
2413 prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2414 prev2_op = sh_insn_info (prev2_insn);
2416 /* If the instruction before PREV_INSN has a delay
2417 slot--that is, PREV_INSN is in a delay slot--we
2418 can not swap. */
2419 if (prev2_op == NULL
2420 || (prev2_op->flags & DELAY) != 0)
2421 ok = FALSE;
2423 /* If the instruction before PREV_INSN is a load,
2424 and it sets a register which INSN uses, then
2425 putting INSN immediately after PREV_INSN will
2426 cause a pipeline bubble, so there is no point to
2427 making the swap. */
2428 if (ok
2429 && (prev2_op->flags & LOAD) != 0
2430 && sh_load_use (prev2_insn, prev2_op, insn, op))
2431 ok = FALSE;
2434 if (ok)
2436 if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2437 return FALSE;
2438 *pswapped = TRUE;
2439 continue;
2443 while (*plabel < label_end && **plabel < i + 2)
2444 ++*plabel;
2446 if (i + 2 < stop
2447 && (*plabel >= label_end || **plabel != i + 2))
2449 unsigned int next_insn;
2450 const struct sh_opcode *next_op;
2452 /* There is an instruction after the load/store
2453 instruction, and it does not have a label. */
2454 next_insn = bfd_get_16 (abfd, contents + i + 2);
2455 next_op = sh_insn_info (next_insn);
2456 if (next_op != NULL
2457 && (next_op->flags & (LOAD | STORE)) == 0
2458 && ! sh_insns_conflict (insn, op, next_insn, next_op))
2460 bfd_boolean ok;
2462 /* NEXT_INSN is not itself a load/store instruction,
2463 and it does not conflict with INSN. */
2465 ok = TRUE;
2467 /* If PREV_INSN is a load, and it sets a register
2468 which NEXT_INSN uses, then putting NEXT_INSN
2469 immediately after PREV_INSN will cause a pipeline
2470 bubble, so there is no reason to make this swap. */
2471 if (prev_op != NULL
2472 && (prev_op->flags & LOAD) != 0
2473 && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2474 ok = FALSE;
2476 /* If INSN is a load, and it sets a register which
2477 the insn after NEXT_INSN uses, then doing the
2478 swap will cause a pipeline bubble, so there is no
2479 reason to make the swap. However, if the insn
2480 after NEXT_INSN is itself a load or store
2481 instruction, then it is misaligned, so
2482 optimistically hope that it will be swapped
2483 itself, and just live with the pipeline bubble if
2484 it isn't. */
2485 if (ok
2486 && i + 4 < stop
2487 && (op->flags & LOAD) != 0)
2489 unsigned int next2_insn;
2490 const struct sh_opcode *next2_op;
2492 next2_insn = bfd_get_16 (abfd, contents + i + 4);
2493 next2_op = sh_insn_info (next2_insn);
2494 if (next2_op == NULL
2495 || ((next2_op->flags & (LOAD | STORE)) == 0
2496 && sh_load_use (insn, op, next2_insn, next2_op)))
2497 ok = FALSE;
2500 if (ok)
2502 if (! (*swap) (abfd, sec, relocs, contents, i))
2503 return FALSE;
2504 *pswapped = TRUE;
2505 continue;
2511 return TRUE;
2513 #endif /* not COFF_IMAGE_WITH_PE */
2515 /* Swap two SH instructions. */
2517 static bfd_boolean
2518 sh_swap_insns (bfd * abfd,
2519 asection * sec,
2520 void * relocs,
2521 bfd_byte * contents,
2522 bfd_vma addr)
2524 struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2525 unsigned short i1, i2;
2526 struct internal_reloc *irel, *irelend;
2528 /* Swap the instructions themselves. */
2529 i1 = bfd_get_16 (abfd, contents + addr);
2530 i2 = bfd_get_16 (abfd, contents + addr + 2);
2531 bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
2532 bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);
2534 /* Adjust all reloc addresses. */
2535 irelend = internal_relocs + sec->reloc_count;
2536 for (irel = internal_relocs; irel < irelend; irel++)
2538 int type, add;
2540 /* There are a few special types of relocs that we don't want to
2541 adjust. These relocs do not apply to the instruction itself,
2542 but are only associated with the address. */
2543 type = irel->r_type;
2544 if (type == R_SH_ALIGN
2545 || type == R_SH_CODE
2546 || type == R_SH_DATA
2547 || type == R_SH_LABEL)
2548 continue;
2550 /* If an R_SH_USES reloc points to one of the addresses being
2551 swapped, we must adjust it. It would be incorrect to do this
2552 for a jump, though, since we want to execute both
2553 instructions after the jump. (We have avoided swapping
2554 around a label, so the jump will not wind up executing an
2555 instruction it shouldn't). */
2556 if (type == R_SH_USES)
2558 bfd_vma off;
2560 off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2561 if (off == addr)
2562 irel->r_offset += 2;
2563 else if (off == addr + 2)
2564 irel->r_offset -= 2;
2567 if (irel->r_vaddr - sec->vma == addr)
2569 irel->r_vaddr += 2;
2570 add = -2;
2572 else if (irel->r_vaddr - sec->vma == addr + 2)
2574 irel->r_vaddr -= 2;
2575 add = 2;
2577 else
2578 add = 0;
2580 if (add != 0)
2582 bfd_byte *loc;
2583 unsigned short insn, oinsn;
2584 bfd_boolean overflow;
2586 loc = contents + irel->r_vaddr - sec->vma;
2587 overflow = FALSE;
2588 switch (type)
2590 default:
2591 break;
2593 case R_SH_PCDISP8BY2:
2594 case R_SH_PCRELIMM8BY2:
2595 insn = bfd_get_16 (abfd, loc);
2596 oinsn = insn;
2597 insn += add / 2;
2598 if ((oinsn & 0xff00) != (insn & 0xff00))
2599 overflow = TRUE;
2600 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2601 break;
2603 case R_SH_PCDISP:
2604 insn = bfd_get_16 (abfd, loc);
2605 oinsn = insn;
2606 insn += add / 2;
2607 if ((oinsn & 0xf000) != (insn & 0xf000))
2608 overflow = TRUE;
2609 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2610 break;
2612 case R_SH_PCRELIMM8BY4:
2613 /* This reloc ignores the least significant 3 bits of
2614 the program counter before adding in the offset.
2615 This means that if ADDR is at an even address, the
2616 swap will not affect the offset. If ADDR is at an
2617 odd address, then the instruction will be crossing a
2618 four byte boundary, and must be adjusted. */
2619 if ((addr & 3) != 0)
2621 insn = bfd_get_16 (abfd, loc);
2622 oinsn = insn;
2623 insn += add / 2;
2624 if ((oinsn & 0xff00) != (insn & 0xff00))
2625 overflow = TRUE;
2626 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2629 break;
2632 if (overflow)
2634 ((*_bfd_error_handler)
2635 ("%B: 0x%lx: fatal: reloc overflow while relaxing",
2636 abfd, (unsigned long) irel->r_vaddr));
2637 bfd_set_error (bfd_error_bad_value);
2638 return FALSE;
2643 return TRUE;
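/* A worked example of the fix-up above (a sketch): suppose the insn
   swapped from ADDR up to ADDR + 2 is "mov.w @(disp,pc),rn" carrying an
   R_SH_PCRELIMM8BY2 reloc.  Its target does not move, but the insn
   itself is now two bytes later, so its displacement (counted in two
   byte units) must shrink by one; that is insn += add / 2 with
   add == -2.  The insn moved the other way, from ADDR + 2 down to
   ADDR, gets the symmetric adjustment with add == 2.  */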
2646 /* Look for loads and stores which we can align to four byte
2647 boundaries. See the longer comment above sh_relax_section for why
2648 this is desirable. This sets *PSWAPPED if some instruction was
2649 swapped. */
2651 static bfd_boolean
2652 sh_align_loads (bfd *abfd,
2653 asection *sec,
2654 struct internal_reloc *internal_relocs,
2655 bfd_byte *contents,
2656 bfd_boolean *pswapped)
2658 struct internal_reloc *irel, *irelend;
2659 bfd_vma *labels = NULL;
2660 bfd_vma *label, *label_end;
2661 bfd_size_type amt;
2663 *pswapped = FALSE;
2665 irelend = internal_relocs + sec->reloc_count;
2667 /* Get all the addresses with labels on them. */
2668 amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
2669 labels = (bfd_vma *) bfd_malloc (amt);
2670 if (labels == NULL)
2671 goto error_return;
2672 label_end = labels;
2673 for (irel = internal_relocs; irel < irelend; irel++)
2675 if (irel->r_type == R_SH_LABEL)
2677 *label_end = irel->r_vaddr - sec->vma;
2678 ++label_end;
2682 /* Note that the assembler currently always outputs relocs in
2683 address order. If that ever changes, this code will need to sort
2684 the label values and the relocs. */
2686 label = labels;
2688 for (irel = internal_relocs; irel < irelend; irel++)
2690 bfd_vma start, stop;
2692 if (irel->r_type != R_SH_CODE)
2693 continue;
2695 start = irel->r_vaddr - sec->vma;
2697 for (irel++; irel < irelend; irel++)
2698 if (irel->r_type == R_SH_DATA)
2699 break;
2700 if (irel < irelend)
2701 stop = irel->r_vaddr - sec->vma;
2702 else
2703 stop = sec->size;
2705 if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2706 internal_relocs, &label,
2707 label_end, start, stop, pswapped))
2708 goto error_return;
2711 free (labels);
2713 return TRUE;
2715 error_return:
2716 if (labels != NULL)
2717 free (labels);
2718 return FALSE;
2721 /* This is a modification of _bfd_coff_generic_relocate_section, which
2722 will handle SH relaxing. */
2724 static bfd_boolean
2725 sh_relocate_section (bfd *output_bfd ATTRIBUTE_UNUSED,
2726 struct bfd_link_info *info,
2727 bfd *input_bfd,
2728 asection *input_section,
2729 bfd_byte *contents,
2730 struct internal_reloc *relocs,
2731 struct internal_syment *syms,
2732 asection **sections)
2734 struct internal_reloc *rel;
2735 struct internal_reloc *relend;
2737 rel = relocs;
2738 relend = rel + input_section->reloc_count;
2739 for (; rel < relend; rel++)
2741 long symndx;
2742 struct coff_link_hash_entry *h;
2743 struct internal_syment *sym;
2744 bfd_vma addend;
2745 bfd_vma val;
2746 reloc_howto_type *howto;
2747 bfd_reloc_status_type rstat;
2749 /* Almost all relocs have to do with relaxing. If any work must
2750 be done for them, it has been done in sh_relax_section. */
2751 if (rel->r_type != R_SH_IMM32
2752 #ifdef COFF_WITH_PE
2753 && rel->r_type != R_SH_IMM32CE
2754 && rel->r_type != R_SH_IMAGEBASE
2755 #endif
2756 && rel->r_type != R_SH_PCDISP)
2757 continue;
2759 symndx = rel->r_symndx;
2761 if (symndx == -1)
2763 h = NULL;
2764 sym = NULL;
2766 else
2768 if (symndx < 0
2769 || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2771 (*_bfd_error_handler)
2772 ("%B: illegal symbol index %ld in relocs",
2773 input_bfd, symndx);
2774 bfd_set_error (bfd_error_bad_value);
2775 return FALSE;
2777 h = obj_coff_sym_hashes (input_bfd)[symndx];
2778 sym = syms + symndx;
2781 if (sym != NULL && sym->n_scnum != 0)
2782 addend = - sym->n_value;
2783 else
2784 addend = 0;
2786 if (rel->r_type == R_SH_PCDISP)
2787 addend -= 4;
2789 if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2790 howto = NULL;
2791 else
2792 howto = &sh_coff_howtos[rel->r_type];
2794 if (howto == NULL)
2796 bfd_set_error (bfd_error_bad_value);
2797 return FALSE;
2800 #ifdef COFF_WITH_PE
2801 if (rel->r_type == R_SH_IMAGEBASE)
2802 addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
2803 #endif
2805 val = 0;
2807 if (h == NULL)
2809 asection *sec;
2811 /* There is nothing to do for an internal PCDISP reloc. */
2812 if (rel->r_type == R_SH_PCDISP)
2813 continue;
2815 if (symndx == -1)
2817 sec = bfd_abs_section_ptr;
2818 val = 0;
2820 else
2822 sec = sections[symndx];
2823 val = (sec->output_section->vma
2824 + sec->output_offset
2825 + sym->n_value
2826 - sec->vma);
2829 else
2831 if (h->root.type == bfd_link_hash_defined
2832 || h->root.type == bfd_link_hash_defweak)
2834 asection *sec;
2836 sec = h->root.u.def.section;
2837 val = (h->root.u.def.value
2838 + sec->output_section->vma
2839 + sec->output_offset);
2841 else if (! info->relocatable)
2843 if (! ((*info->callbacks->undefined_symbol)
2844 (info, h->root.root.string, input_bfd, input_section,
2845 rel->r_vaddr - input_section->vma, TRUE)))
2846 return FALSE;
2850 rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2851 contents,
2852 rel->r_vaddr - input_section->vma,
2853 val, addend);
2855 switch (rstat)
2857 default:
2858 abort ();
2859 case bfd_reloc_ok:
2860 break;
2861 case bfd_reloc_overflow:
2863 const char *name;
2864 char buf[SYMNMLEN + 1];
2866 if (symndx == -1)
2867 name = "*ABS*";
2868 else if (h != NULL)
2869 name = NULL;
2870 else if (sym->_n._n_n._n_zeroes == 0
2871 && sym->_n._n_n._n_offset != 0)
2872 name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2873 else
2875 strncpy (buf, sym->_n._n_name, SYMNMLEN);
2876 buf[SYMNMLEN] = '\0';
2877 name = buf;
2880 if (! ((*info->callbacks->reloc_overflow)
2881 (info, (h ? &h->root : NULL), name, howto->name,
2882 (bfd_vma) 0, input_bfd, input_section,
2883 rel->r_vaddr - input_section->vma)))
2884 return FALSE;
2889 return TRUE;
2892 /* This is a version of bfd_generic_get_relocated_section_contents
2893 which uses sh_relocate_section. */
2895 static bfd_byte *
2896 sh_coff_get_relocated_section_contents (bfd *output_bfd,
2897 struct bfd_link_info *link_info,
2898 struct bfd_link_order *link_order,
2899 bfd_byte *data,
2900 bfd_boolean relocatable,
2901 asymbol **symbols)
2903 asection *input_section = link_order->u.indirect.section;
2904 bfd *input_bfd = input_section->owner;
2905 asection **sections = NULL;
2906 struct internal_reloc *internal_relocs = NULL;
2907 struct internal_syment *internal_syms = NULL;
2909 /* We only need to handle the case of relaxing, or of having a
2910 particular set of section contents, specially. */
2911 if (relocatable
2912 || coff_section_data (input_bfd, input_section) == NULL
2913 || coff_section_data (input_bfd, input_section)->contents == NULL)
2914 return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2915 link_order, data,
2916 relocatable,
2917 symbols);
2919 memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2920 (size_t) input_section->size);
2922 if ((input_section->flags & SEC_RELOC) != 0
2923 && input_section->reloc_count > 0)
2925 bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2926 bfd_byte *esym, *esymend;
2927 struct internal_syment *isymp;
2928 asection **secpp;
2929 bfd_size_type amt;
2931 if (! _bfd_coff_get_external_symbols (input_bfd))
2932 goto error_return;
2934 internal_relocs = (_bfd_coff_read_internal_relocs
2935 (input_bfd, input_section, FALSE, (bfd_byte *) NULL,
2936 FALSE, (struct internal_reloc *) NULL));
2937 if (internal_relocs == NULL)
2938 goto error_return;
2940 amt = obj_raw_syment_count (input_bfd);
2941 amt *= sizeof (struct internal_syment);
2942 internal_syms = (struct internal_syment *) bfd_malloc (amt);
2943 if (internal_syms == NULL)
2944 goto error_return;
2946 amt = obj_raw_syment_count (input_bfd);
2947 amt *= sizeof (asection *);
2948 sections = (asection **) bfd_malloc (amt);
2949 if (sections == NULL)
2950 goto error_return;
2952 isymp = internal_syms;
2953 secpp = sections;
2954 esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
2955 esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
2956 while (esym < esymend)
2958 bfd_coff_swap_sym_in (input_bfd, esym, isymp);
2960 if (isymp->n_scnum != 0)
2961 *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
2962 else
2964 if (isymp->n_value == 0)
2965 *secpp = bfd_und_section_ptr;
2966 else
2967 *secpp = bfd_com_section_ptr;
2970 esym += (isymp->n_numaux + 1) * symesz;
2971 secpp += isymp->n_numaux + 1;
2972 isymp += isymp->n_numaux + 1;
2975 if (! sh_relocate_section (output_bfd, link_info, input_bfd,
2976 input_section, data, internal_relocs,
2977 internal_syms, sections))
2978 goto error_return;
2980 free (sections);
2981 sections = NULL;
2982 free (internal_syms);
2983 internal_syms = NULL;
2984 free (internal_relocs);
2985 internal_relocs = NULL;
2988 return data;
2990 error_return:
2991 if (internal_relocs != NULL)
2992 free (internal_relocs);
2993 if (internal_syms != NULL)
2994 free (internal_syms);
2995 if (sections != NULL)
2996 free (sections);
2997 return NULL;
3000 /* The target vectors. */
3002 #ifndef TARGET_SHL_SYM
3003 CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
3004 #endif
3006 #ifdef TARGET_SHL_SYM
3007 #define TARGET_SYM TARGET_SHL_SYM
3008 #else
3009 #define TARGET_SYM shlcoff_vec
3010 #endif
3012 #ifndef TARGET_SHL_NAME
3013 #define TARGET_SHL_NAME "coff-shl"
3014 #endif
3016 #ifdef COFF_WITH_PE
3017 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3018 SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
3019 #else
3020 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3021 0, '_', NULL, COFF_SWAP_TABLE)
3022 #endif
3024 #ifndef TARGET_SHL_SYM
3026 /* Some people want versions of the SH COFF target which do not align
3027 to 16 byte boundaries. We implement that by adding a couple of new
3028 target vectors. These are just like the ones above, but they
3029 change the default section alignment. To generate them in the
3030 assembler, use -small. To use them in the linker, use -b
3031 coff-sh{l}-small and -oformat coff-sh{l}-small.
3033 Yes, this is a horrible hack. A general solution for setting
3034 section alignment in COFF is rather complex. ELF handles this
3035 correctly. */
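/* A hypothetical session showing the switches named above (the tool
   names are only placeholders for whatever gas and ld are installed as
   in an sh-coff toolchain):

       $ sh-coff-as -small -o foo.o foo.s
       $ sh-coff-ld -b coff-sh-small -oformat coff-sh-small -o foo foo.o

   Only -small, -b coff-sh{l}-small and -oformat coff-sh{l}-small come
   from the paragraph above; everything else is illustrative.  */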
3037 /* Only recognize the small versions if the target was not defaulted.
3038 Otherwise we won't recognize the non default endianness. */
3040 static const bfd_target *
3041 coff_small_object_p (bfd *abfd)
3043 if (abfd->target_defaulted)
3045 bfd_set_error (bfd_error_wrong_format);
3046 return NULL;
3048 return coff_object_p (abfd);
3051 /* Set the section alignment for the small versions. */
3053 static bfd_boolean
3054 coff_small_new_section_hook (bfd *abfd, asection *section)
3056 if (! coff_new_section_hook (abfd, section))
3057 return FALSE;
3059 /* We must align to at least a four byte boundary, because longword
3060 accesses must be on a four byte boundary. */
3061 if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
3062 section->alignment_power = 2;
3064 return TRUE;
3067 /* This is copied from bfd_coff_std_swap_table so that we can change
3068 the default section alignment power. */
3070 static bfd_coff_backend_data bfd_coff_small_swap_table =
3072 coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
3073 coff_swap_aux_out, coff_swap_sym_out,
3074 coff_swap_lineno_out, coff_swap_reloc_out,
3075 coff_swap_filehdr_out, coff_swap_aouthdr_out,
3076 coff_swap_scnhdr_out,
3077 FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
3078 #ifdef COFF_LONG_FILENAMES
3079 TRUE,
3080 #else
3081 FALSE,
3082 #endif
3083 COFF_DEFAULT_LONG_SECTION_NAMES,
3085 #ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
3086 TRUE,
3087 #else
3088 FALSE,
3089 #endif
3090 #ifdef COFF_DEBUG_STRING_WIDE_PREFIX
3092 #else
3094 #endif
3095 coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
3096 coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
3097 coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
3098 coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
3099 coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
3100 coff_classify_symbol, coff_compute_section_file_positions,
3101 coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
3102 coff_adjust_symndx, coff_link_add_one_symbol,
3103 coff_link_output_has_begun, coff_final_link_postscript,
3104 bfd_pe_print_pdata
3107 #define coff_small_close_and_cleanup \
3108 coff_close_and_cleanup
3109 #define coff_small_bfd_free_cached_info \
3110 coff_bfd_free_cached_info
3111 #define coff_small_get_section_contents \
3112 coff_get_section_contents
3113 #define coff_small_get_section_contents_in_window \
3114 coff_get_section_contents_in_window
3116 extern const bfd_target shlcoff_small_vec;
3118 const bfd_target shcoff_small_vec =
3120 "coff-sh-small", /* name */
3121 bfd_target_coff_flavour,
3122 BFD_ENDIAN_BIG, /* data byte order is big */
3123 BFD_ENDIAN_BIG, /* header byte order is big */
3125 (HAS_RELOC | EXEC_P | /* object flags */
3126 HAS_LINENO | HAS_DEBUG |
3127 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3129 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3130 '_', /* leading symbol underscore */
3131 '/', /* ar_pad_char */
3132 15, /* ar_max_namelen */
3133 0, /* match priority. */
3134 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3135 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3136 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3137 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3138 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3139 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
3141 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3142 bfd_generic_archive_p, _bfd_dummy_target},
3143 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3144 bfd_false},
3145 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3146 _bfd_write_archive_contents, bfd_false},
3148 BFD_JUMP_TABLE_GENERIC (coff_small),
3149 BFD_JUMP_TABLE_COPY (coff),
3150 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3151 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3152 BFD_JUMP_TABLE_SYMBOLS (coff),
3153 BFD_JUMP_TABLE_RELOCS (coff),
3154 BFD_JUMP_TABLE_WRITE (coff),
3155 BFD_JUMP_TABLE_LINK (coff),
3156 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3158 & shlcoff_small_vec,
3160 & bfd_coff_small_swap_table
3163 const bfd_target shlcoff_small_vec =
3165 "coff-shl-small", /* name */
3166 bfd_target_coff_flavour,
3167 BFD_ENDIAN_LITTLE, /* data byte order is little */
3168 BFD_ENDIAN_LITTLE, /* header byte order is little endian too */
3170 (HAS_RELOC | EXEC_P | /* object flags */
3171 HAS_LINENO | HAS_DEBUG |
3172 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3174 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3175 '_', /* leading symbol underscore */
3176 '/', /* ar_pad_char */
3177 15, /* ar_max_namelen */
3178 0, /* match priority. */
3179 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3180 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3181 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3182 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3183 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3184 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3186 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3187 bfd_generic_archive_p, _bfd_dummy_target},
3188 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3189 bfd_false},
3190 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3191 _bfd_write_archive_contents, bfd_false},
3193 BFD_JUMP_TABLE_GENERIC (coff_small),
3194 BFD_JUMP_TABLE_COPY (coff),
3195 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3196 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3197 BFD_JUMP_TABLE_SYMBOLS (coff),
3198 BFD_JUMP_TABLE_RELOCS (coff),
3199 BFD_JUMP_TABLE_WRITE (coff),
3200 BFD_JUMP_TABLE_LINK (coff),
3201 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3203 & shcoff_small_vec,
3205 & bfd_coff_small_swap_table
3207 #endif