1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2025 Free Software Foundation, Inc.
4 This file is part of BFD, the Binary File Descriptor library.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
25 #include "libiberty.h"
29 #include "elf-vxworks.h"
31 #include "elf32-arm.h"
34 /* Return the relocation section associated with NAME. HTAB is the
35 bfd's elf32_arm_link_hash_entry. */
36 #define RELOC_SECTION(HTAB, NAME) \
37 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
39 /* Return size of a relocation entry. HTAB is the bfd's
40 elf32_arm_link_hash_entry. */
41 #define RELOC_SIZE(HTAB) \
43 ? sizeof (Elf32_External_Rel) \
44 : sizeof (Elf32_External_Rela))
46 /* Return function to swap relocations in. HTAB is the bfd's
47 elf32_arm_link_hash_entry. */
48 #define SWAP_RELOC_IN(HTAB) \
50 ? bfd_elf32_swap_reloc_in \
51 : bfd_elf32_swap_reloca_in)
53 /* Return function to swap relocations out. HTAB is the bfd's
54 elf32_arm_link_hash_entry. */
55 #define SWAP_RELOC_OUT(HTAB) \
57 ? bfd_elf32_swap_reloc_out \
58 : bfd_elf32_swap_reloca_out)
60 #define elf_info_to_howto NULL
61 #define elf_info_to_howto_rel elf32_arm_info_to_howto
63 #define ARM_ELF_ABI_VERSION 0
64 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
66 /* The Adjusted Place, as defined by AAELF. */
67 #define Pa(X) ((X) & 0xfffffffc)
69 static bool elf32_arm_write_section (bfd
*output_bfd
,
70 struct bfd_link_info
*link_info
,
74 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
75 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
78 static reloc_howto_type elf32_arm_howto_table_1
[] =
81 HOWTO (R_ARM_NONE
, /* type */
85 false, /* pc_relative */
87 complain_overflow_dont
,/* complain_on_overflow */
88 bfd_elf_generic_reloc
, /* special_function */
89 "R_ARM_NONE", /* name */
90 false, /* partial_inplace */
93 false), /* pcrel_offset */
95 HOWTO (R_ARM_PC24
, /* type */
99 true, /* pc_relative */
101 complain_overflow_signed
,/* complain_on_overflow */
102 bfd_elf_generic_reloc
, /* special_function */
103 "R_ARM_PC24", /* name */
104 false, /* partial_inplace */
105 0x00ffffff, /* src_mask */
106 0x00ffffff, /* dst_mask */
107 true), /* pcrel_offset */
109 /* 32 bit absolute */
110 HOWTO (R_ARM_ABS32
, /* type */
114 false, /* pc_relative */
116 complain_overflow_bitfield
,/* complain_on_overflow */
117 bfd_elf_generic_reloc
, /* special_function */
118 "R_ARM_ABS32", /* name */
119 false, /* partial_inplace */
120 0xffffffff, /* src_mask */
121 0xffffffff, /* dst_mask */
122 false), /* pcrel_offset */
124 /* standard 32bit pc-relative reloc */
125 HOWTO (R_ARM_REL32
, /* type */
129 true, /* pc_relative */
131 complain_overflow_bitfield
,/* complain_on_overflow */
132 bfd_elf_generic_reloc
, /* special_function */
133 "R_ARM_REL32", /* name */
134 false, /* partial_inplace */
135 0xffffffff, /* src_mask */
136 0xffffffff, /* dst_mask */
137 true), /* pcrel_offset */
139 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140 HOWTO (R_ARM_LDR_PC_G0
, /* type */
144 true, /* pc_relative */
146 complain_overflow_dont
,/* complain_on_overflow */
147 bfd_elf_generic_reloc
, /* special_function */
148 "R_ARM_LDR_PC_G0", /* name */
149 false, /* partial_inplace */
150 0xffffffff, /* src_mask */
151 0xffffffff, /* dst_mask */
152 true), /* pcrel_offset */
154 /* 16 bit absolute */
155 HOWTO (R_ARM_ABS16
, /* type */
159 false, /* pc_relative */
161 complain_overflow_bitfield
,/* complain_on_overflow */
162 bfd_elf_generic_reloc
, /* special_function */
163 "R_ARM_ABS16", /* name */
164 false, /* partial_inplace */
165 0x0000ffff, /* src_mask */
166 0x0000ffff, /* dst_mask */
167 false), /* pcrel_offset */
169 /* 12 bit absolute */
170 HOWTO (R_ARM_ABS12
, /* type */
174 false, /* pc_relative */
176 complain_overflow_bitfield
,/* complain_on_overflow */
177 bfd_elf_generic_reloc
, /* special_function */
178 "R_ARM_ABS12", /* name */
179 false, /* partial_inplace */
180 0x00000fff, /* src_mask */
181 0x00000fff, /* dst_mask */
182 false), /* pcrel_offset */
184 HOWTO (R_ARM_THM_ABS5
, /* type */
188 false, /* pc_relative */
190 complain_overflow_bitfield
,/* complain_on_overflow */
191 bfd_elf_generic_reloc
, /* special_function */
192 "R_ARM_THM_ABS5", /* name */
193 false, /* partial_inplace */
194 0x000007e0, /* src_mask */
195 0x000007e0, /* dst_mask */
196 false), /* pcrel_offset */
199 HOWTO (R_ARM_ABS8
, /* type */
203 false, /* pc_relative */
205 complain_overflow_bitfield
,/* complain_on_overflow */
206 bfd_elf_generic_reloc
, /* special_function */
207 "R_ARM_ABS8", /* name */
208 false, /* partial_inplace */
209 0x000000ff, /* src_mask */
210 0x000000ff, /* dst_mask */
211 false), /* pcrel_offset */
213 HOWTO (R_ARM_SBREL32
, /* type */
217 false, /* pc_relative */
219 complain_overflow_dont
,/* complain_on_overflow */
220 bfd_elf_generic_reloc
, /* special_function */
221 "R_ARM_SBREL32", /* name */
222 false, /* partial_inplace */
223 0xffffffff, /* src_mask */
224 0xffffffff, /* dst_mask */
225 false), /* pcrel_offset */
227 HOWTO (R_ARM_THM_CALL
, /* type */
231 true, /* pc_relative */
233 complain_overflow_signed
,/* complain_on_overflow */
234 bfd_elf_generic_reloc
, /* special_function */
235 "R_ARM_THM_CALL", /* name */
236 false, /* partial_inplace */
237 0x07ff2fff, /* src_mask */
238 0x07ff2fff, /* dst_mask */
239 true), /* pcrel_offset */
241 HOWTO (R_ARM_THM_PC8
, /* type */
245 true, /* pc_relative */
247 complain_overflow_signed
,/* complain_on_overflow */
248 bfd_elf_generic_reloc
, /* special_function */
249 "R_ARM_THM_PC8", /* name */
250 false, /* partial_inplace */
251 0x000000ff, /* src_mask */
252 0x000000ff, /* dst_mask */
253 true), /* pcrel_offset */
255 HOWTO (R_ARM_BREL_ADJ
, /* type */
259 false, /* pc_relative */
261 complain_overflow_signed
,/* complain_on_overflow */
262 bfd_elf_generic_reloc
, /* special_function */
263 "R_ARM_BREL_ADJ", /* name */
264 false, /* partial_inplace */
265 0xffffffff, /* src_mask */
266 0xffffffff, /* dst_mask */
267 false), /* pcrel_offset */
269 HOWTO (R_ARM_TLS_DESC
, /* type */
273 false, /* pc_relative */
275 complain_overflow_bitfield
,/* complain_on_overflow */
276 bfd_elf_generic_reloc
, /* special_function */
277 "R_ARM_TLS_DESC", /* name */
278 false, /* partial_inplace */
279 0xffffffff, /* src_mask */
280 0xffffffff, /* dst_mask */
281 false), /* pcrel_offset */
283 HOWTO (R_ARM_THM_SWI8
, /* type */
287 false, /* pc_relative */
289 complain_overflow_signed
,/* complain_on_overflow */
290 bfd_elf_generic_reloc
, /* special_function */
291 "R_ARM_SWI8", /* name */
292 false, /* partial_inplace */
293 0x00000000, /* src_mask */
294 0x00000000, /* dst_mask */
295 false), /* pcrel_offset */
297 /* BLX instruction for the ARM. */
298 HOWTO (R_ARM_XPC25
, /* type */
302 true, /* pc_relative */
304 complain_overflow_signed
,/* complain_on_overflow */
305 bfd_elf_generic_reloc
, /* special_function */
306 "R_ARM_XPC25", /* name */
307 false, /* partial_inplace */
308 0x00ffffff, /* src_mask */
309 0x00ffffff, /* dst_mask */
310 true), /* pcrel_offset */
312 /* BLX instruction for the Thumb. */
313 HOWTO (R_ARM_THM_XPC22
, /* type */
317 true, /* pc_relative */
319 complain_overflow_signed
,/* complain_on_overflow */
320 bfd_elf_generic_reloc
, /* special_function */
321 "R_ARM_THM_XPC22", /* name */
322 false, /* partial_inplace */
323 0x07ff2fff, /* src_mask */
324 0x07ff2fff, /* dst_mask */
325 true), /* pcrel_offset */
327 /* Dynamic TLS relocations. */
329 HOWTO (R_ARM_TLS_DTPMOD32
, /* type */
333 false, /* pc_relative */
335 complain_overflow_bitfield
,/* complain_on_overflow */
336 bfd_elf_generic_reloc
, /* special_function */
337 "R_ARM_TLS_DTPMOD32", /* name */
338 true, /* partial_inplace */
339 0xffffffff, /* src_mask */
340 0xffffffff, /* dst_mask */
341 false), /* pcrel_offset */
343 HOWTO (R_ARM_TLS_DTPOFF32
, /* type */
347 false, /* pc_relative */
349 complain_overflow_bitfield
,/* complain_on_overflow */
350 bfd_elf_generic_reloc
, /* special_function */
351 "R_ARM_TLS_DTPOFF32", /* name */
352 true, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 false), /* pcrel_offset */
357 HOWTO (R_ARM_TLS_TPOFF32
, /* type */
361 false, /* pc_relative */
363 complain_overflow_bitfield
,/* complain_on_overflow */
364 bfd_elf_generic_reloc
, /* special_function */
365 "R_ARM_TLS_TPOFF32", /* name */
366 true, /* partial_inplace */
367 0xffffffff, /* src_mask */
368 0xffffffff, /* dst_mask */
369 false), /* pcrel_offset */
371 /* Relocs used in ARM Linux */
373 HOWTO (R_ARM_COPY
, /* type */
377 false, /* pc_relative */
379 complain_overflow_bitfield
,/* complain_on_overflow */
380 bfd_elf_generic_reloc
, /* special_function */
381 "R_ARM_COPY", /* name */
382 true, /* partial_inplace */
383 0xffffffff, /* src_mask */
384 0xffffffff, /* dst_mask */
385 false), /* pcrel_offset */
387 HOWTO (R_ARM_GLOB_DAT
, /* type */
391 false, /* pc_relative */
393 complain_overflow_bitfield
,/* complain_on_overflow */
394 bfd_elf_generic_reloc
, /* special_function */
395 "R_ARM_GLOB_DAT", /* name */
396 true, /* partial_inplace */
397 0xffffffff, /* src_mask */
398 0xffffffff, /* dst_mask */
399 false), /* pcrel_offset */
401 HOWTO (R_ARM_JUMP_SLOT
, /* type */
405 false, /* pc_relative */
407 complain_overflow_bitfield
,/* complain_on_overflow */
408 bfd_elf_generic_reloc
, /* special_function */
409 "R_ARM_JUMP_SLOT", /* name */
410 true, /* partial_inplace */
411 0xffffffff, /* src_mask */
412 0xffffffff, /* dst_mask */
413 false), /* pcrel_offset */
415 HOWTO (R_ARM_RELATIVE
, /* type */
419 false, /* pc_relative */
421 complain_overflow_bitfield
,/* complain_on_overflow */
422 bfd_elf_generic_reloc
, /* special_function */
423 "R_ARM_RELATIVE", /* name */
424 true, /* partial_inplace */
425 0xffffffff, /* src_mask */
426 0xffffffff, /* dst_mask */
427 false), /* pcrel_offset */
429 HOWTO (R_ARM_GOTOFF32
, /* type */
433 false, /* pc_relative */
435 complain_overflow_bitfield
,/* complain_on_overflow */
436 bfd_elf_generic_reloc
, /* special_function */
437 "R_ARM_GOTOFF32", /* name */
438 true, /* partial_inplace */
439 0xffffffff, /* src_mask */
440 0xffffffff, /* dst_mask */
441 false), /* pcrel_offset */
443 HOWTO (R_ARM_GOTPC
, /* type */
447 true, /* pc_relative */
449 complain_overflow_bitfield
,/* complain_on_overflow */
450 bfd_elf_generic_reloc
, /* special_function */
451 "R_ARM_GOTPC", /* name */
452 true, /* partial_inplace */
453 0xffffffff, /* src_mask */
454 0xffffffff, /* dst_mask */
455 true), /* pcrel_offset */
457 HOWTO (R_ARM_GOT32
, /* type */
461 false, /* pc_relative */
463 complain_overflow_bitfield
,/* complain_on_overflow */
464 bfd_elf_generic_reloc
, /* special_function */
465 "R_ARM_GOT32", /* name */
466 true, /* partial_inplace */
467 0xffffffff, /* src_mask */
468 0xffffffff, /* dst_mask */
469 false), /* pcrel_offset */
471 HOWTO (R_ARM_PLT32
, /* type */
475 true, /* pc_relative */
477 complain_overflow_bitfield
,/* complain_on_overflow */
478 bfd_elf_generic_reloc
, /* special_function */
479 "R_ARM_PLT32", /* name */
480 false, /* partial_inplace */
481 0x00ffffff, /* src_mask */
482 0x00ffffff, /* dst_mask */
483 true), /* pcrel_offset */
485 HOWTO (R_ARM_CALL
, /* type */
489 true, /* pc_relative */
491 complain_overflow_signed
,/* complain_on_overflow */
492 bfd_elf_generic_reloc
, /* special_function */
493 "R_ARM_CALL", /* name */
494 false, /* partial_inplace */
495 0x00ffffff, /* src_mask */
496 0x00ffffff, /* dst_mask */
497 true), /* pcrel_offset */
499 HOWTO (R_ARM_JUMP24
, /* type */
503 true, /* pc_relative */
505 complain_overflow_signed
,/* complain_on_overflow */
506 bfd_elf_generic_reloc
, /* special_function */
507 "R_ARM_JUMP24", /* name */
508 false, /* partial_inplace */
509 0x00ffffff, /* src_mask */
510 0x00ffffff, /* dst_mask */
511 true), /* pcrel_offset */
513 HOWTO (R_ARM_THM_JUMP24
, /* type */
517 true, /* pc_relative */
519 complain_overflow_signed
,/* complain_on_overflow */
520 bfd_elf_generic_reloc
, /* special_function */
521 "R_ARM_THM_JUMP24", /* name */
522 false, /* partial_inplace */
523 0x07ff2fff, /* src_mask */
524 0x07ff2fff, /* dst_mask */
525 true), /* pcrel_offset */
527 HOWTO (R_ARM_BASE_ABS
, /* type */
531 false, /* pc_relative */
533 complain_overflow_dont
,/* complain_on_overflow */
534 bfd_elf_generic_reloc
, /* special_function */
535 "R_ARM_BASE_ABS", /* name */
536 false, /* partial_inplace */
537 0xffffffff, /* src_mask */
538 0xffffffff, /* dst_mask */
539 false), /* pcrel_offset */
541 HOWTO (R_ARM_ALU_PCREL7_0
, /* type */
545 true, /* pc_relative */
547 complain_overflow_dont
,/* complain_on_overflow */
548 bfd_elf_generic_reloc
, /* special_function */
549 "R_ARM_ALU_PCREL_7_0", /* name */
550 false, /* partial_inplace */
551 0x00000fff, /* src_mask */
552 0x00000fff, /* dst_mask */
553 true), /* pcrel_offset */
555 HOWTO (R_ARM_ALU_PCREL15_8
, /* type */
559 true, /* pc_relative */
561 complain_overflow_dont
,/* complain_on_overflow */
562 bfd_elf_generic_reloc
, /* special_function */
563 "R_ARM_ALU_PCREL_15_8",/* name */
564 false, /* partial_inplace */
565 0x00000fff, /* src_mask */
566 0x00000fff, /* dst_mask */
567 true), /* pcrel_offset */
569 HOWTO (R_ARM_ALU_PCREL23_15
, /* type */
573 true, /* pc_relative */
575 complain_overflow_dont
,/* complain_on_overflow */
576 bfd_elf_generic_reloc
, /* special_function */
577 "R_ARM_ALU_PCREL_23_15",/* name */
578 false, /* partial_inplace */
579 0x00000fff, /* src_mask */
580 0x00000fff, /* dst_mask */
581 true), /* pcrel_offset */
583 HOWTO (R_ARM_LDR_SBREL_11_0
, /* type */
587 false, /* pc_relative */
589 complain_overflow_dont
,/* complain_on_overflow */
590 bfd_elf_generic_reloc
, /* special_function */
591 "R_ARM_LDR_SBREL_11_0",/* name */
592 false, /* partial_inplace */
593 0x00000fff, /* src_mask */
594 0x00000fff, /* dst_mask */
595 false), /* pcrel_offset */
597 HOWTO (R_ARM_ALU_SBREL_19_12
, /* type */
601 false, /* pc_relative */
603 complain_overflow_dont
,/* complain_on_overflow */
604 bfd_elf_generic_reloc
, /* special_function */
605 "R_ARM_ALU_SBREL_19_12",/* name */
606 false, /* partial_inplace */
607 0x000ff000, /* src_mask */
608 0x000ff000, /* dst_mask */
609 false), /* pcrel_offset */
611 HOWTO (R_ARM_ALU_SBREL_27_20
, /* type */
615 false, /* pc_relative */
617 complain_overflow_dont
,/* complain_on_overflow */
618 bfd_elf_generic_reloc
, /* special_function */
619 "R_ARM_ALU_SBREL_27_20",/* name */
620 false, /* partial_inplace */
621 0x0ff00000, /* src_mask */
622 0x0ff00000, /* dst_mask */
623 false), /* pcrel_offset */
625 HOWTO (R_ARM_TARGET1
, /* type */
629 false, /* pc_relative */
631 complain_overflow_dont
,/* complain_on_overflow */
632 bfd_elf_generic_reloc
, /* special_function */
633 "R_ARM_TARGET1", /* name */
634 false, /* partial_inplace */
635 0xffffffff, /* src_mask */
636 0xffffffff, /* dst_mask */
637 false), /* pcrel_offset */
639 HOWTO (R_ARM_ROSEGREL32
, /* type */
643 false, /* pc_relative */
645 complain_overflow_dont
,/* complain_on_overflow */
646 bfd_elf_generic_reloc
, /* special_function */
647 "R_ARM_ROSEGREL32", /* name */
648 false, /* partial_inplace */
649 0xffffffff, /* src_mask */
650 0xffffffff, /* dst_mask */
651 false), /* pcrel_offset */
653 HOWTO (R_ARM_V4BX
, /* type */
657 false, /* pc_relative */
659 complain_overflow_dont
,/* complain_on_overflow */
660 bfd_elf_generic_reloc
, /* special_function */
661 "R_ARM_V4BX", /* name */
662 false, /* partial_inplace */
663 0xffffffff, /* src_mask */
664 0xffffffff, /* dst_mask */
665 false), /* pcrel_offset */
667 HOWTO (R_ARM_TARGET2
, /* type */
671 false, /* pc_relative */
673 complain_overflow_signed
,/* complain_on_overflow */
674 bfd_elf_generic_reloc
, /* special_function */
675 "R_ARM_TARGET2", /* name */
676 false, /* partial_inplace */
677 0xffffffff, /* src_mask */
678 0xffffffff, /* dst_mask */
679 true), /* pcrel_offset */
681 HOWTO (R_ARM_PREL31
, /* type */
685 true, /* pc_relative */
687 complain_overflow_signed
,/* complain_on_overflow */
688 bfd_elf_generic_reloc
, /* special_function */
689 "R_ARM_PREL31", /* name */
690 false, /* partial_inplace */
691 0x7fffffff, /* src_mask */
692 0x7fffffff, /* dst_mask */
693 true), /* pcrel_offset */
695 HOWTO (R_ARM_MOVW_ABS_NC
, /* type */
699 false, /* pc_relative */
701 complain_overflow_dont
,/* complain_on_overflow */
702 bfd_elf_generic_reloc
, /* special_function */
703 "R_ARM_MOVW_ABS_NC", /* name */
704 false, /* partial_inplace */
705 0x000f0fff, /* src_mask */
706 0x000f0fff, /* dst_mask */
707 false), /* pcrel_offset */
709 HOWTO (R_ARM_MOVT_ABS
, /* type */
713 false, /* pc_relative */
715 complain_overflow_bitfield
,/* complain_on_overflow */
716 bfd_elf_generic_reloc
, /* special_function */
717 "R_ARM_MOVT_ABS", /* name */
718 false, /* partial_inplace */
719 0x000f0fff, /* src_mask */
720 0x000f0fff, /* dst_mask */
721 false), /* pcrel_offset */
723 HOWTO (R_ARM_MOVW_PREL_NC
, /* type */
727 true, /* pc_relative */
729 complain_overflow_dont
,/* complain_on_overflow */
730 bfd_elf_generic_reloc
, /* special_function */
731 "R_ARM_MOVW_PREL_NC", /* name */
732 false, /* partial_inplace */
733 0x000f0fff, /* src_mask */
734 0x000f0fff, /* dst_mask */
735 true), /* pcrel_offset */
737 HOWTO (R_ARM_MOVT_PREL
, /* type */
741 true, /* pc_relative */
743 complain_overflow_bitfield
,/* complain_on_overflow */
744 bfd_elf_generic_reloc
, /* special_function */
745 "R_ARM_MOVT_PREL", /* name */
746 false, /* partial_inplace */
747 0x000f0fff, /* src_mask */
748 0x000f0fff, /* dst_mask */
749 true), /* pcrel_offset */
751 HOWTO (R_ARM_THM_MOVW_ABS_NC
, /* type */
755 false, /* pc_relative */
757 complain_overflow_dont
,/* complain_on_overflow */
758 bfd_elf_generic_reloc
, /* special_function */
759 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 false, /* partial_inplace */
761 0x040f70ff, /* src_mask */
762 0x040f70ff, /* dst_mask */
763 false), /* pcrel_offset */
765 HOWTO (R_ARM_THM_MOVT_ABS
, /* type */
769 false, /* pc_relative */
771 complain_overflow_bitfield
,/* complain_on_overflow */
772 bfd_elf_generic_reloc
, /* special_function */
773 "R_ARM_THM_MOVT_ABS", /* name */
774 false, /* partial_inplace */
775 0x040f70ff, /* src_mask */
776 0x040f70ff, /* dst_mask */
777 false), /* pcrel_offset */
779 HOWTO (R_ARM_THM_MOVW_PREL_NC
,/* type */
783 true, /* pc_relative */
785 complain_overflow_dont
,/* complain_on_overflow */
786 bfd_elf_generic_reloc
, /* special_function */
787 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 false, /* partial_inplace */
789 0x040f70ff, /* src_mask */
790 0x040f70ff, /* dst_mask */
791 true), /* pcrel_offset */
793 HOWTO (R_ARM_THM_MOVT_PREL
, /* type */
797 true, /* pc_relative */
799 complain_overflow_bitfield
,/* complain_on_overflow */
800 bfd_elf_generic_reloc
, /* special_function */
801 "R_ARM_THM_MOVT_PREL", /* name */
802 false, /* partial_inplace */
803 0x040f70ff, /* src_mask */
804 0x040f70ff, /* dst_mask */
805 true), /* pcrel_offset */
807 HOWTO (R_ARM_THM_JUMP19
, /* type */
811 true, /* pc_relative */
813 complain_overflow_signed
,/* complain_on_overflow */
814 bfd_elf_generic_reloc
, /* special_function */
815 "R_ARM_THM_JUMP19", /* name */
816 false, /* partial_inplace */
817 0x043f2fff, /* src_mask */
818 0x043f2fff, /* dst_mask */
819 true), /* pcrel_offset */
821 HOWTO (R_ARM_THM_JUMP6
, /* type */
825 true, /* pc_relative */
827 complain_overflow_unsigned
,/* complain_on_overflow */
828 bfd_elf_generic_reloc
, /* special_function */
829 "R_ARM_THM_JUMP6", /* name */
830 false, /* partial_inplace */
831 0x02f8, /* src_mask */
832 0x02f8, /* dst_mask */
833 true), /* pcrel_offset */
835 /* These are declared as 13-bit signed relocations because we can
836 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
838 HOWTO (R_ARM_THM_ALU_PREL_11_0
,/* type */
842 true, /* pc_relative */
844 complain_overflow_dont
,/* complain_on_overflow */
845 bfd_elf_generic_reloc
, /* special_function */
846 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 false, /* partial_inplace */
848 0xffffffff, /* src_mask */
849 0xffffffff, /* dst_mask */
850 true), /* pcrel_offset */
852 HOWTO (R_ARM_THM_PC12
, /* type */
856 true, /* pc_relative */
858 complain_overflow_dont
,/* complain_on_overflow */
859 bfd_elf_generic_reloc
, /* special_function */
860 "R_ARM_THM_PC12", /* name */
861 false, /* partial_inplace */
862 0xffffffff, /* src_mask */
863 0xffffffff, /* dst_mask */
864 true), /* pcrel_offset */
866 HOWTO (R_ARM_ABS32_NOI
, /* type */
870 false, /* pc_relative */
872 complain_overflow_dont
,/* complain_on_overflow */
873 bfd_elf_generic_reloc
, /* special_function */
874 "R_ARM_ABS32_NOI", /* name */
875 false, /* partial_inplace */
876 0xffffffff, /* src_mask */
877 0xffffffff, /* dst_mask */
878 false), /* pcrel_offset */
880 HOWTO (R_ARM_REL32_NOI
, /* type */
884 true, /* pc_relative */
886 complain_overflow_dont
,/* complain_on_overflow */
887 bfd_elf_generic_reloc
, /* special_function */
888 "R_ARM_REL32_NOI", /* name */
889 false, /* partial_inplace */
890 0xffffffff, /* src_mask */
891 0xffffffff, /* dst_mask */
892 false), /* pcrel_offset */
894 /* Group relocations. */
896 HOWTO (R_ARM_ALU_PC_G0_NC
, /* type */
900 true, /* pc_relative */
902 complain_overflow_dont
,/* complain_on_overflow */
903 bfd_elf_generic_reloc
, /* special_function */
904 "R_ARM_ALU_PC_G0_NC", /* name */
905 false, /* partial_inplace */
906 0xffffffff, /* src_mask */
907 0xffffffff, /* dst_mask */
908 true), /* pcrel_offset */
910 HOWTO (R_ARM_ALU_PC_G0
, /* type */
914 true, /* pc_relative */
916 complain_overflow_dont
,/* complain_on_overflow */
917 bfd_elf_generic_reloc
, /* special_function */
918 "R_ARM_ALU_PC_G0", /* name */
919 false, /* partial_inplace */
920 0xffffffff, /* src_mask */
921 0xffffffff, /* dst_mask */
922 true), /* pcrel_offset */
924 HOWTO (R_ARM_ALU_PC_G1_NC
, /* type */
928 true, /* pc_relative */
930 complain_overflow_dont
,/* complain_on_overflow */
931 bfd_elf_generic_reloc
, /* special_function */
932 "R_ARM_ALU_PC_G1_NC", /* name */
933 false, /* partial_inplace */
934 0xffffffff, /* src_mask */
935 0xffffffff, /* dst_mask */
936 true), /* pcrel_offset */
938 HOWTO (R_ARM_ALU_PC_G1
, /* type */
942 true, /* pc_relative */
944 complain_overflow_dont
,/* complain_on_overflow */
945 bfd_elf_generic_reloc
, /* special_function */
946 "R_ARM_ALU_PC_G1", /* name */
947 false, /* partial_inplace */
948 0xffffffff, /* src_mask */
949 0xffffffff, /* dst_mask */
950 true), /* pcrel_offset */
952 HOWTO (R_ARM_ALU_PC_G2
, /* type */
956 true, /* pc_relative */
958 complain_overflow_dont
,/* complain_on_overflow */
959 bfd_elf_generic_reloc
, /* special_function */
960 "R_ARM_ALU_PC_G2", /* name */
961 false, /* partial_inplace */
962 0xffffffff, /* src_mask */
963 0xffffffff, /* dst_mask */
964 true), /* pcrel_offset */
966 HOWTO (R_ARM_LDR_PC_G1
, /* type */
970 true, /* pc_relative */
972 complain_overflow_dont
,/* complain_on_overflow */
973 bfd_elf_generic_reloc
, /* special_function */
974 "R_ARM_LDR_PC_G1", /* name */
975 false, /* partial_inplace */
976 0xffffffff, /* src_mask */
977 0xffffffff, /* dst_mask */
978 true), /* pcrel_offset */
980 HOWTO (R_ARM_LDR_PC_G2
, /* type */
984 true, /* pc_relative */
986 complain_overflow_dont
,/* complain_on_overflow */
987 bfd_elf_generic_reloc
, /* special_function */
988 "R_ARM_LDR_PC_G2", /* name */
989 false, /* partial_inplace */
990 0xffffffff, /* src_mask */
991 0xffffffff, /* dst_mask */
992 true), /* pcrel_offset */
994 HOWTO (R_ARM_LDRS_PC_G0
, /* type */
998 true, /* pc_relative */
1000 complain_overflow_dont
,/* complain_on_overflow */
1001 bfd_elf_generic_reloc
, /* special_function */
1002 "R_ARM_LDRS_PC_G0", /* name */
1003 false, /* partial_inplace */
1004 0xffffffff, /* src_mask */
1005 0xffffffff, /* dst_mask */
1006 true), /* pcrel_offset */
1008 HOWTO (R_ARM_LDRS_PC_G1
, /* type */
1012 true, /* pc_relative */
1014 complain_overflow_dont
,/* complain_on_overflow */
1015 bfd_elf_generic_reloc
, /* special_function */
1016 "R_ARM_LDRS_PC_G1", /* name */
1017 false, /* partial_inplace */
1018 0xffffffff, /* src_mask */
1019 0xffffffff, /* dst_mask */
1020 true), /* pcrel_offset */
1022 HOWTO (R_ARM_LDRS_PC_G2
, /* type */
1026 true, /* pc_relative */
1028 complain_overflow_dont
,/* complain_on_overflow */
1029 bfd_elf_generic_reloc
, /* special_function */
1030 "R_ARM_LDRS_PC_G2", /* name */
1031 false, /* partial_inplace */
1032 0xffffffff, /* src_mask */
1033 0xffffffff, /* dst_mask */
1034 true), /* pcrel_offset */
1036 HOWTO (R_ARM_LDC_PC_G0
, /* type */
1040 true, /* pc_relative */
1042 complain_overflow_dont
,/* complain_on_overflow */
1043 bfd_elf_generic_reloc
, /* special_function */
1044 "R_ARM_LDC_PC_G0", /* name */
1045 false, /* partial_inplace */
1046 0xffffffff, /* src_mask */
1047 0xffffffff, /* dst_mask */
1048 true), /* pcrel_offset */
1050 HOWTO (R_ARM_LDC_PC_G1
, /* type */
1054 true, /* pc_relative */
1056 complain_overflow_dont
,/* complain_on_overflow */
1057 bfd_elf_generic_reloc
, /* special_function */
1058 "R_ARM_LDC_PC_G1", /* name */
1059 false, /* partial_inplace */
1060 0xffffffff, /* src_mask */
1061 0xffffffff, /* dst_mask */
1062 true), /* pcrel_offset */
1064 HOWTO (R_ARM_LDC_PC_G2
, /* type */
1068 true, /* pc_relative */
1070 complain_overflow_dont
,/* complain_on_overflow */
1071 bfd_elf_generic_reloc
, /* special_function */
1072 "R_ARM_LDC_PC_G2", /* name */
1073 false, /* partial_inplace */
1074 0xffffffff, /* src_mask */
1075 0xffffffff, /* dst_mask */
1076 true), /* pcrel_offset */
1078 HOWTO (R_ARM_ALU_SB_G0_NC
, /* type */
1082 true, /* pc_relative */
1084 complain_overflow_dont
,/* complain_on_overflow */
1085 bfd_elf_generic_reloc
, /* special_function */
1086 "R_ARM_ALU_SB_G0_NC", /* name */
1087 false, /* partial_inplace */
1088 0xffffffff, /* src_mask */
1089 0xffffffff, /* dst_mask */
1090 true), /* pcrel_offset */
1092 HOWTO (R_ARM_ALU_SB_G0
, /* type */
1096 true, /* pc_relative */
1098 complain_overflow_dont
,/* complain_on_overflow */
1099 bfd_elf_generic_reloc
, /* special_function */
1100 "R_ARM_ALU_SB_G0", /* name */
1101 false, /* partial_inplace */
1102 0xffffffff, /* src_mask */
1103 0xffffffff, /* dst_mask */
1104 true), /* pcrel_offset */
1106 HOWTO (R_ARM_ALU_SB_G1_NC
, /* type */
1110 true, /* pc_relative */
1112 complain_overflow_dont
,/* complain_on_overflow */
1113 bfd_elf_generic_reloc
, /* special_function */
1114 "R_ARM_ALU_SB_G1_NC", /* name */
1115 false, /* partial_inplace */
1116 0xffffffff, /* src_mask */
1117 0xffffffff, /* dst_mask */
1118 true), /* pcrel_offset */
1120 HOWTO (R_ARM_ALU_SB_G1
, /* type */
1124 true, /* pc_relative */
1126 complain_overflow_dont
,/* complain_on_overflow */
1127 bfd_elf_generic_reloc
, /* special_function */
1128 "R_ARM_ALU_SB_G1", /* name */
1129 false, /* partial_inplace */
1130 0xffffffff, /* src_mask */
1131 0xffffffff, /* dst_mask */
1132 true), /* pcrel_offset */
1134 HOWTO (R_ARM_ALU_SB_G2
, /* type */
1138 true, /* pc_relative */
1140 complain_overflow_dont
,/* complain_on_overflow */
1141 bfd_elf_generic_reloc
, /* special_function */
1142 "R_ARM_ALU_SB_G2", /* name */
1143 false, /* partial_inplace */
1144 0xffffffff, /* src_mask */
1145 0xffffffff, /* dst_mask */
1146 true), /* pcrel_offset */
1148 HOWTO (R_ARM_LDR_SB_G0
, /* type */
1152 true, /* pc_relative */
1154 complain_overflow_dont
,/* complain_on_overflow */
1155 bfd_elf_generic_reloc
, /* special_function */
1156 "R_ARM_LDR_SB_G0", /* name */
1157 false, /* partial_inplace */
1158 0xffffffff, /* src_mask */
1159 0xffffffff, /* dst_mask */
1160 true), /* pcrel_offset */
1162 HOWTO (R_ARM_LDR_SB_G1
, /* type */
1166 true, /* pc_relative */
1168 complain_overflow_dont
,/* complain_on_overflow */
1169 bfd_elf_generic_reloc
, /* special_function */
1170 "R_ARM_LDR_SB_G1", /* name */
1171 false, /* partial_inplace */
1172 0xffffffff, /* src_mask */
1173 0xffffffff, /* dst_mask */
1174 true), /* pcrel_offset */
1176 HOWTO (R_ARM_LDR_SB_G2
, /* type */
1180 true, /* pc_relative */
1182 complain_overflow_dont
,/* complain_on_overflow */
1183 bfd_elf_generic_reloc
, /* special_function */
1184 "R_ARM_LDR_SB_G2", /* name */
1185 false, /* partial_inplace */
1186 0xffffffff, /* src_mask */
1187 0xffffffff, /* dst_mask */
1188 true), /* pcrel_offset */
1190 HOWTO (R_ARM_LDRS_SB_G0
, /* type */
1194 true, /* pc_relative */
1196 complain_overflow_dont
,/* complain_on_overflow */
1197 bfd_elf_generic_reloc
, /* special_function */
1198 "R_ARM_LDRS_SB_G0", /* name */
1199 false, /* partial_inplace */
1200 0xffffffff, /* src_mask */
1201 0xffffffff, /* dst_mask */
1202 true), /* pcrel_offset */
1204 HOWTO (R_ARM_LDRS_SB_G1
, /* type */
1208 true, /* pc_relative */
1210 complain_overflow_dont
,/* complain_on_overflow */
1211 bfd_elf_generic_reloc
, /* special_function */
1212 "R_ARM_LDRS_SB_G1", /* name */
1213 false, /* partial_inplace */
1214 0xffffffff, /* src_mask */
1215 0xffffffff, /* dst_mask */
1216 true), /* pcrel_offset */
1218 HOWTO (R_ARM_LDRS_SB_G2
, /* type */
1222 true, /* pc_relative */
1224 complain_overflow_dont
,/* complain_on_overflow */
1225 bfd_elf_generic_reloc
, /* special_function */
1226 "R_ARM_LDRS_SB_G2", /* name */
1227 false, /* partial_inplace */
1228 0xffffffff, /* src_mask */
1229 0xffffffff, /* dst_mask */
1230 true), /* pcrel_offset */
1232 HOWTO (R_ARM_LDC_SB_G0
, /* type */
1236 true, /* pc_relative */
1238 complain_overflow_dont
,/* complain_on_overflow */
1239 bfd_elf_generic_reloc
, /* special_function */
1240 "R_ARM_LDC_SB_G0", /* name */
1241 false, /* partial_inplace */
1242 0xffffffff, /* src_mask */
1243 0xffffffff, /* dst_mask */
1244 true), /* pcrel_offset */
1246 HOWTO (R_ARM_LDC_SB_G1
, /* type */
1250 true, /* pc_relative */
1252 complain_overflow_dont
,/* complain_on_overflow */
1253 bfd_elf_generic_reloc
, /* special_function */
1254 "R_ARM_LDC_SB_G1", /* name */
1255 false, /* partial_inplace */
1256 0xffffffff, /* src_mask */
1257 0xffffffff, /* dst_mask */
1258 true), /* pcrel_offset */
1260 HOWTO (R_ARM_LDC_SB_G2
, /* type */
1264 true, /* pc_relative */
1266 complain_overflow_dont
,/* complain_on_overflow */
1267 bfd_elf_generic_reloc
, /* special_function */
1268 "R_ARM_LDC_SB_G2", /* name */
1269 false, /* partial_inplace */
1270 0xffffffff, /* src_mask */
1271 0xffffffff, /* dst_mask */
1272 true), /* pcrel_offset */
1274 /* End of group relocations. */
1276 HOWTO (R_ARM_MOVW_BREL_NC
, /* type */
1280 false, /* pc_relative */
1282 complain_overflow_dont
,/* complain_on_overflow */
1283 bfd_elf_generic_reloc
, /* special_function */
1284 "R_ARM_MOVW_BREL_NC", /* name */
1285 false, /* partial_inplace */
1286 0x0000ffff, /* src_mask */
1287 0x0000ffff, /* dst_mask */
1288 false), /* pcrel_offset */
1290 HOWTO (R_ARM_MOVT_BREL
, /* type */
1294 false, /* pc_relative */
1296 complain_overflow_bitfield
,/* complain_on_overflow */
1297 bfd_elf_generic_reloc
, /* special_function */
1298 "R_ARM_MOVT_BREL", /* name */
1299 false, /* partial_inplace */
1300 0x0000ffff, /* src_mask */
1301 0x0000ffff, /* dst_mask */
1302 false), /* pcrel_offset */
1304 HOWTO (R_ARM_MOVW_BREL
, /* type */
1308 false, /* pc_relative */
1310 complain_overflow_dont
,/* complain_on_overflow */
1311 bfd_elf_generic_reloc
, /* special_function */
1312 "R_ARM_MOVW_BREL", /* name */
1313 false, /* partial_inplace */
1314 0x0000ffff, /* src_mask */
1315 0x0000ffff, /* dst_mask */
1316 false), /* pcrel_offset */
1318 HOWTO (R_ARM_THM_MOVW_BREL_NC
,/* type */
1322 false, /* pc_relative */
1324 complain_overflow_dont
,/* complain_on_overflow */
1325 bfd_elf_generic_reloc
, /* special_function */
1326 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 false, /* partial_inplace */
1328 0x040f70ff, /* src_mask */
1329 0x040f70ff, /* dst_mask */
1330 false), /* pcrel_offset */
1332 HOWTO (R_ARM_THM_MOVT_BREL
, /* type */
1336 false, /* pc_relative */
1338 complain_overflow_bitfield
,/* complain_on_overflow */
1339 bfd_elf_generic_reloc
, /* special_function */
1340 "R_ARM_THM_MOVT_BREL", /* name */
1341 false, /* partial_inplace */
1342 0x040f70ff, /* src_mask */
1343 0x040f70ff, /* dst_mask */
1344 false), /* pcrel_offset */
1346 HOWTO (R_ARM_THM_MOVW_BREL
, /* type */
1350 false, /* pc_relative */
1352 complain_overflow_dont
,/* complain_on_overflow */
1353 bfd_elf_generic_reloc
, /* special_function */
1354 "R_ARM_THM_MOVW_BREL", /* name */
1355 false, /* partial_inplace */
1356 0x040f70ff, /* src_mask */
1357 0x040f70ff, /* dst_mask */
1358 false), /* pcrel_offset */
1360 HOWTO (R_ARM_TLS_GOTDESC
, /* type */
1364 false, /* pc_relative */
1366 complain_overflow_bitfield
,/* complain_on_overflow */
1367 NULL
, /* special_function */
1368 "R_ARM_TLS_GOTDESC", /* name */
1369 true, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 false), /* pcrel_offset */
1374 HOWTO (R_ARM_TLS_CALL
, /* type */
1378 false, /* pc_relative */
1380 complain_overflow_dont
,/* complain_on_overflow */
1381 bfd_elf_generic_reloc
, /* special_function */
1382 "R_ARM_TLS_CALL", /* name */
1383 false, /* partial_inplace */
1384 0x00ffffff, /* src_mask */
1385 0x00ffffff, /* dst_mask */
1386 false), /* pcrel_offset */
1388 HOWTO (R_ARM_TLS_DESCSEQ
, /* type */
1392 false, /* pc_relative */
1394 complain_overflow_dont
,/* complain_on_overflow */
1395 bfd_elf_generic_reloc
, /* special_function */
1396 "R_ARM_TLS_DESCSEQ", /* name */
1397 false, /* partial_inplace */
1398 0x00000000, /* src_mask */
1399 0x00000000, /* dst_mask */
1400 false), /* pcrel_offset */
1402 HOWTO (R_ARM_THM_TLS_CALL
, /* type */
1406 false, /* pc_relative */
1408 complain_overflow_dont
,/* complain_on_overflow */
1409 bfd_elf_generic_reloc
, /* special_function */
1410 "R_ARM_THM_TLS_CALL", /* name */
1411 false, /* partial_inplace */
1412 0x07ff07ff, /* src_mask */
1413 0x07ff07ff, /* dst_mask */
1414 false), /* pcrel_offset */
1416 HOWTO (R_ARM_PLT32_ABS
, /* type */
1420 false, /* pc_relative */
1422 complain_overflow_dont
,/* complain_on_overflow */
1423 bfd_elf_generic_reloc
, /* special_function */
1424 "R_ARM_PLT32_ABS", /* name */
1425 false, /* partial_inplace */
1426 0xffffffff, /* src_mask */
1427 0xffffffff, /* dst_mask */
1428 false), /* pcrel_offset */
1430 HOWTO (R_ARM_GOT_ABS
, /* type */
1434 false, /* pc_relative */
1436 complain_overflow_dont
,/* complain_on_overflow */
1437 bfd_elf_generic_reloc
, /* special_function */
1438 "R_ARM_GOT_ABS", /* name */
1439 false, /* partial_inplace */
1440 0xffffffff, /* src_mask */
1441 0xffffffff, /* dst_mask */
1442 false), /* pcrel_offset */
1444 HOWTO (R_ARM_GOT_PREL
, /* type */
1448 true, /* pc_relative */
1450 complain_overflow_dont
, /* complain_on_overflow */
1451 bfd_elf_generic_reloc
, /* special_function */
1452 "R_ARM_GOT_PREL", /* name */
1453 false, /* partial_inplace */
1454 0xffffffff, /* src_mask */
1455 0xffffffff, /* dst_mask */
1456 true), /* pcrel_offset */
1458 HOWTO (R_ARM_GOT_BREL12
, /* type */
1462 false, /* pc_relative */
1464 complain_overflow_bitfield
,/* complain_on_overflow */
1465 bfd_elf_generic_reloc
, /* special_function */
1466 "R_ARM_GOT_BREL12", /* name */
1467 false, /* partial_inplace */
1468 0x00000fff, /* src_mask */
1469 0x00000fff, /* dst_mask */
1470 false), /* pcrel_offset */
1472 HOWTO (R_ARM_GOTOFF12
, /* type */
1476 false, /* pc_relative */
1478 complain_overflow_bitfield
,/* complain_on_overflow */
1479 bfd_elf_generic_reloc
, /* special_function */
1480 "R_ARM_GOTOFF12", /* name */
1481 false, /* partial_inplace */
1482 0x00000fff, /* src_mask */
1483 0x00000fff, /* dst_mask */
1484 false), /* pcrel_offset */
1486 EMPTY_HOWTO (R_ARM_GOTRELAX
), /* reserved for future GOT-load optimizations */
1488 /* GNU extension to record C++ vtable member usage */
1489 HOWTO (R_ARM_GNU_VTENTRY
, /* type */
1493 false, /* pc_relative */
1495 complain_overflow_dont
, /* complain_on_overflow */
1496 _bfd_elf_rel_vtable_reloc_fn
, /* special_function */
1497 "R_ARM_GNU_VTENTRY", /* name */
1498 false, /* partial_inplace */
1501 false), /* pcrel_offset */
1503 /* GNU extension to record C++ vtable hierarchy */
1504 HOWTO (R_ARM_GNU_VTINHERIT
, /* type */
1508 false, /* pc_relative */
1510 complain_overflow_dont
, /* complain_on_overflow */
1511 NULL
, /* special_function */
1512 "R_ARM_GNU_VTINHERIT", /* name */
1513 false, /* partial_inplace */
1516 false), /* pcrel_offset */
1518 HOWTO (R_ARM_THM_JUMP11
, /* type */
1522 true, /* pc_relative */
1524 complain_overflow_signed
, /* complain_on_overflow */
1525 bfd_elf_generic_reloc
, /* special_function */
1526 "R_ARM_THM_JUMP11", /* name */
1527 false, /* partial_inplace */
1528 0x000007ff, /* src_mask */
1529 0x000007ff, /* dst_mask */
1530 true), /* pcrel_offset */
1532 HOWTO (R_ARM_THM_JUMP8
, /* type */
1536 true, /* pc_relative */
1538 complain_overflow_signed
, /* complain_on_overflow */
1539 bfd_elf_generic_reloc
, /* special_function */
1540 "R_ARM_THM_JUMP8", /* name */
1541 false, /* partial_inplace */
1542 0x000000ff, /* src_mask */
1543 0x000000ff, /* dst_mask */
1544 true), /* pcrel_offset */
1546 /* TLS relocations */
1547 HOWTO (R_ARM_TLS_GD32
, /* type */
1551 false, /* pc_relative */
1553 complain_overflow_bitfield
,/* complain_on_overflow */
1554 NULL
, /* special_function */
1555 "R_ARM_TLS_GD32", /* name */
1556 true, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 false), /* pcrel_offset */
1561 HOWTO (R_ARM_TLS_LDM32
, /* type */
1565 false, /* pc_relative */
1567 complain_overflow_bitfield
,/* complain_on_overflow */
1568 bfd_elf_generic_reloc
, /* special_function */
1569 "R_ARM_TLS_LDM32", /* name */
1570 true, /* partial_inplace */
1571 0xffffffff, /* src_mask */
1572 0xffffffff, /* dst_mask */
1573 false), /* pcrel_offset */
1575 HOWTO (R_ARM_TLS_LDO32
, /* type */
1579 false, /* pc_relative */
1581 complain_overflow_bitfield
,/* complain_on_overflow */
1582 bfd_elf_generic_reloc
, /* special_function */
1583 "R_ARM_TLS_LDO32", /* name */
1584 true, /* partial_inplace */
1585 0xffffffff, /* src_mask */
1586 0xffffffff, /* dst_mask */
1587 false), /* pcrel_offset */
1589 HOWTO (R_ARM_TLS_IE32
, /* type */
1593 false, /* pc_relative */
1595 complain_overflow_bitfield
,/* complain_on_overflow */
1596 NULL
, /* special_function */
1597 "R_ARM_TLS_IE32", /* name */
1598 true, /* partial_inplace */
1599 0xffffffff, /* src_mask */
1600 0xffffffff, /* dst_mask */
1601 false), /* pcrel_offset */
1603 HOWTO (R_ARM_TLS_LE32
, /* type */
1607 false, /* pc_relative */
1609 complain_overflow_bitfield
,/* complain_on_overflow */
1610 NULL
, /* special_function */
1611 "R_ARM_TLS_LE32", /* name */
1612 true, /* partial_inplace */
1613 0xffffffff, /* src_mask */
1614 0xffffffff, /* dst_mask */
1615 false), /* pcrel_offset */
1617 HOWTO (R_ARM_TLS_LDO12
, /* type */
1621 false, /* pc_relative */
1623 complain_overflow_bitfield
,/* complain_on_overflow */
1624 bfd_elf_generic_reloc
, /* special_function */
1625 "R_ARM_TLS_LDO12", /* name */
1626 false, /* partial_inplace */
1627 0x00000fff, /* src_mask */
1628 0x00000fff, /* dst_mask */
1629 false), /* pcrel_offset */
1631 HOWTO (R_ARM_TLS_LE12
, /* type */
1635 false, /* pc_relative */
1637 complain_overflow_bitfield
,/* complain_on_overflow */
1638 bfd_elf_generic_reloc
, /* special_function */
1639 "R_ARM_TLS_LE12", /* name */
1640 false, /* partial_inplace */
1641 0x00000fff, /* src_mask */
1642 0x00000fff, /* dst_mask */
1643 false), /* pcrel_offset */
1645 HOWTO (R_ARM_TLS_IE12GP
, /* type */
1649 false, /* pc_relative */
1651 complain_overflow_bitfield
,/* complain_on_overflow */
1652 bfd_elf_generic_reloc
, /* special_function */
1653 "R_ARM_TLS_IE12GP", /* name */
1654 false, /* partial_inplace */
1655 0x00000fff, /* src_mask */
1656 0x00000fff, /* dst_mask */
1657 false), /* pcrel_offset */
1659 /* 112-127 private relocations. */
1677 /* R_ARM_ME_TOO, obsolete. */
1680 HOWTO (R_ARM_THM_TLS_DESCSEQ
, /* type */
1684 false, /* pc_relative */
1686 complain_overflow_dont
,/* complain_on_overflow */
1687 bfd_elf_generic_reloc
, /* special_function */
1688 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 false, /* partial_inplace */
1690 0x00000000, /* src_mask */
1691 0x00000000, /* dst_mask */
1692 false), /* pcrel_offset */
1695 HOWTO (R_ARM_THM_ALU_ABS_G0_NC
,/* type. */
1696 0, /* rightshift. */
1699 false, /* pc_relative. */
1701 complain_overflow_bitfield
,/* complain_on_overflow. */
1702 bfd_elf_generic_reloc
, /* special_function. */
1703 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1704 false, /* partial_inplace. */
1705 0x00000000, /* src_mask. */
1706 0x00000000, /* dst_mask. */
1707 false), /* pcrel_offset. */
1708 HOWTO (R_ARM_THM_ALU_ABS_G1_NC
,/* type. */
1709 0, /* rightshift. */
1712 false, /* pc_relative. */
1714 complain_overflow_bitfield
,/* complain_on_overflow. */
1715 bfd_elf_generic_reloc
, /* special_function. */
1716 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1717 false, /* partial_inplace. */
1718 0x00000000, /* src_mask. */
1719 0x00000000, /* dst_mask. */
1720 false), /* pcrel_offset. */
1721 HOWTO (R_ARM_THM_ALU_ABS_G2_NC
,/* type. */
1722 0, /* rightshift. */
1725 false, /* pc_relative. */
1727 complain_overflow_bitfield
,/* complain_on_overflow. */
1728 bfd_elf_generic_reloc
, /* special_function. */
1729 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1730 false, /* partial_inplace. */
1731 0x00000000, /* src_mask. */
1732 0x00000000, /* dst_mask. */
1733 false), /* pcrel_offset. */
1734 HOWTO (R_ARM_THM_ALU_ABS_G3_NC
,/* type. */
1735 0, /* rightshift. */
1738 false, /* pc_relative. */
1740 complain_overflow_bitfield
,/* complain_on_overflow. */
1741 bfd_elf_generic_reloc
, /* special_function. */
1742 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1743 false, /* partial_inplace. */
1744 0x00000000, /* src_mask. */
1745 0x00000000, /* dst_mask. */
1746 false), /* pcrel_offset. */
1747 /* Relocations for Armv8.1-M Mainline. */
1748 HOWTO (R_ARM_THM_BF16
, /* type. */
1749 0, /* rightshift. */
1752 true, /* pc_relative. */
1754 complain_overflow_dont
,/* do not complain_on_overflow. */
1755 bfd_elf_generic_reloc
, /* special_function. */
1756 "R_ARM_THM_BF16", /* name. */
1757 false, /* partial_inplace. */
1758 0x001f0ffe, /* src_mask. */
1759 0x001f0ffe, /* dst_mask. */
1760 true), /* pcrel_offset. */
1761 HOWTO (R_ARM_THM_BF12
, /* type. */
1762 0, /* rightshift. */
1765 true, /* pc_relative. */
1767 complain_overflow_dont
,/* do not complain_on_overflow. */
1768 bfd_elf_generic_reloc
, /* special_function. */
1769 "R_ARM_THM_BF12", /* name. */
1770 false, /* partial_inplace. */
1771 0x00010ffe, /* src_mask. */
1772 0x00010ffe, /* dst_mask. */
1773 true), /* pcrel_offset. */
1774 HOWTO (R_ARM_THM_BF18
, /* type. */
1775 0, /* rightshift. */
1778 true, /* pc_relative. */
1780 complain_overflow_dont
,/* do not complain_on_overflow. */
1781 bfd_elf_generic_reloc
, /* special_function. */
1782 "R_ARM_THM_BF18", /* name. */
1783 false, /* partial_inplace. */
1784 0x007f0ffe, /* src_mask. */
1785 0x007f0ffe, /* dst_mask. */
1786 true), /* pcrel_offset. */
1790 static reloc_howto_type elf32_arm_howto_table_2
[8] =
1792 HOWTO (R_ARM_IRELATIVE
, /* type */
1796 false, /* pc_relative */
1798 complain_overflow_bitfield
,/* complain_on_overflow */
1799 bfd_elf_generic_reloc
, /* special_function */
1800 "R_ARM_IRELATIVE", /* name */
1801 true, /* partial_inplace */
1802 0xffffffff, /* src_mask */
1803 0xffffffff, /* dst_mask */
1804 false), /* pcrel_offset */
1805 HOWTO (R_ARM_GOTFUNCDESC
, /* type */
1809 false, /* pc_relative */
1811 complain_overflow_bitfield
,/* complain_on_overflow */
1812 bfd_elf_generic_reloc
, /* special_function */
1813 "R_ARM_GOTFUNCDESC", /* name */
1814 false, /* partial_inplace */
1816 0xffffffff, /* dst_mask */
1817 false), /* pcrel_offset */
1818 HOWTO (R_ARM_GOTOFFFUNCDESC
, /* type */
1822 false, /* pc_relative */
1824 complain_overflow_bitfield
,/* complain_on_overflow */
1825 bfd_elf_generic_reloc
, /* special_function */
1826 "R_ARM_GOTOFFFUNCDESC",/* name */
1827 false, /* partial_inplace */
1829 0xffffffff, /* dst_mask */
1830 false), /* pcrel_offset */
1831 HOWTO (R_ARM_FUNCDESC
, /* type */
1835 false, /* pc_relative */
1837 complain_overflow_bitfield
,/* complain_on_overflow */
1838 bfd_elf_generic_reloc
, /* special_function */
1839 "R_ARM_FUNCDESC", /* name */
1840 false, /* partial_inplace */
1842 0xffffffff, /* dst_mask */
1843 false), /* pcrel_offset */
1844 HOWTO (R_ARM_FUNCDESC_VALUE
, /* type */
1848 false, /* pc_relative */
1850 complain_overflow_bitfield
,/* complain_on_overflow */
1851 bfd_elf_generic_reloc
, /* special_function */
1852 "R_ARM_FUNCDESC_VALUE",/* name */
1853 false, /* partial_inplace */
1855 0xffffffff, /* dst_mask */
1856 false), /* pcrel_offset */
1857 HOWTO (R_ARM_TLS_GD32_FDPIC
, /* type */
1861 false, /* pc_relative */
1863 complain_overflow_bitfield
,/* complain_on_overflow */
1864 bfd_elf_generic_reloc
, /* special_function */
1865 "R_ARM_TLS_GD32_FDPIC",/* name */
1866 false, /* partial_inplace */
1868 0xffffffff, /* dst_mask */
1869 false), /* pcrel_offset */
1870 HOWTO (R_ARM_TLS_LDM32_FDPIC
, /* type */
1874 false, /* pc_relative */
1876 complain_overflow_bitfield
,/* complain_on_overflow */
1877 bfd_elf_generic_reloc
, /* special_function */
1878 "R_ARM_TLS_LDM32_FDPIC",/* name */
1879 false, /* partial_inplace */
1881 0xffffffff, /* dst_mask */
1882 false), /* pcrel_offset */
1883 HOWTO (R_ARM_TLS_IE32_FDPIC
, /* type */
1887 false, /* pc_relative */
1889 complain_overflow_bitfield
,/* complain_on_overflow */
1890 bfd_elf_generic_reloc
, /* special_function */
1891 "R_ARM_TLS_IE32_FDPIC",/* name */
1892 false, /* partial_inplace */
1894 0xffffffff, /* dst_mask */
1895 false), /* pcrel_offset */
1898 /* 249-255 extended, currently unused, relocations: */
1899 static reloc_howto_type elf32_arm_howto_table_3
[4] =
1901 HOWTO (R_ARM_RREL32
, /* type */
1905 false, /* pc_relative */
1907 complain_overflow_dont
,/* complain_on_overflow */
1908 bfd_elf_generic_reloc
, /* special_function */
1909 "R_ARM_RREL32", /* name */
1910 false, /* partial_inplace */
1913 false), /* pcrel_offset */
1915 HOWTO (R_ARM_RABS32
, /* type */
1919 false, /* pc_relative */
1921 complain_overflow_dont
,/* complain_on_overflow */
1922 bfd_elf_generic_reloc
, /* special_function */
1923 "R_ARM_RABS32", /* name */
1924 false, /* partial_inplace */
1927 false), /* pcrel_offset */
1929 HOWTO (R_ARM_RPC24
, /* type */
1933 false, /* pc_relative */
1935 complain_overflow_dont
,/* complain_on_overflow */
1936 bfd_elf_generic_reloc
, /* special_function */
1937 "R_ARM_RPC24", /* name */
1938 false, /* partial_inplace */
1941 false), /* pcrel_offset */
1943 HOWTO (R_ARM_RBASE
, /* type */
1947 false, /* pc_relative */
1949 complain_overflow_dont
,/* complain_on_overflow */
1950 bfd_elf_generic_reloc
, /* special_function */
1951 "R_ARM_RBASE", /* name */
1952 false, /* partial_inplace */
1955 false) /* pcrel_offset */
1958 static reloc_howto_type
*
1959 elf32_arm_howto_from_type (unsigned int r_type
)
1961 if (r_type
< ARRAY_SIZE (elf32_arm_howto_table_1
))
1962 return &elf32_arm_howto_table_1
[r_type
];
1964 if (r_type
>= R_ARM_IRELATIVE
1965 && r_type
< R_ARM_IRELATIVE
+ ARRAY_SIZE (elf32_arm_howto_table_2
))
1966 return &elf32_arm_howto_table_2
[r_type
- R_ARM_IRELATIVE
];
1968 if (r_type
>= R_ARM_RREL32
1969 && r_type
< R_ARM_RREL32
+ ARRAY_SIZE (elf32_arm_howto_table_3
))
1970 return &elf32_arm_howto_table_3
[r_type
- R_ARM_RREL32
];
1976 elf32_arm_info_to_howto (bfd
* abfd
, arelent
* bfd_reloc
,
1977 Elf_Internal_Rela
* elf_reloc
)
1979 unsigned int r_type
;
1981 r_type
= ELF32_R_TYPE (elf_reloc
->r_info
);
1982 if ((bfd_reloc
->howto
= elf32_arm_howto_from_type (r_type
)) == NULL
)
1984 /* xgettext:c-format */
1985 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1987 bfd_set_error (bfd_error_bad_value
);
1993 struct elf32_arm_reloc_map
1995 bfd_reloc_code_real_type bfd_reloc_val
;
1996 unsigned char elf_reloc_val
;
1999 /* All entries in this list must also be present in elf32_arm_howto_table. */
2000 static const struct elf32_arm_reloc_map elf32_arm_reloc_map
[] =
2002 {BFD_RELOC_NONE
, R_ARM_NONE
},
2003 {BFD_RELOC_ARM_PCREL_BRANCH
, R_ARM_PC24
},
2004 {BFD_RELOC_ARM_PCREL_CALL
, R_ARM_CALL
},
2005 {BFD_RELOC_ARM_PCREL_JUMP
, R_ARM_JUMP24
},
2006 {BFD_RELOC_ARM_PCREL_BLX
, R_ARM_XPC25
},
2007 {BFD_RELOC_THUMB_PCREL_BLX
, R_ARM_THM_XPC22
},
2008 {BFD_RELOC_32
, R_ARM_ABS32
},
2009 {BFD_RELOC_32_PCREL
, R_ARM_REL32
},
2010 {BFD_RELOC_8
, R_ARM_ABS8
},
2011 {BFD_RELOC_16
, R_ARM_ABS16
},
2012 {BFD_RELOC_ARM_OFFSET_IMM
, R_ARM_ABS12
},
2013 {BFD_RELOC_ARM_THUMB_OFFSET
, R_ARM_THM_ABS5
},
2014 {BFD_RELOC_THUMB_PCREL_BRANCH25
, R_ARM_THM_JUMP24
},
2015 {BFD_RELOC_THUMB_PCREL_BRANCH23
, R_ARM_THM_CALL
},
2016 {BFD_RELOC_THUMB_PCREL_BRANCH12
, R_ARM_THM_JUMP11
},
2017 {BFD_RELOC_THUMB_PCREL_BRANCH20
, R_ARM_THM_JUMP19
},
2018 {BFD_RELOC_THUMB_PCREL_BRANCH9
, R_ARM_THM_JUMP8
},
2019 {BFD_RELOC_THUMB_PCREL_BRANCH7
, R_ARM_THM_JUMP6
},
2020 {BFD_RELOC_ARM_GLOB_DAT
, R_ARM_GLOB_DAT
},
2021 {BFD_RELOC_ARM_JUMP_SLOT
, R_ARM_JUMP_SLOT
},
2022 {BFD_RELOC_ARM_RELATIVE
, R_ARM_RELATIVE
},
2023 {BFD_RELOC_ARM_GOTOFF
, R_ARM_GOTOFF32
},
2024 {BFD_RELOC_ARM_GOTPC
, R_ARM_GOTPC
},
2025 {BFD_RELOC_ARM_GOT_PREL
, R_ARM_GOT_PREL
},
2026 {BFD_RELOC_ARM_GOT32
, R_ARM_GOT32
},
2027 {BFD_RELOC_ARM_PLT32
, R_ARM_PLT32
},
2028 {BFD_RELOC_ARM_TARGET1
, R_ARM_TARGET1
},
2029 {BFD_RELOC_ARM_ROSEGREL32
, R_ARM_ROSEGREL32
},
2030 {BFD_RELOC_ARM_SBREL32
, R_ARM_SBREL32
},
2031 {BFD_RELOC_ARM_PREL31
, R_ARM_PREL31
},
2032 {BFD_RELOC_ARM_TARGET2
, R_ARM_TARGET2
},
2033 {BFD_RELOC_ARM_PLT32
, R_ARM_PLT32
},
2034 {BFD_RELOC_ARM_TLS_GOTDESC
, R_ARM_TLS_GOTDESC
},
2035 {BFD_RELOC_ARM_TLS_CALL
, R_ARM_TLS_CALL
},
2036 {BFD_RELOC_ARM_THM_TLS_CALL
, R_ARM_THM_TLS_CALL
},
2037 {BFD_RELOC_ARM_TLS_DESCSEQ
, R_ARM_TLS_DESCSEQ
},
2038 {BFD_RELOC_ARM_THM_TLS_DESCSEQ
, R_ARM_THM_TLS_DESCSEQ
},
2039 {BFD_RELOC_ARM_TLS_DESC
, R_ARM_TLS_DESC
},
2040 {BFD_RELOC_ARM_TLS_GD32
, R_ARM_TLS_GD32
},
2041 {BFD_RELOC_ARM_TLS_LDO32
, R_ARM_TLS_LDO32
},
2042 {BFD_RELOC_ARM_TLS_LDM32
, R_ARM_TLS_LDM32
},
2043 {BFD_RELOC_ARM_TLS_DTPMOD32
, R_ARM_TLS_DTPMOD32
},
2044 {BFD_RELOC_ARM_TLS_DTPOFF32
, R_ARM_TLS_DTPOFF32
},
2045 {BFD_RELOC_ARM_TLS_TPOFF32
, R_ARM_TLS_TPOFF32
},
2046 {BFD_RELOC_ARM_TLS_IE32
, R_ARM_TLS_IE32
},
2047 {BFD_RELOC_ARM_TLS_LE32
, R_ARM_TLS_LE32
},
2048 {BFD_RELOC_ARM_IRELATIVE
, R_ARM_IRELATIVE
},
2049 {BFD_RELOC_ARM_GOTFUNCDESC
, R_ARM_GOTFUNCDESC
},
2050 {BFD_RELOC_ARM_GOTOFFFUNCDESC
, R_ARM_GOTOFFFUNCDESC
},
2051 {BFD_RELOC_ARM_FUNCDESC
, R_ARM_FUNCDESC
},
2052 {BFD_RELOC_ARM_FUNCDESC_VALUE
, R_ARM_FUNCDESC_VALUE
},
2053 {BFD_RELOC_ARM_TLS_GD32_FDPIC
, R_ARM_TLS_GD32_FDPIC
},
2054 {BFD_RELOC_ARM_TLS_LDM32_FDPIC
, R_ARM_TLS_LDM32_FDPIC
},
2055 {BFD_RELOC_ARM_TLS_IE32_FDPIC
, R_ARM_TLS_IE32_FDPIC
},
2056 {BFD_RELOC_VTABLE_INHERIT
, R_ARM_GNU_VTINHERIT
},
2057 {BFD_RELOC_VTABLE_ENTRY
, R_ARM_GNU_VTENTRY
},
2058 {BFD_RELOC_ARM_MOVW
, R_ARM_MOVW_ABS_NC
},
2059 {BFD_RELOC_ARM_MOVT
, R_ARM_MOVT_ABS
},
2060 {BFD_RELOC_ARM_MOVW_PCREL
, R_ARM_MOVW_PREL_NC
},
2061 {BFD_RELOC_ARM_MOVT_PCREL
, R_ARM_MOVT_PREL
},
2062 {BFD_RELOC_ARM_THUMB_MOVW
, R_ARM_THM_MOVW_ABS_NC
},
2063 {BFD_RELOC_ARM_THUMB_MOVT
, R_ARM_THM_MOVT_ABS
},
2064 {BFD_RELOC_ARM_THUMB_MOVW_PCREL
, R_ARM_THM_MOVW_PREL_NC
},
2065 {BFD_RELOC_ARM_THUMB_MOVT_PCREL
, R_ARM_THM_MOVT_PREL
},
2066 {BFD_RELOC_ARM_ALU_PC_G0_NC
, R_ARM_ALU_PC_G0_NC
},
2067 {BFD_RELOC_ARM_ALU_PC_G0
, R_ARM_ALU_PC_G0
},
2068 {BFD_RELOC_ARM_ALU_PC_G1_NC
, R_ARM_ALU_PC_G1_NC
},
2069 {BFD_RELOC_ARM_ALU_PC_G1
, R_ARM_ALU_PC_G1
},
2070 {BFD_RELOC_ARM_ALU_PC_G2
, R_ARM_ALU_PC_G2
},
2071 {BFD_RELOC_ARM_LDR_PC_G0
, R_ARM_LDR_PC_G0
},
2072 {BFD_RELOC_ARM_LDR_PC_G1
, R_ARM_LDR_PC_G1
},
2073 {BFD_RELOC_ARM_LDR_PC_G2
, R_ARM_LDR_PC_G2
},
2074 {BFD_RELOC_ARM_LDRS_PC_G0
, R_ARM_LDRS_PC_G0
},
2075 {BFD_RELOC_ARM_LDRS_PC_G1
, R_ARM_LDRS_PC_G1
},
2076 {BFD_RELOC_ARM_LDRS_PC_G2
, R_ARM_LDRS_PC_G2
},
2077 {BFD_RELOC_ARM_LDC_PC_G0
, R_ARM_LDC_PC_G0
},
2078 {BFD_RELOC_ARM_LDC_PC_G1
, R_ARM_LDC_PC_G1
},
2079 {BFD_RELOC_ARM_LDC_PC_G2
, R_ARM_LDC_PC_G2
},
2080 {BFD_RELOC_ARM_ALU_SB_G0_NC
, R_ARM_ALU_SB_G0_NC
},
2081 {BFD_RELOC_ARM_ALU_SB_G0
, R_ARM_ALU_SB_G0
},
2082 {BFD_RELOC_ARM_ALU_SB_G1_NC
, R_ARM_ALU_SB_G1_NC
},
2083 {BFD_RELOC_ARM_ALU_SB_G1
, R_ARM_ALU_SB_G1
},
2084 {BFD_RELOC_ARM_ALU_SB_G2
, R_ARM_ALU_SB_G2
},
2085 {BFD_RELOC_ARM_LDR_SB_G0
, R_ARM_LDR_SB_G0
},
2086 {BFD_RELOC_ARM_LDR_SB_G1
, R_ARM_LDR_SB_G1
},
2087 {BFD_RELOC_ARM_LDR_SB_G2
, R_ARM_LDR_SB_G2
},
2088 {BFD_RELOC_ARM_LDRS_SB_G0
, R_ARM_LDRS_SB_G0
},
2089 {BFD_RELOC_ARM_LDRS_SB_G1
, R_ARM_LDRS_SB_G1
},
2090 {BFD_RELOC_ARM_LDRS_SB_G2
, R_ARM_LDRS_SB_G2
},
2091 {BFD_RELOC_ARM_LDC_SB_G0
, R_ARM_LDC_SB_G0
},
2092 {BFD_RELOC_ARM_LDC_SB_G1
, R_ARM_LDC_SB_G1
},
2093 {BFD_RELOC_ARM_LDC_SB_G2
, R_ARM_LDC_SB_G2
},
2094 {BFD_RELOC_ARM_V4BX
, R_ARM_V4BX
},
2095 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
, R_ARM_THM_ALU_ABS_G3_NC
},
2096 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
, R_ARM_THM_ALU_ABS_G2_NC
},
2097 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
, R_ARM_THM_ALU_ABS_G1_NC
},
2098 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
, R_ARM_THM_ALU_ABS_G0_NC
},
2099 {BFD_RELOC_ARM_THUMB_BF17
, R_ARM_THM_BF16
},
2100 {BFD_RELOC_ARM_THUMB_BF13
, R_ARM_THM_BF12
},
2101 {BFD_RELOC_ARM_THUMB_BF19
, R_ARM_THM_BF18
}
2104 static reloc_howto_type
*
2105 elf32_arm_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
2106 bfd_reloc_code_real_type code
)
2110 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_reloc_map
); i
++)
2111 if (elf32_arm_reloc_map
[i
].bfd_reloc_val
== code
)
2112 return elf32_arm_howto_from_type (elf32_arm_reloc_map
[i
].elf_reloc_val
);
2117 static reloc_howto_type
*
2118 elf32_arm_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
2123 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_1
); i
++)
2124 if (elf32_arm_howto_table_1
[i
].name
!= NULL
2125 && strcasecmp (elf32_arm_howto_table_1
[i
].name
, r_name
) == 0)
2126 return &elf32_arm_howto_table_1
[i
];
2128 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_2
); i
++)
2129 if (elf32_arm_howto_table_2
[i
].name
!= NULL
2130 && strcasecmp (elf32_arm_howto_table_2
[i
].name
, r_name
) == 0)
2131 return &elf32_arm_howto_table_2
[i
];
2133 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_3
); i
++)
2134 if (elf32_arm_howto_table_3
[i
].name
!= NULL
2135 && strcasecmp (elf32_arm_howto_table_3
[i
].name
, r_name
) == 0)
2136 return &elf32_arm_howto_table_3
[i
];
2141 /* Support for core dump NOTE sections. */
2144 elf32_arm_nabi_grok_prstatus (bfd
*abfd
, Elf_Internal_Note
*note
)
2149 switch (note
->descsz
)
2154 case 148: /* Linux/ARM 32-bit. */
2156 elf_tdata (abfd
)->core
->signal
= bfd_get_16 (abfd
, note
->descdata
+ 12);
2159 elf_tdata (abfd
)->core
->lwpid
= bfd_get_32 (abfd
, note
->descdata
+ 24);
2168 /* Make a ".reg/999" section. */
2169 return _bfd_elfcore_make_pseudosection (abfd
, ".reg",
2170 size
, note
->descpos
+ offset
);
2174 elf32_arm_nabi_grok_psinfo (bfd
*abfd
, Elf_Internal_Note
*note
)
2176 switch (note
->descsz
)
2181 case 124: /* Linux/ARM elf_prpsinfo. */
2182 elf_tdata (abfd
)->core
->pid
2183 = bfd_get_32 (abfd
, note
->descdata
+ 12);
2184 elf_tdata (abfd
)->core
->program
2185 = _bfd_elfcore_strndup (abfd
, note
->descdata
+ 28, 16);
2186 elf_tdata (abfd
)->core
->command
2187 = _bfd_elfcore_strndup (abfd
, note
->descdata
+ 44, 80);
2190 /* Note that for some reason, a spurious space is tacked
2191 onto the end of the args in some (at least one anyway)
2192 implementations, so strip it off if it exists. */
2194 char *command
= elf_tdata (abfd
)->core
->command
;
2195 int n
= strlen (command
);
2197 if (0 < n
&& command
[n
- 1] == ' ')
2198 command
[n
- 1] = '\0';
2205 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2215 char data[124] ATTRIBUTE_NONSTRING;
2218 va_start (ap, note_type);
2219 memset (data, 0, sizeof (data));
2220 strncpy (data + 28, va_arg (ap, const char *), 16);
2221 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2223 /* GCC 8.0 and 8.1 warn about 80 equals destination size with
2224 -Wstringop-truncation:
2225 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643  */
2227 DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
2229 strncpy (data + 44, va_arg (ap, const char *), 80);
2230 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2235 return elfcore_write_note (abfd, buf, bufsiz,
2236 "CORE", note_type, data, sizeof (data));
2247 va_start (ap, note_type);
2248 memset (data, 0, sizeof (data));
2249 pid = va_arg (ap, long);
2250 bfd_put_32 (abfd, pid, data + 24);
2251 cursig = va_arg (ap, int);
2252 bfd_put_16 (abfd, cursig, data + 12);
2253 greg = va_arg (ap, const void *);
2254 memcpy (data + 72, greg, 72);
2257 return elfcore_write_note (abfd, buf, bufsiz,
2258 "CORE", note_type, data, sizeof (data));
2263 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2264 #define TARGET_LITTLE_NAME "elf32-littlearm"
2265 #define TARGET_BIG_SYM arm_elf32_be_vec
2266 #define TARGET_BIG_NAME "elf32-bigarm"
2268 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2269 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2270 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2272 typedef unsigned long int insn32;
2273 typedef unsigned short int insn16;
2275 /* In lieu of proper flags, assume all EABIv4 or later objects are interworkable.  */
2277 #define INTERWORK_FLAG(abfd) \
2278 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2279 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2280 || ((abfd)->flags & BFD_LINKER_CREATED))
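/* Illustrative sketch (not part of the original file): the interworking
   glue code tests this macro on each input bfd before creating veneers and
   warns when the object was not built for interworking, along the lines of

     if (!INTERWORK_FLAG (abfd))
       _bfd_error_handler (... warn that ABFD lacks interworking support ...);
 */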
2282 /* The linker script knows the section names for placement.
2283 The entry_names are used to do simple name mangling on the stubs.
2284 Given a function name, and its type, the stub can be found. The
2285 name can be changed. The only requirement is the %s be present. */
2286 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2287 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2289 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2290 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
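/* Illustrative sketch (not part of the original file): the entry names above
   are printf-style templates, so an ARM-to-Thumb veneer for "foo" is named by
   something like

     char *tmp_name
       = bfd_malloc (strlen (name) + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
     sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

   giving "__foo_from_arm"; the other veneer name templates below are used the
   same way, hence the requirement that "%s" (or "%x"/"%d") be present.  */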
2292 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2293 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2295 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2296 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2298 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2299 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2301 #define STUB_ENTRY_NAME "__%s_veneer"
2303 #define CMSE_PREFIX "__acle_se_"
2305 #define CMSE_STUB_NAME ".gnu.sgstubs"
2307 /* The name of the dynamic interpreter.  This is put in the .interp section.  */
2309 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2311 /* FDPIC default stack size. */
2312 #define DEFAULT_STACK_SIZE 0x8000
2314 static const unsigned long tls_trampoline[] =
2316 0xe08e0000, /* add r0, lr, r0 */
2317 0xe5901004, /* ldr r1, [r0,#4] */
2318 0xe12fff11, /* bx r1 */
2321 static const unsigned long dl_tlsdesc_lazy_trampoline[] =
2323 0xe52d2004, /* push {r2} */
2324 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2325 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2326 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2327 0xe081100f, /* 2: add r1, pc */
2328 0xe12fff12, /* bx r2 */
2329 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2330 + dl_tlsdesc_lazy_resolver(GOT) */
2331 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2334 /* NOTE: [Thumb nop sequence]
2335 When adding code that transitions from Thumb to Arm the instruction that
2336 should be used for the alignment padding should be 0xe7fd (b .-2) instead of
2337 a nop for performance reasons. */
2339 /* ARM FDPIC PLT entry. */
2340 /* The last 5 words contain PLT lazy fragment code and data. */
2341 static const bfd_vma elf32_arm_fdpic_plt_entry[] =
2343 0xe59fc008, /* ldr r12, .L1 */
2344 0xe08cc009, /* add r12, r12, r9 */
2345 0xe59c9004, /* ldr r9, [r12, #4] */
2346 0xe59cf000, /* ldr pc, [r12] */
2347 0x00000000, /* L1. .word foo(GOTOFFFUNCDESC) */
2348 0x00000000, /* L1. .word foo(funcdesc_value_reloc_offset) */
2349 0xe51fc00c, /* ldr r12, [pc, #-12] */
2350 0xe92d1000, /* push {r12} */
2351 0xe599c004, /* ldr r12, [r9, #4] */
2352 0xe599f000, /* ldr pc, [r9] */
2355 /* Thumb FDPIC PLT entry. */
2356 /* The last 5 words contain PLT lazy fragment code and data. */
2357 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry[] =
2359 0xc00cf8df, /* ldr.w r12, .L1 */
2360 0x0c09eb0c, /* add.w r12, r12, r9 */
2361 0x9004f8dc, /* ldr.w r9, [r12, #4] */
2362 0xf000f8dc, /* ldr.w pc, [r12] */
2363 0x00000000, /* .L1 .word foo(GOTOFFFUNCDESC) */
2364 0x00000000, /* .L2 .word foo(funcdesc_value_reloc_offset) */
2365 0xc008f85f, /* ldr.w r12, .L2 */
2366 0xcd04f84d, /* push {r12} */
2367 0xc004f8d9, /* ldr.w r12, [r9, #4] */
2368 0xf000f8d9, /* ldr.w pc, [r9] */
2371 #ifdef FOUR_WORD_PLT
2373 /* The first entry in a procedure linkage table looks like
2374 this. It is set up so that any shared library function that is
2375 called before the relocation has been set up calls the dynamic linker first.  */
2377 static const bfd_vma elf32_arm_plt0_entry[] =
2379 0xe52de004, /* str lr, [sp, #-4]! */
2380 0xe59fe010, /* ldr lr, [pc, #16] */
2381 0xe08fe00e, /* add lr, pc, lr */
2382 0xe5bef008, /* ldr pc, [lr, #8]! */
2385 /* Subsequent entries in a procedure linkage table look like this.  */
2387 static const bfd_vma elf32_arm_plt_entry[] =
2389 0xe28fc600, /* add ip, pc, #NN */
2390 0xe28cca00, /* add ip, ip, #NN */
2391 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2392 0x00000000, /* unused */
2395 #else /* not FOUR_WORD_PLT */
2397 /* The first entry in a procedure linkage table looks like
2398 this. It is set up so that any shared library function that is
2399 called before the relocation has been set up calls the dynamic linker first.  */
2401 static const bfd_vma elf32_arm_plt0_entry[] =
2403 0xe52de004, /* str lr, [sp, #-4]! */
2404 0xe59fe004, /* ldr lr, [pc, #4] */
2405 0xe08fe00e, /* add lr, pc, lr */
2406 0xe5bef008, /* ldr pc, [lr, #8]! */
2407 0x00000000, /* &GOT[0] - . */
2410 /* By default subsequent entries in a procedure linkage table look like
2411 this.  Offsets that don't fit into 28 bits will cause a link error.  */
2412 static const bfd_vma elf32_arm_plt_entry_short[] =
2414 0xe28fc600, /* add ip, pc, #0xNN00000 */
2415 0xe28cca00, /* add ip, ip, #0xNN000 */
2416 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2419 /* When explicitly asked, we'll use this "long" entry format
2420 which can cope with arbitrary displacements. */
2421 static const bfd_vma elf32_arm_plt_entry_long[] =
2423 0xe28fc200, /* add ip, pc, #0xN0000000 */
2424 0xe28cc600, /* add ip, ip, #0xNN00000 */
2425 0xe28cca00, /* add ip, ip, #0xNN000 */
2426 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2429 static bool elf32_arm_use_long_plt_entry = false;
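/* Illustrative sketch (not part of the original file): when a short PLT entry
   is emitted, the displacement from the entry to its GOT slot is split across
   the three instructions above, roughly:

     put_arm_insn (htab, output_bfd,
		   elf32_arm_plt_entry_short[0]
		   | ((got_displacement & 0x0ff00000) >> 20), ptr + 0);
     put_arm_insn (htab, output_bfd,
		   elf32_arm_plt_entry_short[1]
		   | ((got_displacement & 0x000ff000) >> 12), ptr + 4);
     put_arm_insn (htab, output_bfd,
		   elf32_arm_plt_entry_short[2]
		   | (got_displacement & 0x00000fff), ptr + 8);

   i.e. 8 + 8 + 12 = 28 bits of displacement, which is why larger
   displacements need the long form above.  */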
2431 #endif /* not FOUR_WORD_PLT */
2433 /* The first entry in a procedure linkage table looks like this.
2434 It is set up so that any shared library function that is called before the
2435 relocation has been set up calls the dynamic linker first. */
2436 static const bfd_vma elf32_thumb2_plt0_entry[] =
2438 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2439 an instruction may be encoded as one or two array elements.  */
2440 0xf8dfb500, /* push {lr} */
2441 0x44fee008, /* ldr.w lr, [pc, #8] */
2443 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2444 0x00000000, /* &GOT[0] - . */
2447 /* Subsequent entries in a procedure linkage table for a Thumb-only target look like this.  */
2449 static const bfd_vma elf32_thumb2_plt_entry[] =
2451 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2452 an instruction may be encoded as one or two array elements.  */
2453 0x0c00f240, /* movw ip, #0xNNNN */
2454 0x0c00f2c0, /* movt ip, #0xNNNN */
2455 0xf8dc44fc, /* add ip, pc */
2456 0xe7fcf000 /* ldr.w pc, [ip] */
2460 /* The format of the first entry in the procedure linkage table
2461 for a VxWorks executable. */
2462 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2464 0xe52dc008, /* str ip,[sp,#-8]! */
2465 0xe59fc000, /* ldr ip,[pc] */
2466 0xe59cf008, /* ldr pc,[ip,#8] */
2467 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2470 /* The format of subsequent entries in a VxWorks executable. */
2471 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2473 0xe59fc000, /* ldr ip,[pc] */
2474 0xe59cf000, /* ldr pc,[ip] */
2475 0x00000000, /* .long @got */
2476 0xe59fc000, /* ldr ip,[pc] */
2477 0xea000000, /* b _PLT */
2478 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2481 /* The format of entries in a VxWorks shared library. */
2482 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2484 0xe59fc000, /* ldr ip,[pc] */
2485 0xe79cf009, /* ldr pc,[ip,r9] */
2486 0x00000000, /* .long @got */
2487 0xe59fc000, /* ldr ip,[pc] */
2488 0xe599f008, /* ldr pc,[r9,#8] */
2489 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2492 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2493 #define PLT_THUMB_STUB_SIZE 4
2494 static const bfd_vma elf32_arm_plt_thumb_stub[] =
2500 /* The first entry in a procedure linkage table looks like
2501 this. It is set up so that any shared library function that is
2502 called before the relocation has been set up calls the dynamic linker first.  */
2504 static const bfd_vma elf32_arm_nacl_plt0_entry[] =
2507 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2508 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2509 0xe08cc00f, /* add ip, ip, pc */
2510 0xe52dc008, /* str ip, [sp, #-8]! */
2511 /* Second bundle: */
2512 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2513 0xe59cc000, /* ldr ip, [ip] */
2514 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2515 0xe12fff1c, /* bx ip */
2517 0xe320f000, /* nop */
2518 0xe320f000, /* nop */
2519 0xe320f000, /* nop */
2521 0xe50dc004, /* str ip, [sp, #-4] */
2522 /* Fourth bundle: */
2523 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2524 0xe59cc000, /* ldr ip, [ip] */
2525 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2526 0xe12fff1c, /* bx ip */
2528 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2530 /* Subsequent entries in a procedure linkage table look like this. */
2531 static const bfd_vma elf32_arm_nacl_plt_entry[] =
2533 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2534 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2535 0xe08cc00f, /* add ip, ip, pc */
2536 0xea000000, /* b .Lplt_tail */
2540 /* There was a bug due to too high values of THM_MAX_FWD_BRANCH_OFFSET and
2541 THM2_MAX_FWD_BRANCH_OFFSET. The first macro concerns the case when Thumb-2
2542 is not available, and the second macro when Thumb-2 is available. Among other
2543 things, they affect the range of branches represented as BLX instructions
2544 in Encoding T2 defined in Section A8.8.25 of the ARM Architecture
2545 Reference Manual ARMv7-A and ARMv7-R edition issue C.d. Such branches are
2546 specified there to have a maximum forward offset that is a multiple of 4.
2547 Previously, the respective values defined here were multiples of 2 but not
2548 4 and they are included in comments for reference. */
2549 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2550 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2551 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 4 + 4)
2552 /* #def THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4) */
2553 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2554 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 4) + 4)
2555 /* #def THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4) */
2556 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2557 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2558 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
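/* Worked values for the limits above (illustrative, not from the original
   source): ARM_MAX_FWD_BRANCH_OFFSET = (((1 << 23) - 1) << 2) + 8
   = 0x2000004, i.e. just under +32MB from the branch.  With Thumb-2,
   THM2_MAX_FWD_BRANCH_OFFSET = ((1 << 24) - 4) + 4 = 0x1000000 (+16MB) and
   THM2_MAX_BWD_BRANCH_OFFSET = -(1 << 24) + 4 = -0xfffffc (-16MB); the
   forward limit is now a multiple of 4, as the comment above requires for
   BLX in encoding T2.  */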
2568 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2569 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2570 is inserted in arm_build_one_stub(). */
2571 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2572 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2573 #define THUMB32_MOVT(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2574 #define THUMB32_MOVW(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2575 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2576 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2577 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2578 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2583 enum stub_insn_type type;
2584 unsigned int r_type;
2588 /* See note [Thumb nop sequence] when adding a veneer. */
2590 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2591 to reach the stub if necessary. */
2592 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2594 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2595 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2598 /* V4T Arm -> Thumb long branch stub.  Used on V4T where blx is not available.  */
2600 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2602 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2603 ARM_INSN (0xe12fff1c), /* bx ip */
2604 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2607 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2608 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2610 THUMB16_INSN (0xb401), /* push {r0} */
2611 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2612 THUMB16_INSN (0x4684), /* mov ip, r0 */
2613 THUMB16_INSN (0xbc01), /* pop {r0} */
2614 THUMB16_INSN (0x4760), /* bx ip */
2615 THUMB16_INSN (0xbf00), /* nop */
2616 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2619 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */
2620 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2622 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */
2623 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(x) */
2626 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2627 M-profile architectures. */
2628 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
2630 THUMB32_MOVW (0xf2400c00), /* mov.w ip, R_ARM_MOVW_ABS_NC */
2631 THUMB32_MOVT (0xf2c00c00), /* movt ip, R_ARM_MOVT_ABS << 16 */
2632 THUMB16_INSN (0x4760), /* bx ip */
2633 THUMB16_INSN (0xbf00), /* nop */
2634 /* The nop is added to ensure alignment of following stubs in the section. */
2637 /* V4T Thumb -> Thumb long branch stub.  Using the stack is not allowed.  */
2639 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2641 THUMB16_INSN (0x4778), /* bx pc */
2642 THUMB16_INSN (0xe7fd), /* b .-2 */
2643 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2644 ARM_INSN (0xe12fff1c), /* bx ip */
2645 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2648 /* V4T Thumb -> ARM long branch stub.  Used on V4T where blx is not available.  */
2650 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2652 THUMB16_INSN (0x4778), /* bx pc */
2653 THUMB16_INSN (0xe7fd), /* b .-2 */
2654 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2655 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2658 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2659 one, when the destination is close enough. */
2660 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2662 THUMB16_INSN (0x4778), /* bx pc */
2663 THUMB16_INSN (0xe7fd), /* b .-2 */
2664 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2667 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2668 blx to reach the stub if necessary. */
2669 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2671 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2672 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2673 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2676 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2677 blx to reach the stub if necessary. We can not add into pc;
2678 it is not guaranteed to mode switch (different in ARMv6 and ARMv7).  */
2680 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2682 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2683 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2684 ARM_INSN (0xe12fff1c), /* bx ip */
2685 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2688 /* V4T ARM -> ARM long branch stub, PIC. */
2689 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2691 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2692 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2693 ARM_INSN (0xe12fff1c), /* bx ip */
2694 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2697 /* V4T Thumb -> ARM long branch stub, PIC. */
2698 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2700 THUMB16_INSN (0x4778), /* bx pc */
2701 THUMB16_INSN (0xe7fd), /* b .-2 */
2702 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2703 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2704 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2707 /* Thumb -> Thumb long branch stub, PIC.  Used on M-profile architectures.  */
2709 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2711 THUMB16_INSN (0xb401), /* push {r0} */
2712 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2713 THUMB16_INSN (0x46fc), /* mov ip, pc */
2714 THUMB16_INSN (0x4484), /* add ip, r0 */
2715 THUMB16_INSN (0xbc01), /* pop {r0} */
2716 THUMB16_INSN (0x4760), /* bx ip */
2717 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2720 /* V4T Thumb -> Thumb long branch stub, PIC.  Using the stack is not allowed.  */
2722 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2724 THUMB16_INSN (0x4778), /* bx pc */
2725 THUMB16_INSN (0xe7fd), /* b .-2 */
2726 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2727 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2728 ARM_INSN (0xe12fff1c), /* bx ip */
2729 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2732 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2733 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2734 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2736 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2737 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2738 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2741 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a
2742 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2743 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2745 THUMB16_INSN (0x4778), /* bx pc */
2746 THUMB16_INSN (0xe7fd), /* b .-2 */
2747 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2748 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2749 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2752 /* NaCl ARM -> ARM long branch stub. */
2753 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2755 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2756 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2757 ARM_INSN (0xe12fff1c), /* bx ip */
2758 ARM_INSN (0xe320f000), /* nop */
2759 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2760 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2761 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2762 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2765 /* NaCl ARM -> ARM long branch stub, PIC. */
2766 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2768 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2769 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2770 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2771 ARM_INSN (0xe12fff1c), /* bx ip */
2772 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2773 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
2774 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2775 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2778 /* Stub used for transition to secure state (aka SG veneer). */
2779 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
2781 THUMB32_INSN (0xe97fe97f), /* sg. */
2782 THUMB32_B_INSN (0xf000b800, -4), /* b.w original_branch_dest. */
2786 /* Cortex-A8 erratum-workaround stubs. */
2788 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2789 can't use a conditional branch to reach this stub). */
2791 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2793 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2794 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2795 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2798 /* Stub used for b.w and bl.w instructions. */
2800 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2802 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2805 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2807 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2810 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2811 instruction (which switches to ARM mode) to point to this stub. Jump to the
2812 real destination using an ARM-mode branch. */
2814 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2816 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2819 /* For each section group there can be a specially created linker section
2820 to hold the stubs for that group. The name of the stub section is based
2821 upon the name of another section within that group with the suffix below applied.
2824 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2825 create what appeared to be a linker stub section when it actually
2826 contained user code/data. For example, consider this fragment:
2828 const char * stubborn_problems[] = { "np" };
2830 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2833 .data.rel.local.stubborn_problems
2835 This then causes problems in elf32_arm_build_stubs () as it triggers:
2837 // Ignore non-stub sections.
2838 if (!strstr (stub_sec->name, STUB_SUFFIX))
2841 And so the section would be ignored instead of being processed. Hence
2842 the change in definition of STUB_SUFFIX to a name that cannot be a valid section name.  */
2844 #define STUB_SUFFIX ".__stub"
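/* Illustrative sketch (not part of the original file): a stub section for an
   input section such as ".text.foo" is named by appending the suffix, e.g.

     namelen = strlen (link_sec->name);
     s_name = bfd_alloc (abfd, namelen + strlen (STUB_SUFFIX) + 1);
     sprintf (s_name, "%s%s", link_sec->name, STUB_SUFFIX);

   so the strstr (stub_sec->name, STUB_SUFFIX) test quoted above only matches
   sections the linker itself created.  */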
2846 /* One entry per long/short branch stub defined above. */
2848 DEF_STUB (long_branch_any_any) \
2849 DEF_STUB (long_branch_v4t_arm_thumb) \
2850 DEF_STUB (long_branch_thumb_only) \
2851 DEF_STUB (long_branch_v4t_thumb_thumb) \
2852 DEF_STUB (long_branch_v4t_thumb_arm) \
2853 DEF_STUB (short_branch_v4t_thumb_arm) \
2854 DEF_STUB (long_branch_any_arm_pic) \
2855 DEF_STUB (long_branch_any_thumb_pic) \
2856 DEF_STUB (long_branch_v4t_thumb_thumb_pic) \
2857 DEF_STUB (long_branch_v4t_arm_thumb_pic) \
2858 DEF_STUB (long_branch_v4t_thumb_arm_pic) \
2859 DEF_STUB (long_branch_thumb_only_pic) \
2860 DEF_STUB (long_branch_any_tls_pic) \
2861 DEF_STUB (long_branch_v4t_thumb_tls_pic) \
2862 DEF_STUB (long_branch_arm_nacl) \
2863 DEF_STUB (long_branch_arm_nacl_pic) \
2864 DEF_STUB (cmse_branch_thumb_only) \
2865 DEF_STUB (a8_veneer_b_cond) \
2866 DEF_STUB (a8_veneer_b) \
2867 DEF_STUB (a8_veneer_bl) \
2868 DEF_STUB (a8_veneer_blx) \
2869 DEF_STUB (long_branch_thumb2_only) \
2870 DEF_STUB (long_branch_thumb2_only_pure)
2872 #define DEF_STUB(x) arm_stub_##x,
2873 enum elf32_arm_stub_type
2881 /* Note the first a8_veneer type. */
2882 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
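/* Illustrative note (not part of the original file): DEF_STUB is used as an
   X-macro.  With the definition above, the DEF_STUB list expands inside
   enum elf32_arm_stub_type to arm_stub_long_branch_any_any,
   arm_stub_long_branch_v4t_arm_thumb, ...; it is then redefined just below
   so that the same list expands to
   {elf32_arm_stub_long_branch_any_any, ARRAY_SIZE (elf32_arm_stub_long_branch_any_any)},
   ... entries of stub_definitions, keeping the enum and the table in sync.  */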
2886 const insn_sequence *template_sequence;
2890 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2891 static const stub_def stub_definitions[] =
2897 struct elf32_arm_stub_hash_entry
2899 /* Base hash table entry structure. */
2900 struct bfd_hash_entry root;
2902 /* The stub section. */
2905 /* Offset within stub_sec of the beginning of this stub. */
2906 bfd_vma stub_offset;
2908 /* Given the symbol's value and its section we can determine its final
2909 value when building the stubs (so the stub knows where to jump). */
2910 bfd_vma target_value;
2911 asection *target_section;
2913 /* Same as above but for the source of the branch to the stub. Used for
2914 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2915 such, source section does not need to be recorded since Cortex-A8 erratum
2916 workaround stubs are only generated when both source and target are in the same section.  */
2918 bfd_vma source_value;
2920 /* The instruction which caused this stub to be generated (only valid for
2921 Cortex-A8 erratum workaround stubs at present). */
2922 unsigned long orig_insn;
2924 /* The stub type. */
2925 enum elf32_arm_stub_type stub_type;
2926 /* Its encoding size in bytes. */
2929 const insn_sequence *stub_template;
2930 /* The size of the template (number of entries). */
2931 int stub_template_size;
2933 /* The symbol table entry, if any, that this was derived from. */
2934 struct elf32_arm_link_hash_entry *h;
2936 /* Type of branch. */
2937 enum arm_st_branch_type branch_type;
2939 /* Where this stub is being called from, or, in the case of combined
2940 stub sections, the first input section in the group. */
2943 /* The name for the local symbol at the start of this stub. The
2944 stub name in the hash table has to be unique; this does not, so
2945 it can be friendlier. */
2949 /* Used to build a map of a section.  This is required for mixed-endian code/data.  */
2952 typedef struct elf32_elf_section_map
2957 elf32_arm_section_map;
2959 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2963 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2964 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2965 VFP11_ERRATUM_ARM_VENEER,
2966 VFP11_ERRATUM_THUMB_VENEER
2968 elf32_vfp11_erratum_type;
2970 typedef struct elf32_vfp11_erratum_list
2972 struct elf32_vfp11_erratum_list *next;
2978 struct elf32_vfp11_erratum_list *veneer;
2979 unsigned int vfp_insn;
2983 struct elf32_vfp11_erratum_list *branch;
2987 elf32_vfp11_erratum_type type;
2989 elf32_vfp11_erratum_list;
2991 /* Information about a STM32L4XX erratum veneer, or a branch to such a veneer.  */
2995 STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2996 STM32L4XX_ERRATUM_VENEER
2998 elf32_stm32l4xx_erratum_type;
3000 typedef struct elf32_stm32l4xx_erratum_list
3002 struct elf32_stm32l4xx_erratum_list *next;
3008 struct elf32_stm32l4xx_erratum_list *veneer;
3013 struct elf32_stm32l4xx_erratum_list *branch;
3017 elf32_stm32l4xx_erratum_type type;
3019 elf32_stm32l4xx_erratum_list;
3024 INSERT_EXIDX_CANTUNWIND_AT_END
3026 arm_unwind_edit_type;
3028 /* A (sorted) list of edits to apply to an unwind table. */
3029 typedef struct arm_unwind_table_edit
3031 arm_unwind_edit_type type;
3032 /* Note: we sometimes want to insert an unwind entry corresponding to a
3033 section different from the one we're currently writing out, so record the
3034 (text) section this edit relates to here. */
3035 asection *linked_section;
3037 struct arm_unwind_table_edit *next;
3039 arm_unwind_table_edit;
3041 typedef struct _arm_elf_section_data
3043 /* Information about mapping symbols. */
3044 struct bfd_elf_section_data elf;
3045 unsigned int mapcount;
3046 unsigned int mapsize;
3047 elf32_arm_section_map *map;
3048 /* Information about CPU errata. */
3049 unsigned int erratumcount;
3050 elf32_vfp11_erratum_list *erratumlist;
3051 unsigned int stm32l4xx_erratumcount;
3052 elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
3053 unsigned int additional_reloc_count;
3054 /* Information about unwind tables. */
3057 /* Unwind info attached to a text section. */
3060 asection *arm_exidx_sec;
3063 /* Unwind info attached to an .ARM.exidx section. */
3066 arm_unwind_table_edit *unwind_edit_list;
3067 arm_unwind_table_edit *unwind_edit_tail;
3071 _arm_elf_section_data;
3073 #define elf32_arm_section_data(sec) \
3074 ((_arm_elf_section_data *) elf_section_data (sec))
3076 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3077 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3078 so may be created multiple times: we use an array of these entries whilst
3079 relaxing which we can refresh easily, then create stubs for each potentially
3080 erratum-triggering instruction once we've settled on a solution. */
3082 struct a8_erratum_fix
3087 bfd_vma target_offset;
3088 unsigned long orig_insn;
3090 enum elf32_arm_stub_type stub_type;
3091 enum arm_st_branch_type branch_type;
3094 /* A table of relocs applied to branches which might trigger the Cortex-A8 erratum.  */
3097 struct a8_erratum_reloc
3100 bfd_vma destination;
3101 struct elf32_arm_link_hash_entry *hash;
3102 const char *sym_name;
3103 unsigned int r_type;
3104 enum arm_st_branch_type branch_type;
3108 /* The size of the thread control block. */
3111 /* ARM-specific information about a PLT entry, over and above the usual gotplt_union.  */
3115 /* We reference count Thumb references to a PLT entry separately,
3116 so that we can emit the Thumb trampoline only if needed. */
3117 bfd_signed_vma thumb_refcount;
3119 /* Some references from Thumb code may be eliminated by BL->BLX
3120 conversion, so record them separately. */
3121 bfd_signed_vma maybe_thumb_refcount;
3123 /* How many of the recorded PLT accesses were from non-call relocations.
3124 This information is useful when deciding whether anything takes the
3125 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
3126 non-call references to the function should resolve directly to the
3127 real runtime target. */
3128 unsigned int noncall_refcount;
3130 /* Since PLT entries have variable size if the Thumb prologue is
3131 used, we need to record the index into .got.plt instead of
3132 recomputing it from the PLT offset. */
3133 bfd_signed_vma got_offset;
3136 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
3137 struct arm_local_iplt_info
3139 /* The information that is usually found in the generic ELF part of
3140 the hash table entry. */
3141 union gotplt_union root;
3143 /* The information that is usually found in the ARM-specific part of
3144 the hash table entry. */
3145 struct arm_plt_info arm;
3147 /* A list of all potential dynamic relocations against this symbol. */
3148 struct elf_dyn_relocs *dyn_relocs;
3151 /* Structure to handle FDPIC support for local functions. */
3154 unsigned int funcdesc_cnt;
3155 unsigned int gotofffuncdesc_cnt;
3156 int funcdesc_offset;
3159 struct elf_arm_obj_tdata
3161 struct elf_obj_tdata root;
3163 /* Zero to warn when linking objects with incompatible enum sizes. */
3164 int no_enum_size_warning;
3166 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
3167 int no_wchar_size_warning;
3169 /* The number of entries in each of the arrays in this structure.
3170 Used to avoid buffer overruns. */
3171 bfd_size_type num_entries;
3173 /* tls_type for each local got entry. */
3174 char *local_got_tls_type;
3176 /* GOTPLT entries for TLS descriptors. */
3177 bfd_vma *local_tlsdesc_gotent;
3179 /* Information for local symbols that need entries in .iplt. */
3180 struct arm_local_iplt_info **local_iplt;
3182 /* Maintains FDPIC counters and funcdesc info. */
3183 struct fdpic_local *local_fdpic_cnts;
3186 #define elf_arm_tdata(bfd) \
3187 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3189 #define elf32_arm_num_entries(bfd) \
3190 (elf_arm_tdata (bfd)->num_entries)
3192 #define elf32_arm_local_got_tls_type(bfd) \
3193 (elf_arm_tdata (bfd)->local_got_tls_type)
3195 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3196 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3198 #define elf32_arm_local_iplt(bfd) \
3199 (elf_arm_tdata (bfd)->local_iplt)
3201 #define elf32_arm_local_fdpic_cnts(bfd) \
3202 (elf_arm_tdata (bfd)->local_fdpic_cnts)
3204 #define is_arm_elf(bfd) \
3205 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3206 && elf_tdata (bfd) != NULL \
3207 && elf_object_id (bfd) == ARM_ELF_DATA)
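/* Illustrative sketch (not part of the original file): backend hooks
   typically guard themselves with this predicate before touching
   ARM-specific tdata, e.g.

     if (!is_arm_elf (abfd))
       return false;
 */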
3210 elf32_arm_mkobject (bfd *abfd)
3212 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata));
3215 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3217 /* Structure to handle FDPIC support for extern functions. */
3218 struct fdpic_global {
3219 unsigned int gotofffuncdesc_cnt;
3220 unsigned int gotfuncdesc_cnt;
3221 unsigned int funcdesc_cnt;
3222 int funcdesc_offset;
3223 int gotfuncdesc_offset;
3226 /* Arm ELF linker hash entry. */
3227 struct elf32_arm_link_hash_entry
3229 struct elf_link_hash_entry root;
3231 /* ARM-specific PLT information. */
3232 struct arm_plt_info plt;
3234 #define GOT_UNKNOWN 0
3235 #define GOT_NORMAL 1
3236 #define GOT_TLS_GD 2
3237 #define GOT_TLS_IE 4
3238 #define GOT_TLS_GDESC 8
3239 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
3240 unsigned int tls_type : 8;
3242 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
3243 unsigned int is_iplt : 1;
3245 unsigned int unused : 23;
3247 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3248 starting at the end of the jump table. */
3249 bfd_vma tlsdesc_got;
3251 /* The symbol marking the real symbol location for exported thumb
3252 symbols with Arm stubs. */
3253 struct elf_link_hash_entry *export_glue;
3255 /* A pointer to the most recently used stub hash entry against this symbol.  */
3257 struct elf32_arm_stub_hash_entry *stub_cache;
3259 /* Counter for FDPIC relocations against this symbol. */
3260 struct fdpic_global fdpic_cnts;
3263 /* Traverse an arm ELF linker hash table. */
3264 #define elf32_arm_link_hash_traverse(table, func, info) \
3265 (elf_link_hash_traverse \
3267 (bool (*) (struct elf_link_hash_entry *, void *)) (func), \
3270 /* Get the ARM elf linker hash table from a link_info structure. */
3271 #define elf32_arm_hash_table(p) \
3272 ((is_elf_hash_table ((p)->hash) \
3273 && elf_hash_table_id (elf_hash_table (p)) == ARM_ELF_DATA) \
3274 ? (struct elf32_arm_link_hash_table *) (p)->hash : NULL)
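/* Illustrative sketch (not part of the original file): callers fetch the ARM
   hash table and bail out if the link is not using one, e.g.

     struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
     if (htab == NULL)
       return false;
 */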
3276 #define arm_stub_hash_lookup(table, string, create, copy) \
3277 ((struct elf32_arm_stub_hash_entry *) \
3278 bfd_hash_lookup ((table), (string), (create), (copy)))
3280 /* Array to keep track of which stub sections have been created, and
3281 information on stub grouping. */
3284 /* This is the section to which stubs in the group will be attached.  */
3287 /* The stub section. */
3291 #define elf32_arm_compute_jump_table_size(htab) \
3292 ((htab)->next_tls_desc_index * 4)
3294 /* ARM ELF linker hash table. */
3295 struct elf32_arm_link_hash_table
3297 /* The main hash table. */
3298 struct elf_link_hash_table root
;
3300 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3301 bfd_size_type thumb_glue_size
;
3303 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3304 bfd_size_type arm_glue_size
;
3306 /* The size in bytes of section containing the ARMv4 BX veneers. */
3307 bfd_size_type bx_glue_size
;
3309 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3310 veneer has been populated. */
3311 bfd_vma bx_glue_offset
[15];
3313 /* The size in bytes of the section containing glue for VFP11 erratum
3315 bfd_size_type vfp11_erratum_glue_size
;
3317 /* The size in bytes of the section containing glue for STM32L4XX erratum
3319 bfd_size_type stm32l4xx_erratum_glue_size
;
3321 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3322 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3323 elf32_arm_write_section(). */
3324 struct a8_erratum_fix
*a8_erratum_fixes
;
3325 unsigned int num_a8_erratum_fixes
;
3327 /* An arbitrary input BFD chosen to hold the glue sections. */
3328 bfd
* bfd_of_glue_owner
;
3330 /* Nonzero to output a BE8 image. */
3333 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3334 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3337 /* The relocation to use for R_ARM_TARGET2 relocations. */
3340 /* 0 = Ignore R_ARM_V4BX.
3341 1 = Convert BX to MOV PC.
3342 2 = Generate v4 interworing stubs. */
3345 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3348 /* Whether we should fix the ARM1176 BLX immediate issue. */
3351 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3354 /* What sort of code sequences we should look for which may trigger the
3355 VFP11 denorm erratum. */
3356 bfd_arm_vfp11_fix vfp11_fix
;
3358 /* Global counter for the number of fixes we have emitted. */
3359 int num_vfp11_fixes
;
3361 /* What sort of code sequences we should look for which may trigger the
3362 STM32L4XX erratum. */
3363 bfd_arm_stm32l4xx_fix stm32l4xx_fix
;
3365 /* Global counter for the number of fixes we have emitted. */
3366 int num_stm32l4xx_fixes
;
3368 /* Nonzero to force PIC branch veneers. */
3371 /* The number of bytes in the initial entry in the PLT. */
3372 bfd_size_type plt_header_size
;
3374 /* The number of bytes in the subsequent PLT etries. */
3375 bfd_size_type plt_entry_size
;
3377 /* True if the target uses REL relocations. */
3380 /* Nonzero if import library must be a secure gateway import library
3381 as per ARMv8-M Security Extensions. */
3384 /* The import library whose symbols' address must remain stable in
3385 the import library generated. */
3388 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3389 bfd_vma next_tls_desc_index
;
3391 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3392 bfd_vma num_tls_desc
;
3394 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3397 /* Offset in .plt section of tls_arm_trampoline. */
3398 bfd_vma tls_trampoline
;
3400 /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
3403 bfd_signed_vma refcount
;
3407 /* For convenience in allocate_dynrelocs. */
3410 /* The amount of space used by the reserved portion of the sgotplt
3411 section, plus whatever space is used by the jump slots. */
3412 bfd_vma sgotplt_jump_table_size
;
3414 /* The stub hash table. */
3415 struct bfd_hash_table stub_hash_table
;
3417 /* Linker stub bfd. */
3420 /* Linker call-backs. */
3421 asection
* (*add_stub_section
) (const char *, asection
*, asection
*,
3423 void (*layout_sections_again
) (void);
3425 /* Array to keep track of which stub sections have been created, and
3426 information on stub grouping. */
3427 struct map_stub
*stub_group
;
3429 /* Input stub section holding secure gateway veneers. */
3430 asection
*cmse_stub_sec
;
3432 /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3433 start to be allocated. */
3434 bfd_vma new_cmse_stub_offset
;
3436 /* Number of elements in stub_group. */
3437 unsigned int top_id
;
3439 /* Assorted information used by elf32_arm_size_stubs. */
3440 unsigned int bfd_count
;
3441 unsigned int top_index
;
3442 asection
**input_list
;
3444 /* True if the target system uses FDPIC. */
3447 /* Fixup section. Used for FDPIC. */
3451 /* Add an FDPIC read-only fixup. */
3453 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3455 bfd_vma fixup_offset;
3457 fixup_offset = srofixup->reloc_count++ * 4;
3458 BFD_ASSERT (fixup_offset < srofixup->size);
3459 bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
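/* Illustrative note (not part of the original file): each call to
   arm_elf_add_rofixup appends one 32-bit entry to .rofixup holding the VMA of
   a word the FDPIC loader must relocate at load time, so srofixup->size must
   have been set earlier to 4 bytes per expected fixup -- the BFD_ASSERT above
   checks exactly that.  */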
3463 ctz (unsigned int mask
)
3465 #if GCC_VERSION >= 3004
3466 return __builtin_ctz (mask
);
3470 for (i
= 0; i
< 8 * sizeof (mask
); i
++)
3481 elf32_arm_popcount (unsigned int mask
)
3483 #if GCC_VERSION >= 3004
3484 return __builtin_popcount (mask
);
3489 for (i
= 0; i
< 8 * sizeof (mask
); i
++)
3499 static void elf32_arm_add_dynreloc (bfd
*output_bfd
, struct bfd_link_info
*info
,
3500 asection
*sreloc
, Elf_Internal_Rela
*rel
);
3503 arm_elf_fill_funcdesc (bfd
*output_bfd
,
3504 struct bfd_link_info
*info
,
3505 int *funcdesc_offset
,
3509 bfd_vma dynreloc_value
,
3512 if ((*funcdesc_offset
& 1) == 0)
3514 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
3515 asection
*sgot
= globals
->root
.sgot
;
3517 if (bfd_link_pic (info
))
3519 asection
*srelgot
= globals
->root
.srelgot
;
3520 Elf_Internal_Rela outrel
;
3522 outrel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_FUNCDESC_VALUE
);
3523 outrel
.r_offset
= sgot
->output_section
->vma
+ sgot
->output_offset
+ offset
;
3524 outrel
.r_addend
= 0;
3526 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
3527 bfd_put_32 (output_bfd
, addr
, sgot
->contents
+ offset
);
3528 bfd_put_32 (output_bfd
, seg
, sgot
->contents
+ offset
+ 4);
3532 struct elf_link_hash_entry
*hgot
= globals
->root
.hgot
;
3533 bfd_vma got_value
= hgot
->root
.u
.def
.value
3534 + hgot
->root
.u
.def
.section
->output_section
->vma
3535 + hgot
->root
.u
.def
.section
->output_offset
;
3537 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
,
3538 sgot
->output_section
->vma
+ sgot
->output_offset
3540 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
,
3541 sgot
->output_section
->vma
+ sgot
->output_offset
3543 bfd_put_32 (output_bfd
, dynreloc_value
, sgot
->contents
+ offset
);
3544 bfd_put_32 (output_bfd
, got_value
, sgot
->contents
+ offset
+ 4);
3546 *funcdesc_offset
|= 1;
3550 /* Create an entry in an ARM ELF linker hash table. */
3552 static struct bfd_hash_entry
*
3553 elf32_arm_link_hash_newfunc (struct bfd_hash_entry
* entry
,
3554 struct bfd_hash_table
* table
,
3555 const char * string
)
3557 struct elf32_arm_link_hash_entry
* ret
=
3558 (struct elf32_arm_link_hash_entry
*) entry
;
3560 /* Allocate the structure if it has not already been allocated by a
3563 ret
= (struct elf32_arm_link_hash_entry
*)
3564 bfd_hash_allocate (table
, sizeof (struct elf32_arm_link_hash_entry
));
3566 return (struct bfd_hash_entry
*) ret
;
3568 /* Call the allocation method of the superclass. */
3569 ret
= ((struct elf32_arm_link_hash_entry
*)
3570 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry
*) ret
,
3574 ret
->tls_type
= GOT_UNKNOWN
;
3575 ret
->tlsdesc_got
= (bfd_vma
) -1;
3576 ret
->plt
.thumb_refcount
= 0;
3577 ret
->plt
.maybe_thumb_refcount
= 0;
3578 ret
->plt
.noncall_refcount
= 0;
3579 ret
->plt
.got_offset
= -1;
3580 ret
->is_iplt
= false;
3581 ret
->export_glue
= NULL
;
3583 ret
->stub_cache
= NULL
;
3585 ret
->fdpic_cnts
.gotofffuncdesc_cnt
= 0;
3586 ret
->fdpic_cnts
.gotfuncdesc_cnt
= 0;
3587 ret
->fdpic_cnts
.funcdesc_cnt
= 0;
3588 ret
->fdpic_cnts
.funcdesc_offset
= -1;
3589 ret
->fdpic_cnts
.gotfuncdesc_offset
= -1;
3592 return (struct bfd_hash_entry
*) ret
;
3595 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3599 elf32_arm_allocate_local_sym_info (bfd
*abfd
)
3601 if (elf_local_got_refcounts (abfd
) == NULL
)
3603 bfd_size_type num_syms
;
3605 elf32_arm_num_entries (abfd
) = 0;
3607 /* Whilst it might be tempting to allocate a single block of memory and
3608 then divide it up amoungst the arrays in the elf_arm_obj_tdata
3609 structure, this interferes with the work of memory checkers looking
3610 for buffer overruns. So allocate each array individually. */
3612 num_syms
= elf_tdata (abfd
)->symtab_hdr
.sh_info
;
3614 elf_local_got_refcounts (abfd
) = bfd_zalloc
3615 (abfd
, num_syms
* sizeof (* elf_local_got_refcounts (abfd
)));
3617 if (elf_local_got_refcounts (abfd
) == NULL
)
3620 elf32_arm_local_tlsdesc_gotent (abfd
) = bfd_zalloc
3621 (abfd
, num_syms
* sizeof (* elf32_arm_local_tlsdesc_gotent (abfd
)));
3623 if (elf32_arm_local_tlsdesc_gotent (abfd
) == NULL
)
3626 elf32_arm_local_iplt (abfd
) = bfd_zalloc
3627 (abfd
, num_syms
* sizeof (* elf32_arm_local_iplt (abfd
)));
3629 if (elf32_arm_local_iplt (abfd
) == NULL
)
3632 elf32_arm_local_fdpic_cnts (abfd
) = bfd_zalloc
3633 (abfd
, num_syms
* sizeof (* elf32_arm_local_fdpic_cnts (abfd
)));
3635 if (elf32_arm_local_fdpic_cnts (abfd
) == NULL
)
3638 elf32_arm_local_got_tls_type (abfd
) = bfd_zalloc
3639 (abfd
, num_syms
* sizeof (* elf32_arm_local_got_tls_type (abfd
)));
3641 if (elf32_arm_local_got_tls_type (abfd
) == NULL
)
3644 elf32_arm_num_entries (abfd
) = num_syms
;
3646 #if GCC_VERSION >= 3000
3647 BFD_ASSERT (__alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd
))
3648 <= __alignof__ (*elf_local_got_refcounts (abfd
)));
3649 BFD_ASSERT (__alignof__ (*elf32_arm_local_iplt (abfd
))
3650 <= __alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd
)));
3651 BFD_ASSERT (__alignof__ (*elf32_arm_local_fdpic_cnts (abfd
))
3652 <= __alignof__ (*elf32_arm_local_iplt (abfd
)));
3653 BFD_ASSERT (__alignof__ (*elf32_arm_local_got_tls_type (abfd
))
3654 <= __alignof__ (*elf32_arm_local_fdpic_cnts (abfd
)));
3660 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3661 to input bfd ABFD. Create the information if it doesn't already exist.
3662 Return null if an allocation fails. */
3664 static struct arm_local_iplt_info
*
3665 elf32_arm_create_local_iplt (bfd
*abfd
, unsigned long r_symndx
)
3667 struct arm_local_iplt_info
**ptr
;
3669 if (!elf32_arm_allocate_local_sym_info (abfd
))
3672 BFD_ASSERT (r_symndx
< elf_tdata (abfd
)->symtab_hdr
.sh_info
);
3673 BFD_ASSERT (r_symndx
< elf32_arm_num_entries (abfd
));
3674 ptr
= &elf32_arm_local_iplt (abfd
)[r_symndx
];
3676 *ptr
= bfd_zalloc (abfd
, sizeof (**ptr
));
3680 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3681 in ABFD's symbol table. If the symbol is global, H points to its
3682 hash table entry, otherwise H is null.
3684 Return true if the symbol does have PLT information. When returning
3685 true, point *ROOT_PLT at the target-independent reference count/offset
3686 union and *ARM_PLT at the ARM-specific information. */
3689 elf32_arm_get_plt_info (bfd
*abfd
, struct elf32_arm_link_hash_table
*globals
,
3690 struct elf32_arm_link_hash_entry
*h
,
3691 unsigned long r_symndx
, union gotplt_union
**root_plt
,
3692 struct arm_plt_info
**arm_plt
)
3694 struct arm_local_iplt_info
*local_iplt
;
3696 if (globals
->root
.splt
== NULL
&& globals
->root
.iplt
== NULL
)
3701 *root_plt
= &h
->root
.plt
;
3706 if (elf32_arm_local_iplt (abfd
) == NULL
)
3709 if (r_symndx
>= elf32_arm_num_entries (abfd
))
3712 local_iplt
= elf32_arm_local_iplt (abfd
)[r_symndx
];
3713 if (local_iplt
== NULL
)
3716 *root_plt
= &local_iplt
->root
;
3717 *arm_plt
= &local_iplt
->arm
;
3721 static bool using_thumb_only (struct elf32_arm_link_hash_table
*globals
);
3723 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3727 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info
*info
,
3728 struct arm_plt_info
*arm_plt
)
3730 struct elf32_arm_link_hash_table
*htab
;
3732 htab
= elf32_arm_hash_table (info
);
3734 return (!using_thumb_only (htab
) && (arm_plt
->thumb_refcount
!= 0
3735 || (!htab
->use_blx
&& arm_plt
->maybe_thumb_refcount
!= 0)));
3738 /* Return a pointer to the head of the dynamic reloc list that should
3739 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3740 ABFD's symbol table. Return null if an error occurs. */
3742 static struct elf_dyn_relocs
**
3743 elf32_arm_get_local_dynreloc_list (bfd
*abfd
, unsigned long r_symndx
,
3744 Elf_Internal_Sym
*isym
)
3746 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
)
3748 struct arm_local_iplt_info
*local_iplt
;
3750 local_iplt
= elf32_arm_create_local_iplt (abfd
, r_symndx
);
3751 if (local_iplt
== NULL
)
3753 return &local_iplt
->dyn_relocs
;
3757 /* Track dynamic relocs needed for local syms too.
3758 We really need local syms available to do this
3763 s
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
3767 vpp
= &elf_section_data (s
)->local_dynrel
;
3768 return (struct elf_dyn_relocs
**) vpp
;
3772 /* Initialize an entry in the stub hash table. */
3774 static struct bfd_hash_entry
*
3775 stub_hash_newfunc (struct bfd_hash_entry
*entry
,
3776 struct bfd_hash_table
*table
,
3779 /* Allocate the structure if it has not already been allocated by a
3783 entry
= (struct bfd_hash_entry
*)
3784 bfd_hash_allocate (table
, sizeof (struct elf32_arm_stub_hash_entry
));
3789 /* Call the allocation method of the superclass. */
3790 entry
= bfd_hash_newfunc (entry
, table
, string
);
3793 struct elf32_arm_stub_hash_entry
*eh
;
3795 /* Initialize the local fields. */
3796 eh
= (struct elf32_arm_stub_hash_entry
*) entry
;
3797 eh
->stub_sec
= NULL
;
3798 eh
->stub_offset
= (bfd_vma
) -1;
3799 eh
->source_value
= 0;
3800 eh
->target_value
= 0;
3801 eh
->target_section
= NULL
;
3803 eh
->stub_type
= arm_stub_none
;
3805 eh
->stub_template
= NULL
;
3806 eh
->stub_template_size
= -1;
3809 eh
->output_name
= NULL
;
3815 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3816 shortcuts to them in our hash table. */
3819 create_got_section (bfd
*dynobj
, struct bfd_link_info
*info
)
3821 struct elf32_arm_link_hash_table
*htab
;
3823 htab
= elf32_arm_hash_table (info
);
3827 if (! _bfd_elf_create_got_section (dynobj
, info
))
3830 /* Also create .rofixup. */
3833 htab
->srofixup
= bfd_make_section_with_flags (dynobj
, ".rofixup",
3834 (SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
3835 | SEC_IN_MEMORY
| SEC_LINKER_CREATED
| SEC_READONLY
));
3836 if (htab
->srofixup
== NULL
3837 || !bfd_set_section_alignment (htab
->srofixup
, 2))
3844 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3847 create_ifunc_sections (struct bfd_link_info
*info
)
3849 struct elf32_arm_link_hash_table
*htab
;
3850 const struct elf_backend_data
*bed
;
3855 htab
= elf32_arm_hash_table (info
);
3856 dynobj
= htab
->root
.dynobj
;
3857 bed
= get_elf_backend_data (dynobj
);
3858 flags
= bed
->dynamic_sec_flags
;
3860 if (htab
->root
.iplt
== NULL
)
3862 s
= bfd_make_section_anyway_with_flags (dynobj
, ".iplt",
3863 flags
| SEC_READONLY
| SEC_CODE
);
3865 || !bfd_set_section_alignment (s
, bed
->plt_alignment
))
3867 htab
->root
.iplt
= s
;
3870 if (htab
->root
.irelplt
== NULL
)
3872 s
= bfd_make_section_anyway_with_flags (dynobj
,
3873 RELOC_SECTION (htab
, ".iplt"),
3874 flags
| SEC_READONLY
);
3876 || !bfd_set_section_alignment (s
, bed
->s
->log_file_align
))
3878 htab
->root
.irelplt
= s
;
3881 if (htab
->root
.igotplt
== NULL
)
3883 s
= bfd_make_section_anyway_with_flags (dynobj
, ".igot.plt", flags
);
3885 || !bfd_set_section_alignment (s
, bed
->s
->log_file_align
))
3887 htab
->root
.igotplt
= s
;
3892 /* Determine if we're dealing with a Thumb only architecture. */
3895 using_thumb_only (struct elf32_arm_link_hash_table
*globals
)
3898 int profile
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3899 Tag_CPU_arch_profile
);
3902 return profile
== 'M';
3904 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3906 /* Force return logic to be reviewed for each new architecture. */
3907 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
3909 if (arch
== TAG_CPU_ARCH_V6_M
3910 || arch
== TAG_CPU_ARCH_V6S_M
3911 || arch
== TAG_CPU_ARCH_V7E_M
3912 || arch
== TAG_CPU_ARCH_V8M_BASE
3913 || arch
== TAG_CPU_ARCH_V8M_MAIN
3914 || arch
== TAG_CPU_ARCH_V8_1M_MAIN
)
3920 /* Determine if we're dealing with a Thumb-2 object. */
3923 using_thumb2 (struct elf32_arm_link_hash_table
*globals
)
3926 int thumb_isa
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3929 /* No use of thumb permitted, or a legacy thumb-1/2 definition. */
3931 return thumb_isa
== 2;
3933 /* Variant of thumb is described by the architecture tag. */
3934 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3936 /* Force return logic to be reviewed for each new architecture. */
3937 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
3939 return (arch
== TAG_CPU_ARCH_V6T2
3940 || arch
== TAG_CPU_ARCH_V7
3941 || arch
== TAG_CPU_ARCH_V7E_M
3942 || arch
== TAG_CPU_ARCH_V8
3943 || arch
== TAG_CPU_ARCH_V8R
3944 || arch
== TAG_CPU_ARCH_V8M_MAIN
3945 || arch
== TAG_CPU_ARCH_V8_1M_MAIN
);
3948 /* Determine whether Thumb-2 BL instruction is available. */
3951 using_thumb2_bl (struct elf32_arm_link_hash_table
*globals
)
3954 bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3956 /* Force return logic to be reviewed for each new architecture. */
3957 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V9
);
3959 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3960 return (arch
== TAG_CPU_ARCH_V6T2
3961 || arch
>= TAG_CPU_ARCH_V7
);
3964 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3965 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3969 elf32_arm_create_dynamic_sections (bfd
*dynobj
, struct bfd_link_info
*info
)
3971 struct elf32_arm_link_hash_table
*htab
;
3973 htab
= elf32_arm_hash_table (info
);
3977 if (!htab
->root
.sgot
&& !create_got_section (dynobj
, info
))
3980 if (!_bfd_elf_create_dynamic_sections (dynobj
, info
))
3983 if (htab
->root
.target_os
== is_vxworks
)
3985 if (!elf_vxworks_create_dynamic_sections (dynobj
, info
, &htab
->srelplt2
))
3988 if (bfd_link_pic (info
))
3990 htab
->plt_header_size
= 0;
3991 htab
->plt_entry_size
3992 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry
);
3996 htab
->plt_header_size
3997 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry
);
3998 htab
->plt_entry_size
3999 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry
);
4002 if (elf_elfheader (dynobj
))
4003 elf_elfheader (dynobj
)->e_ident
[EI_CLASS
] = ELFCLASS32
;
4008 Test for thumb only architectures. Note - we cannot just call
4009 using_thumb_only() as the attributes in the output bfd have not been
4010 initialised at this point, so instead we use the input bfd. */
4011 bfd
* saved_obfd
= htab
->obfd
;
4013 htab
->obfd
= dynobj
;
4014 if (using_thumb_only (htab
))
4016 htab
->plt_header_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry
);
4017 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt_entry
);
4019 htab
->obfd
= saved_obfd
;
4022 if (htab
->fdpic_p
) {
4023 htab
->plt_header_size
= 0;
4024 if (info
->flags
& DF_BIND_NOW
)
4025 htab
->plt_entry_size
= 4 * (ARRAY_SIZE (elf32_arm_fdpic_plt_entry
) - 5);
4027 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry
);
4030 if (!htab
->root
.splt
4031 || !htab
->root
.srelplt
4032 || !htab
->root
.sdynbss
4033 || (!bfd_link_pic (info
) && !htab
->root
.srelbss
))
4039 /* Copy the extra info we tack onto an elf_link_hash_entry. */
4042 elf32_arm_copy_indirect_symbol (struct bfd_link_info
*info
,
4043 struct elf_link_hash_entry
*dir
,
4044 struct elf_link_hash_entry
*ind
)
4046 struct elf32_arm_link_hash_entry
*edir
, *eind
;
4048 edir
= (struct elf32_arm_link_hash_entry
*) dir
;
4049 eind
= (struct elf32_arm_link_hash_entry
*) ind
;
4051 if (ind
->root
.type
== bfd_link_hash_indirect
)
4053 /* Copy over PLT info. */
4054 edir
->plt
.thumb_refcount
+= eind
->plt
.thumb_refcount
;
4055 eind
->plt
.thumb_refcount
= 0;
4056 edir
->plt
.maybe_thumb_refcount
+= eind
->plt
.maybe_thumb_refcount
;
4057 eind
->plt
.maybe_thumb_refcount
= 0;
4058 edir
->plt
.noncall_refcount
+= eind
->plt
.noncall_refcount
;
4059 eind
->plt
.noncall_refcount
= 0;
4061 /* Copy FDPIC counters. */
4062 edir
->fdpic_cnts
.gotofffuncdesc_cnt
+= eind
->fdpic_cnts
.gotofffuncdesc_cnt
;
4063 edir
->fdpic_cnts
.gotfuncdesc_cnt
+= eind
->fdpic_cnts
.gotfuncdesc_cnt
;
4064 edir
->fdpic_cnts
.funcdesc_cnt
+= eind
->fdpic_cnts
.funcdesc_cnt
;
4066 /* We should only allocate a function to .iplt once the final
4067 symbol information is known. */
4068 BFD_ASSERT (!eind
->is_iplt
);
4070 if (dir
->got
.refcount
<= 0)
4072 edir
->tls_type
= eind
->tls_type
;
4073 eind
->tls_type
= GOT_UNKNOWN
;
4077 _bfd_elf_link_hash_copy_indirect (info
, dir
, ind
);
4080 /* Destroy an ARM elf linker hash table. */
4083 elf32_arm_link_hash_table_free (bfd
*obfd
)
4085 struct elf32_arm_link_hash_table
*ret
4086 = (struct elf32_arm_link_hash_table
*) obfd
->link
.hash
;
4088 bfd_hash_table_free (&ret
->stub_hash_table
);
4089 _bfd_elf_link_hash_table_free (obfd
);
4092 /* Create an ARM elf linker hash table. */
4094 static struct bfd_link_hash_table
*
4095 elf32_arm_link_hash_table_create (bfd
*abfd
)
4097 struct elf32_arm_link_hash_table
*ret
;
4098 size_t amt
= sizeof (struct elf32_arm_link_hash_table
);
4100 ret
= (struct elf32_arm_link_hash_table
*) bfd_zmalloc (amt
);
4104 if (!_bfd_elf_link_hash_table_init (& ret
->root
, abfd
,
4105 elf32_arm_link_hash_newfunc
,
4106 sizeof (struct elf32_arm_link_hash_entry
)))
4112 ret
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
4113 ret
->stm32l4xx_fix
= BFD_ARM_STM32L4XX_FIX_NONE
;
4114 #ifdef FOUR_WORD_PLT
4115 ret
->plt_header_size
= 16;
4116 ret
->plt_entry_size
= 16;
4118 ret
->plt_header_size
= 20;
4119 ret
->plt_entry_size
= elf32_arm_use_long_plt_entry
? 16 : 12;
4121 ret
->use_rel
= true;
4125 if (!bfd_hash_table_init (&ret
->stub_hash_table
, stub_hash_newfunc
,
4126 sizeof (struct elf32_arm_stub_hash_entry
)))
4128 _bfd_elf_link_hash_table_free (abfd
);
4131 ret
->root
.root
.hash_table_free
= elf32_arm_link_hash_table_free
;
4133 return &ret
->root
.root
;
4136 /* Determine what kind of NOPs are available. */
4139 arch_has_arm_nop (struct elf32_arm_link_hash_table
*globals
)
4141 const int arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
4144 /* Force return logic to be reviewed for each new architecture. */
4145 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V9
);
4147 return (arch
== TAG_CPU_ARCH_V6T2
4148 || arch
== TAG_CPU_ARCH_V6K
4149 || arch
== TAG_CPU_ARCH_V7
4150 || arch
== TAG_CPU_ARCH_V8
4151 || arch
== TAG_CPU_ARCH_V8R
4152 || arch
== TAG_CPU_ARCH_V9
);
static bool
arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_thumb2_only_pure:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_cmse_branch_thumb_only:
      return true;

    default:
      return false;
    }
}
/* Determine the type of stub needed, if any, for a call.  */

static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
		  asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  unsigned char st_type,
		  enum arm_st_branch_type *actual_branch_type,
		  struct elf32_arm_link_hash_entry *hash,
		  bfd_vma destination,
		  asection *sym_sec,
		  bfd *input_bfd,
		  const char *name)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  bool thumb2, thumb2_bl, thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;
  int arch;
  int thumb2_movw;

  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);
  thumb2 = using_thumb2 (globals);
  thumb2_bl = using_thumb2_bl (globals);

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* True for architectures that implement the thumb2 movw instruction.  */
  thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma
	      + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);
4229 /* Don't pretend we know what stub to use (if any) when we target a
4230 Thumb-only target and we don't know the actual destination
4232 if (branch_type
== ST_BRANCH_UNKNOWN
&& thumb_only
)
4235 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
4236 are considering a function call relocation. */
4237 if (thumb_only
&& (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
4238 || r_type
== R_ARM_THM_JUMP19
)
4239 && branch_type
== ST_BRANCH_TO_ARM
)
4241 if (sym_sec
== bfd_abs_section_ptr
)
4242 /* As an exception, assume that absolute symbols are of the
4243 right kind (Thumb). They are presumably defined in the
4244 linker script, where it is not possible to declare them as
4245 Thumb (and thus are seen as Arm mode). We'll inform the
4246 user with a warning, though, in
4247 elf32_arm_final_link_relocate. */
4248 branch_type
= ST_BRANCH_TO_THUMB
;
4250 /* Otherwise do not silently build a stub, and let the users
4251 know they have to fix their code. Indeed, we could decide
4252 to insert a stub involving Arm code and/or BLX, leading to
4253 a run-time crash. */
4257 /* For TLS call relocs, it is the caller's responsibility to provide
4258 the address of the appropriate trampoline. */
4259 if (r_type
!= R_ARM_TLS_CALL
4260 && r_type
!= R_ARM_THM_TLS_CALL
4261 && elf32_arm_get_plt_info (input_bfd
, globals
, hash
,
4262 ELF32_R_SYM (rel
->r_info
), &root_plt
,
4264 && root_plt
->offset
!= (bfd_vma
) -1)
4268 if (hash
== NULL
|| hash
->is_iplt
)
4269 splt
= globals
->root
.iplt
;
4271 splt
= globals
->root
.splt
;
4276 /* Note when dealing with PLT entries: the main PLT stub is in
4277 ARM mode, so if the branch is in Thumb mode, another
4278 Thumb->ARM stub will be inserted later just before the ARM
4279 PLT stub. If a long branch stub is needed, we'll add a
4280 Thumb->Arm one and branch directly to the ARM PLT entry.
4281 Here, we have to check if a pre-PLT Thumb->ARM stub
4282 is needed and if it will be close enough. */
4284 destination
= (splt
->output_section
->vma
4285 + splt
->output_offset
4286 + root_plt
->offset
);
4289 /* Thumb branch/call to PLT: it can become a branch to ARM
4290 or to Thumb. We must perform the same checks and
4291 corrections as in elf32_arm_final_link_relocate. */
4292 if ((r_type
== R_ARM_THM_CALL
)
4293 || (r_type
== R_ARM_THM_JUMP24
))
4295 if (globals
->use_blx
4296 && r_type
== R_ARM_THM_CALL
4299 /* If the Thumb BLX instruction is available, convert
4300 the BL to a BLX instruction to call the ARM-mode
4302 branch_type
= ST_BRANCH_TO_ARM
;
4307 /* Target the Thumb stub before the ARM PLT entry. */
4308 destination
-= PLT_THUMB_STUB_SIZE
;
4309 branch_type
= ST_BRANCH_TO_THUMB
;
4314 branch_type
= ST_BRANCH_TO_ARM
;
4318 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
4319 BFD_ASSERT (st_type
!= STT_GNU_IFUNC
);
4321 branch_offset
= (bfd_signed_vma
)(destination
- location
);
4323 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
4324 || r_type
== R_ARM_THM_TLS_CALL
|| r_type
== R_ARM_THM_JUMP19
)
4326 /* Handle cases where:
4327 - this call goes too far (different Thumb/Thumb2 max
4329 - it's a Thumb->Arm call and blx is not available, or it's a
4330 Thumb->Arm branch (not bl). A stub is needed in this case,
4331 but only if this call is not through a PLT entry. Indeed,
4332 PLT stubs handle mode switching already. */
4334 && (branch_offset
> THM_MAX_FWD_BRANCH_OFFSET
4335 || (branch_offset
< THM_MAX_BWD_BRANCH_OFFSET
)))
4337 && (branch_offset
> THM2_MAX_FWD_BRANCH_OFFSET
4338 || (branch_offset
< THM2_MAX_BWD_BRANCH_OFFSET
)))
4340 && (branch_offset
> THM2_MAX_FWD_COND_BRANCH_OFFSET
4341 || (branch_offset
< THM2_MAX_BWD_COND_BRANCH_OFFSET
))
4342 && (r_type
== R_ARM_THM_JUMP19
))
4343 || (branch_type
== ST_BRANCH_TO_ARM
4344 && (((r_type
== R_ARM_THM_CALL
4345 || r_type
== R_ARM_THM_TLS_CALL
) && !globals
->use_blx
)
4346 || (r_type
== R_ARM_THM_JUMP24
)
4347 || (r_type
== R_ARM_THM_JUMP19
))
4350 /* If we need to insert a Thumb-Thumb long branch stub to a
4351 PLT, use one that branches directly to the ARM PLT
4352 stub. If we pretended we'd use the pre-PLT Thumb->ARM
4353 stub, undo this now. */
4354 if ((branch_type
== ST_BRANCH_TO_THUMB
) && use_plt
&& !thumb_only
)
4356 branch_type
= ST_BRANCH_TO_ARM
;
4357 branch_offset
+= PLT_THUMB_STUB_SIZE
;
4360 if (branch_type
== ST_BRANCH_TO_THUMB
)
4362 /* Thumb to thumb. */
4365 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4367 (_("%pB(%pA): warning: long branch veneers used in"
4368 " section with SHF_ARM_PURECODE section"
4369 " attribute is only supported for M-profile"
4370 " targets that implement the movw instruction"),
4371 input_bfd
, input_sec
);
4373 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4375 ? ((globals
->use_blx
4376 && (r_type
== R_ARM_THM_CALL
))
4377 /* V5T and above. Stub starts with ARM code, so
4378 we must be able to switch mode before
4379 reaching it, which is only possible for 'bl'
4380 (ie R_ARM_THM_CALL relocation). */
4381 ? arm_stub_long_branch_any_thumb_pic
4382 /* On V4T, use Thumb code only. */
4383 : arm_stub_long_branch_v4t_thumb_thumb_pic
)
4385 /* non-PIC stubs. */
4386 : ((globals
->use_blx
4387 && (r_type
== R_ARM_THM_CALL
))
4388 /* V5T and above. */
4389 ? arm_stub_long_branch_any_any
4391 : arm_stub_long_branch_v4t_thumb_thumb
);
4395 if (thumb2_movw
&& (input_sec
->flags
& SEC_ELF_PURECODE
))
4396 stub_type
= arm_stub_long_branch_thumb2_only_pure
;
4399 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4401 (_("%pB(%pA): warning: long branch veneers used in"
4402 " section with SHF_ARM_PURECODE section"
4403 " attribute is only supported for M-profile"
4404 " targets that implement the movw instruction"),
4405 input_bfd
, input_sec
);
4407 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4409 ? arm_stub_long_branch_thumb_only_pic
4411 : (thumb2
? arm_stub_long_branch_thumb2_only
4412 : arm_stub_long_branch_thumb_only
);
4418 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4420 (_("%pB(%pA): warning: long branch veneers used in"
4421 " section with SHF_ARM_PURECODE section"
4422 " attribute is only supported" " for M-profile"
4423 " targets that implement the movw instruction"),
4424 input_bfd
, input_sec
);
4428 && sym_sec
->owner
!= NULL
4429 && !INTERWORK_FLAG (sym_sec
->owner
))
4432 (_("%pB(%s): warning: interworking not enabled;"
4433 " first occurrence: %pB: %s call to %s"),
4434 sym_sec
->owner
, name
, input_bfd
, "Thumb", "ARM");
4438 (bfd_link_pic (info
) | globals
->pic_veneer
)
4440 ? (r_type
== R_ARM_THM_TLS_CALL
4441 /* TLS PIC stubs. */
4442 ? (globals
->use_blx
? arm_stub_long_branch_any_tls_pic
4443 : arm_stub_long_branch_v4t_thumb_tls_pic
)
4444 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
4445 /* V5T PIC and above. */
4446 ? arm_stub_long_branch_any_arm_pic
4448 : arm_stub_long_branch_v4t_thumb_arm_pic
))
4450 /* non-PIC stubs. */
4451 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
4452 /* V5T and above. */
4453 ? arm_stub_long_branch_any_any
4455 : arm_stub_long_branch_v4t_thumb_arm
);
4457 /* Handle v4t short branches. */
4458 if ((stub_type
== arm_stub_long_branch_v4t_thumb_arm
)
4459 && (branch_offset
<= THM_MAX_FWD_BRANCH_OFFSET
)
4460 && (branch_offset
>= THM_MAX_BWD_BRANCH_OFFSET
))
4461 stub_type
= arm_stub_short_branch_v4t_thumb_arm
;
4465 else if (r_type
== R_ARM_CALL
4466 || r_type
== R_ARM_JUMP24
4467 || r_type
== R_ARM_PLT32
4468 || r_type
== R_ARM_TLS_CALL
)
4470 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4472 (_("%pB(%pA): warning: long branch veneers used in"
4473 " section with SHF_ARM_PURECODE section"
4474 " attribute is only supported for M-profile"
4475 " targets that implement the movw instruction"),
4476 input_bfd
, input_sec
);
4477 if (branch_type
== ST_BRANCH_TO_THUMB
)
4482 && sym_sec
->owner
!= NULL
4483 && !INTERWORK_FLAG (sym_sec
->owner
))
4486 (_("%pB(%s): warning: interworking not enabled;"
4487 " first occurrence: %pB: %s call to %s"),
4488 sym_sec
->owner
, name
, input_bfd
, "ARM", "Thumb");
4491 /* We have an extra 2-bytes reach because of
4492 the mode change (bit 24 (H) of BLX encoding). */
4493 if (branch_offset
> (ARM_MAX_FWD_BRANCH_OFFSET
+ 2)
4494 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
)
4495 || (r_type
== R_ARM_CALL
&& !globals
->use_blx
)
4496 || (r_type
== R_ARM_JUMP24
)
4497 || (r_type
== R_ARM_PLT32
))
4499 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4501 ? ((globals
->use_blx
)
4502 /* V5T and above. */
4503 ? arm_stub_long_branch_any_thumb_pic
4505 : arm_stub_long_branch_v4t_arm_thumb_pic
)
4507 /* non-PIC stubs. */
4508 : ((globals
->use_blx
)
4509 /* V5T and above. */
4510 ? arm_stub_long_branch_any_any
4512 : arm_stub_long_branch_v4t_arm_thumb
);
4518 if (branch_offset
> ARM_MAX_FWD_BRANCH_OFFSET
4519 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
))
4522 (bfd_link_pic (info
) | globals
->pic_veneer
)
4524 ? (r_type
== R_ARM_TLS_CALL
4526 ? arm_stub_long_branch_any_tls_pic
4527 : (globals
->root
.target_os
== is_nacl
4528 ? arm_stub_long_branch_arm_nacl_pic
4529 : arm_stub_long_branch_any_arm_pic
))
4530 /* non-PIC stubs. */
4531 : (globals
->root
.target_os
== is_nacl
4532 ? arm_stub_long_branch_arm_nacl
4533 : arm_stub_long_branch_any_any
);
4538 /* If a stub is needed, record the actual destination type. */
4539 if (stub_type
!= arm_stub_none
)
4540 *actual_branch_type
= branch_type
;
/* Build a name for an entry in the stub hash table.  */

static char *
elf32_arm_stub_name (const asection *input_section,
		     const asection *sym_sec,
		     const struct elf32_arm_link_hash_entry *hash,
		     const Elf_Internal_Rela *rel,
		     enum elf32_arm_stub_type stub_type)
{
  char *stub_name;
  bfd_size_type len;

  if (hash)
    {
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%s+%x_%d",
		 input_section->id & 0xffffffff,
		 hash->root.root.root.string,
		 (int) rel->r_addend & 0xffffffff,
		 stub_type);
    }
  else
    {
      len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%x:%x+%x_%d",
		 input_section->id & 0xffffffff,
		 sym_sec->id & 0xffffffff,
		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
		 (int) rel->r_addend & 0xffffffff,
		 stub_type);
    }

  return stub_name;
}
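/* For illustration only (hypothetical values): a call from an input section
   with id 0x24 to the global symbol "printf", with addend 0 and stub type 3,
   would get a name such as "00000024_printf+0_3" under the first format
   above; the second format is used for local symbols and keys on the target
   section id and symbol index instead of the symbol name.  */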
4586 /* Look up an entry in the stub hash. Stub entries are cached because
4587 creating the stub name takes a bit of time. */
4589 static struct elf32_arm_stub_hash_entry
*
4590 elf32_arm_get_stub_entry (const asection
*input_section
,
4591 const asection
*sym_sec
,
4592 struct elf_link_hash_entry
*hash
,
4593 const Elf_Internal_Rela
*rel
,
4594 struct elf32_arm_link_hash_table
*htab
,
4595 enum elf32_arm_stub_type stub_type
)
4597 struct elf32_arm_stub_hash_entry
*stub_entry
;
4598 struct elf32_arm_link_hash_entry
*h
= (struct elf32_arm_link_hash_entry
*) hash
;
4599 const asection
*id_sec
;
4601 if ((input_section
->flags
& SEC_CODE
) == 0)
  /* If the input section is the CMSE stubs one and it needs a long
     branch stub to reach its final destination, give up with an
     error message: this is not supported.  See PR ld/24709.  */
4607 if (!strncmp (input_section
->name
, CMSE_STUB_NAME
, strlen (CMSE_STUB_NAME
)))
4609 bfd
*output_bfd
= htab
->obfd
;
4610 asection
*out_sec
= bfd_get_section_by_name (output_bfd
, CMSE_STUB_NAME
);
4612 _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
4613 "(%#" PRIx64
") from destination (%#" PRIx64
")"),
4615 (uint64_t)out_sec
->output_section
->vma
4616 + out_sec
->output_offset
,
4617 (uint64_t)sym_sec
->output_section
->vma
4618 + sym_sec
->output_offset
4619 + h
->root
.root
.u
.def
.value
);
4620 /* Exit, rather than leave incompletely processed
4625 /* If this input section is part of a group of sections sharing one
4626 stub section, then use the id of the first section in the group.
4627 Stub names need to include a section id, as there may well be
4628 more than one stub used to reach say, printf, and we need to
4629 distinguish between them. */
4630 BFD_ASSERT (input_section
->id
<= htab
->top_id
);
4631 id_sec
= htab
->stub_group
[input_section
->id
].link_sec
;
4633 if (h
!= NULL
&& h
->stub_cache
!= NULL
4634 && h
->stub_cache
->h
== h
4635 && h
->stub_cache
->id_sec
== id_sec
4636 && h
->stub_cache
->stub_type
== stub_type
)
4638 stub_entry
= h
->stub_cache
;
4644 stub_name
= elf32_arm_stub_name (id_sec
, sym_sec
, h
, rel
, stub_type
);
4645 if (stub_name
== NULL
)
4648 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
,
4649 stub_name
, false, false);
4651 h
->stub_cache
= stub_entry
;
/* Whether veneers of type STUB_TYPE require to be in a dedicated output
   section.  */

static bool
arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return true;

    default:
      return false;
    }

  abort ();  /* Should be unreachable.  */
}

/* Required alignment (as a power of 2) for the dedicated section holding
   veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
   with input sections.  */

static int
arm_dedicated_stub_output_section_required_alignment
  (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    /* Vectors of Secure Gateway veneers must be aligned on 32byte
       boundary.  */
    case arm_stub_cmse_branch_thumb_only:
      return 5;

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return 0;
    }

  abort ();  /* Should be unreachable.  */
}

/* Name of the dedicated output section to put veneers of type STUB_TYPE, or
   NULL if veneers of this type are interspersed with input sections.  */

static const char *
arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return CMSE_STUB_NAME;

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return NULL;
    }

  abort ();  /* Should be unreachable.  */
}

/* If veneers of type STUB_TYPE should go in a dedicated output section,
   returns the address of the hash table field in HTAB holding a pointer to
   the corresponding input section.  Otherwise, returns NULL.  */

static asection **
arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
				      enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return &htab->cmse_stub_sec;

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return NULL;
    }

  abort ();  /* Should be unreachable.  */
}
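/* Note: for CMSE veneers the dedicated output section named above is the
   Secure Gateway stub section (CMSE_STUB_NAME, conventionally
   ".gnu.sgstubs"); it is expected to be present and placed by the link, see
   the "no address assigned to the veneers output section" error path in
   elf32_arm_create_or_find_stub_sec below.  */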
4752 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4753 is the section that branch into veneer and can be NULL if stub should go in
4754 a dedicated output section. Returns a pointer to the stub section, and the
4755 section to which the stub section will be attached (in *LINK_SEC_P).
4756 LINK_SEC_P may be NULL. */
4759 elf32_arm_create_or_find_stub_sec (asection
**link_sec_p
, asection
*section
,
4760 struct elf32_arm_link_hash_table
*htab
,
4761 enum elf32_arm_stub_type stub_type
)
4763 asection
*link_sec
, *out_sec
, **stub_sec_p
;
4764 const char *stub_sec_prefix
;
4765 bool dedicated_output_section
=
4766 arm_dedicated_stub_output_section_required (stub_type
);
4769 if (dedicated_output_section
)
4771 bfd
*output_bfd
= htab
->obfd
;
4772 const char *out_sec_name
=
4773 arm_dedicated_stub_output_section_name (stub_type
);
4775 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
4776 stub_sec_prefix
= out_sec_name
;
4777 align
= arm_dedicated_stub_output_section_required_alignment (stub_type
);
4778 out_sec
= bfd_get_section_by_name (output_bfd
, out_sec_name
);
4779 if (out_sec
== NULL
)
4781 _bfd_error_handler (_("no address assigned to the veneers output "
4782 "section %s"), out_sec_name
);
4788 BFD_ASSERT (section
->id
<= htab
->top_id
);
4789 link_sec
= htab
->stub_group
[section
->id
].link_sec
;
4790 BFD_ASSERT (link_sec
!= NULL
);
4791 stub_sec_p
= &htab
->stub_group
[section
->id
].stub_sec
;
4792 if (*stub_sec_p
== NULL
)
4793 stub_sec_p
= &htab
->stub_group
[link_sec
->id
].stub_sec
;
4794 stub_sec_prefix
= link_sec
->name
;
4795 out_sec
= link_sec
->output_section
;
4796 align
= htab
->root
.target_os
== is_nacl
? 4 : 3;
4799 if (*stub_sec_p
== NULL
)
4805 namelen
= strlen (stub_sec_prefix
);
4806 len
= namelen
+ sizeof (STUB_SUFFIX
);
4807 s_name
= (char *) bfd_alloc (htab
->stub_bfd
, len
);
4811 memcpy (s_name
, stub_sec_prefix
, namelen
);
4812 memcpy (s_name
+ namelen
, STUB_SUFFIX
, sizeof (STUB_SUFFIX
));
4813 *stub_sec_p
= (*htab
->add_stub_section
) (s_name
, out_sec
, link_sec
,
4815 if (*stub_sec_p
== NULL
)
4818 out_sec
->flags
|= SEC_ALLOC
| SEC_LOAD
| SEC_READONLY
| SEC_CODE
4819 | SEC_HAS_CONTENTS
| SEC_RELOC
| SEC_IN_MEMORY
4823 if (!dedicated_output_section
)
4824 htab
->stub_group
[section
->id
].stub_sec
= *stub_sec_p
;
4827 *link_sec_p
= link_sec
;
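/* As a sketch of the naming scheme implemented above (values hypothetical):
   a stub attached to a group whose head input section is ".text.foo" ends up
   in a stub section named "<prefix>" STUB_SUFFIX, typically ".text.foo.stub",
   attached next to that group, while veneers that require a dedicated output
   section (currently only CMSE ones) all share the single section returned
   by arm_dedicated_stub_output_section_name.  */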
4832 /* Add a new stub entry to the stub hash. Not all fields of the new
4833 stub entry are initialised. */
4835 static struct elf32_arm_stub_hash_entry
*
4836 elf32_arm_add_stub (const char *stub_name
, asection
*section
,
4837 struct elf32_arm_link_hash_table
*htab
,
4838 enum elf32_arm_stub_type stub_type
)
4842 struct elf32_arm_stub_hash_entry
*stub_entry
;
4844 stub_sec
= elf32_arm_create_or_find_stub_sec (&link_sec
, section
, htab
,
4846 if (stub_sec
== NULL
)
4849 /* Enter this entry into the linker stub hash table. */
4850 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
4852 if (stub_entry
== NULL
)
4854 if (section
== NULL
)
4856 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4857 section
->owner
, stub_name
);
4861 stub_entry
->stub_sec
= stub_sec
;
4862 stub_entry
->stub_offset
= (bfd_vma
) -1;
4863 stub_entry
->id_sec
= link_sec
;
/* Store an Arm insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_arm_insn (struct elf32_arm_link_hash_table * htab,
	      bfd * output_bfd, bfd_vma val, void * ptr)
{
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    bfd_putl32 (val, ptr);
  else
    bfd_putb32 (val, ptr);
}

/* Store a 16-bit Thumb insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_thumb_insn (struct elf32_arm_link_hash_table * htab,
		bfd * output_bfd, bfd_vma val, void * ptr)
{
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    bfd_putl16 (val, ptr);
  else
    bfd_putb16 (val, ptr);
}

/* Store a Thumb2 insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
		 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
{
  /* T2 instructions are 16-bit streamed.  */
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    {
      bfd_putl16 ((val >> 16) & 0xffff, ptr);
      bfd_putl16 ((val & 0xffff), ptr + 2);
    }
  else
    {
      bfd_putb16 ((val >> 16) & 0xffff, ptr);
      bfd_putb16 ((val & 0xffff), ptr + 2);
    }
}
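/* A worked example (illustrative only): for the Thumb-2 encoding 0xf000f800
   (a BL whose offset field is zero), put_thumb2_insn emits the halfword
   0xf000 at PTR and 0xf800 at PTR + 2, each in the byte order selected by
   byteswap_code; Thumb-2 instructions are always streamed as two consecutive
   halfwords rather than stored as a single 32-bit word.  */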
/* If it's possible to change R_TYPE to a more efficient access
   model, return the new reloc type.  */

static int
elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
			  struct elf_link_hash_entry *h)
{
  int is_local = (h == NULL);

  if (bfd_link_dll (info)
      || (h && h->root.type == bfd_link_hash_undefweak))
    return r_type;

  /* We do not support relaxations for Old TLS models.  */
  switch (r_type)
    {
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
    }

  return r_type;
}
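/* Illustrative note: when linking an executable (not a shared library), a
   TLS-descriptor access to a locally defined variable can be rewritten to
   use R_ARM_TLS_LE32, since the offset from the thread pointer is known at
   link time, while an access to a symbol defined elsewhere becomes
   R_ARM_TLS_IE32 and goes through a GOT slot.  Only the descriptor-style
   relocations listed above are relaxed; the old global/local-dynamic models
   are left untouched.  */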
static bfd_reloc_status_type elf32_arm_final_link_relocate
  (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
   Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
   const char *, unsigned char, enum arm_st_branch_type,
   struct elf_link_hash_entry *, bool *, char **);
static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_thumb2_only_pure:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_cmse_branch_thumb_only:
    case arm_stub_a8_veneer_blx:
      return 4;

    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}
/* Returns whether stubs of type STUB_TYPE take over the symbol they are
   veneering (TRUE) or have their own symbol (FALSE).  */

static bool
arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return true;

    default:
      return false;
    }

  abort ();  /* Should be unreachable.  */
}
/* Returns the padding needed for the dedicated section used by stubs of type
   STUB_TYPE.  */

static int
arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return 32;

    default:
      return 0;
    }

  abort ();  /* Should be unreachable.  */
}
/* If veneers of type STUB_TYPE should go in a dedicated output section,
   returns the address of the hash table field in HTAB holding the offset at
   which new veneers should be laid out in the stub section.  */

static bfd_vma *
arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
				enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return &htab->new_cmse_stub_offset;

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return NULL;
    }
}
5048 arm_build_one_stub (struct bfd_hash_entry
*gen_entry
,
5052 bool removed_sg_veneer
;
5053 struct elf32_arm_stub_hash_entry
*stub_entry
;
5054 struct elf32_arm_link_hash_table
*globals
;
5055 struct bfd_link_info
*info
;
5062 const insn_sequence
*template_sequence
;
5064 int stub_reloc_idx
[MAXRELOCS
] = {-1, -1};
5065 int stub_reloc_offset
[MAXRELOCS
] = {0, 0};
5067 int just_allocated
= 0;
5069 /* Massage our args to the form they really have. */
5070 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
5071 info
= (struct bfd_link_info
*) in_arg
;
5073 /* Fail if the target section could not be assigned to an output
5074 section. The user should fix his linker script. */
5075 if (stub_entry
->target_section
->output_section
== NULL
5076 && info
->non_contiguous_regions
)
5077 info
->callbacks
->einfo (_("%F%P: Could not assign `%pA' to an output section. "
5078 "Retry without --enable-non-contiguous-regions.\n"),
5079 stub_entry
->target_section
);
5081 globals
= elf32_arm_hash_table (info
);
5082 if (globals
== NULL
)
5085 stub_sec
= stub_entry
->stub_sec
;
5087 if ((globals
->fix_cortex_a8
< 0)
5088 != (arm_stub_required_alignment (stub_entry
->stub_type
) == 2))
5089 /* We have to do less-strictly-aligned fixes last. */
5092 /* Assign a slot at the end of section if none assigned yet. */
5093 if (stub_entry
->stub_offset
== (bfd_vma
) -1)
5095 stub_entry
->stub_offset
= stub_sec
->size
;
5098 loc
= stub_sec
->contents
+ stub_entry
->stub_offset
;
5100 stub_bfd
= stub_sec
->owner
;
5102 /* This is the address of the stub destination. */
5103 sym_value
= (stub_entry
->target_value
5104 + stub_entry
->target_section
->output_offset
5105 + stub_entry
->target_section
->output_section
->vma
);
5107 template_sequence
= stub_entry
->stub_template
;
5108 template_size
= stub_entry
->stub_template_size
;
5111 for (i
= 0; i
< template_size
; i
++)
5113 switch (template_sequence
[i
].type
)
5117 bfd_vma data
= (bfd_vma
) template_sequence
[i
].data
;
5118 if (template_sequence
[i
].reloc_addend
!= 0)
5120 /* We've borrowed the reloc_addend field to mean we should
5121 insert a condition code into this (Thumb-1 branch)
5122 instruction. See THUMB16_BCOND_INSN. */
5123 BFD_ASSERT ((data
& 0xff00) == 0xd000);
5124 data
|= ((stub_entry
->orig_insn
>> 22) & 0xf) << 8;
5126 bfd_put_16 (stub_bfd
, data
, loc
+ size
);
5132 bfd_put_16 (stub_bfd
,
5133 (template_sequence
[i
].data
>> 16) & 0xffff,
5135 bfd_put_16 (stub_bfd
, template_sequence
[i
].data
& 0xffff,
5137 if (template_sequence
[i
].r_type
!= R_ARM_NONE
)
5139 stub_reloc_idx
[nrelocs
] = i
;
5140 stub_reloc_offset
[nrelocs
++] = size
;
5146 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
,
5148 /* Handle cases where the target is encoded within the
5150 if (template_sequence
[i
].r_type
== R_ARM_JUMP24
)
5152 stub_reloc_idx
[nrelocs
] = i
;
5153 stub_reloc_offset
[nrelocs
++] = size
;
5159 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
, loc
+ size
);
5160 stub_reloc_idx
[nrelocs
] = i
;
5161 stub_reloc_offset
[nrelocs
++] = size
;
5172 stub_sec
->size
+= size
;
5174 /* Stub size has already been computed in arm_size_one_stub. Check
5176 BFD_ASSERT (size
== stub_entry
->stub_size
);
5178 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
5179 if (stub_entry
->branch_type
== ST_BRANCH_TO_THUMB
)
5182 /* Assume non empty slots have at least one and at most MAXRELOCS entries
5183 to relocate in each stub. */
5185 (size
== 0 && stub_entry
->stub_type
== arm_stub_cmse_branch_thumb_only
);
5186 BFD_ASSERT (removed_sg_veneer
|| (nrelocs
!= 0 && nrelocs
<= MAXRELOCS
));
5188 for (i
= 0; i
< nrelocs
; i
++)
5190 Elf_Internal_Rela rel
;
5191 bool unresolved_reloc
;
5192 char *error_message
;
5194 sym_value
+ template_sequence
[stub_reloc_idx
[i
]].reloc_addend
;
5196 rel
.r_offset
= stub_entry
->stub_offset
+ stub_reloc_offset
[i
];
5197 rel
.r_info
= ELF32_R_INFO (0,
5198 template_sequence
[stub_reloc_idx
[i
]].r_type
);
5201 if (stub_entry
->stub_type
== arm_stub_a8_veneer_b_cond
&& i
== 0)
5202 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5203 template should refer back to the instruction after the original
5204 branch. We use target_section as Cortex-A8 erratum workaround stubs
5205 are only generated when both source and target are in the same
5207 points_to
= stub_entry
->target_section
->output_section
->vma
5208 + stub_entry
->target_section
->output_offset
5209 + stub_entry
->source_value
;
5211 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5212 (template_sequence
[stub_reloc_idx
[i
]].r_type
),
5213 stub_bfd
, info
->output_bfd
, stub_sec
, stub_sec
->contents
, &rel
,
5214 points_to
, info
, stub_entry
->target_section
, "", STT_FUNC
,
5215 stub_entry
->branch_type
,
5216 (struct elf_link_hash_entry
*) stub_entry
->h
, &unresolved_reloc
,
5224 /* Calculate the template, template size and instruction size for a stub.
5225 Return value is the instruction size. */
5228 find_stub_size_and_template (enum elf32_arm_stub_type stub_type
,
5229 const insn_sequence
**stub_template
,
5230 int *stub_template_size
)
5232 const insn_sequence
*template_sequence
= NULL
;
5233 int template_size
= 0, i
;
5236 template_sequence
= stub_definitions
[stub_type
].template_sequence
;
5238 *stub_template
= template_sequence
;
5240 template_size
= stub_definitions
[stub_type
].template_size
;
5241 if (stub_template_size
)
5242 *stub_template_size
= template_size
;
5245 for (i
= 0; i
< template_size
; i
++)
5247 switch (template_sequence
[i
].type
)
5268 /* As above, but don't actually build the stub. Just bump offset so
5269 we know stub section sizes. */
5272 arm_size_one_stub (struct bfd_hash_entry
*gen_entry
,
5273 void *in_arg ATTRIBUTE_UNUSED
)
5275 struct elf32_arm_stub_hash_entry
*stub_entry
;
5276 const insn_sequence
*template_sequence
;
5277 int template_size
, size
;
5279 /* Massage our args to the form they really have. */
5280 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
5282 BFD_ASSERT ((stub_entry
->stub_type
> arm_stub_none
)
5283 && stub_entry
->stub_type
< ARRAY_SIZE (stub_definitions
));
5285 size
= find_stub_size_and_template (stub_entry
->stub_type
, &template_sequence
,
5288 /* Initialized to -1. Null size indicates an empty slot full of zeros. */
5289 if (stub_entry
->stub_template_size
)
5291 stub_entry
->stub_size
= size
;
5292 stub_entry
->stub_template
= template_sequence
;
5293 stub_entry
->stub_template_size
= template_size
;
5296 /* Already accounted for. */
5297 if (stub_entry
->stub_offset
!= (bfd_vma
) -1)
5300 size
= (size
+ 7) & ~7;
5301 stub_entry
->stub_sec
->size
+= size
;
5306 /* External entry points for sizing and building linker stubs. */
5308 /* Set up various things so that we can make a list of input sections
5309 for each output section included in the link. Returns -1 on error,
5310 0 when no stubs will be needed, and 1 on success. */
5313 elf32_arm_setup_section_lists (bfd
*output_bfd
,
5314 struct bfd_link_info
*info
)
5317 unsigned int bfd_count
;
5318 unsigned int top_id
, top_index
;
5320 asection
**input_list
, **list
;
5322 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
5327 /* Count the number of input BFDs and find the top input section id. */
5328 for (input_bfd
= info
->input_bfds
, bfd_count
= 0, top_id
= 0;
5330 input_bfd
= input_bfd
->link
.next
)
5333 for (section
= input_bfd
->sections
;
5335 section
= section
->next
)
5337 if (top_id
< section
->id
)
5338 top_id
= section
->id
;
5341 htab
->bfd_count
= bfd_count
;
5343 amt
= sizeof (struct map_stub
) * (top_id
+ 1);
5344 htab
->stub_group
= (struct map_stub
*) bfd_zmalloc (amt
);
5345 if (htab
->stub_group
== NULL
)
5347 htab
->top_id
= top_id
;
5349 /* We can't use output_bfd->section_count here to find the top output
5350 section index as some sections may have been removed, and
5351 _bfd_strip_section_from_output doesn't renumber the indices. */
5352 for (section
= output_bfd
->sections
, top_index
= 0;
5354 section
= section
->next
)
5356 if (top_index
< section
->index
)
5357 top_index
= section
->index
;
5360 htab
->top_index
= top_index
;
5361 amt
= sizeof (asection
*) * (top_index
+ 1);
5362 input_list
= (asection
**) bfd_malloc (amt
);
5363 htab
->input_list
= input_list
;
5364 if (input_list
== NULL
)
5367 /* For sections we aren't interested in, mark their entries with a
5368 value we can check later. */
5369 list
= input_list
+ top_index
;
5371 *list
= bfd_abs_section_ptr
;
5372 while (list
-- != input_list
);
5374 for (section
= output_bfd
->sections
;
5376 section
= section
->next
)
5378 if ((section
->flags
& SEC_CODE
) != 0)
5379 input_list
[section
->index
] = NULL
;
5385 /* The linker repeatedly calls this function for each input section,
5386 in the order that input sections are linked into output sections.
5387 Build lists of input sections to determine groupings between which
5388 we may insert linker stubs. */
5391 elf32_arm_next_input_section (struct bfd_link_info
*info
,
5394 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
5399 if (isec
->output_section
->index
<= htab
->top_index
)
5401 asection
**list
= htab
->input_list
+ isec
->output_section
->index
;
5403 if (*list
!= bfd_abs_section_ptr
&& (isec
->flags
& SEC_CODE
) != 0)
5405 /* Steal the link_sec pointer for our list. */
5406 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
5407 /* This happens to make the list in reverse order,
5408 which we reverse later. */
5409 PREV_SEC (isec
) = *list
;
5415 /* See whether we can group stub sections together. Grouping stub
5416 sections may result in fewer stubs. More importantly, we need to
5417 put all .init* and .fini* stubs at the end of the .init or
5418 .fini output sections respectively, because glibc splits the
5419 _init and _fini functions into multiple parts. Putting a stub in
5420 the middle of a function is not a good idea. */
5423 group_sections (struct elf32_arm_link_hash_table
*htab
,
5424 bfd_size_type stub_group_size
,
5425 bool stubs_always_after_branch
)
5427 asection
**list
= htab
->input_list
;
5431 asection
*tail
= *list
;
5434 if (tail
== bfd_abs_section_ptr
)
5437 /* Reverse the list: we must avoid placing stubs at the
5438 beginning of the section because the beginning of the text
5439 section may be required for an interrupt vector in bare metal
5441 #define NEXT_SEC PREV_SEC
5443 while (tail
!= NULL
)
5445 /* Pop from tail. */
5446 asection
*item
= tail
;
5447 tail
= PREV_SEC (item
);
5450 NEXT_SEC (item
) = head
;
5454 while (head
!= NULL
)
5458 bfd_vma stub_group_start
= head
->output_offset
;
5459 bfd_vma end_of_next
;
5462 while (NEXT_SEC (curr
) != NULL
)
5464 next
= NEXT_SEC (curr
);
5465 end_of_next
= next
->output_offset
+ next
->size
;
5466 if (end_of_next
- stub_group_start
>= stub_group_size
)
5467 /* End of NEXT is too far from start, so stop. */
5469 /* Add NEXT to the group. */
5473 /* OK, the size from the start to the start of CURR is less
5474 than stub_group_size and thus can be handled by one stub
5475 section. (Or the head section is itself larger than
5476 stub_group_size, in which case we may be toast.)
5477 We should really be keeping track of the total size of
5478 stubs added here, as stubs contribute to the final output
5482 next
= NEXT_SEC (head
);
5483 /* Set up this stub group. */
5484 htab
->stub_group
[head
->id
].link_sec
= curr
;
5486 while (head
!= curr
&& (head
= next
) != NULL
);
5488 /* But wait, there's more! Input sections up to stub_group_size
5489 bytes after the stub section can be handled by it too. */
5490 if (!stubs_always_after_branch
)
5492 stub_group_start
= curr
->output_offset
+ curr
->size
;
5494 while (next
!= NULL
)
5496 end_of_next
= next
->output_offset
+ next
->size
;
5497 if (end_of_next
- stub_group_start
>= stub_group_size
)
5498 /* End of NEXT is too far from stubs, so stop. */
5500 /* Add NEXT to the stub group. */
5502 next
= NEXT_SEC (head
);
5503 htab
->stub_group
[head
->id
].link_sec
= curr
;
5509 while (list
++ != htab
->input_list
+ htab
->top_index
);
5511 free (htab
->input_list
);
/* Comparison function for sorting/searching relocations relating to Cortex-A8
   erratum fixes.  */

static int
a8_reloc_compare (const void *a, const void *b)
{
  const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
  const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;

  if (ra->from < rb->from)
    return -1;
  else if (ra->from > rb->from)
    return 1;
  else
    return 0;
}

static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
						    const char *, char **);
5536 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5537 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5538 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5542 cortex_a8_erratum_scan (bfd
*input_bfd
,
5543 struct bfd_link_info
*info
,
5544 struct a8_erratum_fix
**a8_fixes_p
,
5545 unsigned int *num_a8_fixes_p
,
5546 unsigned int *a8_fix_table_size_p
,
5547 struct a8_erratum_reloc
*a8_relocs
,
5548 unsigned int num_a8_relocs
,
5549 unsigned prev_num_a8_fixes
,
5550 bool *stub_changed_p
)
5553 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
5554 struct a8_erratum_fix
*a8_fixes
= *a8_fixes_p
;
5555 unsigned int num_a8_fixes
= *num_a8_fixes_p
;
5556 unsigned int a8_fix_table_size
= *a8_fix_table_size_p
;
5561 for (section
= input_bfd
->sections
;
5563 section
= section
->next
)
5565 bfd_byte
*contents
= NULL
;
5566 struct _arm_elf_section_data
*sec_data
;
5570 if (elf_section_type (section
) != SHT_PROGBITS
5571 || (elf_section_flags (section
) & SHF_EXECINSTR
) == 0
5572 || (section
->flags
& SEC_EXCLUDE
) != 0
5573 || (section
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
)
5574 || (section
->output_section
== bfd_abs_section_ptr
))
5577 base_vma
= section
->output_section
->vma
+ section
->output_offset
;
5579 if (elf_section_data (section
)->this_hdr
.contents
!= NULL
)
5580 contents
= elf_section_data (section
)->this_hdr
.contents
;
5581 else if (! bfd_malloc_and_get_section (input_bfd
, section
, &contents
))
5584 sec_data
= elf32_arm_section_data (section
);
5586 for (span
= 0; span
< sec_data
->mapcount
; span
++)
5588 unsigned int span_start
= sec_data
->map
[span
].vma
;
5589 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
5590 ? section
->size
: sec_data
->map
[span
+ 1].vma
;
5592 char span_type
= sec_data
->map
[span
].type
;
5593 bool last_was_32bit
= false, last_was_branch
= false;
5595 if (span_type
!= 't')
5598 /* Span is entirely within a single 4KB region: skip scanning. */
5599 if (((base_vma
+ span_start
) & ~0xfff)
5600 == ((base_vma
+ span_end
) & ~0xfff))
5603 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5605 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5606 * The branch target is in the same 4KB region as the
5607 first half of the branch.
5608 * The instruction before the branch is a 32-bit
5609 length non-branch instruction. */
5610 for (i
= span_start
; i
< span_end
;)
5612 unsigned int insn
= bfd_getl16 (&contents
[i
]);
5613 bool insn_32bit
= false, is_blx
= false, is_b
= false;
5614 bool is_bl
= false, is_bcc
= false, is_32bit_branch
;
5616 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
5621 /* Load the rest of the insn (in manual-friendly order). */
5622 insn
= (insn
<< 16) | bfd_getl16 (&contents
[i
+ 2]);
5624 /* Encoding T4: B<c>.W. */
5625 is_b
= (insn
& 0xf800d000) == 0xf0009000;
5626 /* Encoding T1: BL<c>.W. */
5627 is_bl
= (insn
& 0xf800d000) == 0xf000d000;
5628 /* Encoding T2: BLX<c>.W. */
5629 is_blx
= (insn
& 0xf800d000) == 0xf000c000;
5630 /* Encoding T3: B<c>.W (not permitted in IT block). */
5631 is_bcc
= (insn
& 0xf800d000) == 0xf0008000
5632 && (insn
& 0x07f00000) != 0x03800000;
5635 is_32bit_branch
= is_b
|| is_bl
|| is_blx
|| is_bcc
;
5637 if (((base_vma
+ i
) & 0xfff) == 0xffe
5641 && ! last_was_branch
)
5643 bfd_signed_vma offset
= 0;
5644 bool force_target_arm
= false;
5645 bool force_target_thumb
= false;
5647 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
5648 struct a8_erratum_reloc key
, *found
;
5649 bool use_plt
= false;
5651 key
.from
= base_vma
+ i
;
5652 found
= (struct a8_erratum_reloc
*)
5653 bsearch (&key
, a8_relocs
, num_a8_relocs
,
5654 sizeof (struct a8_erratum_reloc
),
5659 char *error_message
= NULL
;
5660 struct elf_link_hash_entry
*entry
;
5662 /* We don't care about the error returned from this
5663 function, only if there is glue or not. */
5664 entry
= find_thumb_glue (info
, found
->sym_name
,
5668 found
->non_a8_stub
= true;
5670 /* Keep a simpler condition, for the sake of clarity. */
5671 if (htab
->root
.splt
!= NULL
&& found
->hash
!= NULL
5672 && found
->hash
->root
.plt
.offset
!= (bfd_vma
) -1)
5675 if (found
->r_type
== R_ARM_THM_CALL
)
5677 if (found
->branch_type
== ST_BRANCH_TO_ARM
5679 force_target_arm
= true;
5681 force_target_thumb
= true;
5685 /* Check if we have an offending branch instruction. */
5687 if (found
&& found
->non_a8_stub
)
5688 /* We've already made a stub for this instruction, e.g.
5689 it's a long branch or a Thumb->ARM stub. Assume that
5690 stub will suffice to work around the A8 erratum (see
5691 setting of always_after_branch above). */
5695 offset
= (insn
& 0x7ff) << 1;
5696 offset
|= (insn
& 0x3f0000) >> 4;
5697 offset
|= (insn
& 0x2000) ? 0x40000 : 0;
5698 offset
|= (insn
& 0x800) ? 0x80000 : 0;
5699 offset
|= (insn
& 0x4000000) ? 0x100000 : 0;
5700 if (offset
& 0x100000)
5701 offset
|= ~ ((bfd_signed_vma
) 0xfffff);
5702 stub_type
= arm_stub_a8_veneer_b_cond
;
5704 else if (is_b
|| is_bl
|| is_blx
)
5706 int s
= (insn
& 0x4000000) != 0;
5707 int j1
= (insn
& 0x2000) != 0;
5708 int j2
= (insn
& 0x800) != 0;
5712 offset
= (insn
& 0x7ff) << 1;
5713 offset
|= (insn
& 0x3ff0000) >> 4;
5717 if (offset
& 0x1000000)
5718 offset
|= ~ ((bfd_signed_vma
) 0xffffff);
5721 offset
&= ~ ((bfd_signed_vma
) 3);
5723 stub_type
= is_blx
? arm_stub_a8_veneer_blx
:
5724 is_bl
? arm_stub_a8_veneer_bl
: arm_stub_a8_veneer_b
;
5727 if (stub_type
!= arm_stub_none
)
5729 bfd_vma pc_for_insn
= base_vma
+ i
+ 4;
5731 /* The original instruction is a BL, but the target is
5732 an ARM instruction. If we were not making a stub,
5733 the BL would have been converted to a BLX. Use the
5734 BLX stub instead in that case. */
5735 if (htab
->use_blx
&& force_target_arm
5736 && stub_type
== arm_stub_a8_veneer_bl
)
5738 stub_type
= arm_stub_a8_veneer_blx
;
5742 /* Conversely, if the original instruction was
5743 BLX but the target is Thumb mode, use the BL
5745 else if (force_target_thumb
5746 && stub_type
== arm_stub_a8_veneer_blx
)
5748 stub_type
= arm_stub_a8_veneer_bl
;
5754 pc_for_insn
&= ~ ((bfd_vma
) 3);
5756 /* If we found a relocation, use the proper destination,
5757 not the offset in the (unrelocated) instruction.
5758 Note this is always done if we switched the stub type
5762 (bfd_signed_vma
) (found
->destination
- pc_for_insn
);
5764 /* If the stub will use a Thumb-mode branch to a
5765 PLT target, redirect it to the preceding Thumb
5767 if (stub_type
!= arm_stub_a8_veneer_blx
&& use_plt
)
5768 offset
-= PLT_THUMB_STUB_SIZE
;
5770 target
= pc_for_insn
+ offset
;
5772 /* The BLX stub is ARM-mode code. Adjust the offset to
5773 take the different PC value (+8 instead of +4) into
5775 if (stub_type
== arm_stub_a8_veneer_blx
)
5778 if (((base_vma
+ i
) & ~0xfff) == (target
& ~0xfff))
5780 char *stub_name
= NULL
;
5782 if (num_a8_fixes
== a8_fix_table_size
)
5784 a8_fix_table_size
*= 2;
5785 a8_fixes
= (struct a8_erratum_fix
*)
5786 bfd_realloc (a8_fixes
,
5787 sizeof (struct a8_erratum_fix
)
5788 * a8_fix_table_size
);
5791 if (num_a8_fixes
< prev_num_a8_fixes
)
5793 /* If we're doing a subsequent scan,
5794 check if we've found the same fix as
5795 before, and try and reuse the stub
5797 stub_name
= a8_fixes
[num_a8_fixes
].stub_name
;
5798 if ((a8_fixes
[num_a8_fixes
].section
!= section
)
5799 || (a8_fixes
[num_a8_fixes
].offset
!= i
))
5803 *stub_changed_p
= true;
5809 stub_name
= (char *) bfd_malloc (8 + 1 + 8 + 1);
5810 if (stub_name
!= NULL
)
5811 sprintf (stub_name
, "%x:%x", section
->id
, i
);
5814 a8_fixes
[num_a8_fixes
].input_bfd
= input_bfd
;
5815 a8_fixes
[num_a8_fixes
].section
= section
;
5816 a8_fixes
[num_a8_fixes
].offset
= i
;
5817 a8_fixes
[num_a8_fixes
].target_offset
=
5819 a8_fixes
[num_a8_fixes
].orig_insn
= insn
;
5820 a8_fixes
[num_a8_fixes
].stub_name
= stub_name
;
5821 a8_fixes
[num_a8_fixes
].stub_type
= stub_type
;
5822 a8_fixes
[num_a8_fixes
].branch_type
=
5823 is_blx
? ST_BRANCH_TO_ARM
: ST_BRANCH_TO_THUMB
;
5830 i
+= insn_32bit
? 4 : 2;
5831 last_was_32bit
= insn_32bit
;
5832 last_was_branch
= is_32bit_branch
;
5836 if (elf_section_data (section
)->this_hdr
.contents
== NULL
)
5840 *a8_fixes_p
= a8_fixes
;
5841 *num_a8_fixes_p
= num_a8_fixes
;
5842 *a8_fix_table_size_p
= a8_fix_table_size
;
/* Create or update a stub entry depending on whether the stub can already be
   found in HTAB.  The stub is identified by:
   - its type STUB_TYPE
   - its source branch (note that several can share the same stub) whose
     section and relocation (if any) are given by SECTION and IRELA
     respectively
   - its target symbol whose input section, hash, name, value and branch type
     are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
     respectively.

   If found, the value of the stub's target symbol is updated from SYM_VALUE
   and *NEW_STUB is set to FALSE.  Otherwise, *NEW_STUB is set to TRUE and
   the stub entry is initialized.

   Returns the stub that was created or updated, or NULL if an error
   occurred.  */
*
5865 elf32_arm_create_stub (struct elf32_arm_link_hash_table
*htab
,
5866 enum elf32_arm_stub_type stub_type
, asection
*section
,
5867 Elf_Internal_Rela
*irela
, asection
*sym_sec
,
5868 struct elf32_arm_link_hash_entry
*hash
, char *sym_name
,
5869 bfd_vma sym_value
, enum arm_st_branch_type branch_type
,
5872 const asection
*id_sec
;
5874 struct elf32_arm_stub_hash_entry
*stub_entry
;
5875 unsigned int r_type
;
5876 bool sym_claimed
= arm_stub_sym_claimed (stub_type
);
5878 BFD_ASSERT (stub_type
!= arm_stub_none
);
5882 stub_name
= sym_name
;
5886 BFD_ASSERT (section
);
5887 BFD_ASSERT (section
->id
<= htab
->top_id
);
5889 /* Support for grouping stub sections. */
5890 id_sec
= htab
->stub_group
[section
->id
].link_sec
;
5892 /* Get the name of this stub. */
5893 stub_name
= elf32_arm_stub_name (id_sec
, sym_sec
, hash
, irela
,
5899 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
, false,
5901 /* The proper stub has already been created, just update its value. */
5902 if (stub_entry
!= NULL
)
5906 stub_entry
->target_value
= sym_value
;
5910 stub_entry
= elf32_arm_add_stub (stub_name
, section
, htab
, stub_type
);
5911 if (stub_entry
== NULL
)
5918 stub_entry
->target_value
= sym_value
;
5919 stub_entry
->target_section
= sym_sec
;
5920 stub_entry
->stub_type
= stub_type
;
5921 stub_entry
->h
= hash
;
5922 stub_entry
->branch_type
= branch_type
;
5925 stub_entry
->output_name
= sym_name
;
5928 if (sym_name
== NULL
)
5929 sym_name
= "unnamed";
5930 stub_entry
->output_name
= (char *)
5931 bfd_alloc (htab
->stub_bfd
, sizeof (THUMB2ARM_GLUE_ENTRY_NAME
)
5932 + strlen (sym_name
));
5933 if (stub_entry
->output_name
== NULL
)
5939 /* For historical reasons, use the existing names for ARM-to-Thumb and
5940 Thumb-to-ARM stubs. */
5941 r_type
= ELF32_R_TYPE (irela
->r_info
);
5942 if ((r_type
== (unsigned int) R_ARM_THM_CALL
5943 || r_type
== (unsigned int) R_ARM_THM_JUMP24
5944 || r_type
== (unsigned int) R_ARM_THM_JUMP19
)
5945 && branch_type
== ST_BRANCH_TO_ARM
)
5946 sprintf (stub_entry
->output_name
, THUMB2ARM_GLUE_ENTRY_NAME
, sym_name
);
5947 else if ((r_type
== (unsigned int) R_ARM_CALL
5948 || r_type
== (unsigned int) R_ARM_JUMP24
)
5949 && branch_type
== ST_BRANCH_TO_THUMB
)
5950 sprintf (stub_entry
->output_name
, ARM2THUMB_GLUE_ENTRY_NAME
, sym_name
);
5952 sprintf (stub_entry
->output_name
, STUB_ENTRY_NAME
, sym_name
);
/* Scan symbols in INPUT_BFD to identify secure entry functions needing a
   gateway veneer to transition from non secure to secure state and create
   them.

   "ARMv8-M Security Extensions: Requirements on Development Tools" document
   defines the conditions that govern Secure Gateway veneer creation for a
   given symbol <SYM> as follows:
   - it has function type
   - it has non local binding
   - a symbol named __acle_se_<SYM> (called special symbol) exists with the
     same type, binding and value as <SYM> (called normal symbol).
   An entry function can handle secure state transition itself in which case
   its special symbol would have a different value from the normal symbol.

   OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
   entry mapping while HTAB gives the name to hash entry mapping.
   *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
   created.

   The return value gives whether a stub failed to be allocated.  */
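/* For example (hypothetical symbols): a secure entry function "foo" that
   relies on a linker-generated SG veneer would typically be accompanied by a
   special symbol "__acle_se_foo" with the same value, both of function type
   and global or weak binding; if "foo" instead begins with its own SG
   sequence, the two symbols have different values and no veneer is
   created for it.  */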
5981 cmse_scan (bfd
*input_bfd
, struct elf32_arm_link_hash_table
*htab
,
5982 obj_attribute
*out_attr
, struct elf_link_hash_entry
**sym_hashes
,
5983 int *cmse_stub_created
)
5985 const struct elf_backend_data
*bed
;
5986 Elf_Internal_Shdr
*symtab_hdr
;
5987 unsigned i
, j
, sym_count
, ext_start
;
5988 Elf_Internal_Sym
*cmse_sym
, *local_syms
;
5989 struct elf32_arm_link_hash_entry
*hash
, *cmse_hash
= NULL
;
5990 enum arm_st_branch_type branch_type
;
5991 char *sym_name
, *lsym_name
;
5994 struct elf32_arm_stub_hash_entry
*stub_entry
;
5995 bool is_v8m
, new_stub
, cmse_invalid
, ret
= true;
5997 bed
= get_elf_backend_data (input_bfd
);
5998 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
5999 sym_count
= symtab_hdr
->sh_size
/ bed
->s
->sizeof_sym
;
6000 ext_start
= symtab_hdr
->sh_info
;
6001 is_v8m
= (out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V8M_BASE
6002 && out_attr
[Tag_CPU_arch_profile
].i
== 'M');
6004 local_syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
6005 if (local_syms
== NULL
)
6006 local_syms
= bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
6007 symtab_hdr
->sh_info
, 0, NULL
, NULL
,
6009 if (symtab_hdr
->sh_info
&& local_syms
== NULL
)
6013 for (i
= 0; i
< sym_count
; i
++)
6015 cmse_invalid
= false;
6019 cmse_sym
= &local_syms
[i
];
6020 sym_name
= bfd_elf_string_from_elf_section (input_bfd
,
6021 symtab_hdr
->sh_link
,
6023 if (!sym_name
|| !startswith (sym_name
, CMSE_PREFIX
))
6026 /* Special symbol with local binding. */
6027 cmse_invalid
= true;
6031 cmse_hash
= elf32_arm_hash_entry (sym_hashes
[i
- ext_start
]);
6032 if (cmse_hash
== NULL
)
6035 sym_name
= (char *) cmse_hash
->root
.root
.root
.string
;
6036 if (!startswith (sym_name
, CMSE_PREFIX
))
6039 /* Special symbol has incorrect binding or type. */
6040 if ((cmse_hash
->root
.root
.type
!= bfd_link_hash_defined
6041 && cmse_hash
->root
.root
.type
!= bfd_link_hash_defweak
)
6042 || cmse_hash
->root
.type
!= STT_FUNC
)
6043 cmse_invalid
= true;
6048 _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
6049 "ARMv8-M architecture or later"),
6050 input_bfd
, sym_name
);
6051 is_v8m
= true; /* Avoid multiple warning. */
6057 _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
6058 " a global or weak function symbol"),
6059 input_bfd
, sym_name
);
6065 sym_name
+= strlen (CMSE_PREFIX
);
6066 hash
= (struct elf32_arm_link_hash_entry
*)
6067 elf_link_hash_lookup (&(htab
)->root
, sym_name
, false, false, true);
6069 /* No associated normal symbol or it is neither global nor weak. */
6071 || (hash
->root
.root
.type
!= bfd_link_hash_defined
6072 && hash
->root
.root
.type
!= bfd_link_hash_defweak
)
6073 || hash
->root
.type
!= STT_FUNC
)
6075 /* Initialize here to avoid warning about use of possibly
6076 uninitialized variable. */
6081 /* Searching for a normal symbol with local binding. */
6082 for (; j
< ext_start
; j
++)
6085 bfd_elf_string_from_elf_section (input_bfd
,
6086 symtab_hdr
->sh_link
,
6087 local_syms
[j
].st_name
);
6088 if (!strcmp (sym_name
, lsym_name
))
6093 if (hash
|| j
< ext_start
)
6096 (_("%pB: invalid standard symbol `%s'; it must be "
6097 "a global or weak function symbol"),
6098 input_bfd
, sym_name
);
6102 (_("%pB: absent standard symbol `%s'"), input_bfd
, sym_name
);
6108 sym_value
= hash
->root
.root
.u
.def
.value
;
6109 section
= hash
->root
.root
.u
.def
.section
;
6111 if (cmse_hash
->root
.root
.u
.def
.section
!= section
)
6114 (_("%pB: `%s' and its special symbol are in different sections"),
6115 input_bfd
, sym_name
);
6118 if (cmse_hash
->root
.root
.u
.def
.value
!= sym_value
)
6119 continue; /* Ignore: could be an entry function starting with SG. */
6121 /* If this section is a link-once section that will be discarded, then
6122 don't create any stubs. */
6123 if (section
->output_section
== NULL
)
6126 (_("%pB: entry function `%s' not output"), input_bfd
, sym_name
);
6130 if (hash
->root
.size
== 0)
6133 (_("%pB: entry function `%s' is empty"), input_bfd
, sym_name
);
6139 branch_type
= ARM_GET_SYM_BRANCH_TYPE (hash
->root
.target_internal
);
6141 = elf32_arm_create_stub (htab
, arm_stub_cmse_branch_thumb_only
,
6142 NULL
, NULL
, section
, hash
, sym_name
,
6143 sym_value
, branch_type
, &new_stub
);
6145 if (stub_entry
== NULL
)
6149 BFD_ASSERT (new_stub
);
6150 (*cmse_stub_created
)++;
6154 if (!symtab_hdr
->contents
)
/* Return TRUE iff a symbol identified by its linker HASH entry is a secure
   code entry function, ie can be called from non secure code without using a
   veneer.  */

static bool
cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
{
  bfd_byte contents[4];
  uint32_t first_insn;
  asection *section;
  bfd_vma offset;
  bfd *abfd;

  /* Defined symbol of function type.  */
  if (hash->root.root.type != bfd_link_hash_defined
      && hash->root.root.type != bfd_link_hash_defweak)
    return false;
  if (hash->root.type != STT_FUNC)
    return false;

  /* Read first instruction.  */
  section = hash->root.root.u.def.section;
  abfd = section->owner;
  offset = hash->root.root.u.def.value - section->vma;
  if (!bfd_get_section_contents (abfd, section, contents, offset,
				 sizeof (contents)))
    return false;

  first_insn = bfd_get_32 (abfd, contents);

  /* Starts by SG instruction.  */
  return first_insn == 0xe97fe97f;
}
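/* Background note: 0xe97fe97f is the SG (Secure Gateway) instruction, the
   16-bit pattern 0xe97f repeated in both halfwords, so reading the first 32
   bits of the function and comparing against this constant is enough to
   recognise an entry function that already begins with SG.  */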
/* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
   secure gateway veneer (ie. the veneer was not in the input import library)
   and there is no output import library (GEN_INFO->out_implib_bfd is NULL).  */

static bool
arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct bfd_link_info *info;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) gen_info;

  if (info->out_implib_bfd)
    return true;

  if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
    return true;

  if (stub_entry->stub_offset == (bfd_vma) -1)
    _bfd_error_handler ("  %s", stub_entry->output_name);

  return true;
}
/* Set offset of each secure gateway veneer so that its address remains
   identical to the one in the input import library referred to by
   HTAB->in_implib_bfd.  A warning is issued for veneers that disappeared
   (present in input import library but absent from the executable being
   linked) or if new veneers appeared and there is no output import library
   (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
   number of secure gateway veneers found in the input import library).

   The function returns whether an error occurred.  If no error occurred,
   *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
   and this function and HTAB->new_cmse_stub_offset is set to the biggest
   veneer offset observed, for new veneers to be laid out after.  */
6233 set_cmse_veneer_addr_from_implib (struct bfd_link_info
*info
,
6234 struct elf32_arm_link_hash_table
*htab
,
6235 int *cmse_stub_created
)
6242 asection
*stub_out_sec
;
6244 Elf_Internal_Sym
*intsym
;
6245 const char *out_sec_name
;
6246 bfd_size_type cmse_stub_size
;
6247 asymbol
**sympp
= NULL
, *sym
;
6248 struct elf32_arm_link_hash_entry
*hash
;
6249 const insn_sequence
*cmse_stub_template
;
6250 struct elf32_arm_stub_hash_entry
*stub_entry
;
6251 int cmse_stub_template_size
, new_cmse_stubs_created
= *cmse_stub_created
;
6252 bfd_vma veneer_value
, stub_offset
, next_cmse_stub_offset
;
6253 bfd_vma cmse_stub_array_start
= (bfd_vma
) -1, cmse_stub_sec_vma
= 0;
6255 /* No input secure gateway import library. */
6256 if (!htab
->in_implib_bfd
)
6259 in_implib_bfd
= htab
->in_implib_bfd
;
6260 if (!htab
->cmse_implib
)
6262 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6263 "Gateway import libraries"), in_implib_bfd
);
6267 /* Get symbol table size. */
6268 symsize
= bfd_get_symtab_upper_bound (in_implib_bfd
);
6272 /* Read in the input secure gateway import library's symbol table. */
6273 sympp
= (asymbol
**) bfd_malloc (symsize
);
6277 symcount
= bfd_canonicalize_symtab (in_implib_bfd
, sympp
);
6284 htab
->new_cmse_stub_offset
= 0;
6286 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only
,
6287 &cmse_stub_template
,
6288 &cmse_stub_template_size
);
6290 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only
);
6292 bfd_get_section_by_name (htab
->obfd
, out_sec_name
);
6293 if (stub_out_sec
!= NULL
)
6294 cmse_stub_sec_vma
= stub_out_sec
->vma
;
6296 /* Set addresses of veneers mentionned in input secure gateway import
6297 library's symbol table. */
6298 for (i
= 0; i
< symcount
; i
++)
6302 sym_name
= (char *) bfd_asymbol_name (sym
);
6303 intsym
= &((elf_symbol_type
*) sym
)->internal_elf_sym
;
6305 if (sym
->section
!= bfd_abs_section_ptr
6306 || !(flags
& (BSF_GLOBAL
| BSF_WEAK
))
6307 || (flags
& BSF_FUNCTION
) != BSF_FUNCTION
6308 || (ARM_GET_SYM_BRANCH_TYPE (intsym
->st_target_internal
)
6309 != ST_BRANCH_TO_THUMB
))
6311 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6312 "symbol should be absolute, global and "
6313 "refer to Thumb functions"),
6314 in_implib_bfd
, sym_name
);
6319 veneer_value
= bfd_asymbol_value (sym
);
6320 stub_offset
= veneer_value
- cmse_stub_sec_vma
;
6321 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, sym_name
,
6323 hash
= (struct elf32_arm_link_hash_entry
*)
6324 elf_link_hash_lookup (&(htab
)->root
, sym_name
, false, false, true);
6326 /* Stub entry should have been created by cmse_scan or the symbol be of
6327 a secure function callable from non secure code. */
6328 if (!stub_entry
&& !hash
)
6333 (_("entry function `%s' disappeared from secure code"), sym_name
);
6334 hash
= (struct elf32_arm_link_hash_entry
*)
6335 elf_link_hash_lookup (&(htab
)->root
, sym_name
, true, true, true);
6337 = elf32_arm_create_stub (htab
, arm_stub_cmse_branch_thumb_only
,
6338 NULL
, NULL
, bfd_abs_section_ptr
, hash
,
6339 sym_name
, veneer_value
,
6340 ST_BRANCH_TO_THUMB
, &new_stub
);
6341 if (stub_entry
== NULL
)
6345 BFD_ASSERT (new_stub
);
6346 new_cmse_stubs_created
++;
6347 (*cmse_stub_created
)++;
6349 stub_entry
->stub_template_size
= stub_entry
->stub_size
= 0;
6350 stub_entry
->stub_offset
= stub_offset
;
6352 /* Symbol found is not callable from non secure code. */
6353 else if (!stub_entry
)
6355 if (!cmse_entry_fct_p (hash
))
6357 _bfd_error_handler (_("`%s' refers to a non entry function"),
6365 /* Only stubs for SG veneers should have been created. */
6366 BFD_ASSERT (stub_entry
->stub_type
== arm_stub_cmse_branch_thumb_only
);
6368 /* Check visibility hasn't changed. */
6369 if (!!(flags
& BSF_GLOBAL
)
6370 != (hash
->root
.root
.type
== bfd_link_hash_defined
))
6372 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd
,
6375 stub_entry
->stub_offset
= stub_offset
;
6378 /* Size should match that of a SG veneer. */
6379 if (intsym
->st_size
!= cmse_stub_size
)
6381 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6382 in_implib_bfd
, sym_name
);
6386 /* Previous veneer address is before current SG veneer section. */
6387 if (veneer_value
< cmse_stub_sec_vma
)
6389 /* Avoid offset underflow. */
6391 stub_entry
->stub_offset
= 0;
6396 /* Complain if stub offset not a multiple of stub size. */
6397 if (stub_offset
% cmse_stub_size
)
6400 (_("offset of veneer for entry function `%s' not a multiple of "
6401 "its size"), sym_name
);
6408 new_cmse_stubs_created
--;
6409 if (veneer_value
< cmse_stub_array_start
)
6410 cmse_stub_array_start
= veneer_value
;
6411 next_cmse_stub_offset
= stub_offset
+ ((cmse_stub_size
+ 7) & ~7);
6412 if (next_cmse_stub_offset
> htab
->new_cmse_stub_offset
)
6413 htab
->new_cmse_stub_offset
= next_cmse_stub_offset
;
6416 if (!info
->out_implib_bfd
&& new_cmse_stubs_created
!= 0)
6418 BFD_ASSERT (new_cmse_stubs_created
> 0);
6420 (_("new entry function(s) introduced but no output import library "
6422 bfd_hash_traverse (&htab
->stub_hash_table
, arm_list_new_cmse_stub
, info
);
6425 if (cmse_stub_array_start
!= cmse_stub_sec_vma
)
6428 (_("start address of `%s' is different from previous link"),

/* Determine and set the size of the stub section for a final link.

   The basic idea here is to examine all the relocations looking for
   PC-relative calls to a target that is unreachable with a "bl"
   instruction.  */

bool
elf32_arm_size_stubs (bfd *output_bfd,
		      bfd *stub_bfd,
		      struct bfd_link_info *info,
		      bfd_signed_vma group_size,
		      asection * (*add_stub_section) (const char *, asection *,
						      asection *,
						      unsigned int),
		      void (*layout_sections_again) (void))
{
  bool ret = true;
  obj_attribute *out_attr;
  int cmse_stub_created = 0;
  bfd_size_type stub_group_size;
  bool m_profile, stubs_always_after_branch, first_veneer_scan = true;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = NULL;
  unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
  struct a8_erratum_reloc *a8_relocs = NULL;
  unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;

  if (htab == NULL)
    return false;

  if (htab->fix_cortex_a8)
    {
      a8_fixes = (struct a8_erratum_fix *)
	bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
      a8_relocs = (struct a8_erratum_reloc *)
	bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
    }

  /* Propagate mach to stub bfd, because it may not have been
     finalized when we created stub_bfd.  */
  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
		     bfd_get_mach (output_bfd));

  /* Stash our params away.  */
  htab->stub_bfd = stub_bfd;
  htab->add_stub_section = add_stub_section;
  htab->layout_sections_again = layout_sections_again;
  stubs_always_after_branch = group_size < 0;

  out_attr = elf_known_obj_attributes_proc (output_bfd);
  m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';

  /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
     as the first half of a 32-bit branch straddling two 4K pages.  This is a
     crude way of enforcing that.  */
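  /* A 32-bit Thumb branch whose first halfword occupies the last two bytes
     of a 4K page has (address & 0xfff) == 0xffe, i.e. it straddles two
     pages; that is the condition the scan below uses when collecting
     Cortex-A8 erratum candidates.  */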
  if (htab->fix_cortex_a8)
    stubs_always_after_branch = 1;

  if (group_size < 0)
    stub_group_size = -group_size;
  else
    stub_group_size = group_size;

  if (stub_group_size == 1)
    {
      /* Default values.  */
      /* The Thumb branch range of +-4MB has to be used as the default
	 maximum group size (a given section can contain both ARM and Thumb
	 code, so the worst case has to be taken into account).

	 This value is 24K less than that, which allows for 2025
	 12-byte stubs.  If we exceed that, then we will fail to link.
	 The user will have to relink with an explicit group size
	 option.  */
      stub_group_size = 4170000;
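      /* Arithmetic behind the default: 4 MB is 4194304 bytes and 2025 stubs
	 of 12 bytes each take 24300 bytes, so 4194304 - 24300 = 4170004,
	 rounded down to 4170000 above.  */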
    }

  group_sections (htab, stub_group_size, stubs_always_after_branch);

  /* If we're applying the cortex A8 fix, we need to determine the
     program header size now, because we cannot change it later --
     that could alter section placements.  Notice the A8 erratum fix
     ends up requiring the section addresses to remain unchanged
     modulo the page size.  That's something we cannot represent
     inside BFD, and we don't want to force the section alignment to
     be the page size.  */
  if (htab->fix_cortex_a8)
    (*htab->layout_sections_again) ();

  while (1)
    {
      bfd *input_bfd;
      unsigned int bfd_indx;
      asection *stub_sec;
      enum elf32_arm_stub_type stub_type;
      bool stub_changed = false;
      unsigned prev_num_a8_fixes = num_a8_fixes;

      num_a8_fixes = 0;
      for (input_bfd = info->input_bfds, bfd_indx = 0;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next, bfd_indx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *section;
	  Elf_Internal_Sym *local_syms = NULL;

	  if (!is_arm_elf (input_bfd))
	    continue;
	  if ((input_bfd->flags & DYNAMIC) != 0
	      && (elf_sym_hashes (input_bfd) == NULL
		  || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0))
	    continue;

	  num_a8_relocs = 0;

	  /* We'll need the symbol table in a second.  */
	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
	  if (symtab_hdr->sh_info == 0)
	    continue;

	  /* Limit scan of symbols to object file whose profile is
	     Microcontroller to not hinder performance in the general case.  */
	  if (m_profile && first_veneer_scan)
	    {
	      struct elf_link_hash_entry **sym_hashes;

	      sym_hashes = elf_sym_hashes (input_bfd);
	      if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
			      &cmse_stub_created))
		goto error_ret_free_local;

	      if (cmse_stub_created != 0)
		stub_changed = true;
	    }

	  /* Walk over each section attached to the input bfd.  */
	  for (section = input_bfd->sections;
	       section != NULL;
	       section = section->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((section->flags & SEC_RELOC) == 0
		  || section->reloc_count == 0
		  || (section->flags & SEC_CODE) == 0)
		continue;

	      /* If this section is a link-once section that will be
		 discarded, then don't create any stubs.  */
	      if (section->output_section == NULL
		  || section->output_section->owner != output_bfd)
		continue;

	      /* Get the relocs.  */
	      internal_relocs
		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
					     NULL, info->keep_memory);
	      if (internal_relocs == NULL)
		goto error_ret_free_local;

	      /* Now examine each relocation.  */
	      irela = internal_relocs;
	      irelaend = irela + section->reloc_count;
	      for (; irela < irelaend; irela++)
		{
		  unsigned int r_type, r_indx;
		  asection *sym_sec;
		  bfd_vma sym_value;
		  bfd_vma destination;
		  struct elf32_arm_link_hash_entry *hash;
		  const char *sym_name;
		  unsigned char st_type;
		  enum arm_st_branch_type branch_type;
		  bool created_stub = false;

		  r_type = ELF32_R_TYPE (irela->r_info);
		  r_indx = ELF32_R_SYM (irela->r_info);

		  if (r_type >= (unsigned int) R_ARM_max)
		    {
		      bfd_set_error (bfd_error_bad_value);
		    error_ret_free_internal:
		      if (elf_section_data (section)->relocs == NULL)
			free (internal_relocs);
		    /* Fall through.  */
		    error_ret_free_local:
		      if (symtab_hdr->contents != (unsigned char *) local_syms)
			free (local_syms);
		      return false;
		    }

		  hash = NULL;
		  if (r_indx >= symtab_hdr->sh_info)
		    hash = elf32_arm_hash_entry
		      (elf_sym_hashes (input_bfd)
		       [r_indx - symtab_hdr->sh_info]);

		  /* Only look for stubs on branch instructions, or
		     non-relaxed TLSCALL.  */
		  if ((r_type != (unsigned int) R_ARM_CALL)
		      && (r_type != (unsigned int) R_ARM_THM_CALL)
		      && (r_type != (unsigned int) R_ARM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP19)
		      && (r_type != (unsigned int) R_ARM_THM_XPC22)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_PLT32)
		      && !((r_type == (unsigned int) R_ARM_TLS_CALL
			    || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
			   && r_type == (elf32_arm_tls_transition
					 (info, r_type,
					  (struct elf_link_hash_entry *) hash))
			   && ((hash ? hash->tls_type
				: (elf32_arm_local_got_tls_type
				   (input_bfd)[r_indx]))
			       & GOT_TLS_GDESC) != 0))
		    continue;

		  /* Now determine the call target, its name, value,
		     section.  */
		  sym_sec = NULL;
		  sym_value = 0;
		  destination = 0;
		  sym_name = NULL;

		  if (r_type == (unsigned int) R_ARM_TLS_CALL
		      || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
		    {
		      /* A non-relaxed TLS call.  The target is the
			 plt-resident trampoline and nothing to do
			 with the symbol.  */
		      BFD_ASSERT (htab->tls_trampoline > 0);
		      sym_sec = htab->root.splt;
		      sym_value = htab->tls_trampoline;
		      hash = 0;
		      st_type = STT_FUNC;
		      branch_type = ST_BRANCH_TO_ARM;
		    }
		  else if (!hash)
		    {
		      /* It's a local symbol.  */
		      Elf_Internal_Sym *sym;

		      if (local_syms == NULL)
			{
			  local_syms
			    = (Elf_Internal_Sym *) symtab_hdr->contents;
			  if (local_syms == NULL)
			    local_syms
			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
						      symtab_hdr->sh_info, 0,
						      NULL, NULL, NULL);
			  if (local_syms == NULL)
			    goto error_ret_free_internal;
			}

		      sym = local_syms + r_indx;
		      if (sym->st_shndx == SHN_UNDEF)
			sym_sec = bfd_und_section_ptr;
		      else if (sym->st_shndx == SHN_ABS)
			sym_sec = bfd_abs_section_ptr;
		      else if (sym->st_shndx == SHN_COMMON)
			sym_sec = bfd_com_section_ptr;
		      else
			sym_sec =
			  bfd_section_from_elf_index (input_bfd, sym->st_shndx);

		      if (!sym_sec)
			/* This is an undefined symbol.  It can never
			   be resolved.  */
			continue;

		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
			sym_value = sym->st_value;
		      destination = (sym_value + irela->r_addend
				     + sym_sec->output_offset
				     + sym_sec->output_section->vma);
		      st_type = ELF_ST_TYPE (sym->st_info);
		      branch_type =
			ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
		      sym_name
			= bfd_elf_string_from_elf_section (input_bfd,
							   symtab_hdr->sh_link,
							   sym->st_name);
		    }
		  else
		    {
		      /* It's an external symbol.  */
		      while (hash->root.root.type == bfd_link_hash_indirect
			     || hash->root.root.type == bfd_link_hash_warning)
			hash = ((struct elf32_arm_link_hash_entry *)
				hash->root.root.u.i.link);

		      if (hash->root.root.type == bfd_link_hash_defined
			  || hash->root.root.type == bfd_link_hash_defweak)
			{
			  sym_sec = hash->root.root.u.def.section;
			  sym_value = hash->root.root.u.def.value;

			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  /* For a destination in a shared library,
			     use the PLT stub as target address to
			     decide whether a branch stub is
			     needed.  */
			  if (globals != NULL
			      && globals->root.splt != NULL
			      && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else if (sym_sec->output_section != NULL)
			    destination = (sym_value + irela->r_addend
					   + sym_sec->output_offset
					   + sym_sec->output_section->vma);
			}
		      else if ((hash->root.root.type == bfd_link_hash_undefined)
			       || (hash->root.root.type == bfd_link_hash_undefweak))
			{
			  /* For a shared library, use the PLT stub as
			     target address to decide whether a long
			     branch stub is needed.
			     For absolute code, they cannot be handled.  */
			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  if (globals != NULL
			      && globals->root.splt != NULL
			      && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else
			    continue;
			}
		      else
			{
			  bfd_set_error (bfd_error_bad_value);
			  goto error_ret_free_internal;
			}
		      st_type = hash->root.type;
		      branch_type =
			ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
		      sym_name = hash->root.root.root.string;
		    }

		  do
		    {
		      bool new_stub;
		      struct elf32_arm_stub_hash_entry *stub_entry;

		      /* Determine what (if any) linker stub is needed.  */
		      stub_type = arm_type_of_stub (info, section, irela,
						    st_type, &branch_type,
						    hash, destination, sym_sec,
						    input_bfd, sym_name);
		      if (stub_type == arm_stub_none)
			break;

		      /* We've either created a stub for this reloc already,
			 or we are about to.  */
		      stub_entry =
			elf32_arm_create_stub (htab, stub_type, section, irela,
					       sym_sec, hash,
					       (char *) sym_name, sym_value,
					       branch_type, &new_stub);

		      created_stub = stub_entry != NULL;
		      if (!created_stub)
			goto error_ret_free_internal;
		      if (!new_stub)
			break;
		      else
			stub_changed = true;
		    }
		  while (0);

		  /* Look for relocations which might trigger Cortex-A8
		     erratum.  */
		  if (htab->fix_cortex_a8
		      && (r_type == (unsigned int) R_ARM_THM_JUMP24
			  || r_type == (unsigned int) R_ARM_THM_JUMP19
			  || r_type == (unsigned int) R_ARM_THM_CALL
			  || r_type == (unsigned int) R_ARM_THM_XPC22))
		    {
		      bfd_vma from = section->output_section->vma
				     + section->output_offset
				     + irela->r_offset;

		      if ((from & 0xfff) == 0xffe)
			{
			  /* Found a candidate.  Note we haven't checked the
			     destination is within 4K here: if we do so (and
			     don't create an entry in a8_relocs) we can't tell
			     that a branch should have been relocated when
			     scanning later.  */
			  if (num_a8_relocs == a8_reloc_table_size)
			    {
			      a8_reloc_table_size *= 2;
			      a8_relocs = (struct a8_erratum_reloc *)
				bfd_realloc (a8_relocs,
					     sizeof (struct a8_erratum_reloc)
					     * a8_reloc_table_size);
			    }

			  a8_relocs[num_a8_relocs].from = from;
			  a8_relocs[num_a8_relocs].destination = destination;
			  a8_relocs[num_a8_relocs].r_type = r_type;
			  a8_relocs[num_a8_relocs].branch_type = branch_type;
			  a8_relocs[num_a8_relocs].sym_name = sym_name;
			  a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
			  a8_relocs[num_a8_relocs].hash = hash;

			  num_a8_relocs++;
			}
		    }
		}

	      /* We're done with the internal relocs, free them.  */
	      if (elf_section_data (section)->relocs == NULL)
		free (internal_relocs);
	    }

	  if (htab->fix_cortex_a8)
	    {
	      /* Sort relocs which might apply to Cortex-A8 erratum.  */
	      qsort (a8_relocs, num_a8_relocs,
		     sizeof (struct a8_erratum_reloc),
		     &a8_reloc_compare);

	      /* Scan for branches which might trigger Cortex-A8 erratum.  */
	      if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
					  &num_a8_fixes, &a8_fix_table_size,
					  a8_relocs, num_a8_relocs,
					  prev_num_a8_fixes, &stub_changed)
		  != 0)
		goto error_ret_free_local;
	    }

	  if (local_syms != NULL
	      && symtab_hdr->contents != (unsigned char *) local_syms)
	    {
	      if (!info->keep_memory)
		free (local_syms);
	      else
		symtab_hdr->contents = (unsigned char *) local_syms;
	    }
	}

      if (first_veneer_scan
	  && !set_cmse_veneer_addr_from_implib (info, htab,
						&cmse_stub_created))
	ret = false;

      if (prev_num_a8_fixes != num_a8_fixes)
	stub_changed = true;

      if (!stub_changed)
	break;

      /* OK, we've added some stubs.  Find out the new size of the
	 stub sections.  */
      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  stub_sec->size = 0;
	}

      /* Add new SG veneers after those already in the input import
	 library.  */
      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
	   stub_type++)
	{
	  bfd_vma *start_offset_p;
	  asection **stub_sec_p;

	  start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
	  if (start_offset_p == NULL)
	    continue;

	  BFD_ASSERT (stub_sec_p != NULL);
	  if (*stub_sec_p != NULL)
	    (*stub_sec_p)->size = *start_offset_p;
	}

      /* Compute stub section size, considering padding.  */
      bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
	   stub_type++)
	{
	  int padding;
	  bfd_size_type size;
	  asection **stub_sec_p;

	  padding = arm_dedicated_stub_section_padding (stub_type);
	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
	  /* Skip if no stub input section or no stub section padding
	     required.  */
	  if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
	    continue;
	  /* Stub section padding required but no dedicated section.  */
	  BFD_ASSERT (stub_sec_p);

	  size = (*stub_sec_p)->size;
	  size = (size + padding - 1) & ~(padding - 1);
	  (*stub_sec_p)->size = size;
	}

      /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
      if (htab->fix_cortex_a8)
	for (i = 0; i < num_a8_fixes; i++)
	  {
	    stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
			 a8_fixes[i].section, htab, a8_fixes[i].stub_type);

	    if (stub_sec == NULL)
	      return false;

	    stub_sec->size
	      += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
					      NULL);
	  }

      /* Ask the linker to do its stuff.  */
      (*htab->layout_sections_again) ();
      first_veneer_scan = false;
    }

  /* Add stubs for Cortex-A8 erratum fixes now.  */
  if (htab->fix_cortex_a8)
    {
      for (i = 0; i < num_a8_fixes; i++)
	{
	  struct elf32_arm_stub_hash_entry *stub_entry;
	  char *stub_name = a8_fixes[i].stub_name;
	  asection *section = a8_fixes[i].section;
	  unsigned int section_id = a8_fixes[i].section->id;
	  asection *link_sec = htab->stub_group[section_id].link_sec;
	  asection *stub_sec = htab->stub_group[section_id].stub_sec;
	  const insn_sequence *template_sequence;
	  int template_size, size = 0;

	  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
					     true, false);
	  if (stub_entry == NULL)
	    {
	      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
				  section->owner, stub_name);
	      return false;
	    }

	  stub_entry->stub_sec = stub_sec;
	  stub_entry->stub_offset = (bfd_vma) -1;
	  stub_entry->id_sec = link_sec;
	  stub_entry->stub_type = a8_fixes[i].stub_type;
	  stub_entry->source_value = a8_fixes[i].offset;
	  stub_entry->target_section = a8_fixes[i].section;
	  stub_entry->target_value = a8_fixes[i].target_offset;
	  stub_entry->orig_insn = a8_fixes[i].orig_insn;
	  stub_entry->branch_type = a8_fixes[i].branch_type;

	  size = find_stub_size_and_template (a8_fixes[i].stub_type,
					      &template_sequence,
					      &template_size);

	  stub_entry->stub_size = size;
	  stub_entry->stub_template = template_sequence;
	  stub_entry->stub_template_size = template_size;
	}

      /* Stash the Cortex-A8 erratum fix array for use later in
	 elf32_arm_write_section().  */
      htab->a8_erratum_fixes = a8_fixes;
      htab->num_a8_erratum_fixes = num_a8_fixes;
    }
  else
    {
      htab->a8_erratum_fixes = NULL;
      htab->num_a8_erratum_fixes = 0;
    }

  return ret;
}

/* Build all the stubs associated with the current output file.  The
   stubs are kept in a hash table attached to the main linker hash
   table.  We also set up the .plt entries for statically linked PIC
   functions here.  This function is called via arm_elf_finish in the
   linker.  */

bool
elf32_arm_build_stubs (struct bfd_link_info *info)
{
  asection *stub_sec;
  struct bfd_hash_table *table;
  enum elf32_arm_stub_type stub_type;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return false;

  for (stub_sec = htab->stub_bfd->sections;
       stub_sec != NULL;
       stub_sec = stub_sec->next)
    {
      bfd_size_type size;

      /* Ignore non-stub sections.  */
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

      /* Allocate memory to hold the linker stubs.  Zeroing the stub sections
	 must at least be done for stub section requiring padding and for SG
	 veneers to ensure that a non secure code branching to a removed SG
	 veneer causes an error.  */
      size = stub_sec->size;
      stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
      if (stub_sec->contents == NULL && size != 0)
	return false;

      stub_sec->alloced = 1;
      stub_sec->size = 0;
    }

  /* Add new SG veneers after those already in the input import library.  */
  for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
    {
      bfd_vma *start_offset_p;
      asection **stub_sec_p;

      start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      if (start_offset_p == NULL)
	continue;

      BFD_ASSERT (stub_sec_p != NULL);
      if (*stub_sec_p != NULL)
	(*stub_sec_p)->size = *start_offset_p;
    }

  /* Build the stubs as directed by the stub hash table.  */
  table = &htab->stub_hash_table;
  bfd_hash_traverse (table, arm_build_one_stub, info);
  if (htab->fix_cortex_a8)
    {
      /* Place the cortex a8 stubs last.  */
      htab->fix_cortex_a8 = -1;
      bfd_hash_traverse (table, arm_build_one_stub, info);
    }

  return true;
}

/* Locate the Thumb encoded calling stub for NAME.  */

static struct elf_link_hash_entry *
find_thumb_glue (struct bfd_link_info *link_info,
		 const char *name,
		 char **error_message)
{
  char *tmp_name;
  struct elf_link_hash_entry *hash;
  struct elf32_arm_link_hash_table *hash_table;

  /* We need a pointer to the armelf specific hash table.  */
  hash_table = elf32_arm_hash_table (link_info);
  if (hash_table == NULL)
    return NULL;

  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);

  hash = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, false, false, true);

  if (hash == NULL)
    {
      *error_message = bfd_asprintf (_("unable to find %s glue '%s' for '%s'"),
				     "Thumb", tmp_name, name);
      if (*error_message == NULL)
	*error_message = (char *) bfd_errmsg (bfd_error_system_call);
    }

  free (tmp_name);

  return hash;
}

/* Locate the ARM encoded calling stub for NAME.  */

static struct elf_link_hash_entry *
find_arm_glue (struct bfd_link_info *link_info,
	       const char *name,
	       char **error_message)
{
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct elf32_arm_link_hash_table *hash_table;

  /* We need a pointer to the elfarm specific hash table.  */
  hash_table = elf32_arm_hash_table (link_info);
  if (hash_table == NULL)
    return NULL;

  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, false, false, true);

  if (myh == NULL)
    {
      *error_message = bfd_asprintf (_("unable to find %s glue '%s' for '%s'"),
				     "ARM", tmp_name, name);
      if (*error_message == NULL)
	*error_message = (char *) bfd_errmsg (bfd_error_system_call);
    }

  free (tmp_name);

  return myh;
}

/* ARM->Thumb glue (static images):

   .arm
   __func_from_arm:
     ldr r12, __func_addr
     bx  r12
   __func_addr:
     .word func    @ behave as if you saw a ARM_32 reloc.

   (v5t static images)
   .arm
   __func_from_arm:
     ldr pc, __func_addr
   __func_addr:
     .word func    @ behave as if you saw a ARM_32 reloc.

   (relocatable images)
   .arm
   __func_from_arm:
     ldr r12, __func_offset
     add r12, r12, pc
     bx  r12
   __func_offset:
     .word func - .  */

#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
static const insn32 a2t3_func_addr_insn = 0x00000001;

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
static const insn32 a2t2v5_func_addr_insn = 0x00000001;

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;

/* Thumb->ARM:				Thumb->(non-interworking aware) ARM

     .thumb				.thumb
     .align 2				.align 2
 __func_from_thumb:		    __func_from_thumb:
     bx pc				push {r6, lr}
     nop				ldr  r6, __func_addr
     .arm				mov  lr, pc
 __func_change_to_ARM:			bx   r6
     b func			    .arm
				 __func_addr:
				    .word  func  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;
static const insn16 t2a2_noop_insn = 0x46c0;
static const insn32 t2a3_b_insn = 0xea000000;

#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;
static const insn32 armbx2_moveq_insn = 0x01a0f000;
static const insn32 armbx3_bx_insn = 0xe12fff10;
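
/* For reference, the opcodes above decode as follows (register and offset
   fields that are zero here are placeholders filled in when the veneers are
   actually emitted):
     a2t1_ldr_insn     0xe59fc000   ldr   r12, [pc]     (literal two words on)
     a2t2_bx_r12_insn  0xe12fff1c   bx    r12
     a2t1v5_ldr_insn   0xe51ff004   ldr   pc, [pc, #-4]
     a2t1p_ldr_insn    0xe59fc004   ldr   r12, [pc, #4]
     a2t2p_add_pc_insn 0xe08cc00f   add   r12, r12, pc
     t2a1_bx_pc_insn   0x4778       bx    pc            (Thumb)
     t2a2_noop_insn    0x46c0       mov   r8, r8        (Thumb nop)
     t2a3_b_insn       0xea000000   b     .             (offset patched later)
     armbx1_tst_insn   0xe3100001   tst   r0, #1
     armbx2_moveq_insn 0x01a0f000   moveq pc, r0
     armbx3_bx_insn    0xe12fff10   bx    r0  */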

#ifndef ELFARM_NABI_C_INCLUDED
static void
arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
{
  asection * s;
  bfd_byte * contents;

  if (size == 0)
    {
      /* Do not include empty glue sections in the output.  */
      if (abfd != NULL)
	{
	  s = bfd_get_linker_section (abfd, name);
	  if (s != NULL)
	    s->flags |= SEC_EXCLUDE;
	}
      return;
    }

  BFD_ASSERT (abfd != NULL);

  s = bfd_get_linker_section (abfd, name);
  BFD_ASSERT (s != NULL);

  contents = (bfd_byte *) bfd_zalloc (abfd, size);

  BFD_ASSERT (s->size == size);
  s->contents = contents;
}

bool
bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
{
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);

  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
				   globals->arm_glue_size,
				   ARM2THUMB_GLUE_SECTION_NAME);

  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
				   globals->thumb_glue_size,
				   THUMB2ARM_GLUE_SECTION_NAME);

  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
				   globals->vfp11_erratum_glue_size,
				   VFP11_ERRATUM_VENEER_SECTION_NAME);

  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
				   globals->stm32l4xx_erratum_glue_size,
				   STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
				   globals->bx_glue_size,
				   ARM_BX_GLUE_SECTION_NAME);

  return true;
}

/* Allocate space and symbols for calling a Thumb function from Arm mode.
   Returns the symbol identifying the stub.  */

static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, false, false, true);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, true, false, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  if (bfd_link_pic (link_info)
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}

/* Allocate space for ARMv4 BX veneers.  */

static void
record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
{
  asection * s;
  char *tmp_name;
  struct elf32_arm_link_hash_table *globals;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;

  /* BX PC does not need a veneer.  */
  if (reg == 15)
    return;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* Check if this veneer has already been allocated.  */
  if (globals->bx_glue_offset[reg])
    return;

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Add symbol for veneer.  */
  tmp_name = (char *)
    bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, false, false, false);

  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = globals->bx_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, true, false, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  s->size += ARM_BX_VENEER_SIZE;
  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
}

/* Add an entry to the code/data map for section SEC.  */

static void
elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
{
  struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
  unsigned int newidx;

  if (sec_data->map == NULL)
    {
      sec_data->map = (elf32_arm_section_map *)
	bfd_malloc (sizeof (elf32_arm_section_map));
      sec_data->mapcount = 0;
      sec_data->mapsize = 1;
    }

  newidx = sec_data->mapcount++;

  if (sec_data->mapcount > sec_data->mapsize)
    {
      sec_data->mapsize *= 2;
      sec_data->map = (elf32_arm_section_map *)
	bfd_realloc_or_free (sec_data->map, sec_data->mapsize
			     * sizeof (elf32_arm_section_map));
    }

  if (sec_data->map)
    {
      sec_data->map[newidx].vma = vma;
      sec_data->map[newidx].type = type;
    }
}
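
/* The TYPE character recorded above follows the ARM ELF mapping-symbol
   convention: 'a' marks the start of ARM code, 't' Thumb code and 'd'
   data.  */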

/* Record information about a VFP11 denorm-erratum veneer.  Only ARM-mode
   veneers are handled for now.  */

static bfd_vma
record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
			     elf32_vfp11_erratum_list *branch,
			     bfd *branch_bfd,
			     asection *branch_sec,
			     unsigned int offset)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_vfp11_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);

  sec_data = elf32_arm_section_data (s);

  BFD_ASSERT (s != NULL);

  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_vfp11_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, false, false, false);

  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = hash_table->vfp11_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, true, false, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->erratumcount += 1;
  newerr = (elf32_vfp11_erratum_list *)
    bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));

  newerr->type = VFP11_ERRATUM_ARM_VENEER;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_vfp11_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->erratumlist;
  sec_data->erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_vfp11_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, false, false, false);

  if (myh != NULL)
    abort ();

  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, true, false, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->vfp11_erratum_glue_size == 0)
    {
      bh = NULL;
      /* FIXME: Creates an ARM symbol.  Thumb mode will need attention if it
	 ever requires this erratum fix.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$a",
					BSF_LOCAL, s, 0, NULL,
					true, false, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 'a', 0);
    }

  s->size += VFP11_ERRATUM_VENEER_SIZE;
  hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
  hash_table->num_vfp11_fixes++;

  /* The offset of the veneer.  */
  return val;
}

/* Record information about a STM32L4XX STM erratum veneer.  Only Thumb-mode
   veneers need to be handled, because the erratum only occurs on Cortex-M
   (Thumb-only) cores.  */

static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, false, false, false);

  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, true, false, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
    bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, false, false, false);

  if (myh != NULL)
    abort ();

  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, true, false, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					true, false, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  */
  return val;
}

#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)

/* Create a fake section for use by the ARM backend of the linker.  */

static bool
arm_make_glue_section (bfd * abfd, const char * name)
{
  asection * sec;

  sec = bfd_get_linker_section (abfd, name);
  if (sec != NULL)
    /* Already made.  */
    return true;

  sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);

  if (sec == NULL
      || !bfd_set_section_alignment (sec, 2))
    return false;

  /* Set the gc mark to prevent the section from being removed by garbage
     collection, despite the fact that no relocs refer to this section.  */
  sec->gc_mark = 1;

  return true;
}

/* Set size of .plt entries.  This function is called from the
   linker scripts in ld/emultempl/{armelf}.em.  */

void
bfd_elf32_arm_use_long_plt (void)
{
  elf32_arm_use_long_plt_entry = true;
}

/* Add the glue sections to ABFD.  This function is called from the
   linker scripts in ld/emultempl/{armelf}.em.  */

bool
bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
					struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
  bool dostm32l4xx = globals
    && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
  bool addglue;

  /* If we are only performing a partial
     link do not bother adding the glue.  */
  if (bfd_link_relocatable (info))
    return true;

  addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
    && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
    && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
    && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);

  if (!dostm32l4xx)
    return addglue;

  return addglue
    && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
}

/* Mark output sections of veneers needing a dedicated one with SEC_KEEP.  This
   ensures they are not marked for deletion by
   strip_excluded_output_sections () when veneers are going to be created
   later.  Not doing so would trigger assert on empty section size in
   lang_size_sections_1 ().  */

void
bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
{
  enum elf32_arm_stub_type stub_type;

  /* If we are only performing a partial
     link do not bother adding the glue.  */
  if (bfd_link_relocatable (info))
    return;

  for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
    {
      asection *out_sec;
      const char *out_sec_name;

      if (!arm_dedicated_stub_output_section_required (stub_type))
	continue;

      out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
      out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
      if (out_sec != NULL)
	out_sec->flags |= SEC_KEEP;
    }
}

/* Select a BFD to be used to hold the sections used by the glue code.
   This function is called from the linker scripts in ld/emultempl/
   {armelf}.em.  */

bool
bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link
     do not bother getting a bfd to hold the glue.  */
  if (bfd_link_relocatable (info))
    return true;

  /* Make sure we don't attach the glue sections to a dynamic object.  */
  BFD_ASSERT (!(abfd->flags & DYNAMIC));

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);

  if (globals->bfd_of_glue_owner != NULL)
    return true;

  /* Save the bfd for later use.  */
  globals->bfd_of_glue_owner = abfd;

  return true;
}

static void
check_use_blx (struct elf32_arm_link_hash_table *globals)
{
  int cpu_arch;

  cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
				       Tag_CPU_arch);

  if (globals->fix_arm1176)
    {
      if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
	globals->use_blx = 1;
    }
  else
    {
      if (cpu_arch > TAG_CPU_ARCH_V4T)
	globals->use_blx = 1;
    }
}

bool
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return true;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
			  abfd);
      return false;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return true;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return true;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0
	  || (sec->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, false);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      if (elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return true;

 error_return:
  if (elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  contents = NULL;

  if (elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return false;
}

/* Initialise maps of ARM/Thumb/data for input BFDs.  */

void
bfd_elf32_arm_init_maps (bfd *abfd)
{
  Elf_Internal_Sym *isymbuf;
  Elf_Internal_Shdr *hdr;
  unsigned int i, localsyms;

  /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
  if (! is_arm_elf (abfd))
    return;

  if ((abfd->flags & DYNAMIC) != 0)
    return;

  hdr = & elf_symtab_hdr (abfd);
  localsyms = hdr->sh_info;

  /* Obtain a buffer full of symbols for this BFD.  The hdr->sh_info field
     should contain the number of local symbols, which should come before any
     global symbols.  Mapping symbols are always local.  */
  isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
				  NULL);

  /* No internal symbols read?  Skip this BFD.  */
  if (isymbuf == NULL)
    return;

  for (i = 0; i < localsyms; i++)
    {
      Elf_Internal_Sym *isym = &isymbuf[i];
      asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
      const char *name;

      if (sec != NULL
	  && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
	{
	  name = bfd_elf_string_from_elf_section (abfd,
						  hdr->sh_link, isym->st_name);

	  if (bfd_is_arm_special_symbol_name (name,
					      BFD_ARM_SPECIAL_SYM_TYPE_MAP))
	    elf32_arm_section_map_add (sec, name[1], isym->st_value);
	}
    }
}

/* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
   say what they wanted.  */

void
bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);

  if (globals == NULL)
    return;

  if (globals->fix_cortex_a8 == -1)
    {
      /* Turn on Cortex-A8 erratum workaround for ARMv7-A.  */
      if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
	  && (out_attr[Tag_CPU_arch_profile].i == 'A'
	      || out_attr[Tag_CPU_arch_profile].i == 0))
	globals->fix_cortex_a8 = 1;
      else
	globals->fix_cortex_a8 = 0;
    }
}

void
bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);

  if (globals == NULL)
    return;

  /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix.  */
  if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
    {
      switch (globals->vfp11_fix)
	{
	case BFD_ARM_VFP11_FIX_DEFAULT:
	case BFD_ARM_VFP11_FIX_NONE:
	  globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
	  break;

	default:
	  /* Give a warning, but do as the user requests anyway.  */
	  _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
				"workaround is not necessary for target architecture"), obfd);
	}
    }
  else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
    /* For earlier architectures, we might need the workaround, but do not
       enable it by default.  If the user is running with broken hardware,
       they must enable the erratum fix explicitly.  */
    globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
}

void
bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);

  if (globals == NULL)
    return;

  /* We assume only Cortex-M4 may require the fix.  */
  if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
      || out_attr[Tag_CPU_arch_profile].i != 'M')
    {
      if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
	/* Give a warning, but do as the user requests anyway.  */
	_bfd_error_handler
	  (_("%pB: warning: selected STM32L4XX erratum "
	     "workaround is not necessary for target architecture"), obfd);
    }
}

enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,
  VFP11_LS,
  VFP11_DS,
  VFP11_BAD
};

/* Return a VFP register number.  This is encoded as RX:X for single-precision
   registers, or X:RX for double-precision registers, where RX is the group of
   four bits in the instruction encoding and X is the single extension bit.
   RX and X fields are specified using their lowest (starting) bit.  The return
   value is:

     0...31: single-precision registers s0...s31
     32...63: double-precision registers d0...d31.

   Although X should be zero for VFP11 (encoding d0...d15 only), we might
   encounter VFP3 instructions, so we allow the full range for DP registers.  */

static unsigned int
bfd_arm_vfp11_regno (unsigned int insn, bool is_double, unsigned int rx,
		     unsigned int x)
{
  if (is_double)
    return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
  else
    return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
}
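
/* Worked example for the encoding above: with RX bits 0b1010 and X bit 1, a
   single-precision operand decodes to (0xa << 1) | 1 = 21, i.e. s21, while
   the same fields in a double-precision instruction give
   (0xa | (1 << 4)) + 32 = 58, i.e. d26.  */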

/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Ignore d16-d31.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    *wmask |= 1 << reg;
  else if (reg < 48)
    *wmask |= 3 << ((reg - 32) * 2);
}
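
/* Example: d1 is encoded as 33 above, so bits 2 and 3 are set -- the same
   bits as for s2 and s3, reflecting that d1 overlaps those two SP
   registers.  */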

/* Return TRUE if WMASK overwrites anything in REGS.  */

static bool
bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
{
  int i;

  for (i = 0; i < numregs; i++)
    {
      unsigned int reg = regs[i];

      if (reg < 32 && (wmask & (1 << reg)) != 0)
	return true;

      reg -= 32;

      if (reg >= 16)
	continue;

      if ((wmask & (3 << (reg * 2))) != 0)
	return true;
    }

  return false;
}
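
/* Example: if the preceding FMAC read s2 (so 2 is in REGS) and WMASK has
   bit 2 set, the function returns true; for a DP input such as d3 (encoded
   as 35) the second test checks bits 6 and 7, i.e. the overlapping s6/s7
   slots.  */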

/* In this function, we're interested in two things: finding input registers
   for VFP data-processing instructions, and finding the set of registers which
   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
   hold the written set, so FLDM etc. are easy to deal with (we're only
   interested in 32 SP registers or 16 DP registers, due to the VFP version
   implemented by the chip in question).  DP registers are marked by setting
   both SP registers in the write mask.  */

static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  bool is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      pqrs = ((insn & 0x00800000) >> 20)
	     | ((insn & 0x00300000) >> 19)
	     | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  /* Fall through.  */
	vfp_binop:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    unsigned int extn = ((insn >> 15) & 0x1e)
				| ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    *numregs = 1;
		  else
		    *numregs = 0;

		  regs[0] = fm;
		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer.  Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	default:
	  break;
	}
    }

  return vpipe;
}
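
/* Example of the decoder above: 0xee000a00 is fmacs s0, s0, s0.  It matches
   the data-processing pattern with pqrs == 0, so the pipe is VFP11_FMAC,
   s0 is added to *destmask and regs[] receives the three inputs (fd, Fn and
   Fm, all s0 here) with *numregs == 3.  */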

static int elf32_arm_compare_mapping (const void * a, const void * b);


/* Look for potentially-troublesome code sequences which might trigger the
   VFP11 denormal/antidependency erratum.  See, e.g., the ARM1136 errata sheet
   (available from ARM) for details of the erratum.  A short version is
   described in ld.texinfo.  */

bool
bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  int state = 0;
  int regs[3], numregs = 0;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);

  if (globals == NULL)
    return false;

  /* We use a simple FSM to match troublesome VFP11 instruction sequences.
     The states transition as follows:

     0 -> 1 (vector) or 0 -> 2 (scalar)
	 A VFP FMAC-pipeline instruction has been seen.  Fill
	 regs[0]..regs[numregs-1] with its input operands.  Remember this
	 instruction in 'first_fmac'.

     1 -> 2
	 Any instruction, except for a VFP instruction which overwrites
	 regs[*].

     1 -> 3 or
     2 -> 3
	 A VFP instruction has been seen which overwrites any of regs[*].
	 We must make a veneer!  Reset state to 0 before examining next
	 instruction.

     2 -> 0
	 If we fail to match anything in state 2, reset to state 0 and reset
	 the instruction pointer to the instruction after 'first_fmac'.

     If the VFP11 vector mode is in use, there must be at least two unrelated
     instructions between anti-dependent VFP11 instructions to properly avoid
     triggering the erratum, hence the use of the extra state 1.  */
8458 /* If we are only performing a partial link do not bother
8459 to construct any glue. */
8460 if (bfd_link_relocatable (link_info
))
8463 /* Skip if this bfd does not correspond to an ELF image. */
8464 if (! is_arm_elf (abfd
))
8467 /* We should have chosen a fix type by the time we get here. */
8468 BFD_ASSERT (globals
->vfp11_fix
!= BFD_ARM_VFP11_FIX_DEFAULT
);
8470 if (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_NONE
)
8473 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8474 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
8477 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8479 unsigned int i
, span
, first_fmac
= 0, veneer_of_insn
= 0;
8480 struct _arm_elf_section_data
*sec_data
;
8482 /* If we don't have executable progbits, we're not interested in this
8483 section. Also skip if section is to be excluded. */
8484 if (elf_section_type (sec
) != SHT_PROGBITS
8485 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
8486 || (sec
->flags
& SEC_EXCLUDE
) != 0
8487 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
8488 || sec
->output_section
== bfd_abs_section_ptr
8489 || strcmp (sec
->name
, VFP11_ERRATUM_VENEER_SECTION_NAME
) == 0)
8492 sec_data
= elf32_arm_section_data (sec
);
8494 if (sec_data
->mapcount
== 0)
8497 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
8498 contents
= elf_section_data (sec
)->this_hdr
.contents
;
8499 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
8502 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
8503 elf32_arm_compare_mapping
);
8505 for (span
= 0; span
< sec_data
->mapcount
; span
++)
8507 unsigned int span_start
= sec_data
->map
[span
].vma
;
8508 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
8509 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
8510 char span_type
= sec_data
->map
[span
].type
;
8512 /* FIXME: Only ARM mode is supported at present. We may need to
8513 support Thumb-2 mode also at some point. */
8514 if (span_type
!= 'a')
8517 for (i
= span_start
; i
< span_end
;)
8519 unsigned int next_i
= i
+ 4;
8520 unsigned int insn
= bfd_big_endian (abfd
)
8521 ? (((unsigned) contents
[i
] << 24)
8522 | (contents
[i
+ 1] << 16)
8523 | (contents
[i
+ 2] << 8)
8525 : (((unsigned) contents
[i
+ 3] << 24)
8526 | (contents
[i
+ 2] << 16)
8527 | (contents
[i
+ 1] << 8)
8529 unsigned int writemask
= 0;
8530 enum bfd_arm_vfp11_pipe vpipe
;
8535 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
, regs
,
8537 /* I'm assuming the VFP11 erratum can trigger with denorm
8538 operands on either the FMAC or the DS pipeline. This might
8539 lead to slightly overenthusiastic veneer insertion. */
8540 if (vpipe
== VFP11_FMAC
|| vpipe
== VFP11_DS
)
8542 state
= use_vector
? 1 : 2;
8544 veneer_of_insn
= insn
;
8550 int other_regs
[3], other_numregs
;
8551 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
8554 if (vpipe
!= VFP11_BAD
8555 && bfd_arm_vfp11_antidependency (writemask
, regs
,
8565 int other_regs
[3], other_numregs
;
8566 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
8569 if (vpipe
!= VFP11_BAD
8570 && bfd_arm_vfp11_antidependency (writemask
, regs
,
8576 next_i
= first_fmac
+ 4;
8582 abort (); /* Should be unreachable. */
8587 elf32_vfp11_erratum_list
*newerr
=(elf32_vfp11_erratum_list
*)
8588 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
8590 elf32_arm_section_data (sec
)->erratumcount
+= 1;
8592 newerr
->u
.b
.vfp_insn
= veneer_of_insn
;
8597 newerr
->type
= VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
;
8604 record_vfp11_erratum_veneer (link_info
, newerr
, abfd
, sec
,
8609 newerr
->next
= sec_data
->erratumlist
;
8610 sec_data
->erratumlist
= newerr
;
8619 if (elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8627 if (elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8633 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8634 after sections have been laid out, using specially-named symbols. */
8637 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd
*abfd
,
8638 struct bfd_link_info
*link_info
)
8641 struct elf32_arm_link_hash_table
*globals
;
8644 if (bfd_link_relocatable (link_info
))
8647 /* Skip if this bfd does not correspond to an ELF image. */
8648 if (! is_arm_elf (abfd
))
8651 globals
= elf32_arm_hash_table (link_info
);
8652 if (globals
== NULL
)
8655 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
8656 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
8657 BFD_ASSERT (tmp_name
);
8659 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8661 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
8662 elf32_vfp11_erratum_list
*errnode
= sec_data
->erratumlist
;
8664 for (; errnode
!= NULL
; errnode
= errnode
->next
)
8666 struct elf_link_hash_entry
*myh
;
8669 switch (errnode
->type
)
8671 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
:
8672 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER
:
8673 /* Find veneer symbol. */
8674 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
8675 errnode
->u
.b
.veneer
->u
.v
.id
);
8677 myh
= elf_link_hash_lookup
8678 (&(globals
)->root
, tmp_name
, false, false, true);
8681 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8682 abfd
, "VFP11", tmp_name
);
8684 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8685 + myh
->root
.u
.def
.section
->output_offset
8686 + myh
->root
.u
.def
.value
;
8688 errnode
->u
.b
.veneer
->vma
= vma
;
8691 case VFP11_ERRATUM_ARM_VENEER
:
8692 case VFP11_ERRATUM_THUMB_VENEER
:
8693 /* Find return location. */
8694 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
8697 myh
= elf_link_hash_lookup
8698 (&(globals
)->root
, tmp_name
, false, false, true);
8701 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8702 abfd
, "VFP11", tmp_name
);
8704 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8705 + myh
->root
.u
.def
.section
->output_offset
8706 + myh
->root
.u
.def
.value
;
8708 errnode
->u
.v
.branch
->vma
= vma
;
8720 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8721 return locations after sections have been laid out, using
8722 specially-named symbols. */
8725 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd
*abfd
,
8726 struct bfd_link_info
*link_info
)
8729 struct elf32_arm_link_hash_table
*globals
;
8732 if (bfd_link_relocatable (link_info
))
8735 /* Skip if this bfd does not correspond to an ELF image. */
8736 if (! is_arm_elf (abfd
))
8739 globals
= elf32_arm_hash_table (link_info
);
8740 if (globals
== NULL
)
8743 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
8744 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
8745 BFD_ASSERT (tmp_name
);
8747 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8749 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
8750 elf32_stm32l4xx_erratum_list
*errnode
= sec_data
->stm32l4xx_erratumlist
;
8752 for (; errnode
!= NULL
; errnode
= errnode
->next
)
8754 struct elf_link_hash_entry
*myh
;
8757 switch (errnode
->type
)
8759 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER
:
8760 /* Find veneer symbol. */
8761 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
8762 errnode
->u
.b
.veneer
->u
.v
.id
);
8764 myh
= elf_link_hash_lookup
8765 (&(globals
)->root
, tmp_name
, false, false, true);
8768 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8769 abfd
, "STM32L4XX", tmp_name
);
8771 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8772 + myh
->root
.u
.def
.section
->output_offset
8773 + myh
->root
.u
.def
.value
;
8775 errnode
->u
.b
.veneer
->vma
= vma
;
8778 case STM32L4XX_ERRATUM_VENEER
:
8779 /* Find return location. */
8780 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
8783 myh
= elf_link_hash_lookup
8784 (&(globals
)->root
, tmp_name
, false, false, true);
8787 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8788 abfd
, "STM32L4XX", tmp_name
);
8790 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8791 + myh
->root
.u
.def
.section
->output_offset
8792 + myh
->root
.u
.def
.value
;
8794 errnode
->u
.v
.branch
->vma
= vma
;
static bool
is_thumb2_ldmia (const insn32 insn)
{
  /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
     1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll.  */
  return (insn & 0xffd02000) == 0xe8900000;
}

static bool
is_thumb2_ldmdb (const insn32 insn)
{
  /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
     1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll.  */
  return (insn & 0xffd02000) == 0xe9100000;
}

static bool
is_thumb2_vldm (const insn32 insn)
{
  /* A6.5 Extension register load or store instruction.
     We look for SP 32-bit and DP 64-bit registers.
     Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
     <list> is consecutive 64-bit registers
     1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
     Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
     <list> is consecutive 32-bit registers
     1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
     if P==0 && U==1 && W==1 && Rn=1101 VPOP
     if PUW=010 || PUW=011 || PUW=101 VLDM.  */
  return
    (((insn & 0xfe100f00) == 0xec100b00) ||
     ((insn & 0xfe100f00) == 0xec100a00))
    && /* (IA without !).  */
    (((((insn << 7) >> 28) & 0xd) == 0x4)
     /* (IA with !), includes VPOP (when reg number is SP).  */
     || ((((insn << 7) >> 28) & 0xd) == 0x5)
     || ((((insn << 7) >> 28) & 0xd) == 0x9));
}
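/* Illustrative example (the encoding below is constructed here, not taken
   from any object file): 0xec920b04 is VLDMIA r2, {d0-d1}.  Masking it
   with 0xfe100f00 gives 0xec100b00, so the 64-bit-list test above matches,
   and (insn << 7) >> 28 extracts the P, U, D, W bits as 0b0100; ANDed with
   0xd (which ignores D) this equals 0x4, i.e. "IA without writeback".
   The low byte, insn & 0xff, is the transfer size in words: 4 here, since
   two doubleword registers are loaded.  */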
/* STM STM32L4XX erratum : This function assumes that it receives an LDM or
   a VLDM instruction and:
   - computes the number and the mode of memory accesses
   - decides if the replacement should be done:
     . replaces only if > 8-word accesses
     . or (testing purposes only) replaces all accesses.  */

static bool
stm32l4xx_need_create_replacing_stub (const insn32 insn,
                                      bfd_arm_stm32l4xx_fix stm32l4xx_fix)
{
  int nb_words = 0;

  /* The field encoding the register list is the same for both LDMIA
     and LDMDB encodings.  */
  if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
    nb_words = elf32_arm_popcount (insn & 0x0000ffff);
  else if (is_thumb2_vldm (insn))
    nb_words = (insn & 0xff);

  /* DEFAULT mode accounts for the real bug condition situation,
     ALL mode inserts stubs for each LDM/VLDM instruction (testing).  */
  return (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT
          ? nb_words > 8
          : stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL);
}
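/* Continuing the example above: for VLDMIA r2, {d0-d1} the register-list
   field gives nb_words == 4, so with BFD_ARM_STM32L4XX_FIX_DEFAULT no
   veneer is requested (the access is not wider than 8 words), while
   VLDMIA r2, {d0-d7} encodes imm8 == 16 and does get one.  With
   BFD_ARM_STM32L4XX_FIX_ALL every LDM/VLDM is replaced regardless of
   size, which is only intended for testing the stub machinery.  */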
8874 /* Look for potentially-troublesome code sequences which might trigger
8875 the STM STM32L4XX erratum. */
8878 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd
*abfd
,
8879 struct bfd_link_info
*link_info
)
8882 bfd_byte
*contents
= NULL
;
8883 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8885 if (globals
== NULL
)
8888 /* If we are only performing a partial link do not bother
8889 to construct any glue. */
8890 if (bfd_link_relocatable (link_info
))
8893 /* Skip if this bfd does not correspond to an ELF image. */
8894 if (! is_arm_elf (abfd
))
8897 if (globals
->stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_NONE
)
8900 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8901 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
8904 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8906 unsigned int i
, span
;
8907 struct _arm_elf_section_data
*sec_data
;
8909 /* If we don't have executable progbits, we're not interested in this
8910 section. Also skip if section is to be excluded. */
8911 if (elf_section_type (sec
) != SHT_PROGBITS
8912 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
8913 || (sec
->flags
& SEC_EXCLUDE
) != 0
8914 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
8915 || sec
->output_section
== bfd_abs_section_ptr
8916 || strcmp (sec
->name
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
) == 0)
8919 sec_data
= elf32_arm_section_data (sec
);
8921 if (sec_data
->mapcount
== 0)
8924 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
8925 contents
= elf_section_data (sec
)->this_hdr
.contents
;
8926 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
8929 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
8930 elf32_arm_compare_mapping
);
8932 for (span
= 0; span
< sec_data
->mapcount
; span
++)
8934 unsigned int span_start
= sec_data
->map
[span
].vma
;
8935 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
8936 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
8937 char span_type
= sec_data
->map
[span
].type
;
8938 int itblock_current_pos
= 0;
8940 /* Only Thumb2 mode need be supported with this CM4 specific
8941 code, we should not encounter any arm mode eg span_type
8943 if (span_type
!= 't')
8946 for (i
= span_start
; i
< span_end
;)
8948 unsigned int insn
= bfd_get_16 (abfd
, &contents
[i
]);
8949 bool insn_32bit
= false;
8950 bool is_ldm
= false;
8951 bool is_vldm
= false;
8952 bool is_not_last_in_it_block
= false;
8954 /* The first 16-bits of all 32-bit thumb2 instructions start
8955 with opcode[15..13]=0b111 and the encoded op1 can be anything
8956 except opcode[12..11]!=0b00.
8957 See 32-bit Thumb instruction encoding. */
8958 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
8961 /* Compute the predicate that tells if the instruction
8962 is concerned by the IT block
8963 - Creates an error if there is a ldm that is not
8964 last in the IT block thus cannot be replaced
8965 - Otherwise we can create a branch at the end of the
8966 IT block, it will be controlled naturally by IT
8967 with the proper pseudo-predicate
8968 - So the only interesting predicate is the one that
8969 tells that we are not on the last item of an IT
8971 if (itblock_current_pos
!= 0)
8972 is_not_last_in_it_block
= !!--itblock_current_pos
;
8976 /* Load the rest of the insn (in manual-friendly order). */
8977 insn
= (insn
<< 16) | bfd_get_16 (abfd
, &contents
[i
+ 2]);
8978 is_ldm
= is_thumb2_ldmia (insn
) || is_thumb2_ldmdb (insn
);
8979 is_vldm
= is_thumb2_vldm (insn
);
8981 /* Veneers are created for (v)ldm depending on
8982 option flags and memory accesses conditions; but
8983 if the instruction is not the last instruction of
8984 an IT block, we cannot create a jump there, so we
8986 if ((is_ldm
|| is_vldm
)
8987 && stm32l4xx_need_create_replacing_stub
8988 (insn
, globals
->stm32l4xx_fix
))
8990 if (is_not_last_in_it_block
)
8993 /* xgettext:c-format */
8994 (_("%pB(%pA+%#x): error: multiple load detected"
8995 " in non-last IT block instruction:"
8996 " STM32L4XX veneer cannot be generated; "
8997 "use gcc option -mrestrict-it to generate"
8998 " only one instruction per IT block"),
9003 elf32_stm32l4xx_erratum_list
*newerr
=
9004 (elf32_stm32l4xx_erratum_list
*)
9006 (sizeof (elf32_stm32l4xx_erratum_list
));
9008 elf32_arm_section_data (sec
)
9009 ->stm32l4xx_erratumcount
+= 1;
9010 newerr
->u
.b
.insn
= insn
;
9011 /* We create only thumb branches. */
9013 STM32L4XX_ERRATUM_BRANCH_TO_VENEER
;
9014 record_stm32l4xx_erratum_veneer
9015 (link_info
, newerr
, abfd
, sec
,
9018 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
:
9019 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
);
9021 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
9022 sec_data
->stm32l4xx_erratumlist
= newerr
;
9029 IT blocks are only encoded in T1
9030 Encoding T1: IT{x{y{z}}} <firstcond>
9031 1 0 1 1 - 1 1 1 1 - firstcond - mask
9032 if mask = '0000' then see 'related encodings'
9033 We don't deal with UNPREDICTABLE, just ignore these.
9034 There can be no nested IT blocks so an IT block
9035 is naturally a new one for which it is worth
9036 computing its size. */
9037 bool is_newitblock
= ((insn
& 0xff00) == 0xbf00)
9038 && ((insn
& 0x000f) != 0x0000);
9039 /* If we have a new IT block we compute its size. */
9042 /* Compute the number of instructions controlled
9043 by the IT block, it will be used to decide
9044 whether we are inside an IT block or not. */
9045 unsigned int mask
= insn
& 0x000f;
9046 itblock_current_pos
= 4 - ctz (mask
);
9050 i
+= insn_32bit
? 4 : 2;
9054 if (elf_section_data (sec
)->this_hdr
.contents
!= contents
)
9062 if (elf_section_data (sec
)->this_hdr
.contents
!= contents
)
/* Set target relocation values needed during linking.  */

void
bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
                                 struct bfd_link_info *link_info,
                                 struct elf32_arm_params *params)
{
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)
    return;

  globals->target1_is_rel = params->target1_is_rel;
  if (globals->fdpic_p)
    globals->target2_reloc = R_ARM_GOT32;
  else if (strcmp (params->target2_type, "rel") == 0)
    globals->target2_reloc = R_ARM_REL32;
  else if (strcmp (params->target2_type, "abs") == 0)
    globals->target2_reloc = R_ARM_ABS32;
  else if (strcmp (params->target2_type, "got-rel") == 0)
    globals->target2_reloc = R_ARM_GOT_PREL;
  else
    {
      _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
                          params->target2_type);
    }
  globals->fix_v4bx = params->fix_v4bx;
  globals->use_blx |= params->use_blx;
  globals->vfp11_fix = params->vfp11_denorm_fix;
  globals->stm32l4xx_fix = params->stm32l4xx_fix;
  if (globals->fdpic_p)
    globals->pic_veneer = 1;
  else
    globals->pic_veneer = params->pic_veneer;
  globals->fix_cortex_a8 = params->fix_cortex_a8;
  globals->fix_arm1176 = params->fix_arm1176;
  globals->cmse_implib = params->cmse_implib;
  globals->in_implib_bfd = params->in_implib_bfd;

  BFD_ASSERT (is_arm_elf (output_bfd));
  elf_arm_tdata (output_bfd)->no_enum_size_warning
    = params->no_enum_size_warning;
  elf_arm_tdata (output_bfd)->no_wchar_size_warning
    = params->no_wchar_size_warning;
}
/* Replace the target offset of a Thumb bl or b.w instruction.  */

static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;
  bfd_vma lower;
  bfd_vma reloc_sign;

  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  upper = (upper & ~(bfd_vma) 0x7ff)
          | ((offset >> 12) & 0x3ff)
          | (reloc_sign << 10);
  lower = (lower & ~(bfd_vma) 0x2fff)
          | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
          | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
          | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
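/* Worked example (offsets chosen for illustration only): for a forward
   branch of +0x1000 bytes, reloc_sign is 0, the upper halfword receives
   imm10 = (0x1000 >> 12) & 0x3ff = 1, and the lower halfword receives
   J1 = J2 = 1 (both computed as (!bit) ^ sign from offset bits 23 and 22)
   and imm11 = 0.  Starting from the BL pair f000 d000 this produces
   f001 f800.  Note that the mask ~0x2fff preserves bits 15, 14 and 12 of
   the lower halfword, so the distinction between the BL and B.W encodings
   is left untouched.  */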
9140 /* Thumb code calling an ARM function. */
9143 elf32_thumb_to_arm_stub (struct bfd_link_info
* info
,
9147 asection
* input_section
,
9148 bfd_byte
* hit_data
,
9151 bfd_signed_vma addend
,
9153 char **error_message
)
9157 long int ret_offset
;
9158 struct elf_link_hash_entry
* myh
;
9159 struct elf32_arm_link_hash_table
* globals
;
9161 myh
= find_thumb_glue (info
, name
, error_message
);
9165 globals
= elf32_arm_hash_table (info
);
9166 BFD_ASSERT (globals
!= NULL
);
9167 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9169 my_offset
= myh
->root
.u
.def
.value
;
9171 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9172 THUMB2ARM_GLUE_SECTION_NAME
);
9174 BFD_ASSERT (s
!= NULL
);
9175 BFD_ASSERT (s
->contents
!= NULL
);
9176 BFD_ASSERT (s
->output_section
!= NULL
);
9178 if ((my_offset
& 0x01) == 0x01)
9181 && sym_sec
->owner
!= NULL
9182 && !INTERWORK_FLAG (sym_sec
->owner
))
9185 (_("%pB(%s): warning: interworking not enabled;"
9186 " first occurrence: %pB: %s call to %s"),
9187 sym_sec
->owner
, name
, input_bfd
, "Thumb", "ARM");
9193 myh
->root
.u
.def
.value
= my_offset
;
9195 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a1_bx_pc_insn
,
9196 s
->contents
+ my_offset
);
9198 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a2_noop_insn
,
9199 s
->contents
+ my_offset
+ 2);
9202 /* Address of destination of the stub. */
9203 ((bfd_signed_vma
) val
)
9205 /* Offset from the start of the current section
9206 to the start of the stubs. */
9208 /* Offset of the start of this stub from the start of the stubs. */
9210 /* Address of the start of the current section. */
9211 + s
->output_section
->vma
)
9212 /* The branch instruction is 4 bytes into the stub. */
9214 /* ARM branches work from the pc of the instruction + 8. */
9217 put_arm_insn (globals
, output_bfd
,
9218 (bfd_vma
) t2a3_b_insn
| ((ret_offset
>> 2) & 0x00FFFFFF),
9219 s
->contents
+ my_offset
+ 4);
9222 BFD_ASSERT (my_offset
<= globals
->thumb_glue_size
);
9224 /* Now go back and fix up the original BL insn to point to here. */
9226 /* Address of where the stub is located. */
9227 (s
->output_section
->vma
+ s
->output_offset
+ my_offset
)
9228 /* Address of where the BL is located. */
9229 - (input_section
->output_section
->vma
+ input_section
->output_offset
9231 /* Addend in the relocation. */
9233 /* Biassing for PC-relative addressing. */
9236 insert_thumb_branch (input_bfd
, ret_offset
, hit_data
- input_section
->vma
);
9241 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
9243 static struct elf_link_hash_entry
*
9244 elf32_arm_create_thumb_stub (struct bfd_link_info
* info
,
9251 char ** error_message
)
9254 long int ret_offset
;
9255 struct elf_link_hash_entry
* myh
;
9256 struct elf32_arm_link_hash_table
* globals
;
9258 myh
= find_arm_glue (info
, name
, error_message
);
9262 globals
= elf32_arm_hash_table (info
);
9263 BFD_ASSERT (globals
!= NULL
);
9264 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9266 my_offset
= myh
->root
.u
.def
.value
;
9268 if ((my_offset
& 0x01) == 0x01)
9271 && sym_sec
->owner
!= NULL
9272 && !INTERWORK_FLAG (sym_sec
->owner
))
9275 (_("%pB(%s): warning: interworking not enabled;"
9276 " first occurrence: %pB: %s call to %s"),
9277 sym_sec
->owner
, name
, input_bfd
, "ARM", "Thumb");
9281 myh
->root
.u
.def
.value
= my_offset
;
9283 if (bfd_link_pic (info
)
9284 || globals
->pic_veneer
)
9286 /* For relocatable objects we can't use absolute addresses,
9287 so construct the address from a relative offset. */
9288 /* TODO: If the offset is small it's probably worth
9289 constructing the address with adds. */
9290 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1p_ldr_insn
,
9291 s
->contents
+ my_offset
);
9292 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2p_add_pc_insn
,
9293 s
->contents
+ my_offset
+ 4);
9294 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t3p_bx_r12_insn
,
9295 s
->contents
+ my_offset
+ 8);
9296 /* Adjust the offset by 4 for the position of the add,
9297 and 8 for the pipeline offset. */
9298 ret_offset
= (val
- (s
->output_offset
9299 + s
->output_section
->vma
9302 bfd_put_32 (output_bfd
, ret_offset
,
9303 s
->contents
+ my_offset
+ 12);
9305 else if (globals
->use_blx
)
9307 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1v5_ldr_insn
,
9308 s
->contents
+ my_offset
);
9310 /* It's a thumb address. Add the low order bit. */
9311 bfd_put_32 (output_bfd
, val
| a2t2v5_func_addr_insn
,
9312 s
->contents
+ my_offset
+ 4);
9316 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1_ldr_insn
,
9317 s
->contents
+ my_offset
);
9319 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2_bx_r12_insn
,
9320 s
->contents
+ my_offset
+ 4);
9322 /* It's a thumb address. Add the low order bit. */
9323 bfd_put_32 (output_bfd
, val
| a2t3_func_addr_insn
,
9324 s
->contents
+ my_offset
+ 8);
9330 BFD_ASSERT (my_offset
<= globals
->arm_glue_size
);
9335 /* Arm code calling a Thumb function. */
9338 elf32_arm_to_thumb_stub (struct bfd_link_info
* info
,
9342 asection
* input_section
,
9343 bfd_byte
* hit_data
,
9346 bfd_signed_vma addend
,
9348 char **error_message
)
9350 unsigned long int tmp
;
9353 long int ret_offset
;
9354 struct elf_link_hash_entry
* myh
;
9355 struct elf32_arm_link_hash_table
* globals
;
9357 globals
= elf32_arm_hash_table (info
);
9358 BFD_ASSERT (globals
!= NULL
);
9359 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9361 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9362 ARM2THUMB_GLUE_SECTION_NAME
);
9363 BFD_ASSERT (s
!= NULL
);
9364 BFD_ASSERT (s
->contents
!= NULL
);
9365 BFD_ASSERT (s
->output_section
!= NULL
);
9367 myh
= elf32_arm_create_thumb_stub (info
, name
, input_bfd
, output_bfd
,
9368 sym_sec
, val
, s
, error_message
);
9372 my_offset
= myh
->root
.u
.def
.value
;
9373 tmp
= bfd_get_32 (input_bfd
, hit_data
);
9374 tmp
= tmp
& 0xFF000000;
9376 /* Somehow these are both 4 too far, so subtract 8. */
9377 ret_offset
= (s
->output_offset
9379 + s
->output_section
->vma
9380 - (input_section
->output_offset
9381 + input_section
->output_section
->vma
9385 tmp
= tmp
| ((ret_offset
>> 2) & 0x00FFFFFF);
9387 bfd_put_32 (output_bfd
, (bfd_vma
) tmp
, hit_data
- input_section
->vma
);
9392 /* Populate Arm stub for an exported Thumb function. */
9395 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry
*h
, void * inf
)
9397 struct bfd_link_info
* info
= (struct bfd_link_info
*) inf
;
9399 struct elf_link_hash_entry
* myh
;
9400 struct elf32_arm_link_hash_entry
*eh
;
9401 struct elf32_arm_link_hash_table
* globals
;
9404 char *error_message
;
9406 eh
= elf32_arm_hash_entry (h
);
9407 /* Allocate stubs for exported Thumb functions on v4t. */
9408 if (eh
->export_glue
== NULL
)
9411 globals
= elf32_arm_hash_table (info
);
9412 BFD_ASSERT (globals
!= NULL
);
9413 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9415 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9416 ARM2THUMB_GLUE_SECTION_NAME
);
9417 BFD_ASSERT (s
!= NULL
);
9418 BFD_ASSERT (s
->contents
!= NULL
);
9419 BFD_ASSERT (s
->output_section
!= NULL
);
9421 sec
= eh
->export_glue
->root
.u
.def
.section
;
9423 BFD_ASSERT (sec
->output_section
!= NULL
);
9425 val
= eh
->export_glue
->root
.u
.def
.value
+ sec
->output_offset
9426 + sec
->output_section
->vma
;
9428 myh
= elf32_arm_create_thumb_stub (info
, h
->root
.root
.string
,
9429 h
->root
.u
.def
.section
->owner
,
9430 globals
->obfd
, sec
, val
, s
,
/* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */

static bfd_vma
elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
{
  asection * s;
  bfd_vma glue_addr;
  bfd_byte *p;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
                              ARM_BX_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  BFD_ASSERT (globals->bx_glue_offset[reg] & 2);

  glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma) 3;

  if ((globals->bx_glue_offset[reg] & 1) == 0)
    {
      p = s->contents + glue_addr;
      bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
      bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
      bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
      globals->bx_glue_offset[reg] |= 1;
    }

  return glue_addr + s->output_section->vma + s->output_offset;
}
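/* The names of the constants used above suggest the veneer laid down here
   is the three-instruction sequence

       tst   rN, #1
       moveq pc, rN
       bx    rN

   so an ARM-state destination returns via MOVEQ while a Thumb-state
   destination (low bit set) falls through to BX.  The per-register
   bookkeeping packs flags into bx_glue_offset[reg]: bit 1 means a veneer
   slot was reserved (asserted above), bit 0 means the slot has already
   been filled, and the remaining bits hold the offset of the veneer
   within the glue section.  */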
/* Generate Arm stubs for exported Thumb symbols.  */

static void
elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
                                  struct bfd_link_info *link_info)
{
  struct elf32_arm_link_hash_table * globals;

  if (link_info == NULL)
    /* Ignore this if we are not called by the ELF backend linker.  */
    return;

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)
    return;

  /* If blx is available then exported Thumb symbols are OK and there is
     nothing to do.  */
  if (globals->use_blx)
    return;

  elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
                          link_info);
}
/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC.  */

static void
elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
                              bfd_size_type count)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  BFD_ASSERT (htab->root.dynamic_sections_created);
  sreloc->size += RELOC_SIZE (htab) * count;
}
/* Reserve space for COUNT R_ARM_IRELATIVE relocations.  If the link is
   dynamic, the relocations should go in SRELOC, otherwise they should
   go in the special .rel.iplt section.  */

static void
elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
                            bfd_size_type count)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (!htab->root.dynamic_sections_created)
    htab->root.irelplt->size += RELOC_SIZE (htab) * count;
  else
    {
      BFD_ASSERT (sreloc != NULL);
      sreloc->size += RELOC_SIZE (htab) * count;
    }
}
/* Add relocation REL to the end of relocation section SRELOC.  */

static void
elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
                        asection *sreloc, Elf_Internal_Rela *rel)
{
  bfd_byte *loc;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (!htab->root.dynamic_sections_created
      && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
    sreloc = htab->root.irelplt;
  if (sreloc == NULL)
    abort ();
  loc = sreloc->contents;
  loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
  if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
    abort ();
  SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
}
9554 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9555 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
9559 elf32_arm_allocate_plt_entry (struct bfd_link_info
*info
,
9561 union gotplt_union
*root_plt
,
9562 struct arm_plt_info
*arm_plt
)
9564 struct elf32_arm_link_hash_table
*htab
;
9568 htab
= elf32_arm_hash_table (info
);
9572 splt
= htab
->root
.iplt
;
9573 sgotplt
= htab
->root
.igotplt
;
9575 /* NaCl uses a special first entry in .iplt too. */
9576 if (htab
->root
.target_os
== is_nacl
&& splt
->size
== 0)
9577 splt
->size
+= htab
->plt_header_size
;
9579 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
9580 elf32_arm_allocate_irelocs (info
, htab
->root
.irelplt
, 1);
9584 splt
= htab
->root
.splt
;
9585 sgotplt
= htab
->root
.sgotplt
;
9589 /* Allocate room for R_ARM_FUNCDESC_VALUE. */
9590 /* For lazy binding, relocations will be put into .rel.plt, in
9591 .rel.got otherwise. */
9592 /* FIXME: today we don't support lazy binding so put it in .rel.got */
9593 if (info
->flags
& DF_BIND_NOW
)
9594 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
9596 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
9600 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
9601 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
9604 /* If this is the first .plt entry, make room for the special
9606 if (splt
->size
== 0)
9607 splt
->size
+= htab
->plt_header_size
;
9609 htab
->next_tls_desc_index
++;
9612 /* Allocate the PLT entry itself, including any leading Thumb stub. */
9613 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
9614 splt
->size
+= PLT_THUMB_STUB_SIZE
;
9615 root_plt
->offset
= splt
->size
;
9616 splt
->size
+= htab
->plt_entry_size
;
9618 /* We also need to make an entry in the .got.plt section, which
9619 will be placed in the .got section by the linker script. */
9621 arm_plt
->got_offset
= sgotplt
->size
;
9623 arm_plt
->got_offset
= sgotplt
->size
- 8 * htab
->num_tls_desc
;
9625 /* Function descriptor takes 64 bits in GOT. */

static bfd_vma
arm_movw_immediate (bfd_vma value)
{
  return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
}

static bfd_vma
arm_movt_immediate (bfd_vma value)
{
  return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
}
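/* Worked example (value chosen for illustration): for value 0x12345678,
   arm_movw_immediate returns 0x00050678, i.e. imm4 = 5 in bits 19-16 and
   imm12 = 0x678 in bits 11-0, which is how MOVW encodes the low halfword
   0x5678.  arm_movt_immediate returns 0x00010234, encoding the high
   halfword 0x1234 the same way for MOVT.  */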
9643 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
9644 the entry lives in .iplt and resolves to (*SYM_VALUE)().
9645 Otherwise, DYNINDX is the index of the symbol in the dynamic
9646 symbol table and SYM_VALUE is undefined.
9648 ROOT_PLT points to the offset of the PLT entry from the start of its
9649 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
9650 bookkeeping information.
9652 Returns FALSE if there was a problem. */
9655 elf32_arm_populate_plt_entry (bfd
*output_bfd
, struct bfd_link_info
*info
,
9656 union gotplt_union
*root_plt
,
9657 struct arm_plt_info
*arm_plt
,
9658 int dynindx
, bfd_vma sym_value
)
9660 struct elf32_arm_link_hash_table
*htab
;
9666 Elf_Internal_Rela rel
;
9667 bfd_vma got_header_size
;
9669 htab
= elf32_arm_hash_table (info
);
9671 /* Pick the appropriate sections and sizes. */
9674 splt
= htab
->root
.iplt
;
9675 sgot
= htab
->root
.igotplt
;
9676 srel
= htab
->root
.irelplt
;
9678 /* There are no reserved entries in .igot.plt, and no special
9679 first entry in .iplt. */
9680 got_header_size
= 0;
9684 splt
= htab
->root
.splt
;
9685 sgot
= htab
->root
.sgotplt
;
9686 srel
= htab
->root
.srelplt
;
9688 got_header_size
= get_elf_backend_data (output_bfd
)->got_header_size
;
9690 BFD_ASSERT (splt
!= NULL
&& srel
!= NULL
);
9692 bfd_vma got_offset
, got_address
, plt_address
;
9693 bfd_vma got_displacement
, initial_got_entry
;
9696 BFD_ASSERT (sgot
!= NULL
);
9698 /* Get the offset into the .(i)got.plt table of the entry that
9699 corresponds to this function. */
9700 got_offset
= (arm_plt
->got_offset
& -2);
9702 /* Get the index in the procedure linkage table which
9703 corresponds to this symbol. This is the index of this symbol
9704 in all the symbols for which we are making plt entries.
9705 After the reserved .got.plt entries, all symbols appear in
9706 the same order as in .plt. */
9708 /* Function descriptor takes 8 bytes. */
9709 plt_index
= (got_offset
- got_header_size
) / 8;
9711 plt_index
= (got_offset
- got_header_size
) / 4;
9713 /* Calculate the address of the GOT entry. */
9714 got_address
= (sgot
->output_section
->vma
9715 + sgot
->output_offset
9718 /* ...and the address of the PLT entry. */
9719 plt_address
= (splt
->output_section
->vma
9720 + splt
->output_offset
9721 + root_plt
->offset
);
9723 ptr
= splt
->contents
+ root_plt
->offset
;
9724 if (htab
->root
.target_os
== is_vxworks
&& bfd_link_pic (info
))
9729 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
9731 val
= elf32_arm_vxworks_shared_plt_entry
[i
];
9733 val
|= got_address
- sgot
->output_section
->vma
;
9735 val
|= plt_index
* RELOC_SIZE (htab
);
9736 if (i
== 2 || i
== 5)
9737 bfd_put_32 (output_bfd
, val
, ptr
);
9739 put_arm_insn (htab
, output_bfd
, val
, ptr
);
9742 else if (htab
->root
.target_os
== is_vxworks
)
9747 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
9749 val
= elf32_arm_vxworks_exec_plt_entry
[i
];
9753 val
|= 0xffffff & -((root_plt
->offset
+ i
* 4 + 8) >> 2);
9755 val
|= plt_index
* RELOC_SIZE (htab
);
9756 if (i
== 2 || i
== 5)
9757 bfd_put_32 (output_bfd
, val
, ptr
);
9759 put_arm_insn (htab
, output_bfd
, val
, ptr
);
9762 loc
= (htab
->srelplt2
->contents
9763 + (plt_index
* 2 + 1) * RELOC_SIZE (htab
));
9765 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9766 referencing the GOT for this PLT entry. */
9767 rel
.r_offset
= plt_address
+ 8;
9768 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
9769 rel
.r_addend
= got_offset
;
9770 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
9771 loc
+= RELOC_SIZE (htab
);
9773 /* Create the R_ARM_ABS32 relocation referencing the
9774 beginning of the PLT for this GOT entry. */
9775 rel
.r_offset
= got_address
;
9776 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
9778 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
9780 else if (htab
->root
.target_os
== is_nacl
)
9782 /* Calculate the displacement between the PLT slot and the
9783 common tail that's part of the special initial PLT slot. */
9784 int32_t tail_displacement
9785 = ((splt
->output_section
->vma
+ splt
->output_offset
9786 + ARM_NACL_PLT_TAIL_OFFSET
)
9787 - (plt_address
+ htab
->plt_entry_size
+ 4));
9788 BFD_ASSERT ((tail_displacement
& 3) == 0);
9789 tail_displacement
>>= 2;
9791 BFD_ASSERT ((tail_displacement
& 0xff000000) == 0
9792 || (-tail_displacement
& 0xff000000) == 0);
9794 /* Calculate the displacement between the PLT slot and the entry
9795 in the GOT. The offset accounts for the value produced by
9796 adding to pc in the penultimate instruction of the PLT stub. */
9797 got_displacement
= (got_address
9798 - (plt_address
+ htab
->plt_entry_size
));
9800 /* NaCl does not support interworking at all. */
9801 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
));
9803 put_arm_insn (htab
, output_bfd
,
9804 elf32_arm_nacl_plt_entry
[0]
9805 | arm_movw_immediate (got_displacement
),
9807 put_arm_insn (htab
, output_bfd
,
9808 elf32_arm_nacl_plt_entry
[1]
9809 | arm_movt_immediate (got_displacement
),
9811 put_arm_insn (htab
, output_bfd
,
9812 elf32_arm_nacl_plt_entry
[2],
9814 put_arm_insn (htab
, output_bfd
,
9815 elf32_arm_nacl_plt_entry
[3]
9816 | (tail_displacement
& 0x00ffffff),
9819 else if (htab
->fdpic_p
)
9821 const bfd_vma
*plt_entry
= using_thumb_only (htab
)
9822 ? elf32_arm_fdpic_thumb_plt_entry
9823 : elf32_arm_fdpic_plt_entry
;
9825 /* Fill-up Thumb stub if needed. */
9826 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
9828 put_thumb_insn (htab
, output_bfd
,
9829 elf32_arm_plt_thumb_stub
[0], ptr
- 4);
9830 put_thumb_insn (htab
, output_bfd
,
9831 elf32_arm_plt_thumb_stub
[1], ptr
- 2);
9833 /* As we are using 32 bit instructions even for the Thumb
9834 version, we have to use 'put_arm_insn' instead of
9835 'put_thumb_insn'. */
9836 put_arm_insn (htab
, output_bfd
, plt_entry
[0], ptr
+ 0);
9837 put_arm_insn (htab
, output_bfd
, plt_entry
[1], ptr
+ 4);
9838 put_arm_insn (htab
, output_bfd
, plt_entry
[2], ptr
+ 8);
9839 put_arm_insn (htab
, output_bfd
, plt_entry
[3], ptr
+ 12);
9840 bfd_put_32 (output_bfd
, got_offset
, ptr
+ 16);
9842 if (!(info
->flags
& DF_BIND_NOW
))
9844 /* funcdesc_value_reloc_offset. */
9845 bfd_put_32 (output_bfd
,
9846 htab
->root
.srelplt
->reloc_count
* RELOC_SIZE (htab
),
9848 put_arm_insn (htab
, output_bfd
, plt_entry
[6], ptr
+ 24);
9849 put_arm_insn (htab
, output_bfd
, plt_entry
[7], ptr
+ 28);
9850 put_arm_insn (htab
, output_bfd
, plt_entry
[8], ptr
+ 32);
9851 put_arm_insn (htab
, output_bfd
, plt_entry
[9], ptr
+ 36);
9854 else if (using_thumb_only (htab
))
9856 /* PR ld/16017: Generate thumb only PLT entries. */
9857 if (!using_thumb2 (htab
))
9859 /* FIXME: We ought to be able to generate thumb-1 PLT
9861 _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9866 /* Calculate the displacement between the PLT slot and the entry in
9867 the GOT. The 12-byte offset accounts for the value produced by
9868 adding to pc in the 3rd instruction of the PLT stub. */
9869 got_displacement
= got_address
- (plt_address
+ 12);
9871 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9872 instead of 'put_thumb_insn'. */
9873 put_arm_insn (htab
, output_bfd
,
9874 elf32_thumb2_plt_entry
[0]
9875 | ((got_displacement
& 0x000000ff) << 16)
9876 | ((got_displacement
& 0x00000700) << 20)
9877 | ((got_displacement
& 0x00000800) >> 1)
9878 | ((got_displacement
& 0x0000f000) >> 12),
9880 put_arm_insn (htab
, output_bfd
,
9881 elf32_thumb2_plt_entry
[1]
9882 | ((got_displacement
& 0x00ff0000) )
9883 | ((got_displacement
& 0x07000000) << 4)
9884 | ((got_displacement
& 0x08000000) >> 17)
9885 | ((got_displacement
& 0xf0000000) >> 28),
9887 put_arm_insn (htab
, output_bfd
,
9888 elf32_thumb2_plt_entry
[2],
9890 put_arm_insn (htab
, output_bfd
,
9891 elf32_thumb2_plt_entry
[3],
9896 /* Calculate the displacement between the PLT slot and the
9897 entry in the GOT. The eight-byte offset accounts for the
9898 value produced by adding to pc in the first instruction
9900 got_displacement
= got_address
- (plt_address
+ 8);
9902 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
9904 put_thumb_insn (htab
, output_bfd
,
9905 elf32_arm_plt_thumb_stub
[0], ptr
- 4);
9906 put_thumb_insn (htab
, output_bfd
,
9907 elf32_arm_plt_thumb_stub
[1], ptr
- 2);
9910 if (!elf32_arm_use_long_plt_entry
)
9912 BFD_ASSERT ((got_displacement
& 0xf0000000) == 0);
9914 put_arm_insn (htab
, output_bfd
,
9915 elf32_arm_plt_entry_short
[0]
9916 | ((got_displacement
& 0x0ff00000) >> 20),
9918 put_arm_insn (htab
, output_bfd
,
9919 elf32_arm_plt_entry_short
[1]
9920 | ((got_displacement
& 0x000ff000) >> 12),
9922 put_arm_insn (htab
, output_bfd
,
9923 elf32_arm_plt_entry_short
[2]
9924 | (got_displacement
& 0x00000fff),
9926 #ifdef FOUR_WORD_PLT
9927 bfd_put_32 (output_bfd
, elf32_arm_plt_entry_short
[3], ptr
+ 12);
9932 put_arm_insn (htab
, output_bfd
,
9933 elf32_arm_plt_entry_long
[0]
9934 | ((got_displacement
& 0xf0000000) >> 28),
9936 put_arm_insn (htab
, output_bfd
,
9937 elf32_arm_plt_entry_long
[1]
9938 | ((got_displacement
& 0x0ff00000) >> 20),
9940 put_arm_insn (htab
, output_bfd
,
9941 elf32_arm_plt_entry_long
[2]
9942 | ((got_displacement
& 0x000ff000) >> 12),
9944 put_arm_insn (htab
, output_bfd
,
9945 elf32_arm_plt_entry_long
[3]
9946 | (got_displacement
& 0x00000fff),
9951 /* Fill in the entry in the .rel(a).(i)plt section. */
9952 rel
.r_offset
= got_address
;
9956 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9957 The dynamic linker or static executable then calls SYM_VALUE
9958 to determine the correct run-time value of the .igot.plt entry. */
9959 rel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
9960 initial_got_entry
= sym_value
;
9964 /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
9965 used by PLT entry. */
9968 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_FUNCDESC_VALUE
);
9969 initial_got_entry
= 0;
9973 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_JUMP_SLOT
);
9974 initial_got_entry
= (splt
->output_section
->vma
9975 + splt
->output_offset
);
9978 When thumb only we need to set the LSB for any address that
9979 will be used with an interworking branch instruction. */
9980 if (using_thumb_only (htab
))
9981 initial_got_entry
|= 1;
9985 /* Fill in the entry in the global offset table. */
9986 bfd_put_32 (output_bfd
, initial_got_entry
,
9987 sgot
->contents
+ got_offset
);
9989 if (htab
->fdpic_p
&& !(info
->flags
& DF_BIND_NOW
))
9991 /* Setup initial funcdesc value. */
9992 /* FIXME: we don't support lazy binding because there is a
9993 race condition between both words getting written and
9994 some other thread attempting to read them. The ARM
9995 architecture does not have an atomic 64 bit load/store
9996 instruction that could be used to prevent it; it is
9997 recommended that threaded FDPIC applications run with the
9998 LD_BIND_NOW environment variable set. */
9999 bfd_put_32 (output_bfd
, plt_address
+ 0x18,
10000 sgot
->contents
+ got_offset
);
10001 bfd_put_32 (output_bfd
, -1 /*TODO*/,
10002 sgot
->contents
+ got_offset
+ 4);
10006 elf32_arm_add_dynreloc (output_bfd
, info
, srel
, &rel
);
10011 /* For FDPIC we put PLT relocations into .rel.got when not
10012 lazy binding otherwise we put them in .rel.plt. For now,
10013 we don't support lazy binding so put it in .rel.got. */
10014 if (info
->flags
& DF_BIND_NOW
)
10015 elf32_arm_add_dynreloc (output_bfd
, info
, htab
->root
.srelgot
, &rel
);
10017 elf32_arm_add_dynreloc (output_bfd
, info
, htab
->root
.srelplt
, &rel
);
10021 loc
= srel
->contents
+ plt_index
* RELOC_SIZE (htab
);
10022 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
/* Some relocations map to different relocations depending on the
   target.  Return the real relocation.  */

static int
arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
                     int r_type)
{
  switch (r_type)
    {
    case R_ARM_TARGET1:
      if (globals->target1_is_rel)
        return R_ARM_REL32;
      else
        return R_ARM_ABS32;

    case R_ARM_TARGET2:
      return globals->target2_reloc;

    default:
      return r_type;
    }
}
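/* For example, when the linker is configured with target1_is_rel set,
   an R_ARM_TARGET1 relocation is processed exactly as R_ARM_REL32;
   otherwise it behaves as R_ARM_ABS32.  R_ARM_TARGET2 follows whatever
   was chosen in bfd_elf32_arm_set_target_params above ("rel", "abs" or
   "got-rel", i.e. R_ARM_REL32, R_ARM_ABS32 or R_ARM_GOT_PREL, with
   R_ARM_GOT32 forced for FDPIC output).  */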
/* Return the base VMA address which should be subtracted from real addresses
   when resolving @dtpoff relocation.
   This is PT_TLS segment p_vaddr.  */

static bfd_vma
dtpoff_base (struct bfd_link_info *info)
{
  /* If tls_sec is NULL, we should have signalled an error already.  */
  if (elf_hash_table (info)->tls_sec == NULL)
    return 0;
  return elf_hash_table (info)->tls_sec->vma;
}
/* Return the relocation value for @tpoff relocation
   if STT_TLS virtual address is ADDRESS.  */

static bfd_vma
tpoff (struct bfd_link_info *info, bfd_vma address)
{
  struct elf_link_hash_table *htab = elf_hash_table (info);
  bfd_vma base;

  /* If tls_sec is NULL, we should have signalled an error already.  */
  if (htab->tls_sec == NULL)
    return 0;
  base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
  return address - htab->tls_sec->vma + base;
}
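/* Worked example (numbers invented for illustration): if the PT_TLS
   segment starts at vma 0x20000 with alignment_power 3, and TCB_SIZE is
   the usual 8-byte ARM EABI thread control block, then base is
   align_power (8, 3) == 8 and a TLS symbol at address 0x20010 yields a
   @tpoff value of 0x20010 - 0x20000 + 8 == 0x18.  @dtpoff, by contrast,
   is simply the offset from the start of the segment (see dtpoff_base
   above), 0x10 in this example.  */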
/* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
   VALUE is the relocation value.  */

static bfd_reloc_status_type
elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
{
  if (value > 0xfff)
    return bfd_reloc_overflow;

  value |= bfd_get_32 (abfd, data) & 0xfffff000;
  bfd_put_32 (abfd, value, data);
  return bfd_reloc_ok;
}
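/* For instance, a computed value of 0xabc is merged into the low twelve
   bits of the instruction word while bits 31-12 are preserved; a value
   that does not fit in twelve bits is reported as bfd_reloc_overflow and
   no bytes are written.  */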
10095 /* Handle TLS relaxations. Relaxing is possible for symbols that use
10096 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10097 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10099 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10100 is to then call final_link_relocate. Return other values in the
10103 FIXME:When --emit-relocs is in effect, we'll emit relocs describing
10104 the pre-relaxed code. It would be nice if the relocs were updated
10105 to match the optimization. */
10107 static bfd_reloc_status_type
10108 elf32_arm_tls_relax (struct elf32_arm_link_hash_table
*globals
,
10109 bfd
*input_bfd
, asection
*input_sec
, bfd_byte
*contents
,
10110 Elf_Internal_Rela
*rel
, unsigned long is_local
)
10112 unsigned long insn
;
10114 switch (ELF32_R_TYPE (rel
->r_info
))
10117 return bfd_reloc_notsupported
;
10119 case R_ARM_TLS_GOTDESC
:
10124 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
10126 insn
-= 5; /* THUMB */
10128 insn
-= 8; /* ARM */
10130 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
10131 return bfd_reloc_continue
;
10133 case R_ARM_THM_TLS_DESCSEQ
:
10135 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
);
10136 if ((insn
& 0xff78) == 0x4478) /* add rx, pc */
10140 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
10142 else if ((insn
& 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
10146 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
10149 bfd_put_16 (input_bfd
, insn
& 0xf83f, contents
+ rel
->r_offset
);
10151 else if ((insn
& 0xff87) == 0x4780) /* blx rx */
10155 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
10158 bfd_put_16 (input_bfd
, 0x4600 | (insn
& 0x78),
10159 contents
+ rel
->r_offset
);
10163 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
10164 /* It's a 32 bit instruction, fetch the rest of it for
10165 error generation. */
10166 insn
= (insn
<< 16)
10167 | bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
+ 2);
10169 /* xgettext:c-format */
10170 (_("%pB(%pA+%#" PRIx64
"): "
10171 "unexpected %s instruction '%#lx' in TLS trampoline"),
10172 input_bfd
, input_sec
, (uint64_t) rel
->r_offset
,
10174 return bfd_reloc_notsupported
;
10178 case R_ARM_TLS_DESCSEQ
:
10180 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
10181 if ((insn
& 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
10185 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xffff),
10186 contents
+ rel
->r_offset
);
10188 else if ((insn
& 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
10192 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
10195 bfd_put_32 (input_bfd
, insn
& 0xfffff000,
10196 contents
+ rel
->r_offset
);
10198 else if ((insn
& 0xfffffff0) == 0xe12fff30) /* blx rx */
10202 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
10205 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xf),
10206 contents
+ rel
->r_offset
);
10211 /* xgettext:c-format */
10212 (_("%pB(%pA+%#" PRIx64
"): "
10213 "unexpected %s instruction '%#lx' in TLS trampoline"),
10214 input_bfd
, input_sec
, (uint64_t) rel
->r_offset
,
10216 return bfd_reloc_notsupported
;
10220 case R_ARM_TLS_CALL
:
10221 /* GD->IE relaxation, turn the instruction into 'nop' or
10222 'ldr r0, [pc,r0]' */
10223 insn
= is_local
? 0xe1a00000 : 0xe79f0000;
10224 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
10227 case R_ARM_THM_TLS_CALL
:
10228 /* GD->IE relaxation. */
10230 /* add r0,pc; ldr r0, [r0] */
10232 else if (using_thumb2 (globals
))
10239 bfd_put_16 (input_bfd
, insn
>> 16, contents
+ rel
->r_offset
);
10240 bfd_put_16 (input_bfd
, insn
& 0xffff, contents
+ rel
->r_offset
+ 2);
10243 return bfd_reloc_ok
;
/* For a given value of n, calculate the value of G_n as required to
   deal with group relocations.  We return it in the form of an
   encoded constant-and-rotation, together with the final residual.  If n is
   specified as less than zero, then final_residual is filled with the
   input value and no further action is performed.  */

static bfd_vma
calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
{
  int current_n;
  bfd_vma g_n;
  bfd_vma encoded_g_n = 0;
  bfd_vma residual = value; /* Also known as Y_n.  */

  for (current_n = 0; current_n <= n; current_n++)
    {
      int shift;

      /* Calculate which part of the value to mask.  */
      if (residual == 0)
        shift = 0;
      else
        {
          int msb;

          /* Determine the most significant bit in the residual and
             align the resulting value to a 2-bit boundary.  */
          for (msb = 30; msb >= 0; msb -= 2)
            if (residual & (3u << msb))
              break;

          /* The desired shift is now (msb - 6), or zero, whichever
             is greater.  */
          shift = msb - 6;
          if (shift < 0)
            shift = 0;
        }

      /* Calculate g_n in 32-bit as well as encoded constant+rotation form.  */
      g_n = residual & (0xff << shift);
      encoded_g_n = (g_n >> shift)
                    | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);

      /* Calculate the residual for the next time around.  */
      residual &= ~g_n;
    }

  *final_residual = residual;

  return encoded_g_n;
}
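/* Worked example (value chosen for illustration): for value 0x12345678
   and n == 0, the most significant populated 2-bit chunk is at bit 28,
   so shift becomes 22 and G_0 = 0x12345678 & (0xff << 22) = 0x12000000.
   The encoded form is 0x48 | (5 << 8) = 0x548, i.e. the 8-bit constant
   0x48 rotated right by 10, and the residual passed on for G_1 is
   0x00345678.  */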
/* Given an ARM instruction, determine whether it is an ADD or a SUB.
   Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise.  */

static int
identify_add_or_sub (bfd_vma insn)
{
  int opcode = insn & 0x1e00000;

  if (opcode == 1 << 23) /* ADD */
    return 1;

  if (opcode == 1 << 22) /* SUB */
    return -1;

  return 0;
}
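/* Example (instruction words constructed for illustration): for the ARM
   data-processing encoding 0xe2810004 (add r0, r1, #4) the masked opcode
   field is 0x00800000 == 1 << 23, so the function returns 1; for
   0xe2410004 (sub r0, r1, #4) it is 0x00400000 == 1 << 22 and the result
   is -1.  Anything whose opcode field matches neither pattern, for
   example a MOV, yields 0.  */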
10315 /* Perform a relocation as part of a final link. */
10317 static bfd_reloc_status_type
10318 elf32_arm_final_link_relocate (reloc_howto_type
* howto
,
10321 asection
* input_section
,
10322 bfd_byte
* contents
,
10323 Elf_Internal_Rela
* rel
,
10325 struct bfd_link_info
* info
,
10326 asection
* sym_sec
,
10327 const char * sym_name
,
10328 unsigned char st_type
,
10329 enum arm_st_branch_type branch_type
,
10330 struct elf_link_hash_entry
* h
,
10331 bool * unresolved_reloc_p
,
10332 char ** error_message
)
10334 unsigned long r_type
= howto
->type
;
10335 unsigned long r_symndx
;
10336 bfd_byte
* hit_data
= contents
+ rel
->r_offset
;
10337 bfd_vma
* local_got_offsets
;
10338 bfd_vma
* local_tlsdesc_gotents
;
10341 asection
* sreloc
= NULL
;
10342 asection
* srelgot
;
10344 bfd_signed_vma signed_addend
;
10345 unsigned char dynreloc_st_type
;
10346 bfd_vma dynreloc_value
;
10347 struct elf32_arm_link_hash_table
* globals
;
10348 struct elf32_arm_link_hash_entry
*eh
;
10349 union gotplt_union
*root_plt
;
10350 struct arm_plt_info
*arm_plt
;
10351 bfd_vma plt_offset
;
10352 bfd_vma gotplt_offset
;
10353 bool has_iplt_entry
;
10354 bool resolved_to_zero
;
10356 globals
= elf32_arm_hash_table (info
);
10357 if (globals
== NULL
)
10358 return bfd_reloc_notsupported
;
10360 BFD_ASSERT (is_arm_elf (input_bfd
));
10361 BFD_ASSERT (howto
!= NULL
);
10363 /* Some relocation types map to different relocations depending on the
10364 target. We pick the right one here. */
10365 r_type
= arm_real_reloc_type (globals
, r_type
);
10367 /* It is possible to have linker relaxations on some TLS access
10368 models. Update our information here. */
10369 r_type
= elf32_arm_tls_transition (info
, r_type
, h
);
10371 if (r_type
!= howto
->type
)
10372 howto
= elf32_arm_howto_from_type (r_type
);
10374 eh
= (struct elf32_arm_link_hash_entry
*) h
;
10375 sgot
= globals
->root
.sgot
;
10376 local_got_offsets
= elf_local_got_offsets (input_bfd
);
10377 local_tlsdesc_gotents
= elf32_arm_local_tlsdesc_gotent (input_bfd
);
10379 if (globals
->root
.dynamic_sections_created
)
10380 srelgot
= globals
->root
.srelgot
;
10384 r_symndx
= ELF32_R_SYM (rel
->r_info
);
10386 if (globals
->use_rel
)
10390 switch (bfd_get_reloc_size (howto
))
10392 case 1: addend
= bfd_get_8 (input_bfd
, hit_data
); break;
10393 case 2: addend
= bfd_get_16 (input_bfd
, hit_data
); break;
10394 case 4: addend
= bfd_get_32 (input_bfd
, hit_data
); break;
10395 default: addend
= 0; break;
10397 /* Note: the addend and signed_addend calculated here are
10398 incorrect for any split field. */
10399 addend
&= howto
->src_mask
;
10400 sign
= howto
->src_mask
& ~(howto
->src_mask
>> 1);
10401 signed_addend
= (addend
^ sign
) - sign
;
10402 signed_addend
= (bfd_vma
) signed_addend
<< howto
->rightshift
;
10403 addend
<<= howto
->rightshift
;
10406 addend
= signed_addend
= rel
->r_addend
;
10408 /* Record the symbol information that should be used in dynamic
10410 dynreloc_st_type
= st_type
;
10411 dynreloc_value
= value
;
10412 if (branch_type
== ST_BRANCH_TO_THUMB
)
10413 dynreloc_value
|= 1;
10415 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10416 VALUE appropriately for relocations that we resolve at link time. */
10417 has_iplt_entry
= false;
10418 if (elf32_arm_get_plt_info (input_bfd
, globals
, eh
, r_symndx
, &root_plt
,
10420 && root_plt
->offset
!= (bfd_vma
) -1)
10422 plt_offset
= root_plt
->offset
;
10423 gotplt_offset
= arm_plt
->got_offset
;
10425 if (h
== NULL
|| eh
->is_iplt
)
10427 has_iplt_entry
= true;
10428 splt
= globals
->root
.iplt
;
10430 /* Populate .iplt entries here, because not all of them will
10431 be seen by finish_dynamic_symbol. The lower bit is set if
10432 we have already populated the entry. */
10433 if (plt_offset
& 1)
10437 if (elf32_arm_populate_plt_entry (output_bfd
, info
, root_plt
, arm_plt
,
10438 -1, dynreloc_value
))
10439 root_plt
->offset
|= 1;
10441 return bfd_reloc_notsupported
;
10444 /* Static relocations always resolve to the .iplt entry. */
10445 st_type
= STT_FUNC
;
10446 value
= (splt
->output_section
->vma
10447 + splt
->output_offset
10449 branch_type
= ST_BRANCH_TO_ARM
;
10451 /* If there are non-call relocations that resolve to the .iplt
10452 entry, then all dynamic ones must too. */
10453 if (arm_plt
->noncall_refcount
!= 0)
10455 dynreloc_st_type
= st_type
;
10456 dynreloc_value
= value
;
10460 /* We populate the .plt entry in finish_dynamic_symbol. */
10461 splt
= globals
->root
.splt
;
10466 plt_offset
= (bfd_vma
) -1;
10467 gotplt_offset
= (bfd_vma
) -1;
10470 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we are
10471 resolving a function call relocation. We want to inform the user
10472 that something is wrong. */
10473 if (using_thumb_only (globals
)
10474 && (r_type
== R_ARM_THM_CALL
10475 || r_type
== R_ARM_THM_JUMP24
)
10476 && branch_type
== ST_BRANCH_TO_ARM
10477 /* Calls through a PLT are special: the assembly source code
10478 cannot be annotated with '.type foo(PLT), %function', and
10479 they handled specifically below anyway. */
10482 if (sym_sec
== bfd_abs_section_ptr
)
10484 /* As an exception, assume that absolute symbols are of the
10485 right kind (Thumb). They are presumably defined in the
10486 linker script, where it is not possible to declare them as
10487 Thumb (and thus are seen as Arm mode). Inform the user with
10488 a warning, though. */
10489 branch_type
= ST_BRANCH_TO_THUMB
;
10491 if (sym_sec
->owner
)
10493 (_("warning: %pB(%s): Forcing bramch to absolute symbol in Thumb mode (Thumb-only CPU)"
10495 sym_sec
->owner
, sym_name
, input_bfd
);
10498 (_("warning: (%s): Forcing branch to absolute symbol in Thumb mode (Thumb-only CPU)"
10500 sym_name
, input_bfd
);
10503 /* Otherwise do not silently build a stub, and let the users
10504 know they have to fix their code. Indeed, we could decide
10505 to insert a stub involving Arm code and/or BLX, leading to
10506 a run-time crash. */
10507 branch_type
= ST_BRANCH_UNKNOWN
;
10510 /* Fail early if branch_type is ST_BRANCH_UNKNOWN and we target a
10511 Thumb-only CPU. We could emit a warning on Arm-capable targets
10512 too, but that would be too verbose (a lot of legacy code does not
10513 use the .type foo, %function directive). */
10514 if (using_thumb_only (globals
)
10515 && (r_type
== R_ARM_THM_CALL
10516 || r_type
== R_ARM_THM_JUMP24
)
10517 && branch_type
== ST_BRANCH_UNKNOWN
10518 /* Exception to the rule above: a branch to an undefined weak
10519 symbol is turned into a jump to the next instruction unless a
10520 PLT entry will be created (see below). */
10521 && !(h
&& h
->root
.type
== bfd_link_hash_undefweak
10522 && plt_offset
== (bfd_vma
) -1))
10524 if (sym_sec
!= NULL
10525 && sym_sec
->owner
!= NULL
)
10527 (_("%pB(%s): Unknown destination type (ARM/Thumb) in %pB"),
10528 sym_sec
->owner
, sym_name
, input_bfd
);
10531 (_("(%s): Unknown destination type (ARM/Thumb) in %pB"),
10532 sym_name
, input_bfd
);
10534 return bfd_reloc_notsupported
;
10537 resolved_to_zero
= (h
!= NULL
10538 && UNDEFWEAK_NO_DYNAMIC_RELOC (info
, h
));
10543 /* We don't need to find a value for this symbol. It's just a
10545 *unresolved_reloc_p
= false;
10546 return bfd_reloc_ok
;
10549 if (globals
->root
.target_os
!= is_vxworks
)
10550 return elf32_arm_abs12_reloc (input_bfd
, hit_data
, value
+ addend
);
10551 /* Fall through. */
10555 case R_ARM_ABS32_NOI
:
10557 case R_ARM_REL32_NOI
:
10563 /* Handle relocations which should use the PLT entry. ABS32/REL32
10564 will use the symbol's value, which may point to a PLT entry, but we
10565 don't need to handle that here. If we created a PLT entry, all
10566 branches in this object should go to it, except if the PLT is too
10567 far away, in which case a long branch stub should be inserted. */
10568 if ((r_type
!= R_ARM_ABS32
&& r_type
!= R_ARM_REL32
10569 && r_type
!= R_ARM_ABS32_NOI
&& r_type
!= R_ARM_REL32_NOI
10570 && r_type
!= R_ARM_CALL
10571 && r_type
!= R_ARM_JUMP24
10572 && r_type
!= R_ARM_PLT32
)
10573 && plt_offset
!= (bfd_vma
) -1)
10575 /* If we've created a .plt section, and assigned a PLT entry
10576 to this function, it must either be a STT_GNU_IFUNC reference
10577 or not be known to bind locally. In other cases, we should
10578 have cleared the PLT entry by now. */
10579 BFD_ASSERT (has_iplt_entry
|| !SYMBOL_CALLS_LOCAL (info
, h
));
10581 value
= (splt
->output_section
->vma
10582 + splt
->output_offset
10584 *unresolved_reloc_p
= false;
10585 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10586 contents
, rel
->r_offset
, value
,
10590 /* When generating a shared library or PIE, these relocations
10591 are copied into the output file to be resolved at run time. */
10592 if ((bfd_link_pic (info
)
10593 || globals
->fdpic_p
)
10594 && (input_section
->flags
& SEC_ALLOC
)
10595 && !(globals
->root
.target_os
== is_vxworks
10596 && strcmp (input_section
->output_section
->name
,
10598 && ((r_type
!= R_ARM_REL32
&& r_type
!= R_ARM_REL32_NOI
)
10599 || !SYMBOL_CALLS_LOCAL (info
, h
))
10600 && !(input_bfd
== globals
->stub_bfd
10601 && strstr (input_section
->name
, STUB_SUFFIX
))
10603 || (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
10604 && !resolved_to_zero
)
10605 || h
->root
.type
!= bfd_link_hash_undefweak
)
10606 && r_type
!= R_ARM_PC24
10607 && r_type
!= R_ARM_CALL
10608 && r_type
!= R_ARM_JUMP24
10609 && r_type
!= R_ARM_PREL31
10610 && r_type
!= R_ARM_PLT32
)
10612 Elf_Internal_Rela outrel
;
10613 bool skip
, relocate
;
10616 if ((r_type
== R_ARM_REL32
|| r_type
== R_ARM_REL32_NOI
)
10617 && !h
->def_regular
)
10619 char *v
= _("shared object");
10621 if (bfd_link_executable (info
))
10622 v
= _("PIE executable");
10625 (_("%pB: relocation %s against external or undefined symbol `%s'"
10626 " can not be used when making a %s; recompile with -fPIC"), input_bfd
,
10627 elf32_arm_howto_table_1
[r_type
].name
, h
->root
.root
.string
, v
);
10628 return bfd_reloc_notsupported
;
10631 *unresolved_reloc_p
= false;
10633 if (sreloc
== NULL
&& globals
->root
.dynamic_sections_created
)
10635 sreloc
= _bfd_elf_get_dynamic_reloc_section (input_bfd
, input_section
,
10636 ! globals
->use_rel
);
10638 if (sreloc
== NULL
)
10639 return bfd_reloc_notsupported
;
10645 outrel
.r_addend
= addend
;
10647 _bfd_elf_section_offset (output_bfd
, info
, input_section
,
10649 if (outrel
.r_offset
== (bfd_vma
) -1)
10651 else if (outrel
.r_offset
== (bfd_vma
) -2)
10652 skip
= true, relocate
= true;
10653 outrel
.r_offset
+= (input_section
->output_section
->vma
10654 + input_section
->output_offset
);
10657 memset (&outrel
, 0, sizeof outrel
);
10659 && h
->dynindx
!= -1
10660 && (!bfd_link_pic (info
)
10661 || !(bfd_link_pie (info
)
10662 || SYMBOLIC_BIND (info
, h
))
10663 || !h
->def_regular
))
10664 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, r_type
);
10669 /* This symbol is local, or marked to become local. */
10670 BFD_ASSERT (r_type
== R_ARM_ABS32
|| r_type
== R_ARM_ABS32_NOI
10671 || (globals
->fdpic_p
&& !bfd_link_pic (info
)));
10672 /* On SVR4-ish systems, the dynamic loader cannot
10673 relocate the text and data segments independently,
10674 so the symbol does not matter. */
10676 if (dynreloc_st_type
== STT_GNU_IFUNC
)
10677 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10678 to the .iplt entry. Instead, every non-call reference
10679 must use an R_ARM_IRELATIVE relocation to obtain the
10680 correct run-time address. */
10681 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_IRELATIVE
);
10682 else if (globals
->fdpic_p
&& !bfd_link_pic (info
))
10685 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_RELATIVE
);
10686 if (globals
->use_rel
)
10689 outrel
.r_addend
+= dynreloc_value
;
10693 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
, outrel
.r_offset
);
10695 elf32_arm_add_dynreloc (output_bfd
, info
, sreloc
, &outrel
);
10697 /* If this reloc is against an external symbol, we do not want to
10698 fiddle with the addend. Otherwise, we need to include the symbol
10699 value so that it becomes an addend for the dynamic reloc. */
10701 return bfd_reloc_ok
;
10703 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10704 contents
, rel
->r_offset
,
10705 dynreloc_value
, (bfd_vma
) 0);
10707 else switch (r_type
)
10710 return elf32_arm_abs12_reloc (input_bfd
, hit_data
, value
+ addend
);
10712 case R_ARM_XPC25
: /* Arm BLX instruction. */
10715 case R_ARM_PC24
: /* Arm B/BL instruction. */
10718 struct elf32_arm_stub_hash_entry
*stub_entry
= NULL
;
10720 if (r_type
== R_ARM_XPC25
)
10722 /* Check for Arm calling Arm function. */
10723 /* FIXME: Should we translate the instruction into a BL
10724 instruction instead ? */
10725 if (branch_type
!= ST_BRANCH_TO_THUMB
)
10727 (_("\%pB: warning: %s BLX instruction targets"
10728 " %s function '%s'"),
10730 "ARM", h
? h
->root
.root
.string
: "(local)");
10732 else if (r_type
== R_ARM_PC24
)
10734 /* Check for Arm calling Thumb function. */
10735 if (branch_type
== ST_BRANCH_TO_THUMB
)
10737 if (elf32_arm_to_thumb_stub (info
, sym_name
, input_bfd
,
10738 output_bfd
, input_section
,
10739 hit_data
, sym_sec
, rel
->r_offset
,
10740 signed_addend
, value
,
10742 return bfd_reloc_ok
;
10744 return bfd_reloc_dangerous
;
10748 /* Check if a stub has to be inserted because the
10749 destination is too far or we are changing mode. */
10750 if ( r_type
== R_ARM_CALL
10751 || r_type
== R_ARM_JUMP24
10752 || r_type
== R_ARM_PLT32
)
10754 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
10755 struct elf32_arm_link_hash_entry
*hash
;
10757 hash
= (struct elf32_arm_link_hash_entry
*) h
;
10758 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
10759 st_type
, &branch_type
,
10760 hash
, value
, sym_sec
,
10761 input_bfd
, sym_name
);
10763 if (stub_type
!= arm_stub_none
)
10765 /* The target is out of reach, so redirect the
10766 branch to the local stub for this function. */
10767 stub_entry
= elf32_arm_get_stub_entry (input_section
,
10772 if (stub_entry
!= NULL
)
10773 value
= (stub_entry
->stub_offset
10774 + stub_entry
->stub_sec
->output_offset
10775 + stub_entry
->stub_sec
->output_section
->vma
);
10777 if (plt_offset
!= (bfd_vma
) -1)
10778 *unresolved_reloc_p
= false;
10783 /* If the call goes through a PLT entry, make sure to
10784 check distance to the right destination address. */
10785 if (plt_offset
!= (bfd_vma
) -1)
10787 value
= (splt
->output_section
->vma
10788 + splt
->output_offset
10790 *unresolved_reloc_p
= false;
10791 /* The PLT entry is in ARM mode, regardless of the
10792 target function. */
10793 branch_type
= ST_BRANCH_TO_ARM
;
10798 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10800 S is the address of the symbol in the relocation.
10801 P is address of the instruction being relocated.
10802 A is the addend (extracted from the instruction) in bytes.
10804 S is held in 'value'.
10805 P is the base address of the section containing the
10806 instruction plus the offset of the reloc into that
10808 (input_section->output_section->vma +
10809 input_section->output_offset +
10811 A is the addend, converted into bytes, ie:
10812 (signed_addend * 4)
10814 Note: None of these operations have knowledge of the pipeline
10815 size of the processor, thus it is up to the assembler to
10816 encode this information into the addend. */
10817 value
-= (input_section
->output_section
->vma
10818 + input_section
->output_offset
);
10819 value
-= rel
->r_offset
;
10820 value
+= signed_addend
;
10822 signed_addend
= value
;
10823 signed_addend
>>= howto
->rightshift
;
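	    /* SIGNED_ADDEND now holds the branch displacement in word units
	       (these branch howtos use a right shift of 2), ready for the
	       signed range check against howto->dst_mask below.  */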
	    /* A branch to an undefined weak symbol is turned into a jump to
	       the next instruction unless a PLT entry will be created.
	       Do the same for local undefined symbols (but not for STN_UNDEF).
	       The jump to the next instruction is optimized as a NOP depending
	       on the architecture.  */
	    if (h ? (h->root.type == bfd_link_hash_undefweak
		     && plt_offset == (bfd_vma) -1)
		  : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
	      {
		value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);

		if (arch_has_arm_nop (globals))
		  value |= 0x0320f000;
		else
		  value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0.  */
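		/* Only the original condition code (bits 28-31) is kept:
		   0x0320f000 is the ARM NOP hint encoding, and 0x01a00000
		   (MOV r0, r0) is used for architectures without it.  */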
	      }
	    else
	      {
		/* Perform a signed range check.  */
		if (   signed_addend >   ((bfd_signed_vma)  (howto->dst_mask >> 1))
		    || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
		  return bfd_reloc_overflow;

		addend = (value & 2);

		value = (signed_addend & howto->dst_mask)
			| (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));

		if (r_type == R_ARM_CALL)
		  {
		    /* Set the H bit in the BLX instruction.  */
		    if (branch_type == ST_BRANCH_TO_THUMB)
		      {
			if (addend)
			  value |= (1 << 24);
			else
			  value &= ~(bfd_vma)(1 << 24);
		      }

		    /* Select the correct instruction (BL or BLX).  */
		    /* Only if we are not handling a BL to a stub.  In this
		       case, mode switching is performed by the stub.  */
		    if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
		      value |= (1 << 28);
		    else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
		      {
			value &= ~(bfd_vma)(1 << 28);
			value |= (1 << 24);
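			/* An unconditional BL carries the AL condition in
			   bits 28-31 while BLX uses the 0xF encoding there,
			   so toggling bit 28 switches between them; bit 24
			   is the L bit of B/BL (and the halfword bit H of
			   BLX, set above from the addend).  */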
		      }
		  }
	      }
	  }
	  break;

	case R_ARM_ABS32:
	  value += addend;
	  if (branch_type == ST_BRANCH_TO_THUMB)
	    value |= 1;
	  break;

	case R_ARM_ABS32_NOI:
	  value += addend;
	  break;

	case R_ARM_REL32:
	  value += addend;
	  if (branch_type == ST_BRANCH_TO_THUMB)
	    value |= 1;
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);
	  break;

	case R_ARM_REL32_NOI:
	  value += addend;
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);
	  break;

	case R_ARM_PREL31:
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);
	  value += signed_addend;
	  if (! h || h->root.type != bfd_link_hash_undefweak)
	    {
	      /* Check for overflow.  */
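	      /* The value must fit in a signed 31-bit field, i.e. bits 30
		 and 31 must be equal; bit 31 of the word itself is
		 preserved below.  */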
	      if ((value ^ (value >> 1)) & (1 << 30))
		return bfd_reloc_overflow;
	    }
	  value &= 0x7fffffff;
	  value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
	  if (branch_type == ST_BRANCH_TO_THUMB)
	    value |= 1;
	  break;
	}

      bfd_put_32 (input_bfd, value, hit_data);
      return bfd_reloc_ok;

    case R_ARM_ABS8:
      /* There is no way to tell whether the user intended to use a signed or
	 unsigned addend.  When checking for overflow we accept either,
	 as specified by the AAELF.  */
      if ((long) value > 0xff || (long) value < -0x80)
	return bfd_reloc_overflow;

      bfd_put_8 (input_bfd, value, hit_data);
      return bfd_reloc_ok;

    case R_ARM_ABS16:
      /* See comment for R_ARM_ABS8.  */
      if ((long) value > 0xffff || (long) value < -0x8000)
	return bfd_reloc_overflow;

      bfd_put_16 (input_bfd, value, hit_data);
      return bfd_reloc_ok;

    case R_ARM_THM_ABS5:
      /* Support ldr and str instructions for the thumb.  */
      if (globals->use_rel)
	{
	  /* Need to refetch addend.  */
	  addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
	  /* ??? Need to determine shift amount from operand size.  */
	  addend >>= howto->rightshift;
	}
      value += addend;

      /* ??? Isn't value unsigned?  */
      if ((long) value > 0x1f || (long) value < -0x10)
	return bfd_reloc_overflow;

      /* ??? Value needs to be properly shifted into place first.  */
      value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
      bfd_put_16 (input_bfd, value, hit_data);
      return bfd_reloc_ok;

    case R_ARM_THM_ALU_PREL_11_0:
      /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw).  */
      {
	bfd_vma insn;
	bfd_signed_vma relocation;

	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
	       | bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
			    | ((insn & (1 << 26)) >> 15);
	    if (insn & 0xf00000)
	      signed_addend = -signed_addend;
	  }

	relocation = value + signed_addend;
	relocation -= Pa (input_section->output_section->vma
			  + input_section->output_offset
			  + rel->r_offset);

	/* PR 21523: Use an absolute value.  The user of this reloc will
	   have already selected an ADD or SUB insn appropriately.  */
	value = llabs (relocation);

	if (value >= 0x1000)
	  return bfd_reloc_overflow;

	/* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	insn = (insn & 0xfb0f8f00) | (value & 0xff)
	       | ((value & 0x700) << 4)
	       | ((value & 0x800) << 15);
	if (relocation < 0)
	  insn |= 0xa00000;

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);

	return bfd_reloc_ok;
      }

    case R_ARM_THM_PC8:
      /* PR 10073:  This reloc is not generated by the GNU toolchain,
	 but it is supported for compatibility with third party libraries
	 generated by other compilers, specifically the ARM/IAR.  */
      {
	bfd_vma insn;
	bfd_signed_vma relocation;

	insn = bfd_get_16 (input_bfd, hit_data);

	if (globals->use_rel)
	  addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) - 4;
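	/* LDR (literal) in Thumb-1 encodes a word offset from Align(PC,4);
	   the shift-left-by-2, add-4, wrap-to-10-bits, subtract-4 sequence
	   above recovers that addressing before VALUE is added in.  */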
	relocation = value + addend;
	relocation -= Pa (input_section->output_section->vma
			  + input_section->output_offset
			  + rel->r_offset);

	value = relocation;

	/* We do not check for overflow of this reloc.  Although strictly
	   speaking this is incorrect, it appears to be necessary in order
	   to work with IAR generated relocs.  Since GCC and GAS do not
	   generate R_ARM_THM_PC8 relocs, the lack of a check should not be
	   a problem for them.  */

	insn = (insn & 0xff00) | (value >> 2);

	bfd_put_16 (input_bfd, insn, hit_data);

	return bfd_reloc_ok;
      }

    case R_ARM_THM_PC12:
      /* Corresponds to: ldr.w reg, [pc, #offset].  */
      {
	bfd_vma insn;
	bfd_signed_vma relocation;

	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
	       | bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    signed_addend = insn & 0xfff;
	    if (!(insn & (1 << 23)))
	      signed_addend = -signed_addend;
	  }

	relocation = value + signed_addend;
	relocation -= Pa (input_section->output_section->vma
			  + input_section->output_offset
			  + rel->r_offset);

	value = relocation;

	if (value >= 0x1000)
	  return bfd_reloc_overflow;

	insn = (insn & 0xff7ff000) | value;
	if (relocation >= 0)
	  insn |= (1 << 23);

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);

	return bfd_reloc_ok;
      }

    case R_ARM_THM_XPC22:
    case R_ARM_THM_CALL:
    case R_ARM_THM_JUMP24:
      /* Thumb BL (branch long instruction).  */
      {
	bfd_vma relocation;
	bfd_vma reloc_sign;
	bool overflow = false;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	bfd_signed_vma reloc_signed_max;
	bfd_signed_vma reloc_signed_min;
	bfd_vma check;
	bfd_signed_vma signed_check;
	int bitsize;
	const int thumb2 = using_thumb2 (globals);
	const int thumb2_bl = using_thumb2_bl (globals);

	/* A branch to an undefined weak symbol is turned into a jump to
	   the next instruction unless a PLT entry will be created.
	   The jump to the next instruction is optimized as a NOP.W for
	   Thumb-2 enabled architectures.  */
	if (h && h->root.type == bfd_link_hash_undefweak
	    && plt_offset == (bfd_vma) -1)
	  {
	    if (thumb2)
	      {
		bfd_put_16 (input_bfd, 0xf3af, hit_data);
		bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
	      }
	    else
	      {
		bfd_put_16 (input_bfd, 0xe000, hit_data);
		bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
	      }
	    return bfd_reloc_ok;
	  }

	/* Fetch the addend.  We use the Thumb-2 encoding (backwards compatible
	   with Thumb-1) involving the J1 and J2 bits.  */
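	/* I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S); the branch offset is
	   the sign-extended value S:I1:I2:imm10:imm11:0.  */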
	if (globals->use_rel)
	  {
	    bfd_vma s = (upper_insn & (1 << 10)) >> 10;
	    bfd_vma upper = upper_insn & 0x3ff;
	    bfd_vma lower = lower_insn & 0x7ff;
	    bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
	    bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
	    bfd_vma i1 = j1 ^ s ? 0 : 1;
	    bfd_vma i2 = j2 ^ s ? 0 : 1;

	    addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
	    /* Sign extend.  */
	    addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);

	    signed_addend = addend;
	  }

	if (r_type == R_ARM_THM_XPC22)
	  {
	    /* Check for Thumb to Thumb call.  */
	    /* FIXME: Should we translate the instruction into a BL
	       instruction instead ?  */
	    if (branch_type == ST_BRANCH_TO_THUMB)
	      _bfd_error_handler
		(_("%pB: warning: %s BLX instruction targets"
		   " %s function '%s'"),
		 input_bfd, "Thumb",
		 "Thumb", h ? h->root.root.string : "(local)");
	  }
	else
	  {
	    /* If it is not a call to Thumb, assume call to Arm.
	       If it is a call relative to a section name, then it is not a
	       function call at all, but rather a long jump.  Calls through
	       the PLT do not require stubs.  */
	    if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
	      {
		if (globals->use_blx && r_type == R_ARM_THM_CALL)
		  {
		    /* Convert BL to BLX.  */
		    lower_insn = (lower_insn & ~0x1000) | 0x0800;
		  }
		else if ((   r_type != R_ARM_THM_CALL)
			 && (r_type != R_ARM_THM_JUMP24))
		  {
		    if (elf32_thumb_to_arm_stub
			(info, sym_name, input_bfd, output_bfd, input_section,
			 hit_data, sym_sec, rel->r_offset, signed_addend, value,
			 error_message))
		      return bfd_reloc_ok;
		    else
		      return bfd_reloc_dangerous;
		  }
	      }
	    else if (branch_type == ST_BRANCH_TO_THUMB
		     && globals->use_blx
		     && r_type == R_ARM_THM_CALL)
	      {
		/* Make sure this is a BL.  */
		lower_insn |= 0x1800;
	      }
	  }

	{
	  enum elf32_arm_stub_type stub_type = arm_stub_none;
	  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
	    {
	      /* Check if a stub has to be inserted because the destination
		 is too far.  */
	      struct elf32_arm_stub_hash_entry *stub_entry;
	      struct elf32_arm_link_hash_entry *hash;

	      hash = (struct elf32_arm_link_hash_entry *) h;

	      stub_type = arm_type_of_stub (info, input_section, rel,
					    st_type, &branch_type,
					    hash, value, sym_sec,
					    input_bfd, sym_name);

	      if (stub_type != arm_stub_none)
		{
		  /* The target is out of reach or we are changing modes, so
		     redirect the branch to the local stub for this
		     function.  */
		  stub_entry = elf32_arm_get_stub_entry (input_section,
							 sym_sec, h,
							 rel, globals,
							 stub_type);
		  if (stub_entry != NULL)
		    {
		      value = (stub_entry->stub_offset
			       + stub_entry->stub_sec->output_offset
			       + stub_entry->stub_sec->output_section->vma);

		      if (plt_offset != (bfd_vma) -1)
			*unresolved_reloc_p = false;
		    }

		  /* If this call becomes a call to Arm, force BLX.  */
		  if (globals->use_blx && (r_type == R_ARM_THM_CALL))
		    {
		      if ((stub_entry
			   && !arm_stub_is_thumb (stub_entry->stub_type))
			  || branch_type != ST_BRANCH_TO_THUMB)
			lower_insn = (lower_insn & ~0x1000) | 0x0800;
		    }
		}
	    }

	  /* Handle calls via the PLT.  */
	  if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
	    {
	      value = (splt->output_section->vma
		       + splt->output_offset
		       + plt_offset);

	      if (globals->use_blx
		  && r_type == R_ARM_THM_CALL
		  && ! using_thumb_only (globals))
		{
		  /* If the Thumb BLX instruction is available, convert
		     the BL to a BLX instruction to call the ARM-mode
		     PLT entry.  */
		  lower_insn = (lower_insn & ~0x1000) | 0x0800;
		  branch_type = ST_BRANCH_TO_ARM;
		}
	      else
		{
		  if (! using_thumb_only (globals))
		    /* Target the Thumb stub before the ARM PLT entry.  */
		    value -= PLT_THUMB_STUB_SIZE;
		  branch_type = ST_BRANCH_TO_THUMB;
		}
	      *unresolved_reloc_p = false;
	    }

	  relocation = value + signed_addend;

	  relocation -= (input_section->output_section->vma
			 + input_section->output_offset
			 + rel->r_offset);

	  check = relocation >> howto->rightshift;

	  /* If this is a signed value, the rightshift just dropped
	     leading 1 bits (assuming twos complement).  */
	  if ((bfd_signed_vma) relocation >= 0)
	    signed_check = check;
	  else
	    signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);

	  /* Calculate the permissible maximum and minimum values for
	     this relocation according to whether we're relocating for
	     Thumb-2 or not.  */
	  bitsize = howto->bitsize;
	  if (!thumb2_bl)
	    bitsize -= 2;
	  reloc_signed_max = (1 << (bitsize - 1)) - 1;
	  reloc_signed_min = ~reloc_signed_max;

	  /* Assumes two's complement.  */
	  if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	    overflow = true;

	  if ((lower_insn & 0x5000) == 0x4000)
	    /* For a BLX instruction, make sure that the relocation is rounded up
	       to a word boundary.  This follows the semantics of the instruction
	       which specifies that bit 1 of the target address will come from bit
	       1 of the base address.  */
	    relocation = (relocation + 2) & ~ 3;

	  /* Put RELOCATION back into the insn.  Assumes two's complement.
	     We use the Thumb-2 encoding, which is safe even if dealing with
	     a Thumb-1 instruction by virtue of our overflow check above.  */
	  reloc_sign = (signed_check < 0) ? 1 : 0;
	  upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
		       | ((relocation >> 12) & 0x3ff)
		       | (reloc_sign << 10);
	  lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
		       | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
		       | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
		       | ((relocation >> 1) & 0x7ff);

	  /* Put the relocated value back in the object file:  */
	  bfd_put_16 (input_bfd, upper_insn, hit_data);
	  bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	  return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
	}
      }

    case R_ARM_THM_JUMP19:
      /* Thumb32 conditional branch instruction.  */
      {
	bfd_vma relocation;
	bool overflow = false;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	bfd_signed_vma reloc_signed_max = 0xffffe;
	bfd_signed_vma reloc_signed_min = -0x100000;
	bfd_signed_vma signed_check;
	enum elf32_arm_stub_type stub_type = arm_stub_none;
	struct elf32_arm_stub_hash_entry *stub_entry;
	struct elf32_arm_link_hash_entry *hash;

	/* Need to refetch the addend, reconstruct the top three bits,
	   and squish the two 11 bit pieces together.  */
	if (globals->use_rel)
	  {
	    bfd_vma S  = (upper_insn & 0x0400) >> 10;
	    bfd_vma upper = (upper_insn & 0x003f);
	    bfd_vma J1 = (lower_insn & 0x2000) >> 13;
	    bfd_vma J2 = (lower_insn & 0x0800) >> 11;
	    bfd_vma lower = (lower_insn & 0x07ff);

	    upper |= J1 << 6;
	    upper |= J2 << 7;
	    upper |= (!S) << 8;
	    upper -= 0x0100; /* Sign extend.  */

	    addend = (upper << 12) | (lower << 1);
	    signed_addend = addend;
	  }

	/* Handle calls via the PLT.  */
	if (plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);
	    /* Target the Thumb stub before the ARM PLT entry.  */
	    value -= PLT_THUMB_STUB_SIZE;
	    *unresolved_reloc_p = false;
	  }

	hash = (struct elf32_arm_link_hash_entry *)h;

	stub_type = arm_type_of_stub (info, input_section, rel,
				      st_type, &branch_type,
				      hash, value, sym_sec,
				      input_bfd, sym_name);
	if (stub_type != arm_stub_none)
	  {
	    stub_entry = elf32_arm_get_stub_entry (input_section,
						   sym_sec, h,
						   rel, globals,
						   stub_type);
	    if (stub_entry != NULL)
	      value = (stub_entry->stub_offset
		       + stub_entry->stub_sec->output_offset
		       + stub_entry->stub_sec->output_section->vma);
	  }

	relocation = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);
	signed_check = (bfd_signed_vma) relocation;

	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = true;

	/* Put RELOCATION back into the insn.  */
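	/* The Thumb-2 conditional-branch immediate is scattered across the
	   halfword pair: S in bit 10 of the first halfword, imm6 in its low
	   six bits, and J1 (bit 13), J2 (bit 11) and imm11 in the second.  */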
	{
	  bfd_vma S  = (relocation & 0x00100000) >> 20;
	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
	  bfd_vma lo = (relocation & 0x00000ffe) >> 1;

	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }

    case R_ARM_THM_JUMP11:
    case R_ARM_THM_JUMP8:
    case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction).  */
      {
	bfd_signed_vma relocation;
	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
	bfd_signed_vma signed_check;

	/* CZB cannot jump backward.  */
	if (r_type == R_ARM_THM_JUMP6)
	  {
	    reloc_signed_min = 0;
	    if (globals->use_rel)
	      signed_addend = ((addend & 0x200) >> 3) | ((addend & 0xf8) >> 2);
	  }

	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	relocation >>= howto->rightshift;
	signed_check = relocation;

	if (r_type == R_ARM_THM_JUMP6)
	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
	else
	  relocation &= howto->dst_mask;
	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));

	bfd_put_16 (input_bfd, relocation, hit_data);

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  return bfd_reloc_overflow;

	return bfd_reloc_ok;
      }

    case R_ARM_ALU_PCREL7_0:
    case R_ARM_ALU_PCREL15_8:
    case R_ARM_ALU_PCREL23_15:
      {
	bfd_vma insn;
	bfd_vma relocation;

	insn = bfd_get_32 (input_bfd, hit_data);
	if (globals->use_rel)
	  {
	    /* Extract the addend.  */
	    addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
	    signed_addend = addend;
	  }
	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);
	insn = (insn & ~0xfff)
	       | ((howto->bitpos << 7) & 0xf00)
	       | ((relocation >> howto->bitpos) & 0xff);
	bfd_put_32 (input_bfd, value, hit_data);
      }
      return bfd_reloc_ok;

    case R_ARM_GNU_VTINHERIT:
    case R_ARM_GNU_VTENTRY:
      return bfd_reloc_ok;

    case R_ARM_GOTOFF32:
      /* Relocation is relative to the start of the
	 global offset table.  */

      BFD_ASSERT (sgot != NULL);
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* If we are addressing a Thumb function, we need to adjust the
	 address by one, so that attempts to call the function pointer will
	 correctly interpret it as Thumb code.  */
      if (branch_type == ST_BRANCH_TO_THUMB)
	value += 1;

      /* Note that sgot->output_offset is not involved in this
	 calculation.  We always want the start of .got.  If we
	 define _GLOBAL_OFFSET_TABLE in a different way, as is
	 permitted by the ABI, we might have to change this
	 calculation.  */
      value -= sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);

    case R_ARM_BASE_PREL:
      /* Use global offset table as symbol value.  */
      BFD_ASSERT (sgot != NULL);

      if (sgot == NULL)
	return bfd_reloc_notsupported;

      *unresolved_reloc_p = false;
      value = sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);

    case R_ARM_GOT32:
    case R_ARM_GOT_PREL:
      /* Relocation is to the entry for this symbol in the
	 global offset table.  */
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      if (dynreloc_st_type == STT_GNU_IFUNC
	  && plt_offset != (bfd_vma) -1
	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
	{
	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
	     symbol, and the relocation resolves directly to the runtime
	     target rather than to the .iplt entry.  This means that any
	     .got entry would be the same value as the .igot.plt entry,
	     so there's no point creating both.  */
	  sgot = globals->root.igotplt;
	  value = sgot->output_offset + gotplt_offset;
	}
      else if (h != NULL)
	{
	  bfd_vma off;

	  off = h->got.offset;
	  BFD_ASSERT (off != (bfd_vma) -1);
	  if ((off & 1) != 0)
	    {
	      /* We have already processed one GOT relocation against
		 this symbol.  */
	      off &= ~1;
	      if (globals->root.dynamic_sections_created
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		*unresolved_reloc_p = false;
	    }
	  else
	    {
	      Elf_Internal_Rela outrel;
	      int isrofixup = 0;

	      if (((h->dynindx != -1) || globals->fdpic_p)
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		{
		  /* If the symbol doesn't resolve locally in a static
		     object, we have an undefined reference.  If the
		     symbol doesn't resolve locally in a dynamic object,
		     it should be resolved by the dynamic linker.  */
		  if (globals->root.dynamic_sections_created)
		    {
		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
		      *unresolved_reloc_p = false;
		    }
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = 0;
		}
	      else
		{
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else if (bfd_link_pic (info)
			   && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  else
		    {
		      outrel.r_info = 0;
		      if (globals->fdpic_p)
			isrofixup = 1;
		    }
		  outrel.r_addend = dynreloc_value;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (outrel.r_addend != 0
		  && (globals->use_rel || outrel.r_info == 0))
		{
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);
		  outrel.r_addend = 0;
		}

	      if (isrofixup)
		arm_elf_add_rofixup (output_bfd,
				     elf32_arm_hash_table (info)->srofixup,
				     sgot->output_section->vma
				     + sgot->output_offset + off);
	      else if (outrel.r_info != 0)
		{
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      h->got.offset |= 1;
	    }
	  value = sgot->output_offset + off;
	}
      else
	{
	  bfd_vma off;

	  BFD_ASSERT (local_got_offsets != NULL
		      && local_got_offsets[r_symndx] != (bfd_vma) -1);

	  off = local_got_offsets[r_symndx];

	  /* The offset must always be a multiple of 4.  We use the
	     least significant bit to record whether we have already
	     generated the necessary reloc.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      Elf_Internal_Rela outrel;
	      int isrofixup = 0;

	      if (dynreloc_st_type == STT_GNU_IFUNC)
		outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	      else if (bfd_link_pic (info))
		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
	      else
		{
		  outrel.r_info = 0;
		  if (globals->fdpic_p)
		    isrofixup = 1;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (globals->use_rel || outrel.r_info == 0)
		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);

	      if (isrofixup)
		arm_elf_add_rofixup (output_bfd,
				     globals->srofixup,
				     sgot->output_section->vma
				     + sgot->output_offset + off);
	      else if (outrel.r_info != 0)
		{
		  outrel.r_addend = addend + dynreloc_value;
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      local_got_offsets[r_symndx] |= 1;
	    }

	  value = sgot->output_offset + off;
	}
      if (r_type != R_ARM_GOT32)
	value += sgot->output_section->vma;

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);

    case R_ARM_TLS_LDO32:
      value = value - dtpoff_base (info);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);

    case R_ARM_TLS_LDM32:
    case R_ARM_TLS_LDM32_FDPIC:
      {
	bfd_vma off;

	off = globals->tls_ldm_got.offset;

	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    /* If we don't know the module number, create a relocation
	       for it.  */
	    if (bfd_link_dll (info))
	      {
		Elf_Internal_Rela outrel;

		if (srelgot == NULL)
		  abort ();

		outrel.r_addend = 0;
		outrel.r_offset = (sgot->output_section->vma
				   + sgot->output_offset + off);
		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);

		if (globals->use_rel)
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);

		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	    else
	      bfd_put_32 (output_bfd, 1, sgot->contents + off);

	    globals->tls_ldm_got.offset |= 1;
	  }

	if (r_type == R_ARM_TLS_LDM32_FDPIC)
	  {
	    bfd_put_32 (output_bfd,
			globals->root.sgot->output_offset + off,
			contents + rel->r_offset);

	    return bfd_reloc_ok;
	  }
	else
	  {
	    value = sgot->output_section->vma + sgot->output_offset + off
		    - (input_section->output_section->vma
		       + input_section->output_offset + rel->r_offset);

	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
					     contents, rel->r_offset, value,
					     rel->r_addend);
	  }
      }

    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_GD32:
    case R_ARM_TLS_GD32_FDPIC:
    case R_ARM_TLS_IE32:
    case R_ARM_TLS_IE32_FDPIC:
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      {
	bfd_vma off, offplt;
	int indx = 0;
	char tls_type;

	BFD_ASSERT (sgot != NULL);

	if (h != NULL)
	  {
	    bool dyn;

	    dyn = globals->root.dynamic_sections_created;
	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
						 bfd_link_pic (info),
						 h)
		&& (!bfd_link_pic (info)
		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
	      {
		*unresolved_reloc_p = false;
		indx = h->dynindx;
	      }
	    off = h->got.offset;
	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
	  }
	else
	  {
	    BFD_ASSERT (local_got_offsets != NULL);

	    if (r_symndx >= elf32_arm_num_entries (input_bfd))
	      {
		_bfd_error_handler (_("\
%pB: expected symbol index in range 0..%lu but found local symbol with index %lu"),
				    input_bfd,
				    (unsigned long) elf32_arm_num_entries (input_bfd),
				    (unsigned long) r_symndx);
	      }
	    off = local_got_offsets[r_symndx];
	    offplt = local_tlsdesc_gotents[r_symndx];
	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
	  }

	/* Linker relaxations happens from one of the
	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
	if (ELF32_R_TYPE (rel->r_info) != r_type)
	  tls_type = GOT_TLS_IE;

	BFD_ASSERT (tls_type != GOT_UNKNOWN);

	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    bool need_relocs = false;
	    Elf_Internal_Rela outrel;
	    bfd_vma cur_off = off;

	    /* The GOT entries have not been initialized yet.  Do it
	       now, and emit any relocations.  If both an IE GOT and a
	       GD GOT are necessary, we emit the GD first.  */

	    if ((bfd_link_dll (info) || indx != 0)
		&& (h == NULL
		    || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			&& !resolved_to_zero)
		    || h->root.type != bfd_link_hash_undefweak))
	      {
		need_relocs = true;
		BFD_ASSERT (srelgot != NULL);
	      }

	    if (tls_type & GOT_TLS_GDESC)
	      {
		bfd_byte *loc;

		/* We should have relaxed, unless this is an undefined
		   weak symbol.  */
		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
			    || bfd_link_dll (info));
		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
			    <= globals->root.sgotplt->size);

		outrel.r_addend = 0;
		outrel.r_offset = (globals->root.sgotplt->output_section->vma
				   + globals->root.sgotplt->output_offset
				   + offplt
				   + globals->sgotplt_jump_table_size);

		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
		sreloc = globals->root.srelplt;
		loc = sreloc->contents;
		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
		BFD_ASSERT (loc + RELOC_SIZE (globals)
			    <= sreloc->contents + sreloc->size);

		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);

		/* For globals, the first word in the relocation gets
		   the relocation index and the top bit set, or zero,
		   if we're binding now.  For locals, it gets the
		   symbol's offset in the tls section.  */
		bfd_put_32 (output_bfd,
			    !h ? value - elf_hash_table (info)->tls_sec->vma
			    : info->flags & DF_BIND_NOW ? 0
			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size);

		/* Second word in the relocation is always zero.  */
		bfd_put_32 (output_bfd, 0,
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size + 4);
	      }
	    if (tls_type & GOT_TLS_GD)
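	      /* A GD GOT entry occupies two words: the module ID
		 (R_ARM_TLS_DTPMOD32) followed by the symbol's offset within
		 that module's TLS block (R_ARM_TLS_DTPOFF32).  */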
	      {
		if (need_relocs)
		  {
		    outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);

		    if (indx == 0)
		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
				  sgot->contents + cur_off + 4);
		    else
		      {
			outrel.r_addend = 0;
			outrel.r_info = ELF32_R_INFO (indx,
						      R_ARM_TLS_DTPOFF32);
			outrel.r_offset += 4;

			if (globals->use_rel)
			  bfd_put_32 (output_bfd, outrel.r_addend,
				      sgot->contents + cur_off + 4);

			elf32_arm_add_dynreloc (output_bfd, info,
						srelgot, &outrel);
		      }
		  }
		else
		  {
		    /* If we are not emitting relocations for a
		       general dynamic reference, then we must be in a
		       static link or an executable link with the
		       symbol binding locally.  Mark it as belonging
		       to module 1, the executable.  */
		    bfd_put_32 (output_bfd, 1,
				sgot->contents + cur_off);
		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
				sgot->contents + cur_off + 4);
		  }

		cur_off += 8;
	      }

	    if (tls_type & GOT_TLS_IE)
	      {
		if (need_relocs)
		  {
		    if (indx == 0)
		      outrel.r_addend = value - dtpoff_base (info);
		    else
		      outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		  }
		else
		  bfd_put_32 (output_bfd, tpoff (info, value),
			      sgot->contents + cur_off);
		cur_off += 4;
	      }

	    if (h != NULL)
	      h->got.offset |= 1;
	    else
	      local_got_offsets[r_symndx] |= 1;
	  }

	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
	  off += 8;
	else if (tls_type & GOT_TLS_GDESC)
	  off = offplt;

	if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
	    || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL)
	  {
	    bfd_signed_vma offset;
	    /* TLS stubs are arm mode.  The original symbol is a
	       data object, so branch_type is bogus.  */
	    branch_type = ST_BRANCH_TO_ARM;
	    enum elf32_arm_stub_type stub_type
	      = arm_type_of_stub (info, input_section, rel,
				  st_type, &branch_type,
				  (struct elf32_arm_link_hash_entry *)h,
				  globals->tls_trampoline, globals->root.splt,
				  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		struct elf32_arm_stub_hash_entry *stub_entry
		  = elf32_arm_get_stub_entry
		    (input_section, globals->root.splt, 0, rel,
		     globals, stub_type);
		offset = (stub_entry->stub_offset
			  + stub_entry->stub_sec->output_offset
			  + stub_entry->stub_sec->output_section->vma);
	      }
	    else
	      offset = (globals->root.splt->output_section->vma
			+ globals->root.splt->output_offset
			+ globals->tls_trampoline);

	    if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL)
	      {
		unsigned long inst;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8);

		inst = offset >> 2;
		inst &= 0x00ffffff;
		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
	      }
	    else
	      {
		/* Thumb blx encodes the offset in a complicated
		   fashion.  */
		unsigned upper_insn, lower_insn;
		unsigned neg;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 4);

		if (stub_type != arm_stub_none
		    && arm_stub_is_thumb (stub_type))
		  {
		    lower_insn = 0xd000;
		  }
		else
		  {
		    lower_insn = 0xc000;
		    /* Round up the offset to a word boundary.  */
		    offset = (offset + 2) & ~2;
		  }

		neg = offset < 0;
		upper_insn = (0xf000
			      | ((offset >> 12) & 0x3ff)
			      | (neg << 10));
		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
			      | ((offset >> 1) & 0x7ff);
		bfd_put_16 (input_bfd, upper_insn, hit_data);
		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
		return bfd_reloc_ok;
	      }
	  }
	/* These relocations needs special care, as besides the fact
	   they point somewhere in .gotplt, the addend must be
	   adjusted accordingly depending on the type of instruction
	   we refer to.  */
	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
	  {
	    unsigned long data, insn;
	    unsigned thumb;

	    data = bfd_get_signed_32 (input_bfd, hit_data);
	    thumb = data & 1;
	    data &= ~1ul;

	    if (thumb)
	      {
		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
		  insn = (insn << 16)
			 | bfd_get_16 (input_bfd,
				       contents + rel->r_offset - data + 2);
		if ((insn & 0xf800c000) == 0xf000c000)
		  ;
		else if ((insn & 0xffffff00) == 0x4400)
		  ;
		else
		  {
		    _bfd_error_handler
		      /* xgettext:c-format */
		      (_("%pB(%pA+%#" PRIx64 "): "
			 "unexpected %s instruction '%#lx' "
			 "referenced by TLS_GOTDESC"),
		       input_bfd, input_section, (uint64_t) rel->r_offset,
		       "Thumb", insn);
		    return bfd_reloc_notsupported;
		  }
	      }
	    else
	      {
		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);

		switch (insn >> 24)
		  {
		  case 0xeb:  /* bl */
		  case 0xfa:  /* blx */
		    break;

		  case 0xe0:  /* add */
		    break;

		  default:
		    _bfd_error_handler
		      /* xgettext:c-format */
		      (_("%pB(%pA+%#" PRIx64 "): "
			 "unexpected %s instruction '%#lx' "
			 "referenced by TLS_GOTDESC"),
		       input_bfd, input_section, (uint64_t) rel->r_offset,
		       "ARM", insn);
		    return bfd_reloc_notsupported;
		  }
	      }

	    value += ((globals->root.sgotplt->output_section->vma
		       + globals->root.sgotplt->output_offset + off)
		      - (input_section->output_section->vma
			 + input_section->output_offset
			 + rel->r_offset)
		      + globals->sgotplt_jump_table_size);
	  }
	else
	  value = ((globals->root.sgot->output_section->vma
		    + globals->root.sgot->output_offset + off)
		   - (input_section->output_section->vma
		      + input_section->output_offset + rel->r_offset));

	if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
				 r_type == R_ARM_TLS_IE32_FDPIC))
	  {
	    /* For FDPIC relocations, resolve to the offset of the GOT
	       entry from the start of GOT.  */
	    bfd_put_32 (output_bfd,
			globals->root.sgot->output_offset + off,
			contents + rel->r_offset);

	    return bfd_reloc_ok;
	  }
	else
	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
					   contents, rel->r_offset, value,
					   rel->r_addend);
      }

    case R_ARM_TLS_LE32:
      if (bfd_link_dll (info))
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
	       "in shared object"),
	     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
	  return bfd_reloc_notsupported;
	}
      else
	value = tpoff (info, value);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);

    case R_ARM_V4BX:
      if (globals->fix_v4bx)
	{
	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	  /* Ensure that we have a BX instruction.  */
	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);

	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
	    {
	      /* Branch to veneer.  */
	      bfd_vma glue_addr;

	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
	      glue_addr -= input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8;
	      insn = (insn & 0xf0000000) | 0x0a000000
		     | ((glue_addr >> 2) & 0x00ffffff);
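	      /* The BX is rewritten as a branch to the veneer: the original
		 condition code is kept in bits 28-31 and the word offset
		 (relative to PC+8) goes into the 24-bit branch field.  */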
	    }
	  else
	    {
	      /* Preserve Rm (lowest four bits) and the condition code
		 (highest four bits).  Other bits encode MOV PC,Rm.  */
	      insn = (insn & 0xf000000f) | 0x01a0f000;
	    }

	  bfd_put_32 (input_bfd, insn, hit_data);
	}
      return bfd_reloc_ok;

    case R_ARM_MOVW_ABS_NC:
    case R_ARM_MOVT_ABS:
    case R_ARM_MOVW_PREL_NC:
    case R_ARM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the group relocations.
       Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
       and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
    case R_ARM_MOVW_BREL_NC:
    case R_ARM_MOVW_BREL:
    case R_ARM_MOVT_BREL:
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	if (globals->use_rel)
	  {
	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
	    || r_type == R_ARM_MOVT_BREL)
	  value >>= 16;
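	/* The (possibly shifted) 16-bit value is split across the ARM
	   MOVW/MOVT encoding as imm4:imm12 - imm12 in bits 0-11 and imm4 in
	   bits 16-19 of the instruction.  */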
	insn &= 0xfff0f000;
	insn |= value & 0xfff;
	insn |= (value & 0xf000) << 4;
	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;

    case R_ARM_THM_MOVW_ABS_NC:
    case R_ARM_THM_MOVT_ABS:
    case R_ARM_THM_MOVW_PREL_NC:
    case R_ARM_THM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the above relocations.
       Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
       R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
       as R_ARM_THM_MOVT_ABS.  */
    case R_ARM_THM_MOVW_BREL_NC:
    case R_ARM_THM_MOVW_BREL:
    case R_ARM_THM_MOVT_BREL:
      {
	bfd_vma insn;

	insn = bfd_get_16 (input_bfd, hit_data) << 16;
	insn |= bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    addend = ((insn >> 4)  & 0xf000)
		     | ((insn >> 15) & 0x0800)
		     | ((insn >> 4)  & 0x0700)
		     | (insn	     & 0x00ff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
	    || r_type == R_ARM_THM_MOVT_BREL)
	  value >>= 16;
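	/* Thumb-2 MOVW/MOVT scatter the 16-bit value as imm4:i:imm3:imm8 -
	   imm4 in bits 16-19, i in bit 26, imm3 in bits 12-14 and imm8 in
	   bits 0-7 of the combined 32-bit encoding.  */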
	insn &= 0xfbf08f00;
	insn |= (value & 0xf000) << 4;
	insn |= (value & 0x0800) << 15;
	insn |= (value & 0x0700) << 4;
	insn |= (value & 0x00ff);

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
      }
      return bfd_reloc_ok;

    case R_ARM_ALU_PC_G0_NC:
    case R_ARM_ALU_PC_G1_NC:
    case R_ARM_ALU_PC_G0:
    case R_ARM_ALU_PC_G1:
    case R_ARM_ALU_PC_G2:
    case R_ARM_ALU_SB_G0_NC:
    case R_ARM_ALU_SB_G1_NC:
    case R_ARM_ALU_SB_G0:
    case R_ARM_ALU_SB_G1:
    case R_ARM_ALU_SB_G2:
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_vma g_n;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which group of bits to select.  */
	switch (r_type)
	  {
	  case R_ARM_ALU_PC_G0_NC:
	  case R_ARM_ALU_PC_G0:
	  case R_ARM_ALU_SB_G0_NC:
	  case R_ARM_ALU_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_ALU_PC_G1_NC:
	  case R_ARM_ALU_PC_G1:
	  case R_ARM_ALU_SB_G1_NC:
	  case R_ARM_ALU_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_ALU_PC_G2:
	  case R_ARM_ALU_SB_G2:
	    group = 2;
	    break;
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    int negative;
	    bfd_vma constant = insn & 0xff;
	    bfd_vma rotation = (insn & 0xf00) >> 8;

	    if (rotation == 0)
	      signed_addend = constant;
	    else
	      {
		/* Compensate for the fact that in the instruction, the
		   rotation is stored in multiples of 2 bits.  */
		rotation *= 2;

		/* Rotate "constant" right by "rotation" bits.  */
		signed_addend = (constant >> rotation) |
				(constant << (8 * sizeof (bfd_vma) - rotation));
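		/* An ARM modified immediate is an 8-bit constant rotated
		   right by twice the 4-bit rotation field; the rotation is
		   undone here so SIGNED_ADDEND holds the plain value.  */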
12332 /* Determine if the instruction is an ADD or a SUB.
12333 (For REL, this determines the sign of the addend.) */
12334 negative
= identify_add_or_sub (insn
);
12338 /* xgettext:c-format */
12339 (_("%pB(%pA+%#" PRIx64
"): only ADD or SUB instructions "
12340 "are allowed for ALU group relocations"),
12341 input_bfd
, input_section
, (uint64_t) rel
->r_offset
);
12342 return bfd_reloc_overflow
;
12345 signed_addend
*= negative
;
12348 /* Compute the value (X) to go in the place. */
12349 if (r_type
== R_ARM_ALU_PC_G0_NC
12350 || r_type
== R_ARM_ALU_PC_G1_NC
12351 || r_type
== R_ARM_ALU_PC_G0
12352 || r_type
== R_ARM_ALU_PC_G1
12353 || r_type
== R_ARM_ALU_PC_G2
)
12355 signed_value
= value
- pc
+ signed_addend
;
12357 /* Section base relative. */
12358 signed_value
= value
- sb
+ signed_addend
;
12360 /* If the target symbol is a Thumb function, then set the
12361 Thumb bit in the address. */
12362 if (branch_type
== ST_BRANCH_TO_THUMB
)
12365 /* Calculate the value of the relevant G_n, in encoded
12366 constant-with-rotation format. */
12367 g_n
= calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12370 /* Check for overflow if required. */
12371 if ((r_type
== R_ARM_ALU_PC_G0
12372 || r_type
== R_ARM_ALU_PC_G1
12373 || r_type
== R_ARM_ALU_PC_G2
12374 || r_type
== R_ARM_ALU_SB_G0
12375 || r_type
== R_ARM_ALU_SB_G1
12376 || r_type
== R_ARM_ALU_SB_G2
) && residual
!= 0)
12379 /* xgettext:c-format */
12380 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12381 "splitting %#" PRIx64
" for group relocation %s"),
12382 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12383 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12385 return bfd_reloc_overflow
;
12388 /* Mask out the value and the ADD/SUB part of the opcode; take care
12389 not to destroy the S bit. */
12390 insn
&= 0xff1ff000;
12392 /* Set the opcode according to whether the value to go in the
12393 place is negative. */
12394 if (signed_value
< 0)
12399 /* Encode the offset. */
12402 bfd_put_32 (input_bfd
, insn
, hit_data
);
12404 return bfd_reloc_ok
;
12406 case R_ARM_LDR_PC_G0
:
12407 case R_ARM_LDR_PC_G1
:
12408 case R_ARM_LDR_PC_G2
:
12409 case R_ARM_LDR_SB_G0
:
12410 case R_ARM_LDR_SB_G1
:
12411 case R_ARM_LDR_SB_G2
:
12413 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12414 bfd_vma pc
= input_section
->output_section
->vma
12415 + input_section
->output_offset
+ rel
->r_offset
;
12416 /* sb is the origin of the *segment* containing the symbol. */
12417 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12419 bfd_signed_vma signed_value
;
12422 /* Determine which groups of bits to calculate. */
12425 case R_ARM_LDR_PC_G0
:
12426 case R_ARM_LDR_SB_G0
:
12430 case R_ARM_LDR_PC_G1
:
12431 case R_ARM_LDR_SB_G1
:
12435 case R_ARM_LDR_PC_G2
:
12436 case R_ARM_LDR_SB_G2
:
12444 /* If REL, extract the addend from the insn. If RELA, it will
12445 have already been fetched for us. */
12446 if (globals
->use_rel
)
12448 int negative
= (insn
& (1 << 23)) ? 1 : -1;
12449 signed_addend
= negative
* (insn
& 0xfff);
12452 /* Compute the value (X) to go in the place. */
12453 if (r_type
== R_ARM_LDR_PC_G0
12454 || r_type
== R_ARM_LDR_PC_G1
12455 || r_type
== R_ARM_LDR_PC_G2
)
12457 signed_value
= value
- pc
+ signed_addend
;
12459 /* Section base relative. */
12460 signed_value
= value
- sb
+ signed_addend
;
12462 /* Calculate the value of the relevant G_{n-1} to obtain
12463 the residual at that stage. */
12464 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12465 group
- 1, &residual
);
12467 /* Check for overflow. */
12468 if (residual
>= 0x1000)
12471 /* xgettext:c-format */
12472 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12473 "splitting %#" PRIx64
" for group relocation %s"),
12474 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12475 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12477 return bfd_reloc_overflow
;
12480 /* Mask out the value and U bit. */
12481 insn
&= 0xff7ff000;
12483 /* Set the U bit if the value to go in the place is non-negative. */
12484 if (signed_value
>= 0)
12487 /* Encode the offset. */
12490 bfd_put_32 (input_bfd
, insn
, hit_data
);
12492 return bfd_reloc_ok
;
12494 case R_ARM_LDRS_PC_G0
:
12495 case R_ARM_LDRS_PC_G1
:
12496 case R_ARM_LDRS_PC_G2
:
12497 case R_ARM_LDRS_SB_G0
:
12498 case R_ARM_LDRS_SB_G1
:
12499 case R_ARM_LDRS_SB_G2
:
12501 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12502 bfd_vma pc
= input_section
->output_section
->vma
12503 + input_section
->output_offset
+ rel
->r_offset
;
12504 /* sb is the origin of the *segment* containing the symbol. */
12505 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12507 bfd_signed_vma signed_value
;
12510 /* Determine which groups of bits to calculate. */
12513 case R_ARM_LDRS_PC_G0
:
12514 case R_ARM_LDRS_SB_G0
:
12518 case R_ARM_LDRS_PC_G1
:
12519 case R_ARM_LDRS_SB_G1
:
12523 case R_ARM_LDRS_PC_G2
:
12524 case R_ARM_LDRS_SB_G2
:
12532 /* If REL, extract the addend from the insn. If RELA, it will
12533 have already been fetched for us. */
12534 if (globals
->use_rel
)
12536 int negative
= (insn
& (1 << 23)) ? 1 : -1;
12537 signed_addend
= negative
* (((insn
& 0xf00) >> 4) + (insn
& 0xf));
12540 /* Compute the value (X) to go in the place. */
12541 if (r_type
== R_ARM_LDRS_PC_G0
12542 || r_type
== R_ARM_LDRS_PC_G1
12543 || r_type
== R_ARM_LDRS_PC_G2
)
12545 signed_value
= value
- pc
+ signed_addend
;
12547 /* Section base relative. */
12548 signed_value
= value
- sb
+ signed_addend
;
12550 /* Calculate the value of the relevant G_{n-1} to obtain
12551 the residual at that stage. */
12552 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12553 group
- 1, &residual
);
12555 /* Check for overflow. */
12556 if (residual
>= 0x100)
12559 /* xgettext:c-format */
12560 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12561 "splitting %#" PRIx64
" for group relocation %s"),
12562 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12563 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12565 return bfd_reloc_overflow
;
12568 /* Mask out the value and U bit. */
12569 insn
&= 0xff7ff0f0;
12571 /* Set the U bit if the value to go in the place is non-negative. */
12572 if (signed_value
>= 0)
12575 /* Encode the offset. */
12576 insn
|= ((residual
& 0xf0) << 4) | (residual
& 0xf);
12578 bfd_put_32 (input_bfd
, insn
, hit_data
);
12580 return bfd_reloc_ok
;
	case R_ARM_LDC_PC_G0:
	case R_ARM_LDC_PC_G1:
	case R_ARM_LDC_PC_G2:
	case R_ARM_LDC_SB_G0:
	case R_ARM_LDC_SB_G1:
	case R_ARM_LDC_SB_G2:
	  {
	    bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	    bfd_vma pc = input_section->output_section->vma
			 + input_section->output_offset + rel->r_offset;
	    /* sb is the origin of the *segment* containing the symbol.  */
	    bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	    bfd_vma residual;
	    bfd_signed_vma signed_value;
	    int group = 0;

	    /* Determine which groups of bits to calculate.  */
	    switch (r_type)
	      {
	      case R_ARM_LDC_PC_G0:
	      case R_ARM_LDC_SB_G0:
		group = 0;
		break;

	      case R_ARM_LDC_PC_G1:
	      case R_ARM_LDC_SB_G1:
		group = 1;
		break;

	      case R_ARM_LDC_PC_G2:
	      case R_ARM_LDC_SB_G2:
		group = 2;
		break;

	      default:
		abort ();
	      }

	    /* If REL, extract the addend from the insn.  If RELA, it will
	       have already been fetched for us.  */
	    if (globals->use_rel)
	      {
		int negative = (insn & (1 << 23)) ? 1 : -1;
		signed_addend = negative * ((insn & 0xff) << 2);
	      }

	    /* Compute the value (X) to go in the place.  */
	    if (r_type == R_ARM_LDC_PC_G0
		|| r_type == R_ARM_LDC_PC_G1
		|| r_type == R_ARM_LDC_PC_G2)
	      /* PC relative.  */
	      signed_value = value - pc + signed_addend;
	    else
	      /* Section base relative.  */
	      signed_value = value - sb + signed_addend;

	    /* Calculate the value of the relevant G_{n-1} to obtain
	       the residual at that stage.  */
	    calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
					group - 1, &residual);

	    /* Check for overflow.  (The absolute value to go in the place must be
	       divisible by four and, after having been divided by four, must
	       fit in eight bits.)  */
	    if ((residual & 0x3) != 0 || residual >= 0x400)
	      {
		_bfd_error_handler
		  /* xgettext:c-format */
		  (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		     "splitting %#" PRIx64 " for group relocation %s"),
		   input_bfd, input_section, (uint64_t) rel->r_offset,
		   (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
		   howto->name);
		return bfd_reloc_overflow;
	      }

	    /* Mask out the value and U bit.  */
	    insn &= 0xff7fff00;

	    /* Set the U bit if the value to go in the place is non-negative.  */
	    if (signed_value >= 0)
	      insn |= 1 << 23;

	    /* Encode the offset.  */
	    insn |= residual >> 2;

	    bfd_put_32 (input_bfd, insn, hit_data);
	  }
	  return bfd_reloc_ok;
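	/* The three load/store group reloc families above differ only in the
	   immediate field the residual must fit: LDR/STR take a 12-bit byte
	   offset, the LDRS forms (halfword, signed-byte and doubleword
	   loads/stores) take an 8-bit offset split across two 4-bit fields,
	   and LDC/STC take an 8-bit offset scaled by four; hence the limits
	   of 0x1000, 0x100 and 0x400 checked in the respective cases.  */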
	case R_ARM_THM_ALU_ABS_G0_NC:
	case R_ARM_THM_ALU_ABS_G1_NC:
	case R_ARM_THM_ALU_ABS_G2_NC:
	case R_ARM_THM_ALU_ABS_G3_NC:
	  {
	    const int shift_array[4] = {0, 8, 16, 24};
	    bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
	    bfd_vma addr = value;
	    int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];

	    /* Compute address.  */
	    if (globals->use_rel)
	      signed_addend = insn & 0xff;
	    addr += signed_addend;
	    if (branch_type == ST_BRANCH_TO_THUMB)
	      addr |= 1;
	    /* Clean imm8 insn.  */
	    insn &= 0xff00;
	    /* And update with correct part of address.  */
	    insn |= (addr >> shift) & 0xff;

	    bfd_put_16 (input_bfd, insn, hit_data);
	  }

	  *unresolved_reloc_p = false;
	  return bfd_reloc_ok;
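	/* The R_ARM_THM_ALU_ABS_Gx_NC relocations above each deposit one byte
	   of the (possibly Thumb-bit adjusted) address into the imm8 field of
	   a 16-bit Thumb instruction, selected by SHIFT (0, 8, 16 or 24), so a
	   sequence of four such instructions can build up a full 32-bit
	   address one byte at a time.  */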
12699 case R_ARM_GOTOFFFUNCDESC
:
12703 struct fdpic_local
*local_fdpic_cnts
= elf32_arm_local_fdpic_cnts (input_bfd
);
12704 int dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12706 if (r_symndx
>= elf32_arm_num_entries (input_bfd
))
12708 * error_message
= _("local symbol index too big");
12709 return bfd_reloc_dangerous
;
12712 int offset
= local_fdpic_cnts
[r_symndx
].funcdesc_offset
& ~1;
12713 bfd_vma addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12716 if (bfd_link_pic (info
) && dynindx
== 0)
12718 * error_message
= _("no dynamic index information available");
12719 return bfd_reloc_dangerous
;
12722 /* Resolve relocation. */
12723 bfd_put_32 (output_bfd
, (offset
+ sgot
->output_offset
)
12724 , contents
+ rel
->r_offset
);
12725 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12727 arm_elf_fill_funcdesc (output_bfd
, info
,
12728 &local_fdpic_cnts
[r_symndx
].funcdesc_offset
,
12729 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12734 int offset
= eh
->fdpic_cnts
.funcdesc_offset
& ~1;
12738 /* For static binaries, sym_sec can be null. */
12741 dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12742 addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12750 if (bfd_link_pic (info
) && dynindx
== 0)
12752 * error_message
= _("no dynamic index information available");
12753 return bfd_reloc_dangerous
;
12756 /* This case cannot occur since funcdesc is allocated by
12757 the dynamic loader so we cannot resolve the relocation. */
12758 if (h
->dynindx
!= -1)
12760 * error_message
= _("invalid dynamic index");
12761 return bfd_reloc_dangerous
;
12764 /* Resolve relocation. */
12765 bfd_put_32 (output_bfd
, (offset
+ sgot
->output_offset
),
12766 contents
+ rel
->r_offset
);
12767 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12768 arm_elf_fill_funcdesc (output_bfd
, info
,
12769 &eh
->fdpic_cnts
.funcdesc_offset
,
12770 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12773 *unresolved_reloc_p
= false;
12774 return bfd_reloc_ok
;
12776 case R_ARM_GOTFUNCDESC
:
12780 Elf_Internal_Rela outrel
;
12782 /* Resolve relocation. */
12783 bfd_put_32 (output_bfd
, ((eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1)
12784 + sgot
->output_offset
),
12785 contents
+ rel
->r_offset
);
12786 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12787 if (h
->dynindx
== -1)
12790 int offset
= eh
->fdpic_cnts
.funcdesc_offset
& ~1;
12794 /* For static binaries sym_sec can be null. */
12797 dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12798 addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12806 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12807 arm_elf_fill_funcdesc (output_bfd
, info
,
12808 &eh
->fdpic_cnts
.funcdesc_offset
,
12809 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12812 /* Add a dynamic relocation on GOT entry if not already done. */
12813 if ((eh
->fdpic_cnts
.gotfuncdesc_offset
& 1) == 0)
12815 if (h
->dynindx
== -1)
12817 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
12818 if (h
->root
.type
== bfd_link_hash_undefweak
)
12819 bfd_put_32 (output_bfd
, 0, sgot
->contents
12820 + (eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1));
12822 bfd_put_32 (output_bfd
, sgot
->output_section
->vma
12823 + sgot
->output_offset
12824 + (eh
->fdpic_cnts
.funcdesc_offset
& ~1),
12826 + (eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1));
12830 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_FUNCDESC
);
12832 outrel
.r_offset
= sgot
->output_section
->vma
12833 + sgot
->output_offset
12834 + (eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1);
12835 outrel
.r_addend
= 0;
12836 if (h
->dynindx
== -1 && !bfd_link_pic (info
))
12837 if (h
->root
.type
== bfd_link_hash_undefweak
)
12838 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
, -1);
12840 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
,
12843 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12844 eh
->fdpic_cnts
.gotfuncdesc_offset
|= 1;
12849 /* Such relocation on static function should not have been
12850 emitted by the compiler. */
12851 return bfd_reloc_notsupported
;
12854 *unresolved_reloc_p
= false;
12855 return bfd_reloc_ok
;
12857 case R_ARM_FUNCDESC
:
12861 struct fdpic_local
*local_fdpic_cnts
= elf32_arm_local_fdpic_cnts (input_bfd
);
12862 Elf_Internal_Rela outrel
;
12863 int dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12865 if (r_symndx
>= elf32_arm_num_entries (input_bfd
))
12867 * error_message
= _("local symbol index too big");
12868 return bfd_reloc_dangerous
;
12871 int offset
= local_fdpic_cnts
[r_symndx
].funcdesc_offset
& ~1;
12872 bfd_vma addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12875 if (bfd_link_pic (info
) && dynindx
== 0)
12877 * error_message
= _("dynamic index information not available");
12878 return bfd_reloc_dangerous
;
12881 /* Replace static FUNCDESC relocation with a
12882 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12884 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
12885 outrel
.r_offset
= input_section
->output_section
->vma
12886 + input_section
->output_offset
+ rel
->r_offset
;
12887 outrel
.r_addend
= 0;
12888 if (bfd_link_pic (info
))
12889 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12891 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
, outrel
.r_offset
);
12893 bfd_put_32 (input_bfd
, sgot
->output_section
->vma
12894 + sgot
->output_offset
+ offset
, hit_data
);
12896 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12897 arm_elf_fill_funcdesc (output_bfd
, info
,
12898 &local_fdpic_cnts
[r_symndx
].funcdesc_offset
,
12899 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12903 if (h
->dynindx
== -1)
12906 int offset
= eh
->fdpic_cnts
.funcdesc_offset
& ~1;
12909 Elf_Internal_Rela outrel
;
12911 /* For static binaries sym_sec can be null. */
12914 dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12915 addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12923 if (bfd_link_pic (info
) && dynindx
== 0)
12926 /* Replace static FUNCDESC relocation with a
12927 R_ARM_RELATIVE dynamic relocation. */
12928 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
12929 outrel
.r_offset
= input_section
->output_section
->vma
12930 + input_section
->output_offset
+ rel
->r_offset
;
12931 outrel
.r_addend
= 0;
12932 if (bfd_link_pic (info
))
12933 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12935 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
, outrel
.r_offset
);
12937 bfd_put_32 (input_bfd
, sgot
->output_section
->vma
12938 + sgot
->output_offset
+ offset
, hit_data
);
12940 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12941 arm_elf_fill_funcdesc (output_bfd
, info
,
12942 &eh
->fdpic_cnts
.funcdesc_offset
,
12943 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12947 Elf_Internal_Rela outrel
;
12949 /* Add a dynamic relocation. */
12950 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_FUNCDESC
);
12951 outrel
.r_offset
= input_section
->output_section
->vma
12952 + input_section
->output_offset
+ rel
->r_offset
;
12953 outrel
.r_addend
= 0;
12954 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12958 *unresolved_reloc_p
= false;
12959 return bfd_reloc_ok
;
	case R_ARM_THM_BF16:
	  {
	    bfd_vma relocation;
	    bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	    bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);

	    if (globals->use_rel)
	      {
		bfd_vma immA = (upper_insn & 0x001f);
		bfd_vma immB = (lower_insn & 0x07fe) >> 1;
		bfd_vma immC = (lower_insn & 0x0800) >> 11;
		addend  = (immA << 12);
		addend |= (immB << 2);
		addend |= (immC << 1);

		/* Sign extend.  */
		signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
	      }

	    relocation = value + signed_addend;
	    relocation -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset);

	    /* Put RELOCATION back into the insn.  */
	    {
	      bfd_vma immA = (relocation & 0x0001f000) >> 12;
	      bfd_vma immB = (relocation & 0x00000ffc) >> 2;
	      bfd_vma immC = (relocation & 0x00000002) >> 1;

	      upper_insn = (upper_insn & 0xffe0) | immA;
	      lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
	    }

	    /* Put the relocated value back in the object file:  */
	    bfd_put_16 (input_bfd, upper_insn, hit_data);
	    bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	    return bfd_reloc_ok;
	  }
	case R_ARM_THM_BF12:
	  {
	    bfd_vma relocation;
	    bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	    bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);

	    if (globals->use_rel)
	      {
		bfd_vma immA = (upper_insn & 0x0001);
		bfd_vma immB = (lower_insn & 0x07fe) >> 1;
		bfd_vma immC = (lower_insn & 0x0800) >> 11;
		addend  = (immA << 12);
		addend |= (immB << 2);
		addend |= (immC << 1);

		/* Sign extend.  */
		addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
		signed_addend = addend;
	      }

	    relocation = value + signed_addend;
	    relocation -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset);

	    /* Put RELOCATION back into the insn.  */
	    {
	      bfd_vma immA = (relocation & 0x00001000) >> 12;
	      bfd_vma immB = (relocation & 0x00000ffc) >> 2;
	      bfd_vma immC = (relocation & 0x00000002) >> 1;

	      upper_insn = (upper_insn & 0xfffe) | immA;
	      lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
	    }

	    /* Put the relocated value back in the object file:  */
	    bfd_put_16 (input_bfd, upper_insn, hit_data);
	    bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	    return bfd_reloc_ok;
	  }
	case R_ARM_THM_BF18:
	  {
	    bfd_vma relocation;
	    bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	    bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);

	    if (globals->use_rel)
	      {
		bfd_vma immA = (upper_insn & 0x007f);
		bfd_vma immB = (lower_insn & 0x07fe) >> 1;
		bfd_vma immC = (lower_insn & 0x0800) >> 11;
		addend  = (immA << 12);
		addend |= (immB << 2);
		addend |= (immC << 1);

		/* Sign extend.  */
		addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
		signed_addend = addend;
	      }

	    relocation = value + signed_addend;
	    relocation -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset);

	    /* Put RELOCATION back into the insn.  */
	    {
	      bfd_vma immA = (relocation & 0x0007f000) >> 12;
	      bfd_vma immB = (relocation & 0x00000ffc) >> 2;
	      bfd_vma immC = (relocation & 0x00000002) >> 1;

	      upper_insn = (upper_insn & 0xff80) | immA;
	      lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
	    }

	    /* Put the relocated value back in the object file:  */
	    bfd_put_16 (input_bfd, upper_insn, hit_data);
	    bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	    return bfd_reloc_ok;
	  }
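	/* The three R_ARM_THM_BF* cases above share a single layout and differ
	   only in the width of immA (1 bit for BF12, 5 for BF16, 7 for BF18):
	   the halfword-aligned branch offset is reassembled as immA:immB:immC:0
	   (sign extended when read back as a REL addend) and the new offset,
	   relative to the place of the relocation, is scattered back into the
	   two instruction halfwords.  */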
	default:
	  return bfd_reloc_notsupported;
	}
    }
/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  */

static void
arm_add_to_rel (bfd *		   abfd,
		bfd_byte *	   address,
		reloc_howto_type * howto,
		bfd_signed_vma	   increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      int upper_insn, lower_insn;
      int upper, lower;

      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;

      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction.  */
      addend = contents & howto->src_mask;
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  bfd_signed_vma mask;

	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  addend *= bfd_get_reloc_size (howto);
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}
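/* In this file arm_add_to_rel is called from the use_rel (REL-style) path of
   elf32_arm_relocate_section below: in a relocatable link the addend lives in
   the instruction itself, so adjustments for section symbols are applied by
   patching the encoded addend in place rather than by updating r_addend.  */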
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_GD32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
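/* These predicates are used by elf32_arm_relocate_section below, both to
   diagnose TLS relocations applied to non-TLS symbols (and vice versa) and to
   decide when the GNU TLS descriptor sequences need to be relaxed.  */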
13187 /* Relocate an ARM ELF section. */
13190 elf32_arm_relocate_section (bfd
* output_bfd
,
13191 struct bfd_link_info
* info
,
13193 asection
* input_section
,
13194 bfd_byte
* contents
,
13195 Elf_Internal_Rela
* relocs
,
13196 Elf_Internal_Sym
* local_syms
,
13197 asection
** local_sections
)
13199 Elf_Internal_Shdr
*symtab_hdr
;
13200 struct elf_link_hash_entry
**sym_hashes
;
13201 Elf_Internal_Rela
*rel
;
13202 Elf_Internal_Rela
*relend
;
13204 struct elf32_arm_link_hash_table
* globals
;
13206 globals
= elf32_arm_hash_table (info
);
13207 if (globals
== NULL
)
13210 symtab_hdr
= & elf_symtab_hdr (input_bfd
);
13211 sym_hashes
= elf_sym_hashes (input_bfd
);
13214 relend
= relocs
+ input_section
->reloc_count
;
13215 for (; rel
< relend
; rel
++)
13218 reloc_howto_type
* howto
;
13219 unsigned long r_symndx
;
13220 Elf_Internal_Sym
* sym
;
13222 struct elf_link_hash_entry
* h
;
13223 bfd_vma relocation
;
13224 bfd_reloc_status_type r
;
13227 bool unresolved_reloc
= false;
13228 char *error_message
= NULL
;
13230 r_symndx
= ELF32_R_SYM (rel
->r_info
);
13231 r_type
= ELF32_R_TYPE (rel
->r_info
);
13232 r_type
= arm_real_reloc_type (globals
, r_type
);
13234 if ( r_type
== R_ARM_GNU_VTENTRY
13235 || r_type
== R_ARM_GNU_VTINHERIT
)
13238 howto
= bfd_reloc
.howto
= elf32_arm_howto_from_type (r_type
);
13241 return _bfd_unrecognized_reloc (input_bfd
, input_section
, r_type
);
13247 if (r_symndx
< symtab_hdr
->sh_info
)
13249 sym
= local_syms
+ r_symndx
;
13250 sym_type
= ELF32_ST_TYPE (sym
->st_info
);
13251 sec
= local_sections
[r_symndx
];
13253 /* An object file might have a reference to a local
13254 undefined symbol. This is a daft object file, but we
13255 should at least do something about it. V4BX & NONE
13256 relocations do not use the symbol and are explicitly
13257 allowed to use the undefined symbol, so allow those.
13258 Likewise for relocations against STN_UNDEF. */
13259 if (r_type
!= R_ARM_V4BX
13260 && r_type
!= R_ARM_NONE
13261 && r_symndx
!= STN_UNDEF
13262 && bfd_is_und_section (sec
)
13263 && ELF_ST_BIND (sym
->st_info
) != STB_WEAK
)
13264 (*info
->callbacks
->undefined_symbol
)
13265 (info
, bfd_elf_string_from_elf_section
13266 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
),
13267 input_bfd
, input_section
,
13268 rel
->r_offset
, true);
13270 if (globals
->use_rel
)
13272 relocation
= (sec
->output_section
->vma
13273 + sec
->output_offset
13275 if (!bfd_link_relocatable (info
)
13276 && (sec
->flags
& SEC_MERGE
)
13277 && ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
13280 bfd_vma addend
, value
;
13284 case R_ARM_MOVW_ABS_NC
:
13285 case R_ARM_MOVT_ABS
:
13286 value
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
13287 addend
= ((value
& 0xf0000) >> 4) | (value
& 0xfff);
13288 addend
= (addend
^ 0x8000) - 0x8000;
13291 case R_ARM_THM_MOVW_ABS_NC
:
13292 case R_ARM_THM_MOVT_ABS
:
13293 value
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
)
13295 value
|= bfd_get_16 (input_bfd
,
13296 contents
+ rel
->r_offset
+ 2);
13297 addend
= ((value
& 0xf7000) >> 4) | (value
& 0xff)
13298 | ((value
& 0x04000000) >> 15);
13299 addend
= (addend
^ 0x8000) - 0x8000;
13303 if (howto
->rightshift
13304 || (howto
->src_mask
& (howto
->src_mask
+ 1)))
13307 /* xgettext:c-format */
13308 (_("%pB(%pA+%#" PRIx64
"): "
13309 "%s relocation against SEC_MERGE section"),
13310 input_bfd
, input_section
,
13311 (uint64_t) rel
->r_offset
, howto
->name
);
13315 value
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
13317 /* Get the (signed) value from the instruction. */
13318 addend
= value
& howto
->src_mask
;
13319 if (addend
& ((howto
->src_mask
+ 1) >> 1))
13321 bfd_signed_vma mask
;
13324 mask
&= ~ howto
->src_mask
;
13332 _bfd_elf_rel_local_sym (output_bfd
, sym
, &msec
, addend
)
13334 addend
+= msec
->output_section
->vma
+ msec
->output_offset
;
13336 /* Cases here must match those in the preceding
13337 switch statement. */
13340 case R_ARM_MOVW_ABS_NC
:
13341 case R_ARM_MOVT_ABS
:
13342 value
= (value
& 0xfff0f000) | ((addend
& 0xf000) << 4)
13343 | (addend
& 0xfff);
13344 bfd_put_32 (input_bfd
, value
, contents
+ rel
->r_offset
);
13347 case R_ARM_THM_MOVW_ABS_NC
:
13348 case R_ARM_THM_MOVT_ABS
:
13349 value
= (value
& 0xfbf08f00) | ((addend
& 0xf700) << 4)
13350 | (addend
& 0xff) | ((addend
& 0x0800) << 15);
13351 bfd_put_16 (input_bfd
, value
>> 16,
13352 contents
+ rel
->r_offset
);
13353 bfd_put_16 (input_bfd
, value
,
13354 contents
+ rel
->r_offset
+ 2);
13358 value
= (value
& ~ howto
->dst_mask
)
13359 | (addend
& howto
->dst_mask
);
13360 bfd_put_32 (input_bfd
, value
, contents
+ rel
->r_offset
);
13366 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
13370 bool warned
, ignored
;
13372 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
13373 r_symndx
, symtab_hdr
, sym_hashes
,
13374 h
, sec
, relocation
,
13375 unresolved_reloc
, warned
, ignored
);
13377 sym_type
= h
->type
;
13380 if (sec
!= NULL
&& discarded_section (sec
))
13381 RELOC_AGAINST_DISCARDED_SECTION (info
, input_bfd
, input_section
,
13382 rel
, 1, relend
, howto
, 0, contents
);
13384 if (bfd_link_relocatable (info
))
13386 /* This is a relocatable link. We don't have to change
13387 anything, unless the reloc is against a section symbol,
13388 in which case we have to adjust according to where the
13389 section symbol winds up in the output section. */
13390 if (sym
!= NULL
&& ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
13392 if (globals
->use_rel
)
13393 arm_add_to_rel (input_bfd
, contents
+ rel
->r_offset
,
13394 howto
, (bfd_signed_vma
) sec
->output_offset
);
13396 rel
->r_addend
+= sec
->output_offset
;
13402 name
= h
->root
.root
.string
;
13405 name
= (bfd_elf_string_from_elf_section
13406 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
));
13407 if (name
== NULL
|| *name
== '\0')
13408 name
= bfd_section_name (sec
);
13411 if (r_symndx
!= STN_UNDEF
13412 && r_type
!= R_ARM_NONE
13414 || h
->root
.type
== bfd_link_hash_defined
13415 || h
->root
.type
== bfd_link_hash_defweak
)
13416 && IS_ARM_TLS_RELOC (r_type
) != (sym_type
== STT_TLS
))
13419 ((sym_type
== STT_TLS
13420 /* xgettext:c-format */
13421 ? _("%pB(%pA+%#" PRIx64
"): %s used with TLS symbol %s")
13422 /* xgettext:c-format */
13423 : _("%pB(%pA+%#" PRIx64
"): %s used with non-TLS symbol %s")),
13426 (uint64_t) rel
->r_offset
,
13431 /* We call elf32_arm_final_link_relocate unless we're completely
13432 done, i.e., the relaxation produced the final output we want,
13433 and we won't let anybody mess with it. Also, we have to do
13434 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
13435 both in relaxed and non-relaxed cases. */
13436 if ((elf32_arm_tls_transition (info
, r_type
, h
) != (unsigned)r_type
)
13437 || (IS_ARM_TLS_GNU_RELOC (r_type
)
13438 && !((h
? elf32_arm_hash_entry (h
)->tls_type
:
13439 elf32_arm_local_got_tls_type (input_bfd
)[r_symndx
])
13442 r
= elf32_arm_tls_relax (globals
, input_bfd
, input_section
,
13443 contents
, rel
, h
== NULL
);
13444 /* This may have been marked unresolved because it came from
13445 a shared library. But we've just dealt with that. */
13446 unresolved_reloc
= 0;
13449 r
= bfd_reloc_continue
;
13451 if (r
== bfd_reloc_continue
)
13453 unsigned char branch_type
=
13454 h
? ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
)
13455 : ARM_GET_SYM_BRANCH_TYPE (sym
->st_target_internal
);
13457 r
= elf32_arm_final_link_relocate (howto
, input_bfd
, output_bfd
,
13458 input_section
, contents
, rel
,
13459 relocation
, info
, sec
, name
,
13460 sym_type
, branch_type
, h
,
13465 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13466 because such sections are not SEC_ALLOC and thus ld.so will
13467 not process them. */
13468 if (unresolved_reloc
13469 && !((input_section
->flags
& SEC_DEBUGGING
) != 0
13471 && _bfd_elf_section_offset (output_bfd
, info
, input_section
,
13472 rel
->r_offset
) != (bfd_vma
) -1)
13475 /* xgettext:c-format */
13476 (_("%pB(%pA+%#" PRIx64
"): "
13477 "unresolvable %s relocation against symbol `%s'"),
13480 (uint64_t) rel
->r_offset
,
13482 h
->root
.root
.string
);
13486 if (r
!= bfd_reloc_ok
)
13490 case bfd_reloc_overflow
:
13491 /* If the overflowing reloc was to an undefined symbol,
13492 we have already printed one error message and there
13493 is no point complaining again. */
13494 if (!h
|| h
->root
.type
!= bfd_link_hash_undefined
)
13495 (*info
->callbacks
->reloc_overflow
)
13496 (info
, (h
? &h
->root
: NULL
), name
, howto
->name
,
13497 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
);
13500 case bfd_reloc_undefined
:
13501 (*info
->callbacks
->undefined_symbol
)
13502 (info
, name
, input_bfd
, input_section
, rel
->r_offset
, true);
13505 case bfd_reloc_outofrange
:
13506 error_message
= _("out of range");
13509 case bfd_reloc_notsupported
:
13510 error_message
= _("unsupported relocation");
13513 case bfd_reloc_dangerous
:
13514 /* error_message should already be set. */
13518 error_message
= _("unknown error");
13519 /* Fall through. */
13522 BFD_ASSERT (error_message
!= NULL
);
13523 (*info
->callbacks
->reloc_dangerous
)
13524 (info
, error_message
, input_bfd
, input_section
, rel
->r_offset
);
13533 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13534 adds the edit to the start of the list. (The list must be built in order of
13535 ascending TINDEX: the function's callers are primarily responsible for
13536 maintaining that condition). */
13539 add_unwind_table_edit (arm_unwind_table_edit
**head
,
13540 arm_unwind_table_edit
**tail
,
13541 arm_unwind_edit_type type
,
13542 asection
*linked_section
,
13543 unsigned int tindex
)
13545 arm_unwind_table_edit
*new_edit
= (arm_unwind_table_edit
*)
13546 xmalloc (sizeof (arm_unwind_table_edit
));
13548 new_edit
->type
= type
;
13549 new_edit
->linked_section
= linked_section
;
13550 new_edit
->index
= tindex
;
13554 new_edit
->next
= NULL
;
13557 (*tail
)->next
= new_edit
;
13559 (*tail
) = new_edit
;
13562 (*head
) = new_edit
;
13566 new_edit
->next
= *head
;
13575 static _arm_elf_section_data
*get_arm_elf_section_data (asection
*);
/* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */
13580 adjust_exidx_size (asection
*exidx_sec
, int adjust
)
13584 if (!exidx_sec
->rawsize
)
13585 exidx_sec
->rawsize
= exidx_sec
->size
;
13587 bfd_set_section_size (exidx_sec
, exidx_sec
->size
+ adjust
);
13588 out_sec
= exidx_sec
->output_section
;
13589 /* Adjust size of output section. */
13590 bfd_set_section_size (out_sec
, out_sec
->size
+ adjust
);
13593 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13596 insert_cantunwind_after (asection
*text_sec
, asection
*exidx_sec
)
13598 struct _arm_elf_section_data
*exidx_arm_data
;
13600 exidx_arm_data
= get_arm_elf_section_data (exidx_sec
);
13601 add_unwind_table_edit
13602 (&exidx_arm_data
->u
.exidx
.unwind_edit_list
,
13603 &exidx_arm_data
->u
.exidx
.unwind_edit_tail
,
13604 INSERT_EXIDX_CANTUNWIND_AT_END
, text_sec
, UINT_MAX
);
13606 exidx_arm_data
->additional_reloc_count
++;
13608 adjust_exidx_size (exidx_sec
, 8);
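/* Each .ARM.exidx entry is a pair of 32-bit words, so the CANTUNWIND marker
   inserted above accounts for eight bytes of section size and one additional
   relocation.  */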
13611 /* Scan .ARM.exidx tables, and create a list describing edits which should be
13612 made to those tables, such that:
13614 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
13615 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
13616 codes which have been inlined into the index).
13618 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
13620 The edits are applied when the tables are written
13621 (in elf32_arm_write_section). */
13624 elf32_arm_fix_exidx_coverage (asection
**text_section_order
,
13625 unsigned int num_text_sections
,
13626 struct bfd_link_info
*info
,
13627 bool merge_exidx_entries
)
13630 unsigned int last_second_word
= 0, i
;
13631 asection
*last_exidx_sec
= NULL
;
13632 asection
*last_text_sec
= NULL
;
13633 int last_unwind_type
= -1;
  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
13637 for (inp
= info
->input_bfds
; inp
!= NULL
; inp
= inp
->link
.next
)
13641 for (sec
= inp
->sections
; sec
!= NULL
; sec
= sec
->next
)
13643 struct bfd_elf_section_data
*elf_sec
= elf_section_data (sec
);
13644 Elf_Internal_Shdr
*hdr
= &elf_sec
->this_hdr
;
13646 if (!hdr
|| hdr
->sh_type
!= SHT_ARM_EXIDX
)
13649 if (elf_sec
->linked_to
)
13651 Elf_Internal_Shdr
*linked_hdr
13652 = &elf_section_data (elf_sec
->linked_to
)->this_hdr
;
13653 struct _arm_elf_section_data
*linked_sec_arm_data
13654 = get_arm_elf_section_data (linked_hdr
->bfd_section
);
13656 if (linked_sec_arm_data
== NULL
)
13659 /* Link this .ARM.exidx section back from the text section it
13661 linked_sec_arm_data
->u
.text
.arm_exidx_sec
= sec
;
  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */
13670 for (i
= 0; i
< num_text_sections
; i
++)
13672 asection
*sec
= text_section_order
[i
];
13673 asection
*exidx_sec
;
13674 struct _arm_elf_section_data
*arm_data
= get_arm_elf_section_data (sec
);
13675 struct _arm_elf_section_data
*exidx_arm_data
;
13676 bfd_byte
*contents
= NULL
;
13677 int deleted_exidx_bytes
= 0;
13679 arm_unwind_table_edit
*unwind_edit_head
= NULL
;
13680 arm_unwind_table_edit
*unwind_edit_tail
= NULL
;
13681 Elf_Internal_Shdr
*hdr
;
13684 if (arm_data
== NULL
)
13687 exidx_sec
= arm_data
->u
.text
.arm_exidx_sec
;
13688 if (exidx_sec
== NULL
)
13690 /* Section has no unwind data. */
13691 if (last_unwind_type
== 0 || !last_exidx_sec
)
13694 /* Ignore zero sized sections. */
13695 if (sec
->size
== 0)
13698 insert_cantunwind_after (last_text_sec
, last_exidx_sec
);
13699 last_unwind_type
= 0;
13703 /* Skip /DISCARD/ sections. */
13704 if (bfd_is_abs_section (exidx_sec
->output_section
))
13707 hdr
= &elf_section_data (exidx_sec
)->this_hdr
;
13708 if (hdr
->sh_type
!= SHT_ARM_EXIDX
)
13711 exidx_arm_data
= get_arm_elf_section_data (exidx_sec
);
13712 if (exidx_arm_data
== NULL
)
13715 ibfd
= exidx_sec
->owner
;
13717 if (hdr
->contents
!= NULL
)
13718 contents
= hdr
->contents
;
13719 else if (! bfd_malloc_and_get_section (ibfd
, exidx_sec
, &contents
))
13723 if (last_unwind_type
> 0)
13725 unsigned int first_word
= bfd_get_32 (ibfd
, contents
);
13726 /* Add cantunwind if first unwind item does not match section
13728 if (first_word
!= sec
->vma
)
13730 insert_cantunwind_after (last_text_sec
, last_exidx_sec
);
13731 last_unwind_type
= 0;
13735 for (j
= 0; j
< hdr
->sh_size
; j
+= 8)
13737 unsigned int second_word
= bfd_get_32 (ibfd
, contents
+ j
+ 4);
13741 /* An EXIDX_CANTUNWIND entry. */
13742 if (second_word
== 1)
13744 if (last_unwind_type
== 0)
13748 /* Inlined unwinding data. Merge if equal to previous. */
13749 else if ((second_word
& 0x80000000) != 0)
13751 if (merge_exidx_entries
13752 && last_second_word
== second_word
&& last_unwind_type
== 1)
13755 last_second_word
= second_word
;
13757 /* Normal table entry. In theory we could merge these too,
13758 but duplicate entries are likely to be much less common. */
13762 if (elide
&& !bfd_link_relocatable (info
))
13764 add_unwind_table_edit (&unwind_edit_head
, &unwind_edit_tail
,
13765 DELETE_EXIDX_ENTRY
, NULL
, j
/ 8);
13767 deleted_exidx_bytes
+= 8;
13770 last_unwind_type
= unwind_type
;
13773 /* Free contents if we allocated it ourselves. */
13774 if (contents
!= hdr
->contents
)
13777 /* Record edits to be applied later (in elf32_arm_write_section). */
13778 exidx_arm_data
->u
.exidx
.unwind_edit_list
= unwind_edit_head
;
13779 exidx_arm_data
->u
.exidx
.unwind_edit_tail
= unwind_edit_tail
;
13781 if (deleted_exidx_bytes
> 0)
13782 adjust_exidx_size (exidx_sec
, - deleted_exidx_bytes
);
13784 last_exidx_sec
= exidx_sec
;
13785 last_text_sec
= sec
;
13788 /* Add terminating CANTUNWIND entry. */
13789 if (!bfd_link_relocatable (info
) && last_exidx_sec
13790 && last_unwind_type
!= 0)
13791 insert_cantunwind_after (last_text_sec
, last_exidx_sec
);
13797 elf32_arm_output_glue_section (struct bfd_link_info
*info
, bfd
*obfd
,
13798 bfd
*ibfd
, const char *name
)
13800 asection
*sec
, *osec
;
13802 sec
= bfd_get_linker_section (ibfd
, name
);
13803 if (sec
== NULL
|| (sec
->flags
& SEC_EXCLUDE
) != 0)
13806 osec
= sec
->output_section
;
13807 if (elf32_arm_write_section (obfd
, info
, sec
, sec
->contents
))
13810 if (! bfd_set_section_contents (obfd
, osec
, sec
->contents
,
13811 sec
->output_offset
, sec
->size
))
13818 elf32_arm_final_link (bfd
*abfd
, struct bfd_link_info
*info
)
13820 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
13821 asection
*sec
, *osec
;
13823 if (globals
== NULL
)
13826 /* Invoke the regular ELF backend linker to do all the work. */
13827 if (!bfd_elf_final_link (abfd
, info
))
13830 /* Process stub sections (eg BE8 encoding, ...). */
13831 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
13833 for (i
=0; i
<htab
->top_id
; i
++)
13835 sec
= htab
->stub_group
[i
].stub_sec
;
13836 /* Only process it once, in its link_sec slot. */
13837 if (sec
&& i
== htab
->stub_group
[i
].link_sec
->id
)
13839 osec
= sec
->output_section
;
13840 elf32_arm_write_section (abfd
, info
, sec
, sec
->contents
);
13841 if (! bfd_set_section_contents (abfd
, osec
, sec
->contents
,
13842 sec
->output_offset
, sec
->size
))
13847 /* Write out any glue sections now that we have created all the
13849 if (globals
->bfd_of_glue_owner
!= NULL
)
13851 if (! elf32_arm_output_glue_section (info
, abfd
,
13852 globals
->bfd_of_glue_owner
,
13853 ARM2THUMB_GLUE_SECTION_NAME
))
13856 if (! elf32_arm_output_glue_section (info
, abfd
,
13857 globals
->bfd_of_glue_owner
,
13858 THUMB2ARM_GLUE_SECTION_NAME
))
13861 if (! elf32_arm_output_glue_section (info
, abfd
,
13862 globals
->bfd_of_glue_owner
,
13863 VFP11_ERRATUM_VENEER_SECTION_NAME
))
13866 if (! elf32_arm_output_glue_section (info
, abfd
,
13867 globals
->bfd_of_glue_owner
,
13868 STM32L4XX_ERRATUM_VENEER_SECTION_NAME
))
13871 if (! elf32_arm_output_glue_section (info
, abfd
,
13872 globals
->bfd_of_glue_owner
,
13873 ARM_BX_GLUE_SECTION_NAME
))
13880 /* Return a best guess for the machine number based on the attributes. */
13882 static unsigned int
13883 bfd_arm_get_mach_from_attributes (bfd
* abfd
)
13885 int arch
= bfd_elf_get_obj_attr_int (abfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
13889 case TAG_CPU_ARCH_PRE_V4
: return bfd_mach_arm_3M
;
13890 case TAG_CPU_ARCH_V4
: return bfd_mach_arm_4
;
13891 case TAG_CPU_ARCH_V4T
: return bfd_mach_arm_4T
;
13892 case TAG_CPU_ARCH_V5T
: return bfd_mach_arm_5T
;
13894 case TAG_CPU_ARCH_V5TE
:
13898 BFD_ASSERT (Tag_CPU_name
< NUM_KNOWN_OBJ_ATTRIBUTES
);
13899 name
= elf_known_obj_attributes (abfd
) [OBJ_ATTR_PROC
][Tag_CPU_name
].s
;
13903 if (strcmp (name
, "IWMMXT2") == 0)
13904 return bfd_mach_arm_iWMMXt2
;
13906 if (strcmp (name
, "IWMMXT") == 0)
13907 return bfd_mach_arm_iWMMXt
;
13909 if (strcmp (name
, "XSCALE") == 0)
13913 BFD_ASSERT (Tag_WMMX_arch
< NUM_KNOWN_OBJ_ATTRIBUTES
);
13914 wmmx
= elf_known_obj_attributes (abfd
) [OBJ_ATTR_PROC
][Tag_WMMX_arch
].i
;
13917 case 1: return bfd_mach_arm_iWMMXt
;
13918 case 2: return bfd_mach_arm_iWMMXt2
;
13919 default: return bfd_mach_arm_XScale
;
13924 return bfd_mach_arm_5TE
;
13927 case TAG_CPU_ARCH_V5TEJ
:
13928 return bfd_mach_arm_5TEJ
;
13929 case TAG_CPU_ARCH_V6
:
13930 return bfd_mach_arm_6
;
13931 case TAG_CPU_ARCH_V6KZ
:
13932 return bfd_mach_arm_6KZ
;
13933 case TAG_CPU_ARCH_V6T2
:
13934 return bfd_mach_arm_6T2
;
13935 case TAG_CPU_ARCH_V6K
:
13936 return bfd_mach_arm_6K
;
13937 case TAG_CPU_ARCH_V7
:
13938 return bfd_mach_arm_7
;
13939 case TAG_CPU_ARCH_V6_M
:
13940 return bfd_mach_arm_6M
;
13941 case TAG_CPU_ARCH_V6S_M
:
13942 return bfd_mach_arm_6SM
;
13943 case TAG_CPU_ARCH_V7E_M
:
13944 return bfd_mach_arm_7EM
;
13945 case TAG_CPU_ARCH_V8
:
13946 return bfd_mach_arm_8
;
13947 case TAG_CPU_ARCH_V8R
:
13948 return bfd_mach_arm_8R
;
13949 case TAG_CPU_ARCH_V8M_BASE
:
13950 return bfd_mach_arm_8M_BASE
;
13951 case TAG_CPU_ARCH_V8M_MAIN
:
13952 return bfd_mach_arm_8M_MAIN
;
13953 case TAG_CPU_ARCH_V8_1M_MAIN
:
13954 return bfd_mach_arm_8_1M_MAIN
;
13955 case TAG_CPU_ARCH_V9
:
13956 return bfd_mach_arm_9
;
13959 /* Force entry to be added for any new known Tag_CPU_arch value. */
13960 BFD_ASSERT (arch
> MAX_TAG_CPU_ARCH
);
13962 /* Unknown Tag_CPU_arch value. */
13963 return bfd_mach_arm_unknown
;
13967 /* Set the right machine number. */
13970 elf32_arm_object_p (bfd
*abfd
)
13974 mach
= bfd_arm_get_mach_from_notes (abfd
, ARM_NOTE_SECTION
);
13976 if (mach
== bfd_mach_arm_unknown
)
13977 mach
= bfd_arm_get_mach_from_attributes (abfd
);
13979 bfd_default_set_arch_mach (abfd
, bfd_arch_arm
, mach
);
13983 /* Function to keep ARM specific flags in the ELF header. */
13986 elf32_arm_set_private_flags (bfd
*abfd
, flagword flags
)
13988 if (elf_flags_init (abfd
)
13989 && elf_elfheader (abfd
)->e_flags
!= flags
)
13991 if (EF_ARM_EABI_VERSION (flags
) == EF_ARM_EABI_UNKNOWN
)
13993 if (flags
& EF_ARM_INTERWORK
)
13995 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13999 (_("warning: clearing the interworking flag of %pB due to outside request"),
14005 elf_elfheader (abfd
)->e_flags
= flags
;
14006 elf_flags_init (abfd
) = true;
14012 /* Copy backend specific data from one object module to another. */
14015 elf32_arm_copy_private_bfd_data (bfd
*ibfd
, bfd
*obfd
)
14018 flagword out_flags
;
14020 if (! is_arm_elf (ibfd
) || ! is_arm_elf (obfd
))
14023 in_flags
= elf_elfheader (ibfd
)->e_flags
;
14024 out_flags
= elf_elfheader (obfd
)->e_flags
;
14026 if (elf_flags_init (obfd
)
14027 && EF_ARM_EABI_VERSION (out_flags
) == EF_ARM_EABI_UNKNOWN
14028 && in_flags
!= out_flags
)
14030 /* Cannot mix APCS26 and APCS32 code. */
14031 if ((in_flags
& EF_ARM_APCS_26
) != (out_flags
& EF_ARM_APCS_26
))
14034 /* Cannot mix float APCS and non-float APCS code. */
14035 if ((in_flags
& EF_ARM_APCS_FLOAT
) != (out_flags
& EF_ARM_APCS_FLOAT
))
14038 /* If the src and dest have different interworking flags
14039 then turn off the interworking bit. */
14040 if ((in_flags
& EF_ARM_INTERWORK
) != (out_flags
& EF_ARM_INTERWORK
))
14042 if (out_flags
& EF_ARM_INTERWORK
)
14044 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
14047 in_flags
&= ~EF_ARM_INTERWORK
;
14050 /* Likewise for PIC, though don't warn for this case. */
14051 if ((in_flags
& EF_ARM_PIC
) != (out_flags
& EF_ARM_PIC
))
14052 in_flags
&= ~EF_ARM_PIC
;
14055 elf_elfheader (obfd
)->e_flags
= in_flags
;
14056 elf_flags_init (obfd
) = true;
14058 return _bfd_elf_copy_private_bfd_data (ibfd
, obfd
);
14061 /* Values for Tag_ABI_PCS_R9_use. */
14070 /* Values for Tag_ABI_PCS_RW_data. */
14073 AEABI_PCS_RW_data_absolute
,
14074 AEABI_PCS_RW_data_PCrel
,
14075 AEABI_PCS_RW_data_SBrel
,
14076 AEABI_PCS_RW_data_unused
14079 /* Values for Tag_ABI_enum_size. */
14085 AEABI_enum_forced_wide
14088 /* Determine whether an object attribute tag takes an integer, a
14092 elf32_arm_obj_attrs_arg_type (int tag
)
14094 if (tag
== Tag_compatibility
)
14095 return ATTR_TYPE_FLAG_INT_VAL
| ATTR_TYPE_FLAG_STR_VAL
;
14096 else if (tag
== Tag_nodefaults
)
14097 return ATTR_TYPE_FLAG_INT_VAL
| ATTR_TYPE_FLAG_NO_DEFAULT
;
14098 else if (tag
== Tag_CPU_raw_name
|| tag
== Tag_CPU_name
)
14099 return ATTR_TYPE_FLAG_STR_VAL
;
14101 return ATTR_TYPE_FLAG_INT_VAL
;
14103 return (tag
& 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL
: ATTR_TYPE_FLAG_INT_VAL
;
14106 /* The ABI defines that Tag_conformance should be emitted first, and that
14107 Tag_nodefaults should be second (if either is defined). This sets those
14108 two positions, and bumps up the position of all the remaining tags to
14111 elf32_arm_obj_attrs_order (int num
)
14113 if (num
== LEAST_KNOWN_OBJ_ATTRIBUTE
)
14114 return Tag_conformance
;
14115 if (num
== LEAST_KNOWN_OBJ_ATTRIBUTE
+ 1)
14116 return Tag_nodefaults
;
14117 if ((num
- 2) < Tag_nodefaults
)
14119 if ((num
- 1) < Tag_conformance
)
14124 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
14126 elf32_arm_obj_attrs_handle_unknown (bfd
*abfd
, int tag
)
14128 if ((tag
& 127) < 64)
14131 (_("%pB: unknown mandatory EABI object attribute %d"),
14133 bfd_set_error (bfd_error_bad_value
);
14139 (_("warning: %pB: unknown EABI object attribute %d"),
14145 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
14146 Returns -1 if no architecture could be read. */
14149 get_secondary_compatible_arch (bfd
*abfd
)
14151 obj_attribute
*attr
=
14152 &elf_known_obj_attributes_proc (abfd
)[Tag_also_compatible_with
];
14154 /* Note: the tag and its argument below are uleb128 values, though
14155 currently-defined values fit in one byte for each. */
14157 && attr
->s
[0] == Tag_CPU_arch
14158 && (attr
->s
[1] & 128) != 128
14159 && attr
->s
[2] == 0)
14162 /* This tag is "safely ignorable", so don't complain if it looks funny. */
14166 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
14167 The tag is removed if ARCH is -1. */
14170 set_secondary_compatible_arch (bfd
*abfd
, int arch
)
14172 obj_attribute
*attr
=
14173 &elf_known_obj_attributes_proc (abfd
)[Tag_also_compatible_with
];
14181 /* Note: the tag and its argument below are uleb128 values, though
14182 currently-defined values fit in one byte for each. */
14184 attr
->s
= (char *) bfd_alloc (abfd
, 3);
14185 attr
->s
[0] = Tag_CPU_arch
;
14190 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
14194 tag_cpu_arch_combine (bfd
*ibfd
, int oldtag
, int *secondary_compat_out
,
14195 int newtag
, int secondary_compat
, const char* name_table
[])
14197 #define T(X) TAG_CPU_ARCH_##X
14198 int tagl
, tagh
, result
;
14201 T(V6T2
), /* PRE_V4. */
14203 T(V6T2
), /* V4T. */
14204 T(V6T2
), /* V5T. */
14205 T(V6T2
), /* V5TE. */
14206 T(V6T2
), /* V5TEJ. */
14209 T(V6T2
) /* V6T2. */
14213 T(V6K
), /* PRE_V4. */
14217 T(V6K
), /* V5TE. */
14218 T(V6K
), /* V5TEJ. */
14220 T(V6KZ
), /* V6KZ. */
14226 T(V7
), /* PRE_V4. */
14231 T(V7
), /* V5TEJ. */
14244 T(V6K
), /* V5TE. */
14245 T(V6K
), /* V5TEJ. */
14247 T(V6KZ
), /* V6KZ. */
14251 T(V6_M
) /* V6_M. */
14253 const int v6s_m
[] =
14259 T(V6K
), /* V5TE. */
14260 T(V6K
), /* V5TEJ. */
14262 T(V6KZ
), /* V6KZ. */
14266 T(V6S_M
), /* V6_M. */
14267 T(V6S_M
) /* V6S_M. */
14269 const int v7e_m
[] =
14273 T(V7E_M
), /* V4T. */
14274 T(V7E_M
), /* V5T. */
14275 T(V7E_M
), /* V5TE. */
14276 T(V7E_M
), /* V5TEJ. */
14277 T(V7E_M
), /* V6. */
14278 T(V7E_M
), /* V6KZ. */
14279 T(V7E_M
), /* V6T2. */
14280 T(V7E_M
), /* V6K. */
14281 T(V7E_M
), /* V7. */
14282 T(V7E_M
), /* V6_M. */
14283 T(V7E_M
), /* V6S_M. */
14284 T(V7E_M
) /* V7E_M. */
14288 T(V8
), /* PRE_V4. */
14293 T(V8
), /* V5TEJ. */
14300 T(V8
), /* V6S_M. */
14301 T(V8
), /* V7E_M. */
14304 T(V8
), /* V8-M.BASE. */
14305 T(V8
), /* V8-M.MAIN. */
14309 T(V8
), /* V8.1-M.MAIN. */
14313 T(V8R
), /* PRE_V4. */
14317 T(V8R
), /* V5TE. */
14318 T(V8R
), /* V5TEJ. */
14320 T(V8R
), /* V6KZ. */
14321 T(V8R
), /* V6T2. */
14324 T(V8R
), /* V6_M. */
14325 T(V8R
), /* V6S_M. */
14326 T(V8R
), /* V7E_M. */
14330 const int v8m_baseline
[] =
14343 T(V8M_BASE
), /* V6_M. */
14344 T(V8M_BASE
), /* V6S_M. */
14348 T(V8M_BASE
) /* V8-M BASELINE. */
14350 const int v8m_mainline
[] =
14362 T(V8M_MAIN
), /* V7. */
14363 T(V8M_MAIN
), /* V6_M. */
14364 T(V8M_MAIN
), /* V6S_M. */
14365 T(V8M_MAIN
), /* V7E_M. */
14368 T(V8M_MAIN
), /* V8-M BASELINE. */
14369 T(V8M_MAIN
) /* V8-M MAINLINE. */
14371 const int v8_1m_mainline
[] =
14383 T(V8_1M_MAIN
), /* V7. */
14384 T(V8_1M_MAIN
), /* V6_M. */
14385 T(V8_1M_MAIN
), /* V6S_M. */
14386 T(V8_1M_MAIN
), /* V7E_M. */
14389 T(V8_1M_MAIN
), /* V8-M BASELINE. */
14390 T(V8_1M_MAIN
), /* V8-M MAINLINE. */
14391 -1, /* Unused (18). */
14392 -1, /* Unused (19). */
14393 -1, /* Unused (20). */
14394 T(V8_1M_MAIN
) /* V8.1-M MAINLINE. */
14398 T(V9
), /* PRE_V4. */
14403 T(V9
), /* V5TEJ. */
14410 T(V9
), /* V6S_M. */
14411 T(V9
), /* V7E_M. */
14414 T(V9
), /* V8-M.BASE. */
14415 T(V9
), /* V8-M.MAIN. */
14419 T(V9
), /* V8.1-M.MAIN. */
14422 const int v4t_plus_v6_m
[] =
14428 T(V5TE
), /* V5TE. */
14429 T(V5TEJ
), /* V5TEJ. */
14431 T(V6KZ
), /* V6KZ. */
14432 T(V6T2
), /* V6T2. */
14435 T(V6_M
), /* V6_M. */
14436 T(V6S_M
), /* V6S_M. */
14437 T(V7E_M
), /* V7E_M. */
14440 T(V8M_BASE
), /* V8-M BASELINE. */
14441 T(V8M_MAIN
), /* V8-M MAINLINE. */
14442 -1, /* Unused (18). */
14443 -1, /* Unused (19). */
14444 -1, /* Unused (20). */
14445 T(V8_1M_MAIN
), /* V8.1-M MAINLINE. */
14447 T(V4T_PLUS_V6_M
) /* V4T plus V6_M. */
14449 const int *comb
[] =
14466 /* Pseudo-architecture. */
14470 /* Check we've not got a higher architecture than we know about. */
14472 if (oldtag
> MAX_TAG_CPU_ARCH
|| newtag
> MAX_TAG_CPU_ARCH
)
14474 _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd
);
14478 /* Override old tag if we have a Tag_also_compatible_with on the output. */
14480 if ((oldtag
== T(V6_M
) && *secondary_compat_out
== T(V4T
))
14481 || (oldtag
== T(V4T
) && *secondary_compat_out
== T(V6_M
)))
14482 oldtag
= T(V4T_PLUS_V6_M
);
14484 /* And override the new tag if we have a Tag_also_compatible_with on the
14487 if ((newtag
== T(V6_M
) && secondary_compat
== T(V4T
))
14488 || (newtag
== T(V4T
) && secondary_compat
== T(V6_M
)))
14489 newtag
= T(V4T_PLUS_V6_M
);
14491 tagl
= (oldtag
< newtag
) ? oldtag
: newtag
;
14492 result
= tagh
= (oldtag
> newtag
) ? oldtag
: newtag
;
14494 /* Architectures before V6KZ add features monotonically. */
14495 if (tagh
<= TAG_CPU_ARCH_V6KZ
)
14498 result
= comb
[tagh
- T(V6T2
)] ? comb
[tagh
- T(V6T2
)][tagl
] : -1;
14500 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
14501 as the canonical version. */
14502 if (result
== T(V4T_PLUS_V6_M
))
14505 *secondary_compat_out
= T(V6_M
);
14508 *secondary_compat_out
= -1;
14512 _bfd_error_handler (_("error: conflicting CPU architectures %s vs %s in %pB"),
14513 name_table
[oldtag
], name_table
[newtag
], ibfd
);
14521 /* Query attributes object to see if integer divide instructions may be
14522 present in an object. */
14524 elf32_arm_attributes_accept_div (const obj_attribute
*attr
)
14526 int arch
= attr
[Tag_CPU_arch
].i
;
14527 int profile
= attr
[Tag_CPU_arch_profile
].i
;
14529 switch (attr
[Tag_DIV_use
].i
)
      /* Integer divide allowed if instruction contained in architecture.  */
14533 if (arch
== TAG_CPU_ARCH_V7
&& (profile
== 'R' || profile
== 'M'))
14535 else if (arch
>= TAG_CPU_ARCH_V7E_M
)
14541 /* Integer divide explicitly prohibited. */
14545 /* Unrecognised case - treat as allowing divide everywhere. */
14547 /* Integer divide allowed in ARM state. */
14552 /* Query attributes object to see if integer divide instructions are
14553 forbidden to be in the object. This is not the inverse of
14554 elf32_arm_attributes_accept_div. */
14556 elf32_arm_attributes_forbid_div (const obj_attribute
*attr
)
14558 return attr
[Tag_DIV_use
].i
== 1;
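/* Note the asymmetry with elf32_arm_attributes_accept_div above: only an
   explicit Tag_DIV_use value of 1 forbids the divide instructions, whereas
   unrecognised values are treated as permitting them.  */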
14561 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14562 are conflicting attributes. */
14565 elf32_arm_merge_eabi_attributes (bfd
*ibfd
, struct bfd_link_info
*info
)
14567 bfd
*obfd
= info
->output_bfd
;
14568 obj_attribute
*in_attr
;
14569 obj_attribute
*out_attr
;
14570 /* Some tags have 0 = don't care, 1 = strong requirement,
14571 2 = weak requirement. */
14572 static const int order_021
[3] = {0, 2, 1};
14574 bool result
= true;
14575 const char *sec_name
= get_elf_backend_data (ibfd
)->obj_attrs_section
;
14577 /* Skip the linker stubs file. This preserves previous behavior
14578 of accepting unknown attributes in the first input file - but
14580 if (ibfd
->flags
& BFD_LINKER_CREATED
)
14583 /* Skip any input that hasn't attribute section.
14584 This enables to link object files without attribute section with
14586 if (bfd_get_section_by_name (ibfd
, sec_name
) == NULL
)
14589 if (!elf_known_obj_attributes_proc (obfd
)[0].i
)
14591 /* This is the first object. Copy the attributes. */
14592 _bfd_elf_copy_obj_attributes (ibfd
, obfd
);
14594 out_attr
= elf_known_obj_attributes_proc (obfd
);
14596 /* Use the Tag_null value to indicate the attributes have been
14600 /* We do not output objects with Tag_MPextension_use_legacy - we move
14601 the attribute's value to Tag_MPextension_use. */
14602 if (out_attr
[Tag_MPextension_use_legacy
].i
!= 0)
14604 if (out_attr
[Tag_MPextension_use
].i
!= 0
14605 && out_attr
[Tag_MPextension_use_legacy
].i
14606 != out_attr
[Tag_MPextension_use
].i
)
14609 (_("Error: %pB has both the current and legacy "
14610 "Tag_MPextension_use attributes"), ibfd
);
14614 out_attr
[Tag_MPextension_use
] =
14615 out_attr
[Tag_MPextension_use_legacy
];
14616 out_attr
[Tag_MPextension_use_legacy
].type
= 0;
14617 out_attr
[Tag_MPextension_use_legacy
].i
= 0;
14620 /* PR 28859 and 28848: Handle the case where the first input file,
14621 eg crti.o, has a Tag_ABI_HardFP_use of 3 but no Tag_FP_arch set.
14622 Using Tag_ABI_HardFP_use in this way is deprecated, so reset the
     FIXME: Should we handle other non-zero values of Tag_ABI_HardFP_use ?  */
14625 if (out_attr
[Tag_ABI_HardFP_use
].i
== 3 && out_attr
[Tag_FP_arch
].i
== 0)
14626 out_attr
[Tag_ABI_HardFP_use
].i
= 0;
14631 in_attr
= elf_known_obj_attributes_proc (ibfd
);
14632 out_attr
= elf_known_obj_attributes_proc (obfd
);
14633 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14634 if (in_attr
[Tag_ABI_VFP_args
].i
!= out_attr
[Tag_ABI_VFP_args
].i
)
14636 /* Ignore mismatches if the object doesn't use floating point or is
14637 floating point ABI independent. */
14638 if (out_attr
[Tag_ABI_FP_number_model
].i
== AEABI_FP_number_model_none
14639 || (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
14640 && out_attr
[Tag_ABI_VFP_args
].i
== AEABI_VFP_args_compatible
))
14641 out_attr
[Tag_ABI_VFP_args
].i
= in_attr
[Tag_ABI_VFP_args
].i
;
14642 else if (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
14643 && in_attr
[Tag_ABI_VFP_args
].i
!= AEABI_VFP_args_compatible
)
14646 (_("error: %pB uses VFP register arguments, %pB does not"),
14647 in_attr
[Tag_ABI_VFP_args
].i
? ibfd
: obfd
,
14648 in_attr
[Tag_ABI_VFP_args
].i
? obfd
: ibfd
);
14653 for (i
= LEAST_KNOWN_OBJ_ATTRIBUTE
; i
< NUM_KNOWN_OBJ_ATTRIBUTES
; i
++)
14655 /* Merge this attribute with existing attributes. */
14658 case Tag_CPU_raw_name
:
14660 /* These are merged after Tag_CPU_arch. */
14663 case Tag_ABI_optimization_goals
:
14664 case Tag_ABI_FP_optimization_goals
:
14665 /* Use the first value seen. */
14670 int secondary_compat
= -1, secondary_compat_out
= -1;
14671 unsigned int saved_out_attr
= out_attr
[i
].i
;
14673 static const char *name_table
[] =
14675 /* These aren't real CPU names, but we can't guess
14676 that from the architecture version alone. */
14693 "ARM v8-M.baseline",
14694 "ARM v8-M.mainline",
14698 "ARM v8.1-M.mainline",
14702 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14703 secondary_compat
= get_secondary_compatible_arch (ibfd
);
14704 secondary_compat_out
= get_secondary_compatible_arch (obfd
);
14705 arch_attr
= tag_cpu_arch_combine (ibfd
, out_attr
[i
].i
,
14706 &secondary_compat_out
,
14711 /* Return with error if failed to merge. */
14712 if (arch_attr
== -1)
14715 out_attr
[i
].i
= arch_attr
;
14717 set_secondary_compatible_arch (obfd
, secondary_compat_out
);
14719 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14720 if (out_attr
[i
].i
== saved_out_attr
)
14721 ; /* Leave the names alone. */
14722 else if (out_attr
[i
].i
== in_attr
[i
].i
)
14724 /* The output architecture has been changed to match the
14725 input architecture. Use the input names. */
14726 out_attr
[Tag_CPU_name
].s
= in_attr
[Tag_CPU_name
].s
14727 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_name
].s
)
14729 out_attr
[Tag_CPU_raw_name
].s
= in_attr
[Tag_CPU_raw_name
].s
14730 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_raw_name
].s
)
14735 out_attr
[Tag_CPU_name
].s
= NULL
;
14736 out_attr
[Tag_CPU_raw_name
].s
= NULL
;
14739 /* If we still don't have a value for Tag_CPU_name,
14740 make one up now. Tag_CPU_raw_name remains blank. */
14741 if (out_attr
[Tag_CPU_name
].s
== NULL
14742 && out_attr
[i
].i
< ARRAY_SIZE (name_table
))
14743 out_attr
[Tag_CPU_name
].s
=
14744 _bfd_elf_attr_strdup (obfd
, name_table
[out_attr
[i
].i
]);
14748 case Tag_ARM_ISA_use
:
14749 case Tag_THUMB_ISA_use
:
14750 case Tag_WMMX_arch
:
14751 case Tag_Advanced_SIMD_arch
:
14752 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14753 case Tag_ABI_FP_rounding
:
14754 case Tag_ABI_FP_exceptions
:
14755 case Tag_ABI_FP_user_exceptions
:
14756 case Tag_ABI_FP_number_model
:
14757 case Tag_FP_HP_extension
:
14758 case Tag_CPU_unaligned_access
:
14760 case Tag_MPextension_use
:
14762 case Tag_PAC_extension
:
14763 case Tag_BTI_extension
:
14765 case Tag_PACRET_use
:
14766 /* Use the largest value specified. */
14767 if (in_attr
[i
].i
> out_attr
[i
].i
)
14768 out_attr
[i
].i
= in_attr
[i
].i
;
14771 case Tag_ABI_align_preserved
:
14772 case Tag_ABI_PCS_RO_data
:
14773 /* Use the smallest value specified. */
14774 if (in_attr
[i
].i
< out_attr
[i
].i
)
14775 out_attr
[i
].i
= in_attr
[i
].i
;
14778 case Tag_ABI_align_needed
:
14779 if ((in_attr
[i
].i
> 0 || out_attr
[i
].i
> 0)
14780 && (in_attr
[Tag_ABI_align_preserved
].i
== 0
14781 || out_attr
[Tag_ABI_align_preserved
].i
== 0))
14783 /* This error message should be enabled once all non-conformant
14784 binaries in the toolchain have had the attributes set
14787 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14791 /* Fall through. */
14792 case Tag_ABI_FP_denormal
:
14793 case Tag_ABI_PCS_GOT_use
:
14794 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14795 value if greater than 2 (for future-proofing). */
14796 if ((in_attr
[i
].i
> 2 && in_attr
[i
].i
> out_attr
[i
].i
)
14797 || (in_attr
[i
].i
<= 2 && out_attr
[i
].i
<= 2
14798 && order_021
[in_attr
[i
].i
] > order_021
[out_attr
[i
].i
]))
14799 out_attr
[i
].i
= in_attr
[i
].i
;
14802 case Tag_Virtualization_use:
14803 /* The virtualization tag effectively stores two bits of
14804 information: the intended use of TrustZone (in bit 0), and the
14805 intended use of Virtualization (in bit 1). */
14806 if (out_attr[i].i == 0)
14807 out_attr[i].i = in_attr[i].i;
14808 else if (in_attr[i].i != 0
14809 && in_attr[i].i != out_attr[i].i)
14811 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14816 (_("error: %pB: unable to merge virtualization attributes "
14824 case Tag_CPU_arch_profile:
14825 if (out_attr[i].i != in_attr[i].i)
14827 /* 0 will merge with anything.
14828 'A' and 'S' merge to 'A'.
14829 'R' and 'S' merge to 'R'.
14830 'M' and 'A|R|S' is an error. */
14831 if (out_attr[i].i == 0
14832 || (out_attr[i].i == 'S'
14833 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14834 out_attr[i].i = in_attr[i].i;
14835 else if (in_attr[i].i == 0
14836 || (in_attr[i].i == 'S'
14837 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14838 ; /* Do nothing. */
14842 (_("error: %pB: conflicting architecture profiles %c/%c"),
14844 in_attr[i].i ? in_attr[i].i : '0',
14845 out_attr[i].i ? out_attr[i].i : '0');
14851 case Tag_DSP_extension:
14852 /* No need to change output value if any of:
14853 - pre (<=) ARMv5T input architecture (do not have DSP)
14854 - M input profile not ARMv7E-M and do not have DSP. */
14855 if (in_attr[Tag_CPU_arch].i <= 3
14856 || (in_attr[Tag_CPU_arch_profile].i == 'M'
14857 && in_attr[Tag_CPU_arch].i != 13
14858 && in_attr[i].i == 0))
14859 ; /* Do nothing. */
14860 /* Output value should be 0 if DSP part of architecture, ie.
14861 - post (>=) ARMv5te architecture output
14862 - A, R or S profile output or ARMv7E-M output architecture. */
14863 else if (out_attr[Tag_CPU_arch].i >= 4
14864 && (out_attr[Tag_CPU_arch_profile].i == 'A'
14865 || out_attr[Tag_CPU_arch_profile].i == 'R'
14866 || out_attr[Tag_CPU_arch_profile].i == 'S'
14867 || out_attr[Tag_CPU_arch].i == 13))
14869 /* Otherwise, DSP instructions are added and not part of output
14877 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14878 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14879 when it's 0. It might mean absence of FP hardware if
14880 Tag_FP_arch is zero. */
14882 #define VFP_VERSION_COUNT 9
14883 static const struct
14887 } vfp_versions[VFP_VERSION_COUNT] =
14903 /* If the output has no requirement about FP hardware,
14904 follow the requirement of the input. */
14905 if (out_attr[i].i == 0)
14907 /* This assert is still reasonable, we shouldn't
14908 produce the suspicious build attribute
14909 combination (See below for in_attr). */
14910 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14911 out_attr[i].i = in_attr[i].i;
14912 out_attr[Tag_ABI_HardFP_use].i
14913 = in_attr[Tag_ABI_HardFP_use].i;
14916 /* If the input has no requirement about FP hardware, do
14918 else if (in_attr[i].i == 0)
14920 /* We used to assert that Tag_ABI_HardFP_use was
14921 zero here, but we should never assert when
14922 consuming an object file that has suspicious
14923 build attributes. The single precision variant
14924 of 'no FP architecture' is still 'no FP
14925 architecture', so we just ignore the tag in this
14930 /* Both the input and the output have nonzero Tag_FP_arch.
14931 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14933 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14935 if (in_attr[Tag_ABI_HardFP_use].i == 0
14936 && out_attr[Tag_ABI_HardFP_use].i == 0)
14938 /* If the input and the output have different Tag_ABI_HardFP_use,
14939 the combination of them is 0 (implied by Tag_FP_arch). */
14940 else if (in_attr[Tag_ABI_HardFP_use].i
14941 != out_attr[Tag_ABI_HardFP_use].i)
14942 out_attr[Tag_ABI_HardFP_use].i = 0;
14944 /* Now we can handle Tag_FP_arch. */
14946 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14947 pick the biggest. */
14948 if (in_attr[i].i >= VFP_VERSION_COUNT
14949 && in_attr[i].i > out_attr[i].i)
14951 out_attr[i] = in_attr[i];
14954 /* The output uses the superset of input features
14955 (ISA version) and registers. */
14956 ver = vfp_versions[in_attr[i].i].ver;
14957 if (ver < vfp_versions[out_attr[i].i].ver)
14958 ver = vfp_versions[out_attr[i].i].ver;
14959 regs = vfp_versions[in_attr[i].i].regs;
14960 if (regs < vfp_versions[out_attr[i].i].regs)
14961 regs = vfp_versions[out_attr[i].i].regs;
14962 /* This assumes all possible supersets are also a valid
14964 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14966 if (regs == vfp_versions[newval].regs
14967 && ver == vfp_versions[newval].ver)
14970 out_attr[i].i = newval;
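/* The downward search above selects the attribute value whose (ver, regs)
   pair matches the computed superset exactly; per the comment above, the
   vfp_versions table is assumed to contain an entry for every such
   combination.  */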
14973 case Tag_PCS_config:
14974 if (out_attr[i].i == 0)
14975 out_attr[i].i = in_attr[i].i;
14976 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14978 /* It's sometimes ok to mix different configs, so this is only
14981 (_("warning: %pB: conflicting platform configuration"), ibfd);
14984 case Tag_ABI_PCS_R9_use:
14985 if (in_attr[i].i != out_attr[i].i
14986 && out_attr[i].i != AEABI_R9_unused
14987 && in_attr[i].i != AEABI_R9_unused)
14990 (_("error: %pB: conflicting use of R9"), ibfd);
14993 if (out_attr[i].i == AEABI_R9_unused)
14994 out_attr[i].i = in_attr[i].i;
14996 case Tag_ABI_PCS_RW_data:
14997 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14998 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14999 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
15002 (_("error: %pB: SB relative addressing conflicts with use of R9"),
15006 /* Use the smallest value specified. */
15007 if (in_attr[i].i < out_attr[i].i)
15008 out_attr[i].i = in_attr[i].i;
15010 case Tag_ABI_PCS_wchar_t:
15011 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
15012 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
15015 (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
15016 ibfd, in_attr[i].i, out_attr[i].i);
15018 else if (in_attr[i].i && !out_attr[i].i)
15019 out_attr[i].i = in_attr[i].i;
15021 case Tag_ABI_enum_size:
15022 if (in_attr[i].i != AEABI_enum_unused)
15024 if (out_attr[i].i == AEABI_enum_unused
15025 || out_attr[i].i == AEABI_enum_forced_wide)
15027 /* The existing object is compatible with anything.
15028 Use whatever requirements the new object has. */
15029 out_attr[i].i = in_attr[i].i;
15031 else if (in_attr[i].i != AEABI_enum_forced_wide
15032 && out_attr[i].i != in_attr[i].i
15033 && !elf_arm_tdata (obfd)->no_enum_size_warning)
15035 static const char *aeabi_enum_names[] =
15036 { "", "variable-size", "32-bit", "" };
15037 const char *in_name =
15038 in_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
15039 ? aeabi_enum_names[in_attr[i].i]
15041 const char *out_name =
15042 out_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
15043 ? aeabi_enum_names[out_attr[i].i]
15046 (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
15047 ibfd, in_name, out_name);
15051 case Tag_ABI_VFP_args:
15054 case Tag_ABI_WMMX_args:
15055 if (in_attr[i].i != out_attr[i].i)
15058 (_("error: %pB uses iWMMXt register arguments, %pB does not"),
15063 case Tag_compatibility:
15064 /* Merged in target-independent code. */
15066 case Tag_ABI_HardFP_use:
15067 /* This is handled along with Tag_FP_arch. */
15069 case Tag_ABI_FP_16bit_format:
15070 if (in_attr[i].i != 0 && out_attr[i].i != 0)
15072 if (in_attr[i].i != out_attr[i].i)
15075 (_("error: fp16 format mismatch between %pB and %pB"),
15080 if (in_attr[i].i != 0)
15081 out_attr[i].i = in_attr[i].i;
15085 /* A value of zero on input means that the divide instruction may
15086 be used if available in the base architecture as specified via
15087 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
15088 the user did not want divide instructions. A value of 2
15089 explicitly means that divide instructions were allowed in ARM
15090 and Thumb state. */
15091 if (in_attr[i].i == out_attr[i].i)
15092 /* Do nothing. */ ;
15093 else if (elf32_arm_attributes_forbid_div (in_attr)
15094 && !elf32_arm_attributes_accept_div (out_attr))
15096 else if (elf32_arm_attributes_forbid_div (out_attr)
15097 && elf32_arm_attributes_accept_div (in_attr))
15098 out_attr[i].i = in_attr[i].i;
15099 else if (in_attr[i].i == 2)
15100 out_attr[i].i = in_attr[i].i;
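/* Net effect of the divide-use merge above: a "no divide" request (1) only
   survives when the output does not already accept divide instructions,
   while an explicit "divide allowed" (2) always propagates to the output.  */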
15103 case Tag_MPextension_use_legacy:
15104 /* We don't output objects with Tag_MPextension_use_legacy - we
15105 move the value to Tag_MPextension_use. */
15106 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
15108 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
15111 (_("%pB has both the current and legacy "
15112 "Tag_MPextension_use attributes"),
15118 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
15119 out_attr[Tag_MPextension_use] = in_attr[i];
15123 case Tag_nodefaults:
15124 /* This tag is set if it exists, but the value is unused (and is
15125 typically zero). We don't actually need to do anything here -
15126 the merge happens automatically when the type flags are merged
15129 case Tag_also_compatible_with:
15130 /* Already done in Tag_CPU_arch. */
15132 case Tag_conformance:
15133 /* Keep the attribute if it matches. Throw it away otherwise.
15134 No attribute means no claim to conform. */
15135 if (!in_attr[i].s || !out_attr[i].s
15136 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
15137 out_attr[i].s = NULL;
15142 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
15145 /* If out_attr was copied from in_attr then it won't have a type yet. */
15146 if (in_attr[i].type && !out_attr[i].type)
15147 out_attr[i].type = in_attr[i].type;
15150 /* Merge Tag_compatibility attributes and any common GNU ones. */
15151 if (!_bfd_elf_merge_object_attributes (ibfd, info))
15154 /* Check for any attributes not known on ARM. */
15155 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
15161 /* Return TRUE if the two EABI versions are incompatible. */
15164 elf32_arm_versions_compatible (unsigned iver, unsigned over)
15166 /* v4 and v5 are the same spec before and after it was released,
15167 so allow mixing them. */
15168 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
15169 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
15172 return (iver == over);
15175 /* Merge backend specific data from an object file to the output
15176 object file when linking. */
15179 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
15181 /* Display the flags field. */
15184 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
15186 FILE * file = (FILE *) ptr;
15187 unsigned long flags;
15189 BFD_ASSERT (abfd != NULL && ptr != NULL);
15191 /* Print normal ELF private data. */
15192 _bfd_elf_print_private_bfd_data (abfd, ptr);
15194 flags = elf_elfheader (abfd)->e_flags;
15195 /* Ignore init flag - it may not be set, despite the flags field
15196 containing valid data. */
15198 fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);
15200 switch (EF_ARM_EABI_VERSION (flags))
15202 case EF_ARM_EABI_UNKNOWN:
15203 /* The following flag bits are GNU extensions and not part of the
15204 official ARM ELF extended ABI. Hence they are only decoded if
15205 the EABI version is not set. */
15206 if (flags & EF_ARM_INTERWORK)
15207 fprintf (file, _(" [interworking enabled]"));
15209 if (flags & EF_ARM_APCS_26)
15210 fprintf (file, " [APCS-26]");
15212 fprintf (file, " [APCS-32]");
15214 if (flags & EF_ARM_VFP_FLOAT)
15215 fprintf (file, _(" [VFP float format]"));
15217 fprintf (file, _(" [FPA float format]"));
15219 if (flags & EF_ARM_APCS_FLOAT)
15220 fprintf (file, _(" [floats passed in float registers]"));
15222 if (flags & EF_ARM_PIC)
15223 fprintf (file, _(" [position independent]"));
15225 if (flags & EF_ARM_NEW_ABI)
15226 fprintf (file, _(" [new ABI]"));
15228 if (flags & EF_ARM_OLD_ABI)
15229 fprintf (file, _(" [old ABI]"));
15231 if (flags & EF_ARM_SOFT_FLOAT)
15232 fprintf (file, _(" [software FP]"));
15234 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
15235 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
15236 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT);
15239 case EF_ARM_EABI_VER1:
15240 fprintf (file, _(" [Version1 EABI]"));
15242 if (flags & EF_ARM_SYMSARESORTED)
15243 fprintf (file, _(" [sorted symbol table]"));
15245 fprintf (file, _(" [unsorted symbol table]"));
15247 flags &= ~ EF_ARM_SYMSARESORTED;
15250 case EF_ARM_EABI_VER2:
15251 fprintf (file, _(" [Version2 EABI]"));
15253 if (flags & EF_ARM_SYMSARESORTED)
15254 fprintf (file, _(" [sorted symbol table]"));
15256 fprintf (file, _(" [unsorted symbol table]"));
15258 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
15259 fprintf (file, _(" [dynamic symbols use segment index]"));
15261 if (flags & EF_ARM_MAPSYMSFIRST)
15262 fprintf (file, _(" [mapping symbols precede others]"));
15264 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
15265 | EF_ARM_MAPSYMSFIRST);
15268 case EF_ARM_EABI_VER3:
15269 fprintf (file, _(" [Version3 EABI]"));
15272 case EF_ARM_EABI_VER4:
15273 fprintf (file, _(" [Version4 EABI]"));
15276 case EF_ARM_EABI_VER5:
15277 fprintf (file, _(" [Version5 EABI]"));
15279 if (flags & EF_ARM_ABI_FLOAT_SOFT)
15280 fprintf (file, _(" [soft-float ABI]"));
15282 if (flags & EF_ARM_ABI_FLOAT_HARD)
15283 fprintf (file, _(" [hard-float ABI]"));
15285 flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
15288 if (flags & EF_ARM_BE8)
15289 fprintf (file, _(" [BE8]"));
15291 if (flags & EF_ARM_LE8)
15292 fprintf (file, _(" [LE8]"));
15294 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
15298 fprintf (file, _(" <EABI version unrecognised>"));
15302 flags &= ~ EF_ARM_EABIMASK;
15304 if (flags & EF_ARM_RELEXEC)
15305 fprintf (file, _(" [relocatable executable]"));
15307 if (flags & EF_ARM_PIC)
15308 fprintf (file, _(" [position independent]"));
15310 if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
15311 fprintf (file, _(" [FDPIC ABI supplement]"));
15313 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);
15316 fprintf (file, _(" <Unrecognised flag bits set>"));
15318 fputc ('\n', file);
15324 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15326 switch (ELF_ST_TYPE (elf_sym->st_info))
15328 case STT_ARM_TFUNC:
15329 return ELF_ST_TYPE (elf_sym->st_info);
15331 case STT_ARM_16BIT:
15332 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15333 This allows us to distinguish between data used by Thumb instructions
15334 and non-data (which is probably code) inside Thumb regions of an
15336 if (type != STT_OBJECT && type != STT_TLS)
15337 return ELF_ST_TYPE (elf_sym->st_info);
15348 elf32_arm_gc_mark_hook (asection *sec,
15349 struct bfd_link_info *info,
15350 Elf_Internal_Rela *rel,
15351 struct elf_link_hash_entry *h,
15352 Elf_Internal_Sym *sym)
15355 switch (ELF32_R_TYPE (rel->r_info))
15357 case R_ARM_GNU_VTINHERIT:
15358 case R_ARM_GNU_VTENTRY:
15362 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15365 /* Look through the relocs for a section during the first phase. */
15368 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15369 asection *sec, const Elf_Internal_Rela *relocs)
15371 Elf_Internal_Shdr *symtab_hdr;
15372 struct elf_link_hash_entry **sym_hashes;
15373 const Elf_Internal_Rela *rel;
15374 const Elf_Internal_Rela *rel_end;
15377 struct elf32_arm_link_hash_table *htab;
15379 bool may_become_dynamic_p;
15380 bool may_need_local_target_p;
15381 unsigned long nsyms;
15383 if (bfd_link_relocatable (info))
15386 BFD_ASSERT (is_arm_elf (abfd));
15388 htab = elf32_arm_hash_table (info);
15394 if (htab->root.dynobj == NULL)
15395 htab->root.dynobj = abfd;
15396 if (!create_ifunc_sections (info))
15399 dynobj = htab->root.dynobj;
15401 symtab_hdr = & elf_symtab_hdr (abfd);
15402 sym_hashes = elf_sym_hashes (abfd);
15403 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15405 rel_end = relocs + sec->reloc_count;
15406 for (rel = relocs; rel < rel_end; rel++)
15408 Elf_Internal_Sym *isym;
15409 struct elf_link_hash_entry *h;
15410 struct elf32_arm_link_hash_entry *eh;
15411 unsigned int r_symndx;
15414 r_symndx = ELF32_R_SYM (rel->r_info);
15415 r_type = ELF32_R_TYPE (rel->r_info);
15416 r_type = arm_real_reloc_type (htab, r_type);
15418 if (r_symndx >= nsyms
15419 /* PR 9934: It is possible to have relocations that do not
15420 refer to symbols, thus it is also possible to have an
15421 object file containing relocations but no symbol table. */
15422 && (r_symndx > STN_UNDEF || nsyms > 0))
15424 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15433 if (r_symndx < symtab_hdr->sh_info)
15435 /* A local symbol. */
15436 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
15443 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15444 while (h->root.type == bfd_link_hash_indirect
15445 || h->root.type == bfd_link_hash_warning)
15446 h = (struct elf_link_hash_entry *) h->root.u.i.link;
15450 eh = (struct elf32_arm_link_hash_entry *) h;
15452 call_reloc_p = false;
15453 may_become_dynamic_p = false;
15454 may_need_local_target_p = false;
15456 /* Could be done earlier, if h were already available. */
15457 r_type = elf32_arm_tls_transition (info, r_type, h);
15460 case R_ARM_GOTOFFFUNCDESC:
15464 if (!elf32_arm_allocate_local_sym_info (abfd))
15466 if (r_symndx >= elf32_arm_num_entries (abfd))
15468 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].gotofffuncdesc_cnt += 1;
15469 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
15473 eh->fdpic_cnts.gotofffuncdesc_cnt++;
15478 case R_ARM_GOTFUNCDESC:
15482 /* Such a relocation is not supposed to be generated
15483 by gcc on a static function. */
15484 /* Anyway if needed it could be handled. */
15489 eh->fdpic_cnts.gotfuncdesc_cnt++;
15494 case R_ARM_FUNCDESC:
15498 if (!elf32_arm_allocate_local_sym_info (abfd))
15500 if (r_symndx >= elf32_arm_num_entries (abfd))
15502 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_cnt += 1;
15503 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
15507 eh->fdpic_cnts.funcdesc_cnt++;
15513 case R_ARM_GOT_PREL:
15514 case R_ARM_TLS_GD32:
15515 case R_ARM_TLS_GD32_FDPIC:
15516 case R_ARM_TLS_IE32:
15517 case R_ARM_TLS_IE32_FDPIC:
15518 case R_ARM_TLS_GOTDESC:
15519 case R_ARM_TLS_DESCSEQ:
15520 case R_ARM_THM_TLS_DESCSEQ:
15521 case R_ARM_TLS_CALL:
15522 case R_ARM_THM_TLS_CALL:
15523 /* This symbol requires a global offset table entry. */
15525 int tls_type, old_tls_type;
15529 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15530 case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15532 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15533 case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15535 case R_ARM_TLS_GOTDESC:
15536 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15537 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15538 tls_type = GOT_TLS_GDESC; break;
15540 default: tls_type = GOT_NORMAL; break;
15543 if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15544 info->flags |= DF_STATIC_TLS;
15549 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15553 /* This is a global offset table entry for a local symbol. */
15554 if (!elf32_arm_allocate_local_sym_info (abfd))
15556 if (r_symndx >= elf32_arm_num_entries (abfd))
15558 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15563 elf_local_got_refcounts (abfd)[r_symndx] += 1;
15564 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15567 /* If a variable is accessed with both tls methods, two
15568 slots may be created. */
15569 if (GOT_TLS_GD_ANY_P (old_tls_type)
15570 && GOT_TLS_GD_ANY_P (tls_type))
15571 tls_type |= old_tls_type;
15573 /* We will already have issued an error message if there
15574 is a TLS/non-TLS mismatch, based on the symbol
15575 type. So just combine any TLS types needed. */
15576 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15577 && tls_type != GOT_NORMAL)
15578 tls_type |= old_tls_type;
15580 /* If the symbol is accessed in both IE and GDESC
15581 method, we're able to relax. Turn off the GDESC flag,
15582 without messing up with any other kind of tls types
15583 that may be involved. */
15584 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15585 tls_type &= ~GOT_TLS_GDESC;
15587 if (old_tls_type != tls_type)
15590 elf32_arm_hash_entry (h)->tls_type = tls_type;
15592 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15595 /* Fall through. */
15597 case R_ARM_TLS_LDM32:
15598 case R_ARM_TLS_LDM32_FDPIC:
15599 if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15600 htab->tls_ldm_got.refcount++;
15601 /* Fall through. */
15603 case R_ARM_GOTOFF32:
15605 if (htab->root.sgot == NULL
15606 && !create_got_section (htab->root.dynobj, info))
15615 case R_ARM_THM_CALL:
15616 case R_ARM_THM_JUMP24:
15617 case R_ARM_THM_JUMP19:
15618 call_reloc_p = true;
15619 may_need_local_target_p = true;
15623 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15624 ldr __GOTT_INDEX__ offsets. */
15625 if (htab->root.target_os != is_vxworks)
15627 may_need_local_target_p = true;
15630 else goto jump_over;
15632 /* Fall through. */
15634 case R_ARM_MOVW_ABS_NC:
15635 case R_ARM_MOVT_ABS:
15636 case R_ARM_THM_MOVW_ABS_NC:
15637 case R_ARM_THM_MOVT_ABS:
15638 if (bfd_link_pic (info))
15641 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15642 abfd, elf32_arm_howto_table_1[r_type].name,
15643 (h) ? h->root.root.string : "a local symbol");
15644 bfd_set_error (bfd_error_bad_value);
15648 /* Fall through. */
15650 case R_ARM_ABS32_NOI:
15652 if (h != NULL && bfd_link_executable (info))
15654 h->pointer_equality_needed = 1;
15656 /* Fall through. */
15658 case R_ARM_REL32_NOI:
15659 case R_ARM_MOVW_PREL_NC:
15660 case R_ARM_MOVT_PREL:
15661 case R_ARM_THM_MOVW_PREL_NC:
15662 case R_ARM_THM_MOVT_PREL:
15664 /* Should the interworking branches be listed here? */
15665 if ((bfd_link_pic (info)
15667 && (sec->flags & SEC_ALLOC) != 0)
15670 && elf32_arm_howto_from_type (r_type)->pc_relative)
15672 /* In shared libraries and relocatable executables,
15673 we treat local relative references as calls;
15674 see the related SYMBOL_CALLS_LOCAL code in
15675 allocate_dynrelocs. */
15676 call_reloc_p = true;
15677 may_need_local_target_p = true;
15680 /* We are creating a shared library or relocatable
15681 executable, and this is a reloc against a global symbol,
15682 or a non-PC-relative reloc against a local symbol.
15683 We may need to copy the reloc into the output. */
15684 may_become_dynamic_p = true;
15687 may_need_local_target_p = true;
15690 /* This relocation describes the C++ object vtable hierarchy.
15691 Reconstruct it for later use during GC. */
15692 case R_ARM_GNU_VTINHERIT:
15693 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15697 /* This relocation describes which C++ vtable entries are actually
15698 used. Record for later use during GC. */
15699 case R_ARM_GNU_VTENTRY:
15700 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15708 /* We may need a .plt entry if the function this reloc
15709 refers to is in a different object, regardless of the
15710 symbol's type. We can't tell for sure yet, because
15711 something later might force the symbol local. */
15713 else if (may_need_local_target_p)
15714 /* If this reloc is in a read-only section, we might
15715 need a copy reloc. We can't check reliably at this
15716 stage whether the section is read-only, as input
15717 sections have not yet been mapped to output sections.
15718 Tentatively set the flag for now, and correct in
15719 adjust_dynamic_symbol. */
15720 h->non_got_ref = 1;
15723 if (may_need_local_target_p
15724 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15726 union gotplt_union *root_plt;
15727 struct arm_plt_info *arm_plt;
15728 struct arm_local_iplt_info *local_iplt;
15732 root_plt = &h->plt;
15733 arm_plt = &eh->plt;
15737 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15738 if (local_iplt == NULL)
15740 root_plt = &local_iplt->root;
15741 arm_plt = &local_iplt->arm;
15744 /* If the symbol is a function that doesn't bind locally,
15745 this relocation will need a PLT entry. */
15746 if (root_plt->refcount != -1)
15747 root_plt->refcount += 1;
15750 arm_plt->noncall_refcount++;
15752 /* It's too early to use htab->use_blx here, so we have to
15753 record possible blx references separately from
15754 relocs that definitely need a thumb stub. */
15756 if (r_type == R_ARM_THM_CALL)
15757 arm_plt->maybe_thumb_refcount += 1;
15759 if (r_type == R_ARM_THM_JUMP24
15760 || r_type == R_ARM_THM_JUMP19)
15761 arm_plt->thumb_refcount += 1;
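/* The PLT and Thumb reference counts recorded above are only provisional;
   they are consumed later (e.g. by allocate_dynrelocs_for_symbol below) once
   it is known whether each symbol binds locally.  */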
15764 if (may_become_dynamic_p)
15766 struct elf_dyn_relocs *p, **head;
15768 /* Create a reloc section in dynobj. */
15769 if (sreloc == NULL)
15771 sreloc = _bfd_elf_make_dynamic_reloc_section
15772 (sec, dynobj, 2, abfd, ! htab->use_rel);
15774 if (sreloc == NULL)
15778 /* If this is a global symbol, count the number of
15779 relocations we need for this symbol. */
15781 head = &h->dyn_relocs;
15784 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15790 if (p == NULL || p->sec != sec)
15792 size_t amt = sizeof *p;
15794 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15804 if (elf32_arm_howto_from_type (r_type)->pc_relative)
15807 if (h == NULL && htab->fdpic_p && !bfd_link_pic (info)
15808 && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI)
15810 /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
15811 that will become rofixup. */
15812 /* This is due to the fact that we suppose all will become rofixup. */
15814 (_("FDPIC does not yet support %s relocation"
15815 " to become dynamic for executable"),
15816 elf32_arm_howto_table_1[r_type].name);
15826 elf32_arm_update_relocs (asection *o,
15827 struct bfd_elf_section_reloc_data *reldata)
15829 void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
15830 void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
15831 const struct elf_backend_data *bed;
15832 _arm_elf_section_data *eado;
15833 struct bfd_link_order *p;
15834 bfd_byte *erela_head, *erela;
15835 Elf_Internal_Rela *irela_head, *irela;
15836 Elf_Internal_Shdr *rel_hdr;
15838 unsigned int count;
15840 eado = get_arm_elf_section_data (o);
15842 if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
15846 bed = get_elf_backend_data (abfd);
15847 rel_hdr = reldata->hdr;
15849 if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
15851 swap_in = bed->s->swap_reloc_in;
15852 swap_out = bed->s->swap_reloc_out;
15854 else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
15856 swap_in = bed->s->swap_reloca_in;
15857 swap_out = bed->s->swap_reloca_out;
15862 erela_head = rel_hdr->contents;
15863 irela_head = (Elf_Internal_Rela *) bfd_zmalloc
15864 ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));
15866 erela = erela_head;
15867 irela = irela_head;
15870 for (p = o->map_head.link_order; p; p = p->next)
15872 if (p->type == bfd_section_reloc_link_order
15873 || p->type == bfd_symbol_reloc_link_order)
15875 (*swap_in) (abfd, erela, irela);
15876 erela += rel_hdr->sh_entsize;
15880 else if (p->type == bfd_indirect_link_order)
15882 struct bfd_elf_section_reloc_data *input_reldata;
15883 arm_unwind_table_edit *edit_list, *edit_tail;
15884 _arm_elf_section_data *eadi;
15889 i = p->u.indirect.section;
15891 eadi = get_arm_elf_section_data (i);
15892 edit_list = eadi->u.exidx.unwind_edit_list;
15893 edit_tail = eadi->u.exidx.unwind_edit_tail;
15894 offset = i->output_offset;
15896 if (eadi->elf.rel.hdr &&
15897 eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
15898 input_reldata = &eadi->elf.rel;
15899 else if (eadi->elf.rela.hdr &&
15900 eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
15901 input_reldata = &eadi->elf.rela;
15907 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15909 arm_unwind_table_edit *edit_node, *edit_next;
15911 bfd_vma reloc_index;
15913 (*swap_in) (abfd, erela, irela);
15914 reloc_index = (irela->r_offset - offset) / 8;
15917 edit_node = edit_list;
15918 for (edit_next = edit_list;
15919 edit_next && edit_next->index <= reloc_index;
15920 edit_next = edit_node->next)
15923 edit_node = edit_next;
15926 if (edit_node->type != DELETE_EXIDX_ENTRY
15927 || edit_node->index != reloc_index)
15929 irela->r_offset -= bias * 8;
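/* bias (maintained in lines elided here) tracks how many unwind table
   entries before this point have been edited away; each EXIDX entry is
   8 bytes, hence the scaling when shifting the surviving offsets.  */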
15934 erela += rel_hdr->sh_entsize;
15937 if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
15939 /* New relocation entity. */
15940 asection *text_sec = edit_tail->linked_section;
15941 asection *text_out = text_sec->output_section;
15942 bfd_vma exidx_offset = offset + i->size - 8;
15944 irela->r_addend = 0;
15945 irela->r_offset = exidx_offset;
15946 irela->r_info = ELF32_R_INFO
15947 (text_out->target_index, R_ARM_PREL31);
15954 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15956 (*swap_in) (abfd, erela, irela);
15957 erela += rel_hdr->sh_entsize;
15961 count += NUM_SHDR_ENTRIES (input_reldata->hdr);
15966 reldata->count = count;
15967 rel_hdr->sh_size = count * rel_hdr->sh_entsize;
15969 erela = erela_head;
15970 irela = irela_head;
15973 (*swap_out) (abfd, irela, erela);
15974 erela += rel_hdr->sh_entsize;
15981 /* Hashes are no longer valid. */
15982 free (reldata->hashes);
15983 reldata->hashes = NULL;
15986 /* Unwinding tables are not referenced directly. This pass marks them as
15987 required if the corresponding code section is marked. Similarly, ARMv8-M
15988 secure entry functions can only be referenced by SG veneers which are
15989 created after the GC process. They need to be marked in case they reside in
15990 their own section (as would be the case if code was compiled with
15991 -ffunction-sections). */
15994 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
15995 elf_gc_mark_hook_fn gc_mark_hook)
15998 Elf_Internal_Shdr **elf_shdrp;
15999 asection *cmse_sec;
16000 obj_attribute *out_attr;
16001 Elf_Internal_Shdr *symtab_hdr;
16002 unsigned i, sym_count, ext_start;
16003 const struct elf_backend_data *bed;
16004 struct elf_link_hash_entry **sym_hashes;
16005 struct elf32_arm_link_hash_entry *cmse_hash;
16006 bool again, is_v8m, first_bfd_browse = true;
16007 bool extra_marks_added = false;
16010 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
16012 out_attr = elf_known_obj_attributes_proc (info->output_bfd);
16013 is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
16014 && out_attr[Tag_CPU_arch_profile].i == 'M';
16016 /* Marking EH data may cause additional code sections to be marked,
16017 requiring multiple passes. */
16022 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
16026 if (! is_arm_elf (sub))
16029 elf_shdrp = elf_elfsections (sub);
16030 for (o = sub->sections; o != NULL; o = o->next)
16032 Elf_Internal_Shdr *hdr;
16034 hdr = &elf_section_data (o)->this_hdr;
16035 if (hdr->sh_type == SHT_ARM_EXIDX
16037 && hdr->sh_link < elf_numsections (sub)
16039 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
16042 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
16047 /* Mark section holding ARMv8-M secure entry functions. We mark all
16048 of them so no need for a second browsing. */
16049 if (is_v8m && first_bfd_browse)
16051 bool debug_sec_need_to_be_marked = false;
16053 sym_hashes = elf_sym_hashes (sub);
16054 bed = get_elf_backend_data (sub);
16055 symtab_hdr = &elf_tdata (sub)->symtab_hdr;
16056 sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
16057 ext_start = symtab_hdr->sh_info;
16059 /* Scan symbols. */
16060 for (i = ext_start; i < sym_count; i++)
16062 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
16063 if (cmse_hash == NULL)
16066 /* Assume it is a special symbol. If not, cmse_scan will
16067 warn about it and user can do something about it. */
16068 if (startswith (cmse_hash->root.root.root.string,
16071 cmse_sec = cmse_hash->root.root.u.def.section;
16072 if (!cmse_sec->gc_mark
16073 && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
16075 /* The debug sections related to these secure entry
16076 functions are marked on enabling below flag. */
16077 debug_sec_need_to_be_marked = true;
16081 if (debug_sec_need_to_be_marked)
16083 /* Looping over all the sections of the object file containing
16084 Armv8-M secure entry functions and marking all the debug
16086 for (isec = sub->sections; isec != NULL; isec = isec->next)
16088 /* If not a debug section, skip it. */
16089 if (!isec->gc_mark && (isec->flags & SEC_DEBUGGING))
16092 extra_marks_added = true;
16095 debug_sec_need_to_be_marked = false;
16100 first_bfd_browse = false;
16103 /* PR 30354: If we have added extra marks then make sure that any
16104 dependencies of the newly marked sections are also marked. */
16105 if (extra_marks_added)
16106 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
16111 /* Treat mapping symbols as special target symbols. */
16114 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
16116 return bfd_is_arm_special_symbol_name (sym->name,
16117 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
16120 /* If the ELF symbol SYM might be a function in SEC, return the
16121 function size and set *CODE_OFF to the function's entry point,
16122 otherwise return zero. */
16124 static bfd_size_type
16125 elf32_arm_maybe_function_sym (const asymbol *sym, asection *sec,
16128 bfd_size_type size;
16129 elf_symbol_type * elf_sym = (elf_symbol_type *) sym;
16131 if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
16132 | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
16133 || sym->section != sec)
16136 size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size;
16138 if (!(sym->flags & BSF_SYNTHETIC))
16139 switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info))
16142 /* Ignore symbols created by the annobin plugin for gcc and clang.
16143 These symbols are hidden, local, notype and have a size of 0. */
16145 && sym->flags & BSF_LOCAL
16146 && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN)
16148 /* Fall through. */
16150 case STT_ARM_TFUNC:
16151 /* FIXME: Allow STT_GNU_IFUNC as well ? */
16157 if ((sym->flags & BSF_LOCAL)
16158 && bfd_is_arm_special_symbol_name (sym->name,
16159 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
16162 *code_off = sym->value;
16164 /* Do not return 0 for the function's size. */
16165 return size ? size : 1;
16170 elf32_arm_find_inliner_info (bfd * abfd,
16171 const char ** filename_ptr,
16172 const char ** functionname_ptr,
16173 unsigned int * line_ptr)
16176 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
16177 functionname_ptr, line_ptr,
16178 & elf_tdata (abfd)->dwarf2_find_line_info);
16182 /* Adjust a symbol defined by a dynamic object and referenced by a
16183 regular object. The current definition is in some section of the
16184 dynamic object, but we're not including those sections. We have to
16185 change the definition to something the rest of the link can
16189 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
16190 struct elf_link_hash_entry * h)
16193 asection *s, *srel;
16194 struct elf32_arm_link_hash_entry * eh;
16195 struct elf32_arm_link_hash_table *globals;
16197 globals = elf32_arm_hash_table (info);
16198 if (globals == NULL)
16201 dynobj = elf_hash_table (info)->dynobj;
16203 /* Make sure we know what is going on here. */
16204 BFD_ASSERT (dynobj != NULL
16206 || h->type == STT_GNU_IFUNC
16210 && !h->def_regular)));
16212 eh = (struct elf32_arm_link_hash_entry *) h;
16214 /* If this is a function, put it in the procedure linkage table. We
16215 will fill in the contents of the procedure linkage table later,
16216 when we know the address of the .got section. */
16217 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
16219 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
16220 symbol binds locally. */
16221 if (h->plt.refcount <= 0
16222 || (h->type != STT_GNU_IFUNC
16223 && (SYMBOL_CALLS_LOCAL (info, h)
16224 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16225 && h->root.type == bfd_link_hash_undefweak))))
16227 /* This case can occur if we saw a PLT32 reloc in an input
16228 file, but the symbol was never referred to by a dynamic
16229 object, or if all references were garbage collected. In
16230 such a case, we don't actually need to build a procedure
16231 linkage table, and we can just do a PC24 reloc instead. */
16232 h->plt.offset = (bfd_vma) -1;
16233 eh->plt.thumb_refcount = 0;
16234 eh->plt.maybe_thumb_refcount = 0;
16235 eh->plt.noncall_refcount = 0;
16243 /* It's possible that we incorrectly decided a .plt reloc was
16244 needed for an R_ARM_PC24 or similar reloc to a non-function sym
16245 in check_relocs. We can't decide accurately between function
16246 and non-function syms in check-relocs; Objects loaded later in
16247 the link may change h->type. So fix it now. */
16248 h->plt.offset = (bfd_vma) -1;
16249 eh->plt.thumb_refcount = 0;
16250 eh->plt.maybe_thumb_refcount = 0;
16251 eh->plt.noncall_refcount = 0;
16254 /* If this is a weak symbol, and there is a real definition, the
16255 processor independent code will have arranged for us to see the
16256 real definition first, and we can just use the same value. */
16257 if (h->is_weakalias)
16259 struct elf_link_hash_entry *def = weakdef (h);
16260 BFD_ASSERT (def->root.type == bfd_link_hash_defined);
16261 h->root.u.def.section = def->root.u.def.section;
16262 h->root.u.def.value = def->root.u.def.value;
16266 /* If there are no non-GOT references, we do not need a copy
16268 if (!h->non_got_ref)
16271 /* This is a reference to a symbol defined by a dynamic object which
16272 is not a function. */
16274 /* If we are creating a shared library, we must presume that the
16275 only references to the symbol are via the global offset table.
16276 For such cases we need not do anything here; the relocations will
16277 be handled correctly by relocate_section. */
16278 if (bfd_link_pic (info))
16281 /* We must allocate the symbol in our .dynbss section, which will
16282 become part of the .bss section of the executable. There will be
16283 an entry for this symbol in the .dynsym section. The dynamic
16284 object will contain position independent code, so all references
16285 from the dynamic object to this symbol will go through the global
16286 offset table. The dynamic linker will use the .dynsym entry to
16287 determine the address it must put in the global offset table, so
16288 both the dynamic object and the regular object will refer to the
16289 same memory location for the variable. */
16290 /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
16291 linker to copy the initial value out of the dynamic object and into
16292 the runtime process image. We need to remember the offset into the
16293 .rel(a).bss section we are going to use. */
16294 if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
16296 s = globals->root.sdynrelro;
16297 srel = globals->root.sreldynrelro;
16301 s = globals->root.sdynbss;
16302 srel = globals->root.srelbss;
16304 if (info->nocopyreloc == 0
16305 && (h->root.u.def.section->flags & SEC_ALLOC) != 0
16308 elf32_arm_allocate_dynrelocs (info, srel, 1);
16312 return _bfd_elf_adjust_dynamic_copy (info, h, s);
16315 /* Allocate space in .plt, .got and associated reloc sections for
16319 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
16321 struct bfd_link_info *info;
16322 struct elf32_arm_link_hash_table *htab;
16323 struct elf32_arm_link_hash_entry *eh;
16324 struct elf_dyn_relocs *p;
16326 if (h->root.type == bfd_link_hash_indirect)
16329 eh = (struct elf32_arm_link_hash_entry *) h;
16331 info = (struct bfd_link_info *) inf;
16332 htab = elf32_arm_hash_table (info);
16336 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
16337 && h->plt.refcount > 0)
16339 /* Make sure this symbol is output as a dynamic symbol.
16340 Undefined weak syms won't yet be marked as dynamic. */
16341 if (h->dynindx == -1 && !h->forced_local
16342 && h->root.type == bfd_link_hash_undefweak)
16344 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16348 /* If the call in the PLT entry binds locally, the associated
16349 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16350 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
16351 than the .plt section. */
16352 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
16355 if (eh->plt.noncall_refcount == 0
16356 && SYMBOL_REFERENCES_LOCAL (info, h))
16357 /* All non-call references can be resolved directly.
16358 This means that they can (and in some cases, must)
16359 resolve directly to the run-time target, rather than
16360 to the PLT. That in turns means that any .got entry
16361 would be equal to the .igot.plt entry, so there's
16362 no point having both. */
16363 h->got.refcount = 0;
16366 if (bfd_link_pic (info)
16368 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
16370 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
16372 /* If this symbol is not defined in a regular file, and we are
16373 not generating a shared library, then set the symbol to this
16374 location in the .plt. This is required to make function
16375 pointers compare as equal between the normal executable and
16376 the shared library. */
16377 if (! bfd_link_pic (info)
16378 && !h->def_regular)
16380 h->root.u.def.section = htab->root.splt;
16381 h->root.u.def.value = h->plt.offset;
16383 /* Make sure the function is not marked as Thumb, in case
16384 it is the target of an ABS32 relocation, which will
16385 point to the PLT entry. */
16386 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16389 /* VxWorks executables have a second set of relocations for
16390 each PLT entry. They go in a separate relocation section,
16391 which is processed by the kernel loader. */
16392 if (htab->root.target_os == is_vxworks && !bfd_link_pic (info))
16394 /* There is a relocation for the initial PLT entry:
16395 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
16396 if (h->plt.offset == htab->plt_header_size)
16397 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
16399 /* There are two extra relocations for each subsequent
16400 PLT entry: an R_ARM_32 relocation for the GOT entry,
16401 and an R_ARM_32 relocation for the PLT entry. */
16402 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
16407 h->plt.offset = (bfd_vma) -1;
16413 h->plt.offset = (bfd_vma) -1;
16417 eh = (struct elf32_arm_link_hash_entry *) h;
16418 eh->tlsdesc_got = (bfd_vma) -1;
16420 if (h->got.refcount > 0)
16424 int tls_type = elf32_arm_hash_entry (h)->tls_type;
16427 /* Make sure this symbol is output as a dynamic symbol.
16428 Undefined weak syms won't yet be marked as dynamic. */
16429 if (htab->root.dynamic_sections_created
16430 && h->dynindx == -1
16431 && !h->forced_local
16432 && h->root.type == bfd_link_hash_undefweak)
16434 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16438 s = htab->root.sgot;
16439 h->got.offset = s->size;
16441 if (tls_type == GOT_UNKNOWN)
16444 if (tls_type == GOT_NORMAL)
16445 /* Non-TLS symbols need one GOT slot. */
16449 if (tls_type & GOT_TLS_GDESC)
16451 /* R_ARM_TLS_DESC needs 2 GOT slots. */
16453 = (htab->root.sgotplt->size
16454 - elf32_arm_compute_jump_table_size (htab));
16455 htab->root.sgotplt->size += 8;
16456 h->got.offset = (bfd_vma) -2;
16457 /* plt.got_offset needs to know there's a TLS_DESC
16458 reloc in the middle of .got.plt. */
16459 htab->num_tls_desc++;
16462 if (tls_type & GOT_TLS_GD)
16464 /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16465 consecutive GOT slots. If the symbol is both GD
16466 and GDESC, got.offset may have been
16468 h->got.offset = s->size;
16472 if (tls_type & GOT_TLS_IE)
16473 /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16478 dyn = htab->root.dynamic_sections_created;
16481 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
16482 && (!bfd_link_pic (info)
16483 || !SYMBOL_REFERENCES_LOCAL (info, h)))
16486 if (tls_type != GOT_NORMAL
16487 && (bfd_link_dll (info) || indx != 0)
16488 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16489 || h->root.type != bfd_link_hash_undefweak))
16491 if (tls_type & GOT_TLS_IE)
16492 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16494 if (tls_type & GOT_TLS_GD)
16495 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16497 if (tls_type & GOT_TLS_GDESC)
16499 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
16500 /* GDESC needs a trampoline to jump to. */
16501 htab->tls_trampoline = -1;
16504 /* Only GD needs it. GDESC just emits one relocation per
16506 if ((tls_type & GOT_TLS_GD) && indx != 0)
16507 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16509 else if (((indx != -1) || htab->fdpic_p)
16510 && !SYMBOL_REFERENCES_LOCAL (info, h))
16512 if (htab->root.dynamic_sections_created)
16513 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
16514 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16516 else if (h->type == STT_GNU_IFUNC
16517 && eh->plt.noncall_refcount == 0)
16518 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16519 they all resolve dynamically instead. Reserve room for the
16520 GOT entry's R_ARM_IRELATIVE relocation. */
16521 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
16522 else if (bfd_link_pic (info)
16523 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16524 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
16525 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16526 else if (htab->fdpic_p && tls_type == GOT_NORMAL)
16527 /* Reserve room for rofixup for FDPIC executable. */
16528 /* TLS relocs do not need space since they are completely
16530 htab->srofixup->size += 4;
16533 h->got.offset = (bfd_vma) -1;
16535 /* FDPIC support. */
16536 if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
16538 /* Symbol mustn't be exported. */
16539 if (h->dynindx != -1)
16542 /* We only allocate one function descriptor with its associated
16544 if (eh->fdpic_cnts.funcdesc_offset == -1)
16546 asection *s = htab->root.sgot;
16548 eh->fdpic_cnts.funcdesc_offset = s->size;
16550 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16551 if (bfd_link_pic (info))
16552 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16554 htab->srofixup->size += 8;
16558 if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
16560 asection *s = htab->root.sgot;
16562 if (htab->root.dynamic_sections_created && h->dynindx == -1
16563 && !h->forced_local)
16564 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16567 if (h->dynindx == -1)
16569 /* We only allocate one function descriptor with its
16570 associated relocation. */
16571 if (eh->fdpic_cnts.funcdesc_offset == -1)
16574 eh->fdpic_cnts.funcdesc_offset = s->size;
16576 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16578 if (bfd_link_pic (info))
16579 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16581 htab->srofixup->size += 8;
16585 /* Add one entry into the GOT and a R_ARM_FUNCDESC or
16586 R_ARM_RELATIVE/rofixup relocation on it. */
16587 eh->fdpic_cnts.gotfuncdesc_offset = s->size;
16589 if (h->dynindx == -1 && !bfd_link_pic (info))
16590 htab->srofixup->size += 4;
16592 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16595 if (eh->fdpic_cnts.funcdesc_cnt > 0)
16597 if (htab->root.dynamic_sections_created && h->dynindx == -1
16598 && !h->forced_local)
16599 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16602 if (h->dynindx == -1)
16604 /* We only allocate one function descriptor with its
16605 associated relocation. */
16606 if (eh->fdpic_cnts.funcdesc_offset == -1)
16608 asection *s = htab->root.sgot;
16610 eh->fdpic_cnts.funcdesc_offset = s->size;
16612 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16614 if (bfd_link_pic (info))
16615 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16617 htab->srofixup->size += 8;
16620 if (h->dynindx == -1 && !bfd_link_pic (info))
16622 /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
16623 htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
16627 /* Will need one dynamic reloc per reference. will be either
16628 R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols. */
16629 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
16630 eh->fdpic_cnts.funcdesc_cnt);
16634 /* Allocate stubs for exported Thumb functions on v4t. */
16635 if (!htab->use_blx && h->dynindx != -1
16637 && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
16638 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
16640 struct elf_link_hash_entry * th;
16641 struct bfd_link_hash_entry * bh;
16642 struct elf_link_hash_entry * myh;
16646 /* Create a new symbol to register the real location of the function. */
16647 s = h->root.u.def.section;
16648 sprintf (name, "__real_%s", h->root.root.string);
16649 _bfd_generic_link_add_one_symbol (info, s->owner,
16650 name, BSF_GLOBAL, s,
16651 h->root.u.def.value,
16652 NULL, true, false, &bh);
16654 myh = (struct elf_link_hash_entry *) bh;
16655 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16656 myh->forced_local = 1;
16657 ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
16658 eh->export_glue = myh;
16659 th = record_arm_to_thumb_glue (info, h);
16660 /* Point the symbol at the stub. */
16661 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
16662 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16663 h->root.u.def.section = th->root.u.def.section;
16664 h->root.u.def.value = th->root.u.def.value & ~1;
16667 if (h->dyn_relocs == NULL)
16670 /* In the shared -Bsymbolic case, discard space allocated for
16671 dynamic pc-relative relocs against symbols which turn out to be
16672 defined in regular objects. For the normal shared case, discard
16673 space for pc-relative relocs that have become local due to symbol
16674 visibility changes. */
16676 if (bfd_link_pic (info)
16679 /* Relocs that use pc_count are PC-relative forms, which will appear
16680 on something like ".long foo - ." or "movw REG, foo - .". We want
16681 calls to protected symbols to resolve directly to the function
16682 rather than going via the plt. If people want function pointer
16683 comparisons to work as expected then they should avoid writing
16684 assembly like ".long foo - .". */
16685 if (SYMBOL_CALLS_LOCAL (info, h))
16687 struct elf_dyn_relocs **pp;
16689 for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16691 p->count -= p->pc_count;
16700 if (htab->root.target_os == is_vxworks)
16702 struct elf_dyn_relocs **pp;
16704 for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16706 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
16713 /* Also discard relocs on undefined weak syms with non-default
16715 if (h->dyn_relocs != NULL
16716 && h->root.type == bfd_link_hash_undefweak)
16718 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16719 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16720 h->dyn_relocs = NULL;
16722 /* Make sure undefined weak symbols are output as a dynamic
16724 else if (htab->root.dynamic_sections_created && h->dynindx == -1
16725 && !h->forced_local)
16727 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16734 /* For the non-shared case, discard space for relocs against
16735 symbols which turn out to need copy relocs or are not
16738 if (!h->non_got_ref
16739 && ((h->def_dynamic
16740 && !h->def_regular)
16741 || (htab->root.dynamic_sections_created
16742 && (h->root.type == bfd_link_hash_undefweak
16743 || h->root.type == bfd_link_hash_undefined))))
16745 /* Make sure this symbol is output as a dynamic symbol.
16746 Undefined weak syms won't yet be marked as dynamic. */
16747 if (h->dynindx == -1 && !h->forced_local
16748 && h->root.type == bfd_link_hash_undefweak)
16750 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16754 /* If that succeeded, we know we'll be keeping all the
16756 if (h->dynindx != -1)
16760 h->dyn_relocs = NULL;
16765 /* Finally, allocate space. */
16766 for (p = h->dyn_relocs; p != NULL; p = p->next)
16768 asection *sreloc = elf_section_data (p->sec)->sreloc;
16770 if (h->type == STT_GNU_IFUNC
16771 && eh->plt.noncall_refcount == 0
16772 && SYMBOL_REFERENCES_LOCAL (info, h))
16773 elf32_arm_allocate_irelocs (info, sreloc, p->count);
16774 else if (h->dynindx != -1
16775 && (!bfd_link_pic (info) || !info->symbolic || !h->def_regular))
16776 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16777 else if (htab->fdpic_p && !bfd_link_pic (info))
16778 htab->srofixup->size += 4 * p->count;
16780 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16787 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16790 struct elf32_arm_link_hash_table *globals;
16792 globals = elf32_arm_hash_table (info);
16793 if (globals == NULL)
16796 globals->byteswap_code = byteswap_code;
16799 /* Set the sizes of the dynamic sections. */
16802 elf32_arm_late_size_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
16803 struct bfd_link_info * info)
16809 struct elf32_arm_link_hash_table *htab;
16811 htab = elf32_arm_hash_table (info);
16815 dynobj = elf_hash_table (info)->dynobj;
16816 if (dynobj == NULL)
16819 check_use_blx (htab);
16821 if (elf_hash_table (info)->dynamic_sections_created)
16823 /* Set the contents of the .interp section to the interpreter. */
16824 if (bfd_link_executable (info) && !info->nointerp)
16826 s = bfd_get_linker_section (dynobj, ".interp");
16827 BFD_ASSERT (s != NULL);
16828 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
16829 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
16834 /* Set up .got offsets for local syms, and space for local dynamic
16836 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16838 bfd_signed_vma *local_got;
16839 bfd_signed_vma *end_local_got;
16840 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
16841 char *local_tls_type;
16842 bfd_vma *local_tlsdesc_gotent;
16843 bfd_size_type locsymcount;
16844 Elf_Internal_Shdr *symtab_hdr;
16846 unsigned int symndx;
16847 struct fdpic_local *local_fdpic_cnts;
16849 if (! is_arm_elf (ibfd))
16852 for (s = ibfd->sections; s != NULL; s = s->next)
16854 struct elf_dyn_relocs *p;
16856 for (p = (struct elf_dyn_relocs *)
16857 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
16859 if (!bfd_is_abs_section (p->sec)
16860 && bfd_is_abs_section (p->sec->output_section))
16862 /* Input section has been discarded, either because
16863 it is a copy of a linkonce section or due to
16864 linker script /DISCARD/, so we'll be discarding
16867 else if (htab->root.target_os == is_vxworks
16868 && strcmp (p->sec->output_section->name,
16871 /* Relocations in vxworks .tls_vars sections are
16872 handled specially by the loader. */
16874 else if (p->count != 0)
16876 srel = elf_section_data (p->sec)->sreloc;
16877 if (htab->fdpic_p && !bfd_link_pic (info))
16878 htab->srofixup->size += 4 * p->count;
16880 elf32_arm_allocate_dynrelocs (info, srel, p->count);
16881 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
16882 info->flags |= DF_TEXTREL;
16887 local_got = elf_local_got_refcounts (ibfd);
16888 if (local_got == NULL)
16891 symtab_hdr = & elf_symtab_hdr (ibfd);
16892 locsymcount = symtab_hdr->sh_info;
16893 end_local_got = local_got + locsymcount;
16894 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
16895 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
16896 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
16897 local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
16899 s = htab->root.sgot;
16900 srel = htab->root.srelgot;
16901 for (; local_got < end_local_got;
16902 ++local_got, ++local_iplt_ptr, ++local_tls_type,
16903 ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
16905 if (symndx >= elf32_arm_num_entries (ibfd))
16908 *local_tlsdesc_gotent = (bfd_vma) -1;
16909 local_iplt = *local_iplt_ptr;
16911 /* FDPIC support. */
16912 if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
16914 if (local_fdpic_cnts->funcdesc_offset == -1)
16916 local_fdpic_cnts->funcdesc_offset = s->size;
16919 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16920 if (bfd_link_pic (info))
16921 elf32_arm_allocate_dynrelocs (info, srel, 1);
16923 htab->srofixup->size += 8;
16927 if (local_fdpic_cnts->funcdesc_cnt > 0)
16929 if (local_fdpic_cnts->funcdesc_offset == -1)
16931 local_fdpic_cnts->funcdesc_offset = s->size;
16934 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16935 if (bfd_link_pic (info))
16936 elf32_arm_allocate_dynrelocs (info, srel, 1);
16938 htab->srofixup->size += 8;
16941 /* We will add n R_ARM_RELATIVE relocations or n rofixups. */
16942 if (bfd_link_pic (info))
16943 elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
16945 htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
16948 if (local_iplt != NULL)
16950 struct elf_dyn_relocs *p;
16952 if (local_iplt->root.refcount > 0)
16954 elf32_arm_allocate_plt_entry (info, true,
16957 if (local_iplt->arm.noncall_refcount == 0)
16958 /* All references to the PLT are calls, so all
16959 non-call references can resolve directly to the
16960 run-time target. This means that the .got entry
16961 would be the same as the .igot.plt entry, so there's
16962 no point creating both. */
16967 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
16968 local_iplt->root.offset = (bfd_vma) -1;
16971 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
16975 psrel = elf_section_data (p->sec)->sreloc;
16976 if (local_iplt->arm.noncall_refcount == 0)
16977 elf32_arm_allocate_irelocs (info, psrel, p->count);
16979 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
16982 if (*local_got > 0)
16984 Elf_Internal_Sym *isym;
16986 *local_got = s->size;
16987 if (*local_tls_type & GOT_TLS_GD)
16988 /* TLS_GD relocs need an 8-byte structure in the GOT. */
16990 if (*local_tls_type & GOT_TLS_GDESC)
16992 *local_tlsdesc_gotent = htab->root.sgotplt->size
16993 - elf32_arm_compute_jump_table_size (htab);
16994 htab->root.sgotplt->size += 8;
16995 *local_got = (bfd_vma) -2;
16996 /* plt.got_offset needs to know there's a TLS_DESC
16997 reloc in the middle of .got.plt. */
16998 htab->num_tls_desc++;
17000 if (*local_tls_type & GOT_TLS_IE)
17003 if (*local_tls_type & GOT_NORMAL)
17005 /* If the symbol is both GD and GDESC, *local_got
17006 may have been overwritten. */
17007 *local_got = s->size;
17011 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache, ibfd,
17016 /* If all references to an STT_GNU_IFUNC PLT are calls,
17017 then all non-call references, including this GOT entry,
17018 resolve directly to the run-time target. */
17019 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
17020 && (local_iplt == NULL
17021 || local_iplt->arm.noncall_refcount == 0))
17022 elf32_arm_allocate_irelocs (info, srel, 1);
17023 else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
17025 if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
17026 elf32_arm_allocate_dynrelocs (info, srel, 1);
17027 else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
17028 htab->srofixup->size += 4;
17030 if ((bfd_link_pic (info) || htab->fdpic_p)
17031 && *local_tls_type & GOT_TLS_GDESC)
17033 elf32_arm_allocate_dynrelocs (info,
17034 htab->root.srelplt, 1);
17035 htab->tls_trampoline = -1;
17040 *local_got = (bfd_vma) -1;
17044 if (htab->tls_ldm_got.refcount > 0)
17046 /* Allocate two GOT entries and one dynamic relocation (if necessary)
17047 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
17048 htab->tls_ldm_got.offset = htab->root.sgot->size;
17049 htab->root.sgot->size += 8;
17050 if (bfd_link_pic (info))
17051 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
17054 htab->tls_ldm_got.offset = -1;
17056 /* At the very end of the .rofixup section is a pointer to the GOT,
17057 reserve space for it. */
17058 if (htab->fdpic_p && htab->srofixup != NULL)
17059 htab->srofixup->size += 4;
17061 /* Allocate global sym .plt and .got entries, and space for global
17062 sym dynamic relocs. */
17063 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
17065 /* Here we rummage through the found bfds to collect glue information. */
17066 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
17068 if (! is_arm_elf (ibfd))
17071 /* Initialise mapping tables for code/data. */
17072 bfd_elf32_arm_init_maps (ibfd);
17074 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
17075 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
17076 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
17077 _bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
17080 /* Allocate space for the glue sections now that we've sized them. */
17081 bfd_elf32_arm_allocate_interworking_sections (info);
17083 /* For every jump slot reserved in the sgotplt, reloc_count is
17084 incremented. However, when we reserve space for TLS descriptors,
17085 it's not incremented, so in order to compute the space reserved
17086 for them, it suffices to multiply the reloc count by the jump
17088 if (htab->root.srelplt)
17089 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size (htab);
17091 if (htab->tls_trampoline)
17093 if (htab->root.splt->size == 0)
17094 htab->root.splt->size += htab->plt_header_size;
17096 htab->tls_trampoline = htab->root.splt->size;
17097 htab->root.splt->size += htab->plt_entry_size;
17099 /* If we're not using lazy TLS relocations, don't generate the
17100 PLT and GOT entries they require. */
17101 if ((info->flags & DF_BIND_NOW))
17102 htab
->root
.tlsdesc_plt
= 0;
17105 htab
->root
.tlsdesc_got
= htab
->root
.sgot
->size
;
17106 htab
->root
.sgot
->size
+= 4;
17108 htab
->root
.tlsdesc_plt
= htab
->root
.splt
->size
;
17109 htab
->root
.splt
->size
+= 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline
);
17113 /* The check_relocs and adjust_dynamic_symbol entry points have
17114 determined the sizes of the various dynamic sections. Allocate
17115 memory for them. */
17117 for (s
= dynobj
->sections
; s
!= NULL
; s
= s
->next
)
17121 if ((s
->flags
& SEC_LINKER_CREATED
) == 0)
17124 /* It's OK to base decisions on the section name, because none
17125 of the dynobj section names depend upon the input files. */
17126 name
= bfd_section_name (s
);
17128 if (s
== htab
->root
.splt
)
17130 /* Remember whether there is a PLT. */
17133 else if (startswith (name
, ".rel"))
17137 /* Remember whether there are any reloc sections other
17138 than .rel(a).plt and .rela.plt.unloaded. */
17139 if (s
!= htab
->root
.srelplt
&& s
!= htab
->srelplt2
)
17142 /* We use the reloc_count field as a counter if we need
17143 to copy relocs into the output file. */
17144 s
->reloc_count
= 0;
17147 else if (s
!= htab
->root
.sgot
17148 && s
!= htab
->root
.sgotplt
17149 && s
!= htab
->root
.iplt
17150 && s
!= htab
->root
.igotplt
17151 && s
!= htab
->root
.sdynbss
17152 && s
!= htab
->root
.sdynrelro
17153 && s
!= htab
->srofixup
)
17155 /* It's not one of our sections, so don't allocate space. */
17161 /* If we don't need this section, strip it from the
17162 output file. This is mostly to handle .rel(a).bss and
17163 .rel(a).plt. We must create both sections in
17164 create_dynamic_sections, because they must be created
17165 before the linker maps input sections to output
17166 sections. The linker does that before
17167 adjust_dynamic_symbol is called, and it is that
17168 function which decides whether anything needs to go
17169 into these sections. */
17170 s
->flags
|= SEC_EXCLUDE
;
17174 if ((s
->flags
& SEC_HAS_CONTENTS
) == 0)
17177 /* Allocate memory for the section contents. */
17178 s
->contents
= (unsigned char *) bfd_zalloc (dynobj
, s
->size
);
17179 if (s
->contents
== NULL
)
17184 return _bfd_elf_maybe_vxworks_add_dynamic_tags (output_bfd
, info
,
17188 /* Size sections even though they're not dynamic. We use it to setup
17189 _TLS_MODULE_BASE_, if needed. */
17192 elf32_arm_early_size_sections (bfd
*output_bfd
, struct bfd_link_info
*info
)
17195 struct elf32_arm_link_hash_table
*htab
;
17197 htab
= elf32_arm_hash_table (info
);
17199 if (bfd_link_relocatable (info
))
17202 tls_sec
= elf_hash_table (info
)->tls_sec
;
17206 struct elf_link_hash_entry
*tlsbase
;
17208 tlsbase
= elf_link_hash_lookup
17209 (elf_hash_table (info
), "_TLS_MODULE_BASE_", true, true, false);
17213 struct bfd_link_hash_entry
*bh
= NULL
;
17214 const struct elf_backend_data
*bed
17215 = get_elf_backend_data (output_bfd
);
17217 if (!(_bfd_generic_link_add_one_symbol
17218 (info
, output_bfd
, "_TLS_MODULE_BASE_", BSF_LOCAL
,
17219 tls_sec
, 0, NULL
, false,
17220 bed
->collect
, &bh
)))
17223 tlsbase
->type
= STT_TLS
;
17224 tlsbase
= (struct elf_link_hash_entry
*)bh
;
17225 tlsbase
->def_regular
= 1;
17226 tlsbase
->other
= STV_HIDDEN
;
17227 (*bed
->elf_backend_hide_symbol
) (info
, tlsbase
, true);
17231 if (htab
->fdpic_p
&& !bfd_link_relocatable (info
)
17232 && !bfd_elf_stack_segment_size (output_bfd
, info
,
17233 "__stacksize", DEFAULT_STACK_SIZE
))
17239 /* Finish up dynamic symbol handling. We set the contents of various
17240 dynamic sections here. */
17243 elf32_arm_finish_dynamic_symbol (bfd
* output_bfd
,
17244 struct bfd_link_info
* info
,
17245 struct elf_link_hash_entry
* h
,
17246 Elf_Internal_Sym
* sym
)
17248 struct elf32_arm_link_hash_table
*htab
;
17249 struct elf32_arm_link_hash_entry
*eh
;
17251 htab
= elf32_arm_hash_table (info
);
17253 eh
= (struct elf32_arm_link_hash_entry
*) h
;
17255 if (h
->plt
.offset
!= (bfd_vma
) -1)
17259 BFD_ASSERT (h
->dynindx
!= -1);
17260 if (! elf32_arm_populate_plt_entry (output_bfd
, info
, &h
->plt
, &eh
->plt
,
17265 if (!h
->def_regular
)
17267 /* Mark the symbol as undefined, rather than as defined in
17268 the .plt section. */
17269 sym
->st_shndx
= SHN_UNDEF
;
17270 /* If the symbol is weak we need to clear the value.
17271 Otherwise, the PLT entry would provide a definition for
17272 the symbol even if the symbol wasn't defined anywhere,
17273 and so the symbol would never be NULL. Leave the value if
17274 there were any relocations where pointer equality matters
17275 (this is a clue for the dynamic linker, to make function
17276 pointer comparisons work between an application and shared
17278 if (!h
->ref_regular_nonweak
|| !h
->pointer_equality_needed
)
17281 else if (eh
->is_iplt
&& eh
->plt
.noncall_refcount
!= 0)
17283 /* At least one non-call relocation references this .iplt entry,
17284 so the .iplt entry is the function's canonical address. */
17285 sym
->st_info
= ELF_ST_INFO (ELF_ST_BIND (sym
->st_info
), STT_FUNC
);
17286 ARM_SET_SYM_BRANCH_TYPE (sym
->st_target_internal
, ST_BRANCH_TO_ARM
);
17287 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
17288 (output_bfd
, htab
->root
.iplt
->output_section
));
17289 sym
->st_value
= (h
->plt
.offset
17290 + htab
->root
.iplt
->output_section
->vma
17291 + htab
->root
.iplt
->output_offset
);
17298 Elf_Internal_Rela rel
;
17300 /* This symbol needs a copy reloc. Set it up. */
17301 BFD_ASSERT (h
->dynindx
!= -1
17302 && (h
->root
.type
== bfd_link_hash_defined
17303 || h
->root
.type
== bfd_link_hash_defweak
));
17306 rel
.r_offset
= (h
->root
.u
.def
.value
17307 + h
->root
.u
.def
.section
->output_section
->vma
17308 + h
->root
.u
.def
.section
->output_offset
);
17309 rel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_COPY
);
17310 if (h
->root
.u
.def
.section
== htab
->root
.sdynrelro
)
17311 s
= htab
->root
.sreldynrelro
;
17313 s
= htab
->root
.srelbss
;
17314 elf32_arm_add_dynreloc (output_bfd
, info
, s
, &rel
);
17317 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
17318 and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17319 it is relative to the ".got" section. */
17320 if (h
== htab
->root
.hdynamic
17322 && htab
->root
.target_os
!= is_vxworks
17323 && h
== htab
->root
.hgot
))
17324 sym
->st_shndx
= SHN_ABS
;
17330 arm_put_trampoline (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
17332 const unsigned long *template, unsigned count
)
17336 for (ix
= 0; ix
!= count
; ix
++)
17338 unsigned long insn
= template[ix
];
17340 /* Emit mov pc,rx if bx is not permitted. */
17341 if (htab
->fix_v4bx
== 1 && (insn
& 0x0ffffff0) == 0x012fff10)
17342 insn
= (insn
& 0xf000000f) | 0x01a0f000;
17343 put_arm_insn (htab
, output_bfd
, insn
, (char *)contents
+ ix
*4);
17347 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17348 other variants, NaCl needs this entry in a static executable's
17349 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17350 zero. For .iplt really only the last bundle is useful, and .iplt
17351 could have a shorter first entry, with each individual PLT entry's
17352 relative branch calculated differently so it targets the last
17353 bundle instead of the instruction before it (labelled .Lplt_tail
17354 above). But it's simpler to keep the size and layout of PLT0
17355 consistent with the dynamic case, at the cost of some dead code at
17356 the start of .iplt and the one dead store to the stack at the start
17359 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
17360 asection
*plt
, bfd_vma got_displacement
)
17364 put_arm_insn (htab
, output_bfd
,
17365 elf32_arm_nacl_plt0_entry
[0]
17366 | arm_movw_immediate (got_displacement
),
17367 plt
->contents
+ 0);
17368 put_arm_insn (htab
, output_bfd
,
17369 elf32_arm_nacl_plt0_entry
[1]
17370 | arm_movt_immediate (got_displacement
),
17371 plt
->contents
+ 4);
17373 for (i
= 2; i
< ARRAY_SIZE (elf32_arm_nacl_plt0_entry
); ++i
)
17374 put_arm_insn (htab
, output_bfd
,
17375 elf32_arm_nacl_plt0_entry
[i
],
17376 plt
->contents
+ (i
* 4));
17379 /* Finish up the dynamic sections. */
17382 elf32_arm_finish_dynamic_sections (bfd
* output_bfd
, struct bfd_link_info
* info
)
17387 struct elf32_arm_link_hash_table
*htab
;
17389 htab
= elf32_arm_hash_table (info
);
17393 dynobj
= elf_hash_table (info
)->dynobj
;
17395 sgot
= htab
->root
.sgotplt
;
17396 /* A broken linker script might have discarded the dynamic sections.
17397 Catch this here so that we do not seg-fault later on. */
17398 if (sgot
!= NULL
&& bfd_is_abs_section (sgot
->output_section
))
17400 sdyn
= bfd_get_linker_section (dynobj
, ".dynamic");
17402 if (elf_hash_table (info
)->dynamic_sections_created
)
17405 Elf32_External_Dyn
*dyncon
, *dynconend
;
17407 splt
= htab
->root
.splt
;
17408 BFD_ASSERT (splt
!= NULL
&& sdyn
!= NULL
);
17409 BFD_ASSERT (sgot
!= NULL
);
17411 dyncon
= (Elf32_External_Dyn
*) sdyn
->contents
;
17412 dynconend
= (Elf32_External_Dyn
*) (sdyn
->contents
+ sdyn
->size
);
17414 for (; dyncon
< dynconend
; dyncon
++)
17416 Elf_Internal_Dyn dyn
;
17420 bfd_elf32_swap_dyn_in (dynobj
, dyncon
, &dyn
);
17425 if (htab
->root
.target_os
== is_vxworks
17426 && elf_vxworks_finish_dynamic_entry (output_bfd
, &dyn
))
17427 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17442 name
= RELOC_SECTION (htab
, ".plt");
17444 s
= bfd_get_linker_section (dynobj
, name
);
17448 (_("could not find section %s"), name
);
17449 bfd_set_error (bfd_error_invalid_operation
);
17452 dyn
.d_un
.d_ptr
= s
->output_section
->vma
+ s
->output_offset
;
17453 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17457 s
= htab
->root
.srelplt
;
17458 BFD_ASSERT (s
!= NULL
);
17459 dyn
.d_un
.d_val
= s
->size
;
17460 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17469 case DT_TLSDESC_PLT
:
17470 s
= htab
->root
.splt
;
17471 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
17472 + htab
->root
.tlsdesc_plt
);
17473 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17476 case DT_TLSDESC_GOT
:
17477 s
= htab
->root
.sgot
;
17478 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
17479 + htab
->root
.tlsdesc_got
);
17480 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17483 /* Set the bottom bit of DT_INIT/FINI if the
17484 corresponding function is Thumb. */
17486 name
= info
->init_function
;
17489 name
= info
->fini_function
;
17491 /* If it wasn't set by elf_bfd_final_link
17492 then there is nothing to adjust. */
17493 if (dyn
.d_un
.d_val
!= 0)
17495 struct elf_link_hash_entry
* eh
;
17497 eh
= elf_link_hash_lookup (elf_hash_table (info
), name
,
17498 false, false, true);
17500 && ARM_GET_SYM_BRANCH_TYPE (eh
->target_internal
)
17501 == ST_BRANCH_TO_THUMB
)
17503 dyn
.d_un
.d_val
|= 1;
17504 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17511 /* Fill in the first entry in the procedure linkage table. */
17512 if (splt
->size
> 0 && htab
->plt_header_size
)
17514 const bfd_vma
*plt0_entry
;
17515 bfd_vma got_address
, plt_address
, got_displacement
;
17517 /* Calculate the addresses of the GOT and PLT. */
17518 got_address
= sgot
->output_section
->vma
+ sgot
->output_offset
;
17519 plt_address
= splt
->output_section
->vma
+ splt
->output_offset
;
17521 if (htab
->root
.target_os
== is_vxworks
)
17523 /* The VxWorks GOT is relocated by the dynamic linker.
17524 Therefore, we must emit relocations rather than simply
17525 computing the values now. */
17526 Elf_Internal_Rela rel
;
17528 plt0_entry
= elf32_arm_vxworks_exec_plt0_entry
;
17529 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17530 splt
->contents
+ 0);
17531 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17532 splt
->contents
+ 4);
17533 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17534 splt
->contents
+ 8);
17535 bfd_put_32 (output_bfd
, got_address
, splt
->contents
+ 12);
17537 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
17538 rel
.r_offset
= plt_address
+ 12;
17539 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
17541 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
,
17542 htab
->srelplt2
->contents
);
17544 else if (htab
->root
.target_os
== is_nacl
)
17545 arm_nacl_put_plt0 (htab
, output_bfd
, splt
,
17546 got_address
+ 8 - (plt_address
+ 16));
17547 else if (using_thumb_only (htab
))
17549 got_displacement
= got_address
- (plt_address
+ 12);
17551 plt0_entry
= elf32_thumb2_plt0_entry
;
17552 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17553 splt
->contents
+ 0);
17554 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17555 splt
->contents
+ 4);
17556 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17557 splt
->contents
+ 8);
17559 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 12);
17563 got_displacement
= got_address
- (plt_address
+ 16);
17565 plt0_entry
= elf32_arm_plt0_entry
;
17566 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17567 splt
->contents
+ 0);
17568 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17569 splt
->contents
+ 4);
17570 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17571 splt
->contents
+ 8);
17572 put_arm_insn (htab
, output_bfd
, plt0_entry
[3],
17573 splt
->contents
+ 12);
17575 #ifdef FOUR_WORD_PLT
17576 /* The displacement value goes in the otherwise-unused
17577 last word of the second entry. */
17578 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 28);
17580 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 16);
17585 /* UnixWare sets the entsize of .plt to 4, although that doesn't
17586 really seem like the right value. */
17587 if (splt
->output_section
->owner
== output_bfd
)
17588 elf_section_data (splt
->output_section
)->this_hdr
.sh_entsize
= 4;
17590 if (htab
->root
.tlsdesc_plt
)
17592 bfd_vma got_address
17593 = sgot
->output_section
->vma
+ sgot
->output_offset
;
17594 bfd_vma gotplt_address
= (htab
->root
.sgot
->output_section
->vma
17595 + htab
->root
.sgot
->output_offset
);
17596 bfd_vma plt_address
17597 = splt
->output_section
->vma
+ splt
->output_offset
;
17599 arm_put_trampoline (htab
, output_bfd
,
17600 splt
->contents
+ htab
->root
.tlsdesc_plt
,
17601 dl_tlsdesc_lazy_trampoline
, 6);
17603 bfd_put_32 (output_bfd
,
17604 gotplt_address
+ htab
->root
.tlsdesc_got
17605 - (plt_address
+ htab
->root
.tlsdesc_plt
)
17606 - dl_tlsdesc_lazy_trampoline
[6],
17607 splt
->contents
+ htab
->root
.tlsdesc_plt
+ 24);
17608 bfd_put_32 (output_bfd
,
17609 got_address
- (plt_address
+ htab
->root
.tlsdesc_plt
)
17610 - dl_tlsdesc_lazy_trampoline
[7],
17611 splt
->contents
+ htab
->root
.tlsdesc_plt
+ 24 + 4);
17614 if (htab
->tls_trampoline
)
17616 arm_put_trampoline (htab
, output_bfd
,
17617 splt
->contents
+ htab
->tls_trampoline
,
17618 tls_trampoline
, 3);
17619 #ifdef FOUR_WORD_PLT
17620 bfd_put_32 (output_bfd
, 0x00000000,
17621 splt
->contents
+ htab
->tls_trampoline
+ 12);
17625 if (htab
->root
.target_os
== is_vxworks
17626 && !bfd_link_pic (info
)
17627 && htab
->root
.splt
->size
> 0)
17629 /* Correct the .rel(a).plt.unloaded relocations. They will have
17630 incorrect symbol indexes. */
17634 num_plts
= ((htab
->root
.splt
->size
- htab
->plt_header_size
)
17635 / htab
->plt_entry_size
);
17636 p
= htab
->srelplt2
->contents
+ RELOC_SIZE (htab
);
17638 for (; num_plts
; num_plts
--)
17640 Elf_Internal_Rela rel
;
17642 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
17643 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
17644 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
17645 p
+= RELOC_SIZE (htab
);
17647 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
17648 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
17649 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
17650 p
+= RELOC_SIZE (htab
);
17655 if (htab
->root
.target_os
== is_nacl
17656 && htab
->root
.iplt
!= NULL
17657 && htab
->root
.iplt
->size
> 0)
17658 /* NaCl uses a special first entry in .iplt too. */
17659 arm_nacl_put_plt0 (htab
, output_bfd
, htab
->root
.iplt
, 0);
17661 /* Fill in the first three entries in the global offset table. */
17664 if (sgot
->size
> 0)
17667 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
);
17669 bfd_put_32 (output_bfd
,
17670 sdyn
->output_section
->vma
+ sdyn
->output_offset
,
17672 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 4);
17673 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 8);
17676 elf_section_data (sgot
->output_section
)->this_hdr
.sh_entsize
= 4;
17679 /* At the very end of the .rofixup section is a pointer to the GOT. */
17680 if (htab
->fdpic_p
&& htab
->srofixup
!= NULL
)
17682 struct elf_link_hash_entry
*hgot
= htab
->root
.hgot
;
17684 bfd_vma got_value
= hgot
->root
.u
.def
.value
17685 + hgot
->root
.u
.def
.section
->output_section
->vma
17686 + hgot
->root
.u
.def
.section
->output_offset
;
17688 arm_elf_add_rofixup (output_bfd
, htab
->srofixup
, got_value
);
17690 /* Make sure we allocated and generated the same number of fixups. */
17691 BFD_ASSERT (htab
->srofixup
->reloc_count
* 4 == htab
->srofixup
->size
);
17698 elf32_arm_init_file_header (bfd
*abfd
, struct bfd_link_info
*link_info
)
17700 Elf_Internal_Ehdr
* i_ehdrp
; /* ELF file header, internal form. */
17701 struct elf32_arm_link_hash_table
*globals
;
17702 struct elf_segment_map
*m
;
17704 if (!_bfd_elf_init_file_header (abfd
, link_info
))
17707 i_ehdrp
= elf_elfheader (abfd
);
17709 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_UNKNOWN
)
17710 i_ehdrp
->e_ident
[EI_OSABI
] = ELFOSABI_ARM
;
17711 i_ehdrp
->e_ident
[EI_ABIVERSION
] = ARM_ELF_ABI_VERSION
;
17715 globals
= elf32_arm_hash_table (link_info
);
17716 if (globals
!= NULL
&& globals
->byteswap_code
)
17717 i_ehdrp
->e_flags
|= EF_ARM_BE8
;
17719 if (globals
->fdpic_p
)
17720 i_ehdrp
->e_ident
[EI_OSABI
] |= ELFOSABI_ARM_FDPIC
;
17723 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_VER5
17724 && ((i_ehdrp
->e_type
== ET_DYN
) || (i_ehdrp
->e_type
== ET_EXEC
)))
17726 int abi
= bfd_elf_get_obj_attr_int (abfd
, OBJ_ATTR_PROC
, Tag_ABI_VFP_args
);
17727 if (abi
== AEABI_VFP_args_vfp
)
17728 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_HARD
;
17730 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_SOFT
;
17733 /* Scan segment to set p_flags attribute if it contains only sections with
17734 SHF_ARM_PURECODE flag. */
17735 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
17741 for (j
= 0; j
< m
->count
; j
++)
17743 if (!(elf_section_flags (m
->sections
[j
]) & SHF_ARM_PURECODE
))
17749 m
->p_flags_valid
= 1;
17755 static enum elf_reloc_type_class
17756 elf32_arm_reloc_type_class (const struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
17757 const asection
*rel_sec ATTRIBUTE_UNUSED
,
17758 const Elf_Internal_Rela
*rela
)
17760 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
17762 if (htab
->root
.dynsym
!= NULL
17763 && htab
->root
.dynsym
->contents
!= NULL
)
17765 /* Check relocation against STT_GNU_IFUNC symbol if there are
17766 dynamic symbols. */
17767 bfd
*abfd
= info
->output_bfd
;
17768 const struct elf_backend_data
*bed
= get_elf_backend_data (abfd
);
17769 unsigned long r_symndx
= ELF32_R_SYM (rela
->r_info
);
17770 if (r_symndx
!= STN_UNDEF
)
17772 Elf_Internal_Sym sym
;
17773 if (!bed
->s
->swap_symbol_in (abfd
,
17774 (htab
->root
.dynsym
->contents
17775 + r_symndx
* bed
->s
->sizeof_sym
),
17778 /* xgettext:c-format */
17779 _bfd_error_handler (_("%pB symbol number %lu references"
17780 " nonexistent SHT_SYMTAB_SHNDX section"),
17782 /* Ideally an error class should be returned here. */
17784 else if (ELF_ST_TYPE (sym
.st_info
) == STT_GNU_IFUNC
)
17785 return reloc_class_ifunc
;
17789 switch ((int) ELF32_R_TYPE (rela
->r_info
))
17791 case R_ARM_RELATIVE
:
17792 return reloc_class_relative
;
17793 case R_ARM_JUMP_SLOT
:
17794 return reloc_class_plt
;
17796 return reloc_class_copy
;
17797 case R_ARM_IRELATIVE
:
17798 return reloc_class_ifunc
;
17800 return reloc_class_normal
;
17805 arm_final_write_processing (bfd
*abfd
)
17807 bfd_arm_update_notes (abfd
, ARM_NOTE_SECTION
);
17811 elf32_arm_final_write_processing (bfd
*abfd
)
17813 arm_final_write_processing (abfd
);
17814 return _bfd_elf_final_write_processing (abfd
);
17817 /* Return TRUE if this is an unwinding table entry. */
17820 is_arm_elf_unwind_section_name (bfd
* abfd ATTRIBUTE_UNUSED
, const char * name
)
17822 return (startswith (name
, ELF_STRING_ARM_unwind
)
17823 || startswith (name
, ELF_STRING_ARM_unwind_once
));
17827 /* Set the type and flags for an ARM section. We do this by
17828 the section name, which is a hack, but ought to work. */
17831 elf32_arm_fake_sections (bfd
* abfd
, Elf_Internal_Shdr
* hdr
, asection
* sec
)
17835 name
= bfd_section_name (sec
);
17837 if (is_arm_elf_unwind_section_name (abfd
, name
))
17839 hdr
->sh_type
= SHT_ARM_EXIDX
;
17840 hdr
->sh_flags
|= SHF_LINK_ORDER
;
17843 if (sec
->flags
& SEC_ELF_PURECODE
)
17844 hdr
->sh_flags
|= SHF_ARM_PURECODE
;
17849 /* Handle an ARM specific section when reading an object file. This is
17850 called when bfd_section_from_shdr finds a section with an unknown
17854 elf32_arm_section_from_shdr (bfd
*abfd
,
17855 Elf_Internal_Shdr
* hdr
,
17859 /* There ought to be a place to keep ELF backend specific flags, but
17860 at the moment there isn't one. We just keep track of the
17861 sections by their name, instead. Fortunately, the ABI gives
17862 names for all the ARM specific sections, so we will probably get
17864 switch (hdr
->sh_type
)
17866 case SHT_ARM_EXIDX
:
17867 case SHT_ARM_PREEMPTMAP
:
17868 case SHT_ARM_ATTRIBUTES
:
17875 if (! _bfd_elf_make_section_from_shdr (abfd
, hdr
, name
, shindex
))
17881 static _arm_elf_section_data
*
17882 get_arm_elf_section_data (asection
* sec
)
17884 if (sec
&& sec
->owner
&& is_arm_elf (sec
->owner
))
17885 return elf32_arm_section_data (sec
);
17893 struct bfd_link_info
*info
;
17896 int (*func
) (void *, const char *, Elf_Internal_Sym
*,
17897 asection
*, struct elf_link_hash_entry
*);
17898 } output_arch_syminfo
;
17900 enum map_symbol_type
17908 /* Output a single mapping symbol. */
17911 elf32_arm_output_map_sym (output_arch_syminfo
*osi
,
17912 enum map_symbol_type type
,
17915 static const char *names
[3] = {"$a", "$t", "$d"};
17916 Elf_Internal_Sym sym
;
17918 sym
.st_value
= osi
->sec
->output_section
->vma
17919 + osi
->sec
->output_offset
17923 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
17924 sym
.st_shndx
= osi
->sec_shndx
;
17925 sym
.st_target_internal
= ST_BRANCH_TO_ARM
;
17926 elf32_arm_section_map_add (osi
->sec
, names
[type
][1], offset
);
17927 return osi
->func (osi
->flaginfo
, names
[type
], &sym
, osi
->sec
, NULL
) == 1;
17930 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17931 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
17934 elf32_arm_output_plt_map_1 (output_arch_syminfo
*osi
,
17935 bool is_iplt_entry_p
,
17936 union gotplt_union
*root_plt
,
17937 struct arm_plt_info
*arm_plt
)
17939 struct elf32_arm_link_hash_table
*htab
;
17940 bfd_vma addr
, plt_header_size
;
17942 if (root_plt
->offset
== (bfd_vma
) -1)
17945 htab
= elf32_arm_hash_table (osi
->info
);
17949 if (is_iplt_entry_p
)
17951 osi
->sec
= htab
->root
.iplt
;
17952 plt_header_size
= 0;
17956 osi
->sec
= htab
->root
.splt
;
17957 plt_header_size
= htab
->plt_header_size
;
17959 osi
->sec_shndx
= (_bfd_elf_section_from_bfd_section
17960 (osi
->info
->output_bfd
, osi
->sec
->output_section
));
17962 addr
= root_plt
->offset
& -2;
17963 if (htab
->root
.target_os
== is_vxworks
)
17965 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17967 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 8))
17969 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
+ 12))
17971 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 20))
17974 else if (htab
->root
.target_os
== is_nacl
)
17976 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17979 else if (htab
->fdpic_p
)
17981 enum map_symbol_type type
= using_thumb_only (htab
)
17985 if (elf32_arm_plt_needs_thumb_stub_p (osi
->info
, arm_plt
))
17986 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
- 4))
17988 if (!elf32_arm_output_map_sym (osi
, type
, addr
))
17990 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 16))
17992 if (htab
->plt_entry_size
== 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry
))
17993 if (!elf32_arm_output_map_sym (osi
, type
, addr
+ 24))
17996 else if (using_thumb_only (htab
))
17998 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
))
18005 thumb_stub_p
= elf32_arm_plt_needs_thumb_stub_p (osi
->info
, arm_plt
);
18008 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
- 4))
18011 #ifdef FOUR_WORD_PLT
18012 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
18014 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 12))
18017 /* A three-word PLT with no Thumb thunk contains only Arm code,
18018 so only need to output a mapping symbol for the first PLT entry and
18019 entries with thumb thunks. */
18020 if (thumb_stub_p
|| addr
== plt_header_size
)
18022 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
18031 /* Output mapping symbols for PLT entries associated with H. */
18034 elf32_arm_output_plt_map (struct elf_link_hash_entry
*h
, void *inf
)
18036 output_arch_syminfo
*osi
= (output_arch_syminfo
*) inf
;
18037 struct elf32_arm_link_hash_entry
*eh
;
18039 if (h
->root
.type
== bfd_link_hash_indirect
)
18042 if (h
->root
.type
== bfd_link_hash_warning
)
18043 /* When warning symbols are created, they **replace** the "real"
18044 entry in the hash table, thus we never get to see the real
18045 symbol in a hash traversal. So look at it now. */
18046 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
18048 eh
= (struct elf32_arm_link_hash_entry
*) h
;
18049 return elf32_arm_output_plt_map_1 (osi
, SYMBOL_CALLS_LOCAL (osi
->info
, h
),
18050 &h
->plt
, &eh
->plt
);
18053 /* Bind a veneered symbol to its veneer identified by its hash entry
18054 STUB_ENTRY. The veneered location thus loose its symbol. */
18057 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry
*stub_entry
)
18059 struct elf32_arm_link_hash_entry
*hash
= stub_entry
->h
;
18062 hash
->root
.root
.u
.def
.section
= stub_entry
->stub_sec
;
18063 hash
->root
.root
.u
.def
.value
= stub_entry
->stub_offset
;
18064 hash
->root
.size
= stub_entry
->stub_size
;
18067 /* Output a single local symbol for a generated stub. */
18070 elf32_arm_output_stub_sym (output_arch_syminfo
*osi
, const char *name
,
18071 bfd_vma offset
, bfd_vma size
)
18073 Elf_Internal_Sym sym
;
18075 sym
.st_value
= osi
->sec
->output_section
->vma
18076 + osi
->sec
->output_offset
18078 sym
.st_size
= size
;
18080 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
18081 sym
.st_shndx
= osi
->sec_shndx
;
18082 sym
.st_target_internal
= ST_BRANCH_TO_ARM
;
18083 return osi
->func (osi
->flaginfo
, name
, &sym
, osi
->sec
, NULL
) == 1;
18087 arm_map_one_stub (struct bfd_hash_entry
* gen_entry
,
18090 struct elf32_arm_stub_hash_entry
*stub_entry
;
18091 asection
*stub_sec
;
18094 output_arch_syminfo
*osi
;
18095 const insn_sequence
*template_sequence
;
18096 enum stub_insn_type prev_type
;
18099 enum map_symbol_type sym_type
;
18101 /* Massage our args to the form they really have. */
18102 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
18103 osi
= (output_arch_syminfo
*) in_arg
;
18105 stub_sec
= stub_entry
->stub_sec
;
18107 /* Ensure this stub is attached to the current section being
18109 if (stub_sec
!= osi
->sec
)
18112 addr
= (bfd_vma
) stub_entry
->stub_offset
;
18113 template_sequence
= stub_entry
->stub_template
;
18115 if (arm_stub_sym_claimed (stub_entry
->stub_type
))
18116 arm_stub_claim_sym (stub_entry
);
18119 stub_name
= stub_entry
->output_name
;
18120 switch (template_sequence
[0].type
)
18123 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
,
18124 stub_entry
->stub_size
))
18129 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
| 1,
18130 stub_entry
->stub_size
))
18139 prev_type
= DATA_TYPE
;
18141 for (i
= 0; i
< stub_entry
->stub_template_size
; i
++)
18143 switch (template_sequence
[i
].type
)
18146 sym_type
= ARM_MAP_ARM
;
18151 sym_type
= ARM_MAP_THUMB
;
18155 sym_type
= ARM_MAP_DATA
;
18163 if (template_sequence
[i
].type
!= prev_type
)
18165 prev_type
= template_sequence
[i
].type
;
18166 if (!elf32_arm_output_map_sym (osi
, sym_type
, addr
+ size
))
18170 switch (template_sequence
[i
].type
)
18194 /* Output mapping symbols for linker generated sections,
18195 and for those data-only sections that do not have a
18199 elf32_arm_output_arch_local_syms (bfd
*output_bfd
,
18200 struct bfd_link_info
*info
,
18202 int (*func
) (void *, const char *,
18203 Elf_Internal_Sym
*,
18205 struct elf_link_hash_entry
*))
18207 output_arch_syminfo osi
;
18208 struct elf32_arm_link_hash_table
*htab
;
18210 bfd_size_type size
;
18213 if (info
->strip
== strip_all
18214 && !info
->emitrelocations
18215 && !bfd_link_relocatable (info
))
18218 htab
= elf32_arm_hash_table (info
);
18222 check_use_blx (htab
);
18224 osi
.flaginfo
= flaginfo
;
18228 /* Add a $d mapping symbol to data-only sections that
18229 don't have any mapping symbol. This may result in (harmless) redundant
18230 mapping symbols. */
18231 for (input_bfd
= info
->input_bfds
;
18233 input_bfd
= input_bfd
->link
.next
)
18235 if ((input_bfd
->flags
& (BFD_LINKER_CREATED
| HAS_SYMS
)) == HAS_SYMS
)
18236 for (osi
.sec
= input_bfd
->sections
;
18238 osi
.sec
= osi
.sec
->next
)
18240 if (osi
.sec
->output_section
!= NULL
18241 && ((osi
.sec
->output_section
->flags
& (SEC_ALLOC
| SEC_CODE
))
18243 && (osi
.sec
->flags
& (SEC_HAS_CONTENTS
| SEC_LINKER_CREATED
))
18244 == SEC_HAS_CONTENTS
18245 && get_arm_elf_section_data (osi
.sec
) != NULL
18246 && get_arm_elf_section_data (osi
.sec
)->mapcount
== 0
18247 && osi
.sec
->size
> 0
18248 && (osi
.sec
->flags
& SEC_EXCLUDE
) == 0)
18250 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18251 (output_bfd
, osi
.sec
->output_section
);
18252 if (osi
.sec_shndx
!= (int)SHN_BAD
)
18253 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 0);
18258 /* ARM->Thumb glue. */
18259 if (htab
->arm_glue_size
> 0)
18261 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18262 ARM2THUMB_GLUE_SECTION_NAME
);
18264 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18265 (output_bfd
, osi
.sec
->output_section
);
18266 if (bfd_link_pic (info
)
18267 || htab
->pic_veneer
)
18268 size
= ARM2THUMB_PIC_GLUE_SIZE
;
18269 else if (htab
->use_blx
)
18270 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
18272 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
18274 for (offset
= 0; offset
< htab
->arm_glue_size
; offset
+= size
)
18276 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
);
18277 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, offset
+ size
- 4);
18281 /* Thumb->ARM glue. */
18282 if (htab
->thumb_glue_size
> 0)
18284 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18285 THUMB2ARM_GLUE_SECTION_NAME
);
18287 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18288 (output_bfd
, osi
.sec
->output_section
);
18289 size
= THUMB2ARM_GLUE_SIZE
;
18291 for (offset
= 0; offset
< htab
->thumb_glue_size
; offset
+= size
)
18293 elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, offset
);
18294 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
+ 4);
18298 /* ARMv4 BX veneers. */
18299 if (htab
->bx_glue_size
> 0)
18301 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18302 ARM_BX_GLUE_SECTION_NAME
);
18304 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18305 (output_bfd
, osi
.sec
->output_section
);
18307 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0);
18310 /* Long calls stubs. */
18311 if (htab
->stub_bfd
&& htab
->stub_bfd
->sections
)
18313 asection
* stub_sec
;
18315 for (stub_sec
= htab
->stub_bfd
->sections
;
18317 stub_sec
= stub_sec
->next
)
18319 /* Ignore non-stub sections. */
18320 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
18323 osi
.sec
= stub_sec
;
18325 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18326 (output_bfd
, osi
.sec
->output_section
);
18328 bfd_hash_traverse (&htab
->stub_hash_table
, arm_map_one_stub
, &osi
);
18332 /* Finally, output mapping symbols for the PLT. */
18333 if (htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
18335 osi
.sec
= htab
->root
.splt
;
18336 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
18337 (output_bfd
, osi
.sec
->output_section
));
18339 /* Output mapping symbols for the plt header. */
18340 if (htab
->root
.target_os
== is_vxworks
)
18342 /* VxWorks shared libraries have no PLT header. */
18343 if (!bfd_link_pic (info
))
18345 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18347 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
18351 else if (htab
->root
.target_os
== is_nacl
)
18353 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18356 else if (using_thumb_only (htab
) && !htab
->fdpic_p
)
18358 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 0))
18360 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
18362 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 16))
18365 else if (!htab
->fdpic_p
)
18367 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18369 #ifndef FOUR_WORD_PLT
18370 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 16))
18375 if (htab
->root
.target_os
== is_nacl
18377 && htab
->root
.iplt
->size
> 0)
18379 /* NaCl uses a special first entry in .iplt too. */
18380 osi
.sec
= htab
->root
.iplt
;
18381 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
18382 (output_bfd
, osi
.sec
->output_section
));
18383 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18386 if ((htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
18387 || (htab
->root
.iplt
&& htab
->root
.iplt
->size
> 0))
18389 elf_link_hash_traverse (&htab
->root
, elf32_arm_output_plt_map
, &osi
);
18390 for (input_bfd
= info
->input_bfds
;
18392 input_bfd
= input_bfd
->link
.next
)
18394 struct arm_local_iplt_info
**local_iplt
;
18395 unsigned int i
, num_syms
;
18397 local_iplt
= elf32_arm_local_iplt (input_bfd
);
18398 if (local_iplt
!= NULL
)
18400 num_syms
= elf_symtab_hdr (input_bfd
).sh_info
;
18401 if (num_syms
> elf32_arm_num_entries (input_bfd
))
18403 _bfd_error_handler (_("\
18404 %pB: Number of symbols in input file has increased from %lu to %u\n"),
18406 (unsigned long) elf32_arm_num_entries (input_bfd
),
18410 for (i
= 0; i
< num_syms
; i
++)
18411 if (local_iplt
[i
] != NULL
18412 && !elf32_arm_output_plt_map_1 (&osi
, true,
18413 &local_iplt
[i
]->root
,
18414 &local_iplt
[i
]->arm
))
18419 if (htab
->root
.tlsdesc_plt
!= 0)
18421 /* Mapping symbols for the lazy tls trampoline. */
18422 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
,
18423 htab
->root
.tlsdesc_plt
))
18426 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
18427 htab
->root
.tlsdesc_plt
+ 24))
18430 if (htab
->tls_trampoline
!= 0)
18432 /* Mapping symbols for the tls trampoline. */
18433 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, htab
->tls_trampoline
))
18435 #ifdef FOUR_WORD_PLT
18436 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
18437 htab
->tls_trampoline
+ 12))
18445 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18446 the import library. All SYMCOUNT symbols of ABFD can be examined
18447 from their pointers in SYMS. Pointers of symbols to keep should be
18448 stored continuously at the beginning of that array.
18450 Returns the number of symbols to keep. */
18452 static unsigned int
18453 elf32_arm_filter_cmse_symbols (bfd
*abfd ATTRIBUTE_UNUSED
,
18454 struct bfd_link_info
*info
,
18455 asymbol
**syms
, long symcount
)
18459 long src_count
, dst_count
= 0;
18460 struct elf32_arm_link_hash_table
*htab
;
18462 htab
= elf32_arm_hash_table (info
);
18463 if (!htab
->stub_bfd
|| !htab
->stub_bfd
->sections
)
18467 cmse_name
= (char *) bfd_malloc (maxnamelen
);
18468 BFD_ASSERT (cmse_name
);
18470 for (src_count
= 0; src_count
< symcount
; src_count
++)
18472 struct elf32_arm_link_hash_entry
*cmse_hash
;
18478 sym
= syms
[src_count
];
18479 flags
= sym
->flags
;
18480 name
= (char *) bfd_asymbol_name (sym
);
18482 if ((flags
& BSF_FUNCTION
) != BSF_FUNCTION
)
18484 if (!(flags
& (BSF_GLOBAL
| BSF_WEAK
)))
18487 namelen
= strlen (name
) + sizeof (CMSE_PREFIX
) + 1;
18488 if (namelen
> maxnamelen
)
18490 cmse_name
= (char *)
18491 bfd_realloc (cmse_name
, namelen
);
18492 maxnamelen
= namelen
;
18494 snprintf (cmse_name
, maxnamelen
, "%s%s", CMSE_PREFIX
, name
);
18495 cmse_hash
= (struct elf32_arm_link_hash_entry
*)
18496 elf_link_hash_lookup (&(htab
)->root
, cmse_name
, false, false, true);
18499 || (cmse_hash
->root
.root
.type
!= bfd_link_hash_defined
18500 && cmse_hash
->root
.root
.type
!= bfd_link_hash_defweak
)
18501 || cmse_hash
->root
.type
!= STT_FUNC
)
18504 syms
[dst_count
++] = sym
;
18508 syms
[dst_count
] = NULL
;
18513 /* Filter symbols of ABFD to include in the import library. All
18514 SYMCOUNT symbols of ABFD can be examined from their pointers in
18515 SYMS. Pointers of symbols to keep should be stored continuously at
18516 the beginning of that array.
18518 Returns the number of symbols to keep. */
18520 static unsigned int
18521 elf32_arm_filter_implib_symbols (bfd
*abfd ATTRIBUTE_UNUSED
,
18522 struct bfd_link_info
*info
,
18523 asymbol
**syms
, long symcount
)
18525 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
18527 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18528 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18529 library to be a relocatable object file. */
18530 BFD_ASSERT (!(bfd_get_file_flags (info
->out_implib_bfd
) & EXEC_P
));
18531 if (globals
->cmse_implib
)
18532 return elf32_arm_filter_cmse_symbols (abfd
, info
, syms
, symcount
);
18534 return _bfd_elf_filter_global_symbols (abfd
, info
, syms
, symcount
);
18537 /* Allocate target specific section data. */
18540 elf32_arm_new_section_hook (bfd
*abfd
, asection
*sec
)
18542 _arm_elf_section_data
*sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
18545 sec
->used_by_bfd
= sdata
;
18547 return _bfd_elf_new_section_hook (abfd
, sec
);
18551 /* Used to order a list of mapping symbols by address. */
18554 elf32_arm_compare_mapping (const void * a
, const void * b
)
18556 const elf32_arm_section_map
*amap
= (const elf32_arm_section_map
*) a
;
18557 const elf32_arm_section_map
*bmap
= (const elf32_arm_section_map
*) b
;
18559 if (amap
->vma
> bmap
->vma
)
18561 else if (amap
->vma
< bmap
->vma
)
18563 else if (amap
->type
> bmap
->type
)
18564 /* Ensure results do not depend on the host qsort for objects with
18565 multiple mapping symbols at the same address by sorting on type
18568 else if (amap
->type
< bmap
->type
)
18574 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18576 static unsigned long
18577 offset_prel31 (unsigned long addr
, bfd_vma offset
)
18579 return (addr
& ~0x7ffffffful
) | ((addr
+ offset
) & 0x7ffffffful
);
18582 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18586 copy_exidx_entry (bfd
*output_bfd
, bfd_byte
*to
, bfd_byte
*from
, bfd_vma offset
)
18588 unsigned long first_word
= bfd_get_32 (output_bfd
, from
);
18589 unsigned long second_word
= bfd_get_32 (output_bfd
, from
+ 4);
18591 /* High bit of first word is supposed to be zero. */
18592 if ((first_word
& 0x80000000ul
) == 0)
18593 first_word
= offset_prel31 (first_word
, offset
);
18595 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
18596 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
18597 if ((second_word
!= 0x1) && ((second_word
& 0x80000000ul
) == 0))
18598 second_word
= offset_prel31 (second_word
, offset
);
18600 bfd_put_32 (output_bfd
, first_word
, to
);
18601 bfd_put_32 (output_bfd
, second_word
, to
+ 4);
18604 /* Data for make_branch_to_a8_stub(). */
18606 struct a8_branch_to_stub_data
18608 asection
*writing_section
;
18609 bfd_byte
*contents
;
18613 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18614 places for a particular section. */
18617 make_branch_to_a8_stub (struct bfd_hash_entry
*gen_entry
,
18620 struct elf32_arm_stub_hash_entry
*stub_entry
;
18621 struct a8_branch_to_stub_data
*data
;
18622 bfd_byte
*contents
;
18623 unsigned long branch_insn
;
18624 bfd_vma veneered_insn_loc
, veneer_entry_loc
;
18625 bfd_signed_vma branch_offset
;
18629 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
18630 data
= (struct a8_branch_to_stub_data
*) in_arg
;
18632 if (stub_entry
->target_section
!= data
->writing_section
18633 || stub_entry
->stub_type
< arm_stub_a8_veneer_lwm
)
18636 contents
= data
->contents
;
18638 /* We use target_section as Cortex-A8 erratum workaround stubs are only
18639 generated when both source and target are in the same section. */
18640 veneered_insn_loc
= stub_entry
->target_section
->output_section
->vma
18641 + stub_entry
->target_section
->output_offset
18642 + stub_entry
->source_value
;
18644 veneer_entry_loc
= stub_entry
->stub_sec
->output_section
->vma
18645 + stub_entry
->stub_sec
->output_offset
18646 + stub_entry
->stub_offset
;
18648 if (stub_entry
->stub_type
== arm_stub_a8_veneer_blx
)
18649 veneered_insn_loc
&= ~3u;
18651 branch_offset
= veneer_entry_loc
- veneered_insn_loc
- 4;
18653 abfd
= stub_entry
->target_section
->owner
;
18654 loc
= stub_entry
->source_value
;
18656 /* We attempt to avoid this condition by setting stubs_always_after_branch
18657 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
18658 This check is just to be on the safe side... */
18659 if ((veneered_insn_loc
& ~0xfff) == (veneer_entry_loc
& ~0xfff))
18661 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
18662 "allocated in unsafe location"), abfd
);
18666 switch (stub_entry
->stub_type
)
18668 case arm_stub_a8_veneer_b
:
18669 case arm_stub_a8_veneer_b_cond
:
18670 branch_insn
= 0xf0009000;
18673 case arm_stub_a8_veneer_blx
:
18674 branch_insn
= 0xf000e800;
18677 case arm_stub_a8_veneer_bl
:
18679 unsigned int i1
, j1
, i2
, j2
, s
;
18681 branch_insn
= 0xf000d000;
18684 if (branch_offset
< -16777216 || branch_offset
> 16777214)
18686 /* There's not much we can do apart from complain if this
18688 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
18689 "of range (input file too large)"), abfd
);
18693 /* i1 = not(j1 eor s), so:
18695 j1 = (not i1) eor s. */
18697 branch_insn
|= (branch_offset
>> 1) & 0x7ff;
18698 branch_insn
|= ((branch_offset
>> 12) & 0x3ff) << 16;
18699 i2
= (branch_offset
>> 22) & 1;
18700 i1
= (branch_offset
>> 23) & 1;
18701 s
= (branch_offset
>> 24) & 1;
18704 branch_insn
|= j2
<< 11;
18705 branch_insn
|= j1
<< 13;
18706 branch_insn
|= s
<< 26;
18715 bfd_put_16 (abfd
, (branch_insn
>> 16) & 0xffff, &contents
[loc
]);
18716 bfd_put_16 (abfd
, branch_insn
& 0xffff, &contents
[loc
+ 2]);
18721 /* Beginning of stm32l4xx work-around. */
18723 /* Functions encoding instructions necessary for the emission of the
18724 fix-stm32l4xx-629360.
18725 Encoding is extracted from the
18726 ARM (C) Architecture Reference Manual
18727 ARMv7-A and ARMv7-R edition
18728 ARM DDI 0406C.b (ID072512). */
18730 static inline bfd_vma
18731 create_instruction_branch_absolute (int branch_offset
)
18733 /* A8.8.18 B (A8-334)
18734 B target_address (Encoding T4). */
18735 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18736 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18737 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18739 int s
= ((branch_offset
& 0x1000000) >> 24);
18740 int j1
= s
^ !((branch_offset
& 0x800000) >> 23);
18741 int j2
= s
^ !((branch_offset
& 0x400000) >> 22);
18743 if (branch_offset
< -(1 << 24) || branch_offset
>= (1 << 24))
18744 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18746 bfd_vma patched_inst
= 0xf0009000
18748 | (((unsigned long) (branch_offset
) >> 12) & 0x3ff) << 16 /* imm10. */
18749 | j1
<< 13 /* J1. */
18750 | j2
<< 11 /* J2. */
18751 | (((unsigned long) (branch_offset
) >> 1) & 0x7ff); /* imm11. */
18753 return patched_inst
;
18756 static inline bfd_vma
18757 create_instruction_ldmia (int base_reg
, int wback
, int reg_mask
)
18759 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18760 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18761 bfd_vma patched_inst
= 0xe8900000
18762 | (/*W=*/wback
<< 21)
18764 | (reg_mask
& 0x0000ffff);
18766 return patched_inst
;
18769 static inline bfd_vma
18770 create_instruction_ldmdb (int base_reg
, int wback
, int reg_mask
)
18772 /* A8.8.60 LDMDB/LDMEA (A8-402)
18773 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18774 bfd_vma patched_inst
= 0xe9100000
18775 | (/*W=*/wback
<< 21)
18777 | (reg_mask
& 0x0000ffff);
18779 return patched_inst
;
18782 static inline bfd_vma
18783 create_instruction_mov (int target_reg
, int source_reg
)
18785 /* A8.8.103 MOV (register) (A8-486)
18786 MOV Rd, Rm (Encoding T1). */
18787 bfd_vma patched_inst
= 0x4600
18788 | (target_reg
& 0x7)
18789 | ((target_reg
& 0x8) >> 3) << 7
18790 | (source_reg
<< 3);
18792 return patched_inst
;
18795 static inline bfd_vma
18796 create_instruction_sub (int target_reg
, int source_reg
, int value
)
18798 /* A8.8.221 SUB (immediate) (A8-708)
18799 SUB Rd, Rn, #value (Encoding T3). */
18800 bfd_vma patched_inst
= 0xf1a00000
18801 | (target_reg
<< 8)
18802 | (source_reg
<< 16)
18804 | ((value
& 0x800) >> 11) << 26
18805 | ((value
& 0x700) >> 8) << 12
18808 return patched_inst
;
18811 static inline bfd_vma
18812 create_instruction_vldmia (int base_reg
, int is_dp
, int wback
, int num_words
,
18815 /* A8.8.332 VLDM (A8-922)
18816 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18817 bfd_vma patched_inst
= (is_dp
? 0xec900b00 : 0xec900a00)
18818 | (/*W=*/wback
<< 21)
18820 | (num_words
& 0x000000ff)
18821 | (((unsigned)first_reg
>> 1) & 0x0000000f) << 12
18822 | (first_reg
& 0x00000001) << 22;
18824 return patched_inst
;
18827 static inline bfd_vma
18828 create_instruction_vldmdb (int base_reg
, int is_dp
, int num_words
,
18831 /* A8.8.332 VLDM (A8-922)
18832 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
18833 bfd_vma patched_inst
= (is_dp
? 0xed300b00 : 0xed300a00)
18835 | (num_words
& 0x000000ff)
18836 | (((unsigned)first_reg
>>1 ) & 0x0000000f) << 12
18837 | (first_reg
& 0x00000001) << 22;
18839 return patched_inst
;
18842 static inline bfd_vma
18843 create_instruction_udf_w (int value
)
18845 /* A8.8.247 UDF (A8-758)
18846 Undefined (Encoding T2). */
18847 bfd_vma patched_inst
= 0xf7f0a000
18848 | (value
& 0x00000fff)
18849 | (value
& 0x000f0000) << 16;
18851 return patched_inst
;
18854 static inline bfd_vma
18855 create_instruction_udf (int value
)
18857 /* A8.8.247 UDF (A8-758)
18858 Undefined (Encoding T1). */
18859 bfd_vma patched_inst
= 0xde00
18862 return patched_inst
;
18865 /* Functions writing an instruction in memory, returning the next
18866 memory position to write to. */
18868 static inline bfd_byte
*
18869 push_thumb2_insn32 (struct elf32_arm_link_hash_table
* htab
,
18870 bfd
* output_bfd
, bfd_byte
*pt
, insn32 insn
)
18872 put_thumb2_insn (htab
, output_bfd
, insn
, pt
);
18876 static inline bfd_byte
*
18877 push_thumb2_insn16 (struct elf32_arm_link_hash_table
* htab
,
18878 bfd
* output_bfd
, bfd_byte
*pt
, insn32 insn
)
18880 put_thumb_insn (htab
, output_bfd
, insn
, pt
);
18884 /* Function filling up a region in memory with T1 and T2 UDFs taking
18885 care of alignment. */
18888 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table
* htab
,
18890 const bfd_byte
* const base_stub_contents
,
18891 bfd_byte
* const from_stub_contents
,
18892 const bfd_byte
* const end_stub_contents
)
18894 bfd_byte
*current_stub_contents
= from_stub_contents
;
18896 /* Fill the remaining of the stub with deterministic contents : UDF
18898 Check if realignment is needed on modulo 4 frontier using T1, to
18900 if ((current_stub_contents
< end_stub_contents
)
18901 && !((current_stub_contents
- base_stub_contents
) % 2)
18902 && ((current_stub_contents
- base_stub_contents
) % 4))
18903 current_stub_contents
=
18904 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
18905 create_instruction_udf (0));
18907 for (; current_stub_contents
< end_stub_contents
;)
18908 current_stub_contents
=
18909 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18910 create_instruction_udf_w (0));
18912 return current_stub_contents
;
18915 /* Functions writing the stream of instructions equivalent to the
18916 derived sequence for ldmia, ldmdb, vldm respectively. */
18919 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table
* htab
,
18921 const insn32 initial_insn
,
18922 const bfd_byte
*const initial_insn_addr
,
18923 bfd_byte
*const base_stub_contents
)
18925 int wback
= (initial_insn
& 0x00200000) >> 21;
18926 int ri
, rn
= (initial_insn
& 0x000F0000) >> 16;
18927 int insn_all_registers
= initial_insn
& 0x0000ffff;
18928 int insn_low_registers
, insn_high_registers
;
18929 int usable_register_mask
;
18930 int nb_registers
= elf32_arm_popcount (insn_all_registers
);
18931 int restore_pc
= (insn_all_registers
& (1 << 15)) ? 1 : 0;
18932 int restore_rn
= (insn_all_registers
& (1 << rn
)) ? 1 : 0;
18933 bfd_byte
*current_stub_contents
= base_stub_contents
;
18935 BFD_ASSERT (is_thumb2_ldmia (initial_insn
));
18937 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
18938 smaller than 8 registers load sequences that do not cause the
18940 if (nb_registers
<= 8)
18942 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
18943 current_stub_contents
=
18944 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18947 /* B initial_insn_addr+4. */
18949 current_stub_contents
=
18950 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18951 create_instruction_branch_absolute
18952 (initial_insn_addr
- current_stub_contents
));
18954 /* Fill the remaining of the stub with deterministic contents. */
18955 current_stub_contents
=
18956 stm32l4xx_fill_stub_udf (htab
, output_bfd
,
18957 base_stub_contents
, current_stub_contents
,
18958 base_stub_contents
+
18959 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
);
18964 /* - reg_list[13] == 0. */
18965 BFD_ASSERT ((insn_all_registers
& (1 << 13))==0);
18967 /* - reg_list[14] & reg_list[15] != 1. */
18968 BFD_ASSERT ((insn_all_registers
& 0xC000) != 0xC000);
18970 /* - if (wback==1) reg_list[rn] == 0. */
18971 BFD_ASSERT (!wback
|| !restore_rn
);
18973 /* - nb_registers > 8. */
18974 BFD_ASSERT (elf32_arm_popcount (insn_all_registers
) > 8);
18976 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
18978 /* In the following algorithm, we split this wide LDM using 2 LDM insns:
18979 - One with the 7 lowest registers (register mask 0x007F)
18980 This LDM will finally contain between 2 and 7 registers
18981 - One with the 7 highest registers (register mask 0xDF80)
18982 This ldm will finally contain between 2 and 7 registers. */
18983 insn_low_registers
= insn_all_registers
& 0x007F;
18984 insn_high_registers
= insn_all_registers
& 0xDF80;
18986 /* A spare register may be needed during this veneer to temporarily
18987 handle the base register. This register will be restored with the
18988 last LDM operation.
18989 The usable register may be any general purpose register (that
18990 excludes PC, SP, LR : register mask is 0x1FFF). */
18991 usable_register_mask
= 0x1FFF;
18993 /* Generate the stub function. */
18996 /* LDMIA Rn!, {R-low-register-list} : (Encoding T2). */
18997 current_stub_contents
=
18998 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18999 create_instruction_ldmia
19000 (rn
, /*wback=*/1, insn_low_registers
));
19002 /* LDMIA Rn!, {R-high-register-list} : (Encoding T2). */
19003 current_stub_contents
=
19004 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19005 create_instruction_ldmia
19006 (rn
, /*wback=*/1, insn_high_registers
));
19009 /* B initial_insn_addr+4. */
19010 current_stub_contents
=
19011 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19012 create_instruction_branch_absolute
19013 (initial_insn_addr
- current_stub_contents
));
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMDB Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
     - One with the 7 lowest registers (register mask 0x007F)
       This LDM will finally contain between 2 and 7 registers
     - One with the 7 highest registers (register mask 0xDF80)
       This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;
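
  /* The split LDMDBs below are emitted with the high list first: an
     LDMDB assigns the highest-numbered registers to the words nearest
     the base register, so loading the high list first (with writeback)
     and then the low list preserves the register-to-address mapping of
     the original single LDMDB.  */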

  /* Generate the stub function.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
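  /* The restore_pc cases below rewrite the LDMDB as two ascending
     LDMIAs: SUB computes Rn - 4*nb_registers, the lowest address the
     original LDMDB would have accessed, and the LDMIAs then walk the
     same words upwards.  */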
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  int num_words = initial_insn & 0xff;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      bool is_dp = /* DP encoding.  */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bool is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bool is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bool is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int) initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ((is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
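
      /* For example, num_words == 12 gives chunks == 2: the first pass
	 of the loop below emits a VLDM transferring 8 words, and the
	 second transfers the remaining 12 - 8 = 4 words starting at
	 first_reg + 8.  */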
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback=*/1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}

static void
stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
				 bfd * output_bfd,
				 const insn32 wrong_insn,
				 const bfd_byte *const wrong_insn_addr,
				 bfd_byte *const stub_contents)
{
  if (is_thumb2_ldmia (wrong_insn))
    stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
					   wrong_insn, wrong_insn_addr,
					   stub_contents);
  else if (is_thumb2_ldmdb (wrong_insn))
    stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
					   wrong_insn, wrong_insn_addr,
					   stub_contents);
  else if (is_thumb2_vldm (wrong_insn))
    stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
					  wrong_insn, wrong_insn_addr,
					  stub_contents);
}

/* End of stm32l4xx work-around.  */

/* Do code byteswapping.  Return FALSE afterwards so that the section is
   written out as normal.  */

static bool
elf32_arm_write_section (bfd *output_bfd,
			 struct bfd_link_info *link_info,
			 asection *sec,
			 bfd_byte *contents)
{
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
  bfd_vma ptr;
  bfd_vma end;
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return false;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return false;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  if (errcount != 0)
    {
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
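
      /* The erratum fixes below patch 32-bit ARM instructions into
	 CONTENTS one byte at a time.  XORing each byte index with
	 ENDIANFLIP (3 for big-endian output, 0 for little-endian)
	 places each byte of the little-endian instruction image at the
	 correct position for the output endianness, given that TARGET
	 is 4-byte aligned.  */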

      for (errnode = arm_data->erratumlist; errnode != 0;
	   errnode = errnode->next)
	{
	  bfd_vma target = errnode->vma - offset;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	      {
		bfd_vma branch_to_veneer;
		/* Original condition code of instruction, plus bit mask for
		   ARM B instruction.  */
		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
				    | 0x0a000000;

		/* The instruction is before the label.  */
		target -= 4;

		/* Above offset included in -4 below.  */
		branch_to_veneer = errnode->u.b.veneer->vma
				   - errnode->vma - 4;

		if ((signed) branch_to_veneer < -(1 << 25)
		    || (signed) branch_to_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		insn |= (branch_to_veneer >> 2) & 0xffffff;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
	      }
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	      {
		bfd_vma branch_from_veneer;
		unsigned int insn;

		/* Take size of veneer into account.  */
		branch_from_veneer = errnode->u.v.branch->vma
				     - errnode->vma - 12;

		if ((signed) branch_from_veneer < -(1 << 25)
		    || (signed) branch_from_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		/* Original instruction.  */
		insn = errnode->u.v.branch->u.b.vfp_insn;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

		/* Branch back to insn after original insn.  */
		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
		contents[endianflip ^ (target + 4)] = insn & 0xff;
		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  if (arm_data->stm32l4xx_erratumcount != 0)
    {
      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
	   stm32l4xx_errnode != 0;
	   stm32l4xx_errnode = stm32l4xx_errnode->next)
	{
	  bfd_vma target = stm32l4xx_errnode->vma - offset;

	  switch (stm32l4xx_errnode->type)
	    {
	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
	      {
		unsigned int insn;
		bfd_vma branch_to_veneer =
		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;

		if ((signed) branch_to_veneer < -(1 << 24)
		    || (signed) branch_to_veneer >= (1 << 24))
		  {
		    bfd_vma out_of_range =
		      ((signed) branch_to_veneer < -(1 << 24)) ?
		      - branch_to_veneer - (1 << 24) :
		      ((signed) branch_to_veneer >= (1 << 24)) ?
		      branch_to_veneer - (1 << 24) : 0;

		    _bfd_error_handler
		      (_("%pB(%#" PRIx64 "): error: "
			 "cannot create STM32L4XX veneer; "
			 "jump out of range by %" PRId64 " bytes; "
			 "cannot encode branch instruction"),
		       output_bfd,
		       (uint64_t) (stm32l4xx_errnode->vma - 4),
		       (int64_t) out_of_range);
		    continue;
		  }

		insn = create_instruction_branch_absolute
		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);

		/* The instruction is before the label.  */
		target -= 4;

		put_thumb2_insn (globals, output_bfd,
				 (bfd_vma) insn, contents + target);
	      }
	      break;

	    case STM32L4XX_ERRATUM_VENEER:
	      {
		bfd_byte * veneer;
		bfd_byte * veneer_r;
		unsigned int insn;

		veneer = contents + target;
		veneer_r = veneer
		  + stm32l4xx_errnode->u.b.veneer->vma
		  - stm32l4xx_errnode->vma - 4;

		if ((signed) (veneer_r - veneer -
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
		    || (signed) (veneer_r - veneer) >= (1 << 24))
		  {
		    _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
					  "veneer"), output_bfd);
		    continue;
		  }

		/* Original instruction.  */
		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;

		stm32l4xx_create_replacing_stub
		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (This isn't the case if we perform no
	 edits, then rawsize will be zero and we should use size).  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;
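
      /* Each EXIDX entry is two 32-bit words (a prel31 offset to the
	 function it covers followed by its unwind data), hence the
	 "* 8" index scaling below.  ADD_TO_OFFSETS tracks how far the
	 entries copied after a deletion or insertion have moved, so
	 that copy_exidx_entry can adjust their prel31 words.  */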

      if (edited_contents == NULL)
	return false;
      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
	{
	  if (edit_node)
	    {
	      unsigned int edit_index = edit_node->index;

	      if (in_index < edit_index && in_index * 8 < input_size)
		{
		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				    contents + in_index * 8, add_to_offsets);
		  out_index++;
		  in_index++;
		}
	      else if (in_index == edit_index
		       || (in_index * 8 >= input_size
			   && edit_index == UINT_MAX))
		{
		  switch (edit_node->type)
		    {
		    case DELETE_EXIDX_ENTRY:
		      in_index++;
		      add_to_offsets += 8;
		      break;

		    case INSERT_EXIDX_CANTUNWIND_AT_END:
		      {
			asection *text_sec = edit_node->linked_section;
			bfd_vma text_offset = text_sec->output_section->vma
					      + text_sec->output_offset
					      + text_sec->size;
			bfd_vma exidx_offset = offset + out_index * 8;
			unsigned long prel31_offset;

			/* Note: this is meant to be equivalent to an
			   R_ARM_PREL31 relocation.  These synthetic
			   EXIDX_CANTUNWIND markers are not relocated by the
			   usual BFD method.  */
			prel31_offset = (text_offset - exidx_offset)
					& 0x7ffffffful;
			if (bfd_link_relocatable (link_info))
			  {
			    /* Here relocation for new EXIDX_CANTUNWIND is
			       created, so there is no need to
			       adjust offset by hand.  */
			    prel31_offset = text_sec->output_offset
					    + text_sec->size;
			  }

			/* First address we can't unwind.  */
			bfd_put_32 (output_bfd, prel31_offset,
				    &edited_contents[out_index * 8]);

			/* Code for EXIDX_CANTUNWIND.  */
			bfd_put_32 (output_bfd, 0x1,
				    &edited_contents[out_index * 8 + 4]);
			out_index++;
			add_to_offsets -= 8;
		      }
		      break;
		    }

		  edit_node = edit_node->next;
		}
	    }
	  else
	    {
	      /* No more edits, copy remaining entries verbatim.  */
	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				contents + in_index * 8, add_to_offsets);
	      out_index++;
	      in_index++;
	    }
	}

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
	bfd_set_section_contents (output_bfd, sec->output_section,
				  edited_contents,
				  (file_ptr) sec->output_offset, sec->size);

      return true;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
			 & data);
    }

  if (mapcount == 0)
    return false;
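
  /* The mapping symbol ranges recorded in MAP drive the code
     byteswapping below: 'a' ranges contain 32-bit ARM opcodes, 't'
     ranges contain 16-bit Thumb opcodes, and 'd' ranges are literal
     data that must be left untouched.  */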

  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
	{
	  if (i == mapcount - 1)
	    end = sec->size;
	  else
	    end = map[i + 1].vma;

	  switch (map[i].type)
	    {
	    case 'a':
	      /* Byte swap code words.  */
	      while (ptr + 3 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 3];
		  contents[ptr + 3] = tmp;
		  tmp = contents[ptr + 1];
		  contents[ptr + 1] = contents[ptr + 2];
		  contents[ptr + 2] = tmp;
		  ptr += 4;
		}
	      break;

	    case 't':
	      /* Byte swap code halfwords.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return false;
}
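
/* The EABI marks a Thumb function by setting bit 0 of its symbol
   value: a Thumb entry point at address 0x8000, for example, is
   recorded with st_value 0x8001.  The two routines below strip that
   bit (recording the branch type instead) when symbols are read in
   and reapply it when they are written out.  */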

/* Mangle thumb function symbols as we read them in.  */

static bool
elf32_arm_swap_symbol_in (bfd * abfd,
			  const void *psrc,
			  const void *pshn,
			  Elf_Internal_Sym *dst)
{
  if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
    return false;
  dst->st_target_internal = ST_BRANCH_TO_ARM;

  /* New EABI objects mark thumb function symbols by setting the low bit of
     the address.  */
  if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
      || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
    {
      if (dst->st_value & 1)
	{
	  dst->st_value &= ~(bfd_vma) 1;
	  ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
				   ST_BRANCH_TO_THUMB);
	}
      else
	ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
    {
      dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
  else
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);

  return true;
}

/* Mangle thumb function symbols as we write them out.  */

static void
elf32_arm_swap_symbol_out (bfd *abfd,
			   const Elf_Internal_Sym *src,
			   void *cdst,
			   void *shndx)
{
  Elf_Internal_Sym newsym;

  /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
     of the address set, as per the new EABI.  We do this unconditionally
     because objcopy does not set the elf header flags until after
     it writes out the symbol table.  */
  if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
    {
      newsym = *src;
      if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
	newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
      if (newsym.st_shndx != SHN_UNDEF)
	{
	  /* Do this only for defined symbols.  At link time, the static
	     linker will simulate the work of the dynamic linker in resolving
	     symbols and will carry over the thumbness of found symbols to
	     the output symbol table.  It's not clear how it happens, but
	     the thumbness of undefined symbols can well be different at
	     runtime, and writing '1' for them will be confusing for users
	     and possibly for the dynamic linker itself.  */
	  newsym.st_value |= 1;
	}

      src = &newsym;
    }
  bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
}

/* Add the PT_ARM_EXIDX program header.  */

static bool
elf32_arm_modify_segment_map (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  struct elf_segment_map *m;
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    {
      /* If there is already a PT_ARM_EXIDX header, then we do not
	 want to add another one.  This situation arises when running
	 "strip"; the input binary already has the header.  */
      m = elf_seg_map (abfd);
      while (m && m->p_type != PT_ARM_EXIDX)
	m = m->next;
      if (!m)
	{
	  m = (struct elf_segment_map *)
	      bfd_zalloc (abfd, sizeof (struct elf_segment_map));
	  if (m == NULL)
	    return false;
	  m->p_type = PT_ARM_EXIDX;
	  m->count = 1;
	  m->sections[0] = sec;

	  m->next = elf_seg_map (abfd);
	  elf_seg_map (abfd) = m;
	}
    }

  return true;
}

/* We may add a PT_ARM_EXIDX program header.  */

static int
elf32_arm_additional_program_headers (bfd *abfd,
				      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    return 1;
  else
    return 0;
}

/* Hook called by the linker routine which adds symbols from an object
   file.  */

static bool
elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
			   Elf_Internal_Sym *sym, const char **namep,
			   flagword *flagsp, asection **secp, bfd_vma *valp)
{
  if (elf32_arm_hash_table (info) == NULL)
    return false;

  if (elf32_arm_hash_table (info)->root.target_os == is_vxworks
      && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
				       flagsp, secp, valp))
    return false;

  return true;
}

/* We use this to override swap_symbol_in and swap_symbol_out.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* Hash table entry size.  */
  1,		/* Internal relocs per external relocs.  */
  32,		/* Arch size.  */
  2,		/* Log of file alignment.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,
  elf32_arm_swap_symbol_out,
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};

static bfd_vma
read_code32 (const bfd *abfd, const bfd_byte *addr)
{
  /* V7 BE8 code is always little endian.  */
  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
    return bfd_getl32 (addr);

  return bfd_get_32 (abfd, addr);
}

static bfd_vma
read_code16 (const bfd *abfd, const bfd_byte *addr)
{
  /* V7 BE8 code is always little endian.  */
  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
    return bfd_getl16 (addr);

  return bfd_get_16 (abfd, addr);
}

/* Return size of plt0 entry starting at ADDR
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr,
		     bfd_size_type data_size)
{
  bfd_vma first_word;
  bfd_vma plt0_size;

  if (data_size < 4)
    return (bfd_vma) -1;

  first_word = read_code32 (abfd, addr);

  if (first_word == elf32_arm_plt0_entry[0])
    plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
  else if (first_word == elf32_thumb2_plt0_entry[0])
    plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt0_size;
}

/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset,
		    bfd_size_type data_size)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (offset + 2 > data_size)
    return (bfd_vma) -1;
  if (read_code16 (abfd, start + offset) == elf32_arm_plt_thumb_stub[0])
    plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);

  /* Strip immediate from first add.  */
  if (offset + plt_size + 4 > data_size)
    return (bfd_vma) -1;
  first_insn = read_code32 (abfd, start + offset + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
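
/* elf32_arm_get_synthetic_symtab below walks the PLT using the two
   size helpers above and fabricates one "<name>@plt" symbol for each
   .rel.plt entry it can size, inserting a "+0x<addend>" component when
   the relocation carries a non-zero addend.  */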

/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
				long symcount ATTRIBUTE_UNUSED,
				asymbol **syms ATTRIBUTE_UNUSED,
				long dynsymcount,
				asymbol **dynsyms,
				asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, true))
    return -1;

  data = NULL;
  if (!bfd_get_full_section_contents (abfd, plt, &data))
    return -1;

  count = NUM_SHDR_ENTRIES (hdr);
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  offset = elf32_arm_plt0_size (abfd, data, plt->size);
  if (offset == (bfd_vma) -1
      || (s = *ret = (asymbol *) bfd_malloc (size)) == NULL)
    {
      free (data);
      return -1;
    }

  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset, plt->size);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  free (data);
  return n;
}

static bool
elf32_arm_section_flags (const Elf_Internal_Shdr *hdr)
{
  if (hdr->sh_flags & SHF_ARM_PURECODE)
    hdr->bfd_section->flags |= SEC_ELF_PURECODE;
  return true;
}

static flagword
elf32_arm_lookup_section_flags (char *flag_name)
{
  if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
    return SHF_ARM_PURECODE;

  return SEC_NO_FLAGS;
}

static unsigned int
elf32_arm_count_additional_relocs (asection *sec)
{
  struct _arm_elf_section_data *arm_data;
  arm_data = get_arm_elf_section_data (sec);

  return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
}

/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised,
   FALSE otherwise.  ISECTION is the best guess matching section from the
   input bfd IBFD, but it might be NULL.  */

static bool
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL)
	  {
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return true;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return false;
}

/* Returns TRUE if NAME is an ARM mapping symbol.
   Traditionally the symbols $a, $d and $t have been used.
   The ARM ELF standard also defines $x (for A64 code).  It also allows a
   period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
   Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
   not support them here.  $t.x indicates the start of ThumbEE instructions.  */

static bool
is_arm_mapping_symbol (const char * name)
{
  return name != NULL /* Paranoia.  */
    && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
			 the mapping symbols could have acquired a prefix.
			 We do not support this here, since such symbols no
			 longer conform to the ARM ELF ABI.  */
    && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
    && (name[2] == 0 || name[2] == '.');
  /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
     any characters that follow the period are legal characters for the body
     of a symbol's name.  For now we just assume that this is the case.  */
}

/* Make sure that mapping symbols in object files are not removed via the
   "strip --strip-unneeded" tool.  These symbols are needed in order to
   correctly generate interworking veneers, and for byte swapping code
   regions.  Once an object file has been linked, it is safe to remove the
   symbols as they will no longer be needed.  */

static void
elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
{
  if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
      && sym->section != bfd_abs_section_ptr
      && is_arm_mapping_symbol (sym->name))
    sym->flags |= BSF_KEEP;
}

#undef elf_backend_copy_special_section_fields
#define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields

#define ELF_ARCH bfd_arch_arm
#define ELF_TARGET_ID ARM_ELF_DATA
#define ELF_MACHINE_CODE EM_ARM
#define ELF_MAXPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000

#define bfd_elf32_mkobject elf32_arm_mkobject

#define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
#define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
#define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
#define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
#define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
#define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
#define bfd_elf32_new_section_hook elf32_arm_new_section_hook
#define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
#define bfd_elf32_bfd_final_link elf32_arm_final_link
#define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab

#define elf_backend_get_symbol_type elf32_arm_get_symbol_type
#define elf_backend_maybe_function_sym elf32_arm_maybe_function_sym
#define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
#define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
#define elf_backend_check_relocs elf32_arm_check_relocs
#define elf_backend_update_relocs elf32_arm_update_relocs
#define elf_backend_relocate_section elf32_arm_relocate_section
#define elf_backend_write_section elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
#define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
#define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
#define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
#define elf_backend_late_size_sections elf32_arm_late_size_sections
#define elf_backend_early_size_sections elf32_arm_early_size_sections
#define elf_backend_init_index_section _bfd_elf_init_2_index_sections
#define elf_backend_init_file_header elf32_arm_init_file_header
#define elf_backend_reloc_type_class elf32_arm_reloc_type_class
#define elf_backend_object_p elf32_arm_object_p
#define elf_backend_fake_sections elf32_arm_fake_sections
#define elf_backend_section_from_shdr elf32_arm_section_from_shdr
#define elf_backend_final_write_processing elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
#define elf_backend_size_info elf32_arm_size_info
#define elf_backend_modify_segment_map elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
#define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
#define elf_backend_begin_write_processing elf32_arm_begin_write_processing
#define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
#define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
#define elf_backend_symbol_processing elf32_arm_backend_symbol_processing

#define elf_backend_can_refcount 1
#define elf_backend_can_gc_sections 1
#define elf_backend_plt_readonly 1
#define elf_backend_want_got_plt 1
#define elf_backend_want_plt_sym 0
#define elf_backend_want_dynrelro 1
#define elf_backend_may_use_rel_p 1
#define elf_backend_may_use_rela_p 0
#define elf_backend_default_use_rela_p 0
#define elf_backend_dtrel_excludes_plt 1

#define elf_backend_got_header_size 12
#define elf_backend_extern_protected_data 0

#undef elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor "aeabi"
#undef elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section ".ARM.attributes"
#undef elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
#undef elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
#define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown

#undef elf_backend_section_flags
#define elf_backend_section_flags elf32_arm_section_flags
#undef elf_backend_lookup_section_flags_hook
#define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags

#define elf_backend_linux_prpsinfo32_ugid16 true

#include "elf32-target.h"

/* Native Client targets.  */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM arm_elf32_nacl_be_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME "elf32-bigarm-nacl"

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for NaCl.  */

static struct bfd_link_hash_table *
elf32_arm_nacl_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
	= (struct elf32_arm_link_hash_table *) ret;

      htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
      htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
    }
  return ret;
}

/* Since NaCl doesn't use the ARM-specific unwind format, we don't
   really need to use elf32_arm_modify_segment_map.  But we do it
   anyway just to reduce gratuitous differences with the stock ARM backend.  */

static bool
elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  return (elf32_arm_modify_segment_map (abfd, info)
	  && nacl_modify_segment_map (abfd, info));
}

static bool
elf32_arm_nacl_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return nacl_final_write_processing (abfd);
}

static bfd_vma
elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
			    const arelent *rel ATTRIBUTE_UNUSED)
{
  return plt->vma
	 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
		i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
}

#undef elf32_bed
#define elf32_bed elf32_arm_nacl_bed
#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create \
  elf32_arm_nacl_link_hash_table_create
#undef elf_backend_plt_alignment
#define elf_backend_plt_alignment 4
#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
#undef elf_backend_modify_headers
#define elf_backend_modify_headers nacl_modify_headers
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
#undef bfd_elf32_get_synthetic_symtab
#undef elf_backend_plt_sym_val
#define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
#undef elf_backend_copy_special_section_fields

#undef ELF_MINPAGESIZE
#undef ELF_COMMONPAGESIZE

#undef ELF_TARGET_OS
#define ELF_TARGET_OS is_nacl

#include "elf32-target.h"

/* Reset to defaults.  */
#undef elf_backend_plt_alignment
#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map elf32_arm_modify_segment_map
#undef elf_backend_modify_headers
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_final_write_processing
#undef ELF_MINPAGESIZE
#undef ELF_COMMONPAGESIZE
#define ELF_COMMONPAGESIZE 0x1000


/* FDPIC Targets.  */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME "elf32-bigarm-fdpic"
#undef elf_match_priority
#define elf_match_priority 128
#undef ELF_OSABI
#define ELF_OSABI ELFOSABI_ARM_FDPIC

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for FDPIC.  */

static struct bfd_link_hash_table *
elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;

      htab->fdpic_p = 1;
    }
  return ret;
}

/* We need dynamic symbols for every section, since segments can
   relocate independently.  */
static bool
elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
				     struct bfd_link_info *info ATTRIBUTE_UNUSED,
				     asection *p ATTRIBUTE_UNUSED)
{
  switch (elf_section_data (p)->this_hdr.sh_type)
    {
    case SHT_PROGBITS:
    case SHT_NOBITS:
      /* If sh_type is yet undecided, assume it could be
	 SHT_PROGBITS/SHT_NOBITS.  */
    case SHT_NULL:
      return false;

      /* There shouldn't be section relative relocations
	 against any other section.  */
    default:
      return true;
    }
}

#undef elf32_bed
#define elf32_bed elf32_arm_fdpic_bed

#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create

#undef elf_backend_omit_section_dynsym
#define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym

#undef ELF_TARGET_OS

#include "elf32-target.h"

#undef elf_match_priority
#undef ELF_OSABI
#undef elf_backend_omit_section_dynsym

/* VxWorks Targets.  */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME "elf32-bigarm-vxworks"

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for VxWorks.  */

static struct bfd_link_hash_table *
elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
	= (struct elf32_arm_link_hash_table *) ret;
      htab->use_rel = false;
    }
  return ret;
}

static bool
elf32_arm_vxworks_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return elf_vxworks_final_write_processing (abfd);
}

#undef elf32_bed
#define elf32_bed elf32_arm_vxworks_bed

#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
#undef elf_backend_emit_relocs
#define elf_backend_emit_relocs elf_vxworks_emit_relocs

#undef elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p 0
#undef elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p 1
#undef elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p 1
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 1
#undef ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE 0x1000
#undef ELF_TARGET_OS
#define ELF_TARGET_OS is_vxworks

#include "elf32-target.h"


/* Merge backend specific data from an object file to the output
   object file when linking.  */

static bool
elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  flagword out_flags;
  flagword in_flags;
  bool flags_compatible = true;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, info))
    return false;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return true;

  if (!elf32_arm_merge_eabi_attributes (ibfd, info))
    return false;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %pB is already in final BE8 format"),
			  ibfd);
      return false;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input has no flags set, then do not set the output flags.
	 This will allow future bfds to determine the desired output flags.
	 If no input bfds have any flags set, then neither will the output bfd.

	 Note - we used to restrict this test to when the input architecture
	 variant was the default variant, but this does not allow for
	 linker scripts which override the default.  See PR 28910 for an
	 example of this.  */
      if (in_flags == 0)
	return true;

      elf_flags_init (obfd) = true;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return true;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return false;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return true;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bool null_input_bfd = true;
      bool only_data_sections = true;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_section_flags (sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = false;

	      null_input_bfd = false;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return true;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
      return false;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = false;
	}

      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = false;
	}

      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "VFP", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "FPA", obfd);

	  flags_compatible = false;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
		   ibfd, obfd);

	      flags_compatible = false;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("warning: %pB supports interworking, whereas %pB does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("warning: %pB does not support interworking, whereas %pB does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;