/* 32-bit ELF support for ARM
   Copyright (C) 1998-2024 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */
25 #include "libiberty.h"
29 #include "elf-vxworks.h"
31 #include "elf32-arm.h"
/* Return the relocation section associated with NAME.  HTAB is the
   bfd's elf32_arm_link_hash_entry.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)

/* Return size of a relocation entry.  HTAB is the bfd's
   elf32_arm_link_hash_entry.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))

/* Return function to swap relocations in.  HTAB is the bfd's
   elf32_arm_link_hash_entry.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)

/* Return function to swap relocations out.  HTAB is the bfd's
   elf32_arm_link_hash_entry.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
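
/* Illustrative note (not part of the original source): the use_rel flag in
   the ARM link hash table selects REL vs. RELA processing, so these helpers
   let shared code emit either form.  A typical (hypothetical) emission site
   looks something like:

     Elf_Internal_Rela outrel;
     bfd_byte *loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
     SWAP_RELOC_OUT (htab) (output_bfd, &outrel, loc);

   where htab is the elf32_arm link hash table and srel the output
   relocation section (named by RELOC_SECTION (htab, ...)); the real
   emission paths live later in this file.  */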
#define elf_info_to_howto		NULL
#define elf_info_to_howto_rel		elf32_arm_info_to_howto

#define ARM_ELF_ABI_VERSION		0
#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM

/* The Adjusted Place, as defined by AAELF.  */
#define Pa(X)  ((X) & 0xfffffffc)
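
/* Illustrative note (not part of the original source): Pa() simply clears
   the low two bits so a fixup is measured from the word-aligned "adjusted
   place", e.g. Pa (0x80001002) == 0x80001000.  */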
static bool elf32_arm_write_section (bfd *output_bfd,
				     struct bfd_link_info *link_info,
				     asection *sec,
				     bfd_byte *contents);
/* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
   R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
   in that slot.  */

static reloc_howto_type elf32_arm_howto_table_1[] =
{
  HOWTO (R_ARM_NONE,		/* type */
	 false,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_NONE",		/* name */
	 false,			/* partial_inplace */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_PC24,		/* type */
	 true,			/* pc_relative */
	 complain_overflow_signed,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_PC24",		/* name */
	 false,			/* partial_inplace */
	 0x00ffffff,		/* src_mask */
	 0x00ffffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* 32 bit absolute */
  HOWTO (R_ARM_ABS32,		/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_ABS32",		/* name */
	 false,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* standard 32bit pc-relative reloc */
  HOWTO (R_ARM_REL32,		/* type */
	 true,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_REL32",		/* name */
	 false,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 true),			/* pcrel_offset */
  /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
  HOWTO (R_ARM_LDR_PC_G0,	/* type */
	 true,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_LDR_PC_G0",	/* name */
	 false,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* 16 bit absolute */
  HOWTO (R_ARM_ABS16,		/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_ABS16",		/* name */
	 false,			/* partial_inplace */
	 0x0000ffff,		/* src_mask */
	 0x0000ffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* 12 bit absolute */
  HOWTO (R_ARM_ABS12,		/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_ABS12",		/* name */
	 false,			/* partial_inplace */
	 0x00000fff,		/* src_mask */
	 0x00000fff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_THM_ABS5,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_THM_ABS5",	/* name */
	 false,			/* partial_inplace */
	 0x000007e0,		/* src_mask */
	 0x000007e0,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_ABS8,		/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_ABS8",		/* name */
	 false,			/* partial_inplace */
	 0x000000ff,		/* src_mask */
	 0x000000ff,		/* dst_mask */
	 false),		/* pcrel_offset */
  HOWTO (R_ARM_SBREL32,		/* type */
	 false,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_SBREL32",	/* name */
	 false,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_THM_CALL,	/* type */
	 true,			/* pc_relative */
	 complain_overflow_signed,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_THM_CALL",	/* name */
	 false,			/* partial_inplace */
	 0x07ff2fff,		/* src_mask */
	 0x07ff2fff,		/* dst_mask */
	 true),			/* pcrel_offset */

  HOWTO (R_ARM_THM_PC8,		/* type */
	 true,			/* pc_relative */
	 complain_overflow_signed,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_THM_PC8",	/* name */
	 false,			/* partial_inplace */
	 0x000000ff,		/* src_mask */
	 0x000000ff,		/* dst_mask */
	 true),			/* pcrel_offset */

  HOWTO (R_ARM_BREL_ADJ,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_signed,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_BREL_ADJ",	/* name */
	 false,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_TLS_DESC,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_DESC",	/* name */
	 false,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_THM_SWI8,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_signed,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_SWI8",		/* name */
	 false,			/* partial_inplace */
	 0x00000000,		/* src_mask */
	 0x00000000,		/* dst_mask */
	 false),		/* pcrel_offset */
  /* BLX instruction for the ARM.  */
  HOWTO (R_ARM_XPC25,		/* type */
	 true,			/* pc_relative */
	 complain_overflow_signed,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_XPC25",		/* name */
	 false,			/* partial_inplace */
	 0x00ffffff,		/* src_mask */
	 0x00ffffff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* BLX instruction for the Thumb.  */
  HOWTO (R_ARM_THM_XPC22,	/* type */
	 true,			/* pc_relative */
	 complain_overflow_signed,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_THM_XPC22",	/* name */
	 false,			/* partial_inplace */
	 0x07ff2fff,		/* src_mask */
	 0x07ff2fff,		/* dst_mask */
	 true),			/* pcrel_offset */

  /* Dynamic TLS relocations.  */

  HOWTO (R_ARM_TLS_DTPMOD32,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_DTPMOD32",	/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_TLS_DTPOFF32,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_DTPOFF32",	/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_TLS_TPOFF32,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_TPOFF32",	/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  /* Relocs used in ARM Linux.  */

  HOWTO (R_ARM_COPY,		/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_COPY",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_GLOB_DAT,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GLOB_DAT",	/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_JUMP_SLOT,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_JUMP_SLOT",	/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_RELATIVE,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RELATIVE",	/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
429 HOWTO (R_ARM_GOTOFF32
, /* type */
433 false, /* pc_relative */
435 complain_overflow_bitfield
,/* complain_on_overflow */
436 bfd_elf_generic_reloc
, /* special_function */
437 "R_ARM_GOTOFF32", /* name */
438 true, /* partial_inplace */
439 0xffffffff, /* src_mask */
440 0xffffffff, /* dst_mask */
441 false), /* pcrel_offset */
443 HOWTO (R_ARM_GOTPC
, /* type */
447 true, /* pc_relative */
449 complain_overflow_bitfield
,/* complain_on_overflow */
450 bfd_elf_generic_reloc
, /* special_function */
451 "R_ARM_GOTPC", /* name */
452 true, /* partial_inplace */
453 0xffffffff, /* src_mask */
454 0xffffffff, /* dst_mask */
455 true), /* pcrel_offset */
457 HOWTO (R_ARM_GOT32
, /* type */
461 false, /* pc_relative */
463 complain_overflow_bitfield
,/* complain_on_overflow */
464 bfd_elf_generic_reloc
, /* special_function */
465 "R_ARM_GOT32", /* name */
466 true, /* partial_inplace */
467 0xffffffff, /* src_mask */
468 0xffffffff, /* dst_mask */
469 false), /* pcrel_offset */
471 HOWTO (R_ARM_PLT32
, /* type */
475 true, /* pc_relative */
477 complain_overflow_bitfield
,/* complain_on_overflow */
478 bfd_elf_generic_reloc
, /* special_function */
479 "R_ARM_PLT32", /* name */
480 false, /* partial_inplace */
481 0x00ffffff, /* src_mask */
482 0x00ffffff, /* dst_mask */
483 true), /* pcrel_offset */
485 HOWTO (R_ARM_CALL
, /* type */
489 true, /* pc_relative */
491 complain_overflow_signed
,/* complain_on_overflow */
492 bfd_elf_generic_reloc
, /* special_function */
493 "R_ARM_CALL", /* name */
494 false, /* partial_inplace */
495 0x00ffffff, /* src_mask */
496 0x00ffffff, /* dst_mask */
497 true), /* pcrel_offset */
499 HOWTO (R_ARM_JUMP24
, /* type */
503 true, /* pc_relative */
505 complain_overflow_signed
,/* complain_on_overflow */
506 bfd_elf_generic_reloc
, /* special_function */
507 "R_ARM_JUMP24", /* name */
508 false, /* partial_inplace */
509 0x00ffffff, /* src_mask */
510 0x00ffffff, /* dst_mask */
511 true), /* pcrel_offset */
513 HOWTO (R_ARM_THM_JUMP24
, /* type */
517 true, /* pc_relative */
519 complain_overflow_signed
,/* complain_on_overflow */
520 bfd_elf_generic_reloc
, /* special_function */
521 "R_ARM_THM_JUMP24", /* name */
522 false, /* partial_inplace */
523 0x07ff2fff, /* src_mask */
524 0x07ff2fff, /* dst_mask */
525 true), /* pcrel_offset */
527 HOWTO (R_ARM_BASE_ABS
, /* type */
531 false, /* pc_relative */
533 complain_overflow_dont
,/* complain_on_overflow */
534 bfd_elf_generic_reloc
, /* special_function */
535 "R_ARM_BASE_ABS", /* name */
536 false, /* partial_inplace */
537 0xffffffff, /* src_mask */
538 0xffffffff, /* dst_mask */
539 false), /* pcrel_offset */
541 HOWTO (R_ARM_ALU_PCREL7_0
, /* type */
545 true, /* pc_relative */
547 complain_overflow_dont
,/* complain_on_overflow */
548 bfd_elf_generic_reloc
, /* special_function */
549 "R_ARM_ALU_PCREL_7_0", /* name */
550 false, /* partial_inplace */
551 0x00000fff, /* src_mask */
552 0x00000fff, /* dst_mask */
553 true), /* pcrel_offset */
555 HOWTO (R_ARM_ALU_PCREL15_8
, /* type */
559 true, /* pc_relative */
561 complain_overflow_dont
,/* complain_on_overflow */
562 bfd_elf_generic_reloc
, /* special_function */
563 "R_ARM_ALU_PCREL_15_8",/* name */
564 false, /* partial_inplace */
565 0x00000fff, /* src_mask */
566 0x00000fff, /* dst_mask */
567 true), /* pcrel_offset */
569 HOWTO (R_ARM_ALU_PCREL23_15
, /* type */
573 true, /* pc_relative */
575 complain_overflow_dont
,/* complain_on_overflow */
576 bfd_elf_generic_reloc
, /* special_function */
577 "R_ARM_ALU_PCREL_23_15",/* name */
578 false, /* partial_inplace */
579 0x00000fff, /* src_mask */
580 0x00000fff, /* dst_mask */
581 true), /* pcrel_offset */
583 HOWTO (R_ARM_LDR_SBREL_11_0
, /* type */
587 false, /* pc_relative */
589 complain_overflow_dont
,/* complain_on_overflow */
590 bfd_elf_generic_reloc
, /* special_function */
591 "R_ARM_LDR_SBREL_11_0",/* name */
592 false, /* partial_inplace */
593 0x00000fff, /* src_mask */
594 0x00000fff, /* dst_mask */
595 false), /* pcrel_offset */
597 HOWTO (R_ARM_ALU_SBREL_19_12
, /* type */
601 false, /* pc_relative */
603 complain_overflow_dont
,/* complain_on_overflow */
604 bfd_elf_generic_reloc
, /* special_function */
605 "R_ARM_ALU_SBREL_19_12",/* name */
606 false, /* partial_inplace */
607 0x000ff000, /* src_mask */
608 0x000ff000, /* dst_mask */
609 false), /* pcrel_offset */
611 HOWTO (R_ARM_ALU_SBREL_27_20
, /* type */
615 false, /* pc_relative */
617 complain_overflow_dont
,/* complain_on_overflow */
618 bfd_elf_generic_reloc
, /* special_function */
619 "R_ARM_ALU_SBREL_27_20",/* name */
620 false, /* partial_inplace */
621 0x0ff00000, /* src_mask */
622 0x0ff00000, /* dst_mask */
623 false), /* pcrel_offset */
625 HOWTO (R_ARM_TARGET1
, /* type */
629 false, /* pc_relative */
631 complain_overflow_dont
,/* complain_on_overflow */
632 bfd_elf_generic_reloc
, /* special_function */
633 "R_ARM_TARGET1", /* name */
634 false, /* partial_inplace */
635 0xffffffff, /* src_mask */
636 0xffffffff, /* dst_mask */
637 false), /* pcrel_offset */
639 HOWTO (R_ARM_ROSEGREL32
, /* type */
643 false, /* pc_relative */
645 complain_overflow_dont
,/* complain_on_overflow */
646 bfd_elf_generic_reloc
, /* special_function */
647 "R_ARM_ROSEGREL32", /* name */
648 false, /* partial_inplace */
649 0xffffffff, /* src_mask */
650 0xffffffff, /* dst_mask */
651 false), /* pcrel_offset */
653 HOWTO (R_ARM_V4BX
, /* type */
657 false, /* pc_relative */
659 complain_overflow_dont
,/* complain_on_overflow */
660 bfd_elf_generic_reloc
, /* special_function */
661 "R_ARM_V4BX", /* name */
662 false, /* partial_inplace */
663 0xffffffff, /* src_mask */
664 0xffffffff, /* dst_mask */
665 false), /* pcrel_offset */
667 HOWTO (R_ARM_TARGET2
, /* type */
671 false, /* pc_relative */
673 complain_overflow_signed
,/* complain_on_overflow */
674 bfd_elf_generic_reloc
, /* special_function */
675 "R_ARM_TARGET2", /* name */
676 false, /* partial_inplace */
677 0xffffffff, /* src_mask */
678 0xffffffff, /* dst_mask */
679 true), /* pcrel_offset */
681 HOWTO (R_ARM_PREL31
, /* type */
685 true, /* pc_relative */
687 complain_overflow_signed
,/* complain_on_overflow */
688 bfd_elf_generic_reloc
, /* special_function */
689 "R_ARM_PREL31", /* name */
690 false, /* partial_inplace */
691 0x7fffffff, /* src_mask */
692 0x7fffffff, /* dst_mask */
693 true), /* pcrel_offset */
695 HOWTO (R_ARM_MOVW_ABS_NC
, /* type */
699 false, /* pc_relative */
701 complain_overflow_dont
,/* complain_on_overflow */
702 bfd_elf_generic_reloc
, /* special_function */
703 "R_ARM_MOVW_ABS_NC", /* name */
704 false, /* partial_inplace */
705 0x000f0fff, /* src_mask */
706 0x000f0fff, /* dst_mask */
707 false), /* pcrel_offset */
709 HOWTO (R_ARM_MOVT_ABS
, /* type */
713 false, /* pc_relative */
715 complain_overflow_bitfield
,/* complain_on_overflow */
716 bfd_elf_generic_reloc
, /* special_function */
717 "R_ARM_MOVT_ABS", /* name */
718 false, /* partial_inplace */
719 0x000f0fff, /* src_mask */
720 0x000f0fff, /* dst_mask */
721 false), /* pcrel_offset */
723 HOWTO (R_ARM_MOVW_PREL_NC
, /* type */
727 true, /* pc_relative */
729 complain_overflow_dont
,/* complain_on_overflow */
730 bfd_elf_generic_reloc
, /* special_function */
731 "R_ARM_MOVW_PREL_NC", /* name */
732 false, /* partial_inplace */
733 0x000f0fff, /* src_mask */
734 0x000f0fff, /* dst_mask */
735 true), /* pcrel_offset */
737 HOWTO (R_ARM_MOVT_PREL
, /* type */
741 true, /* pc_relative */
743 complain_overflow_bitfield
,/* complain_on_overflow */
744 bfd_elf_generic_reloc
, /* special_function */
745 "R_ARM_MOVT_PREL", /* name */
746 false, /* partial_inplace */
747 0x000f0fff, /* src_mask */
748 0x000f0fff, /* dst_mask */
749 true), /* pcrel_offset */
751 HOWTO (R_ARM_THM_MOVW_ABS_NC
, /* type */
755 false, /* pc_relative */
757 complain_overflow_dont
,/* complain_on_overflow */
758 bfd_elf_generic_reloc
, /* special_function */
759 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 false, /* partial_inplace */
761 0x040f70ff, /* src_mask */
762 0x040f70ff, /* dst_mask */
763 false), /* pcrel_offset */
765 HOWTO (R_ARM_THM_MOVT_ABS
, /* type */
769 false, /* pc_relative */
771 complain_overflow_bitfield
,/* complain_on_overflow */
772 bfd_elf_generic_reloc
, /* special_function */
773 "R_ARM_THM_MOVT_ABS", /* name */
774 false, /* partial_inplace */
775 0x040f70ff, /* src_mask */
776 0x040f70ff, /* dst_mask */
777 false), /* pcrel_offset */
779 HOWTO (R_ARM_THM_MOVW_PREL_NC
,/* type */
783 true, /* pc_relative */
785 complain_overflow_dont
,/* complain_on_overflow */
786 bfd_elf_generic_reloc
, /* special_function */
787 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 false, /* partial_inplace */
789 0x040f70ff, /* src_mask */
790 0x040f70ff, /* dst_mask */
791 true), /* pcrel_offset */
793 HOWTO (R_ARM_THM_MOVT_PREL
, /* type */
797 true, /* pc_relative */
799 complain_overflow_bitfield
,/* complain_on_overflow */
800 bfd_elf_generic_reloc
, /* special_function */
801 "R_ARM_THM_MOVT_PREL", /* name */
802 false, /* partial_inplace */
803 0x040f70ff, /* src_mask */
804 0x040f70ff, /* dst_mask */
805 true), /* pcrel_offset */
807 HOWTO (R_ARM_THM_JUMP19
, /* type */
811 true, /* pc_relative */
813 complain_overflow_signed
,/* complain_on_overflow */
814 bfd_elf_generic_reloc
, /* special_function */
815 "R_ARM_THM_JUMP19", /* name */
816 false, /* partial_inplace */
817 0x043f2fff, /* src_mask */
818 0x043f2fff, /* dst_mask */
819 true), /* pcrel_offset */
821 HOWTO (R_ARM_THM_JUMP6
, /* type */
825 true, /* pc_relative */
827 complain_overflow_unsigned
,/* complain_on_overflow */
828 bfd_elf_generic_reloc
, /* special_function */
829 "R_ARM_THM_JUMP6", /* name */
830 false, /* partial_inplace */
831 0x02f8, /* src_mask */
832 0x02f8, /* dst_mask */
833 true), /* pcrel_offset */
  /* These are declared as 13-bit signed relocations because we can
     address -4095 .. 4095 (base) by altering ADDW to SUBW or vice
     versa.  */
838 HOWTO (R_ARM_THM_ALU_PREL_11_0
,/* type */
842 true, /* pc_relative */
844 complain_overflow_dont
,/* complain_on_overflow */
845 bfd_elf_generic_reloc
, /* special_function */
846 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 false, /* partial_inplace */
848 0xffffffff, /* src_mask */
849 0xffffffff, /* dst_mask */
850 true), /* pcrel_offset */
852 HOWTO (R_ARM_THM_PC12
, /* type */
856 true, /* pc_relative */
858 complain_overflow_dont
,/* complain_on_overflow */
859 bfd_elf_generic_reloc
, /* special_function */
860 "R_ARM_THM_PC12", /* name */
861 false, /* partial_inplace */
862 0xffffffff, /* src_mask */
863 0xffffffff, /* dst_mask */
864 true), /* pcrel_offset */
866 HOWTO (R_ARM_ABS32_NOI
, /* type */
870 false, /* pc_relative */
872 complain_overflow_dont
,/* complain_on_overflow */
873 bfd_elf_generic_reloc
, /* special_function */
874 "R_ARM_ABS32_NOI", /* name */
875 false, /* partial_inplace */
876 0xffffffff, /* src_mask */
877 0xffffffff, /* dst_mask */
878 false), /* pcrel_offset */
880 HOWTO (R_ARM_REL32_NOI
, /* type */
884 true, /* pc_relative */
886 complain_overflow_dont
,/* complain_on_overflow */
887 bfd_elf_generic_reloc
, /* special_function */
888 "R_ARM_REL32_NOI", /* name */
889 false, /* partial_inplace */
890 0xffffffff, /* src_mask */
891 0xffffffff, /* dst_mask */
892 false), /* pcrel_offset */
  /* Group relocations.  */
896 HOWTO (R_ARM_ALU_PC_G0_NC
, /* type */
900 true, /* pc_relative */
902 complain_overflow_dont
,/* complain_on_overflow */
903 bfd_elf_generic_reloc
, /* special_function */
904 "R_ARM_ALU_PC_G0_NC", /* name */
905 false, /* partial_inplace */
906 0xffffffff, /* src_mask */
907 0xffffffff, /* dst_mask */
908 true), /* pcrel_offset */
910 HOWTO (R_ARM_ALU_PC_G0
, /* type */
914 true, /* pc_relative */
916 complain_overflow_dont
,/* complain_on_overflow */
917 bfd_elf_generic_reloc
, /* special_function */
918 "R_ARM_ALU_PC_G0", /* name */
919 false, /* partial_inplace */
920 0xffffffff, /* src_mask */
921 0xffffffff, /* dst_mask */
922 true), /* pcrel_offset */
924 HOWTO (R_ARM_ALU_PC_G1_NC
, /* type */
928 true, /* pc_relative */
930 complain_overflow_dont
,/* complain_on_overflow */
931 bfd_elf_generic_reloc
, /* special_function */
932 "R_ARM_ALU_PC_G1_NC", /* name */
933 false, /* partial_inplace */
934 0xffffffff, /* src_mask */
935 0xffffffff, /* dst_mask */
936 true), /* pcrel_offset */
938 HOWTO (R_ARM_ALU_PC_G1
, /* type */
942 true, /* pc_relative */
944 complain_overflow_dont
,/* complain_on_overflow */
945 bfd_elf_generic_reloc
, /* special_function */
946 "R_ARM_ALU_PC_G1", /* name */
947 false, /* partial_inplace */
948 0xffffffff, /* src_mask */
949 0xffffffff, /* dst_mask */
950 true), /* pcrel_offset */
952 HOWTO (R_ARM_ALU_PC_G2
, /* type */
956 true, /* pc_relative */
958 complain_overflow_dont
,/* complain_on_overflow */
959 bfd_elf_generic_reloc
, /* special_function */
960 "R_ARM_ALU_PC_G2", /* name */
961 false, /* partial_inplace */
962 0xffffffff, /* src_mask */
963 0xffffffff, /* dst_mask */
964 true), /* pcrel_offset */
966 HOWTO (R_ARM_LDR_PC_G1
, /* type */
970 true, /* pc_relative */
972 complain_overflow_dont
,/* complain_on_overflow */
973 bfd_elf_generic_reloc
, /* special_function */
974 "R_ARM_LDR_PC_G1", /* name */
975 false, /* partial_inplace */
976 0xffffffff, /* src_mask */
977 0xffffffff, /* dst_mask */
978 true), /* pcrel_offset */
980 HOWTO (R_ARM_LDR_PC_G2
, /* type */
984 true, /* pc_relative */
986 complain_overflow_dont
,/* complain_on_overflow */
987 bfd_elf_generic_reloc
, /* special_function */
988 "R_ARM_LDR_PC_G2", /* name */
989 false, /* partial_inplace */
990 0xffffffff, /* src_mask */
991 0xffffffff, /* dst_mask */
992 true), /* pcrel_offset */
994 HOWTO (R_ARM_LDRS_PC_G0
, /* type */
998 true, /* pc_relative */
1000 complain_overflow_dont
,/* complain_on_overflow */
1001 bfd_elf_generic_reloc
, /* special_function */
1002 "R_ARM_LDRS_PC_G0", /* name */
1003 false, /* partial_inplace */
1004 0xffffffff, /* src_mask */
1005 0xffffffff, /* dst_mask */
1006 true), /* pcrel_offset */
1008 HOWTO (R_ARM_LDRS_PC_G1
, /* type */
1012 true, /* pc_relative */
1014 complain_overflow_dont
,/* complain_on_overflow */
1015 bfd_elf_generic_reloc
, /* special_function */
1016 "R_ARM_LDRS_PC_G1", /* name */
1017 false, /* partial_inplace */
1018 0xffffffff, /* src_mask */
1019 0xffffffff, /* dst_mask */
1020 true), /* pcrel_offset */
1022 HOWTO (R_ARM_LDRS_PC_G2
, /* type */
1026 true, /* pc_relative */
1028 complain_overflow_dont
,/* complain_on_overflow */
1029 bfd_elf_generic_reloc
, /* special_function */
1030 "R_ARM_LDRS_PC_G2", /* name */
1031 false, /* partial_inplace */
1032 0xffffffff, /* src_mask */
1033 0xffffffff, /* dst_mask */
1034 true), /* pcrel_offset */
1036 HOWTO (R_ARM_LDC_PC_G0
, /* type */
1040 true, /* pc_relative */
1042 complain_overflow_dont
,/* complain_on_overflow */
1043 bfd_elf_generic_reloc
, /* special_function */
1044 "R_ARM_LDC_PC_G0", /* name */
1045 false, /* partial_inplace */
1046 0xffffffff, /* src_mask */
1047 0xffffffff, /* dst_mask */
1048 true), /* pcrel_offset */
1050 HOWTO (R_ARM_LDC_PC_G1
, /* type */
1054 true, /* pc_relative */
1056 complain_overflow_dont
,/* complain_on_overflow */
1057 bfd_elf_generic_reloc
, /* special_function */
1058 "R_ARM_LDC_PC_G1", /* name */
1059 false, /* partial_inplace */
1060 0xffffffff, /* src_mask */
1061 0xffffffff, /* dst_mask */
1062 true), /* pcrel_offset */
1064 HOWTO (R_ARM_LDC_PC_G2
, /* type */
1068 true, /* pc_relative */
1070 complain_overflow_dont
,/* complain_on_overflow */
1071 bfd_elf_generic_reloc
, /* special_function */
1072 "R_ARM_LDC_PC_G2", /* name */
1073 false, /* partial_inplace */
1074 0xffffffff, /* src_mask */
1075 0xffffffff, /* dst_mask */
1076 true), /* pcrel_offset */
1078 HOWTO (R_ARM_ALU_SB_G0_NC
, /* type */
1082 true, /* pc_relative */
1084 complain_overflow_dont
,/* complain_on_overflow */
1085 bfd_elf_generic_reloc
, /* special_function */
1086 "R_ARM_ALU_SB_G0_NC", /* name */
1087 false, /* partial_inplace */
1088 0xffffffff, /* src_mask */
1089 0xffffffff, /* dst_mask */
1090 true), /* pcrel_offset */
1092 HOWTO (R_ARM_ALU_SB_G0
, /* type */
1096 true, /* pc_relative */
1098 complain_overflow_dont
,/* complain_on_overflow */
1099 bfd_elf_generic_reloc
, /* special_function */
1100 "R_ARM_ALU_SB_G0", /* name */
1101 false, /* partial_inplace */
1102 0xffffffff, /* src_mask */
1103 0xffffffff, /* dst_mask */
1104 true), /* pcrel_offset */
1106 HOWTO (R_ARM_ALU_SB_G1_NC
, /* type */
1110 true, /* pc_relative */
1112 complain_overflow_dont
,/* complain_on_overflow */
1113 bfd_elf_generic_reloc
, /* special_function */
1114 "R_ARM_ALU_SB_G1_NC", /* name */
1115 false, /* partial_inplace */
1116 0xffffffff, /* src_mask */
1117 0xffffffff, /* dst_mask */
1118 true), /* pcrel_offset */
1120 HOWTO (R_ARM_ALU_SB_G1
, /* type */
1124 true, /* pc_relative */
1126 complain_overflow_dont
,/* complain_on_overflow */
1127 bfd_elf_generic_reloc
, /* special_function */
1128 "R_ARM_ALU_SB_G1", /* name */
1129 false, /* partial_inplace */
1130 0xffffffff, /* src_mask */
1131 0xffffffff, /* dst_mask */
1132 true), /* pcrel_offset */
1134 HOWTO (R_ARM_ALU_SB_G2
, /* type */
1138 true, /* pc_relative */
1140 complain_overflow_dont
,/* complain_on_overflow */
1141 bfd_elf_generic_reloc
, /* special_function */
1142 "R_ARM_ALU_SB_G2", /* name */
1143 false, /* partial_inplace */
1144 0xffffffff, /* src_mask */
1145 0xffffffff, /* dst_mask */
1146 true), /* pcrel_offset */
1148 HOWTO (R_ARM_LDR_SB_G0
, /* type */
1152 true, /* pc_relative */
1154 complain_overflow_dont
,/* complain_on_overflow */
1155 bfd_elf_generic_reloc
, /* special_function */
1156 "R_ARM_LDR_SB_G0", /* name */
1157 false, /* partial_inplace */
1158 0xffffffff, /* src_mask */
1159 0xffffffff, /* dst_mask */
1160 true), /* pcrel_offset */
1162 HOWTO (R_ARM_LDR_SB_G1
, /* type */
1166 true, /* pc_relative */
1168 complain_overflow_dont
,/* complain_on_overflow */
1169 bfd_elf_generic_reloc
, /* special_function */
1170 "R_ARM_LDR_SB_G1", /* name */
1171 false, /* partial_inplace */
1172 0xffffffff, /* src_mask */
1173 0xffffffff, /* dst_mask */
1174 true), /* pcrel_offset */
1176 HOWTO (R_ARM_LDR_SB_G2
, /* type */
1180 true, /* pc_relative */
1182 complain_overflow_dont
,/* complain_on_overflow */
1183 bfd_elf_generic_reloc
, /* special_function */
1184 "R_ARM_LDR_SB_G2", /* name */
1185 false, /* partial_inplace */
1186 0xffffffff, /* src_mask */
1187 0xffffffff, /* dst_mask */
1188 true), /* pcrel_offset */
1190 HOWTO (R_ARM_LDRS_SB_G0
, /* type */
1194 true, /* pc_relative */
1196 complain_overflow_dont
,/* complain_on_overflow */
1197 bfd_elf_generic_reloc
, /* special_function */
1198 "R_ARM_LDRS_SB_G0", /* name */
1199 false, /* partial_inplace */
1200 0xffffffff, /* src_mask */
1201 0xffffffff, /* dst_mask */
1202 true), /* pcrel_offset */
1204 HOWTO (R_ARM_LDRS_SB_G1
, /* type */
1208 true, /* pc_relative */
1210 complain_overflow_dont
,/* complain_on_overflow */
1211 bfd_elf_generic_reloc
, /* special_function */
1212 "R_ARM_LDRS_SB_G1", /* name */
1213 false, /* partial_inplace */
1214 0xffffffff, /* src_mask */
1215 0xffffffff, /* dst_mask */
1216 true), /* pcrel_offset */
1218 HOWTO (R_ARM_LDRS_SB_G2
, /* type */
1222 true, /* pc_relative */
1224 complain_overflow_dont
,/* complain_on_overflow */
1225 bfd_elf_generic_reloc
, /* special_function */
1226 "R_ARM_LDRS_SB_G2", /* name */
1227 false, /* partial_inplace */
1228 0xffffffff, /* src_mask */
1229 0xffffffff, /* dst_mask */
1230 true), /* pcrel_offset */
1232 HOWTO (R_ARM_LDC_SB_G0
, /* type */
1236 true, /* pc_relative */
1238 complain_overflow_dont
,/* complain_on_overflow */
1239 bfd_elf_generic_reloc
, /* special_function */
1240 "R_ARM_LDC_SB_G0", /* name */
1241 false, /* partial_inplace */
1242 0xffffffff, /* src_mask */
1243 0xffffffff, /* dst_mask */
1244 true), /* pcrel_offset */
1246 HOWTO (R_ARM_LDC_SB_G1
, /* type */
1250 true, /* pc_relative */
1252 complain_overflow_dont
,/* complain_on_overflow */
1253 bfd_elf_generic_reloc
, /* special_function */
1254 "R_ARM_LDC_SB_G1", /* name */
1255 false, /* partial_inplace */
1256 0xffffffff, /* src_mask */
1257 0xffffffff, /* dst_mask */
1258 true), /* pcrel_offset */
1260 HOWTO (R_ARM_LDC_SB_G2
, /* type */
1264 true, /* pc_relative */
1266 complain_overflow_dont
,/* complain_on_overflow */
1267 bfd_elf_generic_reloc
, /* special_function */
1268 "R_ARM_LDC_SB_G2", /* name */
1269 false, /* partial_inplace */
1270 0xffffffff, /* src_mask */
1271 0xffffffff, /* dst_mask */
1272 true), /* pcrel_offset */
  /* End of group relocations.  */
1276 HOWTO (R_ARM_MOVW_BREL_NC
, /* type */
1280 false, /* pc_relative */
1282 complain_overflow_dont
,/* complain_on_overflow */
1283 bfd_elf_generic_reloc
, /* special_function */
1284 "R_ARM_MOVW_BREL_NC", /* name */
1285 false, /* partial_inplace */
1286 0x0000ffff, /* src_mask */
1287 0x0000ffff, /* dst_mask */
1288 false), /* pcrel_offset */
1290 HOWTO (R_ARM_MOVT_BREL
, /* type */
1294 false, /* pc_relative */
1296 complain_overflow_bitfield
,/* complain_on_overflow */
1297 bfd_elf_generic_reloc
, /* special_function */
1298 "R_ARM_MOVT_BREL", /* name */
1299 false, /* partial_inplace */
1300 0x0000ffff, /* src_mask */
1301 0x0000ffff, /* dst_mask */
1302 false), /* pcrel_offset */
1304 HOWTO (R_ARM_MOVW_BREL
, /* type */
1308 false, /* pc_relative */
1310 complain_overflow_dont
,/* complain_on_overflow */
1311 bfd_elf_generic_reloc
, /* special_function */
1312 "R_ARM_MOVW_BREL", /* name */
1313 false, /* partial_inplace */
1314 0x0000ffff, /* src_mask */
1315 0x0000ffff, /* dst_mask */
1316 false), /* pcrel_offset */
1318 HOWTO (R_ARM_THM_MOVW_BREL_NC
,/* type */
1322 false, /* pc_relative */
1324 complain_overflow_dont
,/* complain_on_overflow */
1325 bfd_elf_generic_reloc
, /* special_function */
1326 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 false, /* partial_inplace */
1328 0x040f70ff, /* src_mask */
1329 0x040f70ff, /* dst_mask */
1330 false), /* pcrel_offset */
1332 HOWTO (R_ARM_THM_MOVT_BREL
, /* type */
1336 false, /* pc_relative */
1338 complain_overflow_bitfield
,/* complain_on_overflow */
1339 bfd_elf_generic_reloc
, /* special_function */
1340 "R_ARM_THM_MOVT_BREL", /* name */
1341 false, /* partial_inplace */
1342 0x040f70ff, /* src_mask */
1343 0x040f70ff, /* dst_mask */
1344 false), /* pcrel_offset */
1346 HOWTO (R_ARM_THM_MOVW_BREL
, /* type */
1350 false, /* pc_relative */
1352 complain_overflow_dont
,/* complain_on_overflow */
1353 bfd_elf_generic_reloc
, /* special_function */
1354 "R_ARM_THM_MOVW_BREL", /* name */
1355 false, /* partial_inplace */
1356 0x040f70ff, /* src_mask */
1357 0x040f70ff, /* dst_mask */
1358 false), /* pcrel_offset */
1360 HOWTO (R_ARM_TLS_GOTDESC
, /* type */
1364 false, /* pc_relative */
1366 complain_overflow_bitfield
,/* complain_on_overflow */
1367 NULL
, /* special_function */
1368 "R_ARM_TLS_GOTDESC", /* name */
1369 true, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 false), /* pcrel_offset */
1374 HOWTO (R_ARM_TLS_CALL
, /* type */
1378 false, /* pc_relative */
1380 complain_overflow_dont
,/* complain_on_overflow */
1381 bfd_elf_generic_reloc
, /* special_function */
1382 "R_ARM_TLS_CALL", /* name */
1383 false, /* partial_inplace */
1384 0x00ffffff, /* src_mask */
1385 0x00ffffff, /* dst_mask */
1386 false), /* pcrel_offset */
1388 HOWTO (R_ARM_TLS_DESCSEQ
, /* type */
1392 false, /* pc_relative */
1394 complain_overflow_dont
,/* complain_on_overflow */
1395 bfd_elf_generic_reloc
, /* special_function */
1396 "R_ARM_TLS_DESCSEQ", /* name */
1397 false, /* partial_inplace */
1398 0x00000000, /* src_mask */
1399 0x00000000, /* dst_mask */
1400 false), /* pcrel_offset */
1402 HOWTO (R_ARM_THM_TLS_CALL
, /* type */
1406 false, /* pc_relative */
1408 complain_overflow_dont
,/* complain_on_overflow */
1409 bfd_elf_generic_reloc
, /* special_function */
1410 "R_ARM_THM_TLS_CALL", /* name */
1411 false, /* partial_inplace */
1412 0x07ff07ff, /* src_mask */
1413 0x07ff07ff, /* dst_mask */
1414 false), /* pcrel_offset */
1416 HOWTO (R_ARM_PLT32_ABS
, /* type */
1420 false, /* pc_relative */
1422 complain_overflow_dont
,/* complain_on_overflow */
1423 bfd_elf_generic_reloc
, /* special_function */
1424 "R_ARM_PLT32_ABS", /* name */
1425 false, /* partial_inplace */
1426 0xffffffff, /* src_mask */
1427 0xffffffff, /* dst_mask */
1428 false), /* pcrel_offset */
1430 HOWTO (R_ARM_GOT_ABS
, /* type */
1434 false, /* pc_relative */
1436 complain_overflow_dont
,/* complain_on_overflow */
1437 bfd_elf_generic_reloc
, /* special_function */
1438 "R_ARM_GOT_ABS", /* name */
1439 false, /* partial_inplace */
1440 0xffffffff, /* src_mask */
1441 0xffffffff, /* dst_mask */
1442 false), /* pcrel_offset */
1444 HOWTO (R_ARM_GOT_PREL
, /* type */
1448 true, /* pc_relative */
1450 complain_overflow_dont
, /* complain_on_overflow */
1451 bfd_elf_generic_reloc
, /* special_function */
1452 "R_ARM_GOT_PREL", /* name */
1453 false, /* partial_inplace */
1454 0xffffffff, /* src_mask */
1455 0xffffffff, /* dst_mask */
1456 true), /* pcrel_offset */
1458 HOWTO (R_ARM_GOT_BREL12
, /* type */
1462 false, /* pc_relative */
1464 complain_overflow_bitfield
,/* complain_on_overflow */
1465 bfd_elf_generic_reloc
, /* special_function */
1466 "R_ARM_GOT_BREL12", /* name */
1467 false, /* partial_inplace */
1468 0x00000fff, /* src_mask */
1469 0x00000fff, /* dst_mask */
1470 false), /* pcrel_offset */
1472 HOWTO (R_ARM_GOTOFF12
, /* type */
1476 false, /* pc_relative */
1478 complain_overflow_bitfield
,/* complain_on_overflow */
1479 bfd_elf_generic_reloc
, /* special_function */
1480 "R_ARM_GOTOFF12", /* name */
1481 false, /* partial_inplace */
1482 0x00000fff, /* src_mask */
1483 0x00000fff, /* dst_mask */
1484 false), /* pcrel_offset */
1486 EMPTY_HOWTO (R_ARM_GOTRELAX
), /* reserved for future GOT-load optimizations */
  /* GNU extension to record C++ vtable member usage.  */
1489 HOWTO (R_ARM_GNU_VTENTRY
, /* type */
1493 false, /* pc_relative */
1495 complain_overflow_dont
, /* complain_on_overflow */
1496 _bfd_elf_rel_vtable_reloc_fn
, /* special_function */
1497 "R_ARM_GNU_VTENTRY", /* name */
1498 false, /* partial_inplace */
1501 false), /* pcrel_offset */
  /* GNU extension to record C++ vtable hierarchy.  */
1504 HOWTO (R_ARM_GNU_VTINHERIT
, /* type */
1508 false, /* pc_relative */
1510 complain_overflow_dont
, /* complain_on_overflow */
1511 NULL
, /* special_function */
1512 "R_ARM_GNU_VTINHERIT", /* name */
1513 false, /* partial_inplace */
1516 false), /* pcrel_offset */
1518 HOWTO (R_ARM_THM_JUMP11
, /* type */
1522 true, /* pc_relative */
1524 complain_overflow_signed
, /* complain_on_overflow */
1525 bfd_elf_generic_reloc
, /* special_function */
1526 "R_ARM_THM_JUMP11", /* name */
1527 false, /* partial_inplace */
1528 0x000007ff, /* src_mask */
1529 0x000007ff, /* dst_mask */
1530 true), /* pcrel_offset */
1532 HOWTO (R_ARM_THM_JUMP8
, /* type */
1536 true, /* pc_relative */
1538 complain_overflow_signed
, /* complain_on_overflow */
1539 bfd_elf_generic_reloc
, /* special_function */
1540 "R_ARM_THM_JUMP8", /* name */
1541 false, /* partial_inplace */
1542 0x000000ff, /* src_mask */
1543 0x000000ff, /* dst_mask */
1544 true), /* pcrel_offset */
  /* TLS relocations.  */
1547 HOWTO (R_ARM_TLS_GD32
, /* type */
1551 false, /* pc_relative */
1553 complain_overflow_bitfield
,/* complain_on_overflow */
1554 NULL
, /* special_function */
1555 "R_ARM_TLS_GD32", /* name */
1556 true, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 false), /* pcrel_offset */
1561 HOWTO (R_ARM_TLS_LDM32
, /* type */
1565 false, /* pc_relative */
1567 complain_overflow_bitfield
,/* complain_on_overflow */
1568 bfd_elf_generic_reloc
, /* special_function */
1569 "R_ARM_TLS_LDM32", /* name */
1570 true, /* partial_inplace */
1571 0xffffffff, /* src_mask */
1572 0xffffffff, /* dst_mask */
1573 false), /* pcrel_offset */
1575 HOWTO (R_ARM_TLS_LDO32
, /* type */
1579 false, /* pc_relative */
1581 complain_overflow_bitfield
,/* complain_on_overflow */
1582 bfd_elf_generic_reloc
, /* special_function */
1583 "R_ARM_TLS_LDO32", /* name */
1584 true, /* partial_inplace */
1585 0xffffffff, /* src_mask */
1586 0xffffffff, /* dst_mask */
1587 false), /* pcrel_offset */
1589 HOWTO (R_ARM_TLS_IE32
, /* type */
1593 false, /* pc_relative */
1595 complain_overflow_bitfield
,/* complain_on_overflow */
1596 NULL
, /* special_function */
1597 "R_ARM_TLS_IE32", /* name */
1598 true, /* partial_inplace */
1599 0xffffffff, /* src_mask */
1600 0xffffffff, /* dst_mask */
1601 false), /* pcrel_offset */
1603 HOWTO (R_ARM_TLS_LE32
, /* type */
1607 false, /* pc_relative */
1609 complain_overflow_bitfield
,/* complain_on_overflow */
1610 NULL
, /* special_function */
1611 "R_ARM_TLS_LE32", /* name */
1612 true, /* partial_inplace */
1613 0xffffffff, /* src_mask */
1614 0xffffffff, /* dst_mask */
1615 false), /* pcrel_offset */
1617 HOWTO (R_ARM_TLS_LDO12
, /* type */
1621 false, /* pc_relative */
1623 complain_overflow_bitfield
,/* complain_on_overflow */
1624 bfd_elf_generic_reloc
, /* special_function */
1625 "R_ARM_TLS_LDO12", /* name */
1626 false, /* partial_inplace */
1627 0x00000fff, /* src_mask */
1628 0x00000fff, /* dst_mask */
1629 false), /* pcrel_offset */
1631 HOWTO (R_ARM_TLS_LE12
, /* type */
1635 false, /* pc_relative */
1637 complain_overflow_bitfield
,/* complain_on_overflow */
1638 bfd_elf_generic_reloc
, /* special_function */
1639 "R_ARM_TLS_LE12", /* name */
1640 false, /* partial_inplace */
1641 0x00000fff, /* src_mask */
1642 0x00000fff, /* dst_mask */
1643 false), /* pcrel_offset */
1645 HOWTO (R_ARM_TLS_IE12GP
, /* type */
1649 false, /* pc_relative */
1651 complain_overflow_bitfield
,/* complain_on_overflow */
1652 bfd_elf_generic_reloc
, /* special_function */
1653 "R_ARM_TLS_IE12GP", /* name */
1654 false, /* partial_inplace */
1655 0x00000fff, /* src_mask */
1656 0x00000fff, /* dst_mask */
1657 false), /* pcrel_offset */
  /* 112-127 private relocations.  */
  /* R_ARM_ME_TOO, obsolete.  */
1680 HOWTO (R_ARM_THM_TLS_DESCSEQ
, /* type */
1684 false, /* pc_relative */
1686 complain_overflow_dont
,/* complain_on_overflow */
1687 bfd_elf_generic_reloc
, /* special_function */
1688 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 false, /* partial_inplace */
1690 0x00000000, /* src_mask */
1691 0x00000000, /* dst_mask */
1692 false), /* pcrel_offset */
1695 HOWTO (R_ARM_THM_ALU_ABS_G0_NC
,/* type. */
1696 0, /* rightshift. */
1699 false, /* pc_relative. */
1701 complain_overflow_bitfield
,/* complain_on_overflow. */
1702 bfd_elf_generic_reloc
, /* special_function. */
1703 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1704 false, /* partial_inplace. */
1705 0x00000000, /* src_mask. */
1706 0x00000000, /* dst_mask. */
1707 false), /* pcrel_offset. */
1708 HOWTO (R_ARM_THM_ALU_ABS_G1_NC
,/* type. */
1709 0, /* rightshift. */
1712 false, /* pc_relative. */
1714 complain_overflow_bitfield
,/* complain_on_overflow. */
1715 bfd_elf_generic_reloc
, /* special_function. */
1716 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1717 false, /* partial_inplace. */
1718 0x00000000, /* src_mask. */
1719 0x00000000, /* dst_mask. */
1720 false), /* pcrel_offset. */
1721 HOWTO (R_ARM_THM_ALU_ABS_G2_NC
,/* type. */
1722 0, /* rightshift. */
1725 false, /* pc_relative. */
1727 complain_overflow_bitfield
,/* complain_on_overflow. */
1728 bfd_elf_generic_reloc
, /* special_function. */
1729 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1730 false, /* partial_inplace. */
1731 0x00000000, /* src_mask. */
1732 0x00000000, /* dst_mask. */
1733 false), /* pcrel_offset. */
1734 HOWTO (R_ARM_THM_ALU_ABS_G3_NC
,/* type. */
1735 0, /* rightshift. */
1738 false, /* pc_relative. */
1740 complain_overflow_bitfield
,/* complain_on_overflow. */
1741 bfd_elf_generic_reloc
, /* special_function. */
1742 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1743 false, /* partial_inplace. */
1744 0x00000000, /* src_mask. */
1745 0x00000000, /* dst_mask. */
1746 false), /* pcrel_offset. */
  /* Relocations for Armv8.1-M Mainline.  */
1748 HOWTO (R_ARM_THM_BF16
, /* type. */
1749 0, /* rightshift. */
1752 true, /* pc_relative. */
1754 complain_overflow_dont
,/* do not complain_on_overflow. */
1755 bfd_elf_generic_reloc
, /* special_function. */
1756 "R_ARM_THM_BF16", /* name. */
1757 false, /* partial_inplace. */
1758 0x001f0ffe, /* src_mask. */
1759 0x001f0ffe, /* dst_mask. */
1760 true), /* pcrel_offset. */
1761 HOWTO (R_ARM_THM_BF12
, /* type. */
1762 0, /* rightshift. */
1765 true, /* pc_relative. */
1767 complain_overflow_dont
,/* do not complain_on_overflow. */
1768 bfd_elf_generic_reloc
, /* special_function. */
1769 "R_ARM_THM_BF12", /* name. */
1770 false, /* partial_inplace. */
1771 0x00010ffe, /* src_mask. */
1772 0x00010ffe, /* dst_mask. */
1773 true), /* pcrel_offset. */
1774 HOWTO (R_ARM_THM_BF18
, /* type. */
1775 0, /* rightshift. */
1778 true, /* pc_relative. */
1780 complain_overflow_dont
,/* do not complain_on_overflow. */
1781 bfd_elf_generic_reloc
, /* special_function. */
1782 "R_ARM_THM_BF18", /* name. */
1783 false, /* partial_inplace. */
1784 0x007f0ffe, /* src_mask. */
1785 0x007f0ffe, /* dst_mask. */
1786 true), /* pcrel_offset. */
static reloc_howto_type elf32_arm_howto_table_2[8] =
{
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  HOWTO (R_ARM_GOTFUNCDESC,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTFUNCDESC",	/* name */
	 false,			/* partial_inplace */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  HOWTO (R_ARM_GOTOFFFUNCDESC,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTOFFFUNCDESC",/* name */
	 false,			/* partial_inplace */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  HOWTO (R_ARM_FUNCDESC,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC",	/* name */
	 false,			/* partial_inplace */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  HOWTO (R_ARM_FUNCDESC_VALUE,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC_VALUE",/* name */
	 false,			/* partial_inplace */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_GD32_FDPIC,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_GD32_FDPIC",/* name */
	 false,			/* partial_inplace */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_LDM32_FDPIC,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_LDM32_FDPIC",/* name */
	 false,			/* partial_inplace */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_IE32_FDPIC,	/* type */
	 false,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_IE32_FDPIC",/* name */
	 false,			/* partial_inplace */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
};
/* 249-255 extended, currently unused, relocations:  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 false,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 false,			/* partial_inplace */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 false,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 false,			/* partial_inplace */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 false,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 false,			/* partial_inplace */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 false,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 false,			/* partial_inplace */
	 false)			/* pcrel_offset */
};
static reloc_howto_type *
elf32_arm_howto_from_type (unsigned int r_type)
{
  if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
    return &elf32_arm_howto_table_1[r_type];

  if (r_type >= R_ARM_IRELATIVE
      && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
    return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];

  if (r_type >= R_ARM_RREL32
      && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
    return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];

  return NULL;
}
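
/* Illustrative note (not part of the original source): the three howto
   tables cover the three contiguous ranges of relocation numbers, so for
   example

     elf32_arm_howto_from_type (R_ARM_ABS32)->name      is "R_ARM_ABS32"
     elf32_arm_howto_from_type (R_ARM_IRELATIVE)->name  is "R_ARM_IRELATIVE"

   while any r_type outside those ranges yields NULL, which
   elf32_arm_info_to_howto below reports as an unsupported relocation.  */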
static bool
elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
			 Elf_Internal_Rela * elf_reloc)
{
  unsigned int r_type;

  r_type = ELF32_R_TYPE (elf_reloc->r_info);
  if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
			  abfd, r_type);
      bfd_set_error (bfd_error_bad_value);
      return false;
    }
  return true;
}
struct elf32_arm_reloc_map
  {
    bfd_reloc_code_real_type  bfd_reloc_val;
    unsigned char	      elf_reloc_val;
  };

/* All entries in this list must also be present in elf32_arm_howto_table.  */
static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
  {
    {BFD_RELOC_NONE,			 R_ARM_NONE},
    {BFD_RELOC_ARM_PCREL_BRANCH,	 R_ARM_PC24},
    {BFD_RELOC_ARM_PCREL_CALL,		 R_ARM_CALL},
    {BFD_RELOC_ARM_PCREL_JUMP,		 R_ARM_JUMP24},
    {BFD_RELOC_ARM_PCREL_BLX,		 R_ARM_XPC25},
    {BFD_RELOC_THUMB_PCREL_BLX,		 R_ARM_THM_XPC22},
    {BFD_RELOC_32,			 R_ARM_ABS32},
    {BFD_RELOC_32_PCREL,		 R_ARM_REL32},
    {BFD_RELOC_8,			 R_ARM_ABS8},
    {BFD_RELOC_16,			 R_ARM_ABS16},
    {BFD_RELOC_ARM_OFFSET_IMM,		 R_ARM_ABS12},
    {BFD_RELOC_ARM_THUMB_OFFSET,	 R_ARM_THM_ABS5},
    {BFD_RELOC_THUMB_PCREL_BRANCH25,	 R_ARM_THM_JUMP24},
    {BFD_RELOC_THUMB_PCREL_BRANCH23,	 R_ARM_THM_CALL},
    {BFD_RELOC_THUMB_PCREL_BRANCH12,	 R_ARM_THM_JUMP11},
    {BFD_RELOC_THUMB_PCREL_BRANCH20,	 R_ARM_THM_JUMP19},
    {BFD_RELOC_THUMB_PCREL_BRANCH9,	 R_ARM_THM_JUMP8},
    {BFD_RELOC_THUMB_PCREL_BRANCH7,	 R_ARM_THM_JUMP6},
    {BFD_RELOC_ARM_GLOB_DAT,		 R_ARM_GLOB_DAT},
    {BFD_RELOC_ARM_JUMP_SLOT,		 R_ARM_JUMP_SLOT},
    {BFD_RELOC_ARM_RELATIVE,		 R_ARM_RELATIVE},
    {BFD_RELOC_ARM_GOTOFF,		 R_ARM_GOTOFF32},
    {BFD_RELOC_ARM_GOTPC,		 R_ARM_GOTPC},
    {BFD_RELOC_ARM_GOT_PREL,		 R_ARM_GOT_PREL},
    {BFD_RELOC_ARM_GOT32,		 R_ARM_GOT32},
    {BFD_RELOC_ARM_PLT32,		 R_ARM_PLT32},
    {BFD_RELOC_ARM_TARGET1,		 R_ARM_TARGET1},
    {BFD_RELOC_ARM_ROSEGREL32,		 R_ARM_ROSEGREL32},
    {BFD_RELOC_ARM_SBREL32,		 R_ARM_SBREL32},
    {BFD_RELOC_ARM_PREL31,		 R_ARM_PREL31},
    {BFD_RELOC_ARM_TARGET2,		 R_ARM_TARGET2},
    {BFD_RELOC_ARM_PLT32,		 R_ARM_PLT32},
    {BFD_RELOC_ARM_TLS_GOTDESC,		 R_ARM_TLS_GOTDESC},
    {BFD_RELOC_ARM_TLS_CALL,		 R_ARM_TLS_CALL},
    {BFD_RELOC_ARM_THM_TLS_CALL,	 R_ARM_THM_TLS_CALL},
    {BFD_RELOC_ARM_TLS_DESCSEQ,		 R_ARM_TLS_DESCSEQ},
    {BFD_RELOC_ARM_THM_TLS_DESCSEQ,	 R_ARM_THM_TLS_DESCSEQ},
    {BFD_RELOC_ARM_TLS_DESC,		 R_ARM_TLS_DESC},
    {BFD_RELOC_ARM_TLS_GD32,		 R_ARM_TLS_GD32},
    {BFD_RELOC_ARM_TLS_LDO32,		 R_ARM_TLS_LDO32},
    {BFD_RELOC_ARM_TLS_LDM32,		 R_ARM_TLS_LDM32},
    {BFD_RELOC_ARM_TLS_DTPMOD32,	 R_ARM_TLS_DTPMOD32},
    {BFD_RELOC_ARM_TLS_DTPOFF32,	 R_ARM_TLS_DTPOFF32},
    {BFD_RELOC_ARM_TLS_TPOFF32,		 R_ARM_TLS_TPOFF32},
    {BFD_RELOC_ARM_TLS_IE32,		 R_ARM_TLS_IE32},
    {BFD_RELOC_ARM_TLS_LE32,		 R_ARM_TLS_LE32},
    {BFD_RELOC_ARM_IRELATIVE,		 R_ARM_IRELATIVE},
    {BFD_RELOC_ARM_GOTFUNCDESC,		 R_ARM_GOTFUNCDESC},
    {BFD_RELOC_ARM_GOTOFFFUNCDESC,	 R_ARM_GOTOFFFUNCDESC},
    {BFD_RELOC_ARM_FUNCDESC,		 R_ARM_FUNCDESC},
    {BFD_RELOC_ARM_FUNCDESC_VALUE,	 R_ARM_FUNCDESC_VALUE},
    {BFD_RELOC_ARM_TLS_GD32_FDPIC,	 R_ARM_TLS_GD32_FDPIC},
    {BFD_RELOC_ARM_TLS_LDM32_FDPIC,	 R_ARM_TLS_LDM32_FDPIC},
    {BFD_RELOC_ARM_TLS_IE32_FDPIC,	 R_ARM_TLS_IE32_FDPIC},
    {BFD_RELOC_VTABLE_INHERIT,		 R_ARM_GNU_VTINHERIT},
    {BFD_RELOC_VTABLE_ENTRY,		 R_ARM_GNU_VTENTRY},
    {BFD_RELOC_ARM_MOVW,		 R_ARM_MOVW_ABS_NC},
    {BFD_RELOC_ARM_MOVT,		 R_ARM_MOVT_ABS},
    {BFD_RELOC_ARM_MOVW_PCREL,		 R_ARM_MOVW_PREL_NC},
    {BFD_RELOC_ARM_MOVT_PCREL,		 R_ARM_MOVT_PREL},
    {BFD_RELOC_ARM_THUMB_MOVW,		 R_ARM_THM_MOVW_ABS_NC},
    {BFD_RELOC_ARM_THUMB_MOVT,		 R_ARM_THM_MOVT_ABS},
    {BFD_RELOC_ARM_THUMB_MOVW_PCREL,	 R_ARM_THM_MOVW_PREL_NC},
    {BFD_RELOC_ARM_THUMB_MOVT_PCREL,	 R_ARM_THM_MOVT_PREL},
    {BFD_RELOC_ARM_ALU_PC_G0_NC,	 R_ARM_ALU_PC_G0_NC},
    {BFD_RELOC_ARM_ALU_PC_G0,		 R_ARM_ALU_PC_G0},
    {BFD_RELOC_ARM_ALU_PC_G1_NC,	 R_ARM_ALU_PC_G1_NC},
    {BFD_RELOC_ARM_ALU_PC_G1,		 R_ARM_ALU_PC_G1},
    {BFD_RELOC_ARM_ALU_PC_G2,		 R_ARM_ALU_PC_G2},
    {BFD_RELOC_ARM_LDR_PC_G0,		 R_ARM_LDR_PC_G0},
    {BFD_RELOC_ARM_LDR_PC_G1,		 R_ARM_LDR_PC_G1},
    {BFD_RELOC_ARM_LDR_PC_G2,		 R_ARM_LDR_PC_G2},
    {BFD_RELOC_ARM_LDRS_PC_G0,		 R_ARM_LDRS_PC_G0},
    {BFD_RELOC_ARM_LDRS_PC_G1,		 R_ARM_LDRS_PC_G1},
    {BFD_RELOC_ARM_LDRS_PC_G2,		 R_ARM_LDRS_PC_G2},
    {BFD_RELOC_ARM_LDC_PC_G0,		 R_ARM_LDC_PC_G0},
    {BFD_RELOC_ARM_LDC_PC_G1,		 R_ARM_LDC_PC_G1},
    {BFD_RELOC_ARM_LDC_PC_G2,		 R_ARM_LDC_PC_G2},
    {BFD_RELOC_ARM_ALU_SB_G0_NC,	 R_ARM_ALU_SB_G0_NC},
    {BFD_RELOC_ARM_ALU_SB_G0,		 R_ARM_ALU_SB_G0},
    {BFD_RELOC_ARM_ALU_SB_G1_NC,	 R_ARM_ALU_SB_G1_NC},
    {BFD_RELOC_ARM_ALU_SB_G1,		 R_ARM_ALU_SB_G1},
    {BFD_RELOC_ARM_ALU_SB_G2,		 R_ARM_ALU_SB_G2},
    {BFD_RELOC_ARM_LDR_SB_G0,		 R_ARM_LDR_SB_G0},
    {BFD_RELOC_ARM_LDR_SB_G1,		 R_ARM_LDR_SB_G1},
    {BFD_RELOC_ARM_LDR_SB_G2,		 R_ARM_LDR_SB_G2},
    {BFD_RELOC_ARM_LDRS_SB_G0,		 R_ARM_LDRS_SB_G0},
    {BFD_RELOC_ARM_LDRS_SB_G1,		 R_ARM_LDRS_SB_G1},
    {BFD_RELOC_ARM_LDRS_SB_G2,		 R_ARM_LDRS_SB_G2},
    {BFD_RELOC_ARM_LDC_SB_G0,		 R_ARM_LDC_SB_G0},
    {BFD_RELOC_ARM_LDC_SB_G1,		 R_ARM_LDC_SB_G1},
    {BFD_RELOC_ARM_LDC_SB_G2,		 R_ARM_LDC_SB_G2},
    {BFD_RELOC_ARM_V4BX,		 R_ARM_V4BX},
    {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,	 R_ARM_THM_ALU_ABS_G3_NC},
    {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,	 R_ARM_THM_ALU_ABS_G2_NC},
    {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,	 R_ARM_THM_ALU_ABS_G1_NC},
    {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,	 R_ARM_THM_ALU_ABS_G0_NC},
    {BFD_RELOC_ARM_THUMB_BF17,		 R_ARM_THM_BF16},
    {BFD_RELOC_ARM_THUMB_BF13,		 R_ARM_THM_BF12},
    {BFD_RELOC_ARM_THUMB_BF19,		 R_ARM_THM_BF18}
  };
static reloc_howto_type *
elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			     bfd_reloc_code_real_type code)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
    if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
      return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);

  return NULL;
}
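
/* Illustrative note (not part of the original source): this is the backend
   hook behind the generic bfd_reloc_type_lookup interface, so a client such
   as the assembler can do, for example,

     reloc_howto_type *howto = bfd_reloc_type_lookup (abfd, BFD_RELOC_32);

   and, for an elf32-arm bfd, get back the R_ARM_ABS32 howto.  The linear
   scan is acceptable here: the map holds on the order of a hundred entries
   and the lookup is not on a hot path.  */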
static reloc_howto_type *
elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			     const char *r_name)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
    if (elf32_arm_howto_table_1[i].name != NULL
	&& strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
      return &elf32_arm_howto_table_1[i];

  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
    if (elf32_arm_howto_table_2[i].name != NULL
	&& strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
      return &elf32_arm_howto_table_2[i];

  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
    if (elf32_arm_howto_table_3[i].name != NULL
	&& strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
      return &elf32_arm_howto_table_3[i];

  return NULL;
}
/* Support for core dump NOTE sections.  */

static bool
elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
      default:
	return false;

      case 148:		/* Linux/ARM 32-bit.  */
	/* pr_cursig */
	elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

	/* pr_pid */
	elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

	/* pr_reg */
	offset = 72;
	size = 72;

	break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
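
/* Illustrative note (not part of the original source): the magic numbers
   above follow the 148-byte Linux/ARM elf_prstatus layout this backend
   assumes: pr_cursig at offset 12, pr_pid at offset 24 and the general
   register block at offset 72 (18 registers * 4 bytes), which is also the
   layout elf32_arm_nabi_write_core_note reproduces below.  */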
static bool
elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
{
  switch (note->descsz)
    {
      default:
	return false;

      case 124:		/* Linux/ARM elf_prpsinfo.  */
	elf_tdata (abfd)->core->pid
	  = bfd_get_32 (abfd, note->descdata + 12);
	elf_tdata (abfd)->core->program
	  = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
	elf_tdata (abfd)->core->command
	  = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
    }

  /* Note that for some reason, a spurious space is tacked
     onto the end of the args in some (at least one anyway)
     implementations, so strip it off if it exists.  */
  {
    char *command = elf_tdata (abfd)->core->command;
    int n = strlen (command);

    if (0 < n && command[n - 1] == ' ')
      command[n - 1] = '\0';
  }

  return true;
}
static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124] ATTRIBUTE_NONSTRING;
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	strncpy (data + 28, va_arg (ap, const char *), 16);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_PUSH;
	/* GCC 8.0 and 8.1 warn about 80 equals destination size with
	   -Wstringop-truncation:
	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
	 */
	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
#endif
	strncpy (data + 44, va_arg (ap, const char *), 80);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_POP;
#endif
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2263 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2264 #define TARGET_LITTLE_NAME "elf32-littlearm"
2265 #define TARGET_BIG_SYM arm_elf32_be_vec
2266 #define TARGET_BIG_NAME "elf32-bigarm"
2268 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2269 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2270 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2272 typedef unsigned long int insn32;
2273 typedef unsigned short int insn16;
2275 /* In lieu of proper flags, assume all EABIv4 or later objects are
     interworkable.  */
2277 #define INTERWORK_FLAG(abfd) \
2278 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2279 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2280 || ((abfd)->flags & BFD_LINKER_CREATED))
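/* Illustrative sketch (not part of the original source): INTERWORK_FLAG is
   simply a predicate on an input bfd, so a hypothetical caller comparing
   two inputs for compatible interworking settings might look like:

     if (INTERWORK_FLAG (ibfd) != INTERWORK_FLAG (obfd))
       ... warn about mixing interworking and non-interworking objects ...

   The comparison above is only an example; the real checks are performed by
   the merge/compatibility routines further down in this file.  */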
2282 /* The linker script knows the section names for placement.
2283 The entry_names are used to do simple name mangling on the stubs.
2284    Given a function name and its type, the stub can be found.  The
2285    name can be changed.  The only requirement is that the %s be present.  */
2286 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2287 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2289 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2290 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2292 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2293 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2295 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2296 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2298 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2299 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2301 #define STUB_ENTRY_NAME "__%s_veneer"
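/* Illustrative sketch (not part of the original source): the *_ENTRY_NAME
   templates above are printf-style formats whose only required conversion
   is the %s, e.g. for a hypothetical function "foo":

     char name[64];
     snprintf (name, sizeof (name), STUB_ENTRY_NAME, "foo");

   which yields the veneer symbol name "__foo_veneer".  */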
2303 #define CMSE_PREFIX "__acle_se_"
2305 #define CMSE_STUB_NAME ".gnu.sgstubs"
2307 /* The name of the dynamic interpreter. This is put in the .interp
2309 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2311 /* FDPIC default stack size. */
2312 #define DEFAULT_STACK_SIZE 0x8000
2314 static const unsigned long tls_trampoline[] =
2316 0xe08e0000, /* add r0, lr, r0 */
2317 0xe5901004, /* ldr r1, [r0,#4] */
2318 0xe12fff11, /* bx r1 */
2321 static const unsigned long dl_tlsdesc_lazy_trampoline[] =
2323 0xe52d2004, /* push {r2} */
2324 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2325 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2326 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2327 0xe081100f, /* 2: add r1, pc */
2328 0xe12fff12, /* bx r2 */
2329 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2330 + dl_tlsdesc_lazy_resolver(GOT) */
2331 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2334 /* NOTE: [Thumb nop sequence]
2335 When adding code that transitions from Thumb to Arm the instruction that
2336 should be used for the alignment padding should be 0xe7fd (b .-2) instead of
2337 a nop for performance reasons. */
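/* For example (sketch based on the V4T stubs defined later in this file),
   a Thumb->Arm transition is padded like so:

     THUMB16_INSN (0x4778),	bx pc
     THUMB16_INSN (0xe7fd),	b .-2	(padding; not a 0xbf00 nop)
     ARM_INSN (0xe51ff004),	ldr pc, [pc, #-4]
*/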
2339 /* ARM FDPIC PLT entry. */
2340 /* The last 5 words contain PLT lazy fragment code and data. */
2341 static const bfd_vma elf32_arm_fdpic_plt_entry[] =
2343 0xe59fc008, /* ldr r12, .L1 */
2344 0xe08cc009, /* add r12, r12, r9 */
2345 0xe59c9004, /* ldr r9, [r12, #4] */
2346 0xe59cf000, /* ldr pc, [r12] */
2347 0x00000000, /* L1. .word foo(GOTOFFFUNCDESC) */
2348 0x00000000, /* L1. .word foo(funcdesc_value_reloc_offset) */
2349 0xe51fc00c, /* ldr r12, [pc, #-12] */
2350 0xe92d1000, /* push {r12} */
2351 0xe599c004, /* ldr r12, [r9, #4] */
2352 0xe599f000, /* ldr pc, [r9] */
2355 /* Thumb FDPIC PLT entry. */
2356 /* The last 5 words contain PLT lazy fragment code and data. */
2357 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry[] =
2359 0xc00cf8df, /* ldr.w r12, .L1 */
2360 0x0c09eb0c, /* add.w r12, r12, r9 */
2361 0x9004f8dc, /* ldr.w r9, [r12, #4] */
2362 0xf000f8dc, /* ldr.w pc, [r12] */
2363 0x00000000, /* .L1 .word foo(GOTOFFFUNCDESC) */
2364 0x00000000, /* .L2 .word foo(funcdesc_value_reloc_offset) */
2365 0xc008f85f, /* ldr.w r12, .L2 */
2366 0xcd04f84d, /* push {r12} */
2367 0xc004f8d9, /* ldr.w r12, [r9, #4] */
2368 0xf000f8d9, /* ldr.w pc, [r9] */
2371 #ifdef FOUR_WORD_PLT
2373 /* The first entry in a procedure linkage table looks like
2374 this. It is set up so that any shared library function that is
2375 called before the relocation has been set up calls the dynamic
2377 static const bfd_vma elf32_arm_plt0_entry[] =
2379 0xe52de004, /* str lr, [sp, #-4]! */
2380 0xe59fe010, /* ldr lr, [pc, #16] */
2381 0xe08fe00e, /* add lr, pc, lr */
2382 0xe5bef008, /* ldr pc, [lr, #8]! */
2385 /* Subsequent entries in a procedure linkage table look like
2387 static const bfd_vma elf32_arm_plt_entry[] =
2389 0xe28fc600, /* add ip, pc, #NN */
2390 0xe28cca00, /* add ip, ip, #NN */
2391 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2392 0x00000000, /* unused */
2395 #else /* not FOUR_WORD_PLT */
2397 /* The first entry in a procedure linkage table looks like
2398 this. It is set up so that any shared library function that is
2399 called before the relocation has been set up calls the dynamic
2401 static const bfd_vma elf32_arm_plt0_entry[] =
2403 0xe52de004, /* str lr, [sp, #-4]! */
2404 0xe59fe004, /* ldr lr, [pc, #4] */
2405 0xe08fe00e, /* add lr, pc, lr */
2406 0xe5bef008, /* ldr pc, [lr, #8]! */
2407 0x00000000, /* &GOT[0] - . */
2410 /* By default subsequent entries in a procedure linkage table look like
2411    this.  Offsets that don't fit into 28 bits will cause a link error.  */
2412 static const bfd_vma elf32_arm_plt_entry_short[] =
2414 0xe28fc600, /* add ip, pc, #0xNN00000 */
2415 0xe28cca00, /* add ip, ip, #0xNN000 */
2416 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2419 /* When explicitly asked, we'll use this "long" entry format
2420 which can cope with arbitrary displacements. */
2421 static const bfd_vma elf32_arm_plt_entry_long[] =
2423 0xe28fc200, /* add ip, pc, #0xN0000000 */
2424 0xe28cc600, /* add ip, ip, #0xNN00000 */
2425 0xe28cca00, /* add ip, ip, #0xNN000 */
2426 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2429 static bool elf32_arm_use_long_plt_entry = false;
2431 #endif /* not FOUR_WORD_PLT */
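/* Worked example (not part of the original source) of how the "short" PLT
   entry above reaches its GOT slot: the three immediates split a (maximum
   28-bit) offset into 0xNN00000 + 0xNN000 + 0xNNN pieces, e.g. an offset of
   0x1234567 is encoded as

     add ip, pc, #0x1200000
     add ip, ip, #0x34000
     ldr pc, [ip, #0x567]!

   Anything above 0x0FFFFFFF cannot be represented and is rejected at link
   time, or requires the "long" entry.  */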
2433 /* The first entry in a procedure linkage table looks like this.
2434 It is set up so that any shared library function that is called before the
2435 relocation has been set up calls the dynamic linker first. */
2436 static const bfd_vma elf32_thumb2_plt0_entry[] =
2438   /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2439      an instruction may be encoded as one or two array elements.  */
2440 0xf8dfb500, /* push {lr} */
2441 0x44fee008, /* ldr.w lr, [pc, #8] */
2443 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2444 0x00000000, /* &GOT[0] - . */
2447 /* Subsequent entries in a procedure linkage table for a Thumb-only target
2449 static const bfd_vma elf32_thumb2_plt_entry[] =
2451   /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2452      an instruction may be encoded as one or two array elements.  */
2453 0x0c00f240, /* movw ip, #0xNNNN */
2454 0x0c00f2c0, /* movt ip, #0xNNNN */
2455 0xf8dc44fc, /* add ip, pc */
2456 0xe7fcf000 /* ldr.w pc, [ip] */
2460 /* The format of the first entry in the procedure linkage table
2461 for a VxWorks executable. */
2462 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2464 0xe52dc008, /* str ip,[sp,#-8]! */
2465 0xe59fc000, /* ldr ip,[pc] */
2466 0xe59cf008, /* ldr pc,[ip,#8] */
2467 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2470 /* The format of subsequent entries in a VxWorks executable. */
2471 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2473 0xe59fc000, /* ldr ip,[pc] */
2474 0xe59cf000, /* ldr pc,[ip] */
2475 0x00000000, /* .long @got */
2476 0xe59fc000, /* ldr ip,[pc] */
2477 0xea000000, /* b _PLT */
2478 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2481 /* The format of entries in a VxWorks shared library. */
2482 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2484 0xe59fc000, /* ldr ip,[pc] */
2485 0xe79cf009, /* ldr pc,[ip,r9] */
2486 0x00000000, /* .long @got */
2487 0xe59fc000, /* ldr ip,[pc] */
2488 0xe599f008, /* ldr pc,[r9,#8] */
2489 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2492 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2493 #define PLT_THUMB_STUB_SIZE 4
2494 static const bfd_vma elf32_arm_plt_thumb_stub[] =
2500 /* The first entry in a procedure linkage table looks like
2501 this. It is set up so that any shared library function that is
2502 called before the relocation has been set up calls the dynamic
2504 static const bfd_vma elf32_arm_nacl_plt0_entry[] =
2507 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2508 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2509 0xe08cc00f, /* add ip, ip, pc */
2510 0xe52dc008, /* str ip, [sp, #-8]! */
2511 /* Second bundle: */
2512 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2513 0xe59cc000, /* ldr ip, [ip] */
2514 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2515 0xe12fff1c, /* bx ip */
2517 0xe320f000, /* nop */
2518 0xe320f000, /* nop */
2519 0xe320f000, /* nop */
2521 0xe50dc004, /* str ip, [sp, #-4] */
2522 /* Fourth bundle: */
2523 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2524 0xe59cc000, /* ldr ip, [ip] */
2525 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2526 0xe12fff1c, /* bx ip */
2528 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2530 /* Subsequent entries in a procedure linkage table look like this. */
2531 static const bfd_vma elf32_arm_nacl_plt_entry[] =
2533 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2534 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2535 0xe08cc00f, /* add ip, ip, pc */
2536 0xea000000, /* b .Lplt_tail */
2540 There was a bug due to too high values of THM_MAX_FWD_BRANCH_OFFSET and
2541 THM2_MAX_FWD_BRANCH_OFFSET. The first macro concerns the case when Thumb-2
2542 is not available, and second macro when Thumb-2 is available. Among other
2543 things, they affect the range of branches represented as BLX instructions
2544 in Encoding T2 defined in Section A8.8.25 of the ARM Architecture
2545 Reference Manual ARMv7-A and ARMv7-R edition issue C.d. Such branches are
2546 specified there to have a maximum forward offset that is a multiple of 4.
2547 Previously, the respective values defined here were multiples of 2 but not
2548 4 and they are included in comments for reference. */
2549 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2550 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2551 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 4 + 4)
2552 /* #def THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4) */
2553 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2554 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 4) + 4)
2555 /* #def THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4) */
2556 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2557 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2558 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
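/* For reference (derived arithmetic, not part of the original source), the
   definitions above evaluate to:

     ARM_MAX_FWD_BRANCH_OFFSET	 =  0x02000004
     ARM_MAX_BWD_BRANCH_OFFSET	 = -0x01FFFFF8
     THM_MAX_FWD_BRANCH_OFFSET	 =  0x00400000
     THM_MAX_BWD_BRANCH_OFFSET	 = -0x003FFFFC
     THM2_MAX_FWD_BRANCH_OFFSET	 =  0x01000000
     THM2_MAX_BWD_BRANCH_OFFSET	 = -0x00FFFFFC

   i.e. the raw branch ranges plus the +8 (ARM) or +4 (Thumb) offset between
   the branch instruction and the PC value it is relative to.  */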
2568 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2569 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2570 is inserted in arm_build_one_stub(). */
2571 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2572 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2573 #define THUMB32_MOVT(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2574 #define THUMB32_MOVW(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2575 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2576 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2577 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2578 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
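/* Illustrative expansion (not part of the original source): each helper
   above produces one insn_sequence element, e.g.

     ARM_INSN (0xe51ff004)	    =>	{0xe51ff004, ARM_TYPE, R_ARM_NONE, 0}
     DATA_WORD (0, R_ARM_ABS32, 0)  =>	{0, DATA_TYPE, R_ARM_ABS32, 0}

   so a stub template is just an array of (data, type, reloc, addend)
   tuples.  */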
2583   enum stub_insn_type type;
2584   unsigned int r_type;
2588 /* See note [Thumb nop sequence] when adding a veneer. */
2590 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2591 to reach the stub if necessary. */
2592 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2594 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2595 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2598 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2600 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2602 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2603 ARM_INSN (0xe12fff1c), /* bx ip */
2604 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2607 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2608 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2610 THUMB16_INSN (0xb401), /* push {r0} */
2611 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2612 THUMB16_INSN (0x4684), /* mov ip, r0 */
2613 THUMB16_INSN (0xbc01), /* pop {r0} */
2614 THUMB16_INSN (0x4760), /* bx ip */
2615 THUMB16_INSN (0xbf00), /* nop */
2616 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2619 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */
2620 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2622 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */
2623 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(x) */
2626 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2627 M-profile architectures. */
2628 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
2630 THUMB32_MOVW (0xf2400c00), /* mov.w ip, R_ARM_MOVW_ABS_NC */
2631 THUMB32_MOVT (0xf2c00c00), /* movt ip, R_ARM_MOVT_ABS << 16 */
2632 THUMB16_INSN (0x4760), /* bx ip */
2635 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2637 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2639 THUMB16_INSN (0x4778), /* bx pc */
2640 THUMB16_INSN (0xe7fd), /* b .-2 */
2641 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2642 ARM_INSN (0xe12fff1c), /* bx ip */
2643 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2646 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2648 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2650 THUMB16_INSN (0x4778), /* bx pc */
2651 THUMB16_INSN (0xe7fd), /* b .-2 */
2652 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2653 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2656 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2657 one, when the destination is close enough. */
2658 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2660 THUMB16_INSN (0x4778), /* bx pc */
2661 THUMB16_INSN (0xe7fd), /* b .-2 */
2662 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2665 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2666 blx to reach the stub if necessary. */
2667 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2669 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2670 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2671 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2674 /* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
2675    blx to reach the stub if necessary.  We cannot add into pc;
2676    it is not guaranteed to mode switch (different in ARMv6 and
2678 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2680 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2681 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2682 ARM_INSN (0xe12fff1c), /* bx ip */
2683 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2686 /* V4T ARM -> Thumb long branch stub, PIC.  */
2687 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2689 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2690 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2691 ARM_INSN (0xe12fff1c), /* bx ip */
2692 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2695 /* V4T Thumb -> ARM long branch stub, PIC. */
2696 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2698 THUMB16_INSN (0x4778), /* bx pc */
2699 THUMB16_INSN (0xe7fd), /* b .-2 */
2700 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2701 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2702 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2705 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2707 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2709 THUMB16_INSN (0xb401), /* push {r0} */
2710 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2711 THUMB16_INSN (0x46fc), /* mov ip, pc */
2712 THUMB16_INSN (0x4484), /* add ip, r0 */
2713 THUMB16_INSN (0xbc01), /* pop {r0} */
2714 THUMB16_INSN (0x4760), /* bx ip */
2715 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2718 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2720 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2722 THUMB16_INSN (0x4778), /* bx pc */
2723 THUMB16_INSN (0xe7fd), /* b .-2 */
2724 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2725 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2726 ARM_INSN (0xe12fff1c), /* bx ip */
2727 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2730 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2731 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2732 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2734 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2735 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2736 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2739 /* V4T Thumb -> TLS trampoline.  Lowest common denominator, which is a
2740    long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
2741 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2743 THUMB16_INSN (0x4778), /* bx pc */
2744 THUMB16_INSN (0xe7fd), /* b .-2 */
2745 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2746 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2747 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2750 /* NaCl ARM -> ARM long branch stub. */
2751 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2753 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2754 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2755 ARM_INSN (0xe12fff1c), /* bx ip */
2756 ARM_INSN (0xe320f000), /* nop */
2757 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2758 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2759 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2760 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2763 /* NaCl ARM -> ARM long branch stub, PIC. */
2764 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2766 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2767 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2768 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2769 ARM_INSN (0xe12fff1c), /* bx ip */
2770 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2771 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
2772 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2773 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2776 /* Stub used for transition to secure state (aka SG veneer). */
2777 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
2779 THUMB32_INSN (0xe97fe97f), /* sg. */
2780 THUMB32_B_INSN (0xf000b800, -4), /* b.w original_branch_dest. */
2784 /* Cortex-A8 erratum-workaround stubs. */
2786 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2787 can't use a conditional branch to reach this stub). */
2789 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2791 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2792 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2793 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2796 /* Stub used for b.w and bl.w instructions. */
2798 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2800 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2803 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2805 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2808 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2809 instruction (which switches to ARM mode) to point to this stub. Jump to the
2810 real destination using an ARM-mode branch. */
2812 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2814 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2817 /* For each section group there can be a specially created linker section
2818 to hold the stubs for that group. The name of the stub section is based
2819 upon the name of another section within that group with the suffix below
2822 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2823 create what appeared to be a linker stub section when it actually
2824 contained user code/data. For example, consider this fragment:
2826 const char * stubborn_problems[] = { "np" };
2828 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2831 .data.rel.local.stubborn_problems
2833 This then causes problems in arm32_arm_build_stubs() as it triggers:
2835 // Ignore non-stub sections.
2836 if (!strstr (stub_sec->name, STUB_SUFFIX))
2839 And so the section would be ignored instead of being processed. Hence
2840 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2842 #define STUB_SUFFIX ".__stub"
2844 /* One entry per long/short branch stub defined above. */
2846 DEF_STUB (long_branch_any_any) \
2847 DEF_STUB (long_branch_v4t_arm_thumb) \
2848 DEF_STUB (long_branch_thumb_only) \
2849 DEF_STUB (long_branch_v4t_thumb_thumb) \
2850 DEF_STUB (long_branch_v4t_thumb_arm) \
2851 DEF_STUB (short_branch_v4t_thumb_arm) \
2852 DEF_STUB (long_branch_any_arm_pic) \
2853 DEF_STUB (long_branch_any_thumb_pic) \
2854 DEF_STUB (long_branch_v4t_thumb_thumb_pic) \
2855 DEF_STUB (long_branch_v4t_arm_thumb_pic) \
2856 DEF_STUB (long_branch_v4t_thumb_arm_pic) \
2857 DEF_STUB (long_branch_thumb_only_pic) \
2858 DEF_STUB (long_branch_any_tls_pic) \
2859 DEF_STUB (long_branch_v4t_thumb_tls_pic) \
2860 DEF_STUB (long_branch_arm_nacl) \
2861 DEF_STUB (long_branch_arm_nacl_pic) \
2862 DEF_STUB (cmse_branch_thumb_only) \
2863 DEF_STUB (a8_veneer_b_cond) \
2864 DEF_STUB (a8_veneer_b) \
2865 DEF_STUB (a8_veneer_bl) \
2866 DEF_STUB (a8_veneer_blx) \
2867 DEF_STUB (long_branch_thumb2_only) \
2868 DEF_STUB (long_branch_thumb2_only_pure)
2870 #define DEF_STUB(x) arm_stub_##x,
2871 enum elf32_arm_stub_type
2879 /* Note the first a8_veneer type. */
2880 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2884   const insn_sequence *template_sequence;
2888 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
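/* Illustrative sketch (not part of the original source): the DEF_STUB list
   above is expanded twice with different definitions of DEF_STUB.  For
   example, DEF_STUB (long_branch_any_any) first becomes the enumerator

     arm_stub_long_branch_any_any,

   and then, with the definition just above, the table initializer

     {elf32_arm_stub_long_branch_any_any,
      ARRAY_SIZE (elf32_arm_stub_long_branch_any_any)},

   keeping the enum and the template table in lock step.  */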
2889 static const stub_def stub_definitions[] =
2895 struct elf32_arm_stub_hash_entry
2897 /* Base hash table entry structure. */
2898   struct bfd_hash_entry root;
2900   /* The stub section. */
2903   /* Offset within stub_sec of the beginning of this stub. */
2904   bfd_vma stub_offset;
2906   /* Given the symbol's value and its section we can determine its final
2907      value when building the stubs (so the stub knows where to jump). */
2908   bfd_vma target_value;
2909   asection *target_section;
2911   /* Same as above but for the source of the branch to the stub. Used for
2912      Cortex-A8 erratum workaround to patch it to branch to the stub. As
2913      such, source section does not need to be recorded since Cortex-A8 erratum
2914      workaround stubs are only generated when both source and target are in the
2916   bfd_vma source_value;
2918   /* The instruction which caused this stub to be generated (only valid for
2919      Cortex-A8 erratum workaround stubs at present). */
2920   unsigned long orig_insn;
2922   /* The stub type. */
2923   enum elf32_arm_stub_type stub_type;
2924   /* Its encoding size in bytes. */
2927   const insn_sequence *stub_template;
2928   /* The size of the template (number of entries). */
2929   int stub_template_size;
2931   /* The symbol table entry, if any, that this was derived from. */
2932   struct elf32_arm_link_hash_entry *h;
2934   /* Type of branch. */
2935   enum arm_st_branch_type branch_type;
2937 /* Where this stub is being called from, or, in the case of combined
2938 stub sections, the first input section in the group. */
2941 /* The name for the local symbol at the start of this stub. The
2942 stub name in the hash table has to be unique; this does not, so
2943 it can be friendlier. */
2947 /* Used to build a map of a section. This is required for mixed-endian
2950 typedef struct elf32_elf_section_map
2955 elf32_arm_section_map
;
2957 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2961 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
,
2962 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER
,
2963 VFP11_ERRATUM_ARM_VENEER
,
2964 VFP11_ERRATUM_THUMB_VENEER
2966 elf32_vfp11_erratum_type
;
2968 typedef struct elf32_vfp11_erratum_list
2970 struct elf32_vfp11_erratum_list
*next
;
2976 struct elf32_vfp11_erratum_list
*veneer
;
2977 unsigned int vfp_insn
;
2981 struct elf32_vfp11_erratum_list
*branch
;
2985 elf32_vfp11_erratum_type type
;
2987 elf32_vfp11_erratum_list
;
2989 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2993 STM32L4XX_ERRATUM_BRANCH_TO_VENEER
,
2994 STM32L4XX_ERRATUM_VENEER
2996 elf32_stm32l4xx_erratum_type
;
2998 typedef struct elf32_stm32l4xx_erratum_list
3000 struct elf32_stm32l4xx_erratum_list
*next
;
3006 struct elf32_stm32l4xx_erratum_list
*veneer
;
3011 struct elf32_stm32l4xx_erratum_list
*branch
;
3015 elf32_stm32l4xx_erratum_type type
;
3017 elf32_stm32l4xx_erratum_list
;
3022 INSERT_EXIDX_CANTUNWIND_AT_END
3024 arm_unwind_edit_type
;
3026 /* A (sorted) list of edits to apply to an unwind table. */
3027 typedef struct arm_unwind_table_edit
3029 arm_unwind_edit_type type
;
3030 /* Note: we sometimes want to insert an unwind entry corresponding to a
3031 section different from the one we're currently writing out, so record the
3032 (text) section this edit relates to here. */
3033 asection
*linked_section
;
3035 struct arm_unwind_table_edit
*next
;
3037 arm_unwind_table_edit
;
3039 typedef struct _arm_elf_section_data
3041 /* Information about mapping symbols. */
3042 struct bfd_elf_section_data elf
;
3043 unsigned int mapcount
;
3044 unsigned int mapsize
;
3045 elf32_arm_section_map
*map
;
3046 /* Information about CPU errata. */
3047 unsigned int erratumcount
;
3048 elf32_vfp11_erratum_list
*erratumlist
;
3049 unsigned int stm32l4xx_erratumcount
;
3050 elf32_stm32l4xx_erratum_list
*stm32l4xx_erratumlist
;
3051 unsigned int additional_reloc_count
;
3052 /* Information about unwind tables. */
3055 /* Unwind info attached to a text section. */
3058 asection
*arm_exidx_sec
;
3061 /* Unwind info attached to an .ARM.exidx section. */
3064 arm_unwind_table_edit
*unwind_edit_list
;
3065 arm_unwind_table_edit
*unwind_edit_tail
;
3069 _arm_elf_section_data
;
3071 #define elf32_arm_section_data(sec) \
3072 ((_arm_elf_section_data *) elf_section_data (sec))
3074 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3075 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3076 so may be created multiple times: we use an array of these entries whilst
3077 relaxing which we can refresh easily, then create stubs for each potentially
3078 erratum-triggering instruction once we've settled on a solution. */
3080 struct a8_erratum_fix
3085 bfd_vma target_offset
;
3086 unsigned long orig_insn
;
3088 enum elf32_arm_stub_type stub_type
;
3089 enum arm_st_branch_type branch_type
;
3092 /* A table of relocs applied to branches which might trigger Cortex-A8
3095 struct a8_erratum_reloc
3098 bfd_vma destination
;
3099 struct elf32_arm_link_hash_entry
*hash
;
3100 const char *sym_name
;
3101 unsigned int r_type
;
3102 enum arm_st_branch_type branch_type
;
3106 /* The size of the thread control block. */
3109 /* ARM-specific information about a PLT entry, over and above the usual
3113 /* We reference count Thumb references to a PLT entry separately,
3114 so that we can emit the Thumb trampoline only if needed. */
3115 bfd_signed_vma thumb_refcount
;
3117 /* Some references from Thumb code may be eliminated by BL->BLX
3118 conversion, so record them separately. */
3119 bfd_signed_vma maybe_thumb_refcount
;
3121 /* How many of the recorded PLT accesses were from non-call relocations.
3122 This information is useful when deciding whether anything takes the
3123 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
3124 non-call references to the function should resolve directly to the
3125 real runtime target. */
3126 unsigned int noncall_refcount
;
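/* For instance (descriptive note, not from the original source): an
   address-taking relocation such as R_ARM_ABS32 against an ifunc symbol
   would typically bump noncall_refcount, whereas a plain BL/BLX call
   relocation would not.  */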
3128 /* Since PLT entries have variable size if the Thumb prologue is
3129 used, we need to record the index into .got.plt instead of
3130 recomputing it from the PLT offset. */
3131 bfd_signed_vma got_offset
;
3134 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
3135 struct arm_local_iplt_info
3137 /* The information that is usually found in the generic ELF part of
3138 the hash table entry. */
3139 union gotplt_union root
;
3141 /* The information that is usually found in the ARM-specific part of
3142 the hash table entry. */
3143 struct arm_plt_info arm
;
3145 /* A list of all potential dynamic relocations against this symbol. */
3146 struct elf_dyn_relocs
*dyn_relocs
;
3149 /* Structure to handle FDPIC support for local functions. */
3152 unsigned int funcdesc_cnt
;
3153 unsigned int gotofffuncdesc_cnt
;
3154 int funcdesc_offset
;
3157 struct elf_arm_obj_tdata
3159   struct elf_obj_tdata root;
3161   /* Zero to warn when linking objects with incompatible enum sizes. */
3162   int no_enum_size_warning;
3164   /* Zero to warn when linking objects with incompatible wchar_t sizes. */
3165   int no_wchar_size_warning;
3167   /* The number of entries in each of the arrays in this structure.
3168      Used to avoid buffer overruns. */
3169   bfd_size_type num_entries;
3171   /* tls_type for each local got entry. */
3172   char *local_got_tls_type;
3174   /* GOTPLT entries for TLS descriptors. */
3175   bfd_vma *local_tlsdesc_gotent;
3177   /* Information for local symbols that need entries in .iplt. */
3178   struct arm_local_iplt_info **local_iplt;
3180   /* Maintains FDPIC counters and funcdesc info. */
3181   struct fdpic_local *local_fdpic_cnts;
3184 #define elf_arm_tdata(bfd) \
3185 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3187 #define elf32_arm_num_entries(bfd) \
3188 (elf_arm_tdata (bfd)->num_entries)
3190 #define elf32_arm_local_got_tls_type(bfd) \
3191 (elf_arm_tdata (bfd)->local_got_tls_type)
3193 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3194 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3196 #define elf32_arm_local_iplt(bfd) \
3197 (elf_arm_tdata (bfd)->local_iplt)
3199 #define elf32_arm_local_fdpic_cnts(bfd) \
3200 (elf_arm_tdata (bfd)->local_fdpic_cnts)
3202 #define is_arm_elf(bfd) \
3203 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3204 && elf_tdata (bfd) != NULL \
3205 && elf_object_id (bfd) == ARM_ELF_DATA)
3208 elf32_arm_mkobject (bfd
*abfd
)
3210 return bfd_elf_allocate_object (abfd
, sizeof (struct elf_arm_obj_tdata
),
3214 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3216 /* Structure to handle FDPIC support for extern functions. */
3217 struct fdpic_global
{
3218 unsigned int gotofffuncdesc_cnt
;
3219 unsigned int gotfuncdesc_cnt
;
3220 unsigned int funcdesc_cnt
;
3221 int funcdesc_offset
;
3222 int gotfuncdesc_offset
;
3225 /* Arm ELF linker hash entry. */
3226 struct elf32_arm_link_hash_entry
3228 struct elf_link_hash_entry root
;
3230 /* ARM-specific PLT information. */
3231 struct arm_plt_info plt
;
3233 #define GOT_UNKNOWN 0
3234 #define GOT_NORMAL 1
3235 #define GOT_TLS_GD 2
3236 #define GOT_TLS_IE 4
3237 #define GOT_TLS_GDESC 8
3238 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
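/* Sketch (not part of the original source): tls_type below is treated as a
   small bit set, so a symbol accessed through both a classic GD GOT entry
   and a TLS descriptor would carry (GOT_TLS_GD | GOT_TLS_GDESC), and
   GOT_TLS_GD_ANY_P (tls_type) would then be true.  */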
3239   unsigned int tls_type : 8;
3241   /* True if the symbol's PLT entry is in .iplt rather than .plt. */
3242   unsigned int is_iplt : 1;
3244   unsigned int unused : 23;
3246 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3247 starting at the end of the jump table. */
3248 bfd_vma tlsdesc_got
;
3250 /* The symbol marking the real symbol location for exported thumb
3251 symbols with Arm stubs. */
3252 struct elf_link_hash_entry
*export_glue
;
3254 /* A pointer to the most recently used stub hash entry against this
3256 struct elf32_arm_stub_hash_entry
*stub_cache
;
3258 /* Counter for FDPIC relocations against this symbol. */
3259 struct fdpic_global fdpic_cnts
;
3262 /* Traverse an arm ELF linker hash table. */
3263 #define elf32_arm_link_hash_traverse(table, func, info) \
3264 (elf_link_hash_traverse \
3266 (bool (*) (struct elf_link_hash_entry *, void *)) (func), \
3269 /* Get the ARM elf linker hash table from a link_info structure. */
3270 #define elf32_arm_hash_table(p) \
3271 ((is_elf_hash_table ((p)->hash) \
3272 && elf_hash_table_id (elf_hash_table (p)) == ARM_ELF_DATA) \
3273 ? (struct elf32_arm_link_hash_table *) (p)->hash : NULL)
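/* Typical use (sketch, not part of the original source):

     struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
     if (htab == NULL)
       return false;

   i.e. callers must cope with a NULL result when the hash table does not
   belong to the ARM backend.  */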
3275 #define arm_stub_hash_lookup(table, string, create, copy) \
3276 ((struct elf32_arm_stub_hash_entry *) \
3277 bfd_hash_lookup ((table), (string), (create), (copy)))
3279 /* Array to keep track of which stub sections have been created, and
3280 information on stub grouping. */
3283 /* This is the section to which stubs in the group will be
3286 /* The stub section. */
3290 #define elf32_arm_compute_jump_table_size(htab) \
3291 ((htab)->next_tls_desc_index * 4)
3293 /* ARM ELF linker hash table. */
3294 struct elf32_arm_link_hash_table
3296 /* The main hash table. */
3297 struct elf_link_hash_table root
;
3299 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3300 bfd_size_type thumb_glue_size
;
3302 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3303 bfd_size_type arm_glue_size
;
3305   /* The size in bytes of the section containing the ARMv4 BX veneers. */
3306   bfd_size_type bx_glue_size;
3308 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3309 veneer has been populated. */
3310 bfd_vma bx_glue_offset
[15];
3312 /* The size in bytes of the section containing glue for VFP11 erratum
3314 bfd_size_type vfp11_erratum_glue_size
;
3316 /* The size in bytes of the section containing glue for STM32L4XX erratum
3318 bfd_size_type stm32l4xx_erratum_glue_size
;
3320 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3321 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3322 elf32_arm_write_section(). */
3323 struct a8_erratum_fix
*a8_erratum_fixes
;
3324 unsigned int num_a8_erratum_fixes
;
3326 /* An arbitrary input BFD chosen to hold the glue sections. */
3327 bfd
* bfd_of_glue_owner
;
3329 /* Nonzero to output a BE8 image. */
3332 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3333 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3336 /* The relocation to use for R_ARM_TARGET2 relocations. */
3339   /* 0 = Ignore R_ARM_V4BX.
3340      1 = Convert BX to MOV PC.
3341      2 = Generate v4 interworking stubs. */
3344 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3347 /* Whether we should fix the ARM1176 BLX immediate issue. */
3350 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3353 /* What sort of code sequences we should look for which may trigger the
3354 VFP11 denorm erratum. */
3355 bfd_arm_vfp11_fix vfp11_fix
;
3357 /* Global counter for the number of fixes we have emitted. */
3358 int num_vfp11_fixes
;
3360 /* What sort of code sequences we should look for which may trigger the
3361 STM32L4XX erratum. */
3362 bfd_arm_stm32l4xx_fix stm32l4xx_fix
;
3364 /* Global counter for the number of fixes we have emitted. */
3365 int num_stm32l4xx_fixes
;
3367 /* Nonzero to force PIC branch veneers. */
3370   /* The number of bytes in the initial entry in the PLT. */
3371   bfd_size_type plt_header_size;
3373   /* The number of bytes in the subsequent PLT entries. */
3374   bfd_size_type plt_entry_size;
3376 /* True if the target uses REL relocations. */
3379 /* Nonzero if import library must be a secure gateway import library
3380 as per ARMv8-M Security Extensions. */
3383 /* The import library whose symbols' address must remain stable in
3384 the import library generated. */
3387 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3388 bfd_vma next_tls_desc_index
;
3390 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3391 bfd_vma num_tls_desc
;
3393 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3396 /* Offset in .plt section of tls_arm_trampoline. */
3397 bfd_vma tls_trampoline
;
3399 /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
3402 bfd_signed_vma refcount
;
3406 /* For convenience in allocate_dynrelocs. */
3409 /* The amount of space used by the reserved portion of the sgotplt
3410 section, plus whatever space is used by the jump slots. */
3411 bfd_vma sgotplt_jump_table_size
;
3413 /* The stub hash table. */
3414 struct bfd_hash_table stub_hash_table
;
3416 /* Linker stub bfd. */
3419 /* Linker call-backs. */
3420 asection
* (*add_stub_section
) (const char *, asection
*, asection
*,
3422 void (*layout_sections_again
) (void);
3424 /* Array to keep track of which stub sections have been created, and
3425 information on stub grouping. */
3426 struct map_stub
*stub_group
;
3428 /* Input stub section holding secure gateway veneers. */
3429 asection
*cmse_stub_sec
;
3431 /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3432 start to be allocated. */
3433 bfd_vma new_cmse_stub_offset
;
3435 /* Number of elements in stub_group. */
3436 unsigned int top_id
;
3438 /* Assorted information used by elf32_arm_size_stubs. */
3439 unsigned int bfd_count
;
3440 unsigned int top_index
;
3441 asection
**input_list
;
3443 /* True if the target system uses FDPIC. */
3446 /* Fixup section. Used for FDPIC. */
3450 /* Add an FDPIC read-only fixup. */
3452 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3454   bfd_vma fixup_offset;
3456   fixup_offset = srofixup->reloc_count++ * 4;
3457   BFD_ASSERT (fixup_offset < srofixup->size);
3458   bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
3462 ctz (unsigned int mask
)
3464 #if GCC_VERSION >= 3004
3465 return __builtin_ctz (mask
);
3469 for (i
= 0; i
< 8 * sizeof (mask
); i
++)
3480 elf32_arm_popcount (unsigned int mask
)
3482 #if GCC_VERSION >= 3004
3483 return __builtin_popcount (mask
);
3488 for (i
= 0; i
< 8 * sizeof (mask
); i
++)
3498 static void elf32_arm_add_dynreloc (bfd
*output_bfd
, struct bfd_link_info
*info
,
3499 asection
*sreloc
, Elf_Internal_Rela
*rel
);
3502 arm_elf_fill_funcdesc (bfd
*output_bfd
,
3503 struct bfd_link_info
*info
,
3504 int *funcdesc_offset
,
3508 bfd_vma dynreloc_value
,
3511 if ((*funcdesc_offset
& 1) == 0)
3513 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
3514 asection
*sgot
= globals
->root
.sgot
;
3516 if (bfd_link_pic (info
))
3518 asection
*srelgot
= globals
->root
.srelgot
;
3519 Elf_Internal_Rela outrel
;
3521 outrel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_FUNCDESC_VALUE
);
3522 outrel
.r_offset
= sgot
->output_section
->vma
+ sgot
->output_offset
+ offset
;
3523 outrel
.r_addend
= 0;
3525 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
3526 bfd_put_32 (output_bfd
, addr
, sgot
->contents
+ offset
);
3527 bfd_put_32 (output_bfd
, seg
, sgot
->contents
+ offset
+ 4);
3531 struct elf_link_hash_entry
*hgot
= globals
->root
.hgot
;
3532 bfd_vma got_value
= hgot
->root
.u
.def
.value
3533 + hgot
->root
.u
.def
.section
->output_section
->vma
3534 + hgot
->root
.u
.def
.section
->output_offset
;
3536 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
,
3537 sgot
->output_section
->vma
+ sgot
->output_offset
3539 arm_elf_add_rofixup (output_bfd
, globals
->srofixup
,
3540 sgot
->output_section
->vma
+ sgot
->output_offset
3542 bfd_put_32 (output_bfd
, dynreloc_value
, sgot
->contents
+ offset
);
3543 bfd_put_32 (output_bfd
, got_value
, sgot
->contents
+ offset
+ 4);
3545 *funcdesc_offset
|= 1;
3549 /* Create an entry in an ARM ELF linker hash table. */
3551 static struct bfd_hash_entry
*
3552 elf32_arm_link_hash_newfunc (struct bfd_hash_entry
* entry
,
3553 struct bfd_hash_table
* table
,
3554 const char * string
)
3556 struct elf32_arm_link_hash_entry
* ret
=
3557 (struct elf32_arm_link_hash_entry
*) entry
;
3559 /* Allocate the structure if it has not already been allocated by a
3562 ret
= (struct elf32_arm_link_hash_entry
*)
3563 bfd_hash_allocate (table
, sizeof (struct elf32_arm_link_hash_entry
));
3565 return (struct bfd_hash_entry
*) ret
;
3567 /* Call the allocation method of the superclass. */
3568 ret
= ((struct elf32_arm_link_hash_entry
*)
3569 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry
*) ret
,
3573 ret
->tls_type
= GOT_UNKNOWN
;
3574 ret
->tlsdesc_got
= (bfd_vma
) -1;
3575 ret
->plt
.thumb_refcount
= 0;
3576 ret
->plt
.maybe_thumb_refcount
= 0;
3577 ret
->plt
.noncall_refcount
= 0;
3578 ret
->plt
.got_offset
= -1;
3579 ret
->is_iplt
= false;
3580 ret
->export_glue
= NULL
;
3582 ret
->stub_cache
= NULL
;
3584 ret
->fdpic_cnts
.gotofffuncdesc_cnt
= 0;
3585 ret
->fdpic_cnts
.gotfuncdesc_cnt
= 0;
3586 ret
->fdpic_cnts
.funcdesc_cnt
= 0;
3587 ret
->fdpic_cnts
.funcdesc_offset
= -1;
3588 ret
->fdpic_cnts
.gotfuncdesc_offset
= -1;
3591 return (struct bfd_hash_entry
*) ret
;
3594 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3598 elf32_arm_allocate_local_sym_info (bfd
*abfd
)
3600 if (elf_local_got_refcounts (abfd
) == NULL
)
3602 bfd_size_type num_syms
;
3604 elf32_arm_num_entries (abfd
) = 0;
3606       /* Whilst it might be tempting to allocate a single block of memory and
3607 	 then divide it up amongst the arrays in the elf_arm_obj_tdata
3608 	 structure, this interferes with the work of memory checkers looking
3609 	 for buffer overruns.  So allocate each array individually. */
3611 num_syms
= elf_tdata (abfd
)->symtab_hdr
.sh_info
;
3613 elf_local_got_refcounts (abfd
) = bfd_zalloc
3614 (abfd
, num_syms
* sizeof (* elf_local_got_refcounts (abfd
)));
3616 if (elf_local_got_refcounts (abfd
) == NULL
)
3619 elf32_arm_local_tlsdesc_gotent (abfd
) = bfd_zalloc
3620 (abfd
, num_syms
* sizeof (* elf32_arm_local_tlsdesc_gotent (abfd
)));
3622 if (elf32_arm_local_tlsdesc_gotent (abfd
) == NULL
)
3625 elf32_arm_local_iplt (abfd
) = bfd_zalloc
3626 (abfd
, num_syms
* sizeof (* elf32_arm_local_iplt (abfd
)));
3628 if (elf32_arm_local_iplt (abfd
) == NULL
)
3631 elf32_arm_local_fdpic_cnts (abfd
) = bfd_zalloc
3632 (abfd
, num_syms
* sizeof (* elf32_arm_local_fdpic_cnts (abfd
)));
3634 if (elf32_arm_local_fdpic_cnts (abfd
) == NULL
)
3637 elf32_arm_local_got_tls_type (abfd
) = bfd_zalloc
3638 (abfd
, num_syms
* sizeof (* elf32_arm_local_got_tls_type (abfd
)));
3640 if (elf32_arm_local_got_tls_type (abfd
) == NULL
)
3643 elf32_arm_num_entries (abfd
) = num_syms
;
3645 #if GCC_VERSION >= 3000
3646 BFD_ASSERT (__alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd
))
3647 <= __alignof__ (*elf_local_got_refcounts (abfd
)));
3648 BFD_ASSERT (__alignof__ (*elf32_arm_local_iplt (abfd
))
3649 <= __alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd
)));
3650 BFD_ASSERT (__alignof__ (*elf32_arm_local_fdpic_cnts (abfd
))
3651 <= __alignof__ (*elf32_arm_local_iplt (abfd
)));
3652 BFD_ASSERT (__alignof__ (*elf32_arm_local_got_tls_type (abfd
))
3653 <= __alignof__ (*elf32_arm_local_fdpic_cnts (abfd
)));
3659 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3660 to input bfd ABFD. Create the information if it doesn't already exist.
3661 Return null if an allocation fails. */
3663 static struct arm_local_iplt_info
*
3664 elf32_arm_create_local_iplt (bfd
*abfd
, unsigned long r_symndx
)
3666 struct arm_local_iplt_info
**ptr
;
3668 if (!elf32_arm_allocate_local_sym_info (abfd
))
3671 BFD_ASSERT (r_symndx
< elf_tdata (abfd
)->symtab_hdr
.sh_info
);
3672 BFD_ASSERT (r_symndx
< elf32_arm_num_entries (abfd
));
3673 ptr
= &elf32_arm_local_iplt (abfd
)[r_symndx
];
3675 *ptr
= bfd_zalloc (abfd
, sizeof (**ptr
));
3679 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3680 in ABFD's symbol table. If the symbol is global, H points to its
3681 hash table entry, otherwise H is null.
3683 Return true if the symbol does have PLT information. When returning
3684 true, point *ROOT_PLT at the target-independent reference count/offset
3685 union and *ARM_PLT at the ARM-specific information. */
3688 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3689 			struct elf32_arm_link_hash_entry *h,
3690 			unsigned long r_symndx, union gotplt_union **root_plt,
3691 			struct arm_plt_info **arm_plt)
3693   struct arm_local_iplt_info *local_iplt;
3695   if (globals->root.splt == NULL && globals->root.iplt == NULL)
3700       *root_plt = &h->root.plt;
3705   if (elf32_arm_local_iplt (abfd) == NULL)
3708   if (r_symndx >= elf32_arm_num_entries (abfd))
3711   local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3712   if (local_iplt == NULL)
3715   *root_plt = &local_iplt->root;
3716   *arm_plt = &local_iplt->arm;
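/* Usage sketch (hypothetical caller, not from the original source):

     union gotplt_union *root_plt;
     struct arm_plt_info *arm_plt;

     if (elf32_arm_get_plt_info (input_bfd, globals, hash, r_symndx,
				 &root_plt, &arm_plt))
       ... root_plt->offset and arm_plt->thumb_refcount etc. are valid ...

   This is the pattern used by arm_type_of_stub further down in this
   file.  */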
3720 static bool using_thumb_only (struct elf32_arm_link_hash_table
*globals
);
3722 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3726 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info
*info
,
3727 struct arm_plt_info
*arm_plt
)
3729 struct elf32_arm_link_hash_table
*htab
;
3731 htab
= elf32_arm_hash_table (info
);
3733 return (!using_thumb_only (htab
) && (arm_plt
->thumb_refcount
!= 0
3734 || (!htab
->use_blx
&& arm_plt
->maybe_thumb_refcount
!= 0)));
3737 /* Return a pointer to the head of the dynamic reloc list that should
3738 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3739 ABFD's symbol table. Return null if an error occurs. */
3741 static struct elf_dyn_relocs
**
3742 elf32_arm_get_local_dynreloc_list (bfd
*abfd
, unsigned long r_symndx
,
3743 Elf_Internal_Sym
*isym
)
3745 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
)
3747 struct arm_local_iplt_info
*local_iplt
;
3749 local_iplt
= elf32_arm_create_local_iplt (abfd
, r_symndx
);
3750 if (local_iplt
== NULL
)
3752 return &local_iplt
->dyn_relocs
;
3756 /* Track dynamic relocs needed for local syms too.
3757 We really need local syms available to do this
3762 s
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
3766 vpp
= &elf_section_data (s
)->local_dynrel
;
3767 return (struct elf_dyn_relocs
**) vpp
;
3771 /* Initialize an entry in the stub hash table. */
3773 static struct bfd_hash_entry
*
3774 stub_hash_newfunc (struct bfd_hash_entry
*entry
,
3775 struct bfd_hash_table
*table
,
3778 /* Allocate the structure if it has not already been allocated by a
3782 entry
= (struct bfd_hash_entry
*)
3783 bfd_hash_allocate (table
, sizeof (struct elf32_arm_stub_hash_entry
));
3788 /* Call the allocation method of the superclass. */
3789 entry
= bfd_hash_newfunc (entry
, table
, string
);
3792 struct elf32_arm_stub_hash_entry
*eh
;
3794 /* Initialize the local fields. */
3795 eh
= (struct elf32_arm_stub_hash_entry
*) entry
;
3796 eh
->stub_sec
= NULL
;
3797 eh
->stub_offset
= (bfd_vma
) -1;
3798 eh
->source_value
= 0;
3799 eh
->target_value
= 0;
3800 eh
->target_section
= NULL
;
3802 eh
->stub_type
= arm_stub_none
;
3804 eh
->stub_template
= NULL
;
3805 eh
->stub_template_size
= -1;
3808 eh
->output_name
= NULL
;
3814 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3815 shortcuts to them in our hash table. */
3818 create_got_section (bfd
*dynobj
, struct bfd_link_info
*info
)
3820 struct elf32_arm_link_hash_table
*htab
;
3822 htab
= elf32_arm_hash_table (info
);
3826 if (! _bfd_elf_create_got_section (dynobj
, info
))
3829 /* Also create .rofixup. */
3832 htab
->srofixup
= bfd_make_section_with_flags (dynobj
, ".rofixup",
3833 (SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
3834 | SEC_IN_MEMORY
| SEC_LINKER_CREATED
| SEC_READONLY
));
3835 if (htab
->srofixup
== NULL
3836 || !bfd_set_section_alignment (htab
->srofixup
, 2))
3843 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3846 create_ifunc_sections (struct bfd_link_info
*info
)
3848 struct elf32_arm_link_hash_table
*htab
;
3849 const struct elf_backend_data
*bed
;
3854 htab
= elf32_arm_hash_table (info
);
3855 dynobj
= htab
->root
.dynobj
;
3856 bed
= get_elf_backend_data (dynobj
);
3857 flags
= bed
->dynamic_sec_flags
;
3859 if (htab
->root
.iplt
== NULL
)
3861 s
= bfd_make_section_anyway_with_flags (dynobj
, ".iplt",
3862 flags
| SEC_READONLY
| SEC_CODE
);
3864 || !bfd_set_section_alignment (s
, bed
->plt_alignment
))
3866 htab
->root
.iplt
= s
;
3869 if (htab
->root
.irelplt
== NULL
)
3871 s
= bfd_make_section_anyway_with_flags (dynobj
,
3872 RELOC_SECTION (htab
, ".iplt"),
3873 flags
| SEC_READONLY
);
3875 || !bfd_set_section_alignment (s
, bed
->s
->log_file_align
))
3877 htab
->root
.irelplt
= s
;
3880 if (htab
->root
.igotplt
== NULL
)
3882 s
= bfd_make_section_anyway_with_flags (dynobj
, ".igot.plt", flags
);
3884 || !bfd_set_section_alignment (s
, bed
->s
->log_file_align
))
3886 htab
->root
.igotplt
= s
;
3891 /* Determine if we're dealing with a Thumb only architecture. */
3894 using_thumb_only (struct elf32_arm_link_hash_table
*globals
)
3897 int profile
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3898 Tag_CPU_arch_profile
);
3901 return profile
== 'M';
3903 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3905 /* Force return logic to be reviewed for each new architecture. */
3906 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
3908 if (arch
== TAG_CPU_ARCH_V6_M
3909 || arch
== TAG_CPU_ARCH_V6S_M
3910 || arch
== TAG_CPU_ARCH_V7E_M
3911 || arch
== TAG_CPU_ARCH_V8M_BASE
3912 || arch
== TAG_CPU_ARCH_V8M_MAIN
3913 || arch
== TAG_CPU_ARCH_V8_1M_MAIN
)
3919 /* Determine if we're dealing with a Thumb-2 object. */
3922 using_thumb2 (struct elf32_arm_link_hash_table
*globals
)
3925 int thumb_isa
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3928 /* No use of thumb permitted, or a legacy thumb-1/2 definition. */
3930 return thumb_isa
== 2;
3932 /* Variant of thumb is described by the architecture tag. */
3933 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3935 /* Force return logic to be reviewed for each new architecture. */
3936 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
3938 return (arch
== TAG_CPU_ARCH_V6T2
3939 || arch
== TAG_CPU_ARCH_V7
3940 || arch
== TAG_CPU_ARCH_V7E_M
3941 || arch
== TAG_CPU_ARCH_V8
3942 || arch
== TAG_CPU_ARCH_V8R
3943 || arch
== TAG_CPU_ARCH_V8M_MAIN
3944 || arch
== TAG_CPU_ARCH_V8_1M_MAIN
);
3947 /* Determine whether Thumb-2 BL instruction is available. */
3950 using_thumb2_bl (struct elf32_arm_link_hash_table
*globals
)
3953 bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3955 /* Force return logic to be reviewed for each new architecture. */
3956 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V9
);
3958 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3959 return (arch
== TAG_CPU_ARCH_V6T2
3960 || arch
>= TAG_CPU_ARCH_V7
);
3963 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3964 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3968 elf32_arm_create_dynamic_sections (bfd
*dynobj
, struct bfd_link_info
*info
)
3970 struct elf32_arm_link_hash_table
*htab
;
3972 htab
= elf32_arm_hash_table (info
);
3976 if (!htab
->root
.sgot
&& !create_got_section (dynobj
, info
))
3979 if (!_bfd_elf_create_dynamic_sections (dynobj
, info
))
3982 if (htab
->root
.target_os
== is_vxworks
)
3984 if (!elf_vxworks_create_dynamic_sections (dynobj
, info
, &htab
->srelplt2
))
3987 if (bfd_link_pic (info
))
3989 htab
->plt_header_size
= 0;
3990 htab
->plt_entry_size
3991 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry
);
3995 htab
->plt_header_size
3996 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry
);
3997 htab
->plt_entry_size
3998 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry
);
4001 if (elf_elfheader (dynobj
))
4002 elf_elfheader (dynobj
)->e_ident
[EI_CLASS
] = ELFCLASS32
;
4007 Test for thumb only architectures. Note - we cannot just call
4008 using_thumb_only() as the attributes in the output bfd have not been
4009 initialised at this point, so instead we use the input bfd. */
4010 bfd
* saved_obfd
= htab
->obfd
;
4012 htab
->obfd
= dynobj
;
4013 if (using_thumb_only (htab
))
4015 htab
->plt_header_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry
);
4016 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt_entry
);
4018 htab
->obfd
= saved_obfd
;
4021 if (htab
->fdpic_p
) {
4022 htab
->plt_header_size
= 0;
4023 if (info
->flags
& DF_BIND_NOW
)
4024 htab
->plt_entry_size
= 4 * (ARRAY_SIZE (elf32_arm_fdpic_plt_entry
) - 5);
4026 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry
);
4029 if (!htab
->root
.splt
4030 || !htab
->root
.srelplt
4031 || !htab
->root
.sdynbss
4032 || (!bfd_link_pic (info
) && !htab
->root
.srelbss
))
4038 /* Copy the extra info we tack onto an elf_link_hash_entry. */
4041 elf32_arm_copy_indirect_symbol (struct bfd_link_info
*info
,
4042 struct elf_link_hash_entry
*dir
,
4043 struct elf_link_hash_entry
*ind
)
4045 struct elf32_arm_link_hash_entry
*edir
, *eind
;
4047 edir
= (struct elf32_arm_link_hash_entry
*) dir
;
4048 eind
= (struct elf32_arm_link_hash_entry
*) ind
;
4050 if (ind
->root
.type
== bfd_link_hash_indirect
)
4052 /* Copy over PLT info. */
4053 edir
->plt
.thumb_refcount
+= eind
->plt
.thumb_refcount
;
4054 eind
->plt
.thumb_refcount
= 0;
4055 edir
->plt
.maybe_thumb_refcount
+= eind
->plt
.maybe_thumb_refcount
;
4056 eind
->plt
.maybe_thumb_refcount
= 0;
4057 edir
->plt
.noncall_refcount
+= eind
->plt
.noncall_refcount
;
4058 eind
->plt
.noncall_refcount
= 0;
4060 /* Copy FDPIC counters. */
4061 edir
->fdpic_cnts
.gotofffuncdesc_cnt
+= eind
->fdpic_cnts
.gotofffuncdesc_cnt
;
4062 edir
->fdpic_cnts
.gotfuncdesc_cnt
+= eind
->fdpic_cnts
.gotfuncdesc_cnt
;
4063 edir
->fdpic_cnts
.funcdesc_cnt
+= eind
->fdpic_cnts
.funcdesc_cnt
;
4065 /* We should only allocate a function to .iplt once the final
4066 symbol information is known. */
4067 BFD_ASSERT (!eind
->is_iplt
);
4069 if (dir
->got
.refcount
<= 0)
4071 edir
->tls_type
= eind
->tls_type
;
4072 eind
->tls_type
= GOT_UNKNOWN
;
4076 _bfd_elf_link_hash_copy_indirect (info
, dir
, ind
);
4079 /* Destroy an ARM elf linker hash table. */
4082 elf32_arm_link_hash_table_free (bfd
*obfd
)
4084 struct elf32_arm_link_hash_table
*ret
4085 = (struct elf32_arm_link_hash_table
*) obfd
->link
.hash
;
4087 bfd_hash_table_free (&ret
->stub_hash_table
);
4088 _bfd_elf_link_hash_table_free (obfd
);
4091 /* Create an ARM elf linker hash table. */
4093 static struct bfd_link_hash_table
*
4094 elf32_arm_link_hash_table_create (bfd
*abfd
)
4096 struct elf32_arm_link_hash_table
*ret
;
4097 size_t amt
= sizeof (struct elf32_arm_link_hash_table
);
4099 ret
= (struct elf32_arm_link_hash_table
*) bfd_zmalloc (amt
);
4103 if (!_bfd_elf_link_hash_table_init (& ret
->root
, abfd
,
4104 elf32_arm_link_hash_newfunc
,
4105 sizeof (struct elf32_arm_link_hash_entry
),
4112 ret
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
4113 ret
->stm32l4xx_fix
= BFD_ARM_STM32L4XX_FIX_NONE
;
4114 #ifdef FOUR_WORD_PLT
4115 ret
->plt_header_size
= 16;
4116 ret
->plt_entry_size
= 16;
4118 ret
->plt_header_size
= 20;
4119 ret
->plt_entry_size
= elf32_arm_use_long_plt_entry
? 16 : 12;
4121 ret
->use_rel
= true;
4125 if (!bfd_hash_table_init (&ret
->stub_hash_table
, stub_hash_newfunc
,
4126 sizeof (struct elf32_arm_stub_hash_entry
)))
4128 _bfd_elf_link_hash_table_free (abfd
);
4131 ret
->root
.root
.hash_table_free
= elf32_arm_link_hash_table_free
;
4133 return &ret
->root
.root
;
4136 /* Determine what kind of NOPs are available. */
4139 arch_has_arm_nop (struct elf32_arm_link_hash_table
*globals
)
4141 const int arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
4144 /* Force return logic to be reviewed for each new architecture. */
4145 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V9
);
4147 return (arch
== TAG_CPU_ARCH_V6T2
4148 || arch
== TAG_CPU_ARCH_V6K
4149 || arch
== TAG_CPU_ARCH_V7
4150 || arch
== TAG_CPU_ARCH_V8
4151 || arch
== TAG_CPU_ARCH_V8R
4152 || arch
== TAG_CPU_ARCH_V9
);
static bool
arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_thumb2_only_pure:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_cmse_branch_thumb_only:
      return true;

    default:
      return false;
    }
}
/* Determine the type of stub needed, if any, for a call.  */

static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
                  asection *input_sec,
                  const Elf_Internal_Rela *rel,
                  unsigned char st_type,
                  enum arm_st_branch_type *actual_branch_type,
                  struct elf32_arm_link_hash_entry *hash,
                  bfd_vma destination,
                  asection *sym_sec,
                  bfd *input_bfd,
                  const char *name)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  bool thumb2, thumb2_bl, thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;
  int arch;
  int thumb2_movw;

  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);
  thumb2 = using_thumb2 (globals);
  thumb2_bl = using_thumb2_bl (globals);

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* True for architectures that implement the thumb2 movw instruction.  */
  thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
              + input_sec->output_section->vma
              + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);

  /* Don't pretend we know what stub to use (if any) when we target a
     Thumb-only target and we don't know the actual destination
     type.  */
  if (branch_type == ST_BRANCH_UNKNOWN && thumb_only)
    return stub_type;

  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are considering a function call relocation.  */
  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
                     || r_type == R_ARM_THM_JUMP19)
      && branch_type == ST_BRANCH_TO_ARM)
    {
      if (sym_sec == bfd_abs_section_ptr)
        /* As an exception, assume that absolute symbols are of the
           right kind (Thumb).  They are presumably defined in the
           linker script, where it is not possible to declare them as
           Thumb (and thus are seen as Arm mode).  We'll inform the
           user with a warning, though, in
           elf32_arm_final_link_relocate.  */
        branch_type = ST_BRANCH_TO_THUMB;
      else
        /* Otherwise do not silently build a stub, and let the users
           know they have to fix their code.  Indeed, we could decide
           to insert a stub involving Arm code and/or BLX, leading to
           a run-time crash.  */
        return stub_type;
    }

  /* For TLS call relocs, it is the caller's responsibility to provide
     the address of the appropriate trampoline.  */
  if (r_type != R_ARM_TLS_CALL
      && r_type != R_ARM_THM_TLS_CALL
      && elf32_arm_get_plt_info (input_bfd, globals, hash,
                                 ELF32_R_SYM (rel->r_info), &root_plt,
                                 &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      asection *splt;

      if (hash == NULL || hash->is_iplt)
        splt = globals->root.iplt;
      else
        splt = globals->root.splt;
      if (splt != NULL)
        {
          use_plt = 1;

          /* Note when dealing with PLT entries: the main PLT stub is in
             ARM mode, so if the branch is in Thumb mode, another
             Thumb->ARM stub will be inserted later just before the ARM
             PLT stub.  If a long branch stub is needed, we'll add a
             Thumb->Arm one and branch directly to the ARM PLT entry.
             Here, we have to check if a pre-PLT Thumb->ARM stub
             is needed and if it will be close enough.  */

          destination = (splt->output_section->vma
                         + splt->output_offset
                         + root_plt->offset);
          st_type = STT_FUNC;

          /* Thumb branch/call to PLT: it can become a branch to ARM
             or to Thumb.  We must perform the same checks and
             corrections as in elf32_arm_final_link_relocate.  */
          if ((r_type == R_ARM_THM_CALL)
              || (r_type == R_ARM_THM_JUMP24))
            {
              if (globals->use_blx
                  && r_type == R_ARM_THM_CALL
                  && !thumb_only)
                {
                  /* If the Thumb BLX instruction is available, convert
                     the BL to a BLX instruction to call the ARM-mode
                     PLT entry.  */
                  branch_type = ST_BRANCH_TO_ARM;
                }
              else
                {
                  /* Target the Thumb stub before the ARM PLT entry.  */
                  destination -= PLT_THUMB_STUB_SIZE;
                  branch_type = ST_BRANCH_TO_THUMB;
                }
            }
          else
            {
              branch_type = ST_BRANCH_TO_ARM;
            }
        }
    }
  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
  BFD_ASSERT (st_type != STT_GNU_IFUNC);

  branch_offset = (bfd_signed_vma)(destination - location);

  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
    {
      /* Handle cases where:
         - this call goes too far (different Thumb/Thumb2 max
           distance)
         - it's a Thumb->Arm call and blx is not available, or it's a
           Thumb->Arm branch (not bl).  A stub is needed in this case,
           but only if this call is not through a PLT entry.  Indeed,
           PLT stubs handle mode switching already.  */
      if ((!thumb2_bl
           && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
               || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
          || (thumb2_bl
              && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
                  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
          || (thumb2
              && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
                  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
              && (r_type == R_ARM_THM_JUMP19))
          || (branch_type == ST_BRANCH_TO_ARM
              && (((r_type == R_ARM_THM_CALL
                    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
                  || (r_type == R_ARM_THM_JUMP24)
                  || (r_type == R_ARM_THM_JUMP19))
              && !use_plt))
        {
          /* If we need to insert a Thumb-Thumb long branch stub to a
             PLT, use one that branches directly to the ARM PLT
             stub.  If we pretended we'd use the pre-PLT Thumb->ARM
             stub, undo this now.  */
          if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
            {
              branch_type = ST_BRANCH_TO_ARM;
              branch_offset += PLT_THUMB_STUB_SIZE;
            }

          if (branch_type == ST_BRANCH_TO_THUMB)
            {
              /* Thumb to thumb.  */
              if (!thumb_only)
                {
                  if (input_sec->flags & SEC_ELF_PURECODE)
                    _bfd_error_handler
                      (_("%pB(%pA): warning: long branch veneers used in"
                         " section with SHF_ARM_PURECODE section"
                         " attribute is only supported for M-profile"
                         " targets that implement the movw instruction"),
                       input_bfd, input_sec);

                  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
                    /* PIC stubs.  */
                    ? ((globals->use_blx
                        && (r_type == R_ARM_THM_CALL))
                       /* V5T and above.  Stub starts with ARM code, so
                          we must be able to switch mode before
                          reaching it, which is only possible for 'bl'
                          (ie R_ARM_THM_CALL relocation).  */
                       ? arm_stub_long_branch_any_thumb_pic
                       /* On V4T, use Thumb code only.  */
                       : arm_stub_long_branch_v4t_thumb_thumb_pic)

                    /* non-PIC stubs.  */
                    : ((globals->use_blx
                        && (r_type == R_ARM_THM_CALL))
                       /* V5T and above.  */
                       ? arm_stub_long_branch_any_any
                       /* V4T.  */
                       : arm_stub_long_branch_v4t_thumb_thumb);
                }
              else
                {
                  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
                    stub_type = arm_stub_long_branch_thumb2_only_pure;
                  else
                    {
                      if (input_sec->flags & SEC_ELF_PURECODE)
                        _bfd_error_handler
                          (_("%pB(%pA): warning: long branch veneers used in"
                             " section with SHF_ARM_PURECODE section"
                             " attribute is only supported for M-profile"
                             " targets that implement the movw instruction"),
                           input_bfd, input_sec);

                      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
                        /* PIC stub.  */
                        ? arm_stub_long_branch_thumb_only_pic
                        /* non-PIC stub.  */
                        : (thumb2 ? arm_stub_long_branch_thumb2_only
                                  : arm_stub_long_branch_thumb_only);
                    }
                }
            }
          else
            {
              /* Thumb to arm.  */
              if (input_sec->flags & SEC_ELF_PURECODE)
                _bfd_error_handler
                  (_("%pB(%pA): warning: long branch veneers used in"
                     " section with SHF_ARM_PURECODE section"
                     " attribute is only supported" " for M-profile"
                     " targets that implement the movw instruction"),
                   input_bfd, input_sec);

              if (sym_sec != NULL
                  && sym_sec->owner != NULL
                  && !INTERWORK_FLAG (sym_sec->owner))
                {
                  _bfd_error_handler
                    (_("%pB(%s): warning: interworking not enabled;"
                       " first occurrence: %pB: %s call to %s"),
                     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
                }

              stub_type =
                (bfd_link_pic (info) | globals->pic_veneer)
                /* PIC stubs.  */
                ? (r_type == R_ARM_THM_TLS_CALL
                   /* TLS PIC stubs.  */
                   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
                      : arm_stub_long_branch_v4t_thumb_tls_pic)
                   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
                      /* V5T PIC and above.  */
                      ? arm_stub_long_branch_any_arm_pic
                      /* V4T PIC stub.  */
                      : arm_stub_long_branch_v4t_thumb_arm_pic))

                /* non-PIC stubs.  */
                : ((globals->use_blx && r_type == R_ARM_THM_CALL)
                   /* V5T and above.  */
                   ? arm_stub_long_branch_any_any
                   /* V4T.  */
                   : arm_stub_long_branch_v4t_thumb_arm);

              /* Handle v4t short branches.  */
              if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
                  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
                  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
                stub_type = arm_stub_short_branch_v4t_thumb_arm;
            }
        }
    }
  else if (r_type == R_ARM_CALL
           || r_type == R_ARM_JUMP24
           || r_type == R_ARM_PLT32
           || r_type == R_ARM_TLS_CALL)
    {
      if (input_sec->flags & SEC_ELF_PURECODE)
        _bfd_error_handler
          (_("%pB(%pA): warning: long branch veneers used in"
             " section with SHF_ARM_PURECODE section"
             " attribute is only supported for M-profile"
             " targets that implement the movw instruction"),
           input_bfd, input_sec);
      if (branch_type == ST_BRANCH_TO_THUMB)
        {
          /* Arm to thumb.  */
          if (sym_sec != NULL
              && sym_sec->owner != NULL
              && !INTERWORK_FLAG (sym_sec->owner))
            {
              _bfd_error_handler
                (_("%pB(%s): warning: interworking not enabled;"
                   " first occurrence: %pB: %s call to %s"),
                 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
            }

          /* We have an extra 2-bytes reach because of
             the mode change (bit 24 (H) of BLX encoding).  */
          if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
              || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
              || (r_type == R_ARM_CALL && !globals->use_blx)
              || (r_type == R_ARM_JUMP24)
              || (r_type == R_ARM_PLT32))
            {
              stub_type = (bfd_link_pic (info) | globals->pic_veneer)
                /* PIC stubs.  */
                ? ((globals->use_blx)
                   /* V5T and above.  */
                   ? arm_stub_long_branch_any_thumb_pic
                   /* V4T stub.  */
                   : arm_stub_long_branch_v4t_arm_thumb_pic)

                /* non-PIC stubs.  */
                : ((globals->use_blx)
                   /* V5T and above.  */
                   ? arm_stub_long_branch_any_any
                   /* V4T.  */
                   : arm_stub_long_branch_v4t_arm_thumb);
            }
        }
      else
        {
          /* Arm to arm.  */
          if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
              || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
            {
              stub_type =
                (bfd_link_pic (info) | globals->pic_veneer)
                /* PIC stubs.  */
                ? (r_type == R_ARM_TLS_CALL
                   /* TLS PIC stub.  */
                   ? arm_stub_long_branch_any_tls_pic
                   : (globals->root.target_os == is_nacl
                      ? arm_stub_long_branch_arm_nacl_pic
                      : arm_stub_long_branch_any_arm_pic))
                /* non-PIC stubs.  */
                : (globals->root.target_os == is_nacl
                   ? arm_stub_long_branch_arm_nacl
                   : arm_stub_long_branch_any_any);
            }
        }
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}
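/* For illustration only (not used by the code above): a Thumb-1 BL reaches
   roughly +/-4MB (THM_MAX_FWD/BWD_BRANCH_OFFSET) while a Thumb-2 BL reaches
   roughly +/-16MB, so a branch_offset of e.g. 0x00800000 (8MB) forces a
   long-branch stub on a Thumb-1 target but needs none with Thumb-2 BL; that
   is why the distance test above is split on thumb2_bl.  */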
/* Build a name for an entry in the stub hash table.  */

static char *
elf32_arm_stub_name (const asection *input_section,
                     const asection *sym_sec,
                     const struct elf32_arm_link_hash_entry *hash,
                     const Elf_Internal_Rela *rel,
                     enum elf32_arm_stub_type stub_type)
{
  char *stub_name;
  bfd_size_type len;

  if (hash)
    {
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
        sprintf (stub_name, "%08x_%s+%x_%d",
                 input_section->id & 0xffffffff,
                 hash->root.root.root.string,
                 (int) rel->r_addend & 0xffffffff,
                 (int) stub_type);
    }
  else
    {
      len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
        sprintf (stub_name, "%08x_%x:%x+%x_%d",
                 input_section->id & 0xffffffff,
                 sym_sec->id & 0xffffffff,
                 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
                 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
                 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
                 (int) rel->r_addend & 0xffffffff,
                 (int) stub_type);
    }

  return stub_name;
}
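/* Worked example (illustrative values only): for a global symbol the first
   format above produces keys such as "0000002a_printf+0_8", i.e. the id of
   the group-leader input section, the symbol name, the addend and the
   numeric stub type; for a local symbol the second format yields e.g.
   "0000002a_1f:34+0_8", with the target section id and symbol index in place
   of the name.  The exact values depend on the link; this only shows the
   shape of the keys used in the stub hash table.  */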
/* Look up an entry in the stub hash.  Stub entries are cached because
   creating the stub name takes a bit of time.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_get_stub_entry (const asection *input_section,
                          const asection *sym_sec,
                          struct elf_link_hash_entry *hash,
                          const Elf_Internal_Rela *rel,
                          struct elf32_arm_link_hash_table *htab,
                          enum elf32_arm_stub_type stub_type)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
  const asection *id_sec;

  if ((input_section->flags & SEC_CODE) == 0)
    return NULL;

  /* If the input section is the CMSE stubs one and it needs a long
     branch stub to reach it's final destination, give up with an
     error message: this is not supported.  See PR ld/24709.  */
  if (!strncmp (input_section->name, CMSE_STUB_NAME, strlen (CMSE_STUB_NAME)))
    {
      bfd *output_bfd = htab->obfd;
      asection *out_sec = bfd_get_section_by_name (output_bfd, CMSE_STUB_NAME);

      _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
                            "(%#" PRIx64 ") from destination (%#" PRIx64 ")"),
                          CMSE_STUB_NAME,
                          (uint64_t)out_sec->output_section->vma
                            + out_sec->output_offset,
                          (uint64_t)sym_sec->output_section->vma
                            + sym_sec->output_offset
                            + h->root.root.u.def.value);
      /* Exit, rather than leave incompletely processed
         relocations.  */
      xexit (1);
    }

  /* If this input section is part of a group of sections sharing one
     stub section, then use the id of the first section in the group.
     Stub names need to include a section id, as there may well be
     more than one stub used to reach say, printf, and we need to
     distinguish between them.  */
  BFD_ASSERT (input_section->id <= htab->top_id);
  id_sec = htab->stub_group[input_section->id].link_sec;

  if (h != NULL && h->stub_cache != NULL
      && h->stub_cache->h == h
      && h->stub_cache->id_sec == id_sec
      && h->stub_cache->stub_type == stub_type)
    {
      stub_entry = h->stub_cache;
    }
  else
    {
      char *stub_name;

      stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
      if (stub_name == NULL)
        return NULL;

      stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
                                         stub_name, false, false);
      if (h != NULL)
        h->stub_cache = stub_entry;

      free (stub_name);
    }

  return stub_entry;
}
/* Whether veneers of type STUB_TYPE require to be in a dedicated output
   section.  */

static bool
arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return true;

    default:
      return false;
    }

  abort ();  /* Should be unreachable.  */
}
/* Required alignment (as a power of 2) for the dedicated section holding
   veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
   with input sections.  */

static int
arm_dedicated_stub_output_section_required_alignment
  (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    /* Vectors of Secure Gateway veneers must be aligned on 32byte
       boundary.  */
    case arm_stub_cmse_branch_thumb_only:
      return 5;

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return 0;
    }

  abort ();  /* Should be unreachable.  */
}
/* Name of the dedicated output section to put veneers of type STUB_TYPE, or
   NULL if veneers of this type are interspersed with input sections.  */

static const char *
arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return CMSE_STUB_NAME;

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return NULL;
    }

  abort ();  /* Should be unreachable.  */
}
/* If veneers of type STUB_TYPE should go in a dedicated output section,
   returns the address of the hash table field in HTAB holding a pointer to the
   corresponding input section.  Otherwise, returns NULL.  */

static asection **
arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
                                      enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return &htab->cmse_stub_sec;

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return NULL;
    }

  abort ();  /* Should be unreachable.  */
}
/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
   is the section that branch into veneer and can be NULL if stub should go in
   a dedicated output section.  Returns a pointer to the stub section, and the
   section to which the stub section will be attached (in *LINK_SEC_P).
   LINK_SEC_P may be NULL.  */

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
                                   struct elf32_arm_link_hash_table *htab,
                                   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bool dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
        arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
        {
          _bfd_error_handler (_("no address assigned to the veneers output "
                                "section %s"), out_sec_name);
          return NULL;
        }
    }
  else
    {
      BFD_ASSERT (section->id <= htab->top_id);
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
        stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      align = htab->root.target_os == is_nacl ? 4 : 3;
    }

  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
        return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
                                               align);
      if (*stub_sec_p == NULL)
        return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
                        | SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
                        | SEC_KEEP;
    }

  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
/* Add a new stub entry to the stub hash.  Not all fields of the new
   stub entry are initialised.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_add_stub (const char *stub_name, asection *section,
                    struct elf32_arm_link_hash_table *htab,
                    enum elf32_arm_stub_type stub_type)
{
  asection *link_sec;
  asection *stub_sec;
  struct elf32_arm_stub_hash_entry *stub_entry;

  stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
                                                stub_type);
  if (stub_sec == NULL)
    return NULL;

  /* Enter this entry into the linker stub hash table.  */
  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
                                     true, false);
  if (stub_entry == NULL)
    {
      if (section == NULL)
        section = stub_sec;
      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
                          section->owner, stub_name);
      return NULL;
    }

  stub_entry->stub_sec = stub_sec;
  stub_entry->stub_offset = (bfd_vma) -1;
  stub_entry->id_sec = link_sec;

  return stub_entry;
}
/* Store an Arm insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_arm_insn (struct elf32_arm_link_hash_table * htab,
              bfd * output_bfd, bfd_vma val, void * ptr)
{
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    bfd_putl32 (val, ptr);
  else
    bfd_putb32 (val, ptr);
}

/* Store a 16-bit Thumb insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_thumb_insn (struct elf32_arm_link_hash_table * htab,
                bfd * output_bfd, bfd_vma val, void * ptr)
{
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    bfd_putl16 (val, ptr);
  else
    bfd_putb16 (val, ptr);
}

/* Store a Thumb2 insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
                 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
{
  /* T2 instructions are 16-bit streamed.  */
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    {
      bfd_putl16 ((val >> 16) & 0xffff, ptr);
      bfd_putl16 ((val & 0xffff), ptr + 2);
    }
  else
    {
      bfd_putb16 ((val >> 16) & 0xffff, ptr);
      bfd_putb16 ((val & 0xffff), ptr + 2);
    }
}
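/* Illustrative example (little-endian output assumed): a 32-bit Thumb-2
   value such as 0xf000f800 is emitted as the halfword 0xf000 at PTR followed
   by 0xf800 at PTR + 2, each halfword little-endian, giving the byte stream
   00 f0 00 f8.  This is what the "16-bit streamed" comment above refers to.  */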
/* If it's possible to change R_TYPE to a more efficient access
   model, return the new reloc type.  */

static int
elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
                          struct elf_link_hash_entry *h)
{
  int is_local = (h == NULL);

  if (bfd_link_dll (info)
      || (h && h->root.type == bfd_link_hash_undefweak))
    return r_type;

  /* We do not support relaxations for Old TLS models.  */
  switch (r_type)
    {
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
    }

  return r_type;
}
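/* Illustration of the relaxation above (not a statement about any particular
   link): when linking an executable, a TLS descriptor sequence against a
   symbol resolved locally (h == NULL here) can be rewritten to R_ARM_TLS_LE32,
   i.e. a direct TP-relative offset, while a global symbol that may still be
   defined elsewhere keeps a single GOT load via R_ARM_TLS_IE32.  */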
static bfd_reloc_status_type elf32_arm_final_link_relocate
  (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
   Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
   const char *, unsigned char, enum arm_st_branch_type,
   struct elf_link_hash_entry *, bool *, char **);
static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_thumb2_only_pure:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_cmse_branch_thumb_only:
    case arm_stub_a8_veneer_blx:
      return 4;

    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}
/* Returns whether stubs of type STUB_TYPE take over the symbol they are
   veneering (TRUE) or have their own symbol (FALSE).  */

static bool
arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return true;

    default:
      return false;
    }

  abort ();  /* Should be unreachable.  */
}
/* Returns the padding needed for the dedicated section used stubs of type
   STUB_TYPE.  */

static int
arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return 32;

    default:
      return 0;
    }

  abort ();  /* Should be unreachable.  */
}
/* If veneers of type STUB_TYPE should go in a dedicated output section,
   returns the address of the hash table field in HTAB holding the offset at
   which new veneers should be layed out in the stub section.  */

static bfd_vma *
arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
                                enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return &htab->new_cmse_stub_offset;

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return NULL;
    }
}
#define MAXRELOCS 3

static bool
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
                    void * in_arg)
{
  bool removed_sg_veneer;
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_table *globals;
  struct bfd_link_info *info;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  int template_size;
  int size;
  const insn_sequence *template_sequence;
  int i;
  int stub_reloc_idx[MAXRELOCS] = {-1, -1};
  int stub_reloc_offset[MAXRELOCS] = {0, 0};
  int nrelocs = 0;
  int just_allocated = 0;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) in_arg;

  /* Fail if the target section could not be assigned to an output
     section.  The user should fix his linker script.  */
  if (stub_entry->target_section->output_section == NULL
      && info->non_contiguous_regions)
    info->callbacks->einfo (_("%F%P: Could not assign `%pA' to an output section. "
                              "Retry without --enable-non-contiguous-regions.\n"),
                            stub_entry->target_section);

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return false;

  stub_sec = stub_entry->stub_sec;

  if ((globals->fix_cortex_a8 < 0)
      != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
    /* We have to do less-strictly-aligned fixes last.  */
    return true;

  /* Assign a slot at the end of section if none assigned yet.  */
  if (stub_entry->stub_offset == (bfd_vma) -1)
    {
      stub_entry->stub_offset = stub_sec->size;
      just_allocated = 1;
    }
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
               + stub_entry->target_section->output_offset
               + stub_entry->target_section->output_section->vma);

  template_sequence = stub_entry->stub_template;
  template_size = stub_entry->stub_template_size;

  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
        {
        case THUMB16_TYPE:
          {
            bfd_vma data = (bfd_vma) template_sequence[i].data;
            if (template_sequence[i].reloc_addend != 0)
              {
                /* We've borrowed the reloc_addend field to mean we should
                   insert a condition code into this (Thumb-1 branch)
                   instruction.  See THUMB16_BCOND_INSN.  */
                BFD_ASSERT ((data & 0xff00) == 0xd000);
                data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
              }
            bfd_put_16 (stub_bfd, data, loc + size);
            size += 2;
          }
          break;

        case THUMB32_TYPE:
          bfd_put_16 (stub_bfd,
                      (template_sequence[i].data >> 16) & 0xffff,
                      loc + size);
          bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
                      loc + size + 2);
          if (template_sequence[i].r_type != R_ARM_NONE)
            {
              stub_reloc_idx[nrelocs] = i;
              stub_reloc_offset[nrelocs++] = size;
            }
          size += 4;
          break;

        case ARM_TYPE:
          bfd_put_32 (stub_bfd, template_sequence[i].data,
                      loc + size);
          /* Handle cases where the target is encoded within the
             instruction.  */
          if (template_sequence[i].r_type == R_ARM_JUMP24)
            {
              stub_reloc_idx[nrelocs] = i;
              stub_reloc_offset[nrelocs++] = size;
            }
          size += 4;
          break;

        case DATA_TYPE:
          bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
          stub_reloc_idx[nrelocs] = i;
          stub_reloc_offset[nrelocs++] = size;
          size += 4;
          break;

        default:
          BFD_FAIL ();
          return false;
        }
    }

  if (just_allocated)
    stub_sec->size += size;

  /* Stub size has already been computed in arm_size_one_stub.  Check
     consistency.  */
  BFD_ASSERT (size == stub_entry->stub_size);

  /* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
  if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
    sym_value |= 1;

  /* Assume non empty slots have at least one and at most MAXRELOCS entries
     to relocate in each stub.  */
  removed_sg_veneer =
    (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
  BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));

  for (i = 0; i < nrelocs; i++)
    {
      Elf_Internal_Rela rel;
      bool unresolved_reloc;
      char *error_message;
      bfd_vma points_to =
        sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;

      rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
      rel.r_info = ELF32_R_INFO (0,
                                 template_sequence[stub_reloc_idx[i]].r_type);
      rel.r_addend = 0;

      if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
        /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
           template should refer back to the instruction after the original
           branch.  We use target_section as Cortex-A8 erratum workaround stubs
           are only generated when both source and target are in the same
           section.  */
        points_to = stub_entry->target_section->output_section->vma
                    + stub_entry->target_section->output_offset
                    + stub_entry->source_value;

      elf32_arm_final_link_relocate (elf32_arm_howto_from_type
          (template_sequence[stub_reloc_idx[i]].r_type),
           stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
           points_to, info, stub_entry->target_section, "", STT_FUNC,
           stub_entry->branch_type,
           (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
           &error_message);
    }

  return true;
}
/* Calculate the template, template size and instruction size for a stub.
   Return value is the instruction size.  */

static unsigned int
find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
                             const insn_sequence **stub_template,
                             int *stub_template_size)
{
  const insn_sequence *template_sequence = NULL;
  int template_size = 0, i;
  unsigned int size;

  template_sequence = stub_definitions[stub_type].template_sequence;
  if (stub_template)
    *stub_template = template_sequence;

  template_size = stub_definitions[stub_type].template_size;
  if (stub_template_size)
    *stub_template_size = template_size;

  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
        {
        case THUMB16_TYPE:
          size += 2;
          break;

        case ARM_TYPE:
        case THUMB32_TYPE:
        case DATA_TYPE:
          size += 4;
          break;

        default:
          BFD_FAIL ();
          return 0;
        }
    }

  return size;
}
/* As above, but don't actually build the stub.  Just bump offset so
   we know stub section sizes.  */

static bool
arm_size_one_stub (struct bfd_hash_entry *gen_entry,
                   void *in_arg ATTRIBUTE_UNUSED)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  const insn_sequence *template_sequence;
  int template_size, size;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;

  BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
              && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));

  size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
                                      &template_size);

  /* Initialized to -1.  Null size indicates an empty slot full of zeros.  */
  if (stub_entry->stub_template_size)
    {
      stub_entry->stub_size = size;
      stub_entry->stub_template = template_sequence;
      stub_entry->stub_template_size = template_size;
    }

  /* Already accounted for.  */
  if (stub_entry->stub_offset != (bfd_vma) -1)
    return true;

  size = (size + 7) & ~7;
  stub_entry->stub_sec->size += size;

  return true;
}
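/* For illustration: the rounding above pads each newly sized stub to an
   8-byte boundary, so a 12-byte template reserves 16 bytes in the stub
   section ((12 + 7) & ~7 == 16) while a 16-byte template reserves exactly
   16 bytes.  */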
/* External entry points for sizing and building linker stubs.  */

/* Set up various things so that we can make a list of input sections
   for each output section included in the link.  Returns -1 on error,
   0 when no stubs will be needed, and 1 on success.  */

int
elf32_arm_setup_section_lists (bfd *output_bfd,
                               struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  unsigned int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  size_t amt;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
           section != NULL;
           section = section->next)
        {
          if (top_id < section->id)
            top_id = section->id;
        }
    }
  htab->bfd_count = bfd_count;

  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;
  htab->top_id = top_id;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL;
       section = section->next)
    {
      if (top_index < section->index)
        top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = (asection **) bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  for (section = output_bfd->sections;
       section != NULL;
       section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
        input_list[section->index] = NULL;
    }

  return 1;
}
/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.  */

void
elf32_arm_next_input_section (struct bfd_link_info *info,
                              asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
        {
          /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
          /* This happens to make the list in reverse order,
             which we reverse later.  */
          PREV_SEC (isec) = *list;
          *list = isec;
        }
    }
}
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the end of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
                bfd_size_type stub_group_size,
                bool stubs_always_after_branch)
{
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      if (tail == bfd_abs_section_ptr)
        continue;

      /* Reverse the list: we must avoid placing stubs at the
         beginning of the section because the beginning of the text
         section may be required for an interrupt vector in bare metal
         code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
        {
          /* Pop from tail.  */
          asection *item = tail;
          tail = PREV_SEC (item);

          /* Push on head.  */
          NEXT_SEC (item) = head;
          head = item;
        }

      while (head != NULL)
        {
          asection *curr;
          asection *next;
          bfd_vma stub_group_start = head->output_offset;
          bfd_vma end_of_next;

          curr = head;
          while (NEXT_SEC (curr) != NULL)
            {
              next = NEXT_SEC (curr);
              end_of_next = next->output_offset + next->size;
              if (end_of_next - stub_group_start >= stub_group_size)
                /* End of NEXT is too far from start, so stop.  */
                break;
              /* Add NEXT to the group.  */
              curr = next;
            }

          /* OK, the size from the start to the start of CURR is less
             than stub_group_size and thus can be handled by one stub
             section.  (Or the head section is itself larger than
             stub_group_size, in which case we may be toast.)
             We should really be keeping track of the total size of
             stubs added here, as stubs contribute to the final output
             section size.  */
          do
            {
              next = NEXT_SEC (head);
              /* Set up this stub group.  */
              htab->stub_group[head->id].link_sec = curr;
            }
          while (head != curr && (head = next) != NULL);

          /* But wait, there's more!  Input sections up to stub_group_size
             bytes after the stub section can be handled by it too.  */
          if (!stubs_always_after_branch)
            {
              stub_group_start = curr->output_offset + curr->size;

              while (next != NULL)
                {
                  end_of_next = next->output_offset + next->size;
                  if (end_of_next - stub_group_start >= stub_group_size)
                    /* End of NEXT is too far from stubs, so stop.  */
                    break;
                  /* Add NEXT to the stub group.  */
                  head = next;
                  next = NEXT_SEC (head);
                  htab->stub_group[head->id].link_sec = curr;
                }
            }
          head = next;
        }
    }
  while (list++ != htab->input_list + htab->top_index);

  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}

/* Comparison function for sorting/searching relocations relating to Cortex-A8
   erratum.  */

static int
a8_reloc_compare (const void *a, const void *b)
{
  const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
  const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;

  if (ra->from < rb->from)
    return -1;
  else if (ra->from > rb->from)
    return 1;
  else
    return 0;
}

static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
                                                    const char *, char **);
/* Helper function to scan code for sequences which might trigger the Cortex-A8
   branch/TLB erratum.  Fill in the table described by A8_FIXES_P,
   NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P.  Returns true if an error occurs, false
   otherwise.  */

static bool
cortex_a8_erratum_scan (bfd *input_bfd,
                        struct bfd_link_info *info,
                        struct a8_erratum_fix **a8_fixes_p,
                        unsigned int *num_a8_fixes_p,
                        unsigned int *a8_fix_table_size_p,
                        struct a8_erratum_reloc *a8_relocs,
                        unsigned int num_a8_relocs,
                        unsigned prev_num_a8_fixes,
                        bool *stub_changed_p)
{
  asection *section;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
  unsigned int num_a8_fixes = *num_a8_fixes_p;
  unsigned int a8_fix_table_size = *a8_fix_table_size_p;

  if (htab == NULL)
    return false;

  for (section = input_bfd->sections;
       section != NULL;
       section = section->next)
    {
      bfd_byte *contents = NULL;
      struct _arm_elf_section_data *sec_data;
      unsigned int span;
      bfd_vma base_vma;

      if (elf_section_type (section) != SHT_PROGBITS
          || (elf_section_flags (section) & SHF_EXECINSTR) == 0
          || (section->flags & SEC_EXCLUDE) != 0
          || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
          || (section->output_section == bfd_abs_section_ptr))
        continue;

      base_vma = section->output_section->vma + section->output_offset;

      if (elf_section_data (section)->this_hdr.contents != NULL)
        contents = elf_section_data (section)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
        return true;

      sec_data = elf32_arm_section_data (section);

      for (span = 0; span < sec_data->mapcount; span++)
        {
          unsigned int span_start = sec_data->map[span].vma;
          unsigned int span_end = (span == sec_data->mapcount - 1)
            ? section->size : sec_data->map[span + 1].vma;
          unsigned int i;
          char span_type = sec_data->map[span].type;
          bool last_was_32bit = false, last_was_branch = false;

          if (span_type != 't')
            continue;

          /* Span is entirely within a single 4KB region: skip scanning.  */
          if (((base_vma + span_start) & ~0xfff)
              == ((base_vma + span_end) & ~0xfff))
            continue;

          /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:

             * The opcode is BLX.W, BL.W, B.W, Bcc.W
             * The branch target is in the same 4KB region as the
               first half of the branch.
             * The instruction before the branch is a 32-bit
               length non-branch instruction.  */
          for (i = span_start; i < span_end;)
            {
              unsigned int insn = bfd_getl16 (&contents[i]);
              bool insn_32bit = false, is_blx = false, is_b = false;
              bool is_bl = false, is_bcc = false, is_32bit_branch;

              if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
                insn_32bit = true;

              if (insn_32bit)
                {
                  /* Load the rest of the insn (in manual-friendly order).  */
                  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);

                  /* Encoding T4: B<c>.W.  */
                  is_b = (insn & 0xf800d000) == 0xf0009000;
                  /* Encoding T1: BL<c>.W.  */
                  is_bl = (insn & 0xf800d000) == 0xf000d000;
                  /* Encoding T2: BLX<c>.W.  */
                  is_blx = (insn & 0xf800d000) == 0xf000c000;
                  /* Encoding T3: B<c>.W (not permitted in IT block).  */
                  is_bcc = (insn & 0xf800d000) == 0xf0008000
                           && (insn & 0x07f00000) != 0x03800000;
                }

              is_32bit_branch = is_b || is_bl || is_blx || is_bcc;

              if (((base_vma + i) & 0xfff) == 0xffe
                  && insn_32bit
                  && is_32bit_branch
                  && last_was_32bit
                  && ! last_was_branch)
                {
                  bfd_signed_vma offset = 0;
                  bool force_target_arm = false;
                  bool force_target_thumb = false;
                  bfd_vma target;
                  enum elf32_arm_stub_type stub_type = arm_stub_none;
                  struct a8_erratum_reloc key, *found;
                  bool use_plt = false;

                  key.from = base_vma + i;
                  found = (struct a8_erratum_reloc *)
                      bsearch (&key, a8_relocs, num_a8_relocs,
                               sizeof (struct a8_erratum_reloc),
                               &a8_reloc_compare);

                  if (found)
                    {
                      char *error_message = NULL;
                      struct elf_link_hash_entry *entry;

                      /* We don't care about the error returned from this
                         function, only if there is glue or not.  */
                      entry = find_thumb_glue (info, found->sym_name,
                                               &error_message);
                      if (entry)
                        found->non_a8_stub = true;

                      /* Keep a simpler condition, for the sake of clarity.  */
                      if (htab->root.splt != NULL && found->hash != NULL
                          && found->hash->root.plt.offset != (bfd_vma) -1)
                        use_plt = true;

                      if (found->r_type == R_ARM_THM_CALL)
                        {
                          if (found->branch_type == ST_BRANCH_TO_ARM
                              || use_plt)
                            force_target_arm = true;
                          else
                            force_target_thumb = true;
                        }
                    }

                  /* Check if we have an offending branch instruction.  */

                  if (found && found->non_a8_stub)
                    /* We've already made a stub for this instruction, e.g.
                       it's a long branch or a Thumb->ARM stub.  Assume that
                       stub will suffice to work around the A8 erratum (see
                       setting of always_after_branch above).  */
                    ;
                  else if (is_bcc)
                    {
                      offset = (insn & 0x7ff) << 1;
                      offset |= (insn & 0x3f0000) >> 4;
                      offset |= (insn & 0x2000) ? 0x40000 : 0;
                      offset |= (insn & 0x800) ? 0x80000 : 0;
                      offset |= (insn & 0x4000000) ? 0x100000 : 0;
                      if (offset & 0x100000)
                        offset |= ~ ((bfd_signed_vma) 0xfffff);
                      stub_type = arm_stub_a8_veneer_b_cond;
                    }
                  else if (is_b || is_bl || is_blx)
                    {
                      int s = (insn & 0x4000000) != 0;
                      int j1 = (insn & 0x2000) != 0;
                      int j2 = (insn & 0x800) != 0;
                      int i1 = !(j1 ^ s);
                      int i2 = !(j2 ^ s);

                      offset = (insn & 0x7ff) << 1;
                      offset |= (insn & 0x3ff0000) >> 4;
                      offset |= i2 << 22;
                      offset |= i1 << 23;
                      offset |= s << 24;
                      if (offset & 0x1000000)
                        offset |= ~ ((bfd_signed_vma) 0xffffff);

                      if (is_blx)
                        offset &= ~ ((bfd_signed_vma) 3);

                      stub_type = is_blx ? arm_stub_a8_veneer_blx :
                        is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
                    }

                  if (stub_type != arm_stub_none)
                    {
                      bfd_vma pc_for_insn = base_vma + i + 4;

                      /* The original instruction is a BL, but the target is
                         an ARM instruction.  If we were not making a stub,
                         the BL would have been converted to a BLX.  Use the
                         BLX stub instead in that case.  */
                      if (htab->use_blx && force_target_arm
                          && stub_type == arm_stub_a8_veneer_bl)
                        {
                          stub_type = arm_stub_a8_veneer_blx;
                          is_blx = true;
                          is_bl = false;
                        }
                      /* Conversely, if the original instruction was
                         BLX but the target is Thumb mode, use the BL
                         stub.  */
                      else if (force_target_thumb
                               && stub_type == arm_stub_a8_veneer_blx)
                        {
                          stub_type = arm_stub_a8_veneer_bl;
                          is_blx = false;
                          is_bl = true;
                        }

                      if (is_blx)
                        pc_for_insn &= ~ ((bfd_vma) 3);

                      /* If we found a relocation, use the proper destination,
                         not the offset in the (unrelocated) instruction.
                         Note this is always done if we switched the stub type
                         above.  */
                      if (found)
                        offset =
                          (bfd_signed_vma) (found->destination - pc_for_insn);

                      /* If the stub will use a Thumb-mode branch to a
                         PLT target, redirect it to the preceding Thumb
                         entry point.  */
                      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
                        offset -= PLT_THUMB_STUB_SIZE;

                      target = pc_for_insn + offset;

                      /* The BLX stub is ARM-mode code.  Adjust the offset to
                         take the different PC value (+8 instead of +4) into
                         account.  */
                      if (stub_type == arm_stub_a8_veneer_blx)
                        offset += 4;

                      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
                        {
                          char *stub_name = NULL;

                          if (num_a8_fixes == a8_fix_table_size)
                            {
                              a8_fix_table_size *= 2;
                              a8_fixes = (struct a8_erratum_fix *)
                                  bfd_realloc (a8_fixes,
                                               sizeof (struct a8_erratum_fix)
                                               * a8_fix_table_size);
                            }

                          if (num_a8_fixes < prev_num_a8_fixes)
                            {
                              /* If we're doing a subsequent scan,
                                 check if we've found the same fix as
                                 before, and try and reuse the stub
                                 name.  */
                              stub_name = a8_fixes[num_a8_fixes].stub_name;
                              if ((a8_fixes[num_a8_fixes].section != section)
                                  || (a8_fixes[num_a8_fixes].offset != i))
                                {
                                  free (stub_name);
                                  stub_name = NULL;
                                  *stub_changed_p = true;
                                }
                            }

                          if (!stub_name)
                            {
                              stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
                              if (stub_name != NULL)
                                sprintf (stub_name, "%x:%x", section->id, i);
                            }

                          a8_fixes[num_a8_fixes].input_bfd = input_bfd;
                          a8_fixes[num_a8_fixes].section = section;
                          a8_fixes[num_a8_fixes].offset = i;
                          a8_fixes[num_a8_fixes].target_offset =
                            target - base_vma;
                          a8_fixes[num_a8_fixes].orig_insn = insn;
                          a8_fixes[num_a8_fixes].stub_name = stub_name;
                          a8_fixes[num_a8_fixes].stub_type = stub_type;
                          a8_fixes[num_a8_fixes].branch_type =
                            is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;

                          num_a8_fixes++;
                        }
                    }
                }

              i += insn_32bit ? 4 : 2;
              last_was_32bit = insn_32bit;
              last_was_branch = is_32bit_branch;
            }
        }

      if (elf_section_data (section)->this_hdr.contents == NULL)
        free (contents);
    }

  *a8_fixes_p = a8_fixes;
  *num_a8_fixes_p = num_a8_fixes;
  *a8_fix_table_size_p = a8_fix_table_size;

  return false;
}
/* Create or update a stub entry depending on whether the stub can already be
   found in HTAB.  The stub is identified by:
   - its type STUB_TYPE
   - its source branch (note that several can share the same stub) whose
     section and relocation (if any) are given by SECTION and IRELA
     respectively
   - its target symbol whose input section, hash, name, value and branch type
     are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
     respectively

   If found, the value of the stub's target symbol is updated from SYM_VALUE
   and *NEW_STUB is set to FALSE.  Otherwise, *NEW_STUB is set to
   TRUE and the stub entry is initialized.

   Returns the stub that was created or updated, or NULL if an error
   occurred.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
                       enum elf32_arm_stub_type stub_type, asection *section,
                       Elf_Internal_Rela *irela, asection *sym_sec,
                       struct elf32_arm_link_hash_entry *hash, char *sym_name,
                       bfd_vma sym_value, enum arm_st_branch_type branch_type,
                       bool *new_stub)
{
  const asection *id_sec;
  char *stub_name;
  struct elf32_arm_stub_hash_entry *stub_entry;
  unsigned int r_type;
  bool sym_claimed = arm_stub_sym_claimed (stub_type);

  BFD_ASSERT (stub_type != arm_stub_none);
  *new_stub = false;

  if (sym_claimed)
    stub_name = sym_name;
  else
    {
      BFD_ASSERT (section);
      BFD_ASSERT (section->id <= htab->top_id);

      /* Support for grouping stub sections.  */
      id_sec = htab->stub_group[section->id].link_sec;

      /* Get the name of this stub.  */
      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
                                       stub_type);
      if (!stub_name)
        return NULL;
    }

  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, false,
                                     false);
  /* The proper stub has already been created, just update its value.  */
  if (stub_entry != NULL)
    {
      if (!sym_claimed)
        free (stub_name);
      stub_entry->target_value = sym_value;
      return stub_entry;
    }

  stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
  if (stub_entry == NULL)
    {
      if (!sym_claimed)
        free (stub_name);
      return NULL;
    }

  stub_entry->target_value = sym_value;
  stub_entry->target_section = sym_sec;
  stub_entry->stub_type = stub_type;
  stub_entry->h = hash;
  stub_entry->branch_type = branch_type;

  if (sym_claimed)
    stub_entry->output_name = sym_name;
  else
    {
      if (sym_name == NULL)
        sym_name = "unnamed";
      stub_entry->output_name = (char *)
        bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
                                   + strlen (sym_name));
      if (stub_entry->output_name == NULL)
        {
          free (stub_name);
          return NULL;
        }

      /* For historical reasons, use the existing names for ARM-to-Thumb and
         Thumb-to-ARM stubs.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if ((r_type == (unsigned int) R_ARM_THM_CALL
           || r_type == (unsigned int) R_ARM_THM_JUMP24
           || r_type == (unsigned int) R_ARM_THM_JUMP19)
          && branch_type == ST_BRANCH_TO_ARM)
        sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
      else if ((r_type == (unsigned int) R_ARM_CALL
                || r_type == (unsigned int) R_ARM_JUMP24)
               && branch_type == ST_BRANCH_TO_THUMB)
        sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
      else
        sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
    }

  *new_stub = true;
  return stub_entry;
}
/* Scan symbols in INPUT_BFD to identify secure entry functions needing a
   gateway veneer to transition from non secure to secure state and create them
   accordingly.

   "ARMv8-M Security Extensions: Requirements on Development Tools" document
   defines the conditions that govern Secure Gateway veneer creation for a
   given symbol <SYM> as follows:
   - it has function type
   - it has non local binding
   - a symbol named __acle_se_<SYM> (called special symbol) exists with the
     same type, binding and value as <SYM> (called normal symbol).
   An entry function can handle secure state transition itself in which case
   its special symbol would have a different value from the normal symbol.

   OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
   entry mapping while HTAB gives the name to hash entry mapping.
   *CMSE_STUB_CREATED is increased by the number of secure gateway veneer
   created.

   The return value gives whether a stub failed to be allocated.  */

static bool
cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
           obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
           int *cmse_stub_created)
{
  const struct elf_backend_data *bed;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, j, sym_count, ext_start;
  Elf_Internal_Sym *cmse_sym, *local_syms;
  struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
  enum arm_st_branch_type branch_type;
  char *sym_name, *lsym_name;
  bfd_vma sym_value;
  asection *section;
  struct elf32_arm_stub_hash_entry *stub_entry;
  bool is_v8m, new_stub, cmse_invalid, ret = true;

  bed = get_elf_backend_data (input_bfd);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
  ext_start = symtab_hdr->sh_info;
  is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
            && out_attr[Tag_CPU_arch_profile].i == 'M');

  local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
  if (local_syms == NULL)
    local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
                                       symtab_hdr->sh_info, 0, NULL, NULL,
                                       NULL);
  if (symtab_hdr->sh_info && local_syms == NULL)
    return false;

  /* Scan symbols.  */
  for (i = 0; i < sym_count; i++)
    {
      cmse_invalid = false;

      if (i < ext_start)
        {
          cmse_sym = &local_syms[i];
          sym_name = bfd_elf_string_from_elf_section (input_bfd,
                                                      symtab_hdr->sh_link,
                                                      cmse_sym->st_name);
          if (!sym_name || !startswith (sym_name, CMSE_PREFIX))
            continue;

          /* Special symbol with local binding.  */
          cmse_invalid = true;
        }
      else
        {
          cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
          if (cmse_hash == NULL)
            continue;

          sym_name = (char *) cmse_hash->root.root.root.string;
          if (!startswith (sym_name, CMSE_PREFIX))
            continue;

          /* Special symbol has incorrect binding or type.  */
          if ((cmse_hash->root.root.type != bfd_link_hash_defined
               && cmse_hash->root.root.type != bfd_link_hash_defweak)
              || cmse_hash->root.type != STT_FUNC)
            cmse_invalid = true;
        }

      if (!is_v8m)
        {
          _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
                                "ARMv8-M architecture or later"),
                              input_bfd, sym_name);
          is_v8m = true; /* Avoid multiple warning.  */
          ret = false;
        }

      if (cmse_invalid)
        {
          _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
                                " a global or weak function symbol"),
                              input_bfd, sym_name);
          ret = false;
          if (i < ext_start)
            continue;
        }

      sym_name += strlen (CMSE_PREFIX);
      hash = (struct elf32_arm_link_hash_entry *)
        elf_link_hash_lookup (&(htab)->root, sym_name, false, false, true);

      /* No associated normal symbol or it is neither global nor weak.  */
      if (!hash
          || (hash->root.root.type != bfd_link_hash_defined
              && hash->root.root.type != bfd_link_hash_defweak)
          || hash->root.type != STT_FUNC)
        {
          /* Initialize here to avoid warning about use of possibly
             uninitialized variable.  */
          j = 0;

          if (!hash)
            {
              /* Searching for a normal symbol with local binding.  */
              for (; j < ext_start; j++)
                {
                  lsym_name =
                    bfd_elf_string_from_elf_section (input_bfd,
                                                     symtab_hdr->sh_link,
                                                     local_syms[j].st_name);
                  if (!strcmp (sym_name, lsym_name))
                    break;
                }
            }

          if (hash || j < ext_start)
            _bfd_error_handler
              (_("%pB: invalid standard symbol `%s'; it must be "
                 "a global or weak function symbol"),
               input_bfd, sym_name);
          else
            _bfd_error_handler
              (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
          ret = false;
          if (!hash)
            continue;
        }

      sym_value = hash->root.root.u.def.value;
      section = hash->root.root.u.def.section;

      if (cmse_hash->root.root.u.def.section != section)
        {
          _bfd_error_handler
            (_("%pB: `%s' and its special symbol are in different sections"),
             input_bfd, sym_name);
          ret = false;
        }
      if (cmse_hash->root.root.u.def.value != sym_value)
        continue; /* Ignore: could be an entry function starting with SG.  */

      /* If this section is a link-once section that will be discarded, then
         don't create any stubs.  */
      if (section->output_section == NULL)
        {
          _bfd_error_handler
            (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
          continue;
        }

      if (hash->root.size == 0)
        {
          _bfd_error_handler
            (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
          ret = false;
        }

      if (!ret)
        continue;
      branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
      stub_entry
        = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
                                 NULL, NULL, section, hash, sym_name,
                                 sym_value, branch_type, &new_stub);

      if (stub_entry == NULL)
        ret = false;
      else
        {
          BFD_ASSERT (new_stub);
          (*cmse_stub_created)++;
        }
    }

  if (!symtab_hdr->contents)
    free (local_syms);
  return ret;
}
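/* Illustration of the naming convention checked above (example names only):
   a secure entry function "foo" is accompanied by a special symbol
   "__acle_se_foo" (CMSE_PREFIX "foo") with the same type and binding; when
   the two symbols also share the same value the linker creates the SG veneer
   here, while a differing value means "foo" begins with its own SG sequence
   and is skipped.  */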
/* Return TRUE iff a symbol identified by its linker HASH entry is a secure
   code entry function, ie can be called from non secure code without using a
   veneer.  */

static bool
cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
{
  bfd_byte contents[4];
  uint32_t first_insn;
  asection *section;
  file_ptr offset;
  bfd *abfd;

  /* Defined symbol of function type.  */
  if (hash->root.root.type != bfd_link_hash_defined
      && hash->root.root.type != bfd_link_hash_defweak)
    return false;
  if (hash->root.type != STT_FUNC)
    return false;

  /* Read first instruction.  */
  section = hash->root.root.u.def.section;
  abfd = section->owner;
  offset = hash->root.root.u.def.value - section->vma;
  if (!bfd_get_section_contents (abfd, section, contents, offset,
                                 sizeof (contents)))
    return false;

  first_insn = bfd_get_32 (abfd, contents);

  /* Starts by SG instruction.  */
  return first_insn == 0xe97fe97f;
}
/* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
   secure gateway veneers (ie. the veneers was not in the input import library)
   and there is no output import library (GEN_INFO->out_implib_bfd is NULL.  */

static bool
arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct bfd_link_info *info;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) gen_info;

  if (info->out_implib_bfd)
    return true;

  if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
    return true;

  if (stub_entry->stub_offset == (bfd_vma) -1)
    _bfd_error_handler ("  %s", stub_entry->output_name);

  return true;
}
6219 /* Set offset of each secure gateway veneers so that its address remain
6220 identical to the one in the input import library referred by
6221 HTAB->in_implib_bfd. A warning is issued for veneers that disappeared
6222 (present in input import library but absent from the executable being
6223 linked) or if new veneers appeared and there is no output import library
6224 (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6225 number of secure gateway veneers found in the input import library.
6227 The function returns whether an error occurred. If no error occurred,
6228 *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
6229 and this function and HTAB->new_cmse_stub_offset is set to the biggest
6230 veneer observed set for new veneers to be layed out after. */
6233 set_cmse_veneer_addr_from_implib (struct bfd_link_info
*info
,
6234 struct elf32_arm_link_hash_table
*htab
,
6235 int *cmse_stub_created
)
6242 asection
*stub_out_sec
;
6244 Elf_Internal_Sym
*intsym
;
6245 const char *out_sec_name
;
6246 bfd_size_type cmse_stub_size
;
6247 asymbol
**sympp
= NULL
, *sym
;
6248 struct elf32_arm_link_hash_entry
*hash
;
6249 const insn_sequence
*cmse_stub_template
;
6250 struct elf32_arm_stub_hash_entry
*stub_entry
;
6251 int cmse_stub_template_size
, new_cmse_stubs_created
= *cmse_stub_created
;
6252 bfd_vma veneer_value
, stub_offset
, next_cmse_stub_offset
;
6253 bfd_vma cmse_stub_array_start
= (bfd_vma
) -1, cmse_stub_sec_vma
= 0;
6255 /* No input secure gateway import library. */
6256 if (!htab
->in_implib_bfd
)
6259 in_implib_bfd
= htab
->in_implib_bfd
;
6260 if (!htab
->cmse_implib
)
6262 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6263 "Gateway import libraries"), in_implib_bfd
);
6267 /* Get symbol table size. */
6268 symsize
= bfd_get_symtab_upper_bound (in_implib_bfd
);
6272 /* Read in the input secure gateway import library's symbol table. */
6273 sympp
= (asymbol
**) bfd_malloc (symsize
);
6277 symcount
= bfd_canonicalize_symtab (in_implib_bfd
, sympp
);
6284 htab
->new_cmse_stub_offset
= 0;
6286 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only
,
6287 &cmse_stub_template
,
6288 &cmse_stub_template_size
);
6290 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only
);
6292 bfd_get_section_by_name (htab
->obfd
, out_sec_name
);
6293 if (stub_out_sec
!= NULL
)
6294 cmse_stub_sec_vma
= stub_out_sec
->vma
;
  /* Set the addresses of the veneers mentioned in the input secure gateway
     import library's symbol table.  */
6298 for (i
= 0; i
< symcount
; i
++)
6302 sym_name
= (char *) bfd_asymbol_name (sym
);
6303 intsym
= &((elf_symbol_type
*) sym
)->internal_elf_sym
;
6305 if (sym
->section
!= bfd_abs_section_ptr
6306 || !(flags
& (BSF_GLOBAL
| BSF_WEAK
))
6307 || (flags
& BSF_FUNCTION
) != BSF_FUNCTION
6308 || (ARM_GET_SYM_BRANCH_TYPE (intsym
->st_target_internal
)
6309 != ST_BRANCH_TO_THUMB
))
6311 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6312 "symbol should be absolute, global and "
6313 "refer to Thumb functions"),
6314 in_implib_bfd
, sym_name
);
6319 veneer_value
= bfd_asymbol_value (sym
);
6320 stub_offset
= veneer_value
- cmse_stub_sec_vma
;
6321 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, sym_name
,
6323 hash
= (struct elf32_arm_link_hash_entry
*)
6324 elf_link_hash_lookup (&(htab
)->root
, sym_name
, false, false, true);
      /* A stub entry should have been created by cmse_scan, or the symbol
	 should be that of a secure function callable from non-secure code.  */
6328 if (!stub_entry
&& !hash
)
6333 (_("entry function `%s' disappeared from secure code"), sym_name
);
6334 hash
= (struct elf32_arm_link_hash_entry
*)
6335 elf_link_hash_lookup (&(htab
)->root
, sym_name
, true, true, true);
6337 = elf32_arm_create_stub (htab
, arm_stub_cmse_branch_thumb_only
,
6338 NULL
, NULL
, bfd_abs_section_ptr
, hash
,
6339 sym_name
, veneer_value
,
6340 ST_BRANCH_TO_THUMB
, &new_stub
);
6341 if (stub_entry
== NULL
)
6345 BFD_ASSERT (new_stub
);
6346 new_cmse_stubs_created
++;
6347 (*cmse_stub_created
)++;
6349 stub_entry
->stub_template_size
= stub_entry
->stub_size
= 0;
6350 stub_entry
->stub_offset
= stub_offset
;
      /* The symbol found is not callable from non-secure code.  */
6353 else if (!stub_entry
)
6355 if (!cmse_entry_fct_p (hash
))
6357 _bfd_error_handler (_("`%s' refers to a non entry function"),
6365 /* Only stubs for SG veneers should have been created. */
6366 BFD_ASSERT (stub_entry
->stub_type
== arm_stub_cmse_branch_thumb_only
);
6368 /* Check visibility hasn't changed. */
6369 if (!!(flags
& BSF_GLOBAL
)
6370 != (hash
->root
.root
.type
== bfd_link_hash_defined
))
6372 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd
,
6375 stub_entry
->stub_offset
= stub_offset
;
6378 /* Size should match that of a SG veneer. */
6379 if (intsym
->st_size
!= cmse_stub_size
)
6381 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6382 in_implib_bfd
, sym_name
);
6386 /* Previous veneer address is before current SG veneer section. */
6387 if (veneer_value
< cmse_stub_sec_vma
)
6389 /* Avoid offset underflow. */
6391 stub_entry
->stub_offset
= 0;
6396 /* Complain if stub offset not a multiple of stub size. */
6397 if (stub_offset
% cmse_stub_size
)
6400 (_("offset of veneer for entry function `%s' not a multiple of "
6401 "its size"), sym_name
);
6408 new_cmse_stubs_created
--;
6409 if (veneer_value
< cmse_stub_array_start
)
6410 cmse_stub_array_start
= veneer_value
;
6411 next_cmse_stub_offset
= stub_offset
+ ((cmse_stub_size
+ 7) & ~7);
6412 if (next_cmse_stub_offset
> htab
->new_cmse_stub_offset
)
6413 htab
->new_cmse_stub_offset
= next_cmse_stub_offset
;
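      /* Note (added): SG veneers are kept on 8-byte boundaries, so the next
	 free offset is this veneer's offset plus its size rounded up to a
	 multiple of 8; for instance a veneer of size 12 at offset 0 puts the
	 next one at offset 16, since (12 + 7) & ~7 == 16.  */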
6416 if (!info
->out_implib_bfd
&& new_cmse_stubs_created
!= 0)
6418 BFD_ASSERT (new_cmse_stubs_created
> 0);
6420 (_("new entry function(s) introduced but no output import library "
6422 bfd_hash_traverse (&htab
->stub_hash_table
, arm_list_new_cmse_stub
, info
);
6425 if (cmse_stub_array_start
!= cmse_stub_sec_vma
)
6428 (_("start address of `%s' is different from previous link"),
/* Determine and set the size of the stub section for a final link.

   The basic idea here is to examine all the relocations looking for
   PC-relative calls to a target that is unreachable with a "bl"
   instruction.  */
6445 elf32_arm_size_stubs (bfd
*output_bfd
,
6447 struct bfd_link_info
*info
,
6448 bfd_signed_vma group_size
,
6449 asection
* (*add_stub_section
) (const char *, asection
*,
6452 void (*layout_sections_again
) (void))
6455 obj_attribute
*out_attr
;
6456 int cmse_stub_created
= 0;
6457 bfd_size_type stub_group_size
;
6458 bool m_profile
, stubs_always_after_branch
, first_veneer_scan
= true;
6459 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
6460 struct a8_erratum_fix
*a8_fixes
= NULL
;
6461 unsigned int num_a8_fixes
= 0, a8_fix_table_size
= 10;
6462 struct a8_erratum_reloc
*a8_relocs
= NULL
;
6463 unsigned int num_a8_relocs
= 0, a8_reloc_table_size
= 10, i
;
6468 if (htab
->fix_cortex_a8
)
6470 a8_fixes
= (struct a8_erratum_fix
*)
6471 bfd_zmalloc (sizeof (struct a8_erratum_fix
) * a8_fix_table_size
);
6472 a8_relocs
= (struct a8_erratum_reloc
*)
6473 bfd_zmalloc (sizeof (struct a8_erratum_reloc
) * a8_reloc_table_size
);
6476 /* Propagate mach to stub bfd, because it may not have been
6477 finalized when we created stub_bfd. */
6478 bfd_set_arch_mach (stub_bfd
, bfd_get_arch (output_bfd
),
6479 bfd_get_mach (output_bfd
));
6481 /* Stash our params away. */
6482 htab
->stub_bfd
= stub_bfd
;
6483 htab
->add_stub_section
= add_stub_section
;
6484 htab
->layout_sections_again
= layout_sections_again
;
6485 stubs_always_after_branch
= group_size
< 0;
6487 out_attr
= elf_known_obj_attributes_proc (output_bfd
);
6488 m_profile
= out_attr
[Tag_CPU_arch_profile
].i
== 'M';
6490 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6491 as the first half of a 32-bit branch straddling two 4K pages. This is a
6492 crude way of enforcing that. */
6493 if (htab
->fix_cortex_a8
)
6494 stubs_always_after_branch
= 1;
6497 stub_group_size
= -group_size
;
6499 stub_group_size
= group_size
;
6501 if (stub_group_size
== 1)
      /* Default values.  */
      /* The Thumb branch range of +/-4MB has to be used as the default
	 maximum size (a given section can contain both ARM and Thumb
	 code, so the worst case has to be taken into account).

	 This value is 24K less than that, which allows for 2025
	 12-byte stubs.  If we exceed that, then we will fail to link.
	 The user will have to relink with an explicit group size
	 option.  */
      stub_group_size = 4170000;
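      /* Worked numbers (added, plain arithmetic): the Thumb branch range is
	 +/-4MB = 4194304 bytes, and 4194304 - 4170000 = 24304 bytes of
	 headroom, i.e. 24304 / 12 = 2025 twelve-byte stubs, matching the
	 comment above.  */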
6515 group_sections (htab
, stub_group_size
, stubs_always_after_branch
);
6517 /* If we're applying the cortex A8 fix, we need to determine the
6518 program header size now, because we cannot change it later --
6519 that could alter section placements. Notice the A8 erratum fix
6520 ends up requiring the section addresses to remain unchanged
6521 modulo the page size. That's something we cannot represent
6522 inside BFD, and we don't want to force the section alignment to
6523 be the page size. */
6524 if (htab
->fix_cortex_a8
)
6525 (*htab
->layout_sections_again
) ();
6530 unsigned int bfd_indx
;
6532 enum elf32_arm_stub_type stub_type
;
6533 bool stub_changed
= false;
6534 unsigned prev_num_a8_fixes
= num_a8_fixes
;
6537 for (input_bfd
= info
->input_bfds
, bfd_indx
= 0;
6539 input_bfd
= input_bfd
->link
.next
, bfd_indx
++)
6541 Elf_Internal_Shdr
*symtab_hdr
;
6543 Elf_Internal_Sym
*local_syms
= NULL
;
6545 if (!is_arm_elf (input_bfd
))
6547 if ((input_bfd
->flags
& DYNAMIC
) != 0
6548 && (elf_sym_hashes (input_bfd
) == NULL
6549 || (elf_dyn_lib_class (input_bfd
) & DYN_AS_NEEDED
) != 0))
6554 /* We'll need the symbol table in a second. */
6555 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
6556 if (symtab_hdr
->sh_info
== 0)
	  /* Limit the scan of symbols to object files whose profile is
	     Microcontroller, so as not to hinder performance in the general
	     case.  */
6561 if (m_profile
&& first_veneer_scan
)
6563 struct elf_link_hash_entry
**sym_hashes
;
6565 sym_hashes
= elf_sym_hashes (input_bfd
);
6566 if (!cmse_scan (input_bfd
, htab
, out_attr
, sym_hashes
,
6567 &cmse_stub_created
))
6568 goto error_ret_free_local
;
6570 if (cmse_stub_created
!= 0)
6571 stub_changed
= true;
6574 /* Walk over each section attached to the input bfd. */
6575 for (section
= input_bfd
->sections
;
6577 section
= section
->next
)
6579 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
	  /* If there aren't any relocs, then there's nothing more
	     to do.  */
6583 if ((section
->flags
& SEC_RELOC
) == 0
6584 || section
->reloc_count
== 0
6585 || (section
->flags
& SEC_CODE
) == 0)
6588 /* If this section is a link-once section that will be
6589 discarded, then don't create any stubs. */
6590 if (section
->output_section
== NULL
6591 || section
->output_section
->owner
!= output_bfd
)
6594 /* Get the relocs. */
6596 = _bfd_elf_link_read_relocs (input_bfd
, section
, NULL
,
6597 NULL
, info
->keep_memory
);
6598 if (internal_relocs
== NULL
)
6599 goto error_ret_free_local
;
6601 /* Now examine each relocation. */
6602 irela
= internal_relocs
;
6603 irelaend
= irela
+ section
->reloc_count
;
6604 for (; irela
< irelaend
; irela
++)
6606 unsigned int r_type
, r_indx
;
6609 bfd_vma destination
;
6610 struct elf32_arm_link_hash_entry
*hash
;
6611 const char *sym_name
;
6612 unsigned char st_type
;
6613 enum arm_st_branch_type branch_type
;
6614 bool created_stub
= false;
6616 r_type
= ELF32_R_TYPE (irela
->r_info
);
6617 r_indx
= ELF32_R_SYM (irela
->r_info
);
6619 if (r_type
>= (unsigned int) R_ARM_max
)
6621 bfd_set_error (bfd_error_bad_value
);
6622 error_ret_free_internal
:
6623 if (elf_section_data (section
)->relocs
== NULL
)
6624 free (internal_relocs
);
6626 error_ret_free_local
:
6627 if (symtab_hdr
->contents
!= (unsigned char *) local_syms
)
6633 if (r_indx
>= symtab_hdr
->sh_info
)
6634 hash
= elf32_arm_hash_entry
6635 (elf_sym_hashes (input_bfd
)
6636 [r_indx
- symtab_hdr
->sh_info
]);
	      /* Only look for stubs on branch instructions, or
		 non-relaxed TLSCALL.  */
6640 if ((r_type
!= (unsigned int) R_ARM_CALL
)
6641 && (r_type
!= (unsigned int) R_ARM_THM_CALL
)
6642 && (r_type
!= (unsigned int) R_ARM_JUMP24
)
6643 && (r_type
!= (unsigned int) R_ARM_THM_JUMP19
)
6644 && (r_type
!= (unsigned int) R_ARM_THM_XPC22
)
6645 && (r_type
!= (unsigned int) R_ARM_THM_JUMP24
)
6646 && (r_type
!= (unsigned int) R_ARM_PLT32
)
6647 && !((r_type
== (unsigned int) R_ARM_TLS_CALL
6648 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
6649 && r_type
== (elf32_arm_tls_transition
6651 (struct elf_link_hash_entry
*) hash
))
6652 && ((hash
? hash
->tls_type
6653 : (elf32_arm_local_got_tls_type
6654 (input_bfd
)[r_indx
]))
6655 & GOT_TLS_GDESC
) != 0))
6658 /* Now determine the call target, its name, value,
6665 if (r_type
== (unsigned int) R_ARM_TLS_CALL
6666 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
6668 /* A non-relaxed TLS call. The target is the
6669 plt-resident trampoline and nothing to do
6671 BFD_ASSERT (htab
->tls_trampoline
> 0);
6672 sym_sec
= htab
->root
.splt
;
6673 sym_value
= htab
->tls_trampoline
;
6676 branch_type
= ST_BRANCH_TO_ARM
;
6680 /* It's a local symbol. */
6681 Elf_Internal_Sym
*sym
;
6683 if (local_syms
== NULL
)
6686 = (Elf_Internal_Sym
*) symtab_hdr
->contents
;
6687 if (local_syms
== NULL
)
6689 = bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
6690 symtab_hdr
->sh_info
, 0,
6692 if (local_syms
== NULL
)
6693 goto error_ret_free_internal
;
6696 sym
= local_syms
+ r_indx
;
6697 if (sym
->st_shndx
== SHN_UNDEF
)
6698 sym_sec
= bfd_und_section_ptr
;
6699 else if (sym
->st_shndx
== SHN_ABS
)
6700 sym_sec
= bfd_abs_section_ptr
;
6701 else if (sym
->st_shndx
== SHN_COMMON
)
6702 sym_sec
= bfd_com_section_ptr
;
6705 bfd_section_from_elf_index (input_bfd
, sym
->st_shndx
);
6708 /* This is an undefined symbol. It can never
6712 if (ELF_ST_TYPE (sym
->st_info
) != STT_SECTION
)
6713 sym_value
= sym
->st_value
;
6714 destination
= (sym_value
+ irela
->r_addend
6715 + sym_sec
->output_offset
6716 + sym_sec
->output_section
->vma
);
6717 st_type
= ELF_ST_TYPE (sym
->st_info
);
6719 ARM_GET_SYM_BRANCH_TYPE (sym
->st_target_internal
);
6721 = bfd_elf_string_from_elf_section (input_bfd
,
6722 symtab_hdr
->sh_link
,
6727 /* It's an external symbol. */
6728 while (hash
->root
.root
.type
== bfd_link_hash_indirect
6729 || hash
->root
.root
.type
== bfd_link_hash_warning
)
6730 hash
= ((struct elf32_arm_link_hash_entry
*)
6731 hash
->root
.root
.u
.i
.link
);
6733 if (hash
->root
.root
.type
== bfd_link_hash_defined
6734 || hash
->root
.root
.type
== bfd_link_hash_defweak
)
6736 sym_sec
= hash
->root
.root
.u
.def
.section
;
6737 sym_value
= hash
->root
.root
.u
.def
.value
;
6739 struct elf32_arm_link_hash_table
*globals
=
6740 elf32_arm_hash_table (info
);
		      /* For a destination in a shared library, use the PLT
			 stub as the target address to decide whether a branch
			 stub is needed.  */
6747 && globals
->root
.splt
!= NULL
6749 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
6751 sym_sec
= globals
->root
.splt
;
6752 sym_value
= hash
->root
.plt
.offset
;
6753 if (sym_sec
->output_section
!= NULL
)
6754 destination
= (sym_value
6755 + sym_sec
->output_offset
6756 + sym_sec
->output_section
->vma
);
6758 else if (sym_sec
->output_section
!= NULL
)
6759 destination
= (sym_value
+ irela
->r_addend
6760 + sym_sec
->output_offset
6761 + sym_sec
->output_section
->vma
);
6763 else if ((hash
->root
.root
.type
== bfd_link_hash_undefined
)
6764 || (hash
->root
.root
.type
== bfd_link_hash_undefweak
))
6766 /* For a shared library, use the PLT stub as
6767 target address to decide whether a long
6768 branch stub is needed.
6769 For absolute code, they cannot be handled. */
6770 struct elf32_arm_link_hash_table
*globals
=
6771 elf32_arm_hash_table (info
);
6774 && globals
->root
.splt
!= NULL
6776 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
6778 sym_sec
= globals
->root
.splt
;
6779 sym_value
= hash
->root
.plt
.offset
;
6780 if (sym_sec
->output_section
!= NULL
)
6781 destination
= (sym_value
6782 + sym_sec
->output_offset
6783 + sym_sec
->output_section
->vma
);
6790 bfd_set_error (bfd_error_bad_value
);
6791 goto error_ret_free_internal
;
6793 st_type
= hash
->root
.type
;
6795 ARM_GET_SYM_BRANCH_TYPE (hash
->root
.target_internal
);
6796 sym_name
= hash
->root
.root
.root
.string
;
6802 struct elf32_arm_stub_hash_entry
*stub_entry
;
6804 /* Determine what (if any) linker stub is needed. */
6805 stub_type
= arm_type_of_stub (info
, section
, irela
,
6806 st_type
, &branch_type
,
6807 hash
, destination
, sym_sec
,
6808 input_bfd
, sym_name
);
6809 if (stub_type
== arm_stub_none
)
6812 /* We've either created a stub for this reloc already,
6813 or we are about to. */
6815 elf32_arm_create_stub (htab
, stub_type
, section
, irela
,
6817 (char *) sym_name
, sym_value
,
6818 branch_type
, &new_stub
);
6820 created_stub
= stub_entry
!= NULL
;
6822 goto error_ret_free_internal
;
6826 stub_changed
= true;
	      /* Look for relocations which might trigger the Cortex-A8
		 erratum.  */
6832 if (htab
->fix_cortex_a8
6833 && (r_type
== (unsigned int) R_ARM_THM_JUMP24
6834 || r_type
== (unsigned int) R_ARM_THM_JUMP19
6835 || r_type
== (unsigned int) R_ARM_THM_CALL
6836 || r_type
== (unsigned int) R_ARM_THM_XPC22
))
6838 bfd_vma from
= section
->output_section
->vma
6839 + section
->output_offset
6842 if ((from
& 0xfff) == 0xffe)
		  /* Found a candidate.  Note we haven't checked the
		     destination is within 4K here: if we do so (and
		     don't create an entry in a8_relocs) we can't tell
		     that a branch should have been relocated when
		     scanning later.  */
6849 if (num_a8_relocs
== a8_reloc_table_size
)
6851 a8_reloc_table_size
*= 2;
6852 a8_relocs
= (struct a8_erratum_reloc
*)
6853 bfd_realloc (a8_relocs
,
6854 sizeof (struct a8_erratum_reloc
)
6855 * a8_reloc_table_size
);
6858 a8_relocs
[num_a8_relocs
].from
= from
;
6859 a8_relocs
[num_a8_relocs
].destination
= destination
;
6860 a8_relocs
[num_a8_relocs
].r_type
= r_type
;
6861 a8_relocs
[num_a8_relocs
].branch_type
= branch_type
;
6862 a8_relocs
[num_a8_relocs
].sym_name
= sym_name
;
6863 a8_relocs
[num_a8_relocs
].non_a8_stub
= created_stub
;
6864 a8_relocs
[num_a8_relocs
].hash
= hash
;
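		  /* Note (added): (from & 0xfff) == 0xffe means the first
		     halfword of this 32-bit Thumb branch occupies the last
		     two bytes of a 4K page and the second halfword the first
		     two bytes of the next page, i.e. the branch straddles a
		     4K boundary, which is the pattern the Cortex-A8 erratum
		     workaround must examine.  */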
6871 /* We're done with the internal relocs, free them. */
6872 if (elf_section_data (section
)->relocs
== NULL
)
6873 free (internal_relocs
);
6876 if (htab
->fix_cortex_a8
)
6878 /* Sort relocs which might apply to Cortex-A8 erratum. */
6879 qsort (a8_relocs
, num_a8_relocs
,
6880 sizeof (struct a8_erratum_reloc
),
6883 /* Scan for branches which might trigger Cortex-A8 erratum. */
6884 if (cortex_a8_erratum_scan (input_bfd
, info
, &a8_fixes
,
6885 &num_a8_fixes
, &a8_fix_table_size
,
6886 a8_relocs
, num_a8_relocs
,
6887 prev_num_a8_fixes
, &stub_changed
)
6889 goto error_ret_free_local
;
6892 if (local_syms
!= NULL
6893 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
6895 if (!info
->keep_memory
)
6898 symtab_hdr
->contents
= (unsigned char *) local_syms
;
6902 if (first_veneer_scan
6903 && !set_cmse_veneer_addr_from_implib (info
, htab
,
6904 &cmse_stub_created
))
6907 if (prev_num_a8_fixes
!= num_a8_fixes
)
6908 stub_changed
= true;
      /* OK, we've added some stubs.  Find out the new size of the
	 stub sections.  */
6915 for (stub_sec
= htab
->stub_bfd
->sections
;
6917 stub_sec
= stub_sec
->next
)
6919 /* Ignore non-stub sections. */
6920 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
      /* Add new SG veneers after those already in the input import
	 library.  */
6928 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
;
6931 bfd_vma
*start_offset_p
;
6932 asection
**stub_sec_p
;
6934 start_offset_p
= arm_new_stubs_start_offset_ptr (htab
, stub_type
);
6935 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
6936 if (start_offset_p
== NULL
)
6939 BFD_ASSERT (stub_sec_p
!= NULL
);
6940 if (*stub_sec_p
!= NULL
)
6941 (*stub_sec_p
)->size
= *start_offset_p
;
6944 /* Compute stub section size, considering padding. */
6945 bfd_hash_traverse (&htab
->stub_hash_table
, arm_size_one_stub
, htab
);
6946 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
;
6950 asection
**stub_sec_p
;
6952 padding
= arm_dedicated_stub_section_padding (stub_type
);
6953 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
	  /* Skip if there is no stub input section or no stub section padding
	     required.  */
6956 if ((stub_sec_p
!= NULL
&& *stub_sec_p
== NULL
) || padding
== 0)
6958 /* Stub section padding required but no dedicated section. */
6959 BFD_ASSERT (stub_sec_p
);
6961 size
= (*stub_sec_p
)->size
;
6962 size
= (size
+ padding
- 1) & ~(padding
- 1);
6963 (*stub_sec_p
)->size
= size
;
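	  /* Note (added): the rounding above assumes PADDING is a power of
	     two; for example a section size of 10 with padding 8 becomes
	     (10 + 8 - 1) & ~7 == 16.  */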
6966 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6967 if (htab
->fix_cortex_a8
)
6968 for (i
= 0; i
< num_a8_fixes
; i
++)
6970 stub_sec
= elf32_arm_create_or_find_stub_sec (NULL
,
6971 a8_fixes
[i
].section
, htab
, a8_fixes
[i
].stub_type
);
6973 if (stub_sec
== NULL
)
6977 += find_stub_size_and_template (a8_fixes
[i
].stub_type
, NULL
,
6982 /* Ask the linker to do its stuff. */
6983 (*htab
->layout_sections_again
) ();
6984 first_veneer_scan
= false;
6987 /* Add stubs for Cortex-A8 erratum fixes now. */
6988 if (htab
->fix_cortex_a8
)
6990 for (i
= 0; i
< num_a8_fixes
; i
++)
6992 struct elf32_arm_stub_hash_entry
*stub_entry
;
6993 char *stub_name
= a8_fixes
[i
].stub_name
;
6994 asection
*section
= a8_fixes
[i
].section
;
6995 unsigned int section_id
= a8_fixes
[i
].section
->id
;
6996 asection
*link_sec
= htab
->stub_group
[section_id
].link_sec
;
6997 asection
*stub_sec
= htab
->stub_group
[section_id
].stub_sec
;
6998 const insn_sequence
*template_sequence
;
6999 int template_size
, size
= 0;
7001 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
7003 if (stub_entry
== NULL
)
7005 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
7006 section
->owner
, stub_name
);
7010 stub_entry
->stub_sec
= stub_sec
;
7011 stub_entry
->stub_offset
= (bfd_vma
) -1;
7012 stub_entry
->id_sec
= link_sec
;
7013 stub_entry
->stub_type
= a8_fixes
[i
].stub_type
;
7014 stub_entry
->source_value
= a8_fixes
[i
].offset
;
7015 stub_entry
->target_section
= a8_fixes
[i
].section
;
7016 stub_entry
->target_value
= a8_fixes
[i
].target_offset
;
7017 stub_entry
->orig_insn
= a8_fixes
[i
].orig_insn
;
7018 stub_entry
->branch_type
= a8_fixes
[i
].branch_type
;
7020 size
= find_stub_size_and_template (a8_fixes
[i
].stub_type
,
7024 stub_entry
->stub_size
= size
;
7025 stub_entry
->stub_template
= template_sequence
;
7026 stub_entry
->stub_template_size
= template_size
;
7029 /* Stash the Cortex-A8 erratum fix array for use later in
7030 elf32_arm_write_section(). */
7031 htab
->a8_erratum_fixes
= a8_fixes
;
7032 htab
->num_a8_erratum_fixes
= num_a8_fixes
;
7036 htab
->a8_erratum_fixes
= NULL
;
7037 htab
->num_a8_erratum_fixes
= 0;
/* Build all the stubs associated with the current output file.  The
   stubs are kept in a hash table attached to the main linker hash
   table.  We also set up the .plt entries for statically linked PIC
   functions here.  This function is called via arm_elf_finish in the
   linker.  */
7049 elf32_arm_build_stubs (struct bfd_link_info
*info
)
7052 struct bfd_hash_table
*table
;
7053 enum elf32_arm_stub_type stub_type
;
7054 struct elf32_arm_link_hash_table
*htab
;
7056 htab
= elf32_arm_hash_table (info
);
7060 for (stub_sec
= htab
->stub_bfd
->sections
;
7062 stub_sec
= stub_sec
->next
)
7066 /* Ignore non-stub sections. */
7067 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
      /* Allocate memory to hold the linker stubs.  Zeroing the stub sections
	 must at least be done for stub sections requiring padding and for SG
	 veneers, to ensure that non-secure code branching to a removed SG
	 veneer causes an error.  */
7074 size
= stub_sec
->size
;
7075 stub_sec
->contents
= (unsigned char *) bfd_zalloc (htab
->stub_bfd
, size
);
7076 if (stub_sec
->contents
== NULL
&& size
!= 0)
7082 /* Add new SG veneers after those already in the input import library. */
7083 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
; stub_type
++)
7085 bfd_vma
*start_offset_p
;
7086 asection
**stub_sec_p
;
7088 start_offset_p
= arm_new_stubs_start_offset_ptr (htab
, stub_type
);
7089 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
7090 if (start_offset_p
== NULL
)
7093 BFD_ASSERT (stub_sec_p
!= NULL
);
7094 if (*stub_sec_p
!= NULL
)
7095 (*stub_sec_p
)->size
= *start_offset_p
;
7098 /* Build the stubs as directed by the stub hash table. */
7099 table
= &htab
->stub_hash_table
;
7100 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
7101 if (htab
->fix_cortex_a8
)
      /* Place the Cortex-A8 stubs last.  */
7104 htab
->fix_cortex_a8
= -1;
7105 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
7111 /* Locate the Thumb encoded calling stub for NAME. */
7113 static struct elf_link_hash_entry
*
7114 find_thumb_glue (struct bfd_link_info
*link_info
,
7116 char **error_message
)
7119 struct elf_link_hash_entry
*hash
;
7120 struct elf32_arm_link_hash_table
*hash_table
;
7122 /* We need a pointer to the armelf specific hash table. */
7123 hash_table
= elf32_arm_hash_table (link_info
);
7124 if (hash_table
== NULL
)
7127 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
7128 + strlen (THUMB2ARM_GLUE_ENTRY_NAME
) + 1);
7130 BFD_ASSERT (tmp_name
);
7132 sprintf (tmp_name
, THUMB2ARM_GLUE_ENTRY_NAME
, name
);
7134 hash
= elf_link_hash_lookup
7135 (&(hash_table
)->root
, tmp_name
, false, false, true);
7139 *error_message
= bfd_asprintf (_("unable to find %s glue '%s' for '%s'"),
7140 "Thumb", tmp_name
, name
);
7141 if (*error_message
== NULL
)
7142 *error_message
= (char *) bfd_errmsg (bfd_error_system_call
);
7150 /* Locate the ARM encoded calling stub for NAME. */
7152 static struct elf_link_hash_entry
*
7153 find_arm_glue (struct bfd_link_info
*link_info
,
7155 char **error_message
)
7158 struct elf_link_hash_entry
*myh
;
7159 struct elf32_arm_link_hash_table
*hash_table
;
7161 /* We need a pointer to the elfarm specific hash table. */
7162 hash_table
= elf32_arm_hash_table (link_info
);
7163 if (hash_table
== NULL
)
7166 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
7167 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
7168 BFD_ASSERT (tmp_name
);
7170 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
7172 myh
= elf_link_hash_lookup
7173 (&(hash_table
)->root
, tmp_name
, false, false, true);
7177 *error_message
= bfd_asprintf (_("unable to find %s glue '%s' for '%s'"),
7178 "ARM", tmp_name
, name
);
7179 if (*error_message
== NULL
)
7180 *error_message
= (char *) bfd_errmsg (bfd_error_system_call
);
7187 /* ARM->Thumb glue (static images):
7191 ldr r12, __func_addr
7194 .word func @ behave as if you saw a ARM_32 reloc.
7201 .word func @ behave as if you saw a ARM_32 reloc.
7203 (relocatable images)
7206 ldr r12, __func_offset
#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
static const insn32 a2t3_func_addr_insn = 0x00000001;

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
static const insn32 a2t2v5_func_addr_insn = 0x00000001;

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
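/* Note (added; the decodings are my reading of the constants above):
	a2t1_ldr_insn      0xe59fc000    ldr  r12, [pc]      @ load address word
	a2t2_bx_r12_insn   0xe12fff1c    bx   r12
	a2t1v5_ldr_insn    0xe51ff004    ldr  pc, [pc, #-4]  @ v5: load into pc
	a2t1p_ldr_insn     0xe59fc004    ldr  r12, [pc, #4]
	a2t2p_add_pc_insn  0xe08cc00f    add  r12, r12, pc
	a2t3p_bx_r12_insn  0xe12fff1c    bx   r12
   The *_func_addr_insn words are placeholders for the address (or, in the
   PIC variant, offset) word that is patched in when the glue is emitted.  */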
7226 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
7230 __func_from_thumb: __func_from_thumb:
7232 nop ldr r6, __func_addr
#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;
static const insn16 t2a2_noop_insn = 0x46c0;
static const insn32 t2a3_b_insn = 0xea000000;

#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;
static const insn32 armbx2_moveq_insn = 0x01a0f000;
static const insn32 armbx3_bx_insn = 0xe12fff10;
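/* Note (added; interpretation mine, from the standard ARM encodings): with
   the register number N patched in, the three words above assemble to
	tst   rN, #1
	moveq pc, rN
	bx    rN
   so an ARM-state destination (bit 0 clear) is reached with a plain MOV to
   PC, and only a Thumb destination falls through to BX.  */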
7256 #ifndef ELFARM_NABI_C_INCLUDED
7258 arm_allocate_glue_section_space (bfd
* abfd
, bfd_size_type size
, const char * name
)
7261 bfd_byte
* contents
;
7265 /* Do not include empty glue sections in the output. */
7268 s
= bfd_get_linker_section (abfd
, name
);
7270 s
->flags
|= SEC_EXCLUDE
;
7275 BFD_ASSERT (abfd
!= NULL
);
7277 s
= bfd_get_linker_section (abfd
, name
);
7278 BFD_ASSERT (s
!= NULL
);
7280 contents
= (bfd_byte
*) bfd_zalloc (abfd
, size
);
7282 BFD_ASSERT (s
->size
== size
);
7283 s
->contents
= contents
;
7287 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info
* info
)
7289 struct elf32_arm_link_hash_table
* globals
;
7291 globals
= elf32_arm_hash_table (info
);
7292 BFD_ASSERT (globals
!= NULL
);
7294 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7295 globals
->arm_glue_size
,
7296 ARM2THUMB_GLUE_SECTION_NAME
);
7298 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7299 globals
->thumb_glue_size
,
7300 THUMB2ARM_GLUE_SECTION_NAME
);
7302 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7303 globals
->vfp11_erratum_glue_size
,
7304 VFP11_ERRATUM_VENEER_SECTION_NAME
);
7306 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7307 globals
->stm32l4xx_erratum_glue_size
,
7308 STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
7310 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7311 globals
->bx_glue_size
,
7312 ARM_BX_GLUE_SECTION_NAME
);
/* Allocate space and symbols for calling a Thumb function from ARM mode.
   Returns the symbol identifying the stub.  */
7320 static struct elf_link_hash_entry
*
7321 record_arm_to_thumb_glue (struct bfd_link_info
* link_info
,
7322 struct elf_link_hash_entry
* h
)
7324 const char * name
= h
->root
.root
.string
;
7327 struct elf_link_hash_entry
* myh
;
7328 struct bfd_link_hash_entry
* bh
;
7329 struct elf32_arm_link_hash_table
* globals
;
7333 globals
= elf32_arm_hash_table (link_info
);
7334 BFD_ASSERT (globals
!= NULL
);
7335 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7337 s
= bfd_get_linker_section
7338 (globals
->bfd_of_glue_owner
, ARM2THUMB_GLUE_SECTION_NAME
);
7340 BFD_ASSERT (s
!= NULL
);
7342 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
7343 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
7344 BFD_ASSERT (tmp_name
);
7346 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
7348 myh
= elf_link_hash_lookup
7349 (&(globals
)->root
, tmp_name
, false, false, true);
7353 /* We've already seen this guy. */
7358 /* The only trick here is using hash_table->arm_glue_size as the value.
7359 Even though the section isn't allocated yet, this is where we will be
7360 putting it. The +1 on the value marks that the stub has not been
7361 output yet - not that it is a Thumb function. */
7363 val
= globals
->arm_glue_size
+ 1;
7364 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
7365 tmp_name
, BSF_GLOBAL
, s
, val
,
7366 NULL
, true, false, &bh
);
7368 myh
= (struct elf_link_hash_entry
*) bh
;
7369 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7370 myh
->forced_local
= 1;
7374 if (bfd_link_pic (link_info
)
7375 || globals
->pic_veneer
)
7376 size
= ARM2THUMB_PIC_GLUE_SIZE
;
7377 else if (globals
->use_blx
)
7378 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
7380 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
7383 globals
->arm_glue_size
+= size
;
7388 /* Allocate space for ARMv4 BX veneers. */
7391 record_arm_bx_glue (struct bfd_link_info
* link_info
, int reg
)
7394 struct elf32_arm_link_hash_table
*globals
;
7396 struct elf_link_hash_entry
*myh
;
7397 struct bfd_link_hash_entry
*bh
;
7400 /* BX PC does not need a veneer. */
7404 globals
= elf32_arm_hash_table (link_info
);
7405 BFD_ASSERT (globals
!= NULL
);
7406 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7408 /* Check if this veneer has already been allocated. */
7409 if (globals
->bx_glue_offset
[reg
])
7412 s
= bfd_get_linker_section
7413 (globals
->bfd_of_glue_owner
, ARM_BX_GLUE_SECTION_NAME
);
7415 BFD_ASSERT (s
!= NULL
);
7417 /* Add symbol for veneer. */
7419 bfd_malloc ((bfd_size_type
) strlen (ARM_BX_GLUE_ENTRY_NAME
) + 1);
7420 BFD_ASSERT (tmp_name
);
7422 sprintf (tmp_name
, ARM_BX_GLUE_ENTRY_NAME
, reg
);
7424 myh
= elf_link_hash_lookup
7425 (&(globals
)->root
, tmp_name
, false, false, false);
7427 BFD_ASSERT (myh
== NULL
);
7430 val
= globals
->bx_glue_size
;
7431 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
7432 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
7433 NULL
, true, false, &bh
);
7435 myh
= (struct elf_link_hash_entry
*) bh
;
7436 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7437 myh
->forced_local
= 1;
7439 s
->size
+= ARM_BX_VENEER_SIZE
;
7440 globals
->bx_glue_offset
[reg
] = globals
->bx_glue_size
| 2;
7441 globals
->bx_glue_size
+= ARM_BX_VENEER_SIZE
;
7445 /* Add an entry to the code/data map for section SEC. */
7448 elf32_arm_section_map_add (asection
*sec
, char type
, bfd_vma vma
)
7450 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
7451 unsigned int newidx
;
7453 if (sec_data
->map
== NULL
)
7455 sec_data
->map
= (elf32_arm_section_map
*)
7456 bfd_malloc (sizeof (elf32_arm_section_map
));
7457 sec_data
->mapcount
= 0;
7458 sec_data
->mapsize
= 1;
7461 newidx
= sec_data
->mapcount
++;
7463 if (sec_data
->mapcount
> sec_data
->mapsize
)
7465 sec_data
->mapsize
*= 2;
7466 sec_data
->map
= (elf32_arm_section_map
*)
7467 bfd_realloc_or_free (sec_data
->map
, sec_data
->mapsize
7468 * sizeof (elf32_arm_section_map
));
7473 sec_data
->map
[newidx
].vma
= vma
;
7474 sec_data
->map
[newidx
].type
= type
;
7479 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7480 veneers are handled for now. */
7483 record_vfp11_erratum_veneer (struct bfd_link_info
*link_info
,
7484 elf32_vfp11_erratum_list
*branch
,
7486 asection
*branch_sec
,
7487 unsigned int offset
)
7490 struct elf32_arm_link_hash_table
*hash_table
;
7492 struct elf_link_hash_entry
*myh
;
7493 struct bfd_link_hash_entry
*bh
;
7495 struct _arm_elf_section_data
*sec_data
;
7496 elf32_vfp11_erratum_list
*newerr
;
7498 hash_table
= elf32_arm_hash_table (link_info
);
7499 BFD_ASSERT (hash_table
!= NULL
);
7500 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
7502 s
= bfd_get_linker_section
7503 (hash_table
->bfd_of_glue_owner
, VFP11_ERRATUM_VENEER_SECTION_NAME
);
7505 sec_data
= elf32_arm_section_data (s
);
7507 BFD_ASSERT (s
!= NULL
);
7509 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7510 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7511 BFD_ASSERT (tmp_name
);
7513 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
7514 hash_table
->num_vfp11_fixes
);
7516 myh
= elf_link_hash_lookup
7517 (&(hash_table
)->root
, tmp_name
, false, false, false);
7519 BFD_ASSERT (myh
== NULL
);
7522 val
= hash_table
->vfp11_erratum_glue_size
;
7523 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
7524 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
7525 NULL
, true, false, &bh
);
7527 myh
= (struct elf_link_hash_entry
*) bh
;
7528 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7529 myh
->forced_local
= 1;
7531 /* Link veneer back to calling location. */
7532 sec_data
->erratumcount
+= 1;
7533 newerr
= (elf32_vfp11_erratum_list
*)
7534 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
7536 newerr
->type
= VFP11_ERRATUM_ARM_VENEER
;
7538 newerr
->u
.v
.branch
= branch
;
7539 newerr
->u
.v
.id
= hash_table
->num_vfp11_fixes
;
7540 branch
->u
.b
.veneer
= newerr
;
7542 newerr
->next
= sec_data
->erratumlist
;
7543 sec_data
->erratumlist
= newerr
;
7545 /* A symbol for the return from the veneer. */
7546 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
7547 hash_table
->num_vfp11_fixes
);
7549 myh
= elf_link_hash_lookup
7550 (&(hash_table
)->root
, tmp_name
, false, false, false);
7557 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
7558 branch_sec
, val
, NULL
, true, false, &bh
);
7560 myh
= (struct elf_link_hash_entry
*) bh
;
7561 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7562 myh
->forced_local
= 1;
7566 /* Generate a mapping symbol for the veneer section, and explicitly add an
7567 entry for that symbol to the code/data map for the section. */
7568 if (hash_table
->vfp11_erratum_glue_size
== 0)
7571 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7572 ever requires this erratum fix. */
7573 _bfd_generic_link_add_one_symbol (link_info
,
7574 hash_table
->bfd_of_glue_owner
, "$a",
7575 BSF_LOCAL
, s
, 0, NULL
,
7578 myh
= (struct elf_link_hash_entry
*) bh
;
7579 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
7580 myh
->forced_local
= 1;
7582 /* The elf32_arm_init_maps function only cares about symbols from input
7583 BFDs. We must make a note of this generated mapping symbol
7584 ourselves so that code byteswapping works properly in
7585 elf32_arm_write_section. */
7586 elf32_arm_section_map_add (s
, 'a', 0);
7589 s
->size
+= VFP11_ERRATUM_VENEER_SIZE
;
7590 hash_table
->vfp11_erratum_glue_size
+= VFP11_ERRATUM_VENEER_SIZE
;
7591 hash_table
->num_vfp11_fixes
++;
7593 /* The offset of the veneer. */
/* Record information about a STM32L4XX STM erratum veneer.  Only Thumb-mode
   veneers need to be handled, because the erratum only affects Cortex-M
   (Thumb-only) parts.  */
7601 record_stm32l4xx_erratum_veneer (struct bfd_link_info
*link_info
,
7602 elf32_stm32l4xx_erratum_list
*branch
,
7604 asection
*branch_sec
,
7605 unsigned int offset
,
7606 bfd_size_type veneer_size
)
7609 struct elf32_arm_link_hash_table
*hash_table
;
7611 struct elf_link_hash_entry
*myh
;
7612 struct bfd_link_hash_entry
*bh
;
7614 struct _arm_elf_section_data
*sec_data
;
7615 elf32_stm32l4xx_erratum_list
*newerr
;
7617 hash_table
= elf32_arm_hash_table (link_info
);
7618 BFD_ASSERT (hash_table
!= NULL
);
7619 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
7621 s
= bfd_get_linker_section
7622 (hash_table
->bfd_of_glue_owner
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
7624 BFD_ASSERT (s
!= NULL
);
7626 sec_data
= elf32_arm_section_data (s
);
7628 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7629 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7630 BFD_ASSERT (tmp_name
);
7632 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
7633 hash_table
->num_stm32l4xx_fixes
);
7635 myh
= elf_link_hash_lookup
7636 (&(hash_table
)->root
, tmp_name
, false, false, false);
7638 BFD_ASSERT (myh
== NULL
);
7641 val
= hash_table
->stm32l4xx_erratum_glue_size
;
7642 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
7643 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
7644 NULL
, true, false, &bh
);
7646 myh
= (struct elf_link_hash_entry
*) bh
;
7647 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7648 myh
->forced_local
= 1;
7650 /* Link veneer back to calling location. */
7651 sec_data
->stm32l4xx_erratumcount
+= 1;
7652 newerr
= (elf32_stm32l4xx_erratum_list
*)
7653 bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list
));
7655 newerr
->type
= STM32L4XX_ERRATUM_VENEER
;
7657 newerr
->u
.v
.branch
= branch
;
7658 newerr
->u
.v
.id
= hash_table
->num_stm32l4xx_fixes
;
7659 branch
->u
.b
.veneer
= newerr
;
7661 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
7662 sec_data
->stm32l4xx_erratumlist
= newerr
;
7664 /* A symbol for the return from the veneer. */
7665 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
7666 hash_table
->num_stm32l4xx_fixes
);
7668 myh
= elf_link_hash_lookup
7669 (&(hash_table
)->root
, tmp_name
, false, false, false);
7676 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
7677 branch_sec
, val
, NULL
, true, false, &bh
);
7679 myh
= (struct elf_link_hash_entry
*) bh
;
7680 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7681 myh
->forced_local
= 1;
7685 /* Generate a mapping symbol for the veneer section, and explicitly add an
7686 entry for that symbol to the code/data map for the section. */
7687 if (hash_table
->stm32l4xx_erratum_glue_size
== 0)
7690 /* Creates a THUMB symbol since there is no other choice. */
7691 _bfd_generic_link_add_one_symbol (link_info
,
7692 hash_table
->bfd_of_glue_owner
, "$t",
7693 BSF_LOCAL
, s
, 0, NULL
,
7696 myh
= (struct elf_link_hash_entry
*) bh
;
7697 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
7698 myh
->forced_local
= 1;
7700 /* The elf32_arm_init_maps function only cares about symbols from input
7701 BFDs. We must make a note of this generated mapping symbol
7702 ourselves so that code byteswapping works properly in
7703 elf32_arm_write_section. */
7704 elf32_arm_section_map_add (s
, 't', 0);
7707 s
->size
+= veneer_size
;
7708 hash_table
->stm32l4xx_erratum_glue_size
+= veneer_size
;
7709 hash_table
->num_stm32l4xx_fixes
++;
7711 /* The offset of the veneer. */
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)
7719 /* Create a fake section for use by the ARM backend of the linker. */
7722 arm_make_glue_section (bfd
* abfd
, const char * name
)
7726 sec
= bfd_get_linker_section (abfd
, name
);
7731 sec
= bfd_make_section_anyway_with_flags (abfd
, name
, ARM_GLUE_SECTION_FLAGS
);
7734 || !bfd_set_section_alignment (sec
, 2))
7737 /* Set the gc mark to prevent the section from being removed by garbage
7738 collection, despite the fact that no relocs refer to this section. */
7744 /* Set size of .plt entries. This function is called from the
7745 linker scripts in ld/emultempl/{armelf}.em. */
7748 bfd_elf32_arm_use_long_plt (void)
7750 elf32_arm_use_long_plt_entry
= true;
7753 /* Add the glue sections to ABFD. This function is called from the
7754 linker scripts in ld/emultempl/{armelf}.em. */
7757 bfd_elf32_arm_add_glue_sections_to_bfd (bfd
*abfd
,
7758 struct bfd_link_info
*info
)
7760 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
7761 bool dostm32l4xx
= globals
7762 && globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
;
7765 /* If we are only performing a partial
7766 link do not bother adding the glue. */
7767 if (bfd_link_relocatable (info
))
7770 addglue
= arm_make_glue_section (abfd
, ARM2THUMB_GLUE_SECTION_NAME
)
7771 && arm_make_glue_section (abfd
, THUMB2ARM_GLUE_SECTION_NAME
)
7772 && arm_make_glue_section (abfd
, VFP11_ERRATUM_VENEER_SECTION_NAME
)
7773 && arm_make_glue_section (abfd
, ARM_BX_GLUE_SECTION_NAME
);
7779 && arm_make_glue_section (abfd
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
/* Mark the output sections of veneers needing a dedicated output section
   with SEC_KEEP.  This ensures they are not marked for deletion by
   strip_excluded_output_sections () when veneers are going to be created
   later.  Not doing so would trigger an assert on an empty section size in
   lang_size_sections_1 ().  */
7789 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info
*info
)
7791 enum elf32_arm_stub_type stub_type
;
7793 /* If we are only performing a partial
7794 link do not bother adding the glue. */
7795 if (bfd_link_relocatable (info
))
7798 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
; stub_type
++)
7801 const char *out_sec_name
;
7803 if (!arm_dedicated_stub_output_section_required (stub_type
))
7806 out_sec_name
= arm_dedicated_stub_output_section_name (stub_type
);
7807 out_sec
= bfd_get_section_by_name (info
->output_bfd
, out_sec_name
);
7808 if (out_sec
!= NULL
)
7809 out_sec
->flags
|= SEC_KEEP
;
/* Select a BFD to be used to hold the sections used by the glue code.
   This function is called from the linker scripts in
   ld/emultempl/{armelf}.em.  */
7818 bfd_elf32_arm_get_bfd_for_interworking (bfd
*abfd
, struct bfd_link_info
*info
)
7820 struct elf32_arm_link_hash_table
*globals
;
7822 /* If we are only performing a partial link
7823 do not bother getting a bfd to hold the glue. */
7824 if (bfd_link_relocatable (info
))
7827 /* Make sure we don't attach the glue sections to a dynamic object. */
7828 BFD_ASSERT (!(abfd
->flags
& DYNAMIC
));
7830 globals
= elf32_arm_hash_table (info
);
7831 BFD_ASSERT (globals
!= NULL
);
7833 if (globals
->bfd_of_glue_owner
!= NULL
)
7836 /* Save the bfd for later use. */
7837 globals
->bfd_of_glue_owner
= abfd
;
7843 check_use_blx (struct elf32_arm_link_hash_table
*globals
)
7847 cpu_arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
7850 if (globals
->fix_arm1176
)
7852 if (cpu_arch
== TAG_CPU_ARCH_V6T2
|| cpu_arch
> TAG_CPU_ARCH_V6K
)
7853 globals
->use_blx
= 1;
7857 if (cpu_arch
> TAG_CPU_ARCH_V4T
)
7858 globals
->use_blx
= 1;
7863 bfd_elf32_arm_process_before_allocation (bfd
*abfd
,
7864 struct bfd_link_info
*link_info
)
7866 Elf_Internal_Shdr
*symtab_hdr
;
7867 Elf_Internal_Rela
*internal_relocs
= NULL
;
7868 Elf_Internal_Rela
*irel
, *irelend
;
7869 bfd_byte
*contents
= NULL
;
7872 struct elf32_arm_link_hash_table
*globals
;
7874 /* If we are only performing a partial link do not bother
7875 to construct any glue. */
7876 if (bfd_link_relocatable (link_info
))
7879 /* Here we have a bfd that is to be included on the link. We have a
7880 hook to do reloc rummaging, before section sizes are nailed down. */
7881 globals
= elf32_arm_hash_table (link_info
);
7882 BFD_ASSERT (globals
!= NULL
);
7884 check_use_blx (globals
);
7886 if (globals
->byteswap_code
&& !bfd_big_endian (abfd
))
7888 _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
7893 /* PR 5398: If we have not decided to include any loadable sections in
7894 the output then we will not have a glue owner bfd. This is OK, it
7895 just means that there is nothing else for us to do here. */
7896 if (globals
->bfd_of_glue_owner
== NULL
)
7899 /* Rummage around all the relocs and map the glue vectors. */
7900 sec
= abfd
->sections
;
7905 for (; sec
!= NULL
; sec
= sec
->next
)
7907 if (sec
->reloc_count
== 0)
7910 if ((sec
->flags
& SEC_EXCLUDE
) != 0
7911 || (sec
->flags
& SEC_HAS_CONTENTS
) == 0)
7914 symtab_hdr
= & elf_symtab_hdr (abfd
);
7916 /* Load the relocs. */
7918 = _bfd_elf_link_read_relocs (abfd
, sec
, NULL
, NULL
, false);
7920 if (internal_relocs
== NULL
)
7923 irelend
= internal_relocs
+ sec
->reloc_count
;
7924 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
7927 unsigned long r_index
;
7929 struct elf_link_hash_entry
*h
;
7931 r_type
= ELF32_R_TYPE (irel
->r_info
);
7932 r_index
= ELF32_R_SYM (irel
->r_info
);
7934 /* These are the only relocation types we care about. */
7935 if ( r_type
!= R_ARM_PC24
7936 && (r_type
!= R_ARM_V4BX
|| globals
->fix_v4bx
< 2))
7939 /* Get the section contents if we haven't done so already. */
7940 if (contents
== NULL
)
7942 /* Get cached copy if it exists. */
7943 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
7944 contents
= elf_section_data (sec
)->this_hdr
.contents
;
7947 /* Go get them off disk. */
7948 if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
7953 if (r_type
== R_ARM_V4BX
)
7957 reg
= bfd_get_32 (abfd
, contents
+ irel
->r_offset
) & 0xf;
7958 record_arm_bx_glue (link_info
, reg
);
7962 /* If the relocation is not against a symbol it cannot concern us. */
7965 /* We don't care about local symbols. */
7966 if (r_index
< symtab_hdr
->sh_info
)
7969 /* This is an external symbol. */
7970 r_index
-= symtab_hdr
->sh_info
;
7971 h
= (struct elf_link_hash_entry
*)
7972 elf_sym_hashes (abfd
)[r_index
];
7974 /* If the relocation is against a static symbol it must be within
7975 the current section and so cannot be a cross ARM/Thumb relocation. */
      /* If the call will go through a PLT entry then we do not need
	 glue.  */
7981 if (globals
->root
.splt
!= NULL
&& h
->plt
.offset
!= (bfd_vma
) -1)
      /* This one is a call from ARM code.  We need to look up
	 the target of the call.  If it is a Thumb target, we
	 insert glue.  */
7990 if (ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
)
7991 == ST_BRANCH_TO_THUMB
)
7992 record_arm_to_thumb_glue (link_info
, h
);
8000 if (elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8004 if (elf_section_data (sec
)->relocs
!= internal_relocs
)
8005 free (internal_relocs
);
8006 internal_relocs
= NULL
;
8012 if (elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8014 if (elf_section_data (sec
)->relocs
!= internal_relocs
)
8015 free (internal_relocs
);
8022 /* Initialise maps of ARM/Thumb/data for input BFDs. */
8025 bfd_elf32_arm_init_maps (bfd
*abfd
)
8027 Elf_Internal_Sym
*isymbuf
;
8028 Elf_Internal_Shdr
*hdr
;
8029 unsigned int i
, localsyms
;
8031 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
8032 if (! is_arm_elf (abfd
))
8035 if ((abfd
->flags
& DYNAMIC
) != 0)
8038 hdr
= & elf_symtab_hdr (abfd
);
8039 localsyms
= hdr
->sh_info
;
8041 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
8042 should contain the number of local symbols, which should come before any
8043 global symbols. Mapping symbols are always local. */
8044 isymbuf
= bfd_elf_get_elf_syms (abfd
, hdr
, localsyms
, 0, NULL
, NULL
,
8047 /* No internal symbols read? Skip this BFD. */
8048 if (isymbuf
== NULL
)
8051 for (i
= 0; i
< localsyms
; i
++)
8053 Elf_Internal_Sym
*isym
= &isymbuf
[i
];
8054 asection
*sec
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
8058 && ELF_ST_BIND (isym
->st_info
) == STB_LOCAL
)
8060 name
= bfd_elf_string_from_elf_section (abfd
,
8061 hdr
->sh_link
, isym
->st_name
);
8063 if (bfd_is_arm_special_symbol_name (name
,
8064 BFD_ARM_SPECIAL_SYM_TYPE_MAP
))
8065 elf32_arm_section_map_add (sec
, name
[1], isym
->st_value
);
8071 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
8072 say what they wanted. */
8075 bfd_elf32_arm_set_cortex_a8_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
8077 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8078 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
8080 if (globals
== NULL
)
8083 if (globals
->fix_cortex_a8
== -1)
8085 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
8086 if (out_attr
[Tag_CPU_arch
].i
== TAG_CPU_ARCH_V7
8087 && (out_attr
[Tag_CPU_arch_profile
].i
== 'A'
8088 || out_attr
[Tag_CPU_arch_profile
].i
== 0))
8089 globals
->fix_cortex_a8
= 1;
8091 globals
->fix_cortex_a8
= 0;
8097 bfd_elf32_arm_set_vfp11_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
8099 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8100 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
8102 if (globals
== NULL
)
8104 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8105 if (out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V7
)
8107 switch (globals
->vfp11_fix
)
8109 case BFD_ARM_VFP11_FIX_DEFAULT
:
8110 case BFD_ARM_VFP11_FIX_NONE
:
8111 globals
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
8115 /* Give a warning, but do as the user requests anyway. */
8116 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8117 "workaround is not necessary for target architecture"), obfd
);
8120 else if (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_DEFAULT
)
    /* For earlier architectures, we might need the workaround, but do not
       enable it by default.  If the user is running with broken hardware,
       they must enable the erratum fix explicitly.  */
8124 globals
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
8128 bfd_elf32_arm_set_stm32l4xx_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
8130 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8131 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
8133 if (globals
== NULL
)
8136 /* We assume only Cortex-M4 may require the fix. */
8137 if (out_attr
[Tag_CPU_arch
].i
!= TAG_CPU_ARCH_V7E_M
8138 || out_attr
[Tag_CPU_arch_profile
].i
!= 'M')
8140 if (globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
)
8141 /* Give a warning, but do as the user requests anyway. */
8143 (_("%pB: warning: selected STM32L4XX erratum "
8144 "workaround is not necessary for target architecture"), obfd
);
8148 enum bfd_arm_vfp11_pipe
8156 /* Return a VFP register number. This is encoded as RX:X for single-precision
8157 registers, or X:RX for double-precision registers, where RX is the group of
8158 four bits in the instruction encoding and X is the single extension bit.
   RX and X fields are specified using their lowest (starting) bit.  The
   return value is:

     0...31: single-precision registers s0...s31
    32...63: double-precision registers d0...d31.
8165 Although X should be zero for VFP11 (encoding d0...d15 only), we might
8166 encounter VFP3 instructions, so we allow the full range for DP registers. */
static unsigned int
bfd_arm_vfp11_regno (unsigned int insn, bool is_double, unsigned int rx,
		     unsigned int x)
{
  if (is_double)
    return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
  else
    return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
}
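/* Worked example (added): with the RX field equal to 0b0101 and X equal to 1,
   the single-precision case yields (5 << 1) | 1 = 11, i.e. s11, while the
   double-precision case yields (5 | (1 << 4)) + 32 = 53, i.e. d21.  */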
8178 /* Set bits in *WMASK according to a register number REG as encoded by
8179 bfd_arm_vfp11_regno(). Ignore d16-d31. */
8182 bfd_arm_vfp11_write_mask (unsigned int *wmask
, unsigned int reg
)
8187 *wmask
|= 3 << ((reg
- 32) * 2);
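  /* Note (added): a double-precision register dN is encoded as 32 + N by
     bfd_arm_vfp11_regno and overlays s(2N) and s(2N+1), hence the two bits
     set at position 2 * N above; e.g. d3 (reg == 35) sets bits 6 and 7.  */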
8190 /* Return TRUE if WMASK overwrites anything in REGS. */
8193 bfd_arm_vfp11_antidependency (unsigned int wmask
, int *regs
, int numregs
)
8197 for (i
= 0; i
< numregs
; i
++)
8199 unsigned int reg
= regs
[i
];
8201 if (reg
< 32 && (wmask
& (1 << reg
)) != 0)
8209 if ((wmask
& (3 << (reg
* 2))) != 0)
8216 /* In this function, we're interested in two things: finding input registers
8217 for VFP data-processing instructions, and finding the set of registers which
8218 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
8219 hold the written set, so FLDM etc. are easy to deal with (we're only
8220 interested in 32 SP registers or 16 dp registers, due to the VFP version
8221 implemented by the chip in question). DP registers are marked by setting
   both SP registers in the write mask.  */
8224 static enum bfd_arm_vfp11_pipe
8225 bfd_arm_vfp11_insn_decode (unsigned int insn
, unsigned int *destmask
, int *regs
,
8228 enum bfd_arm_vfp11_pipe vpipe
= VFP11_BAD
;
8229 bool is_double
= ((insn
& 0xf00) == 0xb00) ? 1 : 0;
8231 if ((insn
& 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
8234 unsigned int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
8235 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
8237 pqrs
= ((insn
& 0x00800000) >> 20)
8238 | ((insn
& 0x00300000) >> 19)
8239 | ((insn
& 0x00000040) >> 6);
8243 case 0: /* fmac[sd]. */
8244 case 1: /* fnmac[sd]. */
8245 case 2: /* fmsc[sd]. */
8246 case 3: /* fnmsc[sd]. */
8248 bfd_arm_vfp11_write_mask (destmask
, fd
);
8250 regs
[1] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
8255 case 4: /* fmul[sd]. */
8256 case 5: /* fnmul[sd]. */
8257 case 6: /* fadd[sd]. */
8258 case 7: /* fsub[sd]. */
8262 case 8: /* fdiv[sd]. */
8265 bfd_arm_vfp11_write_mask (destmask
, fd
);
8266 regs
[0] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
8271 case 15: /* extended opcode. */
8273 unsigned int extn
= ((insn
>> 15) & 0x1e)
8274 | ((insn
>> 7) & 1);
8278 case 0: /* fcpy[sd]. */
8279 case 1: /* fabs[sd]. */
8280 case 2: /* fneg[sd]. */
8281 case 8: /* fcmp[sd]. */
8282 case 9: /* fcmpe[sd]. */
8283 case 10: /* fcmpz[sd]. */
8284 case 11: /* fcmpez[sd]. */
8285 case 16: /* fuito[sd]. */
8286 case 17: /* fsito[sd]. */
8287 case 24: /* ftoui[sd]. */
8288 case 25: /* ftouiz[sd]. */
8289 case 26: /* ftosi[sd]. */
8290 case 27: /* ftosiz[sd]. */
8291 /* These instructions will not bounce due to underflow. */
8296 case 3: /* fsqrt[sd]. */
8297 /* fsqrt cannot underflow, but it can (perhaps) overwrite
8298 registers to cause the erratum in previous instructions. */
8299 bfd_arm_vfp11_write_mask (destmask
, fd
);
8303 case 15: /* fcvt{ds,sd}. */
8307 bfd_arm_vfp11_write_mask (destmask
, fd
);
8309 /* Only FCVTSD can underflow. */
8310 if ((insn
& 0x100) != 0)
8329 /* Two-register transfer. */
8330 else if ((insn
& 0x0fe00ed0) == 0x0c400a10)
8332 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
8334 if ((insn
& 0x100000) == 0)
8337 bfd_arm_vfp11_write_mask (destmask
, fm
);
8340 bfd_arm_vfp11_write_mask (destmask
, fm
);
8341 bfd_arm_vfp11_write_mask (destmask
, fm
+ 1);
8347 else if ((insn
& 0x0e100e00) == 0x0c100a00) /* A load insn. */
8349 int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
8350 unsigned int puw
= ((insn
>> 21) & 0x1) | (((insn
>> 23) & 3) << 1);
8354 case 0: /* Two-reg transfer. We should catch these above. */
8357 case 2: /* fldm[sdx]. */
8361 unsigned int i
, offset
= insn
& 0xff;
8366 for (i
= fd
; i
< fd
+ offset
; i
++)
8367 bfd_arm_vfp11_write_mask (destmask
, i
);
8371 case 4: /* fld[sd]. */
8373 bfd_arm_vfp11_write_mask (destmask
, fd
);
8382 /* Single-register transfer. Note L==0. */
8383 else if ((insn
& 0x0f100e10) == 0x0e000a10)
8385 unsigned int opcode
= (insn
>> 21) & 7;
8386 unsigned int fn
= bfd_arm_vfp11_regno (insn
, is_double
, 16, 7);
8390 case 0: /* fmsr/fmdlr. */
8391 case 1: /* fmdhr. */
8392 /* Mark fmdhr and fmdlr as writing to the whole of the DP
8393 destination register. I don't know if this is exactly right,
8394 but it is the conservative choice. */
8395 bfd_arm_vfp11_write_mask (destmask
, fn
);
8409 static int elf32_arm_compare_mapping (const void * a
, const void * b
);
/* Look for potentially-troublesome code sequences which might trigger the
   VFP11 denormal/antidependency erratum.  See, e.g., the ARM1136 errata sheet
   (available from ARM) for details of the erratum.  A short version is
   described in ld.texinfo.  */

bool
bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  int regs[3], numregs = 0;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);

  if (globals == NULL)
    return false;

  /* We use a simple FSM to match troublesome VFP11 instruction sequences.
     The states transition as follows:

     0 -> 1 (vector) or 0 -> 2 (scalar)
	 A VFP FMAC-pipeline instruction has been seen.  Fill
	 regs[0]..regs[numregs-1] with its input operands.  Remember this
	 instruction in 'first_fmac'.

     1 -> 2
	 Any instruction, except for a VFP instruction which overwrites
	 regs[*].

     2 -> 3
	 A VFP instruction has been seen which overwrites any of regs[*].
	 We must make a veneer!  Reset state to 0 before examining next
	 instruction.

     If we fail to match anything in state 2, reset to state 0 and reset
     the instruction pointer to the instruction after 'first_fmac'.

     If the VFP11 vector mode is in use, there must be at least two unrelated
     instructions between anti-dependent VFP11 instructions to properly avoid
     triggering the erratum, hence the use of the extra state 1.  */

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return true;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return true;

  /* We should have chosen a fix type by the time we get here.  */
  BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);

  if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
    return true;

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    return true;

  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
	 section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
	  || (sec->flags & SEC_EXCLUDE) != 0
	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
	  || sec->output_section == bfd_abs_section_ptr
	  || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
	continue;

      sec_data = elf32_arm_section_data (sec);

      if (sec_data->mapcount == 0)
	continue;

      if (elf_section_data (sec)->this_hdr.contents != NULL)
	contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;

      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
	     elf32_arm_compare_mapping);

      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
				  ? sec->size : sec_data->map[span + 1].vma;
	  char span_type = sec_data->map[span].type;
	  int state = 0;

	  /* FIXME: Only ARM mode is supported at present.  We may need to
	     support Thumb-2 mode also at some point.  */
	  if (span_type != 'a')
	    continue;

	  for (i = span_start; i < span_end;)
	    {
	      unsigned int next_i = i + 4;
	      unsigned int insn = bfd_big_endian (abfd)
		? (((unsigned) contents[i] << 24)
		   | (contents[i + 1] << 16)
		   | (contents[i + 2] << 8)
		   | contents[i + 3])
		: (((unsigned) contents[i + 3] << 24)
		   | (contents[i + 2] << 16)
		   | (contents[i + 1] << 8)
		   | contents[i]);
	      unsigned int writemask = 0;
	      enum bfd_arm_vfp11_pipe vpipe;

	      switch (state)
		{
		case 0:
		  vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
						     &numregs);
		  /* I'm assuming the VFP11 erratum can trigger with denorm
		     operands on either the FMAC or the DS pipeline.  This might
		     lead to slightly overenthusiastic veneer insertion.  */
		  if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
		    {
		      state = use_vector ? 1 : 2;
		      first_fmac = i;
		      veneer_of_insn = insn;
		    }
		  break;

		case 1:
		  {
		    int other_regs[3], other_numregs;
		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
						       other_regs,
						       &other_numregs);

		    if (vpipe != VFP11_BAD
			&& bfd_arm_vfp11_antidependency (writemask, regs,
							 numregs))
		      state = 3;
		    else
		      state = 2;
		  }
		  break;

		case 2:
		  {
		    int other_regs[3], other_numregs;
		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
						       other_regs,
						       &other_numregs);

		    if (vpipe != VFP11_BAD
			&& bfd_arm_vfp11_antidependency (writemask, regs,
							 numregs))
		      state = 3;
		    else
		      {
			state = 0;
			next_i = first_fmac + 4;
		      }
		  }
		  break;

		case 3:
		  abort ();  /* Should be unreachable.  */
		}

	      if (state == 3)
		{
		  elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
		    bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));

		  elf32_arm_section_data (sec)->erratumcount += 1;

		  newerr->u.b.vfp_insn = veneer_of_insn;

		  newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;

		  record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
					       first_fmac);

		  newerr->next = sec_data->erratumlist;
		  sec_data->erratumlist = newerr;

		  state = 0;
		}

	      i = next_i;
	    }
	}

      if (elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;
    }

  return true;

 error_return:
  if (elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);

  return false;
}
/* Find virtual-memory addresses for VFP11 erratum veneers and return locations
   after sections have been laid out, using specially-named symbols.  */

void
bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  asection *sec;
  struct elf32_arm_link_hash_table *globals;
  char *tmp_name;

  if (bfd_link_relocatable (link_info))
    return;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return;

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)
    return;

  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
  BFD_ASSERT (tmp_name);

  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
      elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;

      for (; errnode != NULL; errnode = errnode->next)
	{
	  struct elf_link_hash_entry *myh;
	  bfd_vma vma;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	    case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
	      /* Find veneer symbol.  */
	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
		       errnode->u.b.veneer->u.v.id);

	      myh = elf_link_hash_lookup
		(&(globals)->root, tmp_name, false, false, true);

	      if (myh == NULL)
		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
				    abfd, "VFP11", tmp_name);

	      vma = myh->root.u.def.section->output_section->vma
		    + myh->root.u.def.section->output_offset
		    + myh->root.u.def.value;

	      errnode->u.b.veneer->vma = vma;
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	    case VFP11_ERRATUM_THUMB_VENEER:
	      /* Find return location.  */
	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
		       errnode->u.v.id);

	      myh = elf_link_hash_lookup
		(&(globals)->root, tmp_name, false, false, true);

	      if (myh == NULL)
		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
				    abfd, "VFP11", tmp_name);

	      vma = myh->root.u.def.section->output_section->vma
		    + myh->root.u.def.section->output_offset
		    + myh->root.u.def.value;

	      errnode->u.v.branch->vma = vma;
	      break;

	    default:
	      abort ();
	    }
	}
    }

  free (tmp_name);
}
/* Find virtual-memory addresses for STM32L4XX erratum veneers and
   return locations after sections have been laid out, using
   specially-named symbols.  */

void
bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
					      struct bfd_link_info *link_info)
{
  asection *sec;
  struct elf32_arm_link_hash_table *globals;
  char *tmp_name;

  if (bfd_link_relocatable (link_info))
    return;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return;

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)
    return;

  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
  BFD_ASSERT (tmp_name);

  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
      elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;

      for (; errnode != NULL; errnode = errnode->next)
	{
	  struct elf_link_hash_entry *myh;
	  bfd_vma vma;

	  switch (errnode->type)
	    {
	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
	      /* Find veneer symbol.  */
	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
		       errnode->u.b.veneer->u.v.id);

	      myh = elf_link_hash_lookup
		(&(globals)->root, tmp_name, false, false, true);

	      if (myh == NULL)
		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
				    abfd, "STM32L4XX", tmp_name);

	      vma = myh->root.u.def.section->output_section->vma
		    + myh->root.u.def.section->output_offset
		    + myh->root.u.def.value;

	      errnode->u.b.veneer->vma = vma;
	      break;

	    case STM32L4XX_ERRATUM_VENEER:
	      /* Find return location.  */
	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
		       errnode->u.v.id);

	      myh = elf_link_hash_lookup
		(&(globals)->root, tmp_name, false, false, true);

	      if (myh == NULL)
		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
				    abfd, "STM32L4XX", tmp_name);

	      vma = myh->root.u.def.section->output_section->vma
		    + myh->root.u.def.section->output_offset
		    + myh->root.u.def.value;

	      errnode->u.v.branch->vma = vma;
	      break;

	    default:
	      abort ();
	    }
	}
    }

  free (tmp_name);
}
static bool
is_thumb2_ldmia (const insn32 insn)
{
  /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
     1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll.  */
  return (insn & 0xffd02000) == 0xe8900000;
}

static bool
is_thumb2_ldmdb (const insn32 insn)
{
  /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
     1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll.  */
  return (insn & 0xffd02000) == 0xe9100000;
}

static bool
is_thumb2_vldm (const insn32 insn)
{
  /* A6.5 Extension register load or store instruction.

     We look for SP 32-bit and DP 64-bit registers.
     Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
     <list> is consecutive 64-bit registers
     1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
     Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
     <list> is consecutive 32-bit registers
     1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
     if P==0 && U==1 && W==1 && Rn=1101 VPOP
     if PUW=010 || PUW=011 || PUW=101 VLDM.  */
  return
    ((((insn & 0xfe100f00) == 0xec100b00) ||
      ((insn & 0xfe100f00) == 0xec100a00))
     && /* (IA without !).  */
     (((((insn << 7) >> 28) & 0xd) == 0x4)
      /* (IA with !), includes VPOP (when reg number is SP).  */
      || ((((insn << 7) >> 28) & 0xd) == 0x5)
      /* (DB with !).  */
      || ((((insn << 7) >> 28) & 0xd) == 0x9)));
}
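
/* Illustrative sketch only, not part of the original linker code: a few
   hand-assembled Thumb-2 instruction words run through the classification
   masks above.  The concrete encodings (and this helper's name) are
   assumptions made for the example, not values taken from a real object.  */

static void
elf32_arm_example_classify_multiword_loads (void)
{
  /* LDMIA.W r0!, {r1-r9}  -> matches the is_thumb2_ldmia mask.  */
  const insn32 ldmia = 0xe8b003fe;
  /* LDMDB   r0!, {r1-r9}  -> matches the is_thumb2_ldmdb mask.  */
  const insn32 ldmdb = 0xe93003fe;
  /* VLDMIA  r0!, {d1-d9}  -> matches the is_thumb2_vldm mask (T1, DP regs,
     PUW = 011, i.e. "IA with !").  */
  const insn32 vldm = 0xecb01b12;

  BFD_ASSERT (is_thumb2_ldmia (ldmia) && !is_thumb2_ldmdb (ldmia));
  BFD_ASSERT (is_thumb2_ldmdb (ldmdb) && !is_thumb2_ldmia (ldmdb));
  BFD_ASSERT (is_thumb2_vldm (vldm));
}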
/* STM STM32L4XX erratum: This function assumes that it receives an LDM or
   VLDM instruction.  It:
     - computes the number and the mode of memory accesses
     - decides if the replacement should be done:
       . replaces only if > 8-word accesses
       . or (testing purposes only) replaces all accesses.  */

static bool
stm32l4xx_need_create_replacing_stub (const insn32 insn,
				      bfd_arm_stm32l4xx_fix stm32l4xx_fix)
{
  int nb_words = 0;

  /* The field encoding the register list is the same for both LDMIA
     and LDMDB encodings.  */
  if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
    nb_words = elf32_arm_popcount (insn & 0x0000ffff);
  else if (is_thumb2_vldm (insn))
    nb_words = (insn & 0xff);

  /* DEFAULT mode accounts for the real bug condition situation,
     ALL mode inserts stubs for each LDM/VLDM instruction (testing).  */
  return (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT
	  ? nb_words > 8
	  : stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL);
}
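
/* Illustrative sketch only, not part of the original code: how the decision
   helper above behaves under the two fix policies.  The instruction words
   are the hand-assembled examples used in the previous sketch and are
   assumptions, as is this helper's name.  */

static void
elf32_arm_example_stm32l4xx_stub_decision (void)
{
  /* LDMIA.W r0!, {r1-r9}: nine registers, i.e. more than 8 words loaded,
     so the default policy asks for a veneer.  */
  BFD_ASSERT (stm32l4xx_need_create_replacing_stub
	      (0xe8b003fe, BFD_ARM_STM32L4XX_FIX_DEFAULT));

  /* LDMIA.W r0!, {r1-r4}: only four words, left alone by default...  */
  BFD_ASSERT (!stm32l4xx_need_create_replacing_stub
	      (0xe8b0001e, BFD_ARM_STM32L4XX_FIX_DEFAULT));

  /* ...but still replaced when testing with the ALL policy.  */
  BFD_ASSERT (stm32l4xx_need_create_replacing_stub
	      (0xe8b0001e, BFD_ARM_STM32L4XX_FIX_ALL));
}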
/* Look for potentially-troublesome code sequences which might trigger
   the STM STM32L4XX erratum.  */

bool
bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
				      struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);

  if (globals == NULL)
    return false;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return true;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return true;

  if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
    return true;

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    return true;

  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      unsigned int i, span;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
	 section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
	  || (sec->flags & SEC_EXCLUDE) != 0
	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
	  || sec->output_section == bfd_abs_section_ptr
	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
	continue;

      sec_data = elf32_arm_section_data (sec);

      if (sec_data->mapcount == 0)
	continue;

      if (elf_section_data (sec)->this_hdr.contents != NULL)
	contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;

      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
	     elf32_arm_compare_mapping);

      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
				  ? sec->size : sec_data->map[span + 1].vma;
	  char span_type = sec_data->map[span].type;
	  int itblock_current_pos = 0;

	  /* Only Thumb2 mode need be supported with this CM4 specific
	     code, we should not encounter any arm mode eg span_type 'a'.  */
	  if (span_type != 't')
	    continue;

	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
	      bool insn_32bit = false;
	      bool is_ldm = false;
	      bool is_vldm = false;
	      bool is_not_last_in_it_block = false;

	      /* The first 16-bits of all 32-bit thumb2 instructions start
		 with opcode[15..13]=0b111 and the encoded op1 can be anything
		 except opcode[12..11]!=0b00.
		 See 32-bit Thumb instruction encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = true;

	      /* Compute the predicate that tells if the instruction
		 is concerned by the IT block
		 - Creates an error if there is a ldm that is not
		   last in the IT block thus cannot be replaced
		 - Otherwise we can create a branch at the end of the
		   IT block, it will be controlled naturally by IT
		   with the proper pseudo-predicate
		 - So the only interesting predicate is the one that
		   tells that we are not on the last item of an IT
		   block.  */
	      if (itblock_current_pos != 0)
		is_not_last_in_it_block = !!--itblock_current_pos;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
		  is_vldm = is_thumb2_vldm (insn);

		  /* Veneers are created for (v)ldm depending on
		     option flags and memory accesses conditions; but
		     if the instruction is not the last instruction of
		     an IT block, we cannot create a jump there, so we
		     bail out.  */
		  if ((is_ldm || is_vldm)
		      && stm32l4xx_need_create_replacing_stub
		      (insn, globals->stm32l4xx_fix))
		    {
		      if (is_not_last_in_it_block)
			{
			  _bfd_error_handler
			    /* xgettext:c-format */
			    (_("%pB(%pA+%#x): error: multiple load detected"
			       " in non-last IT block instruction:"
			       " STM32L4XX veneer cannot be generated; "
			       "use gcc option -mrestrict-it to generate"
			       " only one instruction per IT block"),
			     abfd, sec, i);
			}
		      else
			{
			  elf32_stm32l4xx_erratum_list *newerr =
			    (elf32_stm32l4xx_erratum_list *)
			    bfd_zmalloc
			    (sizeof (elf32_stm32l4xx_erratum_list));

			  elf32_arm_section_data (sec)
			    ->stm32l4xx_erratumcount += 1;
			  newerr->u.b.insn = insn;
			  /* We create only thumb branches.  */
			  newerr->type =
			    STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
			  record_stm32l4xx_erratum_veneer
			    (link_info, newerr, abfd, sec,
			     i,
			     is_ldm ?
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE :
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
			  newerr->next = sec_data->stm32l4xx_erratumlist;
			  sec_data->stm32l4xx_erratumlist = newerr;
			}
		    }
		}
	      else
		{
		  /* IT blocks are only encoded in T1
		     Encoding T1: IT{x{y{z}}} <firstcond>
		     1 0 1 1 - 1 1 1 1 - firstcond - mask
		     if mask = '0000' then see 'related encodings'
		     We don't deal with UNPREDICTABLE, just ignore these.
		     There can be no nested IT blocks so an IT block
		     is naturally a new one for which it is worth
		     computing its size.  */
		  bool is_newitblock = ((insn & 0xff00) == 0xbf00)
		    && ((insn & 0x000f) != 0x0000);
		  /* If we have a new IT block we compute its size.  */
		  if (is_newitblock)
		    {
		      /* Compute the number of instructions controlled
			 by the IT block, it will be used to decide
			 whether we are inside an IT block or not.  */
		      unsigned int mask = insn & 0x000f;
		      itblock_current_pos = 4 - ctz (mask);
		    }
		}

	      i += insn_32bit ? 4 : 2;
	    }
	}

      if (elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;
    }

  return true;

 error_return:
  if (elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);

  return false;
}
/* Set target relocation values needed during linking.  */

void
bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
				 struct bfd_link_info *link_info,
				 struct elf32_arm_params *params)
{
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)
    return;

  globals->target1_is_rel = params->target1_is_rel;
  if (globals->fdpic_p)
    globals->target2_reloc = R_ARM_GOT32;
  else if (strcmp (params->target2_type, "rel") == 0)
    globals->target2_reloc = R_ARM_REL32;
  else if (strcmp (params->target2_type, "abs") == 0)
    globals->target2_reloc = R_ARM_ABS32;
  else if (strcmp (params->target2_type, "got-rel") == 0)
    globals->target2_reloc = R_ARM_GOT_PREL;
  else
    _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
			params->target2_type);

  globals->fix_v4bx = params->fix_v4bx;
  globals->use_blx |= params->use_blx;
  globals->vfp11_fix = params->vfp11_denorm_fix;
  globals->stm32l4xx_fix = params->stm32l4xx_fix;
  if (globals->fdpic_p)
    globals->pic_veneer = 1;
  else
    globals->pic_veneer = params->pic_veneer;
  globals->fix_cortex_a8 = params->fix_cortex_a8;
  globals->fix_arm1176 = params->fix_arm1176;
  globals->cmse_implib = params->cmse_implib;
  globals->in_implib_bfd = params->in_implib_bfd;

  BFD_ASSERT (is_arm_elf (output_bfd));
  elf_arm_tdata (output_bfd)->no_enum_size_warning
    = params->no_enum_size_warning;
  elf_arm_tdata (output_bfd)->no_wchar_size_warning
    = params->no_wchar_size_warning;
}
/* Replace the target offset of a Thumb bl or b.w instruction.  */

static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;
  bfd_vma lower;
  int reloc_sign;

  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
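
/* Illustrative sketch only, not part of the original code: a round trip
   through the Thumb-2 branch offset encoding used by insert_thumb_branch
   above.  The halfword bases 0xf000/0xd000 are simply the BL opcode bits
   that insert_thumb_branch preserves; the sample offset and this helper's
   name are assumptions made for the example.  */

static void
elf32_arm_example_thumb_branch_roundtrip (void)
{
  /* Encode with exactly the same arithmetic as insert_thumb_branch.  */
  long int offset = 0x123456;	/* Any even, in-range branch offset.  */
  bfd_vma reloc_sign = (offset < 0) ? 1 : 0;
  bfd_vma upper = 0xf000
		  | ((offset >> 12) & 0x3ff)
		  | (reloc_sign << 10);
  bfd_vma lower = 0xd000
		  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
		  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
		  | ((offset >> 1) & 0x7ff);

  /* Decode per the architecture: offset = SignExtend (S:I1:I2:imm10:imm11:0)
     with I1 = NOT (J1 EOR S) and I2 = NOT (J2 EOR S).  */
  bfd_vma s = (upper >> 10) & 1;
  bfd_vma i1 = !(((lower >> 13) & 1) ^ s);
  bfd_vma i2 = !(((lower >> 11) & 1) ^ s);
  bfd_vma imm = (s << 24) | (i1 << 23) | (i2 << 22)
		| ((upper & 0x3ff) << 12) | ((lower & 0x7ff) << 1);
  long int decoded = (long int) ((imm ^ 0x1000000) - 0x1000000);

  BFD_ASSERT (decoded == offset);
}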
/* Thumb code calling an ARM function.  */

static bool
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char *		name,
			 bfd *			input_bfd,
			 bfd *			output_bfd,
			 asection *		input_section,
			 bfd_byte *		hit_data,
			 asection *		sym_sec,
			 bfd_vma		offset,
			 bfd_signed_vma		addend,
			 bfd_vma		val,
			 char **		error_message)
{
  asection * s;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return false;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "Thumb", "ARM");

	  return false;
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return true;
}
/* Populate an Arm to Thumb stub.  Returns the stub symbol.  */

static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char *	    name,
			     bfd *		    input_bfd,
			     bfd *		    output_bfd,
			     asection *		    sym_sec,
			     bfd_vma		    val,
			     asection *		    s,
			     char **		    error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	_bfd_error_handler
	  (_("%pB(%s): warning: interworking not enabled;"
	     " first occurrence: %pB: %s call to %s"),
	   sym_sec->owner, name, input_bfd, "ARM", "Thumb");

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12));

	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
/* Arm code calling a Thumb function.  */

static bool
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char *		name,
			 bfd *			input_bfd,
			 bfd *			output_bfd,
			 asection *		input_section,
			 bfd_byte *		hit_data,
			 asection *		sym_sec,
			 bfd_vma		offset,
			 bfd_signed_vma		addend,
			 bfd_vma		val,
			 char **		error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return false;

  my_offset = myh->root.u.def.value;
  tmp = bfd_get_32 (input_bfd, hit_data);
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset)
		- 8);

  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return true;
}
/* Populate Arm stub for an exported Thumb function.  */

static bool
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (eh->export_glue == NULL)
    return true;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  return true;
}
/* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */

static bfd_vma
elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
{
  asection * s;
  bfd_vma glue_addr;
  bfd_byte *p;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM_BX_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  BFD_ASSERT (globals->bx_glue_offset[reg] & 2);

  glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;

  if ((globals->bx_glue_offset[reg] & 1) == 0)
    {
      p = s->contents + glue_addr;
      bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
      bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
      bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
      globals->bx_glue_offset[reg] |= 1;
    }

  return glue_addr + s->output_section->vma + s->output_offset;
}
/* Generate Arm stubs for exported Thumb symbols.  */

static void
elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
				  struct bfd_link_info *link_info)
{
  struct elf32_arm_link_hash_table * globals;

  if (link_info == NULL)
    /* Ignore this if we are not called by the ELF backend linker.  */
    return;

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)
    return;

  /* If blx is available then exported Thumb symbols are OK and there is
     nothing to do.  */
  if (globals->use_blx)
    return;

  elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
			  link_info);
}
/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC.  */

static void
elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
			      bfd_size_type count)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  BFD_ASSERT (htab->root.dynamic_sections_created);
  sreloc->size += RELOC_SIZE (htab) * count;
}

/* Reserve space for COUNT R_ARM_IRELATIVE relocations.  If the link is
   dynamic, the relocations should go in SRELOC, otherwise they should
   go in the special .rel.iplt section.  */

static void
elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
			    bfd_size_type count)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (!htab->root.dynamic_sections_created)
    htab->root.irelplt->size += RELOC_SIZE (htab) * count;
  else
    {
      BFD_ASSERT (sreloc != NULL);
      sreloc->size += RELOC_SIZE (htab) * count;
    }
}
/* Add relocation REL to the end of relocation section SRELOC.  */

static void
elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
			asection *sreloc, Elf_Internal_Rela *rel)
{
  bfd_byte *loc;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (!htab->root.dynamic_sections_created
      && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
    sreloc = htab->root.irelplt;

  loc = sreloc->contents;
  loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
  if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
    abort ();
  SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
}
/* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
   .plt.  */

static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bool is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->root.target_os == is_nacl && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      if (htab->fdpic_p)
	{
	  /* Allocate room for R_ARM_FUNCDESC_VALUE.  */
	  /* For lazy binding, relocations will be put into .rel.plt, in
	     .rel.got otherwise.  */
	  /* FIXME: today we don't support lazy binding so put it in .rel.got.  */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}
      else
	{
	  /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  /* We also need to make an entry in the .got.plt section, which
     will be placed in the .got section by the linker script.  */
  if (is_iplt_entry)
    arm_plt->got_offset = sgotplt->size;
  else
    arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
  if (htab->fdpic_p)
    /* Function descriptor takes 64 bits in GOT.  */
    sgotplt->size += 8;
  else
    sgotplt->size += 4;
}
static bfd_vma
arm_movw_immediate (bfd_vma value)
{
  return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
}

static bfd_vma
arm_movt_immediate (bfd_vma value)
{
  return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
}
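
/* Illustrative sketch only, not part of the original code: splitting a
   32-bit constant into the immediate fields of a MOVW/MOVT pair with the
   helpers above, then reassembling it.  The base opcode words below are the
   generic ARM A1 encodings of MOVW/MOVT with Rd and the immediates cleared;
   they and this helper's name are assumptions made for the example.  */

static void
elf32_arm_example_movw_movt_split (bfd_vma value)
{
  /* MOVW Rd, #imm16 -> 0xe3000000 | (imm4 << 16) | imm12;
     MOVT Rd, #imm16 -> 0xe3400000 | (imm4 << 16) | imm12.  */
  bfd_vma movw = 0xe3000000 | arm_movw_immediate (value);
  bfd_vma movt = 0xe3400000 | arm_movt_immediate (value);

  /* MOVW materialises the low half of VALUE, MOVT the high half.  */
  bfd_vma low  = ((movw >> 4) & 0xf000) | (movw & 0xfff);
  bfd_vma high = ((movt >> 4) & 0xf000) | (movt & 0xfff);

  BFD_ASSERT (((high << 16) | low) == (value & 0xffffffff));
}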
/* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
   the entry lives in .iplt and resolves to (*SYM_VALUE)().
   Otherwise, DYNINDX is the index of the symbol in the dynamic
   symbol table and SYM_VALUE is undefined.

   ROOT_PLT points to the offset of the PLT entry from the start of its
   section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
   bookkeeping information.

   Returns FALSE if there was a problem.  */

static bool
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  bfd_vma got_offset, got_address, plt_address;
  bfd_vma got_displacement, initial_got_entry;
  bfd_byte *ptr;

  BFD_ASSERT (sgot != NULL);

  /* Get the offset into the .(i)got.plt table of the entry that
     corresponds to this function.  */
  got_offset = (arm_plt->got_offset & -2);

  /* Get the index in the procedure linkage table which
     corresponds to this symbol.  This is the index of this symbol
     in all the symbols for which we are making plt entries.
     After the reserved .got.plt entries, all symbols appear in
     the same order as in .plt.  */
  if (htab->fdpic_p)
    /* Function descriptor takes 8 bytes.  */
    plt_index = (got_offset - got_header_size) / 8;
  else
    plt_index = (got_offset - got_header_size) / 4;

  /* Calculate the address of the GOT entry.  */
  got_address = (sgot->output_section->vma
		 + sgot->output_offset
		 + got_offset);

  /* ...and the address of the PLT entry.  */
  plt_address = (splt->output_section->vma
		 + splt->output_offset
		 + root_plt->offset);

  ptr = splt->contents + root_plt->offset;
  if (htab->root.target_os == is_vxworks && bfd_link_pic (info))
    {
      unsigned int i;
      bfd_vma val;

      for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	{
	  val = elf32_arm_vxworks_shared_plt_entry[i];
	  if (i == 2)
	    val |= got_address - sgot->output_section->vma;
	  if (i == 5)
	    val |= plt_index * RELOC_SIZE (htab);
	  if (i == 2 || i == 5)
	    bfd_put_32 (output_bfd, val, ptr);
	  else
	    put_arm_insn (htab, output_bfd, val, ptr);
	}
    }
  else if (htab->root.target_os == is_vxworks)
    {
      unsigned int i;
      bfd_vma val;

      for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	{
	  val = elf32_arm_vxworks_exec_plt_entry[i];
	  if (i == 2)
	    val |= got_address;
	  if (i == 4)
	    val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	  if (i == 5)
	    val |= plt_index * RELOC_SIZE (htab);
	  if (i == 2 || i == 5)
	    bfd_put_32 (output_bfd, val, ptr);
	  else
	    put_arm_insn (htab, output_bfd, val, ptr);
	}

      loc = (htab->srelplt2->contents
	     + (plt_index * 2 + 1) * RELOC_SIZE (htab));

      /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	 referencing the GOT for this PLT entry.  */
      rel.r_offset = plt_address + 8;
      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
      rel.r_addend = got_offset;
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
      loc += RELOC_SIZE (htab);

      /* Create the R_ARM_ABS32 relocation referencing the
	 beginning of the PLT for this GOT entry.  */
      rel.r_offset = got_address;
      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
      rel.r_addend = 0;
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }
  else if (htab->root.target_os == is_nacl)
    {
      /* Calculate the displacement between the PLT slot and the
	 common tail that's part of the special initial PLT slot.  */
      int32_t tail_displacement
	= ((splt->output_section->vma + splt->output_offset
	    + ARM_NACL_PLT_TAIL_OFFSET)
	   - (plt_address + htab->plt_entry_size + 4));
      BFD_ASSERT ((tail_displacement & 3) == 0);
      tail_displacement >>= 2;

      BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		  || (-tail_displacement & 0xff000000) == 0);

      /* Calculate the displacement between the PLT slot and the entry
	 in the GOT.  The offset accounts for the value produced by
	 adding to pc in the penultimate instruction of the PLT stub.  */
      got_displacement = (got_address
			  - (plt_address + htab->plt_entry_size));

      /* NaCl does not support interworking at all.  */
      BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[0]
		    | arm_movw_immediate (got_displacement),
		    ptr + 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[1]
		    | arm_movt_immediate (got_displacement),
		    ptr + 4);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[2],
		    ptr + 8);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[3]
		    | (tail_displacement & 0x00ffffff),
		    ptr + 12);
    }
  else if (htab->fdpic_p)
    {
      const bfd_vma *plt_entry = using_thumb_only (htab)
	? elf32_arm_fdpic_thumb_plt_entry
	: elf32_arm_fdpic_plt_entry;

      /* Fill-up Thumb stub if needed.  */
      if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	{
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[0], ptr - 4);
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[1], ptr - 2);
	}
      /* As we are using 32 bit instructions even for the Thumb
	 version, we have to use 'put_arm_insn' instead of
	 'put_thumb_insn'.  */
      put_arm_insn (htab, output_bfd, plt_entry[0], ptr + 0);
      put_arm_insn (htab, output_bfd, plt_entry[1], ptr + 4);
      put_arm_insn (htab, output_bfd, plt_entry[2], ptr + 8);
      put_arm_insn (htab, output_bfd, plt_entry[3], ptr + 12);
      bfd_put_32 (output_bfd, got_offset, ptr + 16);

      if (!(info->flags & DF_BIND_NOW))
	{
	  /* funcdesc_value_reloc_offset.  */
	  bfd_put_32 (output_bfd,
		      htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
		      ptr + 20);
	  put_arm_insn (htab, output_bfd, plt_entry[6], ptr + 24);
	  put_arm_insn (htab, output_bfd, plt_entry[7], ptr + 28);
	  put_arm_insn (htab, output_bfd, plt_entry[8], ptr + 32);
	  put_arm_insn (htab, output_bfd, plt_entry[9], ptr + 36);
	}
    }
  else if (using_thumb_only (htab))
    {
      /* PR ld/16017: Generate thumb only PLT entries.  */
      if (!using_thumb2 (htab))
	{
	  /* FIXME: We ought to be able to generate thumb-1 PLT
	     instructions...  */
	  _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
			      output_bfd);
	  return false;
	}

      /* Calculate the displacement between the PLT slot and the entry in
	 the GOT.  The 12-byte offset accounts for the value produced by
	 adding to pc in the 3rd instruction of the PLT stub.  */
      got_displacement = got_address - (plt_address + 12);

      /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	 instead of 'put_thumb_insn'.  */
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[0]
		    | ((got_displacement & 0x000000ff) << 16)
		    | ((got_displacement & 0x00000700) << 20)
		    | ((got_displacement & 0x00000800) >> 1)
		    | ((got_displacement & 0x0000f000) >> 12),
		    ptr + 0);
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[1]
		    | ((got_displacement & 0x00ff0000))
		    | ((got_displacement & 0x07000000) << 4)
		    | ((got_displacement & 0x08000000) >> 17)
		    | ((got_displacement & 0xf0000000) >> 28),
		    ptr + 4);
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[2],
		    ptr + 8);
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[3],
		    ptr + 12);
    }
  else
    {
      /* Calculate the displacement between the PLT slot and the
	 entry in the GOT.  The eight-byte offset accounts for the
	 value produced by adding to pc in the first instruction
	 of the PLT stub.  */
      got_displacement = got_address - (plt_address + 8);

      if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	{
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[0], ptr - 4);
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[1], ptr - 2);
	}

      if (!elf32_arm_use_long_plt_entry)
	{
	  BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_short[0]
			| ((got_displacement & 0x0ff00000) >> 20),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_short[1]
			| ((got_displacement & 0x000ff000) >> 12),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_short[2]
			| (got_displacement & 0x00000fff),
			ptr + 8);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	}
      else
	{
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[0]
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[1]
			| ((got_displacement & 0x0ff00000) >> 20),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[2]
			| ((got_displacement & 0x000ff000) >> 12),
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[3]
			| (got_displacement & 0x00000fff),
			ptr + 12);
	}
    }

  /* Fill in the entry in the .rel(a).(i)plt section.  */
  rel.r_offset = got_address;
  rel.r_addend = 0;
  if (dynindx == -1)
    {
      /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	 The dynamic linker or static executable then calls SYM_VALUE
	 to determine the correct run-time value of the .igot.plt entry.  */
      rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
      initial_got_entry = sym_value;
    }
  else
    {
      /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
	 used by PLT entry.  */
      if (htab->fdpic_p)
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	  initial_got_entry = 0;
	}
      else
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	  initial_got_entry = (splt->output_section->vma
			       + splt->output_offset);

	  /* When thumb only we need to set the LSB for any address that
	     will be used with an interworking branch instruction.  */
	  if (using_thumb_only (htab))
	    initial_got_entry |= 1;
	}
    }

  /* Fill in the entry in the global offset table.  */
  bfd_put_32 (output_bfd, initial_got_entry,
	      sgot->contents + got_offset);

  if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
    {
      /* Setup initial funcdesc value.  */
      /* FIXME: we don't support lazy binding because there is a
	 race condition between both words getting written and
	 some other thread attempting to read them.  The ARM
	 architecture does not have an atomic 64 bit load/store
	 instruction that could be used to prevent it; it is
	 recommended that threaded FDPIC applications run with the
	 LD_BIND_NOW environment variable set.  */
      bfd_put_32 (output_bfd, plt_address + 0x18,
		  sgot->contents + got_offset);
      bfd_put_32 (output_bfd, -1 /*TODO*/,
		  sgot->contents + got_offset + 4);
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      if (htab->fdpic_p)
	{
	  /* For FDPIC we put PLT relocations into .rel.got when not
	     lazy binding otherwise we put them in .rel.plt.  For now,
	     we don't support lazy binding so put it in .rel.got.  */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelgot, &rel);
	  else
	    elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelplt, &rel);
	}
      else
	{
	  loc = srel->contents + plt_index * RELOC_SIZE (htab);
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
    }

  return true;
}
/* Some relocations map to different relocations depending on the
   target.  Return the real relocation.  */

static int
arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
		     int r_type)
{
  switch (r_type)
    {
    case R_ARM_TARGET1:
      if (globals->target1_is_rel)
	return R_ARM_REL32;
      else
	return R_ARM_ABS32;

    case R_ARM_TARGET2:
      return globals->target2_reloc;

    default:
      return r_type;
    }
}
/* Return the base VMA address which should be subtracted from real addresses
   when resolving @dtpoff relocation.
   This is PT_TLS segment p_vaddr.  */

static bfd_vma
dtpoff_base (struct bfd_link_info *info)
{
  /* If tls_sec is NULL, we should have signalled an error already.  */
  if (elf_hash_table (info)->tls_sec == NULL)
    return 0;
  return elf_hash_table (info)->tls_sec->vma;
}

/* Return the relocation value for @tpoff relocation
   if STT_TLS virtual address is ADDRESS.  */

static bfd_vma
tpoff (struct bfd_link_info *info, bfd_vma address)
{
  struct elf_link_hash_table *htab = elf_hash_table (info);
  bfd_vma base;

  /* If tls_sec is NULL, we should have signalled an error already.  */
  if (htab->tls_sec == NULL)
    return 0;

  base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
  return address - htab->tls_sec->vma + base;
}
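
/* Illustrative sketch only, not part of the original code: the arithmetic
   tpoff performs, spelt out with made-up numbers.  The segment addresses
   and this helper's name are assumptions for the example; only the
   8-byte ARM TCB size comes from this backend's TCB_SIZE.  */

static void
elf32_arm_example_tpoff (void)
{
  /* Suppose the PT_TLS segment starts at 0x20000 with 8-byte alignment and
     the variable lives at 0x20010.  align_power (TCB_SIZE, 3) == 8, so the
     variable ends up at thread pointer + 0x18:
       tpoff = address - tls_sec->vma + base = 0x20010 - 0x20000 + 8.  */
  bfd_vma tls_vma = 0x20000;
  bfd_vma address = 0x20010;
  bfd_vma base = align_power ((bfd_vma) TCB_SIZE, 3);

  BFD_ASSERT (address - tls_vma + base == 0x18);
}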
/* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
   VALUE is the relocation value.  */

static bfd_reloc_status_type
elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
{
  if (value > 0xfff)
    return bfd_reloc_overflow;

  value |= bfd_get_32 (abfd, data) & 0xfffff000;
  bfd_put_32 (abfd, value, data);
  return bfd_reloc_ok;
}
/* Handle TLS relaxations.  Relaxing is possible for symbols that use
   R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
   R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.

   Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
   is to then call final_link_relocate.  Return other values in the
   case of error.

   FIXME: When --emit-relocs is in effect, we'll emit relocs describing
   the pre-relaxed code.  It would be nice if the relocs were updated
   to match the optimization.  */

static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if (insn & 1)
	insn -= 5; /* THUMB */
      else
	insn -= 8; /* ARM */
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478) /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);

	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "Thumb", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* ARM insn.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "ARM", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]'.  */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0]  */
	insn = 0x44786800;
      else if (using_thumb2 (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0x46c046c0;

      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
/* For a given value of n, calculate the value of G_n as required to
   deal with group relocations.  We return it in the form of an
   encoded constant-and-rotation, together with the final residual.  If n is
   specified as less than zero, then final_residual is filled with the
   input value and no further action is performed.  */

static bfd_vma
calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
{
  int current_n;
  bfd_vma g_n;
  bfd_vma encoded_g_n = 0;
  bfd_vma residual = value; /* Also known as Y_n.  */

  for (current_n = 0; current_n <= n; current_n++)
    {
      int shift;
      int msb;

      /* Calculate which part of the value to mask.  */

      /* Determine the most significant bit in the residual and
	 align the resulting value to a 2-bit boundary.  */
      for (msb = 30; msb >= 0; msb -= 2)
	if (residual & (3u << msb))
	  break;

      /* The desired shift is now (msb - 6), or zero, whichever
	 is the greater.  */
      shift = msb - 6;
      if (shift < 0)
	shift = 0;

      /* Calculate g_n in 32-bit as well as encoded constant+rotation form.  */
      g_n = residual & (0xff << shift);
      encoded_g_n = (g_n >> shift)
		    | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);

      /* Calculate the residual for the next time around.  */
      residual &= ~g_n;
    }

  *final_residual = residual;

  return encoded_g_n;
}
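
/* Illustrative sketch only, not part of the original code: the successive
   groups G_0 and G_1 peeled off a sample value, as used by the ALU group
   relocations.  Each result is an 8-bit constant plus an even rotation, and
   whatever is left after the requested group comes back via FINAL_RESIDUAL.
   The sample value and this helper's name are assumptions for the example.  */

static void
elf32_arm_example_group_reloc (void)
{
  bfd_vma value = 0x12345678;
  bfd_vma residual;

  /* G_0 covers the top byte-sized chunk 0x12000000, encoded as immediate
     0x48 with rotation field 5 (0x48 rotated right by 10 bits).  */
  BFD_ASSERT (calculate_group_reloc_mask (value, 0, &residual) == 0x548);
  BFD_ASSERT (residual == 0x345678);

  /* G_1 continues with the next chunk of what remains.  */
  BFD_ASSERT (calculate_group_reloc_mask (value, 1, &residual) == 0x9d1);
  BFD_ASSERT (residual == 0x1678);

  /* Requesting n < 0 just hands the input value back as the residual.  */
  calculate_group_reloc_mask (value, -1, &residual);
  BFD_ASSERT (residual == value);
}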
/* Given an ARM instruction, determine whether it is an ADD or a SUB.
   Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise.  */

static int
identify_add_or_sub (bfd_vma insn)
{
  int opcode = insn & 0x1e00000;

  if (opcode == 1 << 23) /* ADD */
    return 1;

  if (opcode == 1 << 22) /* SUB */
    return -1;

  return 0;
}
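
/* Illustrative sketch only, not part of the original code: the
   data-processing opcode field (bits 21-24) of a few hand-assembled ARM
   instructions as seen by identify_add_or_sub.  The instruction words and
   this helper's name are assumptions for the example.  */

static void
elf32_arm_example_add_or_sub (void)
{
  /* ADD r0, r1, #1 -> 0xe2810001; SUB r0, r1, #1 -> 0xe2410001.  */
  BFD_ASSERT (identify_add_or_sub (0xe2810001) == 1);
  BFD_ASSERT (identify_add_or_sub (0xe2410001) == -1);
  /* AND r0, r1, #1 (0xe2010001) is neither.  */
  BFD_ASSERT (identify_add_or_sub (0xe2010001) == 0);
}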
10313 /* Perform a relocation as part of a final link. */
10315 static bfd_reloc_status_type
10316 elf32_arm_final_link_relocate (reloc_howto_type
* howto
,
10319 asection
* input_section
,
10320 bfd_byte
* contents
,
10321 Elf_Internal_Rela
* rel
,
10323 struct bfd_link_info
* info
,
10324 asection
* sym_sec
,
10325 const char * sym_name
,
10326 unsigned char st_type
,
10327 enum arm_st_branch_type branch_type
,
10328 struct elf_link_hash_entry
* h
,
10329 bool * unresolved_reloc_p
,
10330 char ** error_message
)
10332 unsigned long r_type
= howto
->type
;
10333 unsigned long r_symndx
;
10334 bfd_byte
* hit_data
= contents
+ rel
->r_offset
;
10335 bfd_vma
* local_got_offsets
;
10336 bfd_vma
* local_tlsdesc_gotents
;
10339 asection
* sreloc
= NULL
;
10340 asection
* srelgot
;
10342 bfd_signed_vma signed_addend
;
10343 unsigned char dynreloc_st_type
;
10344 bfd_vma dynreloc_value
;
10345 struct elf32_arm_link_hash_table
* globals
;
10346 struct elf32_arm_link_hash_entry
*eh
;
10347 union gotplt_union
*root_plt
;
10348 struct arm_plt_info
*arm_plt
;
10349 bfd_vma plt_offset
;
10350 bfd_vma gotplt_offset
;
10351 bool has_iplt_entry
;
10352 bool resolved_to_zero
;
10354 globals
= elf32_arm_hash_table (info
);
10355 if (globals
== NULL
)
10356 return bfd_reloc_notsupported
;
10358 BFD_ASSERT (is_arm_elf (input_bfd
));
10359 BFD_ASSERT (howto
!= NULL
);
10361 /* Some relocation types map to different relocations depending on the
10362 target. We pick the right one here. */
10363 r_type
= arm_real_reloc_type (globals
, r_type
);
10365 /* It is possible to have linker relaxations on some TLS access
10366 models. Update our information here. */
10367 r_type
= elf32_arm_tls_transition (info
, r_type
, h
);
10369 if (r_type
!= howto
->type
)
10370 howto
= elf32_arm_howto_from_type (r_type
);
10372 eh
= (struct elf32_arm_link_hash_entry
*) h
;
10373 sgot
= globals
->root
.sgot
;
10374 local_got_offsets
= elf_local_got_offsets (input_bfd
);
10375 local_tlsdesc_gotents
= elf32_arm_local_tlsdesc_gotent (input_bfd
);
10377 if (globals
->root
.dynamic_sections_created
)
10378 srelgot
= globals
->root
.srelgot
;
10382 r_symndx
= ELF32_R_SYM (rel
->r_info
);
10384 if (globals
->use_rel
)
10388 switch (bfd_get_reloc_size (howto
))
10390 case 1: addend
= bfd_get_8 (input_bfd
, hit_data
); break;
10391 case 2: addend
= bfd_get_16 (input_bfd
, hit_data
); break;
10392 case 4: addend
= bfd_get_32 (input_bfd
, hit_data
); break;
10393 default: addend
= 0; break;
10395 /* Note: the addend and signed_addend calculated here are
10396 incorrect for any split field. */
10397 addend
&= howto
->src_mask
;
10398 sign
= howto
->src_mask
& ~(howto
->src_mask
>> 1);
10399 signed_addend
= (addend
^ sign
) - sign
;
10400 signed_addend
= (bfd_vma
) signed_addend
<< howto
->rightshift
;
10401 addend
<<= howto
->rightshift
;
10404 addend
= signed_addend
= rel
->r_addend
;
10406 /* Record the symbol information that should be used in dynamic
10408 dynreloc_st_type
= st_type
;
10409 dynreloc_value
= value
;
10410 if (branch_type
== ST_BRANCH_TO_THUMB
)
10411 dynreloc_value
|= 1;
10413 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10414 VALUE appropriately for relocations that we resolve at link time. */
10415 has_iplt_entry
= false;
10416 if (elf32_arm_get_plt_info (input_bfd
, globals
, eh
, r_symndx
, &root_plt
,
10418 && root_plt
->offset
!= (bfd_vma
) -1)
10420 plt_offset
= root_plt
->offset
;
10421 gotplt_offset
= arm_plt
->got_offset
;
10423 if (h
== NULL
|| eh
->is_iplt
)
10425 has_iplt_entry
= true;
10426 splt
= globals
->root
.iplt
;
10428 /* Populate .iplt entries here, because not all of them will
10429 be seen by finish_dynamic_symbol. The lower bit is set if
10430 we have already populated the entry. */
10431 if (plt_offset
& 1)
10435 if (elf32_arm_populate_plt_entry (output_bfd
, info
, root_plt
, arm_plt
,
10436 -1, dynreloc_value
))
10437 root_plt
->offset
|= 1;
10439 return bfd_reloc_notsupported
;
10442 /* Static relocations always resolve to the .iplt entry. */
10443 st_type
= STT_FUNC
;
10444 value
= (splt
->output_section
->vma
10445 + splt
->output_offset
10447 branch_type
= ST_BRANCH_TO_ARM
;
10449 /* If there are non-call relocations that resolve to the .iplt
10450 entry, then all dynamic ones must too. */
10451 if (arm_plt
->noncall_refcount
!= 0)
10453 dynreloc_st_type
= st_type
;
10454 dynreloc_value
= value
;
10458 /* We populate the .plt entry in finish_dynamic_symbol. */
10459 splt
= globals
->root
.splt
;
10464 plt_offset
= (bfd_vma
) -1;
10465 gotplt_offset
= (bfd_vma
) -1;
10468 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we are
10469 resolving a function call relocation. We want to inform the user
10470 that something is wrong. */
10471 if (using_thumb_only (globals
)
10472 && (r_type
== R_ARM_THM_CALL
10473 || r_type
== R_ARM_THM_JUMP24
)
10474 && branch_type
== ST_BRANCH_TO_ARM
10475 /* Calls through a PLT are special: the assembly source code
10476 cannot be annotated with '.type foo(PLT), %function', and
10477 they handled specifically below anyway. */
10480 if (sym_sec
== bfd_abs_section_ptr
)
10482 /* As an exception, assume that absolute symbols are of the
10483 right kind (Thumb). They are presumably defined in the
10484 linker script, where it is not possible to declare them as
10485 Thumb (and thus are seen as Arm mode). Inform the user with
10486 a warning, though. */
10487 branch_type
= ST_BRANCH_TO_THUMB
;
10489 if (sym_sec
->owner
)
10491 (_("warning: %pB(%s): Forcing bramch to absolute symbol in Thumb mode (Thumb-only CPU)"
10493 sym_sec
->owner
, sym_name
, input_bfd
);
10496 (_("warning: (%s): Forcing branch to absolute symbol in Thumb mode (Thumb-only CPU)"
10498 sym_name
, input_bfd
);
10501 /* Otherwise do not silently build a stub, and let the users
10502 know they have to fix their code. Indeed, we could decide
10503 to insert a stub involving Arm code and/or BLX, leading to
10504 a run-time crash. */
10505 branch_type
= ST_BRANCH_UNKNOWN
;
  /* Fail early if branch_type is ST_BRANCH_UNKNOWN and we target a
     Thumb-only CPU.  We could emit a warning on Arm-capable targets
     too, but that would be too verbose (a lot of legacy code does not
     use the .type foo, %function directive).  */
  if (using_thumb_only (globals)
      && (r_type == R_ARM_THM_CALL
          || r_type == R_ARM_THM_JUMP24)
      && branch_type == ST_BRANCH_UNKNOWN
      /* Exception to the rule above: a branch to an undefined weak
         symbol is turned into a jump to the next instruction unless a
         PLT entry will be created (see below).  */
      && !(h && h->root.type == bfd_link_hash_undefweak
           && plt_offset == (bfd_vma) -1))
    {
      if (sym_sec != NULL
          && sym_sec->owner != NULL)
        _bfd_error_handler
          (_("%pB(%s): Unknown destination type (ARM/Thumb) in %pB"),
           sym_sec->owner, sym_name, input_bfd);
      else
        _bfd_error_handler
          (_("(%s): Unknown destination type (ARM/Thumb) in %pB"),
           sym_name, input_bfd);

      return bfd_reloc_notsupported;
    }
  resolved_to_zero = (h != NULL
                      && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));

  switch (r_type)
    {
    case R_ARM_NONE:
      /* We don't need to find a value for this symbol.  It's just a
         marker.  */
      *unresolved_reloc_p = false;
      return bfd_reloc_ok;

    case R_ARM_ABS12:
      if (globals->root.target_os != is_vxworks)
        return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
      /* Fall through.  */

    case R_ARM_PC24:
    case R_ARM_ABS32:
    case R_ARM_ABS32_NOI:
    case R_ARM_REL32:
    case R_ARM_REL32_NOI:
    case R_ARM_CALL:
    case R_ARM_JUMP24:
    case R_ARM_XPC25:
    case R_ARM_PREL31:
    case R_ARM_PLT32:
      /* Handle relocations which should use the PLT entry.  ABS32/REL32
         will use the symbol's value, which may point to a PLT entry, but we
         don't need to handle that here.  If we created a PLT entry, all
         branches in this object should go to it, except if the PLT is too
         far away, in which case a long branch stub should be inserted.  */
      if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
           && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
           && r_type != R_ARM_CALL
           && r_type != R_ARM_JUMP24
           && r_type != R_ARM_PLT32)
          && plt_offset != (bfd_vma) -1)
        {
          /* If we've created a .plt section, and assigned a PLT entry
             to this function, it must either be a STT_GNU_IFUNC reference
             or not be known to bind locally.  In other cases, we should
             have cleared the PLT entry by now.  */
          BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));

          value = (splt->output_section->vma
                   + splt->output_offset
                   + plt_offset);
          *unresolved_reloc_p = false;
          return _bfd_final_link_relocate (howto, input_bfd, input_section,
                                           contents, rel->r_offset, value,
                                           rel->r_addend);
        }
      /* When generating a shared library or PIE, these relocations
         are copied into the output file to be resolved at run time.  */
      if ((bfd_link_pic (info)
           || globals->fdpic_p)
          && (input_section->flags & SEC_ALLOC)
          && !(globals->root.target_os == is_vxworks
               && strcmp (input_section->output_section->name,
                          ".tls_vars") == 0)
          && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
              || !SYMBOL_CALLS_LOCAL (info, h))
          && !(input_bfd == globals->stub_bfd
               && strstr (input_section->name, STUB_SUFFIX))
          && (h == NULL
              || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
                  && !resolved_to_zero)
              || h->root.type != bfd_link_hash_undefweak)
          && r_type != R_ARM_PC24
          && r_type != R_ARM_CALL
          && r_type != R_ARM_JUMP24
          && r_type != R_ARM_PREL31
          && r_type != R_ARM_PLT32)
        {
          Elf_Internal_Rela outrel;
          bool skip, relocate;
          int isrofixup = 0;

          if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
              && !h->def_regular)
            {
              char *v = _("shared object");

              if (bfd_link_executable (info))
                v = _("PIE executable");

              _bfd_error_handler
                (_("%pB: relocation %s against external or undefined symbol `%s'"
                   " can not be used when making a %s; recompile with -fPIC"), input_bfd,
                 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
              return bfd_reloc_notsupported;
            }
          *unresolved_reloc_p = false;

          if (sreloc == NULL && globals->root.dynamic_sections_created)
            {
              sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
                                                           ! globals->use_rel);

              if (sreloc == NULL)
                return bfd_reloc_notsupported;
            }

          skip = false;
          relocate = false;

          outrel.r_addend = addend;
          outrel.r_offset =
            _bfd_elf_section_offset (output_bfd, info, input_section,
                                     rel->r_offset);
          if (outrel.r_offset == (bfd_vma) -1)
            skip = true;
          else if (outrel.r_offset == (bfd_vma) -2)
            skip = true, relocate = true;
          outrel.r_offset += (input_section->output_section->vma
                              + input_section->output_offset);

          if (skip)
            memset (&outrel, 0, sizeof outrel);
          else if (h != NULL
                   && h->dynindx != -1
                   && (!bfd_link_pic (info)
                       || !(bfd_link_pie (info)
                            || SYMBOLIC_BIND (info, h))
                       || !h->def_regular))
            outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
          else
            {
              int symbol;

              /* This symbol is local, or marked to become local.  */
              BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
                          || (globals->fdpic_p && !bfd_link_pic (info)));
              /* On SVR4-ish systems, the dynamic loader cannot
                 relocate the text and data segments independently,
                 so the symbol does not matter.  */
              symbol = 0;
              if (dynreloc_st_type == STT_GNU_IFUNC)
                /* We have an STT_GNU_IFUNC symbol that doesn't resolve
                   to the .iplt entry.  Instead, every non-call reference
                   must use an R_ARM_IRELATIVE relocation to obtain the
                   correct run-time address.  */
                outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
              else if (globals->fdpic_p && !bfd_link_pic (info))
                isrofixup = 1;
              else
                outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
              if (globals->use_rel)
                relocate = true;
              else
                outrel.r_addend += dynreloc_value;
            }

          if (isrofixup)
            arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
          else
            elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);

          /* If this reloc is against an external symbol, we do not want to
             fiddle with the addend.  Otherwise, we need to include the symbol
             value so that it becomes an addend for the dynamic reloc.  */
          if (! relocate)
            return bfd_reloc_ok;

          return _bfd_final_link_relocate (howto, input_bfd, input_section,
                                           contents, rel->r_offset,
                                           dynreloc_value, (bfd_vma) 0);
        }
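      /* A compact way to picture the REL/RELA split handled above
         (illustrative only; the field names follow the ELF specification,
         the helpers are made up for this sketch):

           #include <stdint.h>

           typedef struct { uint32_t r_offset; uint32_t r_info; } rel32;
           typedef struct { uint32_t r_offset; uint32_t r_info;
                            int32_t r_addend; } rela32;

           // REL: the addend lives in the relocated word itself, so the
           // linker must leave (or store) the link-time value in place.
           static uint32_t apply_rel (uint32_t place_contents, uint32_t base)
           {
             return base + place_contents;
           }

           // RELA: the addend travels in the relocation record, so the
           // word in the image can be left untouched.
           static uint32_t apply_rela (const rela32 *r, uint32_t base)
           {
             return base + (uint32_t) r->r_addend;
           }

         That is why the REL path sets RELOCATE and still calls
         _bfd_final_link_relocate, while the RELA path folds
         dynreloc_value into outrel.r_addend instead.  */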
      else switch (r_type)
        {
        case R_ARM_ABS12:
          return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
        case R_ARM_XPC25:         /* Arm BLX instruction.  */
        case R_ARM_CALL:
        case R_ARM_JUMP24:
        case R_ARM_PC24:          /* Arm B/BL instruction.  */
        case R_ARM_PLT32:
          {
            struct elf32_arm_stub_hash_entry *stub_entry = NULL;

            if (r_type == R_ARM_XPC25)
              {
                /* Check for Arm calling Arm function.  */
                /* FIXME: Should we translate the instruction into a BL
                   instruction instead ?  */
                if (branch_type != ST_BRANCH_TO_THUMB)
                  _bfd_error_handler
                    (_("\%pB: warning: %s BLX instruction targets"
                       " %s function '%s'"),
                     input_bfd, "ARM",
                     "ARM", h ? h->root.root.string : "(local)");
              }
            else if (r_type == R_ARM_PC24)
              {
                /* Check for Arm calling Thumb function.  */
                if (branch_type == ST_BRANCH_TO_THUMB)
                  {
                    if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
                                                 output_bfd, input_section,
                                                 hit_data, sym_sec, rel->r_offset,
                                                 signed_addend, value,
                                                 error_message))
                      return bfd_reloc_ok;
                    else
                      return bfd_reloc_dangerous;
                  }
              }

            /* Check if a stub has to be inserted because the
               destination is too far or we are changing mode.  */
            if (   r_type == R_ARM_CALL
                || r_type == R_ARM_JUMP24
                || r_type == R_ARM_PLT32)
              {
                enum elf32_arm_stub_type stub_type = arm_stub_none;
                struct elf32_arm_link_hash_entry *hash;

                hash = (struct elf32_arm_link_hash_entry *) h;
                stub_type = arm_type_of_stub (info, input_section, rel,
                                              st_type, &branch_type,
                                              hash, value, sym_sec,
                                              input_bfd, sym_name);

                if (stub_type != arm_stub_none)
                  {
                    /* The target is out of reach, so redirect the
                       branch to the local stub for this function.  */
                    stub_entry = elf32_arm_get_stub_entry (input_section,
                                                           sym_sec, h,
                                                           rel, globals,
                                                           stub_type);
                    if (stub_entry != NULL)
                      value = (stub_entry->stub_offset
                               + stub_entry->stub_sec->output_offset
                               + stub_entry->stub_sec->output_section->vma);

                    if (plt_offset != (bfd_vma) -1)
                      *unresolved_reloc_p = false;
                  }
                else
                  {
                    /* If the call goes through a PLT entry, make sure to
                       check distance to the right destination address.  */
                    if (plt_offset != (bfd_vma) -1)
                      {
                        value = (splt->output_section->vma
                                 + splt->output_offset
                                 + plt_offset);
                        *unresolved_reloc_p = false;
                        /* The PLT entry is in ARM mode, regardless of the
                           target function.  */
                        branch_type = ST_BRANCH_TO_ARM;
                      }
                  }
              }

            /* The ARM ELF ABI says that this reloc is computed as: S - P + A
               where:
                S is the address of the symbol in the relocation.
                P is address of the instruction being relocated.
                A is the addend (extracted from the instruction) in bytes.

               S is held in 'value'.
               P is the base address of the section containing the
                 instruction plus the offset of the reloc into that
                 section, ie:
                   (input_section->output_section->vma +
                    input_section->output_offset +
                    rel->r_offset).
               A is the addend, converted into bytes, ie:
                   (signed_addend * 4)

               Note: None of these operations have knowledge of the pipeline
               size of the processor, thus it is up to the assembler to
               encode this information into the addend.  */
            value -= (input_section->output_section->vma
                      + input_section->output_offset);
            value -= rel->r_offset;
            value += signed_addend;

            signed_addend = value;
            signed_addend >>= howto->rightshift;

            /* A branch to an undefined weak symbol is turned into a jump to
               the next instruction unless a PLT entry will be created.
               Do the same for local undefined symbols (but not for STN_UNDEF).
               The jump to the next instruction is optimized as a NOP depending
               on the architecture.  */
            if (h ? (h->root.type == bfd_link_hash_undefweak
                     && plt_offset == (bfd_vma) -1)
                : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
              {
                value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);

                if (arch_has_arm_nop (globals))
                  value |= 0x0320f000;
                else
                  value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0.  */
              }
            else
              {
                /* Perform a signed range check.  */
                if (   signed_addend >   ((bfd_signed_vma)  (howto->dst_mask >> 1))
                    || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
                  return bfd_reloc_overflow;

                addend = (value & 2);

                value = (signed_addend & howto->dst_mask)
                  | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));

                if (r_type == R_ARM_CALL)
                  {
                    /* Set the H bit in the BLX instruction.  */
                    if (branch_type == ST_BRANCH_TO_THUMB)
                      {
                        if (addend)
                          value |= (1 << 24);
                        else
                          value &= ~(bfd_vma)(1 << 24);
                      }

                    /* Select the correct instruction (BL or BLX).  */
                    /* Only if we are not handling a BL to a stub. In this
                       case, mode switching is performed by the stub.  */
                    if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
                      value |= (1 << 28);
                    else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
                      {
                        value &= ~(bfd_vma)(1 << 28);
                        value |= (1 << 24);
                      }
                  }
              }
          }
          break;
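          /* A worked example of the S - P + A computation above for an
             Arm B/BL (illustrative only, not part of the linker).  With
             a REL object the assembler stores A = -8 in the instruction
             to account for the Arm pipeline, so for S = 0x8040 and an
             instruction at P = 0x8000:

               S - P + A = 0x8040 - 0x8000 - 8 = 0x38

             which, shifted right by two, gives the imm24 field 0x0e.
             A hypothetical encoder for an unconditional BL:

               #include <stdint.h>

               static uint32_t encode_bl (uint32_t s, uint32_t p)
               {
                 int32_t offset = (int32_t) (s - p - 8);
                 return 0xeb000000u | (((uint32_t) offset >> 2) & 0x00ffffffu);
               }

             The linker works the other way round: it folds the addend it
             extracted from the imm24 field back into VALUE, then
             range-checks and re-inserts the masked result.  */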
        case R_ARM_ABS32:
          value += addend;
          if (branch_type == ST_BRANCH_TO_THUMB)
            value |= 1;
          break;

        case R_ARM_ABS32_NOI:
          value += addend;
          break;

        case R_ARM_REL32:
          value += addend;
          if (branch_type == ST_BRANCH_TO_THUMB)
            value |= 1;
          value -= (input_section->output_section->vma
                    + input_section->output_offset + rel->r_offset);
          break;

        case R_ARM_REL32_NOI:
          value += addend;
          value -= (input_section->output_section->vma
                    + input_section->output_offset + rel->r_offset);
          break;

        case R_ARM_PREL31:
          value -= (input_section->output_section->vma
                    + input_section->output_offset + rel->r_offset);
          value += signed_addend;
          if (! h || h->root.type != bfd_link_hash_undefweak)
            {
              /* Check for overflow.  */
              if ((value ^ (value >> 1)) & (1 << 30))
                return bfd_reloc_overflow;
            }
          value &= 0x7fffffff;
          value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
          if (branch_type == ST_BRANCH_TO_THUMB)
            value |= 1;
          break;
        }

      bfd_put_32 (input_bfd, value, hit_data);
      return bfd_reloc_ok;
    case R_ARM_ABS8:
      value += addend;

      /* There is no way to tell whether the user intended to use a signed or
         unsigned addend.  When checking for overflow we accept either,
         as specified by the AAELF.  */
      if ((long) value > 0xff || (long) value < -0x80)
        return bfd_reloc_overflow;

      bfd_put_8 (input_bfd, value, hit_data);
      return bfd_reloc_ok;

    case R_ARM_ABS16:
      value += addend;

      /* See comment for R_ARM_ABS8.  */
      if ((long) value > 0xffff || (long) value < -0x8000)
        return bfd_reloc_overflow;

      bfd_put_16 (input_bfd, value, hit_data);
      return bfd_reloc_ok;

    case R_ARM_THM_ABS5:
      /* Support ldr and str instructions for the thumb.  */
      if (globals->use_rel)
        {
          /* Need to refetch addend.  */
          addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
          /* ??? Need to determine shift amount from operand size.  */
          addend >>= howto->rightshift;
        }
      value += addend;

      /* ??? Isn't value unsigned?  */
      if ((long) value > 0x1f || (long) value < -0x10)
        return bfd_reloc_overflow;

      /* ??? Value needs to be properly shifted into place first.  */
      value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
      bfd_put_16 (input_bfd, value, hit_data);
      return bfd_reloc_ok;
    case R_ARM_THM_ALU_PREL_11_0:
      /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw).  */
      {
        bfd_vma insn;
        bfd_signed_vma relocation;

        insn = (bfd_get_16 (input_bfd, hit_data) << 16)
               | bfd_get_16 (input_bfd, hit_data + 2);

        if (globals->use_rel)
          {
            signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
                            | ((insn & (1 << 26)) >> 15);
            if (insn & 0xf00000)
              signed_addend = -signed_addend;
          }

        relocation = value + signed_addend;
        relocation -= Pa (input_section->output_section->vma
                          + input_section->output_offset
                          + rel->r_offset);

        /* PR 21523: Use an absolute value.  The user of this reloc will
           have already selected an ADD or SUB insn appropriately.  */
        value = llabs (relocation);

        if (value >= 0x1000)
          return bfd_reloc_overflow;

        /* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
        if (branch_type == ST_BRANCH_TO_THUMB)
          value |= 1;

        insn = (insn & 0xfb0f8f00) | (value & 0xff)
               | ((value & 0x700) << 4)
               | ((value & 0x800) << 15);
        if (relocation < 0)
          insn |= 0xa00000;

        bfd_put_16 (input_bfd, insn >> 16, hit_data);
        bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);

        return bfd_reloc_ok;
      }
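      /* The ADDW/SUBW immediate handled above is a plain 12-bit value
         scattered over the i:imm3:imm8 fields of the 32-bit Thumb
         encoding.  An illustrative stand-alone encoder (the helper is
         invented, not used by the linker); INSN is assumed to hold an
         ADDW template as upper halfword << 16 | lower halfword:

           #include <stdint.h>

           static uint32_t set_t32_addw_imm (uint32_t insn, int32_t disp)
           {
             uint32_t imm12 = (uint32_t) (disp < 0 ? -disp : disp) & 0xfff;

             insn &= 0xfb0f8f00u;               // clear i, imm3, imm8
             insn |= imm12 & 0xff;              // imm8
             insn |= (imm12 & 0x700) << 4;      // imm3
             insn |= (imm12 & 0x800) << 15;     // i
             if (disp < 0)
               insn |= 0xa00000u;               // flip ADDW into SUBW
             return insn;
           }

         Note that the displacement is measured from Pa, the instruction's
         address rounded down to a word boundary, which is why the code
         above subtracts Pa (...) rather than the raw place.  */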
    case R_ARM_THM_PC8:
      /* PR 10073:  This reloc is not generated by the GNU toolchain,
         but it is supported for compatibility with third party libraries
         generated by other compilers, specifically the ARM/IAR.  */
      {
        bfd_vma insn;
        bfd_signed_vma relocation;

        insn = bfd_get_16 (input_bfd, hit_data);

        if (globals->use_rel)
          addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;

        relocation = value + addend;
        relocation -= Pa (input_section->output_section->vma
                          + input_section->output_offset
                          + rel->r_offset);

        value = relocation;

        /* We do not check for overflow of this reloc.  Although strictly
           speaking this is incorrect, it appears to be necessary in order
           to work with IAR generated relocs.  Since GCC and GAS do not
           generate R_ARM_THM_PC8 relocs, the lack of a check should not be
           a problem for them.  */
        value &= 0x3fc;

        insn = (insn & 0xff00) | (value >> 2);

        bfd_put_16 (input_bfd, insn, hit_data);

        return bfd_reloc_ok;
      }
    case R_ARM_THM_PC12:
      /* Corresponds to: ldr.w reg, [pc, #offset].  */
      {
        bfd_vma insn;
        bfd_signed_vma relocation;

        insn = (bfd_get_16 (input_bfd, hit_data) << 16)
               | bfd_get_16 (input_bfd, hit_data + 2);

        if (globals->use_rel)
          {
            signed_addend = insn & 0xfff;
            if (!(insn & (1 << 23)))
              signed_addend = -signed_addend;
          }

        relocation = value + signed_addend;
        relocation -= Pa (input_section->output_section->vma
                          + input_section->output_offset
                          + rel->r_offset);

        value = relocation;

        if (value >= 0x1000)
          return bfd_reloc_overflow;

        insn = (insn & 0xff7ff000) | value;
        if (relocation >= 0)
          insn |= (1 << 23);

        bfd_put_16 (input_bfd, insn >> 16, hit_data);
        bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);

        return bfd_reloc_ok;
      }
11078 case R_ARM_THM_XPC22
:
11079 case R_ARM_THM_CALL
:
11080 case R_ARM_THM_JUMP24
:
11081 /* Thumb BL (branch long instruction). */
11083 bfd_vma relocation
;
11084 bfd_vma reloc_sign
;
11085 bool overflow
= false;
11086 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
11087 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
11088 bfd_signed_vma reloc_signed_max
;
11089 bfd_signed_vma reloc_signed_min
;
11091 bfd_signed_vma signed_check
;
11093 const int thumb2
= using_thumb2 (globals
);
11094 const int thumb2_bl
= using_thumb2_bl (globals
);
11096 /* A branch to an undefined weak symbol is turned into a jump to
11097 the next instruction unless a PLT entry will be created.
11098 The jump to the next instruction is optimized as a NOP.W for
11099 Thumb-2 enabled architectures. */
11100 if (h
&& h
->root
.type
== bfd_link_hash_undefweak
11101 && plt_offset
== (bfd_vma
) -1)
11105 bfd_put_16 (input_bfd
, 0xf3af, hit_data
);
11106 bfd_put_16 (input_bfd
, 0x8000, hit_data
+ 2);
11110 bfd_put_16 (input_bfd
, 0xe000, hit_data
);
11111 bfd_put_16 (input_bfd
, 0xbf00, hit_data
+ 2);
11113 return bfd_reloc_ok
;
11116 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
11117 with Thumb-1) involving the J1 and J2 bits. */
11118 if (globals
->use_rel
)
11120 bfd_vma s
= (upper_insn
& (1 << 10)) >> 10;
11121 bfd_vma upper
= upper_insn
& 0x3ff;
11122 bfd_vma lower
= lower_insn
& 0x7ff;
11123 bfd_vma j1
= (lower_insn
& (1 << 13)) >> 13;
11124 bfd_vma j2
= (lower_insn
& (1 << 11)) >> 11;
11125 bfd_vma i1
= j1
^ s
? 0 : 1;
11126 bfd_vma i2
= j2
^ s
? 0 : 1;
11128 addend
= (i1
<< 23) | (i2
<< 22) | (upper
<< 12) | (lower
<< 1);
11130 addend
= (addend
| ((s
? 0 : 1) << 24)) - (1 << 24);
11132 signed_addend
= addend
;
11135 if (r_type
== R_ARM_THM_XPC22
)
11137 /* Check for Thumb to Thumb call. */
11138 /* FIXME: Should we translate the instruction into a BL
11139 instruction instead ? */
11140 if (branch_type
== ST_BRANCH_TO_THUMB
)
11142 (_("%pB: warning: %s BLX instruction targets"
11143 " %s function '%s'"),
11144 input_bfd
, "Thumb",
11145 "Thumb", h
? h
->root
.root
.string
: "(local)");
11149 /* If it is not a call to Thumb, assume call to Arm.
11150 If it is a call relative to a section name, then it is not a
11151 function call at all, but rather a long jump. Calls through
11152 the PLT do not require stubs. */
11153 if (branch_type
== ST_BRANCH_TO_ARM
&& plt_offset
== (bfd_vma
) -1)
11155 if (globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
11157 /* Convert BL to BLX. */
11158 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
11160 else if (( r_type
!= R_ARM_THM_CALL
)
11161 && (r_type
!= R_ARM_THM_JUMP24
))
11163 if (elf32_thumb_to_arm_stub
11164 (info
, sym_name
, input_bfd
, output_bfd
, input_section
,
11165 hit_data
, sym_sec
, rel
->r_offset
, signed_addend
, value
,
11167 return bfd_reloc_ok
;
11169 return bfd_reloc_dangerous
;
11172 else if (branch_type
== ST_BRANCH_TO_THUMB
11173 && globals
->use_blx
11174 && r_type
== R_ARM_THM_CALL
)
11176 /* Make sure this is a BL. */
11177 lower_insn
|= 0x1800;
11181 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
11182 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
)
11184 /* Check if a stub has to be inserted because the destination
11186 struct elf32_arm_stub_hash_entry
*stub_entry
;
11187 struct elf32_arm_link_hash_entry
*hash
;
11189 hash
= (struct elf32_arm_link_hash_entry
*) h
;
11191 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
11192 st_type
, &branch_type
,
11193 hash
, value
, sym_sec
,
11194 input_bfd
, sym_name
);
11196 if (stub_type
!= arm_stub_none
)
11198 /* The target is out of reach or we are changing modes, so
11199 redirect the branch to the local stub for this
11201 stub_entry
= elf32_arm_get_stub_entry (input_section
,
11205 if (stub_entry
!= NULL
)
11207 value
= (stub_entry
->stub_offset
11208 + stub_entry
->stub_sec
->output_offset
11209 + stub_entry
->stub_sec
->output_section
->vma
);
11211 if (plt_offset
!= (bfd_vma
) -1)
11212 *unresolved_reloc_p
= false;
11215 /* If this call becomes a call to Arm, force BLX. */
11216 if (globals
->use_blx
&& (r_type
== R_ARM_THM_CALL
))
11219 && !arm_stub_is_thumb (stub_entry
->stub_type
))
11220 || branch_type
!= ST_BRANCH_TO_THUMB
)
11221 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
11226 /* Handle calls via the PLT. */
11227 if (stub_type
== arm_stub_none
&& plt_offset
!= (bfd_vma
) -1)
11229 value
= (splt
->output_section
->vma
11230 + splt
->output_offset
11233 if (globals
->use_blx
11234 && r_type
== R_ARM_THM_CALL
11235 && ! using_thumb_only (globals
))
11237 /* If the Thumb BLX instruction is available, convert
11238 the BL to a BLX instruction to call the ARM-mode
11240 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
11241 branch_type
= ST_BRANCH_TO_ARM
;
11245 if (! using_thumb_only (globals
))
11246 /* Target the Thumb stub before the ARM PLT entry. */
11247 value
-= PLT_THUMB_STUB_SIZE
;
11248 branch_type
= ST_BRANCH_TO_THUMB
;
11250 *unresolved_reloc_p
= false;
11253 relocation
= value
+ signed_addend
;
11255 relocation
-= (input_section
->output_section
->vma
11256 + input_section
->output_offset
11259 check
= relocation
>> howto
->rightshift
;
11261 /* If this is a signed value, the rightshift just dropped
11262 leading 1 bits (assuming twos complement). */
11263 if ((bfd_signed_vma
) relocation
>= 0)
11264 signed_check
= check
;
11266 signed_check
= check
| ~((bfd_vma
) -1 >> howto
->rightshift
);
11268 /* Calculate the permissable maximum and minimum values for
11269 this relocation according to whether we're relocating for
11271 bitsize
= howto
->bitsize
;
11274 reloc_signed_max
= (1 << (bitsize
- 1)) - 1;
11275 reloc_signed_min
= ~reloc_signed_max
;
11277 /* Assumes two's complement. */
11278 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
11281 if ((lower_insn
& 0x5000) == 0x4000)
11282 /* For a BLX instruction, make sure that the relocation is rounded up
11283 to a word boundary. This follows the semantics of the instruction
11284 which specifies that bit 1 of the target address will come from bit
11285 1 of the base address. */
11286 relocation
= (relocation
+ 2) & ~ 3;
11288 /* Put RELOCATION back into the insn. Assumes two's complement.
11289 We use the Thumb-2 encoding, which is safe even if dealing with
11290 a Thumb-1 instruction by virtue of our overflow check above. */
11291 reloc_sign
= (signed_check
< 0) ? 1 : 0;
11292 upper_insn
= (upper_insn
& ~(bfd_vma
) 0x7ff)
11293 | ((relocation
>> 12) & 0x3ff)
11294 | (reloc_sign
<< 10);
11295 lower_insn
= (lower_insn
& ~(bfd_vma
) 0x2fff)
11296 | (((!((relocation
>> 23) & 1)) ^ reloc_sign
) << 13)
11297 | (((!((relocation
>> 22) & 1)) ^ reloc_sign
) << 11)
11298 | ((relocation
>> 1) & 0x7ff);
11300 /* Put the relocated value back in the object file: */
11301 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
11302 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
11304 return (overflow
? bfd_reloc_overflow
: bfd_reloc_ok
);
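    /* The Thumb-2 BL/BLX immediate packed and unpacked above spreads a
       25-bit signed offset over two halfwords as S:I1:I2:imm10:imm11:'0',
       with I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S).  A stand-alone
       decoder, for illustration only (the helper name is invented):

         #include <stdint.h>

         static int32_t decode_t32_bl_offset (uint16_t upper, uint16_t lower)
         {
           uint32_t s   = (upper >> 10) & 1;
           uint32_t hi  = upper & 0x3ff;
           uint32_t j1  = (lower >> 13) & 1;
           uint32_t j2  = (lower >> 11) & 1;
           uint32_t lo  = lower & 0x7ff;
           uint32_t i1  = (j1 ^ s) ? 0 : 1;
           uint32_t i2  = (j2 ^ s) ? 0 : 1;
           uint32_t off = (i1 << 23) | (i2 << 22) | (hi << 12) | (lo << 1);

           // Sign-extend from bit 24 using S, as the addend fetch does.
           return (int32_t) ((off | ((s ? 0u : 1u) << 24)) - (1u << 24));
         }

       The encoder above is the exact inverse: it derives J1 and J2 from
       the sign bit of the relocated value before writing the two
       halfwords back.  */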
    case R_ARM_THM_JUMP19:
      /* Thumb32 conditional branch instruction.  */
      {
        bfd_vma relocation;
        bool overflow = false;
        bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
        bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
        bfd_signed_vma reloc_signed_max = 0xffffe;
        bfd_signed_vma reloc_signed_min = -0x100000;
        bfd_signed_vma signed_check;
        enum elf32_arm_stub_type stub_type = arm_stub_none;
        struct elf32_arm_stub_hash_entry *stub_entry;
        struct elf32_arm_link_hash_entry *hash;

        /* Need to refetch the addend, reconstruct the top three bits,
           and squish the two 11 bit pieces together.  */
        if (globals->use_rel)
          {
            bfd_vma S     = (upper_insn & 0x0400) >> 10;
            bfd_vma upper = (upper_insn & 0x003f);
            bfd_vma J1    = (lower_insn & 0x2000) >> 13;
            bfd_vma J2    = (lower_insn & 0x0800) >> 11;
            bfd_vma lower = (lower_insn & 0x07ff);

            upper |= J1 << 6;
            upper |= J2 << 7;
            upper |= (!S) << 8;
            upper -= 0x0100; /* Sign extend.  */

            addend = (upper << 12) | (lower << 1);
            signed_addend = addend;
          }

        /* Handle calls via the PLT.  */
        if (plt_offset != (bfd_vma) -1)
          {
            value = (splt->output_section->vma
                     + splt->output_offset
                     + plt_offset);
            /* Target the Thumb stub before the ARM PLT entry.  */
            value -= PLT_THUMB_STUB_SIZE;
            *unresolved_reloc_p = false;
          }

        hash = (struct elf32_arm_link_hash_entry *)h;

        stub_type = arm_type_of_stub (info, input_section, rel,
                                      st_type, &branch_type,
                                      hash, value, sym_sec,
                                      input_bfd, sym_name);
        if (stub_type != arm_stub_none)
          {
            stub_entry = elf32_arm_get_stub_entry (input_section,
                                                   sym_sec, h,
                                                   rel, globals,
                                                   stub_type);
            if (stub_entry != NULL)
              value = (stub_entry->stub_offset
                       + stub_entry->stub_sec->output_offset
                       + stub_entry->stub_sec->output_section->vma);
          }

        relocation = value + signed_addend;
        relocation -= (input_section->output_section->vma
                       + input_section->output_offset
                       + rel->r_offset);
        signed_check = (bfd_signed_vma) relocation;

        if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
          overflow = true;

        /* Put RELOCATION back into the insn.  */
        {
          bfd_vma S  = (relocation & 0x00100000) >> 20;
          bfd_vma J2 = (relocation & 0x00080000) >> 19;
          bfd_vma J1 = (relocation & 0x00040000) >> 18;
          bfd_vma hi = (relocation & 0x0003f000) >> 12;
          bfd_vma lo = (relocation & 0x00000ffe) >> 1;

          upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
          lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
        }

        /* Put the relocated value back in the object file:  */
        bfd_put_16 (input_bfd, upper_insn, hit_data);
        bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

        return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
    case R_ARM_THM_JUMP11:
    case R_ARM_THM_JUMP8:
    case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction.  */
      {
        bfd_signed_vma relocation;
        bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
        bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
        bfd_signed_vma signed_check;

        /* CBZ cannot jump backward.  */
        if (r_type == R_ARM_THM_JUMP6)
          {
            reloc_signed_min = 0;
            if (globals->use_rel)
              signed_addend = ((addend & 0x200) >> 3) | ((addend & 0xf8) >> 2);
          }

        relocation = value + signed_addend;

        relocation -= (input_section->output_section->vma
                       + input_section->output_offset
                       + rel->r_offset);

        relocation >>= howto->rightshift;
        signed_check = relocation;

        if (r_type == R_ARM_THM_JUMP6)
          relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
        else
          relocation &= howto->dst_mask;
        relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));

        bfd_put_16 (input_bfd, relocation, hit_data);

        /* Assumes two's complement.  */
        if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
          return bfd_reloc_overflow;

        return bfd_reloc_ok;
      }
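    /* R_ARM_THM_JUMP6 above targets the Thumb CBZ/CBNZ encoding, whose
       branch offset is the always-forward value i:imm5:'0' held in bit 9
       and bits 7..3 of the instruction.  A stand-alone round-trip of the
       packing used above, purely for illustration (helper names
       invented):

         #include <stdint.h>

         // Extract the byte offset encoded in a CBZ/CBNZ instruction.
         static uint32_t cbz_byte_offset (uint16_t insn)
         {
           return ((insn & 0x0200u) >> 3) | ((insn & 0x00f8u) >> 2);
         }

         // Insert a halfword count (byte offset >> 1, range 0..63).
         static uint16_t cbz_set_offset (uint16_t insn, uint32_t halfwords)
         {
           insn &= (uint16_t) ~0x02f8u;
           insn |= (uint16_t) (((halfwords & 0x20u) << 4)
                               | ((halfwords & 0x1fu) << 3));
           return insn;
         }

       The zero lower bound on the range check above reflects the same
       forward-only restriction.  */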
    case R_ARM_ALU_PCREL7_0:
    case R_ARM_ALU_PCREL15_8:
    case R_ARM_ALU_PCREL23_15:
      {
        bfd_vma insn;
        bfd_vma relocation;

        insn = bfd_get_32 (input_bfd, hit_data);
        if (globals->use_rel)
          {
            /* Extract the addend.  */
            addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
            signed_addend = addend;
          }
        relocation = value + signed_addend;

        relocation -= (input_section->output_section->vma
                       + input_section->output_offset
                       + rel->r_offset);
        insn = (insn & ~0xfff)
               | ((howto->bitpos << 7) & 0xf00)
               | ((relocation >> howto->bitpos) & 0xff);
        bfd_put_32 (input_bfd, value, hit_data);
      }
      return bfd_reloc_ok;

    case R_ARM_GNU_VTINHERIT:
    case R_ARM_GNU_VTENTRY:
      return bfd_reloc_ok;
    case R_ARM_GOTOFF32:
      /* Relocation is relative to the start of the
         global offset table.  */

      BFD_ASSERT (sgot != NULL);
      if (sgot == NULL)
        return bfd_reloc_notsupported;

      /* If we are addressing a Thumb function, we need to adjust the
         address by one, so that attempts to call the function pointer will
         correctly interpret it as Thumb code.  */
      if (branch_type == ST_BRANCH_TO_THUMB)
        value += 1;

      /* Note that sgot->output_offset is not involved in this
         calculation.  We always want the start of .got.  If we
         define _GLOBAL_OFFSET_TABLE in a different way, as is
         permitted by the ABI, we might have to change this
         calculation.  */
      value -= sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
                                       contents, rel->r_offset, value,
                                       rel->r_addend);

    case R_ARM_BASE_PREL:
      /* Use global offset table as symbol value.  */
      BFD_ASSERT (sgot != NULL);

      if (sgot == NULL)
        return bfd_reloc_notsupported;

      *unresolved_reloc_p = false;
      value = sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
                                       contents, rel->r_offset, value,
                                       rel->r_addend);
11510 case R_ARM_GOT_PREL
:
11511 /* Relocation is to the entry for this symbol in the
11512 global offset table. */
11514 return bfd_reloc_notsupported
;
11516 if (dynreloc_st_type
== STT_GNU_IFUNC
11517 && plt_offset
!= (bfd_vma
) -1
11518 && (h
== NULL
|| SYMBOL_REFERENCES_LOCAL (info
, h
)))
11520 /* We have a relocation against a locally-binding STT_GNU_IFUNC
11521 symbol, and the relocation resolves directly to the runtime
11522 target rather than to the .iplt entry. This means that any
11523 .got entry would be the same value as the .igot.plt entry,
11524 so there's no point creating both. */
11525 sgot
= globals
->root
.igotplt
;
11526 value
= sgot
->output_offset
+ gotplt_offset
;
11528 else if (h
!= NULL
)
11532 off
= h
->got
.offset
;
11533 BFD_ASSERT (off
!= (bfd_vma
) -1);
11534 if ((off
& 1) != 0)
11536 /* We have already processsed one GOT relocation against
11539 if (globals
->root
.dynamic_sections_created
11540 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
11541 *unresolved_reloc_p
= false;
11545 Elf_Internal_Rela outrel
;
11548 if (((h
->dynindx
!= -1) || globals
->fdpic_p
)
11549 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
11551 /* If the symbol doesn't resolve locally in a static
11552 object, we have an undefined reference. If the
11553 symbol doesn't resolve locally in a dynamic object,
11554 it should be resolved by the dynamic linker. */
11555 if (globals
->root
.dynamic_sections_created
)
11557 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_GLOB_DAT
);
11558 *unresolved_reloc_p
= false;
11562 outrel
.r_addend
= 0;
11566 if (dynreloc_st_type
== STT_GNU_IFUNC
)
11567 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
11568 else if (bfd_link_pic (info
)
11569 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info
, h
))
11570 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
11574 if (globals
->fdpic_p
)
11577 outrel
.r_addend
= dynreloc_value
;
11580 /* The GOT entry is initialized to zero by default.
11581 See if we should install a different value. */
11582 if (outrel
.r_addend
!= 0
11583 && (globals
->use_rel
|| outrel
.r_info
== 0))
11585 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11586 sgot
->contents
+ off
);
11587 outrel
.r_addend
= 0;
11591 arm_elf_add_rofixup (output_bfd
,
11592 elf32_arm_hash_table (info
)->srofixup
,
11593 sgot
->output_section
->vma
11594 + sgot
->output_offset
+ off
);
11596 else if (outrel
.r_info
!= 0)
11598 outrel
.r_offset
= (sgot
->output_section
->vma
11599 + sgot
->output_offset
11601 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11604 h
->got
.offset
|= 1;
11606 value
= sgot
->output_offset
+ off
;
11612 BFD_ASSERT (local_got_offsets
!= NULL
11613 && local_got_offsets
[r_symndx
] != (bfd_vma
) -1);
11615 off
= local_got_offsets
[r_symndx
];
11617 /* The offset must always be a multiple of 4. We use the
11618 least significant bit to record whether we have already
11619 generated the necessary reloc. */
11620 if ((off
& 1) != 0)
11624 Elf_Internal_Rela outrel
;
11627 if (dynreloc_st_type
== STT_GNU_IFUNC
)
11628 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
11629 else if (bfd_link_pic (info
))
11630 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
11634 if (globals
->fdpic_p
)
11638 /* The GOT entry is initialized to zero by default.
11639 See if we should install a different value. */
11640 if (globals
->use_rel
|| outrel
.r_info
== 0)
11641 bfd_put_32 (output_bfd
, dynreloc_value
, sgot
->contents
+ off
);
11644 arm_elf_add_rofixup (output_bfd
,
11646 sgot
->output_section
->vma
11647 + sgot
->output_offset
+ off
);
11649 else if (outrel
.r_info
!= 0)
11651 outrel
.r_addend
= addend
+ dynreloc_value
;
11652 outrel
.r_offset
= (sgot
->output_section
->vma
11653 + sgot
->output_offset
11655 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11658 local_got_offsets
[r_symndx
] |= 1;
11661 value
= sgot
->output_offset
+ off
;
11663 if (r_type
!= R_ARM_GOT32
)
11664 value
+= sgot
->output_section
->vma
;
11666 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11667 contents
, rel
->r_offset
, value
,
11670 case R_ARM_TLS_LDO32
:
11671 value
= value
- dtpoff_base (info
);
11673 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11674 contents
, rel
->r_offset
, value
,
11677 case R_ARM_TLS_LDM32
:
11678 case R_ARM_TLS_LDM32_FDPIC
:
11685 off
= globals
->tls_ldm_got
.offset
;
11687 if ((off
& 1) != 0)
11691 /* If we don't know the module number, create a relocation
11693 if (bfd_link_dll (info
))
11695 Elf_Internal_Rela outrel
;
11697 if (srelgot
== NULL
)
11700 outrel
.r_addend
= 0;
11701 outrel
.r_offset
= (sgot
->output_section
->vma
11702 + sgot
->output_offset
+ off
);
11703 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32
);
11705 if (globals
->use_rel
)
11706 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11707 sgot
->contents
+ off
);
11709 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11712 bfd_put_32 (output_bfd
, 1, sgot
->contents
+ off
);
11714 globals
->tls_ldm_got
.offset
|= 1;
11717 if (r_type
== R_ARM_TLS_LDM32_FDPIC
)
11719 bfd_put_32 (output_bfd
,
11720 globals
->root
.sgot
->output_offset
+ off
,
11721 contents
+ rel
->r_offset
);
11723 return bfd_reloc_ok
;
11727 value
= sgot
->output_section
->vma
+ sgot
->output_offset
+ off
11728 - (input_section
->output_section
->vma
11729 + input_section
->output_offset
+ rel
->r_offset
);
11731 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11732 contents
, rel
->r_offset
, value
,
11737 case R_ARM_TLS_CALL
:
11738 case R_ARM_THM_TLS_CALL
:
11739 case R_ARM_TLS_GD32
:
11740 case R_ARM_TLS_GD32_FDPIC
:
11741 case R_ARM_TLS_IE32
:
11742 case R_ARM_TLS_IE32_FDPIC
:
11743 case R_ARM_TLS_GOTDESC
:
11744 case R_ARM_TLS_DESCSEQ
:
11745 case R_ARM_THM_TLS_DESCSEQ
:
11747 bfd_vma off
, offplt
;
11751 BFD_ASSERT (sgot
!= NULL
);
11756 dyn
= globals
->root
.dynamic_sections_created
;
11757 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
,
11758 bfd_link_pic (info
),
11760 && (!bfd_link_pic (info
)
11761 || !SYMBOL_REFERENCES_LOCAL (info
, h
)))
11763 *unresolved_reloc_p
= false;
11766 off
= h
->got
.offset
;
11767 offplt
= elf32_arm_hash_entry (h
)->tlsdesc_got
;
11768 tls_type
= ((struct elf32_arm_link_hash_entry
*) h
)->tls_type
;
11772 BFD_ASSERT (local_got_offsets
!= NULL
);
11774 if (r_symndx
>= elf32_arm_num_entries (input_bfd
))
11776 _bfd_error_handler (_("\
11777 %pB: expected symbol index in range 0..%lu but found local symbol with index %lu"),
11779 (unsigned long) elf32_arm_num_entries (input_bfd
),
11783 off
= local_got_offsets
[r_symndx
];
11784 offplt
= local_tlsdesc_gotents
[r_symndx
];
11785 tls_type
= elf32_arm_local_got_tls_type (input_bfd
)[r_symndx
];
11788 /* Linker relaxations happens from one of the
11789 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
11790 if (ELF32_R_TYPE (rel
->r_info
) != r_type
)
11791 tls_type
= GOT_TLS_IE
;
11793 BFD_ASSERT (tls_type
!= GOT_UNKNOWN
);
11795 if ((off
& 1) != 0)
11799 bool need_relocs
= false;
11800 Elf_Internal_Rela outrel
;
11803 /* The GOT entries have not been initialized yet. Do it
11804 now, and emit any relocations. If both an IE GOT and a
11805 GD GOT are necessary, we emit the GD first. */
11807 if ((bfd_link_dll (info
) || indx
!= 0)
11809 || (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
11810 && !resolved_to_zero
)
11811 || h
->root
.type
!= bfd_link_hash_undefweak
))
11813 need_relocs
= true;
11814 BFD_ASSERT (srelgot
!= NULL
);
11817 if (tls_type
& GOT_TLS_GDESC
)
11821 /* We should have relaxed, unless this is an undefined
11823 BFD_ASSERT ((h
&& (h
->root
.type
== bfd_link_hash_undefweak
))
11824 || bfd_link_dll (info
));
11825 BFD_ASSERT (globals
->sgotplt_jump_table_size
+ offplt
+ 8
11826 <= globals
->root
.sgotplt
->size
);
11828 outrel
.r_addend
= 0;
11829 outrel
.r_offset
= (globals
->root
.sgotplt
->output_section
->vma
11830 + globals
->root
.sgotplt
->output_offset
11832 + globals
->sgotplt_jump_table_size
);
11834 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DESC
);
11835 sreloc
= globals
->root
.srelplt
;
11836 loc
= sreloc
->contents
;
11837 loc
+= globals
->next_tls_desc_index
++ * RELOC_SIZE (globals
);
11838 BFD_ASSERT (loc
+ RELOC_SIZE (globals
)
11839 <= sreloc
->contents
+ sreloc
->size
);
11841 SWAP_RELOC_OUT (globals
) (output_bfd
, &outrel
, loc
);
11843 /* For globals, the first word in the relocation gets
11844 the relocation index and the top bit set, or zero,
11845 if we're binding now. For locals, it gets the
11846 symbol's offset in the tls section. */
11847 bfd_put_32 (output_bfd
,
11848 !h
? value
- elf_hash_table (info
)->tls_sec
->vma
11849 : info
->flags
& DF_BIND_NOW
? 0
11850 : 0x80000000 | ELF32_R_SYM (outrel
.r_info
),
11851 globals
->root
.sgotplt
->contents
+ offplt
11852 + globals
->sgotplt_jump_table_size
);
11854 /* Second word in the relocation is always zero. */
11855 bfd_put_32 (output_bfd
, 0,
11856 globals
->root
.sgotplt
->contents
+ offplt
11857 + globals
->sgotplt_jump_table_size
+ 4);
11859 if (tls_type
& GOT_TLS_GD
)
11863 outrel
.r_addend
= 0;
11864 outrel
.r_offset
= (sgot
->output_section
->vma
11865 + sgot
->output_offset
11867 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DTPMOD32
);
11869 if (globals
->use_rel
)
11870 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11871 sgot
->contents
+ cur_off
);
11873 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11876 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
11877 sgot
->contents
+ cur_off
+ 4);
11880 outrel
.r_addend
= 0;
11881 outrel
.r_info
= ELF32_R_INFO (indx
,
11882 R_ARM_TLS_DTPOFF32
);
11883 outrel
.r_offset
+= 4;
11885 if (globals
->use_rel
)
11886 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11887 sgot
->contents
+ cur_off
+ 4);
11889 elf32_arm_add_dynreloc (output_bfd
, info
,
11895 /* If we are not emitting relocations for a
11896 general dynamic reference, then we must be in a
11897 static link or an executable link with the
11898 symbol binding locally. Mark it as belonging
11899 to module 1, the executable. */
11900 bfd_put_32 (output_bfd
, 1,
11901 sgot
->contents
+ cur_off
);
11902 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
11903 sgot
->contents
+ cur_off
+ 4);
11909 if (tls_type
& GOT_TLS_IE
)
11914 outrel
.r_addend
= value
- dtpoff_base (info
);
11916 outrel
.r_addend
= 0;
11917 outrel
.r_offset
= (sgot
->output_section
->vma
11918 + sgot
->output_offset
11920 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_TPOFF32
);
11922 if (globals
->use_rel
)
11923 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11924 sgot
->contents
+ cur_off
);
11926 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11929 bfd_put_32 (output_bfd
, tpoff (info
, value
),
11930 sgot
->contents
+ cur_off
);
11935 h
->got
.offset
|= 1;
11937 local_got_offsets
[r_symndx
] |= 1;
11940 if ((tls_type
& GOT_TLS_GD
) && r_type
!= R_ARM_TLS_GD32
&& r_type
!= R_ARM_TLS_GD32_FDPIC
)
11942 else if (tls_type
& GOT_TLS_GDESC
)
11945 if (ELF32_R_TYPE (rel
->r_info
) == R_ARM_TLS_CALL
11946 || ELF32_R_TYPE (rel
->r_info
) == R_ARM_THM_TLS_CALL
)
11948 bfd_signed_vma offset
;
11949 /* TLS stubs are arm mode. The original symbol is a
11950 data object, so branch_type is bogus. */
11951 branch_type
= ST_BRANCH_TO_ARM
;
11952 enum elf32_arm_stub_type stub_type
11953 = arm_type_of_stub (info
, input_section
, rel
,
11954 st_type
, &branch_type
,
11955 (struct elf32_arm_link_hash_entry
*)h
,
11956 globals
->tls_trampoline
, globals
->root
.splt
,
11957 input_bfd
, sym_name
);
11959 if (stub_type
!= arm_stub_none
)
11961 struct elf32_arm_stub_hash_entry
*stub_entry
11962 = elf32_arm_get_stub_entry
11963 (input_section
, globals
->root
.splt
, 0, rel
,
11964 globals
, stub_type
);
11965 offset
= (stub_entry
->stub_offset
11966 + stub_entry
->stub_sec
->output_offset
11967 + stub_entry
->stub_sec
->output_section
->vma
);
11970 offset
= (globals
->root
.splt
->output_section
->vma
11971 + globals
->root
.splt
->output_offset
11972 + globals
->tls_trampoline
);
11974 if (ELF32_R_TYPE (rel
->r_info
) == R_ARM_TLS_CALL
)
11976 unsigned long inst
;
11978 offset
-= (input_section
->output_section
->vma
11979 + input_section
->output_offset
11980 + rel
->r_offset
+ 8);
11982 inst
= offset
>> 2;
11983 inst
&= 0x00ffffff;
11984 value
= inst
| (globals
->use_blx
? 0xfa000000 : 0xeb000000);
11988 /* Thumb blx encodes the offset in a complicated
11990 unsigned upper_insn
, lower_insn
;
11993 offset
-= (input_section
->output_section
->vma
11994 + input_section
->output_offset
11995 + rel
->r_offset
+ 4);
11997 if (stub_type
!= arm_stub_none
11998 && arm_stub_is_thumb (stub_type
))
12000 lower_insn
= 0xd000;
12004 lower_insn
= 0xc000;
12005 /* Round up the offset to a word boundary. */
12006 offset
= (offset
+ 2) & ~2;
12010 upper_insn
= (0xf000
12011 | ((offset
>> 12) & 0x3ff)
12013 lower_insn
|= (((!((offset
>> 23) & 1)) ^ neg
) << 13)
12014 | (((!((offset
>> 22) & 1)) ^ neg
) << 11)
12015 | ((offset
>> 1) & 0x7ff);
12016 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
12017 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
12018 return bfd_reloc_ok
;
12021 /* These relocations needs special care, as besides the fact
12022 they point somewhere in .gotplt, the addend must be
12023 adjusted accordingly depending on the type of instruction
12025 else if ((r_type
== R_ARM_TLS_GOTDESC
) && (tls_type
& GOT_TLS_GDESC
))
12027 unsigned long data
, insn
;
12030 data
= bfd_get_signed_32 (input_bfd
, hit_data
);
12036 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
- data
);
12037 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
12038 insn
= (insn
<< 16)
12039 | bfd_get_16 (input_bfd
,
12040 contents
+ rel
->r_offset
- data
+ 2);
12041 if ((insn
& 0xf800c000) == 0xf000c000)
12044 else if ((insn
& 0xffffff00) == 0x4400)
12050 /* xgettext:c-format */
12051 (_("%pB(%pA+%#" PRIx64
"): "
12052 "unexpected %s instruction '%#lx' "
12053 "referenced by TLS_GOTDESC"),
12054 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12056 return bfd_reloc_notsupported
;
12061 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
- data
);
12063 switch (insn
>> 24)
12065 case 0xeb: /* bl */
12066 case 0xfa: /* blx */
12070 case 0xe0: /* add */
12076 /* xgettext:c-format */
12077 (_("%pB(%pA+%#" PRIx64
"): "
12078 "unexpected %s instruction '%#lx' "
12079 "referenced by TLS_GOTDESC"),
12080 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12082 return bfd_reloc_notsupported
;
12086 value
+= ((globals
->root
.sgotplt
->output_section
->vma
12087 + globals
->root
.sgotplt
->output_offset
+ off
)
12088 - (input_section
->output_section
->vma
12089 + input_section
->output_offset
12091 + globals
->sgotplt_jump_table_size
);
12094 value
= ((globals
->root
.sgot
->output_section
->vma
12095 + globals
->root
.sgot
->output_offset
+ off
)
12096 - (input_section
->output_section
->vma
12097 + input_section
->output_offset
+ rel
->r_offset
));
12099 if (globals
->fdpic_p
&& (r_type
== R_ARM_TLS_GD32_FDPIC
||
12100 r_type
== R_ARM_TLS_IE32_FDPIC
))
12102 /* For FDPIC relocations, resolve to the offset of the GOT
12103 entry from the start of GOT. */
12104 bfd_put_32 (output_bfd
,
12105 globals
->root
.sgot
->output_offset
+ off
,
12106 contents
+ rel
->r_offset
);
12108 return bfd_reloc_ok
;
12112 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
12113 contents
, rel
->r_offset
, value
,
    case R_ARM_TLS_LE32:
      if (bfd_link_dll (info))
        {
          _bfd_error_handler
            /* xgettext:c-format */
            (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
               "in shared object"),
             input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
          return bfd_reloc_notsupported;
        }
      else
        value = tpoff (info, value);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
                                       contents, rel->r_offset, value,
                                       rel->r_addend);
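    /* For the local-exec model resolved above, the linker turns the
       symbol's address into a fixed offset from the thread pointer.
       Under the ARM TLS ABI the first TLS block starts 8 bytes past the
       thread pointer, rounded up to the TLS segment's alignment, which
       is what tpoff () accounts for.  A simplified sketch that ignores
       the alignment rounding (the helper is invented for the
       illustration):

         #include <stdint.h>

         static uint32_t le_tp_offset (uint32_t sym_vma, uint32_t tls_seg_vma)
         {
           return 8 + (sym_vma - tls_seg_vma);
         }

       At run time the generated code adds this constant to the thread
       pointer register to reach the variable, which is why the
       relocation is rejected for shared objects above: the offset has to
       be final at static link time.  */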
    case R_ARM_V4BX:
      if (globals->fix_v4bx)
        {
          bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

          /* Ensure that we have a BX instruction.  */
          BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);

          if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
            {
              /* Branch to veneer.  */
              bfd_vma glue_addr;
              glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
              glue_addr -= input_section->output_section->vma
                           + input_section->output_offset
                           + rel->r_offset + 8;
              insn = (insn & 0xf0000000) | 0x0a000000
                     | ((glue_addr >> 2) & 0x00ffffff);
            }
          else
            {
              /* Preserve Rm (lowest four bits) and the condition code
                 (highest four bits). Other bits encode MOV PC,Rm.  */
              insn = (insn & 0xf000000f) | 0x01a0f000;
            }

          bfd_put_32 (input_bfd, insn, hit_data);
        }
      return bfd_reloc_ok;
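    /* The ARMv4 "fix v4bx" rewrite above turns BX Rm into MOV PC, Rm,
       keeping the condition code and the register.  A concrete example
       (illustrative helper, not used by the linker): BXNE r3 is
       0x112fff13, and

         #include <stdint.h>

         static uint32_t rewrite_bx_to_mov_pc (uint32_t insn)
         {
           // keep cond (bits 31-28) and Rm (bits 3-0), set MOV PC, Rm
           return (insn & 0xf000000fu) | 0x01a0f000u;
         }

       maps it to 0x11a0f003, i.e. MOVNE pc, r3.  ARMv4 has no BX, so the
       interworking bit is simply lost; that is why the fix_v4bx == 2
       mode prefers branching to a veneer when Rm is not PC.  */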
    case R_ARM_MOVW_ABS_NC:
    case R_ARM_MOVT_ABS:
    case R_ARM_MOVW_PREL_NC:
    case R_ARM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the group relocations.
       Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
       and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
    case R_ARM_MOVW_BREL_NC:
    case R_ARM_MOVW_BREL:
    case R_ARM_MOVT_BREL:
      {
        bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

        if (globals->use_rel)
          {
            addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
            signed_addend = (addend ^ 0x8000) - 0x8000;
          }

        value += signed_addend;

        if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
          value -= (input_section->output_section->vma
                    + input_section->output_offset + rel->r_offset);

        if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
          return bfd_reloc_overflow;

        if (branch_type == ST_BRANCH_TO_THUMB)
          value |= 1;

        if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
            || r_type == R_ARM_MOVT_BREL)
          value >>= 16;

        insn &= 0xfff0f000;
        insn |= value & 0xfff;
        insn |= (value & 0xf000) << 4;
        bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
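      /* A MOVW/MOVT pair materialises a 32-bit constant 16 bits at a
         time; the group above inserts each half into the imm4:imm12
         fields of the Arm encoding.  An illustrative stand-alone encoder
         (the helper names are invented):

           #include <stdint.h>

           static uint32_t arm_insert_imm16 (uint32_t insn, uint32_t imm16)
           {
             insn &= 0xfff0f000u;              // clear imm4 and imm12
             insn |= imm16 & 0xfffu;           // imm12
             insn |= (imm16 & 0xf000u) << 4;   // imm4
             return insn;
           }

           // movw r0, #:lower16:sym  /  movt r0, #:upper16:sym
           static void materialise (uint32_t addr, uint32_t *movw, uint32_t *movt)
           {
             *movw = arm_insert_imm16 (0xe3000000u, addr & 0xffffu);
             *movt = arm_insert_imm16 (0xe3400000u, addr >> 16);
           }

         This is the same masking performed above, with the MOVT
         relocations shifting VALUE right by 16 first.  */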
    case R_ARM_THM_MOVW_ABS_NC:
    case R_ARM_THM_MOVT_ABS:
    case R_ARM_THM_MOVW_PREL_NC:
    case R_ARM_THM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the above relocations.
       Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
       R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
       as R_ARM_THM_MOVT_ABS.  */
    case R_ARM_THM_MOVW_BREL_NC:
    case R_ARM_THM_MOVW_BREL:
    case R_ARM_THM_MOVT_BREL:
      {
        bfd_vma insn;

        insn = bfd_get_16 (input_bfd, hit_data) << 16;
        insn |= bfd_get_16 (input_bfd, hit_data + 2);

        if (globals->use_rel)
          {
            addend = ((insn >> 4)  & 0xf000)
                     | ((insn >> 15) & 0x0800)
                     | ((insn >> 4)  & 0x0700)
                     | (insn         & 0x00ff);
            signed_addend = (addend ^ 0x8000) - 0x8000;
          }

        value += signed_addend;

        if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
          value -= (input_section->output_section->vma
                    + input_section->output_offset + rel->r_offset);

        if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
          return bfd_reloc_overflow;

        if (branch_type == ST_BRANCH_TO_THUMB)
          value |= 1;

        if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
            || r_type == R_ARM_THM_MOVT_BREL)
          value >>= 16;

        insn &= 0xfbf08f00;
        insn |= (value & 0xf000) << 4;
        insn |= (value & 0x0800) << 15;
        insn |= (value & 0x0700) << 4;
        insn |= (value & 0x00ff);

        bfd_put_16 (input_bfd, insn >> 16, hit_data);
        bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
      }
      return bfd_reloc_ok;
12262 case R_ARM_ALU_PC_G0_NC
:
12263 case R_ARM_ALU_PC_G1_NC
:
12264 case R_ARM_ALU_PC_G0
:
12265 case R_ARM_ALU_PC_G1
:
12266 case R_ARM_ALU_PC_G2
:
12267 case R_ARM_ALU_SB_G0_NC
:
12268 case R_ARM_ALU_SB_G1_NC
:
12269 case R_ARM_ALU_SB_G0
:
12270 case R_ARM_ALU_SB_G1
:
12271 case R_ARM_ALU_SB_G2
:
12273 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12274 bfd_vma pc
= input_section
->output_section
->vma
12275 + input_section
->output_offset
+ rel
->r_offset
;
12276 /* sb is the origin of the *segment* containing the symbol. */
12277 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12280 bfd_signed_vma signed_value
;
12283 /* Determine which group of bits to select. */
12286 case R_ARM_ALU_PC_G0_NC
:
12287 case R_ARM_ALU_PC_G0
:
12288 case R_ARM_ALU_SB_G0_NC
:
12289 case R_ARM_ALU_SB_G0
:
12293 case R_ARM_ALU_PC_G1_NC
:
12294 case R_ARM_ALU_PC_G1
:
12295 case R_ARM_ALU_SB_G1_NC
:
12296 case R_ARM_ALU_SB_G1
:
12300 case R_ARM_ALU_PC_G2
:
12301 case R_ARM_ALU_SB_G2
:
12309 /* If REL, extract the addend from the insn. If RELA, it will
12310 have already been fetched for us. */
12311 if (globals
->use_rel
)
12314 bfd_vma constant
= insn
& 0xff;
12315 bfd_vma rotation
= (insn
& 0xf00) >> 8;
12318 signed_addend
= constant
;
12321 /* Compensate for the fact that in the instruction, the
12322 rotation is stored in multiples of 2 bits. */
12325 /* Rotate "constant" right by "rotation" bits. */
12326 signed_addend
= (constant
>> rotation
) |
12327 (constant
<< (8 * sizeof (bfd_vma
) - rotation
));
12330 /* Determine if the instruction is an ADD or a SUB.
12331 (For REL, this determines the sign of the addend.) */
12332 negative
= identify_add_or_sub (insn
);
12336 /* xgettext:c-format */
12337 (_("%pB(%pA+%#" PRIx64
"): only ADD or SUB instructions "
12338 "are allowed for ALU group relocations"),
12339 input_bfd
, input_section
, (uint64_t) rel
->r_offset
);
12340 return bfd_reloc_overflow
;
12343 signed_addend
*= negative
;
12346 /* Compute the value (X) to go in the place. */
12347 if (r_type
== R_ARM_ALU_PC_G0_NC
12348 || r_type
== R_ARM_ALU_PC_G1_NC
12349 || r_type
== R_ARM_ALU_PC_G0
12350 || r_type
== R_ARM_ALU_PC_G1
12351 || r_type
== R_ARM_ALU_PC_G2
)
12353 signed_value
= value
- pc
+ signed_addend
;
12355 /* Section base relative. */
12356 signed_value
= value
- sb
+ signed_addend
;
12358 /* If the target symbol is a Thumb function, then set the
12359 Thumb bit in the address. */
12360 if (branch_type
== ST_BRANCH_TO_THUMB
)
12363 /* Calculate the value of the relevant G_n, in encoded
12364 constant-with-rotation format. */
12365 g_n
= calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12368 /* Check for overflow if required. */
12369 if ((r_type
== R_ARM_ALU_PC_G0
12370 || r_type
== R_ARM_ALU_PC_G1
12371 || r_type
== R_ARM_ALU_PC_G2
12372 || r_type
== R_ARM_ALU_SB_G0
12373 || r_type
== R_ARM_ALU_SB_G1
12374 || r_type
== R_ARM_ALU_SB_G2
) && residual
!= 0)
12377 /* xgettext:c-format */
12378 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12379 "splitting %#" PRIx64
" for group relocation %s"),
12380 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12381 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12383 return bfd_reloc_overflow
;
12386 /* Mask out the value and the ADD/SUB part of the opcode; take care
12387 not to destroy the S bit. */
12388 insn
&= 0xff1ff000;
12390 /* Set the opcode according to whether the value to go in the
12391 place is negative. */
12392 if (signed_value
< 0)
12397 /* Encode the offset. */
12400 bfd_put_32 (input_bfd
, insn
, hit_data
);
12402 return bfd_reloc_ok
;
12404 case R_ARM_LDR_PC_G0
:
12405 case R_ARM_LDR_PC_G1
:
12406 case R_ARM_LDR_PC_G2
:
12407 case R_ARM_LDR_SB_G0
:
12408 case R_ARM_LDR_SB_G1
:
12409 case R_ARM_LDR_SB_G2
:
12411 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12412 bfd_vma pc
= input_section
->output_section
->vma
12413 + input_section
->output_offset
+ rel
->r_offset
;
12414 /* sb is the origin of the *segment* containing the symbol. */
12415 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12417 bfd_signed_vma signed_value
;
12420 /* Determine which groups of bits to calculate. */
12423 case R_ARM_LDR_PC_G0
:
12424 case R_ARM_LDR_SB_G0
:
12428 case R_ARM_LDR_PC_G1
:
12429 case R_ARM_LDR_SB_G1
:
12433 case R_ARM_LDR_PC_G2
:
12434 case R_ARM_LDR_SB_G2
:
12442 /* If REL, extract the addend from the insn. If RELA, it will
12443 have already been fetched for us. */
12444 if (globals
->use_rel
)
12446 int negative
= (insn
& (1 << 23)) ? 1 : -1;
12447 signed_addend
= negative
* (insn
& 0xfff);
12450 /* Compute the value (X) to go in the place. */
12451 if (r_type
== R_ARM_LDR_PC_G0
12452 || r_type
== R_ARM_LDR_PC_G1
12453 || r_type
== R_ARM_LDR_PC_G2
)
12455 signed_value
= value
- pc
+ signed_addend
;
12457 /* Section base relative. */
12458 signed_value
= value
- sb
+ signed_addend
;
12460 /* Calculate the value of the relevant G_{n-1} to obtain
12461 the residual at that stage. */
12462 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12463 group
- 1, &residual
);
12465 /* Check for overflow. */
12466 if (residual
>= 0x1000)
12469 /* xgettext:c-format */
12470 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12471 "splitting %#" PRIx64
" for group relocation %s"),
12472 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12473 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12475 return bfd_reloc_overflow
;
12478 /* Mask out the value and U bit. */
12479 insn
&= 0xff7ff000;
12481 /* Set the U bit if the value to go in the place is non-negative. */
12482 if (signed_value
>= 0)
12485 /* Encode the offset. */
12488 bfd_put_32 (input_bfd
, insn
, hit_data
);
12490 return bfd_reloc_ok
;
12492 case R_ARM_LDRS_PC_G0
:
12493 case R_ARM_LDRS_PC_G1
:
12494 case R_ARM_LDRS_PC_G2
:
12495 case R_ARM_LDRS_SB_G0
:
12496 case R_ARM_LDRS_SB_G1
:
12497 case R_ARM_LDRS_SB_G2
:
12499 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12500 bfd_vma pc
= input_section
->output_section
->vma
12501 + input_section
->output_offset
+ rel
->r_offset
;
12502 /* sb is the origin of the *segment* containing the symbol. */
12503 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12505 bfd_signed_vma signed_value
;
12508 /* Determine which groups of bits to calculate. */
12511 case R_ARM_LDRS_PC_G0
:
12512 case R_ARM_LDRS_SB_G0
:
12516 case R_ARM_LDRS_PC_G1
:
12517 case R_ARM_LDRS_SB_G1
:
12521 case R_ARM_LDRS_PC_G2
:
12522 case R_ARM_LDRS_SB_G2
:
12530 /* If REL, extract the addend from the insn. If RELA, it will
12531 have already been fetched for us. */
12532 if (globals
->use_rel
)
12534 int negative
= (insn
& (1 << 23)) ? 1 : -1;
12535 signed_addend
= negative
* (((insn
& 0xf00) >> 4) + (insn
& 0xf));
12538 /* Compute the value (X) to go in the place. */
12539 if (r_type
== R_ARM_LDRS_PC_G0
12540 || r_type
== R_ARM_LDRS_PC_G1
12541 || r_type
== R_ARM_LDRS_PC_G2
)
12543 signed_value
= value
- pc
+ signed_addend
;
12545 /* Section base relative. */
12546 signed_value
= value
- sb
+ signed_addend
;
12548 /* Calculate the value of the relevant G_{n-1} to obtain
12549 the residual at that stage. */
12550 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12551 group
- 1, &residual
);
12553 /* Check for overflow. */
12554 if (residual
>= 0x100)
12557 /* xgettext:c-format */
12558 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12559 "splitting %#" PRIx64
" for group relocation %s"),
12560 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12561 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12563 return bfd_reloc_overflow
;
12566 /* Mask out the value and U bit. */
12567 insn
&= 0xff7ff0f0;
12569 /* Set the U bit if the value to go in the place is non-negative. */
12570 if (signed_value
>= 0)
12573 /* Encode the offset. */
12574 insn
|= ((residual
& 0xf0) << 4) | (residual
& 0xf);
12576 bfd_put_32 (input_bfd
, insn
, hit_data
);
12578 return bfd_reloc_ok
;
    case R_ARM_LDC_PC_G0:
    case R_ARM_LDC_PC_G1:
    case R_ARM_LDC_PC_G2:
    case R_ARM_LDC_SB_G0:
    case R_ARM_LDC_SB_G1:
    case R_ARM_LDC_SB_G2:
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_signed_vma signed_value;
	int group = 0;
	bfd_vma residual = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDC_PC_G0:
	  case R_ARM_LDC_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDC_PC_G1:
	  case R_ARM_LDC_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDC_PC_G2:
	  case R_ARM_LDC_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * ((insn & 0xff) << 2);
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDC_PC_G0
	    || r_type == R_ARM_LDC_PC_G1
	    || r_type == R_ARM_LDC_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  (The absolute value to go in the place must be
	   divisible by four and, after having been divided by four, must
	   fit in eight bits.)  */
	if ((residual & 0x3) != 0 || residual >= 0x400)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7fff00;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= residual >> 2;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
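
      /* For the LDC-class relocations above the offset field counts
	 words, not bytes: a residual of 0x40 passes the alignment check
	 (0x40 & 0x3 == 0), is below the 0x400 limit, and is stored as
	 0x40 >> 2 = 0x10 in the 8-bit immediate, with the U bit again
	 giving the sign.  */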
    case R_ARM_THM_ALU_ABS_G0_NC:
    case R_ARM_THM_ALU_ABS_G1_NC:
    case R_ARM_THM_ALU_ABS_G2_NC:
    case R_ARM_THM_ALU_ABS_G3_NC:
      {
	const int shift_array[4] = {0, 8, 16, 24};
	bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma addr = value;
	int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];

	/* Compute address.  */
	if (globals->use_rel)
	  signed_addend = insn & 0xff;
	addr += signed_addend;
	if (branch_type == ST_BRANCH_TO_THUMB)
	  addr |= 1;
	/* Clean imm8 insn.  */
	insn &= 0xff00;
	/* And update with correct part of address.  */
	insn |= (addr >> shift) & 0xff;
	/* Put the relocated value back in the object file.  */
	bfd_put_16 (input_bfd, insn, hit_data);
      }
      *unresolved_reloc_p = false;
      return bfd_reloc_ok;
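
      /* The G0..G3 variants above pick which byte of the address is
	 moved into the 8-bit immediate: shift_array gives shifts of 0,
	 8, 16 and 24 bits, so for example G1 applied to the address
	 0x12345678 stores (0x12345678 >> 8) & 0xff = 0x56.  */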
    case R_ARM_GOTOFFFUNCDESC:
      {
	if (h == NULL)
	  {
	    struct fdpic_local *local_fdpic_cnts
	      = elf32_arm_local_fdpic_cnts (input_bfd);
	    int dynindx = elf_section_data (sym_sec->output_section)->dynindx;

	    if (r_symndx >= elf32_arm_num_entries (input_bfd))
	      {
		* error_message = _("local symbol index too big");
		return bfd_reloc_dangerous;
	      }

	    int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
	    bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
	    bfd_vma seg = -1;

	    if (bfd_link_pic (info) && dynindx == 0)
	      {
		* error_message = _("no dynamic index information available");
		return bfd_reloc_dangerous;
	      }

	    /* Resolve relocation.  */
	    bfd_put_32 (output_bfd, (offset + sgot->output_offset),
			contents + rel->r_offset);
	    /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
	       not done yet.  */
	    arm_elf_fill_funcdesc (output_bfd, info,
				   &local_fdpic_cnts[r_symndx].funcdesc_offset,
				   dynindx, offset, addr, dynreloc_value, seg);
	  }
	else
	  {
	    int dynindx;
	    int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
	    bfd_vma addr;
	    bfd_vma seg = -1;

	    /* For static binaries, sym_sec can be null.  */
	    if (sym_sec)
	      {
		dynindx = elf_section_data (sym_sec->output_section)->dynindx;
		addr = dynreloc_value - sym_sec->output_section->vma;
	      }
	    else
	      {
		dynindx = 0;
		addr = 0;
	      }

	    if (bfd_link_pic (info) && dynindx == 0)
	      {
		* error_message = _("no dynamic index information available");
		return bfd_reloc_dangerous;
	      }

	    /* This case cannot occur since funcdesc is allocated by
	       the dynamic loader so we cannot resolve the relocation.  */
	    if (h->dynindx != -1)
	      {
		* error_message = _("invalid dynamic index");
		return bfd_reloc_dangerous;
	      }

	    /* Resolve relocation.  */
	    bfd_put_32 (output_bfd, (offset + sgot->output_offset),
			contents + rel->r_offset);
	    /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
	    arm_elf_fill_funcdesc (output_bfd, info,
				   &eh->fdpic_cnts.funcdesc_offset,
				   dynindx, offset, addr, dynreloc_value, seg);
	  }
      }
      *unresolved_reloc_p = false;
      return bfd_reloc_ok;
    case R_ARM_GOTFUNCDESC:
      {
	if (h != NULL)
	  {
	    Elf_Internal_Rela outrel;

	    /* Resolve relocation.  */
	    bfd_put_32 (output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
				     + sgot->output_offset),
			contents + rel->r_offset);
	    /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE.  */
	    if (h->dynindx == -1)
	      {
		int dynindx;
		int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
		bfd_vma addr;
		bfd_vma seg = -1;

		/* For static binaries sym_sec can be null.  */
		if (sym_sec)
		  {
		    dynindx = elf_section_data (sym_sec->output_section)->dynindx;
		    addr = dynreloc_value - sym_sec->output_section->vma;
		  }
		else
		  {
		    dynindx = 0;
		    addr = 0;
		  }

		/* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
		arm_elf_fill_funcdesc (output_bfd, info,
				       &eh->fdpic_cnts.funcdesc_offset,
				       dynindx, offset, addr, dynreloc_value, seg);
	      }

	    /* Add a dynamic relocation on GOT entry if not already done.  */
	    if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
	      {
		if (h->dynindx == -1)
		  {
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		    if (h->root.type == bfd_link_hash_undefweak)
		      bfd_put_32 (output_bfd, 0, sgot->contents
				  + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
		    else
		      bfd_put_32 (output_bfd, sgot->output_section->vma
				  + sgot->output_offset
				  + (eh->fdpic_cnts.funcdesc_offset & ~1),
				  sgot->contents
				  + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
		  }
		else
		  outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);

		outrel.r_offset = sgot->output_section->vma
		  + sgot->output_offset
		  + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
		outrel.r_addend = 0;
		if (h->dynindx == -1 && !bfd_link_pic (info))
		  {
		    if (h->root.type == bfd_link_hash_undefweak)
		      arm_elf_add_rofixup (output_bfd, globals->srofixup, -1);
		    else
		      arm_elf_add_rofixup (output_bfd, globals->srofixup,
					   outrel.r_offset);
		  }
		else
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		eh->fdpic_cnts.gotfuncdesc_offset |= 1;
	      }
	  }
	else
	  {
	    /* Such relocation on static function should not have been
	       emitted by the compiler.  */
	    return bfd_reloc_notsupported;
	  }
      }
      *unresolved_reloc_p = false;
      return bfd_reloc_ok;
    case R_ARM_FUNCDESC:
      {
	if (h == NULL)
	  {
	    struct fdpic_local *local_fdpic_cnts
	      = elf32_arm_local_fdpic_cnts (input_bfd);
	    Elf_Internal_Rela outrel;
	    int dynindx = elf_section_data (sym_sec->output_section)->dynindx;

	    if (r_symndx >= elf32_arm_num_entries (input_bfd))
	      {
		* error_message = _("local symbol index too big");
		return bfd_reloc_dangerous;
	      }

	    int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
	    bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
	    bfd_vma seg = -1;

	    if (bfd_link_pic (info) && dynindx == 0)
	      {
		* error_message = _("dynamic index information not available");
		return bfd_reloc_dangerous;
	      }

	    /* Replace static FUNCDESC relocation with a
	       R_ARM_RELATIVE dynamic relocation or with a rofixup for
	       static binaries.  */
	    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
	    outrel.r_offset = input_section->output_section->vma
	      + input_section->output_offset + rel->r_offset;
	    outrel.r_addend = 0;
	    if (bfd_link_pic (info))
	      elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	    else
	      arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);

	    bfd_put_32 (input_bfd, sgot->output_section->vma
			+ sgot->output_offset + offset, hit_data);

	    /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
	    arm_elf_fill_funcdesc (output_bfd, info,
				   &local_fdpic_cnts[r_symndx].funcdesc_offset,
				   dynindx, offset, addr, dynreloc_value, seg);
	  }
	else
	  {
	    if (h->dynindx == -1)
	      {
		int dynindx;
		int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
		bfd_vma addr;
		bfd_vma seg = -1;
		Elf_Internal_Rela outrel;

		/* For static binaries sym_sec can be null.  */
		if (sym_sec)
		  {
		    dynindx = elf_section_data (sym_sec->output_section)->dynindx;
		    addr = dynreloc_value - sym_sec->output_section->vma;
		  }
		else
		  {
		    dynindx = 0;
		    addr = 0;
		  }

		if (bfd_link_pic (info) && dynindx == 0)
		  abort ();

		/* Replace static FUNCDESC relocation with a
		   R_ARM_RELATIVE dynamic relocation.  */
		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		outrel.r_offset = input_section->output_section->vma
		  + input_section->output_offset + rel->r_offset;
		outrel.r_addend = 0;
		if (bfd_link_pic (info))
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		else
		  arm_elf_add_rofixup (output_bfd, globals->srofixup,
				       outrel.r_offset);

		bfd_put_32 (input_bfd, sgot->output_section->vma
			    + sgot->output_offset + offset, hit_data);

		/* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
		arm_elf_fill_funcdesc (output_bfd, info,
				       &eh->fdpic_cnts.funcdesc_offset,
				       dynindx, offset, addr, dynreloc_value, seg);
	      }
	    else
	      {
		Elf_Internal_Rela outrel;

		/* Add a dynamic relocation.  */
		outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
		outrel.r_offset = input_section->output_section->vma
		  + input_section->output_offset + rel->r_offset;
		outrel.r_addend = 0;
		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	  }
      }
      *unresolved_reloc_p = false;
      return bfd_reloc_ok;
    case R_ARM_THM_BF16:
      {
	bfd_vma relocation;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    bfd_vma immA  = (upper_insn & 0x001f);
	    bfd_vma immB  = (lower_insn & 0x07fe) >> 1;
	    bfd_vma immC  = (lower_insn & 0x0800) >> 11;
	    addend  = (immA << 12);
	    addend |= (immB << 2);
	    addend |= (immC << 1);
	    signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
	  }

	relocation  = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma immA = (relocation & 0x0001f000) >> 12;
	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
	  bfd_vma immC = (relocation & 0x00000002) >> 1;

	  upper_insn = (upper_insn & 0xffe0) | immA;
	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return bfd_reloc_ok;
      }
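
    /* The BF16 immediate above is scattered across the instruction
       pair: immA holds bits [16:12] of the branch offset, immB bits
       [11:2] and immC bit 1, so an offset of 0x1234 is encoded as
       immA = 0x1, immB = 0x8d and immC = 0x0.  */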
    case R_ARM_THM_BF12:
      {
	bfd_vma relocation;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    bfd_vma immA  = (upper_insn & 0x0001);
	    bfd_vma immB  = (lower_insn & 0x07fe) >> 1;
	    bfd_vma immC  = (lower_insn & 0x0800) >> 11;
	    addend  = (immA << 12);
	    addend |= (immB << 2);
	    addend |= (immC << 1);
	    addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
	    signed_addend = addend;
	  }

	relocation  = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma immA = (relocation & 0x00001000) >> 12;
	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
	  bfd_vma immC = (relocation & 0x00000002) >> 1;

	  upper_insn = (upper_insn & 0xfffe) | immA;
	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return bfd_reloc_ok;
      }
    case R_ARM_THM_BF18:
      {
	bfd_vma relocation;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    bfd_vma immA  = (upper_insn & 0x007f);
	    bfd_vma immB  = (lower_insn & 0x07fe) >> 1;
	    bfd_vma immC  = (lower_insn & 0x0800) >> 11;
	    addend  = (immA << 12);
	    addend |= (immB << 2);
	    addend |= (immC << 1);
	    addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
	    signed_addend = addend;
	  }

	relocation  = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma immA = (relocation & 0x0007f000) >> 12;
	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
	  bfd_vma immC = (relocation & 0x00000002) >> 1;

	  upper_insn = (upper_insn & 0xff80) | immA;
	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return bfd_reloc_ok;
      }

    default:
      return bfd_reloc_notsupported;
    }
}
/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  */

static void
arm_add_to_rel (bfd *              abfd,
		bfd_byte *         address,
		reloc_howto_type * howto,
		bfd_signed_vma     increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      int upper_insn, lower_insn;
      int upper, lower;

      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;

      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction.  */
      addend = contents & howto->src_mask;
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  bfd_signed_vma mask;

	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  addend *= bfd_get_reloc_size (howto);
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}
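
/* A note on the Thumb branch handling above: for R_ARM_THM_CALL and
   R_ARM_THM_JUMP24 the addend is reassembled from the 11-bit halves of
   the instruction pair as (upper << 12) | (lower << 1), adjusted by
   INCREMENT, and then split back into the same two fields.  */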
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_GD32_FDPIC  \
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
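
/* The "gnu tls dialect" relocations above are the ones emitted for TLS
   descriptor sequences (R_ARM_TLS_GOTDESC, the ARM/Thumb TLS_CALL
   relocations and the DESCSEQ markers), which the relaxation code below
   may rewrite into IE or LE forms.  */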
/* Relocate an ARM ELF section.  */

static int
elf32_arm_relocate_section (bfd *		   output_bfd,
			    struct bfd_link_info * info,
			    bfd *		   input_bfd,
			    asection *		   input_section,
			    bfd_byte *		   contents,
			    Elf_Internal_Rela *	   relocs,
			    Elf_Internal_Sym *	   local_syms,
			    asection **		   local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return false;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int			   r_type;
      reloc_howto_type *	   howto;
      unsigned long		   r_symndx;
      Elf_Internal_Sym *	   sym;
      asection *		   sec;
      struct elf_link_hash_entry * h;
      bfd_vma			   relocation;
      bfd_reloc_status_type	   r;
      arelent			   bfd_reloc;
      int			   sym_type;
      bool			   unresolved_reloc = false;
      char *			   error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type   = ELF32_R_TYPE (rel->r_info);
      r_type   = arm_real_reloc_type (globals, r_type);

      if (   r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);

      if (howto == NULL)
	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    (*info->callbacks->undefined_symbol)
	      (info, bfd_elf_string_from_elf_section
	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
	       input_bfd, input_section,
	       rel->r_offset, true);

	  if (globals->use_rel)
	    {
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  asection *msec;
		  bfd_vma addend, value;

		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  _bfd_error_handler
			    /* xgettext:c-format */
			    (_("%pB(%pA+%#" PRIx64 "): "
			       "%s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (uint64_t) rel->r_offset, howto->name);
			  return false;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  bool warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (sec);
	}

      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  _bfd_error_handler
	    ((sym_type == STT_TLS
	      /* xgettext:c-format */
	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
	      /* xgettext:c-format */
	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return false;
	}

      if (r != bfd_reloc_ok)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if (!h || h->root.type != bfd_link_hash_undefined)
		(*info->callbacks->reloc_overflow)
		  (info, (h ? &h->root : NULL), name, howto->name,
		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, name, input_bfd, input_section, rel->r_offset, true);
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      (*info->callbacks->reloc_dangerous)
		(info, error_message, input_bfd, input_section, rel->r_offset);
	      break;
	    }
	}
    }

  return true;
}
/* Add a new unwind edit to the list described by HEAD, TAIL.  If TINDEX is
   zero, adds the edit to the start of the list.  (The list must be built in
   order of ascending TINDEX: the function's callers are primarily responsible
   for maintaining that condition).  */

static void
add_unwind_table_edit (arm_unwind_table_edit **head,
		       arm_unwind_table_edit **tail,
		       arm_unwind_edit_type type,
		       asection *linked_section,
		       unsigned int tindex)
{
  arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
      xmalloc (sizeof (arm_unwind_table_edit));

  new_edit->type = type;
  new_edit->linked_section = linked_section;
  new_edit->index = tindex;

  if (tindex > 0)
    {
      new_edit->next = NULL;

      if (*tail)
	(*tail)->next = new_edit;

      (*tail) = new_edit;

      if (!*head)
	(*head) = new_edit;
    }
  else
    {
      new_edit->next = *head;

      if (!*tail)
	*tail = new_edit;

      *head = new_edit;
    }
}
static _arm_elf_section_data *get_arm_elf_section_data (asection *);

/* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */

static void
adjust_exidx_size (asection *exidx_sec, int adjust)
{
  asection *out_sec;

  if (!exidx_sec->rawsize)
    exidx_sec->rawsize = exidx_sec->size;

  bfd_set_section_size (exidx_sec, exidx_sec->size + adjust);
  out_sec = exidx_sec->output_section;
  /* Adjust size of output section.  */
  bfd_set_section_size (out_sec, out_sec->size + adjust);
}
/* Insert an EXIDX_CANTUNWIND marker at the end of a section.  */

static void
insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
{
  struct _arm_elf_section_data *exidx_arm_data;

  exidx_arm_data = get_arm_elf_section_data (exidx_sec);
  add_unwind_table_edit
    (&exidx_arm_data->u.exidx.unwind_edit_list,
     &exidx_arm_data->u.exidx.unwind_edit_tail,
     INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);

  exidx_arm_data->additional_reloc_count++;

  adjust_exidx_size (exidx_sec, 8);
}
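
/* Each .ARM.exidx entry is two words (hence the 8-byte size adjustment
   above): a prel31 offset to the function start, and either an offset
   to unwind data, inlined unwind opcodes, or the EXIDX_CANTUNWIND
   marker value 1.  */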
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

     1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
     2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
	codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bool
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bool merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after (last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size (exidx_sec, - deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after (last_text_sec, last_exidx_sec);

  return true;
}
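
/* For example, two adjacent functions whose index entries carry the
   same inlined opcode word collapse to a single entry when
   MERGE_EXIDX_ENTRIES is true: a DELETE_EXIDX_ENTRY edit is queued for
   the duplicate here, and the actual deletion happens later in
   elf32_arm_write_section.  */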
static bool
elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
			       bfd *ibfd, const char *name)
{
  asection *sec, *osec;

  sec = bfd_get_linker_section (ibfd, name);
  if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
    return true;

  osec = sec->output_section;
  if (elf32_arm_write_section (obfd, info, sec, sec->contents))
    return true;

  if (! bfd_set_section_contents (obfd, osec, sec->contents,
				  sec->output_offset, sec->size))
    return false;

  return true;
}
static bool
elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
  asection *sec, *osec;

  if (globals == NULL)
    return false;

  /* Invoke the regular ELF backend linker to do all the work.  */
  if (!bfd_elf_final_link (abfd, info))
    return false;

  /* Process stub sections (eg BE8 encoding, ...).  */
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  unsigned int i;
  for (i = 0; i < htab->top_id; i++)
    {
      sec = htab->stub_group[i].stub_sec;
      /* Only process it once, in its link_sec slot.  */
      if (sec && i == htab->stub_group[i].link_sec->id)
	{
	  osec = sec->output_section;
	  elf32_arm_write_section (abfd, info, sec, sec->contents);
	  if (! bfd_set_section_contents (abfd, osec, sec->contents,
					  sec->output_offset, sec->size))
	    return false;
	}
    }

  /* Write out any glue sections now that we have created all the
     stubs.  */
  if (globals->bfd_of_glue_owner != NULL)
    {
      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   ARM2THUMB_GLUE_SECTION_NAME))
	return false;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   THUMB2ARM_GLUE_SECTION_NAME))
	return false;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   VFP11_ERRATUM_VENEER_SECTION_NAME))
	return false;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
	return false;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   ARM_BX_GLUE_SECTION_NAME))
	return false;
    }

  return true;
}
/* Return a best guess for the machine number based on the attributes.  */

static unsigned int
bfd_arm_get_mach_from_attributes (bfd * abfd)
{
  int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  switch (arch)
    {
    case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
    case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
    case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
    case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;

    case TAG_CPU_ARCH_V5TE:
      {
	const char * name;

	BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
	name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;

	if (name)
	  {
	    if (strcmp (name, "IWMMXT2") == 0)
	      return bfd_mach_arm_iWMMXt2;

	    if (strcmp (name, "IWMMXT") == 0)
	      return bfd_mach_arm_iWMMXt;

	    if (strcmp (name, "XSCALE") == 0)
	      {
		int wmmx;

		BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
		wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
		switch (wmmx)
		  {
		  case 1: return bfd_mach_arm_iWMMXt;
		  case 2: return bfd_mach_arm_iWMMXt2;
		  default: return bfd_mach_arm_XScale;
		  }
	      }
	  }

	return bfd_mach_arm_5TE;
      }

    case TAG_CPU_ARCH_V5TEJ:
	return bfd_mach_arm_5TEJ;
    case TAG_CPU_ARCH_V6:
	return bfd_mach_arm_6;
    case TAG_CPU_ARCH_V6KZ:
	return bfd_mach_arm_6KZ;
    case TAG_CPU_ARCH_V6T2:
	return bfd_mach_arm_6T2;
    case TAG_CPU_ARCH_V6K:
	return bfd_mach_arm_6K;
    case TAG_CPU_ARCH_V7:
	return bfd_mach_arm_7;
    case TAG_CPU_ARCH_V6_M:
	return bfd_mach_arm_6M;
    case TAG_CPU_ARCH_V6S_M:
	return bfd_mach_arm_6SM;
    case TAG_CPU_ARCH_V7E_M:
	return bfd_mach_arm_7EM;
    case TAG_CPU_ARCH_V8:
	return bfd_mach_arm_8;
    case TAG_CPU_ARCH_V8R:
	return bfd_mach_arm_8R;
    case TAG_CPU_ARCH_V8M_BASE:
	return bfd_mach_arm_8M_BASE;
    case TAG_CPU_ARCH_V8M_MAIN:
	return bfd_mach_arm_8M_MAIN;
    case TAG_CPU_ARCH_V8_1M_MAIN:
	return bfd_mach_arm_8_1M_MAIN;
    case TAG_CPU_ARCH_V9:
	return bfd_mach_arm_9;

    default:
      /* Force entry to be added for any new known Tag_CPU_arch value.  */
      BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);

      /* Unknown Tag_CPU_arch value.  */
      return bfd_mach_arm_unknown;
    }
}
/* Set the right machine number.  */

static bool
elf32_arm_object_p (bfd *abfd)
{
  unsigned int mach;

  mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);

  if (mach == bfd_mach_arm_unknown)
    mach = bfd_arm_get_mach_from_attributes (abfd);

  bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);

  return true;
}
/* Function to keep ARM specific flags in the ELF header.  */

static bool
elf32_arm_set_private_flags (bfd *abfd, flagword flags)
{
  if (elf_flags_init (abfd)
      && elf_elfheader (abfd)->e_flags != flags)
    {
      if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
	{
	  if (flags & EF_ARM_INTERWORK)
	    _bfd_error_handler
	      (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
	       abfd);
	  else
	    _bfd_error_handler
	      (_("warning: clearing the interworking flag of %pB due to outside request"),
	       abfd);
	}
    }
  else
    {
      elf_elfheader (abfd)->e_flags = flags;
      elf_flags_init (abfd) = true;
    }

  return true;
}
/* Copy backend specific data from one object module to another.  */

static bool
elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
{
  flagword in_flags;
  flagword out_flags;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return true;

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  if (elf_flags_init (obfd)
      && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
      && in_flags != out_flags)
    {
      /* Cannot mix APCS26 and APCS32 code.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	return false;

      /* Cannot mix float APCS and non-float APCS code.  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	return false;

      /* If the src and dest have different interworking flags
	 then turn off the interworking bit.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (out_flags & EF_ARM_INTERWORK)
	    _bfd_error_handler
	      (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
	       obfd, ibfd);

	  in_flags &= ~EF_ARM_INTERWORK;
	}

      /* Likewise for PIC, though don't warn for this case.  */
      if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
	in_flags &= ~EF_ARM_PIC;
    }

  elf_elfheader (obfd)->e_flags = in_flags;
  elf_flags_init (obfd) = true;

  return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
}
/* Values for Tag_ABI_PCS_R9_use.  */
enum
{
  AEABI_R9_V6,
  AEABI_R9_SB,
  AEABI_R9_TLS,
  AEABI_R9_unused
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,
  AEABI_PCS_RW_data_PCrel,
  AEABI_PCS_RW_data_SBrel,
  AEABI_PCS_RW_data_unused
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,
  AEABI_enum_short,
  AEABI_enum_wide,
  AEABI_enum_forced_wide
};
/* Determine whether an object attribute tag takes an integer, a
   string or both.  */

static int
elf32_arm_obj_attrs_arg_type (int tag)
{
  if (tag == Tag_compatibility)
    return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
  else if (tag == Tag_nodefaults)
    return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
  else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
    return ATTR_TYPE_FLAG_STR_VAL;
  else if (tag < 32)
    return ATTR_TYPE_FLAG_INT_VAL;
  else
    return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
}
/* The ABI defines that Tag_conformance should be emitted first, and that
   Tag_nodefaults should be second (if either is defined).  This sets those
   two positions, and bumps up the position of all the remaining tags to
   compensate.  */
static int
elf32_arm_obj_attrs_order (int num)
{
  if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
    return Tag_conformance;
  if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
    return Tag_nodefaults;
  if ((num - 2) < Tag_nodefaults)
    return num - 2;
  if ((num - 1) < Tag_conformance)
    return num - 1;
  return num;
}
/* Attribute numbers >=64 (mod 128) can be safely ignored.  */
static bool
elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
{
  if ((tag & 127) < 64)
    {
      _bfd_error_handler
	(_("%pB: unknown mandatory EABI object attribute %d"),
	 abfd, tag);
      bfd_set_error (bfd_error_bad_value);
      return false;
    }
  else
    {
      _bfd_error_handler
	(_("warning: %pB: unknown EABI object attribute %d"),
	 abfd, tag);
      return true;
    }
}
/* Read the architecture from the Tag_also_compatible_with attribute, if any.
   Returns -1 if no architecture could be read.  */

static int
get_secondary_compatible_arch (bfd *abfd)
{
  obj_attribute *attr =
    &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];

  /* Note: the tag and its argument below are uleb128 values, though
     currently-defined values fit in one byte for each.  */
  if (attr->s
      && attr->s[0] == Tag_CPU_arch
      && (attr->s[1] & 128) != 128
      && attr->s[2] == 0)
    return attr->s[1];

  /* This tag is "safely ignorable", so don't complain if it looks funny.  */
  return -1;
}
14165 The tag is removed if ARCH is -1. */
14168 set_secondary_compatible_arch (bfd
*abfd
, int arch
)
14170 obj_attribute
*attr
=
14171 &elf_known_obj_attributes_proc (abfd
)[Tag_also_compatible_with
];
14179 /* Note: the tag and its argument below are uleb128 values, though
14180 currently-defined values fit in one byte for each. */
14182 attr
->s
= (char *) bfd_alloc (abfd
, 3);
14183 attr
->s
[0] = Tag_CPU_arch
;
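
/* The attribute is stored as the three bytes { Tag_CPU_arch, ARCH, 0 },
   which is what get_secondary_compatible_arch above expects: a one-byte
   uleb128 tag, a one-byte uleb128 architecture value, and a NUL
   terminator.  */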
/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
   into account.  */

static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat, const char *name_table[])
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  const int v6t2[] =
    {
      T(V6T2),	/* PRE_V4.  */
      T(V6T2),	/* V4T.  */
      T(V6T2),	/* V5T.  */
      T(V6T2),	/* V5TE.  */
      T(V6T2),	/* V5TEJ.  */
      T(V6T2)	/* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),	/* PRE_V4.  */
      T(V6K),	/* V5TE.  */
      T(V6K),	/* V5TEJ.  */
      T(V6KZ),	/* V6KZ.  */
    };
  const int v7[] =
    {
      T(V7),	/* PRE_V4.  */
      T(V7),	/* V5TEJ.  */
    };
  const int v6_m[] =
    {
      T(V6K),	/* V5TE.  */
      T(V6K),	/* V5TEJ.  */
      T(V6KZ),	/* V6KZ.  */
      T(V6_M)	/* V6_M.  */
    };
  const int v6s_m[] =
    {
      T(V6K),	/* V5TE.  */
      T(V6K),	/* V5TEJ.  */
      T(V6KZ),	/* V6KZ.  */
      T(V6S_M),	/* V6_M.  */
      T(V6S_M)	/* V6S_M.  */
    };
  const int v7e_m[] =
    {
      T(V7E_M),	/* V4T.  */
      T(V7E_M),	/* V5T.  */
      T(V7E_M),	/* V5TE.  */
      T(V7E_M),	/* V5TEJ.  */
      T(V7E_M),	/* V6.  */
      T(V7E_M),	/* V6KZ.  */
      T(V7E_M),	/* V6T2.  */
      T(V7E_M),	/* V6K.  */
      T(V7E_M),	/* V7.  */
      T(V7E_M),	/* V6_M.  */
      T(V7E_M),	/* V6S_M.  */
      T(V7E_M)	/* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),	/* PRE_V4.  */
      T(V8),	/* V5TEJ.  */
      T(V8),	/* V6S_M.  */
      T(V8),	/* V7E_M.  */
      T(V8),	/* V8-M.BASE.  */
      T(V8),	/* V8-M.MAIN.  */
      T(V8),	/* V8.1-M.MAIN.  */
    };
  const int v8r[] =
    {
      T(V8R),	/* PRE_V4.  */
      T(V8R),	/* V5TE.  */
      T(V8R),	/* V5TEJ.  */
      T(V8R),	/* V6KZ.  */
      T(V8R),	/* V6T2.  */
      T(V8R),	/* V6_M.  */
      T(V8R),	/* V6S_M.  */
      T(V8R),	/* V7E_M.  */
    };
  const int v8m_baseline[] =
    {
      T(V8M_BASE),	/* V6_M.  */
      T(V8M_BASE),	/* V6S_M.  */
      T(V8M_BASE)	/* V8-M BASELINE.  */
    };
  const int v8m_mainline[] =
    {
      T(V8M_MAIN),	/* V7.  */
      T(V8M_MAIN),	/* V6_M.  */
      T(V8M_MAIN),	/* V6S_M.  */
      T(V8M_MAIN),	/* V7E_M.  */
      T(V8M_MAIN),	/* V8-M BASELINE.  */
      T(V8M_MAIN)	/* V8-M MAINLINE.  */
    };
  const int v8_1m_mainline[] =
    {
      T(V8_1M_MAIN),	/* V7.  */
      T(V8_1M_MAIN),	/* V6_M.  */
      T(V8_1M_MAIN),	/* V6S_M.  */
      T(V8_1M_MAIN),	/* V7E_M.  */
      T(V8_1M_MAIN),	/* V8-M BASELINE.  */
      T(V8_1M_MAIN),	/* V8-M MAINLINE.  */
      -1,		/* Unused (18).  */
      -1,		/* Unused (19).  */
      -1,		/* Unused (20).  */
      T(V8_1M_MAIN)	/* V8.1-M MAINLINE.  */
    };
  const int v9[] =
    {
      T(V9),	/* PRE_V4.  */
      T(V9),	/* V5TEJ.  */
      T(V9),	/* V6S_M.  */
      T(V9),	/* V7E_M.  */
      T(V9),	/* V8-M.BASE.  */
      T(V9),	/* V8-M.MAIN.  */
      T(V9),	/* V8.1-M.MAIN.  */
    };
  const int v4t_plus_v6_m[] =
    {
      T(V5TE),		/* V5TE.  */
      T(V5TEJ),		/* V5TEJ.  */
      T(V6KZ),		/* V6KZ.  */
      T(V6T2),		/* V6T2.  */
      T(V6_M),		/* V6_M.  */
      T(V6S_M),		/* V6S_M.  */
      T(V7E_M),		/* V7E_M.  */
      T(V8M_BASE),	/* V8-M BASELINE.  */
      T(V8M_MAIN),	/* V8-M MAINLINE.  */
      -1,		/* Unused (18).  */
      -1,		/* Unused (19).  */
      -1,		/* Unused (20).  */
      T(V8_1M_MAIN),	/* V8.1-M MAINLINE.  */
      T(V4T_PLUS_V6_M)	/* V4T plus V6_M.  */
    };
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      v8r,
      v8m_baseline,
      v8m_mainline,
      v8_1m_mainline,
      v9,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: conflicting CPU architectures %s vs %s in %pB"),
			  name_table[oldtag], name_table[newtag], ibfd);
      return -1;
    }

  return result;
}
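
/* Example: combining an object built for v4T with one built for v6-M
   yields the pseudo-architecture V4T_PLUS_V6_M, which is then written
   back as Tag_CPU_arch = v4T plus Tag_also_compatible_with = v6-M, the
   canonical form chosen above.  */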
/* Query attributes object to see if integer divide instructions may be
   present in an object.  */
static bool
elf32_arm_attributes_accept_div (const obj_attribute *attr)
{
  int arch = attr[Tag_CPU_arch].i;
  int profile = attr[Tag_CPU_arch_profile].i;

  switch (attr[Tag_DIV_use].i)
    {
    case 0:
      /* Integer divide allowed if instruction contained in
	 architecture.  */
      if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
	return true;
      else if (arch >= TAG_CPU_ARCH_V7E_M)
	return true;
      else
	return false;

    case 1:
      /* Integer divide explicitly prohibited.  */
      return false;

    default:
      /* Unrecognised case - treat as allowing divide everywhere.  */
    case 2:
      /* Integer divide allowed in ARM state.  */
      return true;
    }
}

/* Query attributes object to see if integer divide instructions are
   forbidden to be in the object.  This is not the inverse of
   elf32_arm_attributes_accept_div.  */
static bool
elf32_arm_attributes_forbid_div (const obj_attribute *attr)
{
  return attr[Tag_DIV_use].i == 1;
}
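
/* Tag_DIV_use summary, as used above: 0 means divide is only allowed
   where the architecture implies it (v7-R/v7-M or >= v7E-M), 1 means
   divide was explicitly prohibited, and 2 (or any unrecognised value)
   means divide instructions may be present.  */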
14559 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14560 are conflicting attributes. */
14563 elf32_arm_merge_eabi_attributes (bfd
*ibfd
, struct bfd_link_info
*info
)
14565 bfd
*obfd
= info
->output_bfd
;
14566 obj_attribute
*in_attr
;
14567 obj_attribute
*out_attr
;
14568 /* Some tags have 0 = don't care, 1 = strong requirement,
14569 2 = weak requirement. */
14570 static const int order_021
[3] = {0, 2, 1};
14572 bool result
= true;
14573 const char *sec_name
= get_elf_backend_data (ibfd
)->obj_attrs_section
;
14575 /* Skip the linker stubs file. This preserves previous behavior
14576 of accepting unknown attributes in the first input file - but
14578 if (ibfd
->flags
& BFD_LINKER_CREATED
)
14581 /* Skip any input that hasn't attribute section.
14582 This enables to link object files without attribute section with
14584 if (bfd_get_section_by_name (ibfd
, sec_name
) == NULL
)
14587 if (!elf_known_obj_attributes_proc (obfd
)[0].i
)
14589 /* This is the first object. Copy the attributes. */
14590 _bfd_elf_copy_obj_attributes (ibfd
, obfd
);
14592 out_attr
= elf_known_obj_attributes_proc (obfd
);
14594 /* Use the Tag_null value to indicate the attributes have been
14598 /* We do not output objects with Tag_MPextension_use_legacy - we move
14599 the attribute's value to Tag_MPextension_use. */
14600 if (out_attr
[Tag_MPextension_use_legacy
].i
!= 0)
14602 if (out_attr
[Tag_MPextension_use
].i
!= 0
14603 && out_attr
[Tag_MPextension_use_legacy
].i
14604 != out_attr
[Tag_MPextension_use
].i
)
14607 (_("Error: %pB has both the current and legacy "
14608 "Tag_MPextension_use attributes"), ibfd
);
14612 out_attr
[Tag_MPextension_use
] =
14613 out_attr
[Tag_MPextension_use_legacy
];
14614 out_attr
[Tag_MPextension_use_legacy
].type
= 0;
14615 out_attr
[Tag_MPextension_use_legacy
].i
= 0;
14618 /* PR 28859 and 28848: Handle the case where the first input file,
14619 eg crti.o, has a Tag_ABI_HardFP_use of 3 but no Tag_FP_arch set.
14620 Using Tag_ABI_HardFP_use in this way is deprecated, so reset the
14622 FIXME: Should we handle other non-zero values of Tag_ABI_HardFO_use ? */
14623 if (out_attr
[Tag_ABI_HardFP_use
].i
== 3 && out_attr
[Tag_FP_arch
].i
== 0)
14624 out_attr
[Tag_ABI_HardFP_use
].i
= 0;
14629 in_attr
= elf_known_obj_attributes_proc (ibfd
);
14630 out_attr
= elf_known_obj_attributes_proc (obfd
);
14631 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14632 if (in_attr
[Tag_ABI_VFP_args
].i
!= out_attr
[Tag_ABI_VFP_args
].i
)
14634 /* Ignore mismatches if the object doesn't use floating point or is
14635 floating point ABI independent. */
14636 if (out_attr
[Tag_ABI_FP_number_model
].i
== AEABI_FP_number_model_none
14637 || (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
14638 && out_attr
[Tag_ABI_VFP_args
].i
== AEABI_VFP_args_compatible
))
14639 out_attr
[Tag_ABI_VFP_args
].i
= in_attr
[Tag_ABI_VFP_args
].i
;
14640 else if (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
14641 && in_attr
[Tag_ABI_VFP_args
].i
!= AEABI_VFP_args_compatible
)
14644 (_("error: %pB uses VFP register arguments, %pB does not"),
14645 in_attr
[Tag_ABI_VFP_args
].i
? ibfd
: obfd
,
14646 in_attr
[Tag_ABI_VFP_args
].i
? obfd
: ibfd
);
14651 for (i
= LEAST_KNOWN_OBJ_ATTRIBUTE
; i
< NUM_KNOWN_OBJ_ATTRIBUTES
; i
++)
14653 /* Merge this attribute with existing attributes. */
14656 case Tag_CPU_raw_name
:
14658 /* These are merged after Tag_CPU_arch. */
14661 case Tag_ABI_optimization_goals
:
14662 case Tag_ABI_FP_optimization_goals
:
14663 /* Use the first value seen. */
14668 int secondary_compat
= -1, secondary_compat_out
= -1;
14669 unsigned int saved_out_attr
= out_attr
[i
].i
;
14671 static const char *name_table
[] =
14673 /* These aren't real CPU names, but we can't guess
14674 that from the architecture version alone. */
14691 "ARM v8-M.baseline",
14692 "ARM v8-M.mainline",
14696 "ARM v8.1-M.mainline",
14700 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14701 secondary_compat
= get_secondary_compatible_arch (ibfd
);
14702 secondary_compat_out
= get_secondary_compatible_arch (obfd
);
14703 arch_attr
= tag_cpu_arch_combine (ibfd
, out_attr
[i
].i
,
14704 &secondary_compat_out
,
14709 /* Return with error if failed to merge. */
14710 if (arch_attr
== -1)
14713 out_attr
[i
].i
= arch_attr
;
14715 set_secondary_compatible_arch (obfd
, secondary_compat_out
);
14717 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14718 if (out_attr
[i
].i
== saved_out_attr
)
14719 ; /* Leave the names alone. */
14720 else if (out_attr
[i
].i
== in_attr
[i
].i
)
14722 /* The output architecture has been changed to match the
14723 input architecture. Use the input names. */
14724 out_attr
[Tag_CPU_name
].s
= in_attr
[Tag_CPU_name
].s
14725 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_name
].s
)
14727 out_attr
[Tag_CPU_raw_name
].s
= in_attr
[Tag_CPU_raw_name
].s
14728 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_raw_name
].s
)
14733 out_attr
[Tag_CPU_name
].s
= NULL
;
14734 out_attr
[Tag_CPU_raw_name
].s
= NULL
;
14737 /* If we still don't have a value for Tag_CPU_name,
14738 make one up now. Tag_CPU_raw_name remains blank. */
14739 if (out_attr
[Tag_CPU_name
].s
== NULL
14740 && out_attr
[i
].i
< ARRAY_SIZE (name_table
))
14741 out_attr
[Tag_CPU_name
].s
=
14742 _bfd_elf_attr_strdup (obfd
, name_table
[out_attr
[i
].i
]);
14746 case Tag_ARM_ISA_use
:
14747 case Tag_THUMB_ISA_use
:
14748 case Tag_WMMX_arch
:
14749 case Tag_Advanced_SIMD_arch
:
14750 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14751 case Tag_ABI_FP_rounding
:
14752 case Tag_ABI_FP_exceptions
:
14753 case Tag_ABI_FP_user_exceptions
:
14754 case Tag_ABI_FP_number_model
:
14755 case Tag_FP_HP_extension
:
14756 case Tag_CPU_unaligned_access
:
14758 case Tag_MPextension_use
:
14760 case Tag_PAC_extension
:
14761 case Tag_BTI_extension
:
14763 case Tag_PACRET_use
:
14764 /* Use the largest value specified. */
14765 if (in_attr
[i
].i
> out_attr
[i
].i
)
14766 out_attr
[i
].i
= in_attr
[i
].i
;
14769 case Tag_ABI_align_preserved
:
14770 case Tag_ABI_PCS_RO_data
:
14771 /* Use the smallest value specified. */
14772 if (in_attr
[i
].i
< out_attr
[i
].i
)
14773 out_attr
[i
].i
= in_attr
[i
].i
;
14776 case Tag_ABI_align_needed
:
14777 if ((in_attr
[i
].i
> 0 || out_attr
[i
].i
> 0)
14778 && (in_attr
[Tag_ABI_align_preserved
].i
== 0
14779 || out_attr
[Tag_ABI_align_preserved
].i
== 0))
14781 /* This error message should be enabled once all non-conformant
14782 binaries in the toolchain have had the attributes set
14785 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14789 /* Fall through. */
14790 case Tag_ABI_FP_denormal
:
14791 case Tag_ABI_PCS_GOT_use
:
14792 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14793 value if greater than 2 (for future-proofing). */
14794 if ((in_attr
[i
].i
> 2 && in_attr
[i
].i
> out_attr
[i
].i
)
14795 || (in_attr
[i
].i
<= 2 && out_attr
[i
].i
<= 2
14796 && order_021
[in_attr
[i
].i
] > order_021
[out_attr
[i
].i
]))
14797 out_attr
[i
].i
= in_attr
[i
].i
;
14800 case Tag_Virtualization_use
:
14801 /* The virtualization tag effectively stores two bits of
14802 information: the intended use of TrustZone (in bit 0), and the
14803 intended use of Virtualization (in bit 1). */
14804 if (out_attr
[i
].i
== 0)
14805 out_attr
[i
].i
= in_attr
[i
].i
;
14806 else if (in_attr
[i
].i
!= 0
14807 && in_attr
[i
].i
!= out_attr
[i
].i
)
14809 if (in_attr
[i
].i
<= 3 && out_attr
[i
].i
<= 3)
14814 (_("error: %pB: unable to merge virtualization attributes "
14822 case Tag_CPU_arch_profile
:
14823 if (out_attr
[i
].i
!= in_attr
[i
].i
)
14825 /* 0 will merge with anything.
14826 'A' and 'S' merge to 'A'.
14827 'R' and 'S' merge to 'R'.
14828 'M' and 'A|R|S' is an error. */
14829 if (out_attr
[i
].i
== 0
14830 || (out_attr
[i
].i
== 'S'
14831 && (in_attr
[i
].i
== 'A' || in_attr
[i
].i
== 'R')))
14832 out_attr
[i
].i
= in_attr
[i
].i
;
14833 else if (in_attr
[i
].i
== 0
14834 || (in_attr
[i
].i
== 'S'
14835 && (out_attr
[i
].i
== 'A' || out_attr
[i
].i
== 'R')))
14836 ; /* Do nothing. */
14840 (_("error: %pB: conflicting architecture profiles %c/%c"),
14842 in_attr
[i
].i
? in_attr
[i
].i
: '0',
14843 out_attr
[i
].i
? out_attr
[i
].i
: '0');
14849 case Tag_DSP_extension
:
14850 /* No need to change output value if any of:
14851 - pre (<=) ARMv5T input architecture (do not have DSP)
14852 - M input profile not ARMv7E-M and do not have DSP. */
14853 if (in_attr
[Tag_CPU_arch
].i
<= 3
14854 || (in_attr
[Tag_CPU_arch_profile
].i
== 'M'
14855 && in_attr
[Tag_CPU_arch
].i
!= 13
14856 && in_attr
[i
].i
== 0))
14857 ; /* Do nothing. */
14858 /* Output value should be 0 if DSP part of architecture, ie.
14859 - post (>=) ARMv5te architecture output
14860 - A, R or S profile output or ARMv7E-M output architecture. */
14861 else if (out_attr
[Tag_CPU_arch
].i
>= 4
14862 && (out_attr
[Tag_CPU_arch_profile
].i
== 'A'
14863 || out_attr
[Tag_CPU_arch_profile
].i
== 'R'
14864 || out_attr
[Tag_CPU_arch_profile
].i
== 'S'
14865 || out_attr
[Tag_CPU_arch
].i
== 13))
14867 /* Otherwise, DSP instructions are added and not part of output
	case Tag_FP_arch:
	  {
	    /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
	       the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
	       when it's 0.  It might mean absence of FP hardware if
	       Tag_FP_arch is zero.  */

#define VFP_VERSION_COUNT 9
	    static const struct
	    {
	      int ver;
	      int regs;
	    } vfp_versions[VFP_VERSION_COUNT] =
	      {
		{0, 0},
		{1, 16},
		{2, 16},
		{3, 32},
		{3, 16},
		{4, 32},
		{4, 16},
		{8, 32},
		{8, 16}
	      };
	    int ver;
	    int regs;
	    int newval;

	    /* If the output has no requirement about FP hardware,
	       follow the requirement of the input.  */
	    if (out_attr[i].i == 0)
	      {
		/* This assert is still reasonable, we shouldn't
		   produce the suspicious build attribute
		   combination (See below for in_attr).  */
		BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
		out_attr[i].i = in_attr[i].i;
		out_attr[Tag_ABI_HardFP_use].i
		  = in_attr[Tag_ABI_HardFP_use].i;
		break;
	      }
	    /* If the input has no requirement about FP hardware, do
	       nothing.  */
	    else if (in_attr[i].i == 0)
	      {
		/* We used to assert that Tag_ABI_HardFP_use was
		   zero here, but we should never assert when
		   consuming an object file that has suspicious
		   build attributes.  The single precision variant
		   of 'no FP architecture' is still 'no FP
		   architecture', so we just ignore the tag in this
		   case.  */
		break;
	      }

	    /* Both the input and the output have nonzero Tag_FP_arch.
	       So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero.  */

	    /* If both the input and the output have zero Tag_ABI_HardFP_use,
	       do nothing.  */
	    if (in_attr[Tag_ABI_HardFP_use].i == 0
		&& out_attr[Tag_ABI_HardFP_use].i == 0)
	      ;
	    /* If the input and the output have different Tag_ABI_HardFP_use,
	       the combination of them is 0 (implied by Tag_FP_arch).  */
	    else if (in_attr[Tag_ABI_HardFP_use].i
		     != out_attr[Tag_ABI_HardFP_use].i)
	      out_attr[Tag_ABI_HardFP_use].i = 0;

	    /* Now we can handle Tag_FP_arch.  */

	    /* Values of VFP_VERSION_COUNT or more aren't defined, so just
	       pick the biggest.  */
	    if (in_attr[i].i >= VFP_VERSION_COUNT
		&& in_attr[i].i > out_attr[i].i)
	      {
		out_attr[i] = in_attr[i];
		break;
	      }
	    /* The output uses the superset of input features
	       (ISA version) and registers.  */
	    ver = vfp_versions[in_attr[i].i].ver;
	    if (ver < vfp_versions[out_attr[i].i].ver)
	      ver = vfp_versions[out_attr[i].i].ver;
	    regs = vfp_versions[in_attr[i].i].regs;
	    if (regs < vfp_versions[out_attr[i].i].regs)
	      regs = vfp_versions[out_attr[i].i].regs;
	    /* This assumes all possible supersets are also valid
	       options.  */
	    for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
	      {
		if (regs == vfp_versions[newval].regs
		    && ver == vfp_versions[newval].ver)
		  break;
	      }
	    out_attr[i].i = newval;
	    break;
	  }
	case Tag_PCS_config:
	  if (out_attr[i].i == 0)
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
	    {
	      /* It's sometimes ok to mix different configs, so this is only
		 a warning.  */
	      _bfd_error_handler
		(_("warning: %pB: conflicting platform configuration"), ibfd);
	    }
	  break;
	case Tag_ABI_PCS_R9_use:
	  if (in_attr[i].i != out_attr[i].i
	      && out_attr[i].i != AEABI_R9_unused
	      && in_attr[i].i != AEABI_R9_unused)
	    {
	      _bfd_error_handler
		(_("error: %pB: conflicting use of R9"), ibfd);
	      result = false;
	    }
	  if (out_attr[i].i == AEABI_R9_unused)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_PCS_RW_data:
	  if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
	    {
	      _bfd_error_handler
		(_("error: %pB: SB relative addressing conflicts with use of R9"),
		 ibfd);
	      result = false;
	    }
	  /* Use the smallest value specified.  */
	  if (in_attr[i].i < out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_PCS_wchar_t:
	  if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
	      && !elf_arm_tdata (obfd)->no_wchar_size_warning)
	    {
	      _bfd_error_handler
		(_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
		 ibfd, in_attr[i].i, out_attr[i].i);
	    }
	  else if (in_attr[i].i && !out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_enum_size:
	  if (in_attr[i].i != AEABI_enum_unused)
	    {
	      if (out_attr[i].i == AEABI_enum_unused
		  || out_attr[i].i == AEABI_enum_forced_wide)
		{
		  /* The existing object is compatible with anything.
		     Use whatever requirements the new object has.  */
		  out_attr[i].i = in_attr[i].i;
		}
	      else if (in_attr[i].i != AEABI_enum_forced_wide
		       && out_attr[i].i != in_attr[i].i
		       && !elf_arm_tdata (obfd)->no_enum_size_warning)
		{
		  static const char *aeabi_enum_names[] =
		    { "", "variable-size", "32-bit", "" };
		  const char *in_name =
		    in_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
		    ? aeabi_enum_names[in_attr[i].i]
		    : "<unknown>";
		  const char *out_name =
		    out_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
		    ? aeabi_enum_names[out_attr[i].i]
		    : "<unknown>";
		  _bfd_error_handler
		    (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
		     ibfd, in_name, out_name);
		}
	    }
	  break;
	case Tag_ABI_VFP_args:
	  break;

	case Tag_ABI_WMMX_args:
	  if (in_attr[i].i != out_attr[i].i)
	    {
	      _bfd_error_handler
		(_("error: %pB uses iWMMXt register arguments, %pB does not"),
		 ibfd, obfd);
	      result = false;
	    }
	  break;
	case Tag_compatibility:
	  /* Merged in target-independent code.  */
	  break;

	case Tag_ABI_HardFP_use:
	  /* This is handled along with Tag_FP_arch.  */
	  break;

	case Tag_ABI_FP_16bit_format:
	  if (in_attr[i].i != 0 && out_attr[i].i != 0)
	    {
	      if (in_attr[i].i != out_attr[i].i)
		{
		  _bfd_error_handler
		    (_("error: fp16 format mismatch between %pB and %pB"),
		     ibfd, obfd);
		  result = false;
		}
	    }
	  else if (in_attr[i].i != 0)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_DIV_use:
	  /* A value of zero on input means that the divide instruction may
	     be used if available in the base architecture as specified via
	     Tag_CPU_arch and Tag_CPU_arch_profile.  A value of 1 means that
	     the user did not want divide instructions.  A value of 2
	     explicitly means that divide instructions were allowed in ARM
	     and Thumb state.  */
	  if (in_attr[i].i == out_attr[i].i)
	    /* Do nothing.  */ ;
	  else if (elf32_arm_attributes_forbid_div (in_attr)
		   && !elf32_arm_attributes_accept_div (out_attr))
	    out_attr[i].i = 1;
	  else if (elf32_arm_attributes_forbid_div (out_attr)
		   && elf32_arm_attributes_accept_div (in_attr))
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i == 2)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_MPextension_use_legacy:
	  /* We don't output objects with Tag_MPextension_use_legacy - we
	     move the value to Tag_MPextension_use.  */
	  if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
	    {
	      if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
		{
		  _bfd_error_handler
		    (_("%pB has both the current and legacy "
		       "Tag_MPextension_use attributes"),
		     ibfd);
		  result = false;
		}
	    }

	  if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
	    out_attr[Tag_MPextension_use] = in_attr[i];
	  break;
	case Tag_nodefaults:
	  /* This tag is set if it exists, but the value is unused (and is
	     typically zero).  We don't actually need to do anything here -
	     the merge happens automatically when the type flags are merged
	     below.  */
	  break;

	case Tag_also_compatible_with:
	  /* Already done in Tag_CPU_arch.  */
	  break;

	case Tag_conformance:
	  /* Keep the attribute if it matches.  Throw it away otherwise.
	     No attribute means no claim to conform.  */
	  if (!in_attr[i].s || !out_attr[i].s
	      || strcmp (in_attr[i].s, out_attr[i].s) != 0)
	    out_attr[i].s = NULL;
	  break;
	default:
	  result
	    = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
	  break;
	}

      /* If out_attr was copied from in_attr then it won't have a type yet.  */
      if (in_attr[i].type && !out_attr[i].type)
	out_attr[i].type = in_attr[i].type;
    }

  /* Merge Tag_compatibility attributes and any common GNU ones.  */
  if (!_bfd_elf_merge_object_attributes (ibfd, info))
    return false;

  /* Check for any attributes not known on ARM.  */
  result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);

  return result;
}
/* Return TRUE if the two EABI versions are compatible.  */

static bool
elf32_arm_versions_compatible (unsigned iver, unsigned over)
{
  /* v4 and v5 are the same spec before and after it was released,
     so allow mixing them.  */
  if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
      || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
    return true;

  return (iver == over);
}
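
/* The check above is used when merging e_flags: objects whose EABI
   versions are not compatible are rejected by
   elf32_arm_merge_private_bfd_data.  */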
/* Merge backend specific data from an object file to the output
   object file when linking.  */

static bool
elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
/* Display the flags field.  */

static bool
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);

  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  if (flags & EF_ARM_PIC)
    fprintf (file, _(" [position independent]"));

  if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
    fprintf (file, _(" [FDPIC ABI supplement]"));

  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);

  if (flags)
    fprintf (file, _(" <Unrecognised flag bits set>"));

  fputc ('\n', file);

  return true;
}
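
/* Map ARM-specific symbol types (STT_ARM_TFUNC, STT_ARM_16BIT) onto the
   generic symbol type expected by the rest of BFD.  */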
static int
elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
{
  switch (ELF_ST_TYPE (elf_sym->st_info))
    {
    case STT_ARM_TFUNC:
      return ELF_ST_TYPE (elf_sym->st_info);

    case STT_ARM_16BIT:
      /* If the symbol is not an object, return the STT_ARM_16BIT flag.
	 This allows us to distinguish between data used by Thumb instructions
	 and non-data (which is probably code) inside Thumb regions of an
	 executable.  */
      if (type != STT_OBJECT && type != STT_TLS)
	return ELF_ST_TYPE (elf_sym->st_info);
      break;
    }

  return type;
}
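
/* Hook for the garbage collector: R_ARM_GNU_VTINHERIT and R_ARM_GNU_VTENTRY
   relocations do not on their own keep their target section alive, so they
   are ignored when marking sections.  */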
static asection *
elf32_arm_gc_mark_hook (asection *sec,
			struct bfd_link_info *info,
			Elf_Internal_Rela *rel,
			struct elf_link_hash_entry *h,
			Elf_Internal_Sym *sym)
{
  if (h != NULL)
    switch (ELF32_R_TYPE (rel->r_info))
      {
      case R_ARM_GNU_VTINHERIT:
      case R_ARM_GNU_VTENTRY:
	return NULL;
      }

  return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
}
/* Look through the relocs for a section during the first phase.  */

static bool
elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  bfd *dynobj;
  asection *sreloc;
  struct elf32_arm_link_hash_table *htab;
  bool call_reloc_p;
  bool may_become_dynamic_p;
  bool may_need_local_target_p;
  unsigned long nsyms;

  if (bfd_link_relocatable (info))
    return true;

  BFD_ASSERT (is_arm_elf (abfd));

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return false;

  sreloc = NULL;

  if (htab->root.dynobj == NULL)
    htab->root.dynobj = abfd;
  if (!create_ifunc_sections (info))
    return false;

  dynobj = htab->root.dynobj;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  nsyms = NUM_SHDR_ENTRIES (symtab_hdr);

  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      Elf_Internal_Sym *isym;
      struct elf_link_hash_entry *h;
      struct elf32_arm_link_hash_entry *eh;
      unsigned int r_symndx;
      int r_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (htab, r_type);

      if (r_symndx >= nsyms
	  /* PR 9934: It is possible to have relocations that do not
	     refer to symbols, thus it is also possible to have an
	     object file containing relocations but no symbol table.  */
	  && (r_symndx > STN_UNDEF || nsyms > 0))
	{
	  _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
			      r_symndx);
	  return false;
	}

      h = NULL;
      isym = NULL;
      if (nsyms > 0)
	{
	  if (r_symndx < symtab_hdr->sh_info)
	    {
	      /* A local symbol.  */
	      isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return false;
	    }
	  else
	    {
	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	      while (h->root.type == bfd_link_hash_indirect
		     || h->root.type == bfd_link_hash_warning)
		h = (struct elf_link_hash_entry *) h->root.u.i.link;
	    }
	}

      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = false;
      may_become_dynamic_p = false;
      may_need_local_target_p = false;

      /* Could be done earlier, if h were already available.  */
      r_type = elf32_arm_tls_transition (info, r_type, h);
      switch (r_type)
	{
	case R_ARM_GOTOFFFUNCDESC:
	  {
	    if (h == NULL)
	      {
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return false;
		if (r_symndx >= elf32_arm_num_entries (abfd))
		  return false;
		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].gotofffuncdesc_cnt += 1;
		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
	      }
	    else
	      {
		eh->fdpic_cnts.gotofffuncdesc_cnt++;
	      }
	  }
	  break;

	case R_ARM_GOTFUNCDESC:
	  {
	    if (h == NULL)
	      {
		/* Such a relocation is not supposed to be generated
		   by gcc on a static function.  */
		/* Anyway if needed it could be handled.  */
		abort ();
	      }
	    else
	      {
		eh->fdpic_cnts.gotfuncdesc_cnt++;
	      }
	  }
	  break;

	case R_ARM_FUNCDESC:
	  {
	    if (h == NULL)
	      {
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return false;
		if (r_symndx >= elf32_arm_num_entries (abfd))
		  return false;
		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_cnt += 1;
		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
	      }
	    else
	      {
		eh->fdpic_cnts.funcdesc_cnt++;
	      }
	  }
	  break;

	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_GD32_FDPIC:
	case R_ARM_TLS_IE32:
	case R_ARM_TLS_IE32_FDPIC:
	case R_ARM_TLS_GOTDESC:
	case R_ARM_TLS_DESCSEQ:
	case R_ARM_THM_TLS_DESCSEQ:
	case R_ARM_TLS_CALL:
	case R_ARM_THM_TLS_CALL:
	  /* This symbol requires a global offset table entry.  */
	  {
	    int tls_type, old_tls_type;

	    switch (r_type)
	      {
	      case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
	      case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;

	      case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
	      case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;

	      case R_ARM_TLS_GOTDESC:
	      case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
	      case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
		tls_type = GOT_TLS_GDESC; break;

	      default: tls_type = GOT_NORMAL; break;
	      }

	    if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
	      info->flags |= DF_STATIC_TLS;

	    if (h != NULL)
	      {
		h->got.refcount++;
		old_tls_type = elf32_arm_hash_entry (h)->tls_type;
	      }
	    else
	      {
		/* This is a global offset table entry for a local symbol.  */
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return false;

		if (r_symndx >= elf32_arm_num_entries (abfd))
		  {
		    _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
					r_symndx);
		    return false;
		  }

		elf_local_got_refcounts (abfd)[r_symndx] += 1;
		old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
	      }

	    /* If a variable is accessed with both tls methods, two
	       slots may be created.  */
	    if (GOT_TLS_GD_ANY_P (old_tls_type)
		&& GOT_TLS_GD_ANY_P (tls_type))
	      tls_type |= old_tls_type;

	    /* We will already have issued an error message if there
	       is a TLS/non-TLS mismatch, based on the symbol
	       type.  So just combine any TLS types needed.  */
	    if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
		&& tls_type != GOT_NORMAL)
	      tls_type |= old_tls_type;

	    /* If the symbol is accessed in both IE and GDESC
	       method, we're able to relax.  Turn off the GDESC flag,
	       without messing up with any other kind of tls types
	       that may be involved.  */
	    if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
	      tls_type &= ~GOT_TLS_GDESC;

	    if (old_tls_type != tls_type)
	      {
		if (h != NULL)
		  elf32_arm_hash_entry (h)->tls_type = tls_type;
		else
		  elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
	      }
	  }
	  /* Fall through.  */

	case R_ARM_TLS_LDM32:
	case R_ARM_TLS_LDM32_FDPIC:
	  if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
	    htab->tls_ldm_got.refcount++;
	  /* Fall through.  */

	case R_ARM_GOTOFF32:
	case R_ARM_GOTPC:
	  if (htab->root.sgot == NULL
	      && !create_got_section (htab->root.dynobj, info))
	    return false;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = true;
	  may_need_local_target_p = true;
	  break;

	case R_ARM_ABS12:
	  /* VxWorks uses dynamic R_ARM_ABS12 relocations for
	     ldr __GOTT_INDEX__ offsets.  */
	  if (htab->root.target_os != is_vxworks)
	    {
	      may_need_local_target_p = true;
	      break;
	    }
	  else goto jump_over;

	  /* Fall through.  */

	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	  if (bfd_link_pic (info))
	    {
	      _bfd_error_handler
		(_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
		 abfd, elf32_arm_howto_table_1[r_type].name,
		 (h) ? h->root.root.string : "a local symbol");
	      bfd_set_error (bfd_error_bad_value);
	      return false;
	    }

	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	jump_over:
	  if (h != NULL && bfd_link_executable (info))
	    {
	      h->pointer_equality_needed = 1;
	    }
	  /* Fall through.  */
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:

	  /* Should the interworking branches be listed here?  */
	  if ((bfd_link_pic (info)
	       || htab->fdpic_p)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  /* In shared libraries and relocatable executables,
		     we treat local relative references as calls;
		     see the related SYMBOL_CALLS_LOCAL code in
		     allocate_dynrelocs.  */
		  call_reloc_p = true;
		  may_need_local_target_p = true;
		}
	      else
		/* We are creating a shared library or relocatable
		   executable, and this is a reloc against a global symbol,
		   or a non-PC-relative reloc against a local symbol.
		   We may need to copy the reloc into the output.  */
		may_become_dynamic_p = true;
	    }
	  else
	    may_need_local_target_p = true;
	  break;

	/* This relocation describes the C++ object vtable hierarchy.
	   Reconstruct it for later use during GC.  */
	case R_ARM_GNU_VTINHERIT:
	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
	    return false;
	  break;

	/* This relocation describes which C++ vtable entries are actually
	   used.  Record for later use during GC.  */
	case R_ARM_GNU_VTENTRY:
	  if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
	    return false;
	  break;
	}

      if (h != NULL)
	{
	  if (call_reloc_p)
	    /* We may need a .plt entry if the function this reloc
	       refers to is in a different object, regardless of the
	       symbol's type.  We can't tell for sure yet, because
	       something later might force the symbol local.  */
	    h->needs_plt = 1;
	  else if (may_need_local_target_p)
	    /* If this reloc is in a read-only section, we might
	       need a copy reloc.  We can't check reliably at this
	       stage whether the section is read-only, as input
	       sections have not yet been mapped to output sections.
	       Tentatively set the flag for now, and correct in
	       adjust_dynamic_symbol.  */
	    h->non_got_ref = 1;
	}

      if (may_need_local_target_p
	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
	{
	  union gotplt_union *root_plt;
	  struct arm_plt_info *arm_plt;
	  struct arm_local_iplt_info *local_iplt;

	  if (h != NULL)
	    {
	      root_plt = &h->plt;
	      arm_plt = &eh->plt;
	    }
	  else
	    {
	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
	      if (local_iplt == NULL)
		return false;
	      root_plt = &local_iplt->root;
	      arm_plt = &local_iplt->arm;
	    }

	  /* If the symbol is a function that doesn't bind locally,
	     this relocation will need a PLT entry.  */
	  if (root_plt->refcount != -1)
	    root_plt->refcount += 1;

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount++;

	  /* It's too early to use htab->use_blx here, so we have to
	     record possible blx references separately from
	     relocs that definitely need a thumb stub.  */

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount += 1;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount += 1;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs *p, **head;

	  /* Create a reloc section in dynobj.  */
	  if (sreloc == NULL)
	    {
	      sreloc = _bfd_elf_make_dynamic_reloc_section
		(sec, dynobj, 2, abfd, ! htab->use_rel);

	      if (sreloc == NULL)
		return false;
	    }

	  /* If this is a global symbol, count the number of
	     relocations we need for this symbol.  */
	  if (h != NULL)
	    head = &h->dyn_relocs;
	  else
	    {
	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (head == NULL)
		return false;
	    }

	  p = *head;
	  if (p == NULL || p->sec != sec)
	    {
	      size_t amt = sizeof *p;

	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
	      if (p == NULL)
		return false;
	      p->next = *head;
	      *head = p;
	      p->sec = sec;
	      p->count = 0;
	      p->pc_count = 0;
	    }

	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
	    p->pc_count += 1;
	  p->count += 1;
	  if (h == NULL && htab->fdpic_p && !bfd_link_pic (info)
	      && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI)
	    {
	      /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
		 that will become rofixup.  */
	      /* This is due to the fact that we suppose all will become rofixup.  */
	      _bfd_error_handler
		(_("FDPIC does not yet support %s relocation"
		   " to become dynamic for executable"),
		 elf32_arm_howto_table_1[r_type].name);
	      abort ();
	    }
	}
    }

  return true;
}
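
/* Rebuild the relocations of an SHT_ARM_EXIDX section after unwind table
   editing: relocations for deleted .ARM.exidx entries are dropped, the
   offsets of the remaining ones are rebased, and a relocation is appended
   for a terminating CANTUNWIND entry when one has been inserted.  */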
static void
elf32_arm_update_relocs (asection *o,
			 struct bfd_elf_section_reloc_data *reldata)
{
  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
  void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
  const struct elf_backend_data *bed;
  _arm_elf_section_data *eado;
  struct bfd_link_order *p;
  bfd_byte *erela_head, *erela;
  Elf_Internal_Rela *irela_head, *irela;
  Elf_Internal_Shdr *rel_hdr;
  bfd *abfd;
  unsigned int count;

  eado = get_arm_elf_section_data (o);

  if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
    return;

  abfd = o->owner;
  bed = get_elf_backend_data (abfd);
  rel_hdr = reldata->hdr;

  if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
    {
      swap_in = bed->s->swap_reloc_in;
      swap_out = bed->s->swap_reloc_out;
    }
  else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
    {
      swap_in = bed->s->swap_reloca_in;
      swap_out = bed->s->swap_reloca_out;
    }
  else
    abort ();

  erela_head = rel_hdr->contents;
  irela_head = (Elf_Internal_Rela *) bfd_zmalloc
    ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));

  erela = erela_head;
  irela = irela_head;
  count = 0;

  for (p = o->map_head.link_order; p; p = p->next)
    {
      if (p->type == bfd_section_reloc_link_order
	  || p->type == bfd_symbol_reloc_link_order)
	{
	  (*swap_in) (abfd, erela, irela);
	  erela += rel_hdr->sh_entsize;
	  irela++;
	  count++;
	}
      else if (p->type == bfd_indirect_link_order)
	{
	  struct bfd_elf_section_reloc_data *input_reldata;
	  arm_unwind_table_edit *edit_list, *edit_tail;
	  _arm_elf_section_data *eadi;
	  bfd_size_type j;
	  bfd_vma offset;
	  asection *i;

	  i = p->u.indirect.section;

	  eadi = get_arm_elf_section_data (i);
	  edit_list = eadi->u.exidx.unwind_edit_list;
	  edit_tail = eadi->u.exidx.unwind_edit_tail;
	  offset = i->output_offset;

	  if (eadi->elf.rel.hdr &&
	      eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rel;
	  else if (eadi->elf.rela.hdr &&
		   eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rela;
	  else
	    abort ();

	  if (edit_list)
	    {
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  arm_unwind_table_edit *edit_node, *edit_next;
		  bfd_vma bias;
		  bfd_vma reloc_index;

		  (*swap_in) (abfd, erela, irela);
		  reloc_index = (irela->r_offset - offset) / 8;

		  bias = 0;
		  edit_node = edit_list;
		  for (edit_next = edit_list;
		       edit_next && edit_next->index <= reloc_index;
		       edit_next = edit_node->next)
		    {
		      bias++;
		      edit_node = edit_next;
		    }

		  if (edit_node->type != DELETE_EXIDX_ENTRY
		      || edit_node->index != reloc_index)
		    {
		      irela->r_offset -= bias * 8;
		      irela++;
		      count++;
		    }

		  erela += rel_hdr->sh_entsize;
		}

	      if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
		{
		  /* New relocation entity.  */
		  asection *text_sec = edit_tail->linked_section;
		  asection *text_out = text_sec->output_section;
		  bfd_vma exidx_offset = offset + i->size - 8;

		  irela->r_addend = 0;
		  irela->r_offset = exidx_offset;
		  irela->r_info = ELF32_R_INFO
		    (text_out->target_index, R_ARM_PREL31);
		  irela++;
		  count++;
		}
	    }
	  else
	    {
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  (*swap_in) (abfd, erela, irela);
		  erela += rel_hdr->sh_entsize;
		  irela++;
		}

	      count += NUM_SHDR_ENTRIES (input_reldata->hdr);
	    }
	}
    }

  reldata->count = count;
  rel_hdr->sh_size = count * rel_hdr->sh_entsize;

  erela = erela_head;
  irela = irela_head;
  while (count > 0)
    {
      (*swap_out) (abfd, irela, erela);
      erela += rel_hdr->sh_entsize;
      irela++;
      count--;
    }

  free (irela_head);

  /* Hashes are no longer valid.  */
  free (reldata->hashes);
  reldata->hashes = NULL;
}
/* Unwinding tables are not referenced directly.  This pass marks them as
   required if the corresponding code section is marked.  Similarly, ARMv8-M
   secure entry functions can only be referenced by SG veneers which are
   created after the GC process. They need to be marked in case they reside in
   their own section (as would be the case if code was compiled with
   -ffunction-sections).  */

static bool
elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
				  elf_gc_mark_hook_fn gc_mark_hook)
{
  bfd *sub;
  Elf_Internal_Shdr **elf_shdrp;
  asection *cmse_sec;
  obj_attribute *out_attr;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, sym_count, ext_start;
  const struct elf_backend_data *bed;
  struct elf_link_hash_entry **sym_hashes;
  struct elf32_arm_link_hash_entry *cmse_hash;
  bool again, is_v8m, first_bfd_browse = true;
  bool extra_marks_added = false;
  asection *isec;

  _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);

  out_attr = elf_known_obj_attributes_proc (info->output_bfd);
  is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	   && out_attr[Tag_CPU_arch_profile].i == 'M';

  /* Marking EH data may cause additional code sections to be marked,
     requiring multiple passes.  */
  again = true;
  while (again)
    {
      again = false;
      for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
	{
	  asection *o;

	  if (! is_arm_elf (sub))
	    continue;

	  elf_shdrp = elf_elfsections (sub);
	  for (o = sub->sections; o != NULL; o = o->next)
	    {
	      Elf_Internal_Shdr *hdr;

	      hdr = &elf_section_data (o)->this_hdr;
	      if (hdr->sh_type == SHT_ARM_EXIDX
		  && hdr->sh_link
		  && hdr->sh_link < elf_numsections (sub)
		  && !o->gc_mark
		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
		{
		  again = true;
		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
		    return false;
		}
	    }

	  /* Mark section holding ARMv8-M secure entry functions.  We mark all
	     of them so no need for a second browsing.  */
	  if (is_v8m && first_bfd_browse)
	    {
	      bool debug_sec_need_to_be_marked = false;

	      sym_hashes = elf_sym_hashes (sub);
	      bed = get_elf_backend_data (sub);
	      symtab_hdr = &elf_tdata (sub)->symtab_hdr;
	      sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
	      ext_start = symtab_hdr->sh_info;

	      /* Scan symbols.  */
	      for (i = ext_start; i < sym_count; i++)
		{
		  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
		  if (cmse_hash == NULL)
		    continue;

		  /* Assume it is a special symbol.  If not, cmse_scan will
		     warn about it and user can do something about it.  */
		  if (startswith (cmse_hash->root.root.root.string,
				  CMSE_PREFIX))
		    {
		      cmse_sec = cmse_hash->root.root.u.def.section;
		      if (!cmse_sec->gc_mark
			  && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
			return false;
		      /* The debug sections related to these secure entry
			 functions are marked on enabling below flag.  */
		      debug_sec_need_to_be_marked = true;
		    }
		}

	      if (debug_sec_need_to_be_marked)
		{
		  /* Looping over all the sections of the object file containing
		     Armv8-M secure entry functions and marking all the debug
		     sections.  */
		  for (isec = sub->sections; isec != NULL; isec = isec->next)
		    {
		      /* If not a debug section, skip it.  */
		      if (!isec->gc_mark && (isec->flags & SEC_DEBUGGING))
			{
			  isec->gc_mark = 1;
			  extra_marks_added = true;
			}
		    }
		  debug_sec_need_to_be_marked = false;
		}
	    }
	}
      first_bfd_browse = false;
    }

  /* PR 30354: If we have added extra marks then make sure that any
     dependencies of the newly marked sections are also marked.  */
  if (extra_marks_added)
    _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);

  return true;
}
/* Treat mapping symbols as special target symbols.  */

static bool
elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
{
  return bfd_is_arm_special_symbol_name (sym->name,
					 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
}
/* If the ELF symbol SYM might be a function in SEC, return the
   function size and set *CODE_OFF to the function's entry point,
   otherwise return zero.  */

static bfd_size_type
elf32_arm_maybe_function_sym (const asymbol *sym, asection *sec,
			      bfd_vma *code_off)
{
  bfd_size_type size;
  elf_symbol_type * elf_sym = (elf_symbol_type *) sym;

  if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
		     | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
      || sym->section != sec)
    return 0;

  size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size;

  if (!(sym->flags & BSF_SYNTHETIC))
    switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info))
      {
      case STT_NOTYPE:
	/* Ignore symbols created by the annobin plugin for gcc and clang.
	   These symbols are hidden, local, notype and have a size of 0.  */
	if (size == 0
	    && sym->flags & BSF_LOCAL
	    && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN)
	  return 0;
	/* Fall through.  */
      case STT_FUNC:
      case STT_ARM_TFUNC:
	/* FIXME: Allow STT_GNU_IFUNC as well ?  */
	break;
      default:
	return 0;
      }

  if ((sym->flags & BSF_LOCAL)
      && bfd_is_arm_special_symbol_name (sym->name,
					 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
    return 0;

  *code_off = sym->value;

  /* Do not return 0 for the function's size.  */
  return size ? size : 1;
}
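
/* Find inlined-function information for ABFD using its DWARF2 debug data.  */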
static bool
elf32_arm_find_inliner_info (bfd * abfd,
			     const char ** filename_ptr,
			     const char ** functionname_ptr,
			     unsigned int * line_ptr)
{
  bool found;

  found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
					 functionname_ptr, line_ptr,
					 & elf_tdata (abfd)->dwarf2_find_line_info);
  return found;
}
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.  */

static bool
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection *s, *srel;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return false;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->is_weakalias
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      return true;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs; Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->is_weakalias)
    {
      struct elf_link_hash_entry *def = weakdef (h);
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
      h->root.u.def.section = def->root.u.def.section;
      h->root.u.def.value = def->root.u.def.value;
      return true;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return true;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  */
  if (bfd_link_pic (info))
    return true;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
    {
      s = globals->root.sdynrelro;
      srel = globals->root.sreldynrelro;
    }
  else
    {
      s = globals->root.sdynbss;
      srel = globals->root.srelbss;
    }
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
/* Allocate space in .plt, .got and associated reloc sections for
   dynamic relocs.  */

static bool
allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info *info;
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;
  struct elf_dyn_relocs *p;

  if (h->root.type == bfd_link_hash_indirect)
    return true;

  eh = (struct elf32_arm_link_hash_entry *) h;

  info = (struct bfd_link_info *) inf;
  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return false;

  if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
      && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1 && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;
	}

      /* If the call in the PLT entry binds locally, the associated
	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
	 than the .plt section.  */
      if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
	{
	  eh->is_iplt = 1;
	  if (eh->plt.noncall_refcount == 0
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	    /* All non-call references can be resolved directly.
	       This means that they can (and in some cases, must)
	       resolve directly to the run-time target, rather than
	       to the PLT.  That in turn means that any .got entry
	       would be equal to the .igot.plt entry, so there's
	       no point having both.  */
	    h->got.refcount = 0;
	}

      if (bfd_link_pic (info)
	  || eh->is_iplt
	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (! bfd_link_pic (info)
	      && !h->def_regular)
	    {
	      h->root.u.def.section = htab->root.splt;
	      h->root.u.def.value = h->plt.offset;

	      /* Make sure the function is not marked as Thumb, in case
		 it is the target of an ABS32 relocation, which will
		 point to the PLT entry.  */
	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
	    }

	  /* VxWorks executables have a second set of relocations for
	     each PLT entry.  They go in a separate relocation section,
	     which is processed by the kernel loader.  */
	  if (htab->root.target_os == is_vxworks && !bfd_link_pic (info))
	    {
	      /* There is a relocation for the initial PLT entry:
		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
	      if (h->plt.offset == htab->plt_header_size)
		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);

	      /* There are two extra relocations for each subsequent
		 PLT entry: an R_ARM_32 relocation for the GOT entry,
		 and an R_ARM_32 relocation for the PLT entry.  */
	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
	    }
	}
      else
	{
	  h->plt.offset = (bfd_vma) -1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      h->plt.offset = (bfd_vma) -1;
      h->needs_plt = 0;
    }

  eh = (struct elf32_arm_link_hash_entry *) h;
  eh->tlsdesc_got = (bfd_vma) -1;

  if (h->got.refcount > 0)
    {
      asection *s;
      bool dyn;
      int tls_type = elf32_arm_hash_entry (h)->tls_type;
      int indx;

      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (htab->root.dynamic_sections_created
	  && h->dynindx == -1
	  && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;
	}

      s = htab->root.sgot;
      h->got.offset = s->size;

      if (tls_type == GOT_UNKNOWN)
	abort ();

      if (tls_type == GOT_NORMAL)
	/* Non-TLS symbols need one GOT slot.  */
	s->size += 4;
      else
	{
	  if (tls_type & GOT_TLS_GDESC)
	    {
	      /* R_ARM_TLS_DESC needs 2 GOT slots.  */
	      eh->tlsdesc_got
		= (htab->root.sgotplt->size
		   - elf32_arm_compute_jump_table_size (htab));
	      htab->root.sgotplt->size += 8;
	      h->got.offset = (bfd_vma) -2;
	      /* plt.got_offset needs to know there's a TLS_DESC
		 reloc in the middle of .got.plt.  */
	      htab->num_tls_desc++;
	    }

	  if (tls_type & GOT_TLS_GD)
	    {
	      /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
		 consecutive GOT slots.  If the symbol is both GD
		 and GDESC, got.offset may have been
		 overwritten.  */
	      h->got.offset = s->size;
	      s->size += 8;
	    }

	  if (tls_type & GOT_TLS_IE)
	    /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
	       slot.  */
	    s->size += 4;
	}

      dyn = htab->root.dynamic_sections_created;

      indx = 0;
      if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
	  && (!bfd_link_pic (info)
	      || !SYMBOL_REFERENCES_LOCAL (info, h)))
	indx = h->dynindx;

      if (tls_type != GOT_NORMAL
	  && (bfd_link_dll (info) || indx != 0)
	  && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
	      || h->root.type != bfd_link_hash_undefweak))
	{
	  if (tls_type & GOT_TLS_IE)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	  if (tls_type & GOT_TLS_GD)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	  if (tls_type & GOT_TLS_GDESC)
	    {
	      elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	      /* GDESC needs a trampoline to jump to.  */
	      htab->tls_trampoline = -1;
	    }

	  /* Only GD needs it.  GDESC just emits one relocation per
	     2 entries.  */
	  if ((tls_type & GOT_TLS_GD) && indx != 0)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	}
      else if (((indx != -1) || htab->fdpic_p)
	       && !SYMBOL_REFERENCES_LOCAL (info, h))
	{
	  if (htab->root.dynamic_sections_created)
	    /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	}
      else if (h->type == STT_GNU_IFUNC
	       && eh->plt.noncall_refcount == 0)
	/* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
	   they all resolve dynamically instead.  Reserve room for the
	   GOT entry's R_ARM_IRELATIVE relocation.  */
	elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
      else if (bfd_link_pic (info)
	       && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
	/* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
      else if (htab->fdpic_p && tls_type == GOT_NORMAL)
	/* Reserve room for rofixup for FDPIC executable.  */
	/* TLS relocs do not need space since they are completely
	   resolved.  */
	htab->srofixup->size += 4;
    }
  else
    h->got.offset = (bfd_vma) -1;
  /* FDPIC support.  */
  if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
    {
      /* Symbol mustn't be exported.  */
      if (h->dynindx != -1)
	abort ();

      /* We only allocate one function descriptor with its associated
	 relocation.  */
      if (eh->fdpic_cnts.funcdesc_offset == -1)
	{
	  asection *s = htab->root.sgot;

	  eh->fdpic_cnts.funcdesc_offset = s->size;
	  s->size += 8;
	  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
	  if (bfd_link_pic (info))
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    htab->srofixup->size += 8;
	}
    }

  if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
    {
      asection *s = htab->root.sgot;

      if (htab->root.dynamic_sections_created && h->dynindx == -1
	  && !h->forced_local)
	if (! bfd_elf_link_record_dynamic_symbol (info, h))
	  return false;

      if (h->dynindx == -1)
	{
	  /* We only allocate one function descriptor with its
	     associated relocation.  */
	  if (eh->fdpic_cnts.funcdesc_offset == -1)
	    {
	      eh->fdpic_cnts.funcdesc_offset = s->size;
	      s->size += 8;
	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
		 rofixups.  */
	      if (bfd_link_pic (info))
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	      else
		htab->srofixup->size += 8;
	    }
	}

      /* Add one entry into the GOT and a R_ARM_FUNCDESC or
	 R_ARM_RELATIVE/rofixup relocation on it.  */
      eh->fdpic_cnts.gotfuncdesc_offset = s->size;
      s->size += 4;
      if (h->dynindx == -1 && !bfd_link_pic (info))
	htab->srofixup->size += 4;
      else
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }

  if (eh->fdpic_cnts.funcdesc_cnt > 0)
    {
      if (htab->root.dynamic_sections_created && h->dynindx == -1
	  && !h->forced_local)
	if (! bfd_elf_link_record_dynamic_symbol (info, h))
	  return false;

      if (h->dynindx == -1)
	{
	  /* We only allocate one function descriptor with its
	     associated relocation.  */
	  if (eh->fdpic_cnts.funcdesc_offset == -1)
	    {
	      asection *s = htab->root.sgot;

	      eh->fdpic_cnts.funcdesc_offset = s->size;
	      s->size += 8;
	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
		 rofixups.  */
	      if (bfd_link_pic (info))
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	      else
		htab->srofixup->size += 8;
	    }
	}
      if (h->dynindx == -1 && !bfd_link_pic (info))
	{
	  /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup.  */
	  htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
	}
      else
	{
	  /* We will need one dynamic reloc per reference.  It will be either
	     R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
					eh->fdpic_cnts.funcdesc_cnt);
	}
    }

  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (!htab->use_blx && h->dynindx != -1
      && h->def_regular
      && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
      && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
    {
      struct elf_link_hash_entry * th;
      struct bfd_link_hash_entry * bh;
      struct elf_link_hash_entry * myh;
      char name[1024];
      asection *s;
      bh = NULL;
      /* Create a new symbol to register the real location of the function.  */
      s = h->root.u.def.section;
      sprintf (name, "__real_%s", h->root.root.string);
      _bfd_generic_link_add_one_symbol (info, s->owner,
					name, BSF_GLOBAL, s,
					h->root.u.def.value,
					NULL, true, false, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
      myh->forced_local = 1;
      ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
      eh->export_glue = myh;
      th = record_arm_to_thumb_glue (info, h);
      /* Point the symbol at the stub.  */
      h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
      h->root.u.def.section = th->root.u.def.section;
      h->root.u.def.value = th->root.u.def.value & ~1;
    }

  if (h->dyn_relocs == NULL)
    return true;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (bfd_link_pic (info)
      || htab->fdpic_p)
    {
      /* Relocs that use pc_count are PC-relative forms, which will appear
	 on something like ".long foo - ." or "movw REG, foo - .".  We want
	 calls to protected symbols to resolve directly to the function
	 rather than going via the plt.  If people want function pointer
	 comparisons to work as expected then they should avoid writing
	 assembly like ".long foo - .".  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      if (htab->root.target_os == is_vxworks)
	{
	  struct elf_dyn_relocs **pp;

	  for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
	    {
	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
	 visibility.  */
      if (h->dyn_relocs != NULL
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
	    h->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (htab->root.dynamic_sections_created && h->dynindx == -1
		   && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return false;
	    }
	}
    }
  else
    {
      /* For the non-shared case, discard space for relocs against
	 symbols which turn out to need copy relocs or are not
	 dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1 && !h->forced_local
	      && h->root.type == bfd_link_hash_undefweak)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return false;
	    }

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      h->dyn_relocs = NULL;

    keep: ;
    }

  /* Finally, allocate space.  */
  for (p = h->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc = elf_section_data (p->sec)->sreloc;

      if (h->type == STT_GNU_IFUNC
	  && eh->plt.noncall_refcount == 0
	  && SYMBOL_REFERENCES_LOCAL (info, h))
	elf32_arm_allocate_irelocs (info, sreloc, p->count);
      else if (h->dynindx != -1
	       && (!bfd_link_pic (info) || !info->symbolic || !h->def_regular))
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
      else if (htab->fdpic_p && !bfd_link_pic (info))
	htab->srofixup->size += 4 * p->count;
      else
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
    }

  return true;
}
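
/* Record in the link hash table whether code sections are to be byte-swapped
   when the output is written.  */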
bool
bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
				 int byteswap_code)
{
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return false;

  globals->byteswap_code = byteswap_code;
  return true;
}
/* Set the sizes of the dynamic sections.  */

static bool
elf32_arm_late_size_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
			      struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return false;

  dynobj = elf_hash_table (info)->dynobj;
  if (dynobj == NULL)
    return true;

  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      unsigned int symndx;
      struct fdpic_local *local_fdpic_cnts;

      if (! is_arm_elf (ibfd))
	continue;

      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (htab->root.target_os == is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  if (htab->fdpic_p && !bfd_link_pic (info))
		    htab->srofixup->size += 4 * p->count;
		  else
		    elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (local_got == NULL)
	continue;

      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
	{
	  if (symndx >= elf32_arm_num_entries (ibfd))
	    return false;

	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;
16908 /* FDPIC support. */
16909 if (local_fdpic_cnts
->gotofffuncdesc_cnt
> 0)
16911 if (local_fdpic_cnts
->funcdesc_offset
== -1)
16913 local_fdpic_cnts
->funcdesc_offset
= s
->size
;
16916 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16917 if (bfd_link_pic (info
))
16918 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16920 htab
->srofixup
->size
+= 8;
16924 if (local_fdpic_cnts
->funcdesc_cnt
> 0)
16926 if (local_fdpic_cnts
->funcdesc_offset
== -1)
16928 local_fdpic_cnts
->funcdesc_offset
= s
->size
;
16931 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16932 if (bfd_link_pic (info
))
16933 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16935 htab
->srofixup
->size
+= 8;
16938 /* We will add n R_ARM_RELATIVE relocations or n rofixups. */
16939 if (bfd_link_pic (info
))
16940 elf32_arm_allocate_dynrelocs (info
, srel
, local_fdpic_cnts
->funcdesc_cnt
);
16942 htab
->srofixup
->size
+= 4 * local_fdpic_cnts
->funcdesc_cnt
;
16945 if (local_iplt
!= NULL
)
16947 struct elf_dyn_relocs
*p
;
16949 if (local_iplt
->root
.refcount
> 0)
16951 elf32_arm_allocate_plt_entry (info
, true,
16954 if (local_iplt
->arm
.noncall_refcount
== 0)
16955 /* All references to the PLT are calls, so all
16956 non-call references can resolve directly to the
16957 run-time target. This means that the .got entry
16958 would be the same as the .igot.plt entry, so there's
16959 no point creating both. */
16964 BFD_ASSERT (local_iplt
->arm
.noncall_refcount
== 0);
16965 local_iplt
->root
.offset
= (bfd_vma
) -1;
16968 for (p
= local_iplt
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
16972 psrel
= elf_section_data (p
->sec
)->sreloc
;
16973 if (local_iplt
->arm
.noncall_refcount
== 0)
16974 elf32_arm_allocate_irelocs (info
, psrel
, p
->count
);
16976 elf32_arm_allocate_dynrelocs (info
, psrel
, p
->count
);
16979 if (*local_got
> 0)
16981 Elf_Internal_Sym
*isym
;
16983 *local_got
= s
->size
;
16984 if (*local_tls_type
& GOT_TLS_GD
)
16985 /* TLS_GD relocs need an 8-byte structure in the GOT. */
16987 if (*local_tls_type
& GOT_TLS_GDESC
)
16989 *local_tlsdesc_gotent
= htab
->root
.sgotplt
->size
16990 - elf32_arm_compute_jump_table_size (htab
);
16991 htab
->root
.sgotplt
->size
+= 8;
16992 *local_got
= (bfd_vma
) -2;
16993 /* plt.got_offset needs to know there's a TLS_DESC
16994 reloc in the middle of .got.plt. */
16995 htab
->num_tls_desc
++;
16997 if (*local_tls_type
& GOT_TLS_IE
)
17000 if (*local_tls_type
& GOT_NORMAL
)
17002 /* If the symbol is both GD and GDESC, *local_got
17003 may have been overwritten. */
17004 *local_got
= s
->size
;
17008 isym
= bfd_sym_from_r_symndx (&htab
->root
.sym_cache
, ibfd
,
17013 /* If all references to an STT_GNU_IFUNC PLT are calls,
17014 then all non-call references, including this GOT entry,
17015 resolve directly to the run-time target. */
17016 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
17017 && (local_iplt
== NULL
17018 || local_iplt
->arm
.noncall_refcount
== 0))
17019 elf32_arm_allocate_irelocs (info
, srel
, 1);
17020 else if (bfd_link_pic (info
) || output_bfd
->flags
& DYNAMIC
|| htab
->fdpic_p
)
17022 if ((bfd_link_pic (info
) && !(*local_tls_type
& GOT_TLS_GDESC
)))
17023 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
17024 else if (htab
->fdpic_p
&& *local_tls_type
& GOT_NORMAL
)
17025 htab
->srofixup
->size
+= 4;
17027 if ((bfd_link_pic (info
) || htab
->fdpic_p
)
17028 && *local_tls_type
& GOT_TLS_GDESC
)
17030 elf32_arm_allocate_dynrelocs (info
,
17031 htab
->root
.srelplt
, 1);
17032 htab
->tls_trampoline
= -1;
17037 *local_got
= (bfd_vma
) -1;
17041 if (htab
->tls_ldm_got
.refcount
> 0)
17043 /* Allocate two GOT entries and one dynamic relocation (if necessary)
17044 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
17045 htab
->tls_ldm_got
.offset
= htab
->root
.sgot
->size
;
17046 htab
->root
.sgot
->size
+= 8;
17047 if (bfd_link_pic (info
))
17048 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
17051 htab
->tls_ldm_got
.offset
= -1;
17053 /* At the very end of the .rofixup section is a pointer to the GOT,
17054 reserve space for it. */
17055 if (htab
->fdpic_p
&& htab
->srofixup
!= NULL
)
17056 htab
->srofixup
->size
+= 4;
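  /* Each .rofixup entry is one 32-bit address that the FDPIC runtime
     loader relocates by the load offset; the extra word reserved here is
     the trailing pointer to the GOT itself.  This sizing must stay in sync
     with emission: elf32_arm_finish_dynamic_sections asserts that
     reloc_count * 4 == size for this section.  */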
17058 /* Allocate global sym .plt and .got entries, and space for global
17059 sym dynamic relocs. */
17060 elf_link_hash_traverse (& htab
->root
, allocate_dynrelocs_for_symbol
, info
);
17062 /* Here we rummage through the found bfds to collect glue information. */
17063 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
17065 if (! is_arm_elf (ibfd
))
17068 /* Initialise mapping tables for code/data. */
17069 bfd_elf32_arm_init_maps (ibfd
);
17071 if (!bfd_elf32_arm_process_before_allocation (ibfd
, info
)
17072 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd
, info
)
17073 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd
, info
))
17074 _bfd_error_handler (_("errors encountered processing file %pB"), ibfd
);
17077 /* Allocate space for the glue sections now that we've sized them. */
17078 bfd_elf32_arm_allocate_interworking_sections (info
);
17080 /* For every jump slot reserved in the sgotplt, reloc_count is
17081 incremented. However, when we reserve space for TLS descriptors,
17082 it's not incremented, so in order to compute the space reserved
17083 for them, it suffices to multiply the reloc count by the jump
17085 if (htab
->root
.srelplt
)
17086 htab
->sgotplt_jump_table_size
= elf32_arm_compute_jump_table_size (htab
);
17088 if (htab
->tls_trampoline
)
17090 if (htab
->root
.splt
->size
== 0)
17091 htab
->root
.splt
->size
+= htab
->plt_header_size
;
17093 htab
->tls_trampoline
= htab
->root
.splt
->size
;
17094 htab
->root
.splt
->size
+= htab
->plt_entry_size
;
17096 /* If we're not using lazy TLS relocations, don't generate the
17097 PLT and GOT entries they require. */
17098 if ((info
->flags
& DF_BIND_NOW
))
17099 htab
->root
.tlsdesc_plt
= 0;
17102 htab
->root
.tlsdesc_got
= htab
->root
.sgot
->size
;
17103 htab
->root
.sgot
->size
+= 4;
17105 htab
->root
.tlsdesc_plt
= htab
->root
.splt
->size
;
17106 htab
->root
.splt
->size
+= 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline
);
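      /* The words reserved above hold the lazy TLS descriptor trampoline
	 (at root.tlsdesc_plt in .plt) and a GOT slot (at root.tlsdesc_got)
	 which the dynamic linker typically fills with the address of its
	 lazy TLSDESC resolver.  Both offsets are exported via the
	 DT_TLSDESC_PLT and DT_TLSDESC_GOT dynamic tags written in
	 elf32_arm_finish_dynamic_sections below.  */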
17110 /* The check_relocs and adjust_dynamic_symbol entry points have
17111 determined the sizes of the various dynamic sections. Allocate
17112 memory for them. */
17114 for (s
= dynobj
->sections
; s
!= NULL
; s
= s
->next
)
17118 if ((s
->flags
& SEC_LINKER_CREATED
) == 0)
17121 /* It's OK to base decisions on the section name, because none
17122 of the dynobj section names depend upon the input files. */
17123 name
= bfd_section_name (s
);
17125 if (s
== htab
->root
.splt
)
17127 /* Remember whether there is a PLT. */
17130 else if (startswith (name
, ".rel"))
17134 /* Remember whether there are any reloc sections other
17135 than .rel(a).plt and .rela.plt.unloaded. */
17136 if (s
!= htab
->root
.srelplt
&& s
!= htab
->srelplt2
)
17139 /* We use the reloc_count field as a counter if we need
17140 to copy relocs into the output file. */
17141 s
->reloc_count
= 0;
17144 else if (s
!= htab
->root
.sgot
17145 && s
!= htab
->root
.sgotplt
17146 && s
!= htab
->root
.iplt
17147 && s
!= htab
->root
.igotplt
17148 && s
!= htab
->root
.sdynbss
17149 && s
!= htab
->root
.sdynrelro
17150 && s
!= htab
->srofixup
)
17152 /* It's not one of our sections, so don't allocate space. */
17158 /* If we don't need this section, strip it from the
17159 output file. This is mostly to handle .rel(a).bss and
17160 .rel(a).plt. We must create both sections in
17161 create_dynamic_sections, because they must be created
17162 before the linker maps input sections to output
17163 sections. The linker does that before
17164 adjust_dynamic_symbol is called, and it is that
17165 function which decides whether anything needs to go
17166 into these sections. */
17167 s
->flags
|= SEC_EXCLUDE
;
17171 if ((s
->flags
& SEC_HAS_CONTENTS
) == 0)
17174 /* Allocate memory for the section contents. */
17175 s
->contents
= (unsigned char *) bfd_zalloc (dynobj
, s
->size
);
17176 if (s
->contents
== NULL
)
17180 return _bfd_elf_maybe_vxworks_add_dynamic_tags (output_bfd
, info
,
/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed.  */

static bool
elf32_arm_early_size_sections (bfd *output_bfd, struct bfd_link_info *info)
{
  asection *tls_sec;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);

  if (bfd_link_relocatable (info))
    return true;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", true, true, false);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, false, bed->collect, &bh)))
	    return false;

	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *) bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
	}
    }

  if (htab->fdpic_p && !bfd_link_relocatable (info)
      && !bfd_elf_stack_segment_size (output_bfd, info,
				      "__stacksize", DEFAULT_STACK_SIZE))
    return false;

  return true;
}
17235 /* Finish up dynamic symbol handling. We set the contents of various
17236 dynamic sections here. */
17239 elf32_arm_finish_dynamic_symbol (bfd
* output_bfd
,
17240 struct bfd_link_info
* info
,
17241 struct elf_link_hash_entry
* h
,
17242 Elf_Internal_Sym
* sym
)
17244 struct elf32_arm_link_hash_table
*htab
;
17245 struct elf32_arm_link_hash_entry
*eh
;
17247 htab
= elf32_arm_hash_table (info
);
17249 eh
= (struct elf32_arm_link_hash_entry
*) h
;
17251 if (h
->plt
.offset
!= (bfd_vma
) -1)
17255 BFD_ASSERT (h
->dynindx
!= -1);
17256 if (! elf32_arm_populate_plt_entry (output_bfd
, info
, &h
->plt
, &eh
->plt
,
17261 if (!h
->def_regular
)
17263 /* Mark the symbol as undefined, rather than as defined in
17264 the .plt section. */
17265 sym
->st_shndx
= SHN_UNDEF
;
17266 /* If the symbol is weak we need to clear the value.
17267 Otherwise, the PLT entry would provide a definition for
17268 the symbol even if the symbol wasn't defined anywhere,
17269 and so the symbol would never be NULL. Leave the value if
17270 there were any relocations where pointer equality matters
17271 (this is a clue for the dynamic linker, to make function
17272 pointer comparisons work between an application and shared
17274 if (!h
->ref_regular_nonweak
|| !h
->pointer_equality_needed
)
17277 else if (eh
->is_iplt
&& eh
->plt
.noncall_refcount
!= 0)
17279 /* At least one non-call relocation references this .iplt entry,
17280 so the .iplt entry is the function's canonical address. */
17281 sym
->st_info
= ELF_ST_INFO (ELF_ST_BIND (sym
->st_info
), STT_FUNC
);
17282 ARM_SET_SYM_BRANCH_TYPE (sym
->st_target_internal
, ST_BRANCH_TO_ARM
);
17283 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
17284 (output_bfd
, htab
->root
.iplt
->output_section
));
17285 sym
->st_value
= (h
->plt
.offset
17286 + htab
->root
.iplt
->output_section
->vma
17287 + htab
->root
.iplt
->output_offset
);
17294 Elf_Internal_Rela rel
;
17296 /* This symbol needs a copy reloc. Set it up. */
17297 BFD_ASSERT (h
->dynindx
!= -1
17298 && (h
->root
.type
== bfd_link_hash_defined
17299 || h
->root
.type
== bfd_link_hash_defweak
));
17302 rel
.r_offset
= (h
->root
.u
.def
.value
17303 + h
->root
.u
.def
.section
->output_section
->vma
17304 + h
->root
.u
.def
.section
->output_offset
);
17305 rel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_COPY
);
17306 if (h
->root
.u
.def
.section
== htab
->root
.sdynrelro
)
17307 s
= htab
->root
.sreldynrelro
;
17309 s
= htab
->root
.srelbss
;
17310 elf32_arm_add_dynreloc (output_bfd
, info
, s
, &rel
);
17313 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
17314 and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17315 it is relative to the ".got" section. */
17316 if (h
== htab
->root
.hdynamic
17318 && htab
->root
.target_os
!= is_vxworks
17319 && h
== htab
->root
.hgot
))
17320 sym
->st_shndx
= SHN_ABS
;
static void
arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
		    bfd_byte *contents,
		    const unsigned long *template, unsigned count)
{
  unsigned ix;

  for (ix = 0; ix != count; ix++)
    {
      unsigned long insn = template[ix];

      /* Emit mov pc,rx if bx is not permitted.  */
      if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
	insn = (insn & 0xf000000f) | 0x01a0f000;
      put_arm_insn (htab, output_bfd, insn, (char *) contents + ix * 4);
    }
}
/* Install the special first PLT entry for elf32-arm-nacl.  Unlike
   other variants, NaCl needs this entry in a static executable's
   .iplt too.  When we're handling that case, GOT_DISPLACEMENT is
   zero.  For .iplt really only the last bundle is useful, and .iplt
   could have a shorter first entry, with each individual PLT entry's
   relative branch calculated differently so it targets the last
   bundle instead of the instruction before it (labelled .Lplt_tail
   above).  But it's simpler to keep the size and layout of PLT0
   consistent with the dynamic case, at the cost of some dead code at
   the start of .iplt and the one dead store to the stack at the start
   of every PLT entry.  */

static void
arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
		   asection *plt, bfd_vma got_displacement)
{
  unsigned int i;

  put_arm_insn (htab, output_bfd,
		elf32_arm_nacl_plt0_entry[0]
		| arm_movw_immediate (got_displacement),
		plt->contents + 0);
  put_arm_insn (htab, output_bfd,
		elf32_arm_nacl_plt0_entry[1]
		| arm_movt_immediate (got_displacement),
		plt->contents + 4);

  for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
    put_arm_insn (htab, output_bfd,
		  elf32_arm_nacl_plt0_entry[i],
		  plt->contents + (i * 4));
}
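
/* Note on the immediate helpers used above: GOT_DISPLACEMENT is a full
   32-bit value, so it is split across a MOVW/MOVT pair.  arm_movw_immediate
   is expected to scatter the low 16 bits into the A32 MOVW immediate fields
   (imm4 in bits 19:16, imm12 in bits 11:0) and arm_movt_immediate to do the
   same with the high 16 bits for MOVT.  */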
17375 /* Finish up the dynamic sections. */
17378 elf32_arm_finish_dynamic_sections (bfd
* output_bfd
, struct bfd_link_info
* info
)
17383 struct elf32_arm_link_hash_table
*htab
;
17385 htab
= elf32_arm_hash_table (info
);
17389 dynobj
= elf_hash_table (info
)->dynobj
;
17391 sgot
= htab
->root
.sgotplt
;
17392 /* A broken linker script might have discarded the dynamic sections.
17393 Catch this here so that we do not seg-fault later on. */
17394 if (sgot
!= NULL
&& bfd_is_abs_section (sgot
->output_section
))
17396 sdyn
= bfd_get_linker_section (dynobj
, ".dynamic");
17398 if (elf_hash_table (info
)->dynamic_sections_created
)
17401 Elf32_External_Dyn
*dyncon
, *dynconend
;
17403 splt
= htab
->root
.splt
;
17404 BFD_ASSERT (splt
!= NULL
&& sdyn
!= NULL
);
17405 BFD_ASSERT (sgot
!= NULL
);
17407 dyncon
= (Elf32_External_Dyn
*) sdyn
->contents
;
17408 dynconend
= (Elf32_External_Dyn
*) (sdyn
->contents
+ sdyn
->size
);
17410 for (; dyncon
< dynconend
; dyncon
++)
17412 Elf_Internal_Dyn dyn
;
17416 bfd_elf32_swap_dyn_in (dynobj
, dyncon
, &dyn
);
17421 if (htab
->root
.target_os
== is_vxworks
17422 && elf_vxworks_finish_dynamic_entry (output_bfd
, &dyn
))
17423 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17438 name
= RELOC_SECTION (htab
, ".plt");
17440 s
= bfd_get_linker_section (dynobj
, name
);
17444 (_("could not find section %s"), name
);
17445 bfd_set_error (bfd_error_invalid_operation
);
17448 dyn
.d_un
.d_ptr
= s
->output_section
->vma
+ s
->output_offset
;
17449 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17453 s
= htab
->root
.srelplt
;
17454 BFD_ASSERT (s
!= NULL
);
17455 dyn
.d_un
.d_val
= s
->size
;
17456 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17465 case DT_TLSDESC_PLT
:
17466 s
= htab
->root
.splt
;
17467 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
17468 + htab
->root
.tlsdesc_plt
);
17469 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17472 case DT_TLSDESC_GOT
:
17473 s
= htab
->root
.sgot
;
17474 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
17475 + htab
->root
.tlsdesc_got
);
17476 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17479 /* Set the bottom bit of DT_INIT/FINI if the
17480 corresponding function is Thumb. */
17482 name
= info
->init_function
;
17485 name
= info
->fini_function
;
17487 /* If it wasn't set by elf_bfd_final_link
17488 then there is nothing to adjust. */
17489 if (dyn
.d_un
.d_val
!= 0)
17491 struct elf_link_hash_entry
* eh
;
17493 eh
= elf_link_hash_lookup (elf_hash_table (info
), name
,
17494 false, false, true);
17496 && ARM_GET_SYM_BRANCH_TYPE (eh
->target_internal
)
17497 == ST_BRANCH_TO_THUMB
)
17499 dyn
.d_un
.d_val
|= 1;
17500 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
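	      /* Setting bit 0 of the DT_INIT/DT_FINI address marks the init
		 or fini routine as Thumb code; the dynamic linker reaches it
		 with an interworking branch, so the low bit selects the
		 instruction set state.  Arm entry points keep bit 0 clear.  */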
17507 /* Fill in the first entry in the procedure linkage table. */
17508 if (splt
->size
> 0 && htab
->plt_header_size
)
17510 const bfd_vma
*plt0_entry
;
17511 bfd_vma got_address
, plt_address
, got_displacement
;
17513 /* Calculate the addresses of the GOT and PLT. */
17514 got_address
= sgot
->output_section
->vma
+ sgot
->output_offset
;
17515 plt_address
= splt
->output_section
->vma
+ splt
->output_offset
;
17517 if (htab
->root
.target_os
== is_vxworks
)
17519 /* The VxWorks GOT is relocated by the dynamic linker.
17520 Therefore, we must emit relocations rather than simply
17521 computing the values now. */
17522 Elf_Internal_Rela rel
;
17524 plt0_entry
= elf32_arm_vxworks_exec_plt0_entry
;
17525 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17526 splt
->contents
+ 0);
17527 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17528 splt
->contents
+ 4);
17529 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17530 splt
->contents
+ 8);
17531 bfd_put_32 (output_bfd
, got_address
, splt
->contents
+ 12);
17533 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
17534 rel
.r_offset
= plt_address
+ 12;
17535 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
17537 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
,
17538 htab
->srelplt2
->contents
);
17540 else if (htab
->root
.target_os
== is_nacl
)
17541 arm_nacl_put_plt0 (htab
, output_bfd
, splt
,
17542 got_address
+ 8 - (plt_address
+ 16));
17543 else if (using_thumb_only (htab
))
17545 got_displacement
= got_address
- (plt_address
+ 12);
17547 plt0_entry
= elf32_thumb2_plt0_entry
;
17548 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17549 splt
->contents
+ 0);
17550 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17551 splt
->contents
+ 4);
17552 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17553 splt
->contents
+ 8);
17555 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 12);
17559 got_displacement
= got_address
- (plt_address
+ 16);
17561 plt0_entry
= elf32_arm_plt0_entry
;
17562 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17563 splt
->contents
+ 0);
17564 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17565 splt
->contents
+ 4);
17566 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17567 splt
->contents
+ 8);
17568 put_arm_insn (htab
, output_bfd
, plt0_entry
[3],
17569 splt
->contents
+ 12);
17571 #ifdef FOUR_WORD_PLT
17572 /* The displacement value goes in the otherwise-unused
17573 last word of the second entry. */
17574 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 28);
17576 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 16);
17581 /* UnixWare sets the entsize of .plt to 4, although that doesn't
17582 really seem like the right value. */
17583 if (splt
->output_section
->owner
== output_bfd
)
17584 elf_section_data (splt
->output_section
)->this_hdr
.sh_entsize
= 4;
17586 if (htab
->root
.tlsdesc_plt
)
17588 bfd_vma got_address
17589 = sgot
->output_section
->vma
+ sgot
->output_offset
;
17590 bfd_vma gotplt_address
= (htab
->root
.sgot
->output_section
->vma
17591 + htab
->root
.sgot
->output_offset
);
17592 bfd_vma plt_address
17593 = splt
->output_section
->vma
+ splt
->output_offset
;
17595 arm_put_trampoline (htab
, output_bfd
,
17596 splt
->contents
+ htab
->root
.tlsdesc_plt
,
17597 dl_tlsdesc_lazy_trampoline
, 6);
17599 bfd_put_32 (output_bfd
,
17600 gotplt_address
+ htab
->root
.tlsdesc_got
17601 - (plt_address
+ htab
->root
.tlsdesc_plt
)
17602 - dl_tlsdesc_lazy_trampoline
[6],
17603 splt
->contents
+ htab
->root
.tlsdesc_plt
+ 24);
17604 bfd_put_32 (output_bfd
,
17605 got_address
- (plt_address
+ htab
->root
.tlsdesc_plt
)
17606 - dl_tlsdesc_lazy_trampoline
[7],
17607 splt
->contents
+ htab
->root
.tlsdesc_plt
+ 24 + 4);
17610 if (htab
->tls_trampoline
)
17612 arm_put_trampoline (htab
, output_bfd
,
17613 splt
->contents
+ htab
->tls_trampoline
,
17614 tls_trampoline
, 3);
17615 #ifdef FOUR_WORD_PLT
17616 bfd_put_32 (output_bfd
, 0x00000000,
17617 splt
->contents
+ htab
->tls_trampoline
+ 12);
17621 if (htab
->root
.target_os
== is_vxworks
17622 && !bfd_link_pic (info
)
17623 && htab
->root
.splt
->size
> 0)
17625 /* Correct the .rel(a).plt.unloaded relocations. They will have
17626 incorrect symbol indexes. */
17630 num_plts
= ((htab
->root
.splt
->size
- htab
->plt_header_size
)
17631 / htab
->plt_entry_size
);
17632 p
= htab
->srelplt2
->contents
+ RELOC_SIZE (htab
);
17634 for (; num_plts
; num_plts
--)
17636 Elf_Internal_Rela rel
;
17638 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
17639 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
17640 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
17641 p
+= RELOC_SIZE (htab
);
17643 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
17644 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
17645 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
17646 p
+= RELOC_SIZE (htab
);
17651 if (htab
->root
.target_os
== is_nacl
17652 && htab
->root
.iplt
!= NULL
17653 && htab
->root
.iplt
->size
> 0)
17654 /* NaCl uses a special first entry in .iplt too. */
17655 arm_nacl_put_plt0 (htab
, output_bfd
, htab
->root
.iplt
, 0);
17657 /* Fill in the first three entries in the global offset table. */
17660 if (sgot
->size
> 0)
17663 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
);
17665 bfd_put_32 (output_bfd
,
17666 sdyn
->output_section
->vma
+ sdyn
->output_offset
,
17668 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 4);
17669 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 8);
17672 elf_section_data (sgot
->output_section
)->this_hdr
.sh_entsize
= 4;
17675 /* At the very end of the .rofixup section is a pointer to the GOT. */
17676 if (htab
->fdpic_p
&& htab
->srofixup
!= NULL
)
17678 struct elf_link_hash_entry
*hgot
= htab
->root
.hgot
;
17680 bfd_vma got_value
= hgot
->root
.u
.def
.value
17681 + hgot
->root
.u
.def
.section
->output_section
->vma
17682 + hgot
->root
.u
.def
.section
->output_offset
;
17684 arm_elf_add_rofixup (output_bfd
, htab
->srofixup
, got_value
);
17686 /* Make sure we allocated and generated the same number of fixups. */
17687 BFD_ASSERT (htab
->srofixup
->reloc_count
* 4 == htab
->srofixup
->size
);
17694 elf32_arm_init_file_header (bfd
*abfd
, struct bfd_link_info
*link_info
)
17696 Elf_Internal_Ehdr
* i_ehdrp
; /* ELF file header, internal form. */
17697 struct elf32_arm_link_hash_table
*globals
;
17698 struct elf_segment_map
*m
;
17700 if (!_bfd_elf_init_file_header (abfd
, link_info
))
17703 i_ehdrp
= elf_elfheader (abfd
);
17705 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_UNKNOWN
)
17706 i_ehdrp
->e_ident
[EI_OSABI
] = ELFOSABI_ARM
;
17707 i_ehdrp
->e_ident
[EI_ABIVERSION
] = ARM_ELF_ABI_VERSION
;
17711 globals
= elf32_arm_hash_table (link_info
);
17712 if (globals
!= NULL
&& globals
->byteswap_code
)
17713 i_ehdrp
->e_flags
|= EF_ARM_BE8
;
17715 if (globals
->fdpic_p
)
17716 i_ehdrp
->e_ident
[EI_OSABI
] |= ELFOSABI_ARM_FDPIC
;
17719 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_VER5
17720 && ((i_ehdrp
->e_type
== ET_DYN
) || (i_ehdrp
->e_type
== ET_EXEC
)))
17722 int abi
= bfd_elf_get_obj_attr_int (abfd
, OBJ_ATTR_PROC
, Tag_ABI_VFP_args
);
17723 if (abi
== AEABI_VFP_args_vfp
)
17724 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_HARD
;
17726 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_SOFT
;
17729 /* Scan segment to set p_flags attribute if it contains only sections with
17730 SHF_ARM_PURECODE flag. */
17731 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
17737 for (j
= 0; j
< m
->count
; j
++)
17739 if (!(elf_section_flags (m
->sections
[j
]) & SHF_ARM_PURECODE
))
17745 m
->p_flags_valid
= 1;
static enum elf_reloc_type_class
elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
			    const asection *rel_sec ATTRIBUTE_UNUSED,
			    const Elf_Internal_Rela *rela)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab->root.dynsym != NULL
      && htab->root.dynsym->contents != NULL)
    {
      /* Check relocation against STT_GNU_IFUNC symbol if there are
	 dynamic symbols.  */
      bfd *abfd = info->output_bfd;
      const struct elf_backend_data *bed = get_elf_backend_data (abfd);
      unsigned long r_symndx = ELF32_R_SYM (rela->r_info);
      if (r_symndx != STN_UNDEF)
	{
	  Elf_Internal_Sym sym;
	  if (!bed->s->swap_symbol_in (abfd,
				       (htab->root.dynsym->contents
					+ r_symndx * bed->s->sizeof_sym),
				       0, &sym))
	    {
	      /* xgettext:c-format */
	      _bfd_error_handler (_("%pB symbol number %lu references"
				    " nonexistent SHT_SYMTAB_SHNDX section"),
				  abfd, r_symndx);
	      /* Ideally an error class should be returned here.  */
	    }
	  else if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
	    return reloc_class_ifunc;
	}
    }

  switch ((int) ELF32_R_TYPE (rela->r_info))
    {
    case R_ARM_RELATIVE:
      return reloc_class_relative;
    case R_ARM_JUMP_SLOT:
      return reloc_class_plt;
    case R_ARM_COPY:
      return reloc_class_copy;
    case R_ARM_IRELATIVE:
      return reloc_class_ifunc;
    default:
      return reloc_class_normal;
    }
}
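
/* These classes are consumed by the generic ELF linker when it sorts the
   dynamic relocation sections, e.g. so that R_ARM_RELATIVE entries can be
   grouped together at the start of the output reloc section and so that
   IFUNC relocations sort after ordinary ones.  */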
static void
arm_final_write_processing (bfd *abfd)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}

static bool
elf32_arm_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return _bfd_elf_final_write_processing (abfd);
}

/* Return TRUE if this is an unwinding table entry.  */

static bool
is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
{
  return (startswith (name, ELF_STRING_ARM_unwind)
	  || startswith (name, ELF_STRING_ARM_unwind_once));
}
/* Set the type and flags for an ARM section.  We do this by
   the section name, which is a hack, but ought to work.  */

static bool
elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
{
  const char * name;

  name = bfd_section_name (sec);

  if (is_arm_elf_unwind_section_name (abfd, name))
    {
      hdr->sh_type = SHT_ARM_EXIDX;
      hdr->sh_flags |= SHF_LINK_ORDER;
    }

  if (sec->flags & SEC_ELF_PURECODE)
    hdr->sh_flags |= SHF_ARM_PURECODE;

  return true;
}
/* Handle an ARM specific section when reading an object file.  This is
   called when bfd_section_from_shdr finds a section with an unknown
   type.  */

static bool
elf32_arm_section_from_shdr (bfd *abfd,
			     Elf_Internal_Shdr * hdr,
			     const char *name,
			     int shindex)
{
  /* There ought to be a place to keep ELF backend specific flags, but
     at the moment there isn't one.  We just keep track of the
     sections by their name, instead.  Fortunately, the ABI gives
     names for all the ARM specific sections, so we will probably get
     away with this.  */
  switch (hdr->sh_type)
    {
    case SHT_ARM_EXIDX:
    case SHT_ARM_PREEMPTMAP:
    case SHT_ARM_ATTRIBUTES:
      break;

    default:
      return false;
    }

  if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
    return false;

  return true;
}

static _arm_elf_section_data *
get_arm_elf_section_data (asection * sec)
{
  if (sec && sec->owner && is_arm_elf (sec->owner))
    return elf32_arm_section_data (sec);
  else
    return NULL;
}
typedef struct
{
  void *flaginfo;
  struct bfd_link_info *info;
  asection *sec;
  int sec_shndx;
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;

enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};

/* Output a single mapping symbol.  */

static bool
elf32_arm_output_map_sym (output_arch_syminfo *osi,
			  enum map_symbol_type type,
			  bfd_vma offset)
{
  static const char *names[3] = {"$a", "$t", "$d"};
  Elf_Internal_Sym sym;

  sym.st_value = osi->sec->output_section->vma
		 + osi->sec->output_offset
		 + offset;
  sym.st_size = 0;
  sym.st_other = 0;
  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
  sym.st_shndx = osi->sec_shndx;
  sym.st_target_internal = ST_BRANCH_TO_ARM;
  elf32_arm_section_map_add (osi->sec, names[type][1], offset);
  return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
}
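
/* The mapping symbol names follow the ARM ELF ABI: $a marks the start of
   a region of Arm code, $t Thumb code and $d literal data.  names[type][1]
   passes just the letter ('a', 't' or 'd') to elf32_arm_section_map_add,
   while the full "$x" name is what ends up in the output symbol table.  */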
17926 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17927 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
17930 elf32_arm_output_plt_map_1 (output_arch_syminfo
*osi
,
17931 bool is_iplt_entry_p
,
17932 union gotplt_union
*root_plt
,
17933 struct arm_plt_info
*arm_plt
)
17935 struct elf32_arm_link_hash_table
*htab
;
17936 bfd_vma addr
, plt_header_size
;
17938 if (root_plt
->offset
== (bfd_vma
) -1)
17941 htab
= elf32_arm_hash_table (osi
->info
);
17945 if (is_iplt_entry_p
)
17947 osi
->sec
= htab
->root
.iplt
;
17948 plt_header_size
= 0;
17952 osi
->sec
= htab
->root
.splt
;
17953 plt_header_size
= htab
->plt_header_size
;
17955 osi
->sec_shndx
= (_bfd_elf_section_from_bfd_section
17956 (osi
->info
->output_bfd
, osi
->sec
->output_section
));
17958 addr
= root_plt
->offset
& -2;
17959 if (htab
->root
.target_os
== is_vxworks
)
17961 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17963 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 8))
17965 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
+ 12))
17967 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 20))
17970 else if (htab
->root
.target_os
== is_nacl
)
17972 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17975 else if (htab
->fdpic_p
)
17977 enum map_symbol_type type
= using_thumb_only (htab
)
17981 if (elf32_arm_plt_needs_thumb_stub_p (osi
->info
, arm_plt
))
17982 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
- 4))
17984 if (!elf32_arm_output_map_sym (osi
, type
, addr
))
17986 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 16))
17988 if (htab
->plt_entry_size
== 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry
))
17989 if (!elf32_arm_output_map_sym (osi
, type
, addr
+ 24))
17992 else if (using_thumb_only (htab
))
17994 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
))
18001 thumb_stub_p
= elf32_arm_plt_needs_thumb_stub_p (osi
->info
, arm_plt
);
18004 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
- 4))
18007 #ifdef FOUR_WORD_PLT
18008 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
18010 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 12))
18013 /* A three-word PLT with no Thumb thunk contains only Arm code,
18014 so only need to output a mapping symbol for the first PLT entry and
18015 entries with thumb thunks. */
18016 if (thumb_stub_p
|| addr
== plt_header_size
)
18018 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
/* Output mapping symbols for PLT entries associated with H.  */

static bool
elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
{
  output_arch_syminfo *osi = (output_arch_syminfo *) inf;
  struct elf32_arm_link_hash_entry *eh;

  if (h->root.type == bfd_link_hash_indirect)
    return true;

  if (h->root.type == bfd_link_hash_warning)
    /* When warning symbols are created, they **replace** the "real"
       entry in the hash table, thus we never get to see the real
       symbol in a hash traversal.  So look at it now.  */
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  eh = (struct elf32_arm_link_hash_entry *) h;
  return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
				     &h->plt, &eh->plt);
}
/* Bind a veneered symbol to its veneer identified by its hash entry
   STUB_ENTRY.  The veneered location thus loses its symbol.  */

static void
arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
{
  struct elf32_arm_link_hash_entry *hash = stub_entry->h;

  BFD_ASSERT (hash);
  hash->root.root.u.def.section = stub_entry->stub_sec;
  hash->root.root.u.def.value = stub_entry->stub_offset;
  hash->root.size = stub_entry->stub_size;
}
/* Output a single local symbol for a generated stub.  */

static bool
elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
			   bfd_vma offset, bfd_vma size)
{
  Elf_Internal_Sym sym;

  sym.st_value = osi->sec->output_section->vma
		 + osi->sec->output_offset
		 + offset;
  sym.st_size = size;
  sym.st_other = 0;
  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  sym.st_shndx = osi->sec_shndx;
  sym.st_target_internal = ST_BRANCH_TO_ARM;
  return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
}
18083 arm_map_one_stub (struct bfd_hash_entry
* gen_entry
,
18086 struct elf32_arm_stub_hash_entry
*stub_entry
;
18087 asection
*stub_sec
;
18090 output_arch_syminfo
*osi
;
18091 const insn_sequence
*template_sequence
;
18092 enum stub_insn_type prev_type
;
18095 enum map_symbol_type sym_type
;
18097 /* Massage our args to the form they really have. */
18098 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
18099 osi
= (output_arch_syminfo
*) in_arg
;
18101 stub_sec
= stub_entry
->stub_sec
;
18103 /* Ensure this stub is attached to the current section being
18105 if (stub_sec
!= osi
->sec
)
18108 addr
= (bfd_vma
) stub_entry
->stub_offset
;
18109 template_sequence
= stub_entry
->stub_template
;
18111 if (arm_stub_sym_claimed (stub_entry
->stub_type
))
18112 arm_stub_claim_sym (stub_entry
);
18115 stub_name
= stub_entry
->output_name
;
18116 switch (template_sequence
[0].type
)
18119 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
,
18120 stub_entry
->stub_size
))
18125 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
| 1,
18126 stub_entry
->stub_size
))
18135 prev_type
= DATA_TYPE
;
18137 for (i
= 0; i
< stub_entry
->stub_template_size
; i
++)
18139 switch (template_sequence
[i
].type
)
18142 sym_type
= ARM_MAP_ARM
;
18147 sym_type
= ARM_MAP_THUMB
;
18151 sym_type
= ARM_MAP_DATA
;
18159 if (template_sequence
[i
].type
!= prev_type
)
18161 prev_type
= template_sequence
[i
].type
;
18162 if (!elf32_arm_output_map_sym (osi
, sym_type
, addr
+ size
))
18166 switch (template_sequence
[i
].type
)
18190 /* Output mapping symbols for linker generated sections,
18191 and for those data-only sections that do not have a
18195 elf32_arm_output_arch_local_syms (bfd
*output_bfd
,
18196 struct bfd_link_info
*info
,
18198 int (*func
) (void *, const char *,
18199 Elf_Internal_Sym
*,
18201 struct elf_link_hash_entry
*))
18203 output_arch_syminfo osi
;
18204 struct elf32_arm_link_hash_table
*htab
;
18206 bfd_size_type size
;
18209 if (info
->strip
== strip_all
18210 && !info
->emitrelocations
18211 && !bfd_link_relocatable (info
))
18214 htab
= elf32_arm_hash_table (info
);
18218 check_use_blx (htab
);
18220 osi
.flaginfo
= flaginfo
;
18224 /* Add a $d mapping symbol to data-only sections that
18225 don't have any mapping symbol. This may result in (harmless) redundant
18226 mapping symbols. */
18227 for (input_bfd
= info
->input_bfds
;
18229 input_bfd
= input_bfd
->link
.next
)
18231 if ((input_bfd
->flags
& (BFD_LINKER_CREATED
| HAS_SYMS
)) == HAS_SYMS
)
18232 for (osi
.sec
= input_bfd
->sections
;
18234 osi
.sec
= osi
.sec
->next
)
18236 if (osi
.sec
->output_section
!= NULL
18237 && ((osi
.sec
->output_section
->flags
& (SEC_ALLOC
| SEC_CODE
))
18239 && (osi
.sec
->flags
& (SEC_HAS_CONTENTS
| SEC_LINKER_CREATED
))
18240 == SEC_HAS_CONTENTS
18241 && get_arm_elf_section_data (osi
.sec
) != NULL
18242 && get_arm_elf_section_data (osi
.sec
)->mapcount
== 0
18243 && osi
.sec
->size
> 0
18244 && (osi
.sec
->flags
& SEC_EXCLUDE
) == 0)
18246 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18247 (output_bfd
, osi
.sec
->output_section
);
18248 if (osi
.sec_shndx
!= (int)SHN_BAD
)
18249 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 0);
18254 /* ARM->Thumb glue. */
18255 if (htab
->arm_glue_size
> 0)
18257 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18258 ARM2THUMB_GLUE_SECTION_NAME
);
18260 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18261 (output_bfd
, osi
.sec
->output_section
);
18262 if (bfd_link_pic (info
)
18263 || htab
->pic_veneer
)
18264 size
= ARM2THUMB_PIC_GLUE_SIZE
;
18265 else if (htab
->use_blx
)
18266 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
18268 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
18270 for (offset
= 0; offset
< htab
->arm_glue_size
; offset
+= size
)
18272 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
);
18273 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, offset
+ size
- 4);
18277 /* Thumb->ARM glue. */
18278 if (htab
->thumb_glue_size
> 0)
18280 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18281 THUMB2ARM_GLUE_SECTION_NAME
);
18283 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18284 (output_bfd
, osi
.sec
->output_section
);
18285 size
= THUMB2ARM_GLUE_SIZE
;
18287 for (offset
= 0; offset
< htab
->thumb_glue_size
; offset
+= size
)
18289 elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, offset
);
18290 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
+ 4);
18294 /* ARMv4 BX veneers. */
18295 if (htab
->bx_glue_size
> 0)
18297 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18298 ARM_BX_GLUE_SECTION_NAME
);
18300 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18301 (output_bfd
, osi
.sec
->output_section
);
18303 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0);
18306 /* Long calls stubs. */
18307 if (htab
->stub_bfd
&& htab
->stub_bfd
->sections
)
18309 asection
* stub_sec
;
18311 for (stub_sec
= htab
->stub_bfd
->sections
;
18313 stub_sec
= stub_sec
->next
)
18315 /* Ignore non-stub sections. */
18316 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
18319 osi
.sec
= stub_sec
;
18321 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18322 (output_bfd
, osi
.sec
->output_section
);
18324 bfd_hash_traverse (&htab
->stub_hash_table
, arm_map_one_stub
, &osi
);
18328 /* Finally, output mapping symbols for the PLT. */
18329 if (htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
18331 osi
.sec
= htab
->root
.splt
;
18332 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
18333 (output_bfd
, osi
.sec
->output_section
));
18335 /* Output mapping symbols for the plt header. */
18336 if (htab
->root
.target_os
== is_vxworks
)
18338 /* VxWorks shared libraries have no PLT header. */
18339 if (!bfd_link_pic (info
))
18341 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18343 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
18347 else if (htab
->root
.target_os
== is_nacl
)
18349 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18352 else if (using_thumb_only (htab
) && !htab
->fdpic_p
)
18354 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 0))
18356 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
18358 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 16))
18361 else if (!htab
->fdpic_p
)
18363 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18365 #ifndef FOUR_WORD_PLT
18366 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 16))
18371 if (htab
->root
.target_os
== is_nacl
18373 && htab
->root
.iplt
->size
> 0)
18375 /* NaCl uses a special first entry in .iplt too. */
18376 osi
.sec
= htab
->root
.iplt
;
18377 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
18378 (output_bfd
, osi
.sec
->output_section
));
18379 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18382 if ((htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
18383 || (htab
->root
.iplt
&& htab
->root
.iplt
->size
> 0))
18385 elf_link_hash_traverse (&htab
->root
, elf32_arm_output_plt_map
, &osi
);
18386 for (input_bfd
= info
->input_bfds
;
18388 input_bfd
= input_bfd
->link
.next
)
18390 struct arm_local_iplt_info
**local_iplt
;
18391 unsigned int i
, num_syms
;
18393 local_iplt
= elf32_arm_local_iplt (input_bfd
);
18394 if (local_iplt
!= NULL
)
18396 num_syms
= elf_symtab_hdr (input_bfd
).sh_info
;
18397 if (num_syms
> elf32_arm_num_entries (input_bfd
))
18399 _bfd_error_handler (_("\
18400 %pB: Number of symbols in input file has increased from %lu to %u\n"),
18402 (unsigned long) elf32_arm_num_entries (input_bfd
),
18406 for (i
= 0; i
< num_syms
; i
++)
18407 if (local_iplt
[i
] != NULL
18408 && !elf32_arm_output_plt_map_1 (&osi
, true,
18409 &local_iplt
[i
]->root
,
18410 &local_iplt
[i
]->arm
))
18415 if (htab
->root
.tlsdesc_plt
!= 0)
18417 /* Mapping symbols for the lazy tls trampoline. */
18418 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
,
18419 htab
->root
.tlsdesc_plt
))
18422 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
18423 htab
->root
.tlsdesc_plt
+ 24))
18426 if (htab
->tls_trampoline
!= 0)
18428 /* Mapping symbols for the tls trampoline. */
18429 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, htab
->tls_trampoline
))
18431 #ifdef FOUR_WORD_PLT
18432 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
18433 htab
->tls_trampoline
+ 12))
18441 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18442 the import library. All SYMCOUNT symbols of ABFD can be examined
18443 from their pointers in SYMS. Pointers of symbols to keep should be
18444 stored continuously at the beginning of that array.
18446 Returns the number of symbols to keep. */
18448 static unsigned int
18449 elf32_arm_filter_cmse_symbols (bfd
*abfd ATTRIBUTE_UNUSED
,
18450 struct bfd_link_info
*info
,
18451 asymbol
**syms
, long symcount
)
18455 long src_count
, dst_count
= 0;
18456 struct elf32_arm_link_hash_table
*htab
;
18458 htab
= elf32_arm_hash_table (info
);
18459 if (!htab
->stub_bfd
|| !htab
->stub_bfd
->sections
)
18463 cmse_name
= (char *) bfd_malloc (maxnamelen
);
18464 BFD_ASSERT (cmse_name
);
18466 for (src_count
= 0; src_count
< symcount
; src_count
++)
18468 struct elf32_arm_link_hash_entry
*cmse_hash
;
18474 sym
= syms
[src_count
];
18475 flags
= sym
->flags
;
18476 name
= (char *) bfd_asymbol_name (sym
);
18478 if ((flags
& BSF_FUNCTION
) != BSF_FUNCTION
)
18480 if (!(flags
& (BSF_GLOBAL
| BSF_WEAK
)))
18483 namelen
= strlen (name
) + sizeof (CMSE_PREFIX
) + 1;
18484 if (namelen
> maxnamelen
)
18486 cmse_name
= (char *)
18487 bfd_realloc (cmse_name
, namelen
);
18488 maxnamelen
= namelen
;
18490 snprintf (cmse_name
, maxnamelen
, "%s%s", CMSE_PREFIX
, name
);
18491 cmse_hash
= (struct elf32_arm_link_hash_entry
*)
18492 elf_link_hash_lookup (&(htab
)->root
, cmse_name
, false, false, true);
18495 || (cmse_hash
->root
.root
.type
!= bfd_link_hash_defined
18496 && cmse_hash
->root
.root
.type
!= bfd_link_hash_defweak
)
18497 || cmse_hash
->root
.type
!= STT_FUNC
)
18500 syms
[dst_count
++] = sym
;
18504 syms
[dst_count
] = NULL
;
/* Filter symbols of ABFD to include in the import library.  All
   SYMCOUNT symbols of ABFD can be examined from their pointers in
   SYMS.  Pointers of symbols to keep should be stored continuously at
   the beginning of that array.

   Returns the number of symbols to keep.  */

static unsigned int
elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info *info,
				 asymbol **syms, long symcount)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);

  /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
     Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
     library to be a relocatable object file.  */
  BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
  if (globals->cmse_implib)
    return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
  else
    return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
}
/* Allocate target specific section data.  */

static bool
elf32_arm_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      _arm_elf_section_data *sdata;
      size_t amt = sizeof (*sdata);

      sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
      if (sdata == NULL)
	return false;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}
/* Used to order a list of mapping symbols by address.  */

static int
elf32_arm_compare_mapping (const void * a, const void * b)
{
  const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
  const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;

  if (amap->vma > bmap->vma)
    return 1;
  else if (amap->vma < bmap->vma)
    return -1;
  else if (amap->type > bmap->type)
    /* Ensure results do not depend on the host qsort for objects with
       multiple mapping symbols at the same address by sorting on type
       after vma.  */
    return 1;
  else if (amap->type < bmap->type)
    return -1;
  else
    return 0;
}

/* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified.  */

static unsigned long
offset_prel31 (unsigned long addr, bfd_vma offset)
{
  return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
}
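
/* For example, offset_prel31 (0x80000010, 0x20) keeps bit 31 from ADDR and
   adds the offset modulo 2^31:
     (0x80000010 & ~0x7fffffff) | ((0x80000010 + 0x20) & 0x7fffffff)
       = 0x80000000 | 0x00000030 = 0x80000030.  */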
/* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
   relocations.  */

static void
copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
{
  unsigned long first_word = bfd_get_32 (output_bfd, from);
  unsigned long second_word = bfd_get_32 (output_bfd, from + 4);

  /* High bit of first word is supposed to be zero.  */
  if ((first_word & 0x80000000ul) == 0)
    first_word = offset_prel31 (first_word, offset);

  /* If the high bit of the second word is clear, and the bit pattern is not
     0x1 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
  if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
    second_word = offset_prel31 (second_word, offset);

  bfd_put_32 (output_bfd, first_word, to);
  bfd_put_32 (output_bfd, second_word, to + 4);
}

/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  asection *writing_section;
  bfd_byte *contents;
};
18615 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18616 places for a particular section. */
18619 make_branch_to_a8_stub (struct bfd_hash_entry
*gen_entry
,
18622 struct elf32_arm_stub_hash_entry
*stub_entry
;
18623 struct a8_branch_to_stub_data
*data
;
18624 bfd_byte
*contents
;
18625 unsigned long branch_insn
;
18626 bfd_vma veneered_insn_loc
, veneer_entry_loc
;
18627 bfd_signed_vma branch_offset
;
18631 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
18632 data
= (struct a8_branch_to_stub_data
*) in_arg
;
18634 if (stub_entry
->target_section
!= data
->writing_section
18635 || stub_entry
->stub_type
< arm_stub_a8_veneer_lwm
)
18638 contents
= data
->contents
;
18640 /* We use target_section as Cortex-A8 erratum workaround stubs are only
18641 generated when both source and target are in the same section. */
18642 veneered_insn_loc
= stub_entry
->target_section
->output_section
->vma
18643 + stub_entry
->target_section
->output_offset
18644 + stub_entry
->source_value
;
18646 veneer_entry_loc
= stub_entry
->stub_sec
->output_section
->vma
18647 + stub_entry
->stub_sec
->output_offset
18648 + stub_entry
->stub_offset
;
18650 if (stub_entry
->stub_type
== arm_stub_a8_veneer_blx
)
18651 veneered_insn_loc
&= ~3u;
18653 branch_offset
= veneer_entry_loc
- veneered_insn_loc
- 4;
18655 abfd
= stub_entry
->target_section
->owner
;
18656 loc
= stub_entry
->source_value
;
18658 /* We attempt to avoid this condition by setting stubs_always_after_branch
18659 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
18660 This check is just to be on the safe side... */
18661 if ((veneered_insn_loc
& ~0xfff) == (veneer_entry_loc
& ~0xfff))
18663 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
18664 "allocated in unsafe location"), abfd
);
18668 switch (stub_entry
->stub_type
)
18670 case arm_stub_a8_veneer_b
:
18671 case arm_stub_a8_veneer_b_cond
:
18672 branch_insn
= 0xf0009000;
18675 case arm_stub_a8_veneer_blx
:
18676 branch_insn
= 0xf000e800;
18679 case arm_stub_a8_veneer_bl
:
18681 unsigned int i1
, j1
, i2
, j2
, s
;
18683 branch_insn
= 0xf000d000;
18686 if (branch_offset
< -16777216 || branch_offset
> 16777214)
18688 /* There's not much we can do apart from complain if this
18690 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
18691 "of range (input file too large)"), abfd
);
18695 /* i1 = not(j1 eor s), so:
18697 j1 = (not i1) eor s. */
18699 branch_insn
|= (branch_offset
>> 1) & 0x7ff;
18700 branch_insn
|= ((branch_offset
>> 12) & 0x3ff) << 16;
18701 i2
= (branch_offset
>> 22) & 1;
18702 i1
= (branch_offset
>> 23) & 1;
18703 s
= (branch_offset
>> 24) & 1;
18706 branch_insn
|= j2
<< 11;
18707 branch_insn
|= j1
<< 13;
18708 branch_insn
|= s
<< 26;
18717 bfd_put_16 (abfd
, (branch_insn
>> 16) & 0xffff, &contents
[loc
]);
18718 bfd_put_16 (abfd
, branch_insn
& 0xffff, &contents
[loc
+ 2]);
/* Beginning of stm32l4xx work-around.  */

/* Functions encoding instructions necessary for the emission of the
   fix-stm32l4xx-629360.
   Encoding is extracted from the
   ARM (C) Architecture Reference Manual
   ARMv7-A and ARMv7-R edition
   ARM DDI 0406C.b (ID072512).  */

static inline bfd_vma
create_instruction_branch_absolute (int branch_offset)
{
  /* A8.8.18 B (A8-334)
     B target_address (Encoding T4).  */
  /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii.  */
  /* jump offset is:  S:I1:I2:imm10:imm11:0.  */
  /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S).  */

  int s = ((branch_offset & 0x1000000) >> 24);
  int j1 = s ^ !((branch_offset & 0x800000) >> 23);
  int j2 = s ^ !((branch_offset & 0x400000) >> 22);

  if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
    BFD_ASSERT (0 && "Error: branch out of range.  Cannot create branch.");

  bfd_vma patched_inst = 0xf0009000
    | s << 26 /* S.  */
    | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10.  */
    | j1 << 13 /* J1.  */
    | j2 << 11 /* J2.  */
    | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11.  */

  return patched_inst;
}

static inline bfd_vma
create_instruction_ldmia (int base_reg, int wback, int reg_mask)
{
  /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
     LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2).  */
  bfd_vma patched_inst = 0xe8900000
    | (/*W=*/wback << 21)
    | (base_reg << 16)
    | (reg_mask & 0x0000ffff);

  return patched_inst;
}

static inline bfd_vma
create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
{
  /* A8.8.60 LDMDB/LDMEA (A8-402)
     LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1).  */
  bfd_vma patched_inst = 0xe9100000
    | (/*W=*/wback << 21)
    | (base_reg << 16)
    | (reg_mask & 0x0000ffff);

  return patched_inst;
}

static inline bfd_vma
create_instruction_mov (int target_reg, int source_reg)
{
  /* A8.8.103 MOV (register) (A8-486)
     MOV Rd, Rm (Encoding T1).  */
  bfd_vma patched_inst = 0x4600
    | (target_reg & 0x7)
    | ((target_reg & 0x8) >> 3) << 7
    | (source_reg << 3);

  return patched_inst;
}

static inline bfd_vma
create_instruction_sub (int target_reg, int source_reg, int value)
{
  /* A8.8.221 SUB (immediate) (A8-708)
     SUB Rd, Rn, #value (Encoding T3).  */
  bfd_vma patched_inst = 0xf1a00000
    | (target_reg << 8)
    | (source_reg << 16)
    | (/*S=*/0 << 20)
    | ((value & 0x800) >> 11) << 26
    | ((value & 0x700) >> 8) << 12
    | (value & 0x0ff);

  return patched_inst;
}

static inline bfd_vma
create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
			   int first_reg)
{
  /* A8.8.332 VLDM (A8-922)
     VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2).  */
  bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
    | (/*W=*/wback << 21)
    | (base_reg << 16)
    | (num_words & 0x000000ff)
    | (((unsigned) first_reg >> 1) & 0x0000000f) << 12
    | (first_reg & 0x00000001) << 22;

  return patched_inst;
}

static inline bfd_vma
create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
			   int first_reg)
{
  /* A8.8.332 VLDM (A8-922)
     VLMD{MODE} Rn!, {} (Encoding T1 or T2).  */
  bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
    | (base_reg << 16)
    | (num_words & 0x000000ff)
    | (((unsigned) first_reg >> 1) & 0x0000000f) << 12
    | (first_reg & 0x00000001) << 22;

  return patched_inst;
}

static inline bfd_vma
create_instruction_udf_w (int value)
{
  /* A8.8.247 UDF (A8-758)
     Undefined (Encoding T2).  */
  bfd_vma patched_inst = 0xf7f0a000
    | (value & 0x00000fff)
    | (value & 0x000f0000) << 16;

  return patched_inst;
}

static inline bfd_vma
create_instruction_udf (int value)
{
  /* A8.8.247 UDF (A8-758)
     Undefined (Encoding T1).  */
  bfd_vma patched_inst = 0xde00
    | (value & 0xff);

  return patched_inst;
}
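
/* Worked example for the branch encoder above:
   create_instruction_branch_absolute (0x100) gives 0xf000b880, i.e.
   S = 0, imm10 = 0, J1 = J2 = 1 (so I1 = I2 = 0) and imm11 = 0x80, which
   decodes back to S:I1:I2:imm10:imm11:0 = 0x100, a forward B.W of 256
   bytes.  */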
/* Functions writing an instruction in memory, returning the next
   memory position to write to.  */

static inline bfd_byte *
push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb2_insn (htab, output_bfd, insn, pt);
  return pt + 4;
}

static inline bfd_byte *
push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb_insn (htab, output_bfd, insn, pt);
  return pt + 2;
}
/* Function filling up a region in memory with T1 and T2 UDFs taking
   care of alignment.  */

static bfd_byte *
stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
			 bfd * output_bfd,
			 const bfd_byte * const base_stub_contents,
			 bfd_byte * const from_stub_contents,
			 const bfd_byte * const end_stub_contents)
{
  bfd_byte *current_stub_contents = from_stub_contents;

  /* Fill the remainder of the stub with deterministic contents: UDF
     instructions.
     Check if realignment is needed on modulo 4 frontier using T1, to
     further use T2.  */
  if ((current_stub_contents < end_stub_contents)
      && !((current_stub_contents - base_stub_contents) % 2)
      && ((current_stub_contents - base_stub_contents) % 4))
    current_stub_contents =
      push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			  create_instruction_udf (0));

  for (; current_stub_contents < end_stub_contents;)
    current_stub_contents =
      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			  create_instruction_udf_w (0));

  return current_stub_contents;
}
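
/* The unused tail of each veneer is deliberately filled with UDF
   (permanently undefined) instructions rather than left as zero bytes, so
   that any stray branch into the padding faults immediately instead of
   executing whatever happened to be in the buffer.  The 16-bit T1 UDF is
   only emitted when needed to realign to a 4-byte boundary before switching
   to the 32-bit T2 form.  */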
18917 /* Functions writing the stream of instructions equivalent to the
18918 derived sequence for ldmia, ldmdb, vldm respectively. */
18921 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table
* htab
,
18923 const insn32 initial_insn
,
18924 const bfd_byte
*const initial_insn_addr
,
18925 bfd_byte
*const base_stub_contents
)
18927 int wback
= (initial_insn
& 0x00200000) >> 21;
18928 int ri
, rn
= (initial_insn
& 0x000F0000) >> 16;
18929 int insn_all_registers
= initial_insn
& 0x0000ffff;
18930 int insn_low_registers
, insn_high_registers
;
18931 int usable_register_mask
;
18932 int nb_registers
= elf32_arm_popcount (insn_all_registers
);
18933 int restore_pc
= (insn_all_registers
& (1 << 15)) ? 1 : 0;
18934 int restore_rn
= (insn_all_registers
& (1 << rn
)) ? 1 : 0;
18935 bfd_byte
*current_stub_contents
= base_stub_contents
;
18937 BFD_ASSERT (is_thumb2_ldmia (initial_insn
));
18939 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
18940 smaller than 8 registers load sequences that do not cause the
18942 if (nb_registers
<= 8)
18944 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
18945 current_stub_contents
=
18946 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18949 /* B initial_insn_addr+4. */
18951 current_stub_contents
=
18952 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remainder of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
     - One with the 7 lowest registers (register mask 0x007F)
       This LDM will finally contain between 2 and 7 registers
     - One with the 7 highest registers (register mask 0xDF80)
       This LDM will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remainder of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
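/* Illustrative note (not from the original sources): for a wide load such
   as "ldmia r0!, {r1-r12, lr}", insn_all_registers is 0x5FFE, so the split
   above yields insn_low_registers == 0x007E (r1-r6) and insn_high_registers
   == 0x5F80 (r7-r12, lr).  With write-back set and PC absent, the veneer
   generated here is roughly:

	ldmia	r0!, {r1-r6}
	ldmia	r0!, {r7-r12, lr}
	b.w	<initial_insn_addr + 4>

   so no single load-multiple touches more than eight registers.  */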
static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMDB Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remainder of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
     - One with the 7 lowest registers (register mask 0x007F)
       This LDM will finally contain between 2 and 7 registers
     - One with the 7 highest registers (register mask 0xDF80)
       This LDM will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      /* If Rn is not part of the low-register-list, move it there.  */
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remainder of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
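/* Illustrative note (not from the original sources): because LDMDB walks
   the address range downwards, the split above issues the high half first.
   For "ldmdb r0!, {r1-r12, lr}" (insn_all_registers == 0x5FFE) the veneer
   becomes roughly:

	ldmdb	r0!, {r7-r12, lr}
	ldmdb	r0!, {r1-r6}
	b.w	<initial_insn_addr + 4>

   which preserves the register/memory pairing of the original instruction
   while keeping each load-multiple at seven registers or fewer.  */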
static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  int num_words = initial_insn & 0xff;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      bool is_dp = /* DP encoding.  */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bool is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bool is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bool is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int) initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT (   (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	   -> vldm rx!, {8_words_or_less} for each needed 8_word
	   -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	   -> vldm rx!, {8_words_or_less} for each needed 8_word
	      This also handles vpop instruction (when rx is sp)

	 vldmdb rx!, {...}
	   -> vldmdb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback=*/1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4 * num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remainder of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
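/* Illustrative note (not from the original sources): a 14-word transfer such
   as "vldmia r0!, {s0-s13}" (num_words == 14) is cut into chunks == 2 pieces:
   the first emitted VLDMIA moves 8 words (s0-s7) with write-back, the second
   moves the remaining 6 (s8-s13).  The no-write-back form also uses
   write-back inside the loop so the chunks advance through memory, and the
   trailing "sub base_reg, base_reg, #4*num_words" then restores the base
   register to its original value.  */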
static void
stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
				 bfd * output_bfd,
				 const insn32 wrong_insn,
				 const bfd_byte *const wrong_insn_addr,
				 bfd_byte *const stub_contents)
{
  if (is_thumb2_ldmia (wrong_insn))
    stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
					   wrong_insn, wrong_insn_addr,
					   stub_contents);
  else if (is_thumb2_ldmdb (wrong_insn))
    stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
					   wrong_insn, wrong_insn_addr,
					   stub_contents);
  else if (is_thumb2_vldm (wrong_insn))
    stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
					  wrong_insn, wrong_insn_addr,
					  stub_contents);
}

/* End of stm32l4xx work-around.  */
/* Do code byteswapping.  Return FALSE afterwards so that the section is
   written out as normal.  */

static bool
elf32_arm_write_section (bfd *output_bfd,
			 struct bfd_link_info *link_info,
			 asection *sec,
			 bfd_byte *contents)
{
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
  bfd_vma ptr;
  bfd_vma end;
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return false;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return false;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  if (errcount != 0)
    {
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
	   errnode = errnode->next)
	{
	  bfd_vma target = errnode->vma - offset;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	      {
		bfd_vma branch_to_veneer;
		/* Original condition code of instruction, plus bit mask for
		   ARM B instruction.  */
		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
				    | 0x0a000000;

		/* The instruction is before the label.  */
		target -= 4;

		/* Above offset included in -4 below.  */
		branch_to_veneer = errnode->u.b.veneer->vma
				   - errnode->vma - 4;

		if ((signed) branch_to_veneer < -(1 << 25)
		    || (signed) branch_to_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		insn |= (branch_to_veneer >> 2) & 0xffffff;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
	      }
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	      {
		bfd_vma branch_from_veneer;
		unsigned int insn;

		/* Take size of veneer into account.  */
		branch_from_veneer = errnode->u.v.branch->vma
				     - errnode->vma - 12;

		if ((signed) branch_from_veneer < -(1 << 25)
		    || (signed) branch_from_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		/* Original instruction.  */
		insn = errnode->u.v.branch->u.b.vfp_insn;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

		/* Branch back to insn after original insn.  */
		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
		contents[endianflip ^ (target + 4)] = insn & 0xff;
		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  if (arm_data->stm32l4xx_erratumcount != 0)
    {
      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
	   stm32l4xx_errnode != 0;
	   stm32l4xx_errnode = stm32l4xx_errnode->next)
	{
	  bfd_vma target = stm32l4xx_errnode->vma - offset;

	  switch (stm32l4xx_errnode->type)
	    {
	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
	      {
		unsigned int insn;
		bfd_vma branch_to_veneer =
		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;

		if ((signed) branch_to_veneer < -(1 << 24)
		    || (signed) branch_to_veneer >= (1 << 24))
		  {
		    bfd_vma out_of_range =
		      ((signed) branch_to_veneer < -(1 << 24)) ?
		      - branch_to_veneer - (1 << 24) :
		      ((signed) branch_to_veneer >= (1 << 24)) ?
		      branch_to_veneer - (1 << 24) : 0;

		    _bfd_error_handler
		      (_("%pB(%#" PRIx64 "): error: "
			 "cannot create STM32L4XX veneer; "
			 "jump out of range by %" PRId64 " bytes; "
			 "cannot encode branch instruction"),
		       output_bfd,
		       (uint64_t) (stm32l4xx_errnode->vma - 4),
		       (int64_t) out_of_range);
		    continue;
		  }

		insn = create_instruction_branch_absolute
		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);

		/* The instruction is before the label.  */
		target -= 4;

		put_thumb2_insn (globals, output_bfd,
				 (bfd_vma) insn, contents + target);
	      }
	      break;

	    case STM32L4XX_ERRATUM_VENEER:
	      {
		bfd_byte * veneer;
		bfd_byte * veneer_r;
		unsigned int insn;

		veneer = contents + target;
		veneer_r = veneer
		  + stm32l4xx_errnode->u.b.veneer->vma
		  - stm32l4xx_errnode->vma - 4;

		if ((signed) (veneer_r - veneer -
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
		    || (signed) (veneer_r - veneer) >= (1 << 24))
		  {
		    _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
					  "veneer"), output_bfd);
		    continue;
		  }

		/* Original instruction.  */
		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;

		stm32l4xx_create_replacing_stub
		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (This isn't the case if we perform no
	 edits, then rawsize will be zero and we should use size).  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;

      if (edited_contents == NULL)
	return false;
      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
	{
	  if (edit_node)
	    {
	      unsigned int edit_index = edit_node->index;

	      if (in_index < edit_index && in_index * 8 < input_size)
		{
		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				    contents + in_index * 8, add_to_offsets);
		  out_index++;
		  in_index++;
		}
	      else if (in_index == edit_index
		       || (in_index * 8 >= input_size
			   && edit_index == UINT_MAX))
		{
		  switch (edit_node->type)
		    {
		    case DELETE_EXIDX_ENTRY:
		      in_index++;
		      add_to_offsets += 8;
		      break;

		    case INSERT_EXIDX_CANTUNWIND_AT_END:
		      {
			asection *text_sec = edit_node->linked_section;
			bfd_vma text_offset = text_sec->output_section->vma
					      + text_sec->output_offset
					      + text_sec->size;
			bfd_vma exidx_offset = offset + out_index * 8;
			unsigned long prel31_offset;

			/* Note: this is meant to be equivalent to an
			   R_ARM_PREL31 relocation.  These synthetic
			   EXIDX_CANTUNWIND markers are not relocated by the
			   usual BFD method.  */
			prel31_offset = (text_offset - exidx_offset)
					& 0x7ffffffful;
			if (bfd_link_relocatable (link_info))
			  {
			    /* Here relocation for new EXIDX_CANTUNWIND is
			       created, so there is no need to
			       adjust offset by hand.  */
			    prel31_offset = text_sec->output_offset
					    + text_sec->size;
			  }

			/* First address we can't unwind.  */
			bfd_put_32 (output_bfd, prel31_offset,
				    &edited_contents[out_index * 8]);

			/* Code for EXIDX_CANTUNWIND.  */
			bfd_put_32 (output_bfd, 0x1,
				    &edited_contents[out_index * 8 + 4]);

			out_index++;
			add_to_offsets -= 8;
		      }
		      break;
		    }

		  edit_node = edit_node->next;
		}
	    }
	  else
	    {
	      /* No more edits, copy remaining entries verbatim.  */
	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				contents + in_index * 8, add_to_offsets);
	      out_index++;
	      in_index++;
	    }
	}

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
	bfd_set_section_contents (output_bfd, sec->output_section,
				  edited_contents,
				  (file_ptr) sec->output_offset, sec->size);

      return true;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
			 & data);
    }

  if (mapcount == 0)
    return false;

  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
	{
	  if (i == mapcount - 1)
	    end = sec->size;
	  else
	    end = map[i + 1].vma;

	  switch (map[i].type)
	    {
	    case 'a':
	      /* Byte swap code words.  */
	      while (ptr + 3 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 3];
		  contents[ptr + 3] = tmp;
		  tmp = contents[ptr + 1];
		  contents[ptr + 1] = contents[ptr + 2];
		  contents[ptr + 2] = tmp;
		  ptr += 4;
		}
	      break;

	    case 't':
	      /* Byte swap code halfwords.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return false;
}
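/* Illustrative note (not from the original sources): the endianflip trick
   above relocates byte indices within a word-aligned 32-bit word by XOR.
   With endianflip == 3 on a big-endian output, offsets 0,1,2,3 map to
   3,2,1,0, so storing "insn & 0xff" first still lays the instruction out in
   the byte order the target expects; on little-endian outputs endianflip is
   0 and the offsets are unchanged.  */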
/* Mangle thumb function symbols as we read them in.  */

static bool
elf32_arm_swap_symbol_in (bfd * abfd,
			  const void *psrc,
			  const void *pshn,
			  Elf_Internal_Sym *dst)
{
  if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
    return false;
  dst->st_target_internal = ST_BRANCH_TO_ARM;

  /* New EABI objects mark thumb function symbols by setting the low bit of
     the address.  */
  if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
      || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
    {
      if (dst->st_value & 1)
	{
	  dst->st_value &= ~(bfd_vma) 1;
	  ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
				   ST_BRANCH_TO_THUMB);
	}
      else
	ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
    {
      dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
  else
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);

  return true;
}
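/* Illustrative note (not from the original sources): a Thumb function
   defined at 0x8000 is typically recorded with st_value 0x8001 in an EABI
   object.  The reader above strips the low bit, leaving st_value 0x8000, and
   remembers the Thumb-ness as ST_BRANCH_TO_THUMB in st_target_internal; the
   writer below re-adds the bit for defined Thumb symbols.  */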
/* Mangle thumb function symbols as we write them out.  */

static void
elf32_arm_swap_symbol_out (bfd *abfd,
			   const Elf_Internal_Sym *src,
			   void *cdst,
			   void *shndx)
{
  Elf_Internal_Sym newsym;

  /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
     of the address set, as per the new EABI.  We do this unconditionally
     because objcopy does not set the elf header flags until after
     it writes out the symbol table.  */
  if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
    {
      newsym = *src;
      if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
	newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
      if (newsym.st_shndx != SHN_UNDEF)
	{
	  /* Do this only for defined symbols.  At link time, the static
	     linker will simulate the work of dynamic linker of resolving
	     symbols and will carry over the thumbness of found symbols to
	     the output symbol table.  It's not clear how it happens, but
	     the thumbness of undefined symbols can well be different at
	     runtime, and writing '1' for them will be confusing for users
	     and possibly for dynamic linker itself.  */
	  newsym.st_value |= 1;
	}

      src = &newsym;
    }
  bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
}
/* Add the PT_ARM_EXIDX program header.  */

static bool
elf32_arm_modify_segment_map (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  struct elf_segment_map *m;
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    {
      /* If there is already a PT_ARM_EXIDX header, then we do not
	 want to add another one.  This situation arises when running
	 "strip"; the input binary already has the header.  */
      m = elf_seg_map (abfd);
      while (m && m->p_type != PT_ARM_EXIDX)
	m = m->next;
      if (!m)
	{
	  m = (struct elf_segment_map *)
	      bfd_zalloc (abfd, sizeof (struct elf_segment_map));
	  if (m == NULL)
	    return false;
	  m->p_type = PT_ARM_EXIDX;
	  m->count = 1;
	  m->sections[0] = sec;

	  m->next = elf_seg_map (abfd);
	  elf_seg_map (abfd) = m;
	}
    }

  return true;
}
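/* Illustrative note (not from the original sources): the PT_ARM_EXIDX
   program header created above is what lets an unwinder locate the exception
   index table at run time; "readelf -l" on a linked image shows it as an
   EXIDX segment whose range covers the .ARM.exidx output section.  */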
/* We may add a PT_ARM_EXIDX program header.  */

static int
elf32_arm_additional_program_headers (bfd *abfd,
				      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    return 1;
  else
    return 0;
}
/* Hook called by the linker routine which adds symbols from an object
   file.  */

static bool
elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
			   Elf_Internal_Sym *sym, const char **namep,
			   flagword *flagsp, asection **secp, bfd_vma *valp)
{
  if (elf32_arm_hash_table (info) == NULL)
    return false;

  if (elf32_arm_hash_table (info)->root.target_os == is_vxworks
      && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
				       flagsp, secp, valp))
    return false;

  return true;
}
/* We use this to override swap_symbol_in and swap_symbol_out.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,
  1,
  32, 2,
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,
  elf32_arm_swap_symbol_out,
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
static bfd_vma
read_code32 (const bfd *abfd, const bfd_byte *addr)
{
  /* V7 BE8 code is always little endian.  */
  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
    return bfd_getl32 (addr);

  return bfd_get_32 (abfd, addr);
}

static bfd_vma
read_code16 (const bfd *abfd, const bfd_byte *addr)
{
  /* V7 BE8 code is always little endian.  */
  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
    return bfd_getl16 (addr);

  return bfd_get_16 (abfd, addr);
}

/* Return size of plt0 entry starting at ADDR
   or (bfd_vma) -1 if size cannot be determined.  */

static bfd_vma
elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr,
		     bfd_size_type data_size)
{
  bfd_vma first_word;
  bfd_vma plt0_size;

  if (data_size < 4)
    return (bfd_vma) -1;

  first_word = read_code32 (abfd, addr);

  if (first_word == elf32_arm_plt0_entry[0])
    plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
  else if (first_word == elf32_thumb2_plt0_entry[0])
    plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt0_size;
}

/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size cannot be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset,
		    bfd_size_type data_size)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (offset + 2 > data_size)
    return (bfd_vma) -1;
  if (read_code16 (abfd, start + offset) == elf32_arm_plt_thumb_stub[0])
    plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);

  /* Strip immediate from first add.  */
  if (offset + plt_size + 4 > data_size)
    return (bfd_vma) -1;
  first_insn = read_code32 (abfd, start + offset + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
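/* Illustrative note (not from the original sources): for the common ARM
   short-form PLT, a preceding Thumb caller stub contributes 2 * 2 = 4 bytes
   and the three-word ARM entry a further 12, so elf32_arm_plt_size reports
   16 bytes for such an entry and 12 when no Thumb stub is present.  The
   synthetic-symbol code below uses these sizes to step from one PLT entry
   to the next.  */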
/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
				long symcount ATTRIBUTE_UNUSED,
				asymbol **syms ATTRIBUTE_UNUSED,
				long dynsymcount,
				asymbol **dynsyms,
				asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, true))
    return 0;

  data = NULL;
  if (!bfd_get_full_section_contents (abfd, plt, &data))
    return 0;

  count = NUM_SHDR_ENTRIES (hdr);
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  offset = elf32_arm_plt0_size (abfd, data, plt->size);
  if (offset == (bfd_vma) -1
      || (s = *ret = (asymbol *) bfd_malloc (size)) == NULL)
    {
      free (data);
      return -1;
    }

  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset, plt->size);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  free (data);
  return n;
}
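/* Illustrative note (not from the original sources): the synthetic symbols
   produced above are what "objdump -d" prints for PLT slots, e.g. "puts@plt"
   or, when the relocation carries an addend, "foo+0x8@plt"; each one is
   placed at the .plt offset computed from elf32_arm_plt0_size and successive
   elf32_arm_plt_size calls.  */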
static bool
elf32_arm_section_flags (const Elf_Internal_Shdr *hdr)
{
  if (hdr->sh_flags & SHF_ARM_PURECODE)
    hdr->bfd_section->flags |= SEC_ELF_PURECODE;
  return true;
}

static flagword
elf32_arm_lookup_section_flags (char *flag_name)
{
  if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
    return SHF_ARM_PURECODE;

  return SEC_NO_FLAGS;
}

static unsigned int
elf32_arm_count_additional_relocs (asection *sec)
{
  struct _arm_elf_section_data *arm_data;
  arm_data = get_arm_elf_section_data (sec);

  return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
}
/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised,
   FALSE otherwise.  ISECTION is the best guess matching section from the
   input bfd IBFD, but it might be NULL.  */

static bool
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL)
	  {
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		     == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return true;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return false;
}
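/* Illustrative note (not from the original sources): the heuristic above
   matters for tools such as objcopy/strip that rewrite section headers.  If
   the output headers are, say, [1] .text (SHT_PROGBITS, SHF_ALLOC|SHF_EXECINSTR),
   [2] .ARM.exidx, [3] .data, and no input association can be carried over,
   the backwards scan from .ARM.exidx stops at index 1 and sets sh_link to
   the .text header, which is what the ARM EHABI link-order rule expects.  */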
/* Returns TRUE if NAME is an ARM mapping symbol.
   Traditionally the symbols $a, $d and $t have been used.
   The ARM ELF standard also defines $x (for A64 code).  It also allows a
   period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
   Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
   not support them here.  $t.x indicates the start of ThumbEE instructions.  */

static bool
is_arm_mapping_symbol (const char * name)
{
  return name != NULL /* Paranoia.  */
    && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
			 the mapping symbols could have acquired a prefix.
			 We do not support this here, since such symbols no
			 longer conform to the ARM ELF ABI.  */
    && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
    && (name[2] == 0 || name[2] == '.');
  /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
     any characters that follow the period are legal characters for the body
     of a symbol's name.  For now we just assume that this is the case.  */
}

/* Make sure that mapping symbols in object files are not removed via the
   "strip --strip-unneeded" tool.  These symbols are needed in order to
   correctly generate interworking veneers, and for byte swapping code
   regions.  Once an object file has been linked, it is safe to remove the
   symbols as they will no longer be needed.  */

static void
elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
{
  if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
      && sym->section != bfd_abs_section_ptr
      && is_arm_mapping_symbol (sym->name))
    sym->flags |= BSF_KEEP;
}
#undef elf_backend_copy_special_section_fields
#define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields

#define ELF_ARCH bfd_arch_arm
#define ELF_TARGET_ID ARM_ELF_DATA
#define ELF_MACHINE_CODE EM_ARM
#define ELF_MAXPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000

#define bfd_elf32_mkobject elf32_arm_mkobject

#define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
#define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
#define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
#define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
#define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
#define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
#define bfd_elf32_new_section_hook elf32_arm_new_section_hook
#define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
#define bfd_elf32_bfd_final_link elf32_arm_final_link
#define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab

#define elf_backend_get_symbol_type elf32_arm_get_symbol_type
#define elf_backend_maybe_function_sym elf32_arm_maybe_function_sym
#define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
#define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
#define elf_backend_check_relocs elf32_arm_check_relocs
#define elf_backend_update_relocs elf32_arm_update_relocs
#define elf_backend_relocate_section elf32_arm_relocate_section
#define elf_backend_write_section elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
#define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
#define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
#define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
#define elf_backend_late_size_sections elf32_arm_late_size_sections
#define elf_backend_early_size_sections elf32_arm_early_size_sections
#define elf_backend_init_index_section _bfd_elf_init_2_index_sections
#define elf_backend_init_file_header elf32_arm_init_file_header
#define elf_backend_reloc_type_class elf32_arm_reloc_type_class
#define elf_backend_object_p elf32_arm_object_p
#define elf_backend_fake_sections elf32_arm_fake_sections
#define elf_backend_section_from_shdr elf32_arm_section_from_shdr
#define elf_backend_final_write_processing elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
#define elf_backend_size_info elf32_arm_size_info
#define elf_backend_modify_segment_map elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
#define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
#define elf_backend_begin_write_processing elf32_arm_begin_write_processing
#define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
#define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
#define elf_backend_symbol_processing elf32_arm_backend_symbol_processing

#define elf_backend_can_refcount 1
#define elf_backend_can_gc_sections 1
#define elf_backend_plt_readonly 1
#define elf_backend_want_got_plt 1
#define elf_backend_want_plt_sym 0
#define elf_backend_want_dynrelro 1
#define elf_backend_may_use_rel_p 1
#define elf_backend_may_use_rela_p 0
#define elf_backend_default_use_rela_p 0
#define elf_backend_dtrel_excludes_plt 1

#define elf_backend_got_header_size 12
#define elf_backend_extern_protected_data 0

#undef elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor "aeabi"
#undef elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section ".ARM.attributes"
#undef elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
#undef elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
#define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown

#undef elf_backend_section_flags
#define elf_backend_section_flags elf32_arm_section_flags
#undef elf_backend_lookup_section_flags_hook
#define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags

#define elf_backend_linux_prpsinfo32_ugid16 true

#include "elf32-target.h"
/* Native Client targets.  */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM arm_elf32_nacl_be_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME "elf32-bigarm-nacl"

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for NaCl.  */

static struct bfd_link_hash_table *
elf32_arm_nacl_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
	= (struct elf32_arm_link_hash_table *) ret;

      htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
      htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
    }
  return ret;
}

/* Since NaCl doesn't use the ARM-specific unwind format, we don't
   really need to use elf32_arm_modify_segment_map.  But we do it
   anyway just to reduce gratuitous differences with the stock ARM backend.  */

static bool
elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  return (elf32_arm_modify_segment_map (abfd, info)
	  && nacl_modify_segment_map (abfd, info));
}

static bool
elf32_arm_nacl_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return nacl_final_write_processing (abfd);
}

static bfd_vma
elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
			    const arelent *rel ATTRIBUTE_UNUSED)
{
  return plt->vma
	 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
		i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
}

#undef elf32_bed
#define elf32_bed elf32_arm_nacl_bed
#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create \
  elf32_arm_nacl_link_hash_table_create
#undef elf_backend_plt_alignment
#define elf_backend_plt_alignment 4
#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
#undef elf_backend_modify_headers
#define elf_backend_modify_headers nacl_modify_headers
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
#undef bfd_elf32_get_synthetic_symtab
#undef elf_backend_plt_sym_val
#define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
#undef elf_backend_copy_special_section_fields

#undef ELF_MINPAGESIZE
#undef ELF_COMMONPAGESIZE

#undef ELF_TARGET_OS
#define ELF_TARGET_OS is_nacl

#include "elf32-target.h"
/* Reset to defaults.  */
#undef elf_backend_plt_alignment
#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map elf32_arm_modify_segment_map
#undef elf_backend_modify_headers
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_final_write_processing
#undef ELF_MINPAGESIZE
#undef ELF_COMMONPAGESIZE
#define ELF_COMMONPAGESIZE 0x1000


/* FDPIC Targets.  */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME "elf32-bigarm-fdpic"
#undef elf_match_priority
#define elf_match_priority 128

#define ELF_OSABI ELFOSABI_ARM_FDPIC

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for FDPIC.  */

static struct bfd_link_hash_table *
elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
      htab->fdpic_p = 1;
    }
  return ret;
}

/* We need dynamic symbols for every section, since segments can
   relocate independently.  */
static bool
elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
				     struct bfd_link_info *info
				       ATTRIBUTE_UNUSED,
				     asection *p ATTRIBUTE_UNUSED)
{
  switch (elf_section_data (p)->this_hdr.sh_type)
    {
    case SHT_PROGBITS:
    case SHT_NOBITS:
      /* If sh_type is yet undecided, assume it could be
	 SHT_PROGBITS/SHT_NOBITS.  */
    case SHT_NULL:
      return false;

      /* There shouldn't be section relative relocations
	 against any other section.  */
    default:
      return true;
    }
}

#define elf32_bed elf32_arm_fdpic_bed

#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create

#undef elf_backend_omit_section_dynsym
#define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym

#undef ELF_TARGET_OS

#include "elf32-target.h"
#undef elf_match_priority

#undef elf_backend_omit_section_dynsym

/* VxWorks Targets.  */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME "elf32-bigarm-vxworks"

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for VxWorks.  */

static struct bfd_link_hash_table *
elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
	= (struct elf32_arm_link_hash_table *) ret;
      htab->use_rel = false;
    }
  return ret;
}

static bool
elf32_arm_vxworks_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return elf_vxworks_final_write_processing (abfd);
}

#undef elf32_bed
#define elf32_bed elf32_arm_vxworks_bed

#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
#undef elf_backend_emit_relocs
#define elf_backend_emit_relocs elf_vxworks_emit_relocs

#undef elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p 0
#undef elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p 1
#undef elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p 1
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 1
#undef ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE 0x1000
#undef ELF_TARGET_OS
#define ELF_TARGET_OS is_vxworks

#include "elf32-target.h"
/* Merge backend specific data from an object file to the output
   object file when linking.  */

static bool
elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  flagword out_flags;
  flagword in_flags;
  bool flags_compatible = true;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, info))
    return false;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return true;

  if (!elf32_arm_merge_eabi_attributes (ibfd, info))
    return false;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %pB is already in final BE8 format"),
			  ibfd);
      return false;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input has no flags set, then do not set the output flags.
	 This will allow future bfds to determine the desired output flags.
	 If no input bfds have any flags set, then neither will the output bfd.

	 Note - we used to restrict this test to when the input architecture
	 variant was the default variant, but this does not allow for
	 linker scripts which override the default.  See PR 28910 for an
	 example.  */
      if (in_flags == 0)
	return true;

      elf_flags_init (obfd) = true;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return true;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return false;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return true;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bool null_input_bfd = true;
      bool only_data_sections = true;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_section_flags (sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = false;

	      null_input_bfd = false;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return true;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
      return false;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = false;
	}

      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = false;
	}

      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "VFP", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "FPA", obfd);

	  flags_compatible = false;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
		   ibfd, obfd);

	      flags_compatible = false;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("warning: %pB supports interworking, whereas %pB does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("warning: %pB does not support interworking, whereas %pB does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;