[binutils-gdb.git] / bfd / elf32-arm.c
/* 32-bit ELF support for ARM
   Copyright (C) 1998-2024 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */
#include "sysdep.h"
#include <limits.h>

#include "bfd.h"
#include "libiberty.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf-nacl.h"
#include "elf-vxworks.h"
#include "elf/arm.h"
#include "elf32-arm.h"
#include "cpu-arm.h"
/* Return the relocation section associated with NAME.  HTAB is the
   bfd's elf32_arm_link_hash_table.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)

/* Return size of a relocation entry.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))

/* Return function to swap relocations in.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)

/* Return function to swap relocations out.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
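/* Illustrative sketch, not part of the original file: how the REL/RELA
   helpers above are typically combined when emitting a single dynamic
   relocation.  struct elf32_arm_link_hash_table (which provides the
   use_rel flag tested by these macros) is defined later in the file,
   so the sketch is kept disabled.  */
#if 0
static void
example_emit_dynreloc (struct elf32_arm_link_hash_table *htab,
		       bfd *output_bfd, asection *sreloc,
		       Elf_Internal_Rela *rel)
{
  /* Each entry occupies sizeof (Elf32_External_Rel) or
     sizeof (Elf32_External_Rela) bytes, depending on whether the
     target uses REL or RELA relocations.  */
  bfd_byte *loc = sreloc->contents
		  + sreloc->reloc_count++ * RELOC_SIZE (htab);

  /* Pick the matching swap-out routine and write the entry.  */
  SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
}
#endif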
#define elf_info_to_howto		NULL
#define elf_info_to_howto_rel		elf32_arm_info_to_howto

#define ARM_ELF_ABI_VERSION		0
#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM

/* The Adjusted Place, as defined by AAELF.  */
#define Pa(X)  ((X) & 0xfffffffc)
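/* Illustrative note, not part of the original source: Pa simply
   word-aligns its argument by clearing the low two bits, e.g.
   Pa (0x8003) == 0x8000, matching the AAELF definition of the
   adjusted place used when resolving certain Thumb branch
   relocations.  */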
static bool elf32_arm_write_section (bfd *output_bfd,
				     struct bfd_link_info *link_info,
				     asection *sec,
				     bfd_byte *contents);
/* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
   R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
   in that slot.  */

static reloc_howto_type elf32_arm_howto_table_1[] =
{
  /* No relocation.  */
81 HOWTO (R_ARM_NONE, /* type */
82 0, /* rightshift */
83 0, /* size */
84 0, /* bitsize */
85 false, /* pc_relative */
86 0, /* bitpos */
87 complain_overflow_dont,/* complain_on_overflow */
88 bfd_elf_generic_reloc, /* special_function */
89 "R_ARM_NONE", /* name */
90 false, /* partial_inplace */
91 0, /* src_mask */
92 0, /* dst_mask */
93 false), /* pcrel_offset */
95 HOWTO (R_ARM_PC24, /* type */
96 2, /* rightshift */
97 4, /* size */
98 24, /* bitsize */
99 true, /* pc_relative */
100 0, /* bitpos */
101 complain_overflow_signed,/* complain_on_overflow */
102 bfd_elf_generic_reloc, /* special_function */
103 "R_ARM_PC24", /* name */
104 false, /* partial_inplace */
105 0x00ffffff, /* src_mask */
106 0x00ffffff, /* dst_mask */
107 true), /* pcrel_offset */
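  /* Illustrative note, not part of the original source: reading the
     R_ARM_PC24 descriptor above, "size" 4 means the field lives in a
     32-bit word, "bitsize" 24 and dst_mask 0x00ffffff select the low
     24 bits of the branch instruction, "rightshift" 2 reflects that
     the encoded value is the byte offset divided by four, and the
     pc_relative/pcrel_offset flags make the result relative to the
     place being relocated.  The remaining entries follow the same
     pattern.  */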
109 /* 32 bit absolute */
110 HOWTO (R_ARM_ABS32, /* type */
111 0, /* rightshift */
112 4, /* size */
113 32, /* bitsize */
114 false, /* pc_relative */
115 0, /* bitpos */
116 complain_overflow_bitfield,/* complain_on_overflow */
117 bfd_elf_generic_reloc, /* special_function */
118 "R_ARM_ABS32", /* name */
119 false, /* partial_inplace */
120 0xffffffff, /* src_mask */
121 0xffffffff, /* dst_mask */
122 false), /* pcrel_offset */
124 /* standard 32bit pc-relative reloc */
125 HOWTO (R_ARM_REL32, /* type */
126 0, /* rightshift */
127 4, /* size */
128 32, /* bitsize */
129 true, /* pc_relative */
130 0, /* bitpos */
131 complain_overflow_bitfield,/* complain_on_overflow */
132 bfd_elf_generic_reloc, /* special_function */
133 "R_ARM_REL32", /* name */
134 false, /* partial_inplace */
135 0xffffffff, /* src_mask */
136 0xffffffff, /* dst_mask */
137 true), /* pcrel_offset */
139 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140 HOWTO (R_ARM_LDR_PC_G0, /* type */
141 0, /* rightshift */
142 1, /* size */
143 32, /* bitsize */
144 true, /* pc_relative */
145 0, /* bitpos */
146 complain_overflow_dont,/* complain_on_overflow */
147 bfd_elf_generic_reloc, /* special_function */
148 "R_ARM_LDR_PC_G0", /* name */
149 false, /* partial_inplace */
150 0xffffffff, /* src_mask */
151 0xffffffff, /* dst_mask */
152 true), /* pcrel_offset */
154 /* 16 bit absolute */
155 HOWTO (R_ARM_ABS16, /* type */
156 0, /* rightshift */
157 2, /* size */
158 16, /* bitsize */
159 false, /* pc_relative */
160 0, /* bitpos */
161 complain_overflow_bitfield,/* complain_on_overflow */
162 bfd_elf_generic_reloc, /* special_function */
163 "R_ARM_ABS16", /* name */
164 false, /* partial_inplace */
165 0x0000ffff, /* src_mask */
166 0x0000ffff, /* dst_mask */
167 false), /* pcrel_offset */
169 /* 12 bit absolute */
170 HOWTO (R_ARM_ABS12, /* type */
171 0, /* rightshift */
172 4, /* size */
173 12, /* bitsize */
174 false, /* pc_relative */
175 0, /* bitpos */
176 complain_overflow_bitfield,/* complain_on_overflow */
177 bfd_elf_generic_reloc, /* special_function */
178 "R_ARM_ABS12", /* name */
179 false, /* partial_inplace */
180 0x00000fff, /* src_mask */
181 0x00000fff, /* dst_mask */
182 false), /* pcrel_offset */
184 HOWTO (R_ARM_THM_ABS5, /* type */
185 6, /* rightshift */
186 2, /* size */
187 5, /* bitsize */
188 false, /* pc_relative */
189 0, /* bitpos */
190 complain_overflow_bitfield,/* complain_on_overflow */
191 bfd_elf_generic_reloc, /* special_function */
192 "R_ARM_THM_ABS5", /* name */
193 false, /* partial_inplace */
194 0x000007e0, /* src_mask */
195 0x000007e0, /* dst_mask */
196 false), /* pcrel_offset */
198 /* 8 bit absolute */
199 HOWTO (R_ARM_ABS8, /* type */
200 0, /* rightshift */
201 1, /* size */
202 8, /* bitsize */
203 false, /* pc_relative */
204 0, /* bitpos */
205 complain_overflow_bitfield,/* complain_on_overflow */
206 bfd_elf_generic_reloc, /* special_function */
207 "R_ARM_ABS8", /* name */
208 false, /* partial_inplace */
209 0x000000ff, /* src_mask */
210 0x000000ff, /* dst_mask */
211 false), /* pcrel_offset */
213 HOWTO (R_ARM_SBREL32, /* type */
214 0, /* rightshift */
215 4, /* size */
216 32, /* bitsize */
217 false, /* pc_relative */
218 0, /* bitpos */
219 complain_overflow_dont,/* complain_on_overflow */
220 bfd_elf_generic_reloc, /* special_function */
221 "R_ARM_SBREL32", /* name */
222 false, /* partial_inplace */
223 0xffffffff, /* src_mask */
224 0xffffffff, /* dst_mask */
225 false), /* pcrel_offset */
227 HOWTO (R_ARM_THM_CALL, /* type */
228 1, /* rightshift */
229 4, /* size */
230 24, /* bitsize */
231 true, /* pc_relative */
232 0, /* bitpos */
233 complain_overflow_signed,/* complain_on_overflow */
234 bfd_elf_generic_reloc, /* special_function */
235 "R_ARM_THM_CALL", /* name */
236 false, /* partial_inplace */
237 0x07ff2fff, /* src_mask */
238 0x07ff2fff, /* dst_mask */
239 true), /* pcrel_offset */
241 HOWTO (R_ARM_THM_PC8, /* type */
242 1, /* rightshift */
243 2, /* size */
244 8, /* bitsize */
245 true, /* pc_relative */
246 0, /* bitpos */
247 complain_overflow_signed,/* complain_on_overflow */
248 bfd_elf_generic_reloc, /* special_function */
249 "R_ARM_THM_PC8", /* name */
250 false, /* partial_inplace */
251 0x000000ff, /* src_mask */
252 0x000000ff, /* dst_mask */
253 true), /* pcrel_offset */
255 HOWTO (R_ARM_BREL_ADJ, /* type */
256 1, /* rightshift */
257 2, /* size */
258 32, /* bitsize */
259 false, /* pc_relative */
260 0, /* bitpos */
261 complain_overflow_signed,/* complain_on_overflow */
262 bfd_elf_generic_reloc, /* special_function */
263 "R_ARM_BREL_ADJ", /* name */
264 false, /* partial_inplace */
265 0xffffffff, /* src_mask */
266 0xffffffff, /* dst_mask */
267 false), /* pcrel_offset */
269 HOWTO (R_ARM_TLS_DESC, /* type */
270 0, /* rightshift */
271 4, /* size */
272 32, /* bitsize */
273 false, /* pc_relative */
274 0, /* bitpos */
275 complain_overflow_bitfield,/* complain_on_overflow */
276 bfd_elf_generic_reloc, /* special_function */
277 "R_ARM_TLS_DESC", /* name */
278 false, /* partial_inplace */
279 0xffffffff, /* src_mask */
280 0xffffffff, /* dst_mask */
281 false), /* pcrel_offset */
283 HOWTO (R_ARM_THM_SWI8, /* type */
284 0, /* rightshift */
285 0, /* size */
286 0, /* bitsize */
287 false, /* pc_relative */
288 0, /* bitpos */
289 complain_overflow_signed,/* complain_on_overflow */
290 bfd_elf_generic_reloc, /* special_function */
291 "R_ARM_SWI8", /* name */
292 false, /* partial_inplace */
293 0x00000000, /* src_mask */
294 0x00000000, /* dst_mask */
295 false), /* pcrel_offset */
297 /* BLX instruction for the ARM. */
298 HOWTO (R_ARM_XPC25, /* type */
299 2, /* rightshift */
300 4, /* size */
301 24, /* bitsize */
302 true, /* pc_relative */
303 0, /* bitpos */
304 complain_overflow_signed,/* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_ARM_XPC25", /* name */
307 false, /* partial_inplace */
308 0x00ffffff, /* src_mask */
309 0x00ffffff, /* dst_mask */
310 true), /* pcrel_offset */
312 /* BLX instruction for the Thumb. */
313 HOWTO (R_ARM_THM_XPC22, /* type */
314 2, /* rightshift */
315 4, /* size */
316 24, /* bitsize */
317 true, /* pc_relative */
318 0, /* bitpos */
319 complain_overflow_signed,/* complain_on_overflow */
320 bfd_elf_generic_reloc, /* special_function */
321 "R_ARM_THM_XPC22", /* name */
322 false, /* partial_inplace */
323 0x07ff2fff, /* src_mask */
324 0x07ff2fff, /* dst_mask */
325 true), /* pcrel_offset */
327 /* Dynamic TLS relocations. */
329 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
330 0, /* rightshift */
331 4, /* size */
332 32, /* bitsize */
333 false, /* pc_relative */
334 0, /* bitpos */
335 complain_overflow_bitfield,/* complain_on_overflow */
336 bfd_elf_generic_reloc, /* special_function */
337 "R_ARM_TLS_DTPMOD32", /* name */
338 true, /* partial_inplace */
339 0xffffffff, /* src_mask */
340 0xffffffff, /* dst_mask */
341 false), /* pcrel_offset */
343 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
344 0, /* rightshift */
345 4, /* size */
346 32, /* bitsize */
347 false, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_bitfield,/* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 "R_ARM_TLS_DTPOFF32", /* name */
352 true, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 false), /* pcrel_offset */
357 HOWTO (R_ARM_TLS_TPOFF32, /* type */
358 0, /* rightshift */
359 4, /* size */
360 32, /* bitsize */
361 false, /* pc_relative */
362 0, /* bitpos */
363 complain_overflow_bitfield,/* complain_on_overflow */
364 bfd_elf_generic_reloc, /* special_function */
365 "R_ARM_TLS_TPOFF32", /* name */
366 true, /* partial_inplace */
367 0xffffffff, /* src_mask */
368 0xffffffff, /* dst_mask */
369 false), /* pcrel_offset */
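  /* Illustrative note, not part of the original source: the three
     dynamic TLS relocations above are normally emitted against GOT
     entries for the dynamic linker to resolve.  R_ARM_TLS_DTPMOD32
     yields the module ID of the defining module, R_ARM_TLS_DTPOFF32
     the offset of the symbol within that module's TLS block, and
     R_ARM_TLS_TPOFF32 the offset from the thread pointer used by the
     initial-exec and local-exec models.  */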
371 /* Relocs used in ARM Linux */
373 HOWTO (R_ARM_COPY, /* type */
374 0, /* rightshift */
375 4, /* size */
376 32, /* bitsize */
377 false, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_bitfield,/* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_ARM_COPY", /* name */
382 true, /* partial_inplace */
383 0xffffffff, /* src_mask */
384 0xffffffff, /* dst_mask */
385 false), /* pcrel_offset */
387 HOWTO (R_ARM_GLOB_DAT, /* type */
388 0, /* rightshift */
389 4, /* size */
390 32, /* bitsize */
391 false, /* pc_relative */
392 0, /* bitpos */
393 complain_overflow_bitfield,/* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_ARM_GLOB_DAT", /* name */
396 true, /* partial_inplace */
397 0xffffffff, /* src_mask */
398 0xffffffff, /* dst_mask */
399 false), /* pcrel_offset */
401 HOWTO (R_ARM_JUMP_SLOT, /* type */
402 0, /* rightshift */
403 4, /* size */
404 32, /* bitsize */
405 false, /* pc_relative */
406 0, /* bitpos */
407 complain_overflow_bitfield,/* complain_on_overflow */
408 bfd_elf_generic_reloc, /* special_function */
409 "R_ARM_JUMP_SLOT", /* name */
410 true, /* partial_inplace */
411 0xffffffff, /* src_mask */
412 0xffffffff, /* dst_mask */
413 false), /* pcrel_offset */
415 HOWTO (R_ARM_RELATIVE, /* type */
416 0, /* rightshift */
417 4, /* size */
418 32, /* bitsize */
419 false, /* pc_relative */
420 0, /* bitpos */
421 complain_overflow_bitfield,/* complain_on_overflow */
422 bfd_elf_generic_reloc, /* special_function */
423 "R_ARM_RELATIVE", /* name */
424 true, /* partial_inplace */
425 0xffffffff, /* src_mask */
426 0xffffffff, /* dst_mask */
427 false), /* pcrel_offset */
429 HOWTO (R_ARM_GOTOFF32, /* type */
430 0, /* rightshift */
431 4, /* size */
432 32, /* bitsize */
433 false, /* pc_relative */
434 0, /* bitpos */
435 complain_overflow_bitfield,/* complain_on_overflow */
436 bfd_elf_generic_reloc, /* special_function */
437 "R_ARM_GOTOFF32", /* name */
438 true, /* partial_inplace */
439 0xffffffff, /* src_mask */
440 0xffffffff, /* dst_mask */
441 false), /* pcrel_offset */
443 HOWTO (R_ARM_GOTPC, /* type */
444 0, /* rightshift */
445 4, /* size */
446 32, /* bitsize */
447 true, /* pc_relative */
448 0, /* bitpos */
449 complain_overflow_bitfield,/* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_ARM_GOTPC", /* name */
452 true, /* partial_inplace */
453 0xffffffff, /* src_mask */
454 0xffffffff, /* dst_mask */
455 true), /* pcrel_offset */
457 HOWTO (R_ARM_GOT32, /* type */
458 0, /* rightshift */
459 4, /* size */
460 32, /* bitsize */
461 false, /* pc_relative */
462 0, /* bitpos */
463 complain_overflow_bitfield,/* complain_on_overflow */
464 bfd_elf_generic_reloc, /* special_function */
465 "R_ARM_GOT32", /* name */
466 true, /* partial_inplace */
467 0xffffffff, /* src_mask */
468 0xffffffff, /* dst_mask */
469 false), /* pcrel_offset */
471 HOWTO (R_ARM_PLT32, /* type */
472 2, /* rightshift */
473 4, /* size */
474 24, /* bitsize */
475 true, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_bitfield,/* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_ARM_PLT32", /* name */
480 false, /* partial_inplace */
481 0x00ffffff, /* src_mask */
482 0x00ffffff, /* dst_mask */
483 true), /* pcrel_offset */
485 HOWTO (R_ARM_CALL, /* type */
486 2, /* rightshift */
487 4, /* size */
488 24, /* bitsize */
489 true, /* pc_relative */
490 0, /* bitpos */
491 complain_overflow_signed,/* complain_on_overflow */
492 bfd_elf_generic_reloc, /* special_function */
493 "R_ARM_CALL", /* name */
494 false, /* partial_inplace */
495 0x00ffffff, /* src_mask */
496 0x00ffffff, /* dst_mask */
497 true), /* pcrel_offset */
499 HOWTO (R_ARM_JUMP24, /* type */
500 2, /* rightshift */
501 4, /* size */
502 24, /* bitsize */
503 true, /* pc_relative */
504 0, /* bitpos */
505 complain_overflow_signed,/* complain_on_overflow */
506 bfd_elf_generic_reloc, /* special_function */
507 "R_ARM_JUMP24", /* name */
508 false, /* partial_inplace */
509 0x00ffffff, /* src_mask */
510 0x00ffffff, /* dst_mask */
511 true), /* pcrel_offset */
513 HOWTO (R_ARM_THM_JUMP24, /* type */
514 1, /* rightshift */
515 4, /* size */
516 24, /* bitsize */
517 true, /* pc_relative */
518 0, /* bitpos */
519 complain_overflow_signed,/* complain_on_overflow */
520 bfd_elf_generic_reloc, /* special_function */
521 "R_ARM_THM_JUMP24", /* name */
522 false, /* partial_inplace */
523 0x07ff2fff, /* src_mask */
524 0x07ff2fff, /* dst_mask */
525 true), /* pcrel_offset */
527 HOWTO (R_ARM_BASE_ABS, /* type */
528 0, /* rightshift */
529 4, /* size */
530 32, /* bitsize */
531 false, /* pc_relative */
532 0, /* bitpos */
533 complain_overflow_dont,/* complain_on_overflow */
534 bfd_elf_generic_reloc, /* special_function */
535 "R_ARM_BASE_ABS", /* name */
536 false, /* partial_inplace */
537 0xffffffff, /* src_mask */
538 0xffffffff, /* dst_mask */
539 false), /* pcrel_offset */
541 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
542 0, /* rightshift */
543 4, /* size */
544 12, /* bitsize */
545 true, /* pc_relative */
546 0, /* bitpos */
547 complain_overflow_dont,/* complain_on_overflow */
548 bfd_elf_generic_reloc, /* special_function */
549 "R_ARM_ALU_PCREL_7_0", /* name */
550 false, /* partial_inplace */
551 0x00000fff, /* src_mask */
552 0x00000fff, /* dst_mask */
553 true), /* pcrel_offset */
555 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
556 0, /* rightshift */
557 4, /* size */
558 12, /* bitsize */
559 true, /* pc_relative */
560 8, /* bitpos */
561 complain_overflow_dont,/* complain_on_overflow */
562 bfd_elf_generic_reloc, /* special_function */
563 "R_ARM_ALU_PCREL_15_8",/* name */
564 false, /* partial_inplace */
565 0x00000fff, /* src_mask */
566 0x00000fff, /* dst_mask */
567 true), /* pcrel_offset */
569 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
570 0, /* rightshift */
571 4, /* size */
572 12, /* bitsize */
573 true, /* pc_relative */
574 16, /* bitpos */
575 complain_overflow_dont,/* complain_on_overflow */
576 bfd_elf_generic_reloc, /* special_function */
577 "R_ARM_ALU_PCREL_23_15",/* name */
578 false, /* partial_inplace */
579 0x00000fff, /* src_mask */
580 0x00000fff, /* dst_mask */
581 true), /* pcrel_offset */
583 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
584 0, /* rightshift */
585 4, /* size */
586 12, /* bitsize */
587 false, /* pc_relative */
588 0, /* bitpos */
589 complain_overflow_dont,/* complain_on_overflow */
590 bfd_elf_generic_reloc, /* special_function */
591 "R_ARM_LDR_SBREL_11_0",/* name */
592 false, /* partial_inplace */
593 0x00000fff, /* src_mask */
594 0x00000fff, /* dst_mask */
595 false), /* pcrel_offset */
597 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
598 0, /* rightshift */
599 4, /* size */
600 8, /* bitsize */
601 false, /* pc_relative */
602 12, /* bitpos */
603 complain_overflow_dont,/* complain_on_overflow */
604 bfd_elf_generic_reloc, /* special_function */
605 "R_ARM_ALU_SBREL_19_12",/* name */
606 false, /* partial_inplace */
607 0x000ff000, /* src_mask */
608 0x000ff000, /* dst_mask */
609 false), /* pcrel_offset */
611 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
612 0, /* rightshift */
613 4, /* size */
614 8, /* bitsize */
615 false, /* pc_relative */
616 20, /* bitpos */
617 complain_overflow_dont,/* complain_on_overflow */
618 bfd_elf_generic_reloc, /* special_function */
619 "R_ARM_ALU_SBREL_27_20",/* name */
620 false, /* partial_inplace */
621 0x0ff00000, /* src_mask */
622 0x0ff00000, /* dst_mask */
623 false), /* pcrel_offset */
625 HOWTO (R_ARM_TARGET1, /* type */
626 0, /* rightshift */
627 4, /* size */
628 32, /* bitsize */
629 false, /* pc_relative */
630 0, /* bitpos */
631 complain_overflow_dont,/* complain_on_overflow */
632 bfd_elf_generic_reloc, /* special_function */
633 "R_ARM_TARGET1", /* name */
634 false, /* partial_inplace */
635 0xffffffff, /* src_mask */
636 0xffffffff, /* dst_mask */
637 false), /* pcrel_offset */
639 HOWTO (R_ARM_ROSEGREL32, /* type */
640 0, /* rightshift */
641 4, /* size */
642 32, /* bitsize */
643 false, /* pc_relative */
644 0, /* bitpos */
645 complain_overflow_dont,/* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 "R_ARM_ROSEGREL32", /* name */
648 false, /* partial_inplace */
649 0xffffffff, /* src_mask */
650 0xffffffff, /* dst_mask */
651 false), /* pcrel_offset */
653 HOWTO (R_ARM_V4BX, /* type */
654 0, /* rightshift */
655 4, /* size */
656 32, /* bitsize */
657 false, /* pc_relative */
658 0, /* bitpos */
659 complain_overflow_dont,/* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 "R_ARM_V4BX", /* name */
662 false, /* partial_inplace */
663 0xffffffff, /* src_mask */
664 0xffffffff, /* dst_mask */
665 false), /* pcrel_offset */
667 HOWTO (R_ARM_TARGET2, /* type */
668 0, /* rightshift */
669 4, /* size */
670 32, /* bitsize */
671 false, /* pc_relative */
672 0, /* bitpos */
673 complain_overflow_signed,/* complain_on_overflow */
674 bfd_elf_generic_reloc, /* special_function */
675 "R_ARM_TARGET2", /* name */
676 false, /* partial_inplace */
677 0xffffffff, /* src_mask */
678 0xffffffff, /* dst_mask */
679 true), /* pcrel_offset */
681 HOWTO (R_ARM_PREL31, /* type */
682 0, /* rightshift */
683 4, /* size */
684 31, /* bitsize */
685 true, /* pc_relative */
686 0, /* bitpos */
687 complain_overflow_signed,/* complain_on_overflow */
688 bfd_elf_generic_reloc, /* special_function */
689 "R_ARM_PREL31", /* name */
690 false, /* partial_inplace */
691 0x7fffffff, /* src_mask */
692 0x7fffffff, /* dst_mask */
693 true), /* pcrel_offset */
695 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
696 0, /* rightshift */
697 4, /* size */
698 16, /* bitsize */
699 false, /* pc_relative */
700 0, /* bitpos */
701 complain_overflow_dont,/* complain_on_overflow */
702 bfd_elf_generic_reloc, /* special_function */
703 "R_ARM_MOVW_ABS_NC", /* name */
704 false, /* partial_inplace */
705 0x000f0fff, /* src_mask */
706 0x000f0fff, /* dst_mask */
707 false), /* pcrel_offset */
709 HOWTO (R_ARM_MOVT_ABS, /* type */
710 0, /* rightshift */
711 4, /* size */
712 16, /* bitsize */
713 false, /* pc_relative */
714 0, /* bitpos */
715 complain_overflow_bitfield,/* complain_on_overflow */
716 bfd_elf_generic_reloc, /* special_function */
717 "R_ARM_MOVT_ABS", /* name */
718 false, /* partial_inplace */
719 0x000f0fff, /* src_mask */
720 0x000f0fff, /* dst_mask */
721 false), /* pcrel_offset */
723 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
724 0, /* rightshift */
725 4, /* size */
726 16, /* bitsize */
727 true, /* pc_relative */
728 0, /* bitpos */
729 complain_overflow_dont,/* complain_on_overflow */
730 bfd_elf_generic_reloc, /* special_function */
731 "R_ARM_MOVW_PREL_NC", /* name */
732 false, /* partial_inplace */
733 0x000f0fff, /* src_mask */
734 0x000f0fff, /* dst_mask */
735 true), /* pcrel_offset */
737 HOWTO (R_ARM_MOVT_PREL, /* type */
738 0, /* rightshift */
739 4, /* size */
740 16, /* bitsize */
741 true, /* pc_relative */
742 0, /* bitpos */
743 complain_overflow_bitfield,/* complain_on_overflow */
744 bfd_elf_generic_reloc, /* special_function */
745 "R_ARM_MOVT_PREL", /* name */
746 false, /* partial_inplace */
747 0x000f0fff, /* src_mask */
748 0x000f0fff, /* dst_mask */
749 true), /* pcrel_offset */
751 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
752 0, /* rightshift */
753 4, /* size */
754 16, /* bitsize */
755 false, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont,/* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 false, /* partial_inplace */
761 0x040f70ff, /* src_mask */
762 0x040f70ff, /* dst_mask */
763 false), /* pcrel_offset */
765 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
766 0, /* rightshift */
767 4, /* size */
768 16, /* bitsize */
769 false, /* pc_relative */
770 0, /* bitpos */
771 complain_overflow_bitfield,/* complain_on_overflow */
772 bfd_elf_generic_reloc, /* special_function */
773 "R_ARM_THM_MOVT_ABS", /* name */
774 false, /* partial_inplace */
775 0x040f70ff, /* src_mask */
776 0x040f70ff, /* dst_mask */
777 false), /* pcrel_offset */
779 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
780 0, /* rightshift */
781 4, /* size */
782 16, /* bitsize */
783 true, /* pc_relative */
784 0, /* bitpos */
785 complain_overflow_dont,/* complain_on_overflow */
786 bfd_elf_generic_reloc, /* special_function */
787 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 false, /* partial_inplace */
789 0x040f70ff, /* src_mask */
790 0x040f70ff, /* dst_mask */
791 true), /* pcrel_offset */
793 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
794 0, /* rightshift */
795 4, /* size */
796 16, /* bitsize */
797 true, /* pc_relative */
798 0, /* bitpos */
799 complain_overflow_bitfield,/* complain_on_overflow */
800 bfd_elf_generic_reloc, /* special_function */
801 "R_ARM_THM_MOVT_PREL", /* name */
802 false, /* partial_inplace */
803 0x040f70ff, /* src_mask */
804 0x040f70ff, /* dst_mask */
805 true), /* pcrel_offset */
807 HOWTO (R_ARM_THM_JUMP19, /* type */
808 1, /* rightshift */
809 4, /* size */
810 19, /* bitsize */
811 true, /* pc_relative */
812 0, /* bitpos */
813 complain_overflow_signed,/* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 "R_ARM_THM_JUMP19", /* name */
816 false, /* partial_inplace */
817 0x043f2fff, /* src_mask */
818 0x043f2fff, /* dst_mask */
819 true), /* pcrel_offset */
821 HOWTO (R_ARM_THM_JUMP6, /* type */
822 1, /* rightshift */
823 2, /* size */
824 6, /* bitsize */
825 true, /* pc_relative */
826 0, /* bitpos */
827 complain_overflow_unsigned,/* complain_on_overflow */
828 bfd_elf_generic_reloc, /* special_function */
829 "R_ARM_THM_JUMP6", /* name */
830 false, /* partial_inplace */
831 0x02f8, /* src_mask */
832 0x02f8, /* dst_mask */
833 true), /* pcrel_offset */
835 /* These are declared as 13-bit signed relocations because we can
836 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837 versa. */
838 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
839 0, /* rightshift */
840 4, /* size */
841 13, /* bitsize */
842 true, /* pc_relative */
843 0, /* bitpos */
844 complain_overflow_dont,/* complain_on_overflow */
845 bfd_elf_generic_reloc, /* special_function */
846 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 false, /* partial_inplace */
848 0xffffffff, /* src_mask */
849 0xffffffff, /* dst_mask */
850 true), /* pcrel_offset */
852 HOWTO (R_ARM_THM_PC12, /* type */
853 0, /* rightshift */
854 4, /* size */
855 13, /* bitsize */
856 true, /* pc_relative */
857 0, /* bitpos */
858 complain_overflow_dont,/* complain_on_overflow */
859 bfd_elf_generic_reloc, /* special_function */
860 "R_ARM_THM_PC12", /* name */
861 false, /* partial_inplace */
862 0xffffffff, /* src_mask */
863 0xffffffff, /* dst_mask */
864 true), /* pcrel_offset */
866 HOWTO (R_ARM_ABS32_NOI, /* type */
867 0, /* rightshift */
868 4, /* size */
869 32, /* bitsize */
870 false, /* pc_relative */
871 0, /* bitpos */
872 complain_overflow_dont,/* complain_on_overflow */
873 bfd_elf_generic_reloc, /* special_function */
874 "R_ARM_ABS32_NOI", /* name */
875 false, /* partial_inplace */
876 0xffffffff, /* src_mask */
877 0xffffffff, /* dst_mask */
878 false), /* pcrel_offset */
880 HOWTO (R_ARM_REL32_NOI, /* type */
881 0, /* rightshift */
882 4, /* size */
883 32, /* bitsize */
884 true, /* pc_relative */
885 0, /* bitpos */
886 complain_overflow_dont,/* complain_on_overflow */
887 bfd_elf_generic_reloc, /* special_function */
888 "R_ARM_REL32_NOI", /* name */
889 false, /* partial_inplace */
890 0xffffffff, /* src_mask */
891 0xffffffff, /* dst_mask */
892 false), /* pcrel_offset */
894 /* Group relocations. */
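  /* Illustrative note, not part of the original source: the
     ALU/LDR/LDRS/LDC *_G0/_G1/_G2 relocations below implement the
     AAELF "group relocation" scheme.  The PC- or SB-relative offset is
     processed in successive chunks, each sized to fit the immediate
     field of one ARM instruction; G0 covers the most significant
     chunk, G1 the next, and so on.  The _NC ("no check") variants do
     not verify that the remaining residual is zero.  */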
896 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
897 0, /* rightshift */
898 4, /* size */
899 32, /* bitsize */
900 true, /* pc_relative */
901 0, /* bitpos */
902 complain_overflow_dont,/* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 "R_ARM_ALU_PC_G0_NC", /* name */
905 false, /* partial_inplace */
906 0xffffffff, /* src_mask */
907 0xffffffff, /* dst_mask */
908 true), /* pcrel_offset */
910 HOWTO (R_ARM_ALU_PC_G0, /* type */
911 0, /* rightshift */
912 4, /* size */
913 32, /* bitsize */
914 true, /* pc_relative */
915 0, /* bitpos */
916 complain_overflow_dont,/* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 "R_ARM_ALU_PC_G0", /* name */
919 false, /* partial_inplace */
920 0xffffffff, /* src_mask */
921 0xffffffff, /* dst_mask */
922 true), /* pcrel_offset */
924 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
925 0, /* rightshift */
926 4, /* size */
927 32, /* bitsize */
928 true, /* pc_relative */
929 0, /* bitpos */
930 complain_overflow_dont,/* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 "R_ARM_ALU_PC_G1_NC", /* name */
933 false, /* partial_inplace */
934 0xffffffff, /* src_mask */
935 0xffffffff, /* dst_mask */
936 true), /* pcrel_offset */
938 HOWTO (R_ARM_ALU_PC_G1, /* type */
939 0, /* rightshift */
940 4, /* size */
941 32, /* bitsize */
942 true, /* pc_relative */
943 0, /* bitpos */
944 complain_overflow_dont,/* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 "R_ARM_ALU_PC_G1", /* name */
947 false, /* partial_inplace */
948 0xffffffff, /* src_mask */
949 0xffffffff, /* dst_mask */
950 true), /* pcrel_offset */
952 HOWTO (R_ARM_ALU_PC_G2, /* type */
953 0, /* rightshift */
954 4, /* size */
955 32, /* bitsize */
956 true, /* pc_relative */
957 0, /* bitpos */
958 complain_overflow_dont,/* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 "R_ARM_ALU_PC_G2", /* name */
961 false, /* partial_inplace */
962 0xffffffff, /* src_mask */
963 0xffffffff, /* dst_mask */
964 true), /* pcrel_offset */
966 HOWTO (R_ARM_LDR_PC_G1, /* type */
967 0, /* rightshift */
968 4, /* size */
969 32, /* bitsize */
970 true, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_dont,/* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 "R_ARM_LDR_PC_G1", /* name */
975 false, /* partial_inplace */
976 0xffffffff, /* src_mask */
977 0xffffffff, /* dst_mask */
978 true), /* pcrel_offset */
980 HOWTO (R_ARM_LDR_PC_G2, /* type */
981 0, /* rightshift */
982 4, /* size */
983 32, /* bitsize */
984 true, /* pc_relative */
985 0, /* bitpos */
986 complain_overflow_dont,/* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 "R_ARM_LDR_PC_G2", /* name */
989 false, /* partial_inplace */
990 0xffffffff, /* src_mask */
991 0xffffffff, /* dst_mask */
992 true), /* pcrel_offset */
994 HOWTO (R_ARM_LDRS_PC_G0, /* type */
995 0, /* rightshift */
996 4, /* size */
997 32, /* bitsize */
998 true, /* pc_relative */
999 0, /* bitpos */
1000 complain_overflow_dont,/* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 "R_ARM_LDRS_PC_G0", /* name */
1003 false, /* partial_inplace */
1004 0xffffffff, /* src_mask */
1005 0xffffffff, /* dst_mask */
1006 true), /* pcrel_offset */
1008 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1009 0, /* rightshift */
1010 4, /* size */
1011 32, /* bitsize */
1012 true, /* pc_relative */
1013 0, /* bitpos */
1014 complain_overflow_dont,/* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 "R_ARM_LDRS_PC_G1", /* name */
1017 false, /* partial_inplace */
1018 0xffffffff, /* src_mask */
1019 0xffffffff, /* dst_mask */
1020 true), /* pcrel_offset */
1022 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1023 0, /* rightshift */
1024 4, /* size */
1025 32, /* bitsize */
1026 true, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont,/* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_ARM_LDRS_PC_G2", /* name */
1031 false, /* partial_inplace */
1032 0xffffffff, /* src_mask */
1033 0xffffffff, /* dst_mask */
1034 true), /* pcrel_offset */
1036 HOWTO (R_ARM_LDC_PC_G0, /* type */
1037 0, /* rightshift */
1038 4, /* size */
1039 32, /* bitsize */
1040 true, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont,/* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_ARM_LDC_PC_G0", /* name */
1045 false, /* partial_inplace */
1046 0xffffffff, /* src_mask */
1047 0xffffffff, /* dst_mask */
1048 true), /* pcrel_offset */
1050 HOWTO (R_ARM_LDC_PC_G1, /* type */
1051 0, /* rightshift */
1052 4, /* size */
1053 32, /* bitsize */
1054 true, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_dont,/* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_ARM_LDC_PC_G1", /* name */
1059 false, /* partial_inplace */
1060 0xffffffff, /* src_mask */
1061 0xffffffff, /* dst_mask */
1062 true), /* pcrel_offset */
1064 HOWTO (R_ARM_LDC_PC_G2, /* type */
1065 0, /* rightshift */
1066 4, /* size */
1067 32, /* bitsize */
1068 true, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont,/* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_ARM_LDC_PC_G2", /* name */
1073 false, /* partial_inplace */
1074 0xffffffff, /* src_mask */
1075 0xffffffff, /* dst_mask */
1076 true), /* pcrel_offset */
1078 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1079 0, /* rightshift */
1080 4, /* size */
1081 32, /* bitsize */
1082 true, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont,/* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_ARM_ALU_SB_G0_NC", /* name */
1087 false, /* partial_inplace */
1088 0xffffffff, /* src_mask */
1089 0xffffffff, /* dst_mask */
1090 true), /* pcrel_offset */
1092 HOWTO (R_ARM_ALU_SB_G0, /* type */
1093 0, /* rightshift */
1094 4, /* size */
1095 32, /* bitsize */
1096 true, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont,/* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_ARM_ALU_SB_G0", /* name */
1101 false, /* partial_inplace */
1102 0xffffffff, /* src_mask */
1103 0xffffffff, /* dst_mask */
1104 true), /* pcrel_offset */
1106 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1107 0, /* rightshift */
1108 4, /* size */
1109 32, /* bitsize */
1110 true, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont,/* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_ARM_ALU_SB_G1_NC", /* name */
1115 false, /* partial_inplace */
1116 0xffffffff, /* src_mask */
1117 0xffffffff, /* dst_mask */
1118 true), /* pcrel_offset */
1120 HOWTO (R_ARM_ALU_SB_G1, /* type */
1121 0, /* rightshift */
1122 4, /* size */
1123 32, /* bitsize */
1124 true, /* pc_relative */
1125 0, /* bitpos */
1126 complain_overflow_dont,/* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_ARM_ALU_SB_G1", /* name */
1129 false, /* partial_inplace */
1130 0xffffffff, /* src_mask */
1131 0xffffffff, /* dst_mask */
1132 true), /* pcrel_offset */
1134 HOWTO (R_ARM_ALU_SB_G2, /* type */
1135 0, /* rightshift */
1136 4, /* size */
1137 32, /* bitsize */
1138 true, /* pc_relative */
1139 0, /* bitpos */
1140 complain_overflow_dont,/* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_ARM_ALU_SB_G2", /* name */
1143 false, /* partial_inplace */
1144 0xffffffff, /* src_mask */
1145 0xffffffff, /* dst_mask */
1146 true), /* pcrel_offset */
1148 HOWTO (R_ARM_LDR_SB_G0, /* type */
1149 0, /* rightshift */
1150 4, /* size */
1151 32, /* bitsize */
1152 true, /* pc_relative */
1153 0, /* bitpos */
1154 complain_overflow_dont,/* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_ARM_LDR_SB_G0", /* name */
1157 false, /* partial_inplace */
1158 0xffffffff, /* src_mask */
1159 0xffffffff, /* dst_mask */
1160 true), /* pcrel_offset */
1162 HOWTO (R_ARM_LDR_SB_G1, /* type */
1163 0, /* rightshift */
1164 4, /* size */
1165 32, /* bitsize */
1166 true, /* pc_relative */
1167 0, /* bitpos */
1168 complain_overflow_dont,/* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_ARM_LDR_SB_G1", /* name */
1171 false, /* partial_inplace */
1172 0xffffffff, /* src_mask */
1173 0xffffffff, /* dst_mask */
1174 true), /* pcrel_offset */
1176 HOWTO (R_ARM_LDR_SB_G2, /* type */
1177 0, /* rightshift */
1178 4, /* size */
1179 32, /* bitsize */
1180 true, /* pc_relative */
1181 0, /* bitpos */
1182 complain_overflow_dont,/* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_ARM_LDR_SB_G2", /* name */
1185 false, /* partial_inplace */
1186 0xffffffff, /* src_mask */
1187 0xffffffff, /* dst_mask */
1188 true), /* pcrel_offset */
1190 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1191 0, /* rightshift */
1192 4, /* size */
1193 32, /* bitsize */
1194 true, /* pc_relative */
1195 0, /* bitpos */
1196 complain_overflow_dont,/* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_ARM_LDRS_SB_G0", /* name */
1199 false, /* partial_inplace */
1200 0xffffffff, /* src_mask */
1201 0xffffffff, /* dst_mask */
1202 true), /* pcrel_offset */
1204 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1205 0, /* rightshift */
1206 4, /* size */
1207 32, /* bitsize */
1208 true, /* pc_relative */
1209 0, /* bitpos */
1210 complain_overflow_dont,/* complain_on_overflow */
1211 bfd_elf_generic_reloc, /* special_function */
1212 "R_ARM_LDRS_SB_G1", /* name */
1213 false, /* partial_inplace */
1214 0xffffffff, /* src_mask */
1215 0xffffffff, /* dst_mask */
1216 true), /* pcrel_offset */
1218 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1219 0, /* rightshift */
1220 4, /* size */
1221 32, /* bitsize */
1222 true, /* pc_relative */
1223 0, /* bitpos */
1224 complain_overflow_dont,/* complain_on_overflow */
1225 bfd_elf_generic_reloc, /* special_function */
1226 "R_ARM_LDRS_SB_G2", /* name */
1227 false, /* partial_inplace */
1228 0xffffffff, /* src_mask */
1229 0xffffffff, /* dst_mask */
1230 true), /* pcrel_offset */
1232 HOWTO (R_ARM_LDC_SB_G0, /* type */
1233 0, /* rightshift */
1234 4, /* size */
1235 32, /* bitsize */
1236 true, /* pc_relative */
1237 0, /* bitpos */
1238 complain_overflow_dont,/* complain_on_overflow */
1239 bfd_elf_generic_reloc, /* special_function */
1240 "R_ARM_LDC_SB_G0", /* name */
1241 false, /* partial_inplace */
1242 0xffffffff, /* src_mask */
1243 0xffffffff, /* dst_mask */
1244 true), /* pcrel_offset */
1246 HOWTO (R_ARM_LDC_SB_G1, /* type */
1247 0, /* rightshift */
1248 4, /* size */
1249 32, /* bitsize */
1250 true, /* pc_relative */
1251 0, /* bitpos */
1252 complain_overflow_dont,/* complain_on_overflow */
1253 bfd_elf_generic_reloc, /* special_function */
1254 "R_ARM_LDC_SB_G1", /* name */
1255 false, /* partial_inplace */
1256 0xffffffff, /* src_mask */
1257 0xffffffff, /* dst_mask */
1258 true), /* pcrel_offset */
1260 HOWTO (R_ARM_LDC_SB_G2, /* type */
1261 0, /* rightshift */
1262 4, /* size */
1263 32, /* bitsize */
1264 true, /* pc_relative */
1265 0, /* bitpos */
1266 complain_overflow_dont,/* complain_on_overflow */
1267 bfd_elf_generic_reloc, /* special_function */
1268 "R_ARM_LDC_SB_G2", /* name */
1269 false, /* partial_inplace */
1270 0xffffffff, /* src_mask */
1271 0xffffffff, /* dst_mask */
1272 true), /* pcrel_offset */
1274 /* End of group relocations. */
1276 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1277 0, /* rightshift */
1278 4, /* size */
1279 16, /* bitsize */
1280 false, /* pc_relative */
1281 0, /* bitpos */
1282 complain_overflow_dont,/* complain_on_overflow */
1283 bfd_elf_generic_reloc, /* special_function */
1284 "R_ARM_MOVW_BREL_NC", /* name */
1285 false, /* partial_inplace */
1286 0x0000ffff, /* src_mask */
1287 0x0000ffff, /* dst_mask */
1288 false), /* pcrel_offset */
1290 HOWTO (R_ARM_MOVT_BREL, /* type */
1291 0, /* rightshift */
1292 4, /* size */
1293 16, /* bitsize */
1294 false, /* pc_relative */
1295 0, /* bitpos */
1296 complain_overflow_bitfield,/* complain_on_overflow */
1297 bfd_elf_generic_reloc, /* special_function */
1298 "R_ARM_MOVT_BREL", /* name */
1299 false, /* partial_inplace */
1300 0x0000ffff, /* src_mask */
1301 0x0000ffff, /* dst_mask */
1302 false), /* pcrel_offset */
1304 HOWTO (R_ARM_MOVW_BREL, /* type */
1305 0, /* rightshift */
1306 4, /* size */
1307 16, /* bitsize */
1308 false, /* pc_relative */
1309 0, /* bitpos */
1310 complain_overflow_dont,/* complain_on_overflow */
1311 bfd_elf_generic_reloc, /* special_function */
1312 "R_ARM_MOVW_BREL", /* name */
1313 false, /* partial_inplace */
1314 0x0000ffff, /* src_mask */
1315 0x0000ffff, /* dst_mask */
1316 false), /* pcrel_offset */
1318 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1319 0, /* rightshift */
1320 4, /* size */
1321 16, /* bitsize */
1322 false, /* pc_relative */
1323 0, /* bitpos */
1324 complain_overflow_dont,/* complain_on_overflow */
1325 bfd_elf_generic_reloc, /* special_function */
1326 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 false, /* partial_inplace */
1328 0x040f70ff, /* src_mask */
1329 0x040f70ff, /* dst_mask */
1330 false), /* pcrel_offset */
1332 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1333 0, /* rightshift */
1334 4, /* size */
1335 16, /* bitsize */
1336 false, /* pc_relative */
1337 0, /* bitpos */
1338 complain_overflow_bitfield,/* complain_on_overflow */
1339 bfd_elf_generic_reloc, /* special_function */
1340 "R_ARM_THM_MOVT_BREL", /* name */
1341 false, /* partial_inplace */
1342 0x040f70ff, /* src_mask */
1343 0x040f70ff, /* dst_mask */
1344 false), /* pcrel_offset */
1346 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1347 0, /* rightshift */
1348 4, /* size */
1349 16, /* bitsize */
1350 false, /* pc_relative */
1351 0, /* bitpos */
1352 complain_overflow_dont,/* complain_on_overflow */
1353 bfd_elf_generic_reloc, /* special_function */
1354 "R_ARM_THM_MOVW_BREL", /* name */
1355 false, /* partial_inplace */
1356 0x040f70ff, /* src_mask */
1357 0x040f70ff, /* dst_mask */
1358 false), /* pcrel_offset */
1360 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1361 0, /* rightshift */
1362 4, /* size */
1363 32, /* bitsize */
1364 false, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_bitfield,/* complain_on_overflow */
1367 NULL, /* special_function */
1368 "R_ARM_TLS_GOTDESC", /* name */
1369 true, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 false), /* pcrel_offset */
1374 HOWTO (R_ARM_TLS_CALL, /* type */
1375 0, /* rightshift */
1376 4, /* size */
1377 24, /* bitsize */
1378 false, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_TLS_CALL", /* name */
1383 false, /* partial_inplace */
1384 0x00ffffff, /* src_mask */
1385 0x00ffffff, /* dst_mask */
1386 false), /* pcrel_offset */
1388 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1389 0, /* rightshift */
1390 4, /* size */
1391 0, /* bitsize */
1392 false, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_dont,/* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_TLS_DESCSEQ", /* name */
1397 false, /* partial_inplace */
1398 0x00000000, /* src_mask */
1399 0x00000000, /* dst_mask */
1400 false), /* pcrel_offset */
1402 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1403 0, /* rightshift */
1404 4, /* size */
1405 24, /* bitsize */
1406 false, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_dont,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_THM_TLS_CALL", /* name */
1411 false, /* partial_inplace */
1412 0x07ff07ff, /* src_mask */
1413 0x07ff07ff, /* dst_mask */
1414 false), /* pcrel_offset */
1416 HOWTO (R_ARM_PLT32_ABS, /* type */
1417 0, /* rightshift */
1418 4, /* size */
1419 32, /* bitsize */
1420 false, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_dont,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_PLT32_ABS", /* name */
1425 false, /* partial_inplace */
1426 0xffffffff, /* src_mask */
1427 0xffffffff, /* dst_mask */
1428 false), /* pcrel_offset */
1430 HOWTO (R_ARM_GOT_ABS, /* type */
1431 0, /* rightshift */
1432 4, /* size */
1433 32, /* bitsize */
1434 false, /* pc_relative */
1435 0, /* bitpos */
1436 complain_overflow_dont,/* complain_on_overflow */
1437 bfd_elf_generic_reloc, /* special_function */
1438 "R_ARM_GOT_ABS", /* name */
1439 false, /* partial_inplace */
1440 0xffffffff, /* src_mask */
1441 0xffffffff, /* dst_mask */
1442 false), /* pcrel_offset */
1444 HOWTO (R_ARM_GOT_PREL, /* type */
1445 0, /* rightshift */
1446 4, /* size */
1447 32, /* bitsize */
1448 true, /* pc_relative */
1449 0, /* bitpos */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 "R_ARM_GOT_PREL", /* name */
1453 false, /* partial_inplace */
1454 0xffffffff, /* src_mask */
1455 0xffffffff, /* dst_mask */
1456 true), /* pcrel_offset */
1458 HOWTO (R_ARM_GOT_BREL12, /* type */
1459 0, /* rightshift */
1460 4, /* size */
1461 12, /* bitsize */
1462 false, /* pc_relative */
1463 0, /* bitpos */
1464 complain_overflow_bitfield,/* complain_on_overflow */
1465 bfd_elf_generic_reloc, /* special_function */
1466 "R_ARM_GOT_BREL12", /* name */
1467 false, /* partial_inplace */
1468 0x00000fff, /* src_mask */
1469 0x00000fff, /* dst_mask */
1470 false), /* pcrel_offset */
1472 HOWTO (R_ARM_GOTOFF12, /* type */
1473 0, /* rightshift */
1474 4, /* size */
1475 12, /* bitsize */
1476 false, /* pc_relative */
1477 0, /* bitpos */
1478 complain_overflow_bitfield,/* complain_on_overflow */
1479 bfd_elf_generic_reloc, /* special_function */
1480 "R_ARM_GOTOFF12", /* name */
1481 false, /* partial_inplace */
1482 0x00000fff, /* src_mask */
1483 0x00000fff, /* dst_mask */
1484 false), /* pcrel_offset */
1486 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1488 /* GNU extension to record C++ vtable member usage */
1489 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1490 0, /* rightshift */
1491 4, /* size */
1492 0, /* bitsize */
1493 false, /* pc_relative */
1494 0, /* bitpos */
1495 complain_overflow_dont, /* complain_on_overflow */
1496 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1497 "R_ARM_GNU_VTENTRY", /* name */
1498 false, /* partial_inplace */
1499 0, /* src_mask */
1500 0, /* dst_mask */
1501 false), /* pcrel_offset */
1503 /* GNU extension to record C++ vtable hierarchy */
1504 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1505 0, /* rightshift */
1506 4, /* size */
1507 0, /* bitsize */
1508 false, /* pc_relative */
1509 0, /* bitpos */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 NULL, /* special_function */
1512 "R_ARM_GNU_VTINHERIT", /* name */
1513 false, /* partial_inplace */
1514 0, /* src_mask */
1515 0, /* dst_mask */
1516 false), /* pcrel_offset */
1518 HOWTO (R_ARM_THM_JUMP11, /* type */
1519 1, /* rightshift */
1520 2, /* size */
1521 11, /* bitsize */
1522 true, /* pc_relative */
1523 0, /* bitpos */
1524 complain_overflow_signed, /* complain_on_overflow */
1525 bfd_elf_generic_reloc, /* special_function */
1526 "R_ARM_THM_JUMP11", /* name */
1527 false, /* partial_inplace */
1528 0x000007ff, /* src_mask */
1529 0x000007ff, /* dst_mask */
1530 true), /* pcrel_offset */
1532 HOWTO (R_ARM_THM_JUMP8, /* type */
1533 1, /* rightshift */
1534 2, /* size */
1535 8, /* bitsize */
1536 true, /* pc_relative */
1537 0, /* bitpos */
1538 complain_overflow_signed, /* complain_on_overflow */
1539 bfd_elf_generic_reloc, /* special_function */
1540 "R_ARM_THM_JUMP8", /* name */
1541 false, /* partial_inplace */
1542 0x000000ff, /* src_mask */
1543 0x000000ff, /* dst_mask */
1544 true), /* pcrel_offset */
1546 /* TLS relocations */
1547 HOWTO (R_ARM_TLS_GD32, /* type */
1548 0, /* rightshift */
1549 4, /* size */
1550 32, /* bitsize */
1551 false, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 NULL, /* special_function */
1555 "R_ARM_TLS_GD32", /* name */
1556 true, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 false), /* pcrel_offset */
1561 HOWTO (R_ARM_TLS_LDM32, /* type */
1562 0, /* rightshift */
1563 4, /* size */
1564 32, /* bitsize */
1565 false, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDM32", /* name */
1570 true, /* partial_inplace */
1571 0xffffffff, /* src_mask */
1572 0xffffffff, /* dst_mask */
1573 false), /* pcrel_offset */
1575 HOWTO (R_ARM_TLS_LDO32, /* type */
1576 0, /* rightshift */
1577 4, /* size */
1578 32, /* bitsize */
1579 false, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LDO32", /* name */
1584 true, /* partial_inplace */
1585 0xffffffff, /* src_mask */
1586 0xffffffff, /* dst_mask */
1587 false), /* pcrel_offset */
1589 HOWTO (R_ARM_TLS_IE32, /* type */
1590 0, /* rightshift */
1591 4, /* size */
1592 32, /* bitsize */
1593 false, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 NULL, /* special_function */
1597 "R_ARM_TLS_IE32", /* name */
1598 true, /* partial_inplace */
1599 0xffffffff, /* src_mask */
1600 0xffffffff, /* dst_mask */
1601 false), /* pcrel_offset */
1603 HOWTO (R_ARM_TLS_LE32, /* type */
1604 0, /* rightshift */
1605 4, /* size */
1606 32, /* bitsize */
1607 false, /* pc_relative */
1608 0, /* bitpos */
1609 complain_overflow_bitfield,/* complain_on_overflow */
1610 NULL, /* special_function */
1611 "R_ARM_TLS_LE32", /* name */
1612 true, /* partial_inplace */
1613 0xffffffff, /* src_mask */
1614 0xffffffff, /* dst_mask */
1615 false), /* pcrel_offset */
1617 HOWTO (R_ARM_TLS_LDO12, /* type */
1618 0, /* rightshift */
1619 4, /* size */
1620 12, /* bitsize */
1621 false, /* pc_relative */
1622 0, /* bitpos */
1623 complain_overflow_bitfield,/* complain_on_overflow */
1624 bfd_elf_generic_reloc, /* special_function */
1625 "R_ARM_TLS_LDO12", /* name */
1626 false, /* partial_inplace */
1627 0x00000fff, /* src_mask */
1628 0x00000fff, /* dst_mask */
1629 false), /* pcrel_offset */
1631 HOWTO (R_ARM_TLS_LE12, /* type */
1632 0, /* rightshift */
1633 4, /* size */
1634 12, /* bitsize */
1635 false, /* pc_relative */
1636 0, /* bitpos */
1637 complain_overflow_bitfield,/* complain_on_overflow */
1638 bfd_elf_generic_reloc, /* special_function */
1639 "R_ARM_TLS_LE12", /* name */
1640 false, /* partial_inplace */
1641 0x00000fff, /* src_mask */
1642 0x00000fff, /* dst_mask */
1643 false), /* pcrel_offset */
1645 HOWTO (R_ARM_TLS_IE12GP, /* type */
1646 0, /* rightshift */
1647 4, /* size */
1648 12, /* bitsize */
1649 false, /* pc_relative */
1650 0, /* bitpos */
1651 complain_overflow_bitfield,/* complain_on_overflow */
1652 bfd_elf_generic_reloc, /* special_function */
1653 "R_ARM_TLS_IE12GP", /* name */
1654 false, /* partial_inplace */
1655 0x00000fff, /* src_mask */
1656 0x00000fff, /* dst_mask */
1657 false), /* pcrel_offset */
1659 /* 112-127 private relocations. */
1660 EMPTY_HOWTO (112),
1661 EMPTY_HOWTO (113),
1662 EMPTY_HOWTO (114),
1663 EMPTY_HOWTO (115),
1664 EMPTY_HOWTO (116),
1665 EMPTY_HOWTO (117),
1666 EMPTY_HOWTO (118),
1667 EMPTY_HOWTO (119),
1668 EMPTY_HOWTO (120),
1669 EMPTY_HOWTO (121),
1670 EMPTY_HOWTO (122),
1671 EMPTY_HOWTO (123),
1672 EMPTY_HOWTO (124),
1673 EMPTY_HOWTO (125),
1674 EMPTY_HOWTO (126),
1675 EMPTY_HOWTO (127),
1677 /* R_ARM_ME_TOO, obsolete. */
1678 EMPTY_HOWTO (128),
1680 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1681 0, /* rightshift */
1682 2, /* size */
1683 0, /* bitsize */
1684 false, /* pc_relative */
1685 0, /* bitpos */
1686 complain_overflow_dont,/* complain_on_overflow */
1687 bfd_elf_generic_reloc, /* special_function */
1688 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 false, /* partial_inplace */
1690 0x00000000, /* src_mask */
1691 0x00000000, /* dst_mask */
1692 false), /* pcrel_offset */
1693 EMPTY_HOWTO (130),
1694 EMPTY_HOWTO (131),
1695 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1696 0, /* rightshift. */
1697 2, /* size. */
1698 16, /* bitsize. */
1699 false, /* pc_relative. */
1700 0, /* bitpos. */
1701 complain_overflow_bitfield,/* complain_on_overflow. */
1702 bfd_elf_generic_reloc, /* special_function. */
1703 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1704 false, /* partial_inplace. */
1705 0x00000000, /* src_mask. */
1706 0x00000000, /* dst_mask. */
1707 false), /* pcrel_offset. */
1708 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1709 0, /* rightshift. */
1710 2, /* size. */
1711 16, /* bitsize. */
1712 false, /* pc_relative. */
1713 0, /* bitpos. */
1714 complain_overflow_bitfield,/* complain_on_overflow. */
1715 bfd_elf_generic_reloc, /* special_function. */
1716 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1717 false, /* partial_inplace. */
1718 0x00000000, /* src_mask. */
1719 0x00000000, /* dst_mask. */
1720 false), /* pcrel_offset. */
1721 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1722 0, /* rightshift. */
1723 2, /* size. */
1724 16, /* bitsize. */
1725 false, /* pc_relative. */
1726 0, /* bitpos. */
1727 complain_overflow_bitfield,/* complain_on_overflow. */
1728 bfd_elf_generic_reloc, /* special_function. */
1729 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1730 false, /* partial_inplace. */
1731 0x00000000, /* src_mask. */
1732 0x00000000, /* dst_mask. */
1733 false), /* pcrel_offset. */
1734 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1735 0, /* rightshift. */
1736 2, /* size. */
1737 16, /* bitsize. */
1738 false, /* pc_relative. */
1739 0, /* bitpos. */
1740 complain_overflow_bitfield,/* complain_on_overflow. */
1741 bfd_elf_generic_reloc, /* special_function. */
1742 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1743 false, /* partial_inplace. */
1744 0x00000000, /* src_mask. */
1745 0x00000000, /* dst_mask. */
1746 false), /* pcrel_offset. */
1747 /* Relocations for Armv8.1-M Mainline. */
1748 HOWTO (R_ARM_THM_BF16, /* type. */
1749 0, /* rightshift. */
1750 2, /* size. */
1751 16, /* bitsize. */
1752 true, /* pc_relative. */
1753 0, /* bitpos. */
1754 complain_overflow_dont,/* do not complain_on_overflow. */
1755 bfd_elf_generic_reloc, /* special_function. */
1756 "R_ARM_THM_BF16", /* name. */
1757 false, /* partial_inplace. */
1758 0x001f0ffe, /* src_mask. */
1759 0x001f0ffe, /* dst_mask. */
1760 true), /* pcrel_offset. */
1761 HOWTO (R_ARM_THM_BF12, /* type. */
1762 0, /* rightshift. */
1763 2, /* size. */
1764 12, /* bitsize. */
1765 true, /* pc_relative. */
1766 0, /* bitpos. */
1767 complain_overflow_dont,/* do not complain_on_overflow. */
1768 bfd_elf_generic_reloc, /* special_function. */
1769 "R_ARM_THM_BF12", /* name. */
1770 false, /* partial_inplace. */
1771 0x00010ffe, /* src_mask. */
1772 0x00010ffe, /* dst_mask. */
1773 true), /* pcrel_offset. */
1774 HOWTO (R_ARM_THM_BF18, /* type. */
1775 0, /* rightshift. */
1776 2, /* size. */
1777 18, /* bitsize. */
1778 true, /* pc_relative. */
1779 0, /* bitpos. */
1780 complain_overflow_dont,/* do not complain_on_overflow. */
1781 bfd_elf_generic_reloc, /* special_function. */
1782 "R_ARM_THM_BF18", /* name. */
1783 false, /* partial_inplace. */
1784 0x007f0ffe, /* src_mask. */
1785 0x007f0ffe, /* dst_mask. */
1786 true), /* pcrel_offset. */
};

/* 160 onwards: */
static reloc_howto_type elf32_arm_howto_table_2[8] =
{
1792 HOWTO (R_ARM_IRELATIVE, /* type */
1793 0, /* rightshift */
1794 4, /* size */
1795 32, /* bitsize */
1796 false, /* pc_relative */
1797 0, /* bitpos */
1798 complain_overflow_bitfield,/* complain_on_overflow */
1799 bfd_elf_generic_reloc, /* special_function */
1800 "R_ARM_IRELATIVE", /* name */
1801 true, /* partial_inplace */
1802 0xffffffff, /* src_mask */
1803 0xffffffff, /* dst_mask */
1804 false), /* pcrel_offset */
1805 HOWTO (R_ARM_GOTFUNCDESC, /* type */
1806 0, /* rightshift */
1807 4, /* size */
1808 32, /* bitsize */
1809 false, /* pc_relative */
1810 0, /* bitpos */
1811 complain_overflow_bitfield,/* complain_on_overflow */
1812 bfd_elf_generic_reloc, /* special_function */
1813 "R_ARM_GOTFUNCDESC", /* name */
1814 false, /* partial_inplace */
1815 0, /* src_mask */
1816 0xffffffff, /* dst_mask */
1817 false), /* pcrel_offset */
1818 HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
1819 0, /* rightshift */
1820 4, /* size */
1821 32, /* bitsize */
1822 false, /* pc_relative */
1823 0, /* bitpos */
1824 complain_overflow_bitfield,/* complain_on_overflow */
1825 bfd_elf_generic_reloc, /* special_function */
1826 "R_ARM_GOTOFFFUNCDESC",/* name */
1827 false, /* partial_inplace */
1828 0, /* src_mask */
1829 0xffffffff, /* dst_mask */
1830 false), /* pcrel_offset */
1831 HOWTO (R_ARM_FUNCDESC, /* type */
1832 0, /* rightshift */
1833 4, /* size */
1834 32, /* bitsize */
1835 false, /* pc_relative */
1836 0, /* bitpos */
1837 complain_overflow_bitfield,/* complain_on_overflow */
1838 bfd_elf_generic_reloc, /* special_function */
1839 "R_ARM_FUNCDESC", /* name */
1840 false, /* partial_inplace */
1841 0, /* src_mask */
1842 0xffffffff, /* dst_mask */
1843 false), /* pcrel_offset */
1844 HOWTO (R_ARM_FUNCDESC_VALUE, /* type */
1845 0, /* rightshift */
1846 4, /* size */
1847 64, /* bitsize */
1848 false, /* pc_relative */
1849 0, /* bitpos */
1850 complain_overflow_bitfield,/* complain_on_overflow */
1851 bfd_elf_generic_reloc, /* special_function */
1852 "R_ARM_FUNCDESC_VALUE",/* name */
1853 false, /* partial_inplace */
1854 0, /* src_mask */
1855 0xffffffff, /* dst_mask */
1856 false), /* pcrel_offset */
1857 HOWTO (R_ARM_TLS_GD32_FDPIC, /* type */
1858 0, /* rightshift */
1859 4, /* size */
1860 32, /* bitsize */
1861 false, /* pc_relative */
1862 0, /* bitpos */
1863 complain_overflow_bitfield,/* complain_on_overflow */
1864 bfd_elf_generic_reloc, /* special_function */
1865 "R_ARM_TLS_GD32_FDPIC",/* name */
1866 false, /* partial_inplace */
1867 0, /* src_mask */
1868 0xffffffff, /* dst_mask */
1869 false), /* pcrel_offset */
1870 HOWTO (R_ARM_TLS_LDM32_FDPIC, /* type */
1871 0, /* rightshift */
1872 4, /* size */
1873 32, /* bitsize */
1874 false, /* pc_relative */
1875 0, /* bitpos */
1876 complain_overflow_bitfield,/* complain_on_overflow */
1877 bfd_elf_generic_reloc, /* special_function */
1878 "R_ARM_TLS_LDM32_FDPIC",/* name */
1879 false, /* partial_inplace */
1880 0, /* src_mask */
1881 0xffffffff, /* dst_mask */
1882 false), /* pcrel_offset */
1883 HOWTO (R_ARM_TLS_IE32_FDPIC, /* type */
1884 0, /* rightshift */
1885 4, /* size */
1886 32, /* bitsize */
1887 false, /* pc_relative */
1888 0, /* bitpos */
1889 complain_overflow_bitfield,/* complain_on_overflow */
1890 bfd_elf_generic_reloc, /* special_function */
1891 "R_ARM_TLS_IE32_FDPIC",/* name */
1892 false, /* partial_inplace */
1893 0, /* src_mask */
1894 0xffffffff, /* dst_mask */
1895 false), /* pcrel_offset */
1898 /* 249-255 extended, currently unused, relocations: */
1899 static reloc_howto_type elf32_arm_howto_table_3[4] =
1901 HOWTO (R_ARM_RREL32, /* type */
1902 0, /* rightshift */
1903 0, /* size */
1904 0, /* bitsize */
1905 false, /* pc_relative */
1906 0, /* bitpos */
1907 complain_overflow_dont,/* complain_on_overflow */
1908 bfd_elf_generic_reloc, /* special_function */
1909 "R_ARM_RREL32", /* name */
1910 false, /* partial_inplace */
1911 0, /* src_mask */
1912 0, /* dst_mask */
1913 false), /* pcrel_offset */
1915 HOWTO (R_ARM_RABS32, /* type */
1916 0, /* rightshift */
1917 0, /* size */
1918 0, /* bitsize */
1919 false, /* pc_relative */
1920 0, /* bitpos */
1921 complain_overflow_dont,/* complain_on_overflow */
1922 bfd_elf_generic_reloc, /* special_function */
1923 "R_ARM_RABS32", /* name */
1924 false, /* partial_inplace */
1925 0, /* src_mask */
1926 0, /* dst_mask */
1927 false), /* pcrel_offset */
1929 HOWTO (R_ARM_RPC24, /* type */
1930 0, /* rightshift */
1931 0, /* size */
1932 0, /* bitsize */
1933 false, /* pc_relative */
1934 0, /* bitpos */
1935 complain_overflow_dont,/* complain_on_overflow */
1936 bfd_elf_generic_reloc, /* special_function */
1937 "R_ARM_RPC24", /* name */
1938 false, /* partial_inplace */
1939 0, /* src_mask */
1940 0, /* dst_mask */
1941 false), /* pcrel_offset */
1943 HOWTO (R_ARM_RBASE, /* type */
1944 0, /* rightshift */
1945 0, /* size */
1946 0, /* bitsize */
1947 false, /* pc_relative */
1948 0, /* bitpos */
1949 complain_overflow_dont,/* complain_on_overflow */
1950 bfd_elf_generic_reloc, /* special_function */
1951 "R_ARM_RBASE", /* name */
1952 false, /* partial_inplace */
1953 0, /* src_mask */
1954 0, /* dst_mask */
1955 false) /* pcrel_offset */
1958 static reloc_howto_type *
1959 elf32_arm_howto_from_type (unsigned int r_type)
1961 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1962 return &elf32_arm_howto_table_1[r_type];
1964 if (r_type >= R_ARM_IRELATIVE
1965 && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1966 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1968 if (r_type >= R_ARM_RREL32
1969 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1970 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1972 return NULL;
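/* Illustrative sketch (comment only, not compiled): given the standard
   R_ARM_* numbering, a lookup lands in one of the three tables above:

     reloc_howto_type *h;
     h = elf32_arm_howto_from_type (R_ARM_ABS32);      => table 1, direct index
     h = elf32_arm_howto_from_type (R_ARM_IRELATIVE);  => table 2, index 0
     h = elf32_arm_howto_from_type (R_ARM_RREL32);     => table 3, index 0
     h = elf32_arm_howto_from_type (0xffff);           => out of range, NULL  */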
1975 static bool
1976 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1977 Elf_Internal_Rela * elf_reloc)
1979 unsigned int r_type;
1981 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1982 if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1984 /* xgettext:c-format */
1985 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1986 abfd, r_type);
1987 bfd_set_error (bfd_error_bad_value);
1988 return false;
1990 return true;
1993 struct elf32_arm_reloc_map
1995 bfd_reloc_code_real_type bfd_reloc_val;
1996 unsigned char elf_reloc_val;
1999 /* All entries in this list must also be present in elf32_arm_howto_table. */
2000 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
2002 {BFD_RELOC_NONE, R_ARM_NONE},
2003 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
2004 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
2005 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
2006 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
2007 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
2008 {BFD_RELOC_32, R_ARM_ABS32},
2009 {BFD_RELOC_32_PCREL, R_ARM_REL32},
2010 {BFD_RELOC_8, R_ARM_ABS8},
2011 {BFD_RELOC_16, R_ARM_ABS16},
2012 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
2013 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
2014 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
2015 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
2016 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
2017 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
2018 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
2019 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
2020 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
2021 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
2022 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
2023 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
2024 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
2025 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
2026 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
2027 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2028 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
2029 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
2030 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
2031 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
2032 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
2033 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2034 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
2035 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
2036 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
2037 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
2038 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
2039 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
2040 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
2041 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
2042 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
2043 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
2044 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
2045 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
2046 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
2047 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
2048 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
2049 {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
2050 {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
2051 {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
2052 {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
2053 {BFD_RELOC_ARM_TLS_GD32_FDPIC, R_ARM_TLS_GD32_FDPIC},
2054 {BFD_RELOC_ARM_TLS_LDM32_FDPIC, R_ARM_TLS_LDM32_FDPIC},
2055 {BFD_RELOC_ARM_TLS_IE32_FDPIC, R_ARM_TLS_IE32_FDPIC},
2056 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
2057 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
2058 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
2059 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
2060 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
2061 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
2062 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
2063 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
2064 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2065 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2066 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2067 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2068 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2069 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2070 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2071 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2072 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2073 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2074 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2075 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2076 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2077 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2078 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2079 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2080 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2081 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2082 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2083 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2084 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2085 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2086 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2087 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2088 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2089 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2090 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2091 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2092 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2093 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2094 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
2095 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2096 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2097 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2098 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC},
2099 {BFD_RELOC_ARM_THUMB_BF17, R_ARM_THM_BF16},
2100 {BFD_RELOC_ARM_THUMB_BF13, R_ARM_THM_BF12},
2101 {BFD_RELOC_ARM_THUMB_BF19, R_ARM_THM_BF18}
2104 static reloc_howto_type *
2105 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2106 bfd_reloc_code_real_type code)
2108 unsigned int i;
2110 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2111 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2112 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2114 return NULL;
2117 static reloc_howto_type *
2118 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2119 const char *r_name)
2121 unsigned int i;
2123 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2124 if (elf32_arm_howto_table_1[i].name != NULL
2125 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2126 return &elf32_arm_howto_table_1[i];
2128 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2129 if (elf32_arm_howto_table_2[i].name != NULL
2130 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2131 return &elf32_arm_howto_table_2[i];
2133 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2134 if (elf32_arm_howto_table_3[i].name != NULL
2135 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2136 return &elf32_arm_howto_table_3[i];
2138 return NULL;
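/* Usage sketch (comment only, not compiled; "abfd" stands for any BFD
   opened for this target):

     reloc_howto_type *a = elf32_arm_reloc_type_lookup (abfd, BFD_RELOC_32);
     reloc_howto_type *b = elf32_arm_reloc_name_lookup (abfd, "r_arm_abs32");

   Both should yield the R_ARM_ABS32 howto, since BFD_RELOC_32 maps to
   R_ARM_ABS32 in elf32_arm_reloc_map and the name comparison uses
   strcasecmp, i.e. it is case-insensitive.  */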
2141 /* Support for core dump NOTE sections. */
2143 static bool
2144 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2146 int offset;
2147 size_t size;
2149 switch (note->descsz)
2151 default:
2152 return false;
2154 case 148: /* Linux/ARM 32-bit. */
2155 /* pr_cursig */
2156 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2158 /* pr_pid */
2159 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2161 /* pr_reg */
2162 offset = 72;
2163 size = 72;
2165 break;
2168 /* Make a ".reg/999" section. */
2169 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2170 size, note->descpos + offset);
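/* For reference: the constants above reflect the Linux/ARM elf_prstatus
   layout this backend assumes, i.e. pr_cursig at byte offset 12, pr_pid
   at byte offset 24, and a 72-byte pr_reg block (18 words of 4 bytes)
   starting at byte offset 72 of the 148-byte descriptor.  */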
2173 static bool
2174 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2176 switch (note->descsz)
2178 default:
2179 return false;
2181 case 124: /* Linux/ARM elf_prpsinfo. */
2182 elf_tdata (abfd)->core->pid
2183 = bfd_get_32 (abfd, note->descdata + 12);
2184 elf_tdata (abfd)->core->program
2185 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2186 elf_tdata (abfd)->core->command
2187 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2190 /* Note that for some reason, a spurious space is tacked
2191 onto the end of the args in some implementations (at least
2192 one anyway), so strip it off if it exists. */
2194 char *command = elf_tdata (abfd)->core->command;
2195 int n = strlen (command);
2197 if (0 < n && command[n - 1] == ' ')
2198 command[n - 1] = '\0';
2201 return true;
2204 static char *
2205 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2206 int note_type, ...)
2208 switch (note_type)
2210 default:
2211 return NULL;
2213 case NT_PRPSINFO:
2215 char data[124] ATTRIBUTE_NONSTRING;
2216 va_list ap;
2218 va_start (ap, note_type);
2219 memset (data, 0, sizeof (data));
2220 strncpy (data + 28, va_arg (ap, const char *), 16);
2221 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2222 DIAGNOSTIC_PUSH;
2223 /* GCC 8.0 and 8.1 warn when the bound of 80 equals the destination size with
2224 -Wstringop-truncation:
2225 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
2227 DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
2228 #endif
2229 strncpy (data + 44, va_arg (ap, const char *), 80);
2230 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2231 DIAGNOSTIC_POP;
2232 #endif
2233 va_end (ap);
2235 return elfcore_write_note (abfd, buf, bufsiz,
2236 "CORE", note_type, data, sizeof (data));
2239 case NT_PRSTATUS:
2241 char data[148];
2242 va_list ap;
2243 long pid;
2244 int cursig;
2245 const void *greg;
2247 va_start (ap, note_type);
2248 memset (data, 0, sizeof (data));
2249 pid = va_arg (ap, long);
2250 bfd_put_32 (abfd, pid, data + 24);
2251 cursig = va_arg (ap, int);
2252 bfd_put_16 (abfd, cursig, data + 12);
2253 greg = va_arg (ap, const void *);
2254 memcpy (data + 72, greg, 72);
2255 va_end (ap);
2257 return elfcore_write_note (abfd, buf, bufsiz,
2258 "CORE", note_type, data, sizeof (data));
2263 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2264 #define TARGET_LITTLE_NAME "elf32-littlearm"
2265 #define TARGET_BIG_SYM arm_elf32_be_vec
2266 #define TARGET_BIG_NAME "elf32-bigarm"
2268 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2269 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2270 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2272 typedef unsigned long int insn32;
2273 typedef unsigned short int insn16;
2275 /* In lieu of proper flags, assume all EABIv4 or later objects are
2276 interworkable. */
2277 #define INTERWORK_FLAG(abfd) \
2278 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2279 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2280 || ((abfd)->flags & BFD_LINKER_CREATED))
2282 /* The linker script knows the section names for placement.
2283 The entry_names are used to do simple name mangling on the stubs.
2284 Given a function name, and its type, the stub can be found. The
2285 name can be changed. The only requirement is that the %s be present. */
2286 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2287 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2289 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2290 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2292 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2293 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2295 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2296 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2298 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2299 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2301 #define STUB_ENTRY_NAME "__%s_veneer"
2303 #define CMSE_PREFIX "__acle_se_"
2305 #define CMSE_STUB_NAME ".gnu.sgstubs"
2307 /* The name of the dynamic interpreter. This is put in the .interp
2308 section. */
2309 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2311 /* FDPIC default stack size. */
2312 #define DEFAULT_STACK_SIZE 0x8000
2314 static const unsigned long tls_trampoline [] =
2316 0xe08e0000, /* add r0, lr, r0 */
2317 0xe5901004, /* ldr r1, [r0,#4] */
2318 0xe12fff11, /* bx r1 */
2321 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2323 0xe52d2004, /* push {r2} */
2324 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2325 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2326 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2327 0xe081100f, /* 2: add r1, pc */
2328 0xe12fff12, /* bx r2 */
2329 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2330 + dl_tlsdesc_lazy_resolver(GOT) */
2331 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2334 /* NOTE: [Thumb nop sequence]
2335 When adding code that transitions from Thumb to Arm, the instruction
2336 used for the alignment padding should be 0xe7fd (b .-2) rather than
2337 a nop, for performance reasons. */
2339 /* ARM FDPIC PLT entry. */
2340 /* The last 5 words contain PLT lazy fragment code and data. */
2341 static const bfd_vma elf32_arm_fdpic_plt_entry [] =
2343 0xe59fc008, /* ldr r12, .L1 */
2344 0xe08cc009, /* add r12, r12, r9 */
2345 0xe59c9004, /* ldr r9, [r12, #4] */
2346 0xe59cf000, /* ldr pc, [r12] */
2347 0x00000000, /* L1. .word foo(GOTOFFFUNCDESC) */
2348 0x00000000, /* L1. .word foo(funcdesc_value_reloc_offset) */
2349 0xe51fc00c, /* ldr r12, [pc, #-12] */
2350 0xe92d1000, /* push {r12} */
2351 0xe599c004, /* ldr r12, [r9, #4] */
2352 0xe599f000, /* ldr pc, [r9] */
2355 /* Thumb FDPIC PLT entry. */
2356 /* The last 5 words contain PLT lazy fragment code and data. */
2357 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
2359 0xc00cf8df, /* ldr.w r12, .L1 */
2360 0x0c09eb0c, /* add.w r12, r12, r9 */
2361 0x9004f8dc, /* ldr.w r9, [r12, #4] */
2362 0xf000f8dc, /* ldr.w pc, [r12] */
2363 0x00000000, /* .L1 .word foo(GOTOFFFUNCDESC) */
2364 0x00000000, /* .L2 .word foo(funcdesc_value_reloc_offset) */
2365 0xc008f85f, /* ldr.w r12, .L2 */
2366 0xcd04f84d, /* push {r12} */
2367 0xc004f8d9, /* ldr.w r12, [r9, #4] */
2368 0xf000f8d9, /* ldr.w pc, [r9] */
2371 #ifdef FOUR_WORD_PLT
2373 /* The first entry in a procedure linkage table looks like
2374 this. It is set up so that any shared library function that is
2375 called before the relocation has been set up calls the dynamic
2376 linker first. */
2377 static const bfd_vma elf32_arm_plt0_entry [] =
2379 0xe52de004, /* str lr, [sp, #-4]! */
2380 0xe59fe010, /* ldr lr, [pc, #16] */
2381 0xe08fe00e, /* add lr, pc, lr */
2382 0xe5bef008, /* ldr pc, [lr, #8]! */
2385 /* Subsequent entries in a procedure linkage table look like
2386 this. */
2387 static const bfd_vma elf32_arm_plt_entry [] =
2389 0xe28fc600, /* add ip, pc, #NN */
2390 0xe28cca00, /* add ip, ip, #NN */
2391 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2392 0x00000000, /* unused */
2395 #else /* not FOUR_WORD_PLT */
2397 /* The first entry in a procedure linkage table looks like
2398 this. It is set up so that any shared library function that is
2399 called before the relocation has been set up calls the dynamic
2400 linker first. */
2401 static const bfd_vma elf32_arm_plt0_entry [] =
2403 0xe52de004, /* str lr, [sp, #-4]! */
2404 0xe59fe004, /* ldr lr, [pc, #4] */
2405 0xe08fe00e, /* add lr, pc, lr */
2406 0xe5bef008, /* ldr pc, [lr, #8]! */
2407 0x00000000, /* &GOT[0] - . */
2410 /* By default subsequent entries in a procedure linkage table look like
2411 this. Offsets that don't fit into 28 bits will cause a link error. */
2412 static const bfd_vma elf32_arm_plt_entry_short [] =
2414 0xe28fc600, /* add ip, pc, #0xNN00000 */
2415 0xe28cca00, /* add ip, ip, #0xNN000 */
2416 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
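/* Sketch (an assumption about the PLT-populating code, kept here only as
   a reading aid): the #NN immediates above are filled from the
   displacement between the PLT entry and its GOT slot, split into an
   8-bit, an 8-bit and a 12-bit field, roughly

     insn[0] |= (got_displacement & 0x0ff00000) >> 20;   => #0xNN00000
     insn[1] |= (got_displacement & 0x000ff000) >> 12;   => #0xNN000
     insn[2] |=  got_displacement & 0x00000fff;          => #0xNNN

   which is why displacements wider than 28 bits need the "long" form
   below.  */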
2419 /* When explicitly asked, we'll use this "long" entry format
2420 which can cope with arbitrary displacements. */
2421 static const bfd_vma elf32_arm_plt_entry_long [] =
2423 0xe28fc200, /* add ip, pc, #0xN0000000 */
2424 0xe28cc600, /* add ip, ip, #0xNN00000 */
2425 0xe28cca00, /* add ip, ip, #0xNN000 */
2426 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2429 static bool elf32_arm_use_long_plt_entry = false;
2431 #endif /* not FOUR_WORD_PLT */
2433 /* The first entry in a procedure linkage table looks like this.
2434 It is set up so that any shared library function that is called before the
2435 relocation has been set up calls the dynamic linker first. */
2436 static const bfd_vma elf32_thumb2_plt0_entry [] =
2438 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2439 an instruction may be encoded as one or two array elements. */
2440 0xf8dfb500, /* push {lr} */
2441 0x44fee008, /* ldr.w lr, [pc, #8] */
2442 /* add lr, pc */
2443 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2444 0x00000000, /* &GOT[0] - . */
2447 /* Subsequent entries in a procedure linkage table for thumb only target
2448 look like this. */
2449 static const bfd_vma elf32_thumb2_plt_entry [] =
2451 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2452 an instruction may be encoded as one or two array elements. */
2453 0x0c00f240, /* movw ip, #0xNNNN */
2454 0x0c00f2c0, /* movt ip, #0xNNNN */
2455 0xf8dc44fc, /* add ip, pc */
2456 0xe7fcf000 /* ldr.w pc, [ip] */
2457 /* b .-4 */
2460 /* The format of the first entry in the procedure linkage table
2461 for a VxWorks executable. */
2462 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2464 0xe52dc008, /* str ip,[sp,#-8]! */
2465 0xe59fc000, /* ldr ip,[pc] */
2466 0xe59cf008, /* ldr pc,[ip,#8] */
2467 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2470 /* The format of subsequent entries in a VxWorks executable. */
2471 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2473 0xe59fc000, /* ldr ip,[pc] */
2474 0xe59cf000, /* ldr pc,[ip] */
2475 0x00000000, /* .long @got */
2476 0xe59fc000, /* ldr ip,[pc] */
2477 0xea000000, /* b _PLT */
2478 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2481 /* The format of entries in a VxWorks shared library. */
2482 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2484 0xe59fc000, /* ldr ip,[pc] */
2485 0xe79cf009, /* ldr pc,[ip,r9] */
2486 0x00000000, /* .long @got */
2487 0xe59fc000, /* ldr ip,[pc] */
2488 0xe599f008, /* ldr pc,[r9,#8] */
2489 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2492 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2493 #define PLT_THUMB_STUB_SIZE 4
2494 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2496 0x4778, /* bx pc */
2497 0xe7fd /* b .-2 */
2500 /* The first entry in a procedure linkage table looks like
2501 this. It is set up so that any shared library function that is
2502 called before the relocation has been set up calls the dynamic
2503 linker first. */
2504 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2506 /* First bundle: */
2507 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2508 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2509 0xe08cc00f, /* add ip, ip, pc */
2510 0xe52dc008, /* str ip, [sp, #-8]! */
2511 /* Second bundle: */
2512 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2513 0xe59cc000, /* ldr ip, [ip] */
2514 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2515 0xe12fff1c, /* bx ip */
2516 /* Third bundle: */
2517 0xe320f000, /* nop */
2518 0xe320f000, /* nop */
2519 0xe320f000, /* nop */
2520 /* .Lplt_tail: */
2521 0xe50dc004, /* str ip, [sp, #-4] */
2522 /* Fourth bundle: */
2523 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2524 0xe59cc000, /* ldr ip, [ip] */
2525 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2526 0xe12fff1c, /* bx ip */
2528 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
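/* That is, .Lplt_tail above is word index 11 of elf32_arm_nacl_plt0_entry,
   so the shared tail starts 44 bytes into the initial entry; the
   per-symbol entries below end with a branch back to it.  */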
2530 /* Subsequent entries in a procedure linkage table look like this. */
2531 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2533 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2534 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2535 0xe08cc00f, /* add ip, ip, pc */
2536 0xea000000, /* b .Lplt_tail */
2539 /* PR 28924:
2540 There was a bug due to too high values of THM_MAX_FWD_BRANCH_OFFSET and
2541 THM2_MAX_FWD_BRANCH_OFFSET. The first macro concerns the case when Thumb-2
2542 is not available, and the second macro the case when Thumb-2 is available. Among other
2543 things, they affect the range of branches represented as BLX instructions
2544 in Encoding T2 defined in Section A8.8.25 of the ARM Architecture
2545 Reference Manual ARMv7-A and ARMv7-R edition issue C.d. Such branches are
2546 specified there to have a maximum forward offset that is a multiple of 4.
2547 Previously, the respective values defined here were multiples of 2 but not
2548 4 and they are included in comments for reference. */
2549 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2550 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2551 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 4 + 4)
2552 /* #def THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4) */
2553 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2554 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 4) + 4)
2555 /* #def THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4) */
2556 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2557 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) - 2) + 4)
2558 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
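/* Worked example for the ARM-state limits above: an ARM B/BL encodes a
   signed 24-bit immediate that is shifted left by 2 and applied relative
   to PC = insn_address + 8, so the furthest forward reach is
   ((1 << 23) - 1) * 4 + 8 = 33554436 bytes and the furthest backward
   reach is -(1 << 23) * 4 + 8 = -33554424 bytes, i.e. exactly
   ARM_MAX_FWD_BRANCH_OFFSET and ARM_MAX_BWD_BRANCH_OFFSET.  */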
2560 enum stub_insn_type
2562 THUMB16_TYPE = 1,
2563 THUMB32_TYPE,
2564 ARM_TYPE,
2565 DATA_TYPE
2568 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2569 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2570 is inserted in arm_build_one_stub(). */
2571 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2572 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2573 #define THUMB32_MOVT(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2574 #define THUMB32_MOVW(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2575 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2576 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2577 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2578 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2580 typedef struct
2582 bfd_vma data;
2583 enum stub_insn_type type;
2584 unsigned int r_type;
2585 int reloc_addend;
2586 } insn_sequence;
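/* For example, THUMB16_INSN (0x4778) above is just the initializer
   {0x4778, THUMB16_TYPE, R_ARM_NONE, 0}, and DATA_WORD (0, R_ARM_ABS32, 0)
   is {0, DATA_TYPE, R_ARM_ABS32, 0}; each stub template below is simply
   an array of such insn_sequence entries, emitted in order when the stub
   is built.  */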
2588 /* See note [Thumb nop sequence] when adding a veneer. */
2590 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2591 to reach the stub if necessary. */
2592 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2594 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2595 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2598 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2599 available. */
2600 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2602 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2603 ARM_INSN (0xe12fff1c), /* bx ip */
2604 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2607 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2608 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2610 THUMB16_INSN (0xb401), /* push {r0} */
2611 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2612 THUMB16_INSN (0x4684), /* mov ip, r0 */
2613 THUMB16_INSN (0xbc01), /* pop {r0} */
2614 THUMB16_INSN (0x4760), /* bx ip */
2615 THUMB16_INSN (0xbf00), /* nop */
2616 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2619 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */
2620 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2622 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */
2623 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(x) */
2626 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2627 M-profile architectures. */
2628 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
2630 THUMB32_MOVW (0xf2400c00), /* mov.w ip, R_ARM_MOVW_ABS_NC */
2631 THUMB32_MOVT (0xf2c00c00), /* movt ip, R_ARM_MOVT_ABS << 16 */
2632 THUMB16_INSN (0x4760), /* bx ip */
2635 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2636 allowed. */
2637 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2639 THUMB16_INSN (0x4778), /* bx pc */
2640 THUMB16_INSN (0xe7fd), /* b .-2 */
2641 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2642 ARM_INSN (0xe12fff1c), /* bx ip */
2643 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2646 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2647 available. */
2648 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2650 THUMB16_INSN (0x4778), /* bx pc */
2651 THUMB16_INSN (0xe7fd), /* b .-2 */
2652 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2653 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2656 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2657 one, when the destination is close enough. */
2658 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2660 THUMB16_INSN (0x4778), /* bx pc */
2661 THUMB16_INSN (0xe7fd), /* b .-2 */
2662 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2665 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2666 blx to reach the stub if necessary. */
2667 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2669 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2670 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2671 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2674 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2675 blx to reach the stub if necessary. We cannot add into pc;
2676 it is not guaranteed to mode switch (different in ARMv6 and
2677 ARMv7). */
2678 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2680 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2681 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2682 ARM_INSN (0xe12fff1c), /* bx ip */
2683 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2686 /* V4T ARM -> ARM long branch stub, PIC. */
2687 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2689 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2690 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2691 ARM_INSN (0xe12fff1c), /* bx ip */
2692 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2695 /* V4T Thumb -> ARM long branch stub, PIC. */
2696 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2698 THUMB16_INSN (0x4778), /* bx pc */
2699 THUMB16_INSN (0xe7fd), /* b .-2 */
2700 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2701 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2702 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2705 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2706 architectures. */
2707 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2709 THUMB16_INSN (0xb401), /* push {r0} */
2710 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2711 THUMB16_INSN (0x46fc), /* mov ip, pc */
2712 THUMB16_INSN (0x4484), /* add ip, r0 */
2713 THUMB16_INSN (0xbc01), /* pop {r0} */
2714 THUMB16_INSN (0x4760), /* bx ip */
2715 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2718 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2719 allowed. */
2720 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2722 THUMB16_INSN (0x4778), /* bx pc */
2723 THUMB16_INSN (0xe7fd), /* b .-2 */
2724 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2725 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2726 ARM_INSN (0xe12fff1c), /* bx ip */
2727 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2730 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2731 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2732 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2734 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2735 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2736 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2739 /* V4T Thumb -> TLS trampoline. Lowest common denominator, which is a
2740 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2741 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2743 THUMB16_INSN (0x4778), /* bx pc */
2744 THUMB16_INSN (0xe7fd), /* b .-2 */
2745 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2746 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2747 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2750 /* NaCl ARM -> ARM long branch stub. */
2751 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2753 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2754 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2755 ARM_INSN (0xe12fff1c), /* bx ip */
2756 ARM_INSN (0xe320f000), /* nop */
2757 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2758 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2759 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2760 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2763 /* NaCl ARM -> ARM long branch stub, PIC. */
2764 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2766 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2767 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2768 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2769 ARM_INSN (0xe12fff1c), /* bx ip */
2770 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2771 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
2772 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2773 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2776 /* Stub used for transition to secure state (aka SG veneer). */
2777 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
2779 THUMB32_INSN (0xe97fe97f), /* sg. */
2780 THUMB32_B_INSN (0xf000b800, -4), /* b.w original_branch_dest. */
2784 /* Cortex-A8 erratum-workaround stubs. */
2786 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2787 can't use a conditional branch to reach this stub). */
2789 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2791 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2792 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2793 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2796 /* Stub used for b.w and bl.w instructions. */
2798 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2800 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2803 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2805 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2808 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2809 instruction (which switches to ARM mode) to point to this stub. Jump to the
2810 real destination using an ARM-mode branch. */
2812 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2814 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2817 /* For each section group there can be a specially created linker section
2818 to hold the stubs for that group. The name of the stub section is based
2819 upon the name of another section within that group with the suffix below
2820 applied.
2822 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2823 create what appeared to be a linker stub section when it actually
2824 contained user code/data. For example, consider this fragment:
2826 const char * stubborn_problems[] = { "np" };
2828 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2829 section called:
2831 .data.rel.local.stubborn_problems
2833 This then causes problems in elf32_arm_build_stubs() as it triggers:
2835 // Ignore non-stub sections.
2836 if (!strstr (stub_sec->name, STUB_SUFFIX))
2837 continue;
2839 And so the section would be ignored instead of being processed. Hence
2840 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2841 C identifier. */
2842 #define STUB_SUFFIX ".__stub"
2844 /* One entry per long/short branch stub defined above. */
2845 #define DEF_STUBS \
2846 DEF_STUB (long_branch_any_any) \
2847 DEF_STUB (long_branch_v4t_arm_thumb) \
2848 DEF_STUB (long_branch_thumb_only) \
2849 DEF_STUB (long_branch_v4t_thumb_thumb) \
2850 DEF_STUB (long_branch_v4t_thumb_arm) \
2851 DEF_STUB (short_branch_v4t_thumb_arm) \
2852 DEF_STUB (long_branch_any_arm_pic) \
2853 DEF_STUB (long_branch_any_thumb_pic) \
2854 DEF_STUB (long_branch_v4t_thumb_thumb_pic) \
2855 DEF_STUB (long_branch_v4t_arm_thumb_pic) \
2856 DEF_STUB (long_branch_v4t_thumb_arm_pic) \
2857 DEF_STUB (long_branch_thumb_only_pic) \
2858 DEF_STUB (long_branch_any_tls_pic) \
2859 DEF_STUB (long_branch_v4t_thumb_tls_pic) \
2860 DEF_STUB (long_branch_arm_nacl) \
2861 DEF_STUB (long_branch_arm_nacl_pic) \
2862 DEF_STUB (cmse_branch_thumb_only) \
2863 DEF_STUB (a8_veneer_b_cond) \
2864 DEF_STUB (a8_veneer_b) \
2865 DEF_STUB (a8_veneer_bl) \
2866 DEF_STUB (a8_veneer_blx) \
2867 DEF_STUB (long_branch_thumb2_only) \
2868 DEF_STUB (long_branch_thumb2_only_pure)
2870 #define DEF_STUB(x) arm_stub_##x,
2871 enum elf32_arm_stub_type
2873 arm_stub_none,
2874 DEF_STUBS
2875 max_stub_type
2877 #undef DEF_STUB
2879 /* Note the first a8_veneer type. */
2880 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2882 typedef struct
2884 const insn_sequence* template_sequence;
2885 int template_size;
2886 } stub_def;
2888 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2889 static const stub_def stub_definitions[] =
2891 {NULL, 0},
2892 DEF_STUBS
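/* Expansion sketch: with the second DEF_STUB definition, the initializer
   above becomes

     { {NULL, 0},
       {elf32_arm_stub_long_branch_any_any,
        ARRAY_SIZE (elf32_arm_stub_long_branch_any_any)},
       ... };

   keeping stub_definitions[] index-compatible with enum
   elf32_arm_stub_type, with the {NULL, 0} slot standing in for
   arm_stub_none.  */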
2895 struct elf32_arm_stub_hash_entry
2897 /* Base hash table entry structure. */
2898 struct bfd_hash_entry root;
2900 /* The stub section. */
2901 asection *stub_sec;
2903 /* Offset within stub_sec of the beginning of this stub. */
2904 bfd_vma stub_offset;
2906 /* Given the symbol's value and its section we can determine its final
2907 value when building the stubs (so the stub knows where to jump). */
2908 bfd_vma target_value;
2909 asection *target_section;
2911 /* Same as above but for the source of the branch to the stub. Used for
2912 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2913 such, the source section does not need to be recorded since Cortex-A8 erratum
2914 workaround stubs are only generated when both source and target are in the
2915 same section. */
2916 bfd_vma source_value;
2918 /* The instruction which caused this stub to be generated (only valid for
2919 Cortex-A8 erratum workaround stubs at present). */
2920 unsigned long orig_insn;
2922 /* The stub type. */
2923 enum elf32_arm_stub_type stub_type;
2924 /* Its encoding size in bytes. */
2925 int stub_size;
2926 /* Its template. */
2927 const insn_sequence *stub_template;
2928 /* The size of the template (number of entries). */
2929 int stub_template_size;
2931 /* The symbol table entry, if any, that this was derived from. */
2932 struct elf32_arm_link_hash_entry *h;
2934 /* Type of branch. */
2935 enum arm_st_branch_type branch_type;
2937 /* Where this stub is being called from, or, in the case of combined
2938 stub sections, the first input section in the group. */
2939 asection *id_sec;
2941 /* The name for the local symbol at the start of this stub. The
2942 stub name in the hash table has to be unique; this does not, so
2943 it can be friendlier. */
2944 char *output_name;
2947 /* Used to build a map of a section. This is required for mixed-endian
2948 code/data. */
2950 typedef struct elf32_elf_section_map
2952 bfd_vma vma;
2953 char type;
2955 elf32_arm_section_map;
2957 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2959 typedef enum
2961 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2962 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2963 VFP11_ERRATUM_ARM_VENEER,
2964 VFP11_ERRATUM_THUMB_VENEER
2966 elf32_vfp11_erratum_type;
2968 typedef struct elf32_vfp11_erratum_list
2970 struct elf32_vfp11_erratum_list *next;
2971 bfd_vma vma;
2972 union
2974 struct
2976 struct elf32_vfp11_erratum_list *veneer;
2977 unsigned int vfp_insn;
2978 } b;
2979 struct
2981 struct elf32_vfp11_erratum_list *branch;
2982 unsigned int id;
2983 } v;
2984 } u;
2985 elf32_vfp11_erratum_type type;
2987 elf32_vfp11_erratum_list;
2989 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2990 veneer. */
2991 typedef enum
2993 STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2994 STM32L4XX_ERRATUM_VENEER
2996 elf32_stm32l4xx_erratum_type;
2998 typedef struct elf32_stm32l4xx_erratum_list
3000 struct elf32_stm32l4xx_erratum_list *next;
3001 bfd_vma vma;
3002 union
3004 struct
3006 struct elf32_stm32l4xx_erratum_list *veneer;
3007 unsigned int insn;
3008 } b;
3009 struct
3011 struct elf32_stm32l4xx_erratum_list *branch;
3012 unsigned int id;
3013 } v;
3014 } u;
3015 elf32_stm32l4xx_erratum_type type;
3017 elf32_stm32l4xx_erratum_list;
3019 typedef enum
3021 DELETE_EXIDX_ENTRY,
3022 INSERT_EXIDX_CANTUNWIND_AT_END
3024 arm_unwind_edit_type;
3026 /* A (sorted) list of edits to apply to an unwind table. */
3027 typedef struct arm_unwind_table_edit
3029 arm_unwind_edit_type type;
3030 /* Note: we sometimes want to insert an unwind entry corresponding to a
3031 section different from the one we're currently writing out, so record the
3032 (text) section this edit relates to here. */
3033 asection *linked_section;
3034 unsigned int index;
3035 struct arm_unwind_table_edit *next;
3037 arm_unwind_table_edit;
3039 typedef struct _arm_elf_section_data
3041 /* Information about mapping symbols. */
3042 struct bfd_elf_section_data elf;
3043 unsigned int mapcount;
3044 unsigned int mapsize;
3045 elf32_arm_section_map *map;
3046 /* Information about CPU errata. */
3047 unsigned int erratumcount;
3048 elf32_vfp11_erratum_list *erratumlist;
3049 unsigned int stm32l4xx_erratumcount;
3050 elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
3051 unsigned int additional_reloc_count;
3052 /* Information about unwind tables. */
3053 union
3055 /* Unwind info attached to a text section. */
3056 struct
3058 asection *arm_exidx_sec;
3059 } text;
3061 /* Unwind info attached to an .ARM.exidx section. */
3062 struct
3064 arm_unwind_table_edit *unwind_edit_list;
3065 arm_unwind_table_edit *unwind_edit_tail;
3066 } exidx;
3067 } u;
3069 _arm_elf_section_data;
3071 #define elf32_arm_section_data(sec) \
3072 ((_arm_elf_section_data *) elf_section_data (sec))
3074 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3075 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3076 so may be created multiple times: we use an array of these entries whilst
3077 relaxing which we can refresh easily, then create stubs for each potentially
3078 erratum-triggering instruction once we've settled on a solution. */
3080 struct a8_erratum_fix
3082 bfd *input_bfd;
3083 asection *section;
3084 bfd_vma offset;
3085 bfd_vma target_offset;
3086 unsigned long orig_insn;
3087 char *stub_name;
3088 enum elf32_arm_stub_type stub_type;
3089 enum arm_st_branch_type branch_type;
3092 /* A table of relocs applied to branches which might trigger Cortex-A8
3093 erratum. */
3095 struct a8_erratum_reloc
3097 bfd_vma from;
3098 bfd_vma destination;
3099 struct elf32_arm_link_hash_entry *hash;
3100 const char *sym_name;
3101 unsigned int r_type;
3102 enum arm_st_branch_type branch_type;
3103 bool non_a8_stub;
3106 /* The size of the thread control block. */
3107 #define TCB_SIZE 8
3109 /* ARM-specific information about a PLT entry, over and above the usual
3110 gotplt_union. */
3111 struct arm_plt_info
3113 /* We reference count Thumb references to a PLT entry separately,
3114 so that we can emit the Thumb trampoline only if needed. */
3115 bfd_signed_vma thumb_refcount;
3117 /* Some references from Thumb code may be eliminated by BL->BLX
3118 conversion, so record them separately. */
3119 bfd_signed_vma maybe_thumb_refcount;
3121 /* How many of the recorded PLT accesses were from non-call relocations.
3122 This information is useful when deciding whether anything takes the
3123 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
3124 non-call references to the function should resolve directly to the
3125 real runtime target. */
3126 unsigned int noncall_refcount;
3128 /* Since PLT entries have variable size if the Thumb prologue is
3129 used, we need to record the index into .got.plt instead of
3130 recomputing it from the PLT offset. */
3131 bfd_signed_vma got_offset;
3134 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
3135 struct arm_local_iplt_info
3137 /* The information that is usually found in the generic ELF part of
3138 the hash table entry. */
3139 union gotplt_union root;
3141 /* The information that is usually found in the ARM-specific part of
3142 the hash table entry. */
3143 struct arm_plt_info arm;
3145 /* A list of all potential dynamic relocations against this symbol. */
3146 struct elf_dyn_relocs *dyn_relocs;
3149 /* Structure to handle FDPIC support for local functions. */
3150 struct fdpic_local
3152 unsigned int funcdesc_cnt;
3153 unsigned int gotofffuncdesc_cnt;
3154 int funcdesc_offset;
3157 struct elf_arm_obj_tdata
3159 struct elf_obj_tdata root;
3161 /* Zero to warn when linking objects with incompatible enum sizes. */
3162 int no_enum_size_warning;
3164 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
3165 int no_wchar_size_warning;
3167 /* The number of entries in each of the arrays in this structure.
3168 Used to avoid buffer overruns. */
3169 bfd_size_type num_entries;
3171 /* tls_type for each local got entry. */
3172 char *local_got_tls_type;
3174 /* GOTPLT entries for TLS descriptors. */
3175 bfd_vma *local_tlsdesc_gotent;
3177 /* Information for local symbols that need entries in .iplt. */
3178 struct arm_local_iplt_info **local_iplt;
3180 /* Maintains FDPIC counters and funcdesc info. */
3181 struct fdpic_local *local_fdpic_cnts;
3184 #define elf_arm_tdata(bfd) \
3185 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3187 #define elf32_arm_num_entries(bfd) \
3188 (elf_arm_tdata (bfd)->num_entries)
3190 #define elf32_arm_local_got_tls_type(bfd) \
3191 (elf_arm_tdata (bfd)->local_got_tls_type)
3193 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3194 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3196 #define elf32_arm_local_iplt(bfd) \
3197 (elf_arm_tdata (bfd)->local_iplt)
3199 #define elf32_arm_local_fdpic_cnts(bfd) \
3200 (elf_arm_tdata (bfd)->local_fdpic_cnts)
3202 #define is_arm_elf(bfd) \
3203 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3204 && elf_tdata (bfd) != NULL \
3205 && elf_object_id (bfd) == ARM_ELF_DATA)
3207 static bool
3208 elf32_arm_mkobject (bfd *abfd)
3210 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
3211 ARM_ELF_DATA);
3214 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3216 /* Structure to handle FDPIC support for extern functions. */
3217 struct fdpic_global {
3218 unsigned int gotofffuncdesc_cnt;
3219 unsigned int gotfuncdesc_cnt;
3220 unsigned int funcdesc_cnt;
3221 int funcdesc_offset;
3222 int gotfuncdesc_offset;
3225 /* Arm ELF linker hash entry. */
3226 struct elf32_arm_link_hash_entry
3228 struct elf_link_hash_entry root;
3230 /* ARM-specific PLT information. */
3231 struct arm_plt_info plt;
3233 #define GOT_UNKNOWN 0
3234 #define GOT_NORMAL 1
3235 #define GOT_TLS_GD 2
3236 #define GOT_TLS_IE 4
3237 #define GOT_TLS_GDESC 8
3238 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
3239 unsigned int tls_type : 8;
3241 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
3242 unsigned int is_iplt : 1;
3244 unsigned int unused : 23;
3246 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3247 starting at the end of the jump table. */
3248 bfd_vma tlsdesc_got;
3250 /* The symbol marking the real symbol location for exported thumb
3251 symbols with Arm stubs. */
3252 struct elf_link_hash_entry *export_glue;
3254 /* A pointer to the most recently used stub hash entry against this
3255 symbol. */
3256 struct elf32_arm_stub_hash_entry *stub_cache;
3258 /* Counter for FDPIC relocations against this symbol. */
3259 struct fdpic_global fdpic_cnts;
3262 /* Traverse an arm ELF linker hash table. */
3263 #define elf32_arm_link_hash_traverse(table, func, info) \
3264 (elf_link_hash_traverse \
3265 (&(table)->root, \
3266 (bool (*) (struct elf_link_hash_entry *, void *)) (func), \
3267 (info)))
3269 /* Get the ARM elf linker hash table from a link_info structure. */
3270 #define elf32_arm_hash_table(p) \
3271 ((is_elf_hash_table ((p)->hash) \
3272 && elf_hash_table_id (elf_hash_table (p)) == ARM_ELF_DATA) \
3273 ? (struct elf32_arm_link_hash_table *) (p)->hash : NULL)
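/* Typical use (sketch, comment only): backend routines start with

     struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
     if (htab == NULL)
       return false;

   because the macro deliberately yields NULL whenever the generic hash
   table attached to the link is not an ARM one (ARM_ELF_DATA).  */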
3275 #define arm_stub_hash_lookup(table, string, create, copy) \
3276 ((struct elf32_arm_stub_hash_entry *) \
3277 bfd_hash_lookup ((table), (string), (create), (copy)))
3279 /* Array to keep track of which stub sections have been created, and
3280 information on stub grouping. */
3281 struct map_stub
3283 /* This is the section to which stubs in the group will be
3284 attached. */
3285 asection *link_sec;
3286 /* The stub section. */
3287 asection *stub_sec;
3290 #define elf32_arm_compute_jump_table_size(htab) \
3291 ((htab)->next_tls_desc_index * 4)
3293 /* ARM ELF linker hash table. */
3294 struct elf32_arm_link_hash_table
3296 /* The main hash table. */
3297 struct elf_link_hash_table root;
3299 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3300 bfd_size_type thumb_glue_size;
3302 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3303 bfd_size_type arm_glue_size;
3305 /* The size in bytes of section containing the ARMv4 BX veneers. */
3306 bfd_size_type bx_glue_size;
3308 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3309 veneer has been populated. */
3310 bfd_vma bx_glue_offset[15];
3312 /* The size in bytes of the section containing glue for VFP11 erratum
3313 veneers. */
3314 bfd_size_type vfp11_erratum_glue_size;
3316 /* The size in bytes of the section containing glue for STM32L4XX erratum
3317 veneers. */
3318 bfd_size_type stm32l4xx_erratum_glue_size;
3320 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3321 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3322 elf32_arm_write_section(). */
3323 struct a8_erratum_fix *a8_erratum_fixes;
3324 unsigned int num_a8_erratum_fixes;
3326 /* An arbitrary input BFD chosen to hold the glue sections. */
3327 bfd * bfd_of_glue_owner;
3329 /* Nonzero to output a BE8 image. */
3330 int byteswap_code;
3332 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3333 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3334 int target1_is_rel;
3336 /* The relocation to use for R_ARM_TARGET2 relocations. */
3337 int target2_reloc;
3339 /* 0 = Ignore R_ARM_V4BX.
3340 1 = Convert BX to MOV PC.
3341 2 = Generate v4 interworking stubs. */
3342 int fix_v4bx;
3344 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3345 int fix_cortex_a8;
3347 /* Whether we should fix the ARM1176 BLX immediate issue. */
3348 int fix_arm1176;
3350 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3351 int use_blx;
3353 /* What sort of code sequences we should look for which may trigger the
3354 VFP11 denorm erratum. */
3355 bfd_arm_vfp11_fix vfp11_fix;
3357 /* Global counter for the number of fixes we have emitted. */
3358 int num_vfp11_fixes;
3360 /* What sort of code sequences we should look for which may trigger the
3361 STM32L4XX erratum. */
3362 bfd_arm_stm32l4xx_fix stm32l4xx_fix;
3364 /* Global counter for the number of fixes we have emitted. */
3365 int num_stm32l4xx_fixes;
3367 /* Nonzero to force PIC branch veneers. */
3368 int pic_veneer;
3370 /* The number of bytes in the initial entry in the PLT. */
3371 bfd_size_type plt_header_size;
3373 /* The number of bytes in the subsequent PLT entries. */
3374 bfd_size_type plt_entry_size;
3376 /* True if the target uses REL relocations. */
3377 bool use_rel;
3379 /* Nonzero if import library must be a secure gateway import library
3380 as per ARMv8-M Security Extensions. */
3381 int cmse_implib;
3383 /* The import library whose symbols' address must remain stable in
3384 the import library generated. */
3385 bfd *in_implib_bfd;
3387 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3388 bfd_vma next_tls_desc_index;
3390 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3391 bfd_vma num_tls_desc;
3393 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3394 asection *srelplt2;
3396 /* Offset in .plt section of tls_arm_trampoline. */
3397 bfd_vma tls_trampoline;
3399 /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
3400 union
3402 bfd_signed_vma refcount;
3403 bfd_vma offset;
3404 } tls_ldm_got;
3406 /* For convenience in allocate_dynrelocs. */
3407 bfd * obfd;
3409 /* The amount of space used by the reserved portion of the sgotplt
3410 section, plus whatever space is used by the jump slots. */
3411 bfd_vma sgotplt_jump_table_size;
3413 /* The stub hash table. */
3414 struct bfd_hash_table stub_hash_table;
3416 /* Linker stub bfd. */
3417 bfd *stub_bfd;
3419 /* Linker call-backs. */
3420 asection * (*add_stub_section) (const char *, asection *, asection *,
3421 unsigned int);
3422 void (*layout_sections_again) (void);
3424 /* Array to keep track of which stub sections have been created, and
3425 information on stub grouping. */
3426 struct map_stub *stub_group;
3428 /* Input stub section holding secure gateway veneers. */
3429 asection *cmse_stub_sec;
3431 /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3432 start to be allocated. */
3433 bfd_vma new_cmse_stub_offset;
3435 /* Number of elements in stub_group. */
3436 unsigned int top_id;
3438 /* Assorted information used by elf32_arm_size_stubs. */
3439 unsigned int bfd_count;
3440 unsigned int top_index;
3441 asection **input_list;
3443 /* True if the target system uses FDPIC. */
3444 int fdpic_p;
3446 /* Fixup section. Used for FDPIC. */
3447 asection *srofixup;
3450 /* Add an FDPIC read-only fixup. */
3451 static void
3452 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3454 bfd_vma fixup_offset;
3456 fixup_offset = srofixup->reloc_count++ * 4;
3457 BFD_ASSERT (fixup_offset < srofixup->size);
3458 bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
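/* Note: srofixup->reloc_count is reused above as a running index of how
   many 4-byte fixup slots have been written; each slot simply records an
   address inside the image that the FDPIC runtime loader must adjust
   once the segments receive their final addresses.  */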
3461 static inline int
3462 ctz (unsigned int mask)
3464 #if GCC_VERSION >= 3004
3465 return __builtin_ctz (mask);
3466 #else
3467 unsigned int i;
3469 for (i = 0; i < 8 * sizeof (mask); i++)
3471 if (mask & 0x1)
3472 break;
3473 mask = (mask >> 1);
3475 return i;
3476 #endif
3479 static inline int
3480 elf32_arm_popcount (unsigned int mask)
3482 #if GCC_VERSION >= 3004
3483 return __builtin_popcount (mask);
3484 #else
3485 unsigned int i;
3486 int sum = 0;
3488 for (i = 0; i < 8 * sizeof (mask); i++)
3490 if (mask & 0x1)
3491 sum++;
3492 mask = (mask >> 1);
3494 return sum;
3495 #endif
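/* Sanity examples for the fallback loops above: ctz (0x8) == 3 and
   elf32_arm_popcount (0xf0) == 4, matching the GCC builtins; the only
   divergence is ctz (0), which is undefined for __builtin_ctz but
   returns 32 from the portable loop.  */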
3498 static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3499 asection *sreloc, Elf_Internal_Rela *rel);
3501 static void
3502 arm_elf_fill_funcdesc (bfd *output_bfd,
3503 struct bfd_link_info *info,
3504 int *funcdesc_offset,
3505 int dynindx,
3506 int offset,
3507 bfd_vma addr,
3508 bfd_vma dynreloc_value,
3509 bfd_vma seg)
3511 if ((*funcdesc_offset & 1) == 0)
3513 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
3514 asection *sgot = globals->root.sgot;
3516 if (bfd_link_pic (info))
3518 asection *srelgot = globals->root.srelgot;
3519 Elf_Internal_Rela outrel;
3521 outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
3522 outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
3523 outrel.r_addend = 0;
3525 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
3526 bfd_put_32 (output_bfd, addr, sgot->contents + offset);
3527 bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
3529 else
3531 struct elf_link_hash_entry *hgot = globals->root.hgot;
3532 bfd_vma got_value = hgot->root.u.def.value
3533 + hgot->root.u.def.section->output_section->vma
3534 + hgot->root.u.def.section->output_offset;
3536 arm_elf_add_rofixup (output_bfd, globals->srofixup,
3537 sgot->output_section->vma + sgot->output_offset
3538 + offset);
3539 arm_elf_add_rofixup (output_bfd, globals->srofixup,
3540 sgot->output_section->vma + sgot->output_offset
3541 + offset + 4);
3542 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
3543 bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
3545 *funcdesc_offset |= 1;
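/* Note: bit 0 of *funcdesc_offset serves as a "descriptor already
   written" flag; the guard at the top of the function makes later calls
   a no-op once the bit is set, so callers are expected to mask it off
   (offset & ~1) to recover the real GOT offset of the two-word
   descriptor.  */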
3549 /* Create an entry in an ARM ELF linker hash table. */
3551 static struct bfd_hash_entry *
3552 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3553 struct bfd_hash_table * table,
3554 const char * string)
3556 struct elf32_arm_link_hash_entry * ret =
3557 (struct elf32_arm_link_hash_entry *) entry;
3559 /* Allocate the structure if it has not already been allocated by a
3560 subclass. */
3561 if (ret == NULL)
3562 ret = (struct elf32_arm_link_hash_entry *)
3563 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3564 if (ret == NULL)
3565 return (struct bfd_hash_entry *) ret;
3567 /* Call the allocation method of the superclass. */
3568 ret = ((struct elf32_arm_link_hash_entry *)
3569 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3570 table, string));
3571 if (ret != NULL)
3573 ret->tls_type = GOT_UNKNOWN;
3574 ret->tlsdesc_got = (bfd_vma) -1;
3575 ret->plt.thumb_refcount = 0;
3576 ret->plt.maybe_thumb_refcount = 0;
3577 ret->plt.noncall_refcount = 0;
3578 ret->plt.got_offset = -1;
3579 ret->is_iplt = false;
3580 ret->export_glue = NULL;
3582 ret->stub_cache = NULL;
3584 ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
3585 ret->fdpic_cnts.gotfuncdesc_cnt = 0;
3586 ret->fdpic_cnts.funcdesc_cnt = 0;
3587 ret->fdpic_cnts.funcdesc_offset = -1;
3588 ret->fdpic_cnts.gotfuncdesc_offset = -1;
3591 return (struct bfd_hash_entry *) ret;
3594 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3595 symbols. */
3597 static bool
3598 elf32_arm_allocate_local_sym_info (bfd *abfd)
3600 if (elf_local_got_refcounts (abfd) == NULL)
3602 bfd_size_type num_syms;
3604 elf32_arm_num_entries (abfd) = 0;
3606 /* Whilst it might be tempting to allocate a single block of memory and
3607 then divide it up amongst the arrays in the elf_arm_obj_tdata
3608 structure, this interferes with the work of memory checkers looking
3609 for buffer overruns. So allocate each array individually. */
3611 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3613 elf_local_got_refcounts (abfd) = bfd_zalloc
3614 (abfd, num_syms * sizeof (* elf_local_got_refcounts (abfd)));
3616 if (elf_local_got_refcounts (abfd) == NULL)
3617 return false;
3619 elf32_arm_local_tlsdesc_gotent (abfd) = bfd_zalloc
3620 (abfd, num_syms * sizeof (* elf32_arm_local_tlsdesc_gotent (abfd)));
3622 if (elf32_arm_local_tlsdesc_gotent (abfd) == NULL)
3623 return false;
3625 elf32_arm_local_iplt (abfd) = bfd_zalloc
3626 (abfd, num_syms * sizeof (* elf32_arm_local_iplt (abfd)));
3628 if (elf32_arm_local_iplt (abfd) == NULL)
3629 return false;
3631 elf32_arm_local_fdpic_cnts (abfd) = bfd_zalloc
3632 (abfd, num_syms * sizeof (* elf32_arm_local_fdpic_cnts (abfd)));
3634 if (elf32_arm_local_fdpic_cnts (abfd) == NULL)
3635 return false;
3637 elf32_arm_local_got_tls_type (abfd) = bfd_zalloc
3638 (abfd, num_syms * sizeof (* elf32_arm_local_got_tls_type (abfd)));
3640 if (elf32_arm_local_got_tls_type (abfd) == NULL)
3641 return false;
3643 elf32_arm_num_entries (abfd) = num_syms;
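/* Sanity check (GCC only) that the element alignments of the arrays
   allocated above are non-increasing. */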
3645 #if GCC_VERSION >= 3000
3646 BFD_ASSERT (__alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd))
3647 <= __alignof__ (*elf_local_got_refcounts (abfd)));
3648 BFD_ASSERT (__alignof__ (*elf32_arm_local_iplt (abfd))
3649 <= __alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd)));
3650 BFD_ASSERT (__alignof__ (*elf32_arm_local_fdpic_cnts (abfd))
3651 <= __alignof__ (*elf32_arm_local_iplt (abfd)));
3652 BFD_ASSERT (__alignof__ (*elf32_arm_local_got_tls_type (abfd))
3653 <= __alignof__ (*elf32_arm_local_fdpic_cnts (abfd)));
3654 #endif
3656 return true;
3659 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3660 to input bfd ABFD. Create the information if it doesn't already exist.
3661 Return null if an allocation fails. */
3663 static struct arm_local_iplt_info *
3664 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3666 struct arm_local_iplt_info **ptr;
3668 if (!elf32_arm_allocate_local_sym_info (abfd))
3669 return NULL;
3671 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3672 BFD_ASSERT (r_symndx < elf32_arm_num_entries (abfd));
3673 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3674 if (*ptr == NULL)
3675 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3676 return *ptr;
3679 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3680 in ABFD's symbol table. If the symbol is global, H points to its
3681 hash table entry, otherwise H is null.
3683 Return true if the symbol does have PLT information. When returning
3684 true, point *ROOT_PLT at the target-independent reference count/offset
3685 union and *ARM_PLT at the ARM-specific information. */
3687 static bool
3688 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3689 struct elf32_arm_link_hash_entry *h,
3690 unsigned long r_symndx, union gotplt_union **root_plt,
3691 struct arm_plt_info **arm_plt)
3693 struct arm_local_iplt_info *local_iplt;
3695 if (globals->root.splt == NULL && globals->root.iplt == NULL)
3696 return false;
3698 if (h != NULL)
3700 *root_plt = &h->root.plt;
3701 *arm_plt = &h->plt;
3702 return true;
3705 if (elf32_arm_local_iplt (abfd) == NULL)
3706 return false;
3708 if (r_symndx >= elf32_arm_num_entries (abfd))
3709 return false;
3711 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3712 if (local_iplt == NULL)
3713 return false;
3715 *root_plt = &local_iplt->root;
3716 *arm_plt = &local_iplt->arm;
3717 return true;
3720 static bool using_thumb_only (struct elf32_arm_link_hash_table *globals);
3722 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3723 before it. */
3725 static bool
3726 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3727 struct arm_plt_info *arm_plt)
3729 struct elf32_arm_link_hash_table *htab;
3731 htab = elf32_arm_hash_table (info);
3733 return (!using_thumb_only (htab) && (arm_plt->thumb_refcount != 0
3734 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3737 /* Return a pointer to the head of the dynamic reloc list that should
3738 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3739 ABFD's symbol table. Return null if an error occurs. */
3741 static struct elf_dyn_relocs **
3742 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3743 Elf_Internal_Sym *isym)
3745 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3747 struct arm_local_iplt_info *local_iplt;
3749 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3750 if (local_iplt == NULL)
3751 return NULL;
3752 return &local_iplt->dyn_relocs;
3754 else
3756 /* Track dynamic relocs needed for local syms too.
3757 We really need local syms available to do this
3758 easily. Oh well. */
3759 asection *s;
3760 void *vpp;
3762 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3763 if (s == NULL)
3764 return NULL;
3766 vpp = &elf_section_data (s)->local_dynrel;
3767 return (struct elf_dyn_relocs **) vpp;
3771 /* Initialize an entry in the stub hash table. */
3773 static struct bfd_hash_entry *
3774 stub_hash_newfunc (struct bfd_hash_entry *entry,
3775 struct bfd_hash_table *table,
3776 const char *string)
3778 /* Allocate the structure if it has not already been allocated by a
3779 subclass. */
3780 if (entry == NULL)
3782 entry = (struct bfd_hash_entry *)
3783 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3784 if (entry == NULL)
3785 return entry;
3788 /* Call the allocation method of the superclass. */
3789 entry = bfd_hash_newfunc (entry, table, string);
3790 if (entry != NULL)
3792 struct elf32_arm_stub_hash_entry *eh;
3794 /* Initialize the local fields. */
3795 eh = (struct elf32_arm_stub_hash_entry *) entry;
3796 eh->stub_sec = NULL;
3797 eh->stub_offset = (bfd_vma) -1;
3798 eh->source_value = 0;
3799 eh->target_value = 0;
3800 eh->target_section = NULL;
3801 eh->orig_insn = 0;
3802 eh->stub_type = arm_stub_none;
3803 eh->stub_size = 0;
3804 eh->stub_template = NULL;
3805 eh->stub_template_size = -1;
3806 eh->h = NULL;
3807 eh->id_sec = NULL;
3808 eh->output_name = NULL;
3811 return entry;
3814 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3815 shortcuts to them in our hash table. */
3817 static bool
3818 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3820 struct elf32_arm_link_hash_table *htab;
3822 htab = elf32_arm_hash_table (info);
3823 if (htab == NULL)
3824 return false;
3826 if (! _bfd_elf_create_got_section (dynobj, info))
3827 return false;
3829 /* Also create .rofixup. */
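/* For FDPIC output, .rofixup records the addresses of words (such as GOT
   entries and function descriptors) that need adjusting at load time. */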
3830 if (htab->fdpic_p)
3832 htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
3833 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
3834 | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
3835 if (htab->srofixup == NULL
3836 || !bfd_set_section_alignment (htab->srofixup, 2))
3837 return false;
3840 return true;
3843 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3845 static bool
3846 create_ifunc_sections (struct bfd_link_info *info)
3848 struct elf32_arm_link_hash_table *htab;
3849 const struct elf_backend_data *bed;
3850 bfd *dynobj;
3851 asection *s;
3852 flagword flags;
3854 htab = elf32_arm_hash_table (info);
3855 dynobj = htab->root.dynobj;
3856 bed = get_elf_backend_data (dynobj);
3857 flags = bed->dynamic_sec_flags;
3859 if (htab->root.iplt == NULL)
3861 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3862 flags | SEC_READONLY | SEC_CODE);
3863 if (s == NULL
3864 || !bfd_set_section_alignment (s, bed->plt_alignment))
3865 return false;
3866 htab->root.iplt = s;
3869 if (htab->root.irelplt == NULL)
3871 s = bfd_make_section_anyway_with_flags (dynobj,
3872 RELOC_SECTION (htab, ".iplt"),
3873 flags | SEC_READONLY);
3874 if (s == NULL
3875 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3876 return false;
3877 htab->root.irelplt = s;
3880 if (htab->root.igotplt == NULL)
3882 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3883 if (s == NULL
3884 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3885 return false;
3886 htab->root.igotplt = s;
3888 return true;
3891 /* Determine if we're dealing with a Thumb-only architecture. */
3893 static bool
3894 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3896 int arch;
3897 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3898 Tag_CPU_arch_profile);
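/* If the CPU profile attribute is present it is authoritative: only the
   M profile is Thumb-only. */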
3900 if (profile)
3901 return profile == 'M';
3903 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3905 /* Force return logic to be reviewed for each new architecture. */
3906 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3908 if (arch == TAG_CPU_ARCH_V6_M
3909 || arch == TAG_CPU_ARCH_V6S_M
3910 || arch == TAG_CPU_ARCH_V7E_M
3911 || arch == TAG_CPU_ARCH_V8M_BASE
3912 || arch == TAG_CPU_ARCH_V8M_MAIN
3913 || arch == TAG_CPU_ARCH_V8_1M_MAIN)
3914 return true;
3916 return false;
3919 /* Determine if we're dealing with a Thumb-2 object. */
3921 static bool
3922 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3924 int arch;
3925 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3926 Tag_THUMB_ISA_use);
3928 /* No use of thumb permitted, or a legacy thumb-1/2 definition. */
3929 if (thumb_isa < 3)
3930 return thumb_isa == 2;
3932 /* Variant of thumb is described by the architecture tag. */
3933 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3935 /* Force return logic to be reviewed for each new architecture. */
3936 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3938 return (arch == TAG_CPU_ARCH_V6T2
3939 || arch == TAG_CPU_ARCH_V7
3940 || arch == TAG_CPU_ARCH_V7E_M
3941 || arch == TAG_CPU_ARCH_V8
3942 || arch == TAG_CPU_ARCH_V8R
3943 || arch == TAG_CPU_ARCH_V8M_MAIN
3944 || arch == TAG_CPU_ARCH_V8_1M_MAIN);
3947 /* Determine whether Thumb-2 BL instruction is available. */
3949 static bool
3950 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3952 int arch =
3953 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3955 /* Force return logic to be reviewed for each new architecture. */
3956 BFD_ASSERT (arch <= TAG_CPU_ARCH_V9);
3958 /* ARMv6T2 itself, or an architecture introduced after it (e.g. ARMv6-M); all of these provide the 32-bit Thumb BL. */
3959 return (arch == TAG_CPU_ARCH_V6T2
3960 || arch >= TAG_CPU_ARCH_V7);
3963 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3964 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3965 hash table. */
3967 static bool
3968 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3970 struct elf32_arm_link_hash_table *htab;
3972 htab = elf32_arm_hash_table (info);
3973 if (htab == NULL)
3974 return false;
3976 if (!htab->root.sgot && !create_got_section (dynobj, info))
3977 return false;
3979 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3980 return false;
3982 if (htab->root.target_os == is_vxworks)
3984 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3985 return false;
3987 if (bfd_link_pic (info))
3989 htab->plt_header_size = 0;
3990 htab->plt_entry_size
3991 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3993 else
3995 htab->plt_header_size
3996 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3997 htab->plt_entry_size
3998 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
4001 if (elf_elfheader (dynobj))
4002 elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
4004 else
4006 /* PR ld/16017
4007 Test for thumb only architectures. Note - we cannot just call
4008 using_thumb_only() as the attributes in the output bfd have not been
4009 initialised at this point, so instead we use the input bfd. */
4010 bfd * saved_obfd = htab->obfd;
4012 htab->obfd = dynobj;
4013 if (using_thumb_only (htab))
4015 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
4016 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
4018 htab->obfd = saved_obfd;
4021 if (htab->fdpic_p) {
4022 htab->plt_header_size = 0;
4023 if (info->flags & DF_BIND_NOW)
4024 htab->plt_entry_size = 4 * (ARRAY_SIZE (elf32_arm_fdpic_plt_entry) - 5);
4025 else
4026 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry);
4029 if (!htab->root.splt
4030 || !htab->root.srelplt
4031 || !htab->root.sdynbss
4032 || (!bfd_link_pic (info) && !htab->root.srelbss))
4033 abort ();
4035 return true;
4038 /* Copy the extra info we tack onto an elf_link_hash_entry. */
4040 static void
4041 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
4042 struct elf_link_hash_entry *dir,
4043 struct elf_link_hash_entry *ind)
4045 struct elf32_arm_link_hash_entry *edir, *eind;
4047 edir = (struct elf32_arm_link_hash_entry *) dir;
4048 eind = (struct elf32_arm_link_hash_entry *) ind;
4050 if (ind->root.type == bfd_link_hash_indirect)
4052 /* Copy over PLT info. */
4053 edir->plt.thumb_refcount += eind->plt.thumb_refcount;
4054 eind->plt.thumb_refcount = 0;
4055 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
4056 eind->plt.maybe_thumb_refcount = 0;
4057 edir->plt.noncall_refcount += eind->plt.noncall_refcount;
4058 eind->plt.noncall_refcount = 0;
4060 /* Copy FDPIC counters. */
4061 edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
4062 edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
4063 edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;
4065 /* We should only allocate a function to .iplt once the final
4066 symbol information is known. */
4067 BFD_ASSERT (!eind->is_iplt);
4069 if (dir->got.refcount <= 0)
4071 edir->tls_type = eind->tls_type;
4072 eind->tls_type = GOT_UNKNOWN;
4076 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
4079 /* Destroy an ARM elf linker hash table. */
4081 static void
4082 elf32_arm_link_hash_table_free (bfd *obfd)
4084 struct elf32_arm_link_hash_table *ret
4085 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4087 bfd_hash_table_free (&ret->stub_hash_table);
4088 _bfd_elf_link_hash_table_free (obfd);
4091 /* Create an ARM elf linker hash table. */
4093 static struct bfd_link_hash_table *
4094 elf32_arm_link_hash_table_create (bfd *abfd)
4096 struct elf32_arm_link_hash_table *ret;
4097 size_t amt = sizeof (struct elf32_arm_link_hash_table);
4099 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
4100 if (ret == NULL)
4101 return NULL;
4103 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
4104 elf32_arm_link_hash_newfunc,
4105 sizeof (struct elf32_arm_link_hash_entry),
4106 ARM_ELF_DATA))
4108 free (ret);
4109 return NULL;
4112 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
4113 ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
4114 #ifdef FOUR_WORD_PLT
4115 ret->plt_header_size = 16;
4116 ret->plt_entry_size = 16;
4117 #else
4118 ret->plt_header_size = 20;
4119 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
4120 #endif
4121 ret->use_rel = true;
4122 ret->obfd = abfd;
4123 ret->fdpic_p = 0;
4125 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4126 sizeof (struct elf32_arm_stub_hash_entry)))
4128 _bfd_elf_link_hash_table_free (abfd);
4129 return NULL;
4131 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4133 return &ret->root.root;
4136 /* Determine what kind of NOPs are available. */
4138 static bool
4139 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4141 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4142 Tag_CPU_arch);
4144 /* Force return logic to be reviewed for each new architecture. */
4145 BFD_ASSERT (arch <= TAG_CPU_ARCH_V9);
4147 return (arch == TAG_CPU_ARCH_V6T2
4148 || arch == TAG_CPU_ARCH_V6K
4149 || arch == TAG_CPU_ARCH_V7
4150 || arch == TAG_CPU_ARCH_V8
4151 || arch == TAG_CPU_ARCH_V8R
4152 || arch == TAG_CPU_ARCH_V9);
4155 static bool
4156 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4158 switch (stub_type)
4160 case arm_stub_long_branch_thumb_only:
4161 case arm_stub_long_branch_thumb2_only:
4162 case arm_stub_long_branch_thumb2_only_pure:
4163 case arm_stub_long_branch_v4t_thumb_arm:
4164 case arm_stub_short_branch_v4t_thumb_arm:
4165 case arm_stub_long_branch_v4t_thumb_arm_pic:
4166 case arm_stub_long_branch_v4t_thumb_tls_pic:
4167 case arm_stub_long_branch_thumb_only_pic:
4168 case arm_stub_cmse_branch_thumb_only:
4169 return true;
4170 case arm_stub_none:
4171 BFD_FAIL ();
4172 return false;
4173 break;
4174 default:
4175 return false;
4179 /* Determine the type of stub needed, if any, for a call. */
4181 static enum elf32_arm_stub_type
4182 arm_type_of_stub (struct bfd_link_info *info,
4183 asection *input_sec,
4184 const Elf_Internal_Rela *rel,
4185 unsigned char st_type,
4186 enum arm_st_branch_type *actual_branch_type,
4187 struct elf32_arm_link_hash_entry *hash,
4188 bfd_vma destination,
4189 asection *sym_sec,
4190 bfd *input_bfd,
4191 const char *name)
4193 bfd_vma location;
4194 bfd_signed_vma branch_offset;
4195 unsigned int r_type;
4196 struct elf32_arm_link_hash_table * globals;
4197 bool thumb2, thumb2_bl, thumb_only;
4198 enum elf32_arm_stub_type stub_type = arm_stub_none;
4199 int use_plt = 0;
4200 enum arm_st_branch_type branch_type = *actual_branch_type;
4201 union gotplt_union *root_plt;
4202 struct arm_plt_info *arm_plt;
4203 int arch;
4204 int thumb2_movw;
4206 if (branch_type == ST_BRANCH_LONG)
4207 return stub_type;
4209 globals = elf32_arm_hash_table (info);
4210 if (globals == NULL)
4211 return stub_type;
4213 thumb_only = using_thumb_only (globals);
4214 thumb2 = using_thumb2 (globals);
4215 thumb2_bl = using_thumb2_bl (globals);
4217 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
4219 /* True for architectures that implement the thumb2 movw instruction. */
4220 thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);
4222 /* Determine where the call point is. */
4223 location = (input_sec->output_offset
4224 + input_sec->output_section->vma
4225 + rel->r_offset);
4227 r_type = ELF32_R_TYPE (rel->r_info);
4229 /* Don't pretend we know what stub to use (if any) when we target a
4230 Thumb-only target and we don't know the actual destination
4231 type. */
4232 if (branch_type == ST_BRANCH_UNKNOWN && thumb_only)
4233 return stub_type;
4235 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
4236 are considering a function call relocation. */
4237 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4238 || r_type == R_ARM_THM_JUMP19)
4239 && branch_type == ST_BRANCH_TO_ARM)
4241 if (sym_sec == bfd_abs_section_ptr)
4242 /* As an exception, assume that absolute symbols are of the
4243 right kind (Thumb). They are presumably defined in the
4244 linker script, where it is not possible to declare them as
4245 Thumb (and thus are seen as Arm mode). We'll inform the
4246 user with a warning, though, in
4247 elf32_arm_final_link_relocate. */
4248 branch_type = ST_BRANCH_TO_THUMB;
4249 else
4250 /* Otherwise do not silently build a stub, and let the users
4251 know they have to fix their code. Indeed, we could decide
4252 to insert a stub involving Arm code and/or BLX, leading to
4253 a run-time crash. */
4254 return stub_type;
4257 /* For TLS call relocs, it is the caller's responsibility to provide
4258 the address of the appropriate trampoline. */
4259 if (r_type != R_ARM_TLS_CALL
4260 && r_type != R_ARM_THM_TLS_CALL
4261 && elf32_arm_get_plt_info (input_bfd, globals, hash,
4262 ELF32_R_SYM (rel->r_info), &root_plt,
4263 &arm_plt)
4264 && root_plt->offset != (bfd_vma) -1)
4266 asection *splt;
4268 if (hash == NULL || hash->is_iplt)
4269 splt = globals->root.iplt;
4270 else
4271 splt = globals->root.splt;
4272 if (splt != NULL)
4274 use_plt = 1;
4276 /* Note when dealing with PLT entries: the main PLT stub is in
4277 ARM mode, so if the branch is in Thumb mode, another
4278 Thumb->ARM stub will be inserted later just before the ARM
4279 PLT stub. If a long branch stub is needed, we'll add a
4280 Thumb->Arm one and branch directly to the ARM PLT entry.
4281 Here, we have to check if a pre-PLT Thumb->ARM stub
4282 is needed and if it will be close enough. */
4284 destination = (splt->output_section->vma
4285 + splt->output_offset
4286 + root_plt->offset);
4287 st_type = STT_FUNC;
4289 /* Thumb branch/call to PLT: it can become a branch to ARM
4290 or to Thumb. We must perform the same checks and
4291 corrections as in elf32_arm_final_link_relocate. */
4292 if ((r_type == R_ARM_THM_CALL)
4293 || (r_type == R_ARM_THM_JUMP24))
4295 if (globals->use_blx
4296 && r_type == R_ARM_THM_CALL
4297 && !thumb_only)
4299 /* If the Thumb BLX instruction is available, convert
4300 the BL to a BLX instruction to call the ARM-mode
4301 PLT entry. */
4302 branch_type = ST_BRANCH_TO_ARM;
4304 else
4306 if (!thumb_only)
4307 /* Target the Thumb stub before the ARM PLT entry. */
4308 destination -= PLT_THUMB_STUB_SIZE;
4309 branch_type = ST_BRANCH_TO_THUMB;
4312 else
4314 branch_type = ST_BRANCH_TO_ARM;
4318 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
4319 BFD_ASSERT (st_type != STT_GNU_IFUNC);
4321 branch_offset = (bfd_signed_vma)(destination - location);
4323 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4324 || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
4326 /* Handle cases where:
4327 - this call goes too far (different Thumb/Thumb2 max
4328 distance)
4329 - it's a Thumb->Arm call and blx is not available, or it's a
4330 Thumb->Arm branch (not bl). A stub is needed in this case,
4331 but only if this call is not through a PLT entry. Indeed,
4332 PLT stubs handle mode switching already. */
4333 if ((!thumb2_bl
4334 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
4335 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
4336 || (thumb2_bl
4337 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
4338 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
4339 || (thumb2
4340 && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
4341 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
4342 && (r_type == R_ARM_THM_JUMP19))
4343 || (branch_type == ST_BRANCH_TO_ARM
4344 && (((r_type == R_ARM_THM_CALL
4345 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
4346 || (r_type == R_ARM_THM_JUMP24)
4347 || (r_type == R_ARM_THM_JUMP19))
4348 && !use_plt))
4350 /* If we need to insert a Thumb-Thumb long branch stub to a
4351 PLT, use one that branches directly to the ARM PLT
4352 stub. If we pretended we'd use the pre-PLT Thumb->ARM
4353 stub, undo this now. */
4354 if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
4356 branch_type = ST_BRANCH_TO_ARM;
4357 branch_offset += PLT_THUMB_STUB_SIZE;
4360 if (branch_type == ST_BRANCH_TO_THUMB)
4362 /* Thumb to thumb. */
4363 if (!thumb_only)
4365 if (input_sec->flags & SEC_ELF_PURECODE)
4366 _bfd_error_handler
4367 (_("%pB(%pA): warning: long branch veneers used in"
4368 " section with SHF_ARM_PURECODE section"
4369 " attribute is only supported for M-profile"
4370 " targets that implement the movw instruction"),
4371 input_bfd, input_sec);
4373 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4374 /* PIC stubs. */
4375 ? ((globals->use_blx
4376 && (r_type == R_ARM_THM_CALL))
4377 /* V5T and above. Stub starts with ARM code, so
4378 we must be able to switch mode before
4379 reaching it, which is only possible for 'bl'
4380 (ie R_ARM_THM_CALL relocation). */
4381 ? arm_stub_long_branch_any_thumb_pic
4382 /* On V4T, use Thumb code only. */
4383 : arm_stub_long_branch_v4t_thumb_thumb_pic)
4385 /* non-PIC stubs. */
4386 : ((globals->use_blx
4387 && (r_type == R_ARM_THM_CALL))
4388 /* V5T and above. */
4389 ? arm_stub_long_branch_any_any
4390 /* V4T. */
4391 : arm_stub_long_branch_v4t_thumb_thumb);
4393 else
4395 if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
4396 stub_type = arm_stub_long_branch_thumb2_only_pure;
4397 else
4399 if (input_sec->flags & SEC_ELF_PURECODE)
4400 _bfd_error_handler
4401 (_("%pB(%pA): warning: long branch veneers used in"
4402 " section with SHF_ARM_PURECODE section"
4403 " attribute is only supported for M-profile"
4404 " targets that implement the movw instruction"),
4405 input_bfd, input_sec);
4407 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4408 /* PIC stub. */
4409 ? arm_stub_long_branch_thumb_only_pic
4410 /* non-PIC stub. */
4411 : (thumb2 ? arm_stub_long_branch_thumb2_only
4412 : arm_stub_long_branch_thumb_only);
4416 else
4418 if (input_sec->flags & SEC_ELF_PURECODE)
4419 _bfd_error_handler
4420 (_("%pB(%pA): warning: long branch veneers used in"
4421 " section with SHF_ARM_PURECODE section"
4422 " attribute is only supported" " for M-profile"
4423 " targets that implement the movw instruction"),
4424 input_bfd, input_sec);
4426 /* Thumb to arm. */
4427 if (sym_sec != NULL
4428 && sym_sec->owner != NULL
4429 && !INTERWORK_FLAG (sym_sec->owner))
4431 _bfd_error_handler
4432 (_("%pB(%s): warning: interworking not enabled;"
4433 " first occurrence: %pB: %s call to %s"),
4434 sym_sec->owner, name, input_bfd, "Thumb", "ARM");
4437 stub_type =
4438 (bfd_link_pic (info) | globals->pic_veneer)
4439 /* PIC stubs. */
4440 ? (r_type == R_ARM_THM_TLS_CALL
4441 /* TLS PIC stubs. */
4442 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
4443 : arm_stub_long_branch_v4t_thumb_tls_pic)
4444 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4445 /* V5T PIC and above. */
4446 ? arm_stub_long_branch_any_arm_pic
4447 /* V4T PIC stub. */
4448 : arm_stub_long_branch_v4t_thumb_arm_pic))
4450 /* non-PIC stubs. */
4451 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4452 /* V5T and above. */
4453 ? arm_stub_long_branch_any_any
4454 /* V4T. */
4455 : arm_stub_long_branch_v4t_thumb_arm);
4457 /* Handle v4t short branches. */
4458 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
4459 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
4460 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
4461 stub_type = arm_stub_short_branch_v4t_thumb_arm;
4465 else if (r_type == R_ARM_CALL
4466 || r_type == R_ARM_JUMP24
4467 || r_type == R_ARM_PLT32
4468 || r_type == R_ARM_TLS_CALL)
4470 if (input_sec->flags & SEC_ELF_PURECODE)
4471 _bfd_error_handler
4472 (_("%pB(%pA): warning: long branch veneers used in"
4473 " section with SHF_ARM_PURECODE section"
4474 " attribute is only supported for M-profile"
4475 " targets that implement the movw instruction"),
4476 input_bfd, input_sec);
4477 if (branch_type == ST_BRANCH_TO_THUMB)
4479 /* Arm to thumb. */
4481 if (sym_sec != NULL
4482 && sym_sec->owner != NULL
4483 && !INTERWORK_FLAG (sym_sec->owner))
4485 _bfd_error_handler
4486 (_("%pB(%s): warning: interworking not enabled;"
4487 " first occurrence: %pB: %s call to %s"),
4488 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
4491 /* We have an extra 2 bytes of reach because of
4492 the mode change (bit 24 (H) of the BLX encoding). */
4493 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4494 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4495 || (r_type == R_ARM_CALL && !globals->use_blx)
4496 || (r_type == R_ARM_JUMP24)
4497 || (r_type == R_ARM_PLT32))
4499 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4500 /* PIC stubs. */
4501 ? ((globals->use_blx)
4502 /* V5T and above. */
4503 ? arm_stub_long_branch_any_thumb_pic
4504 /* V4T stub. */
4505 : arm_stub_long_branch_v4t_arm_thumb_pic)
4507 /* non-PIC stubs. */
4508 : ((globals->use_blx)
4509 /* V5T and above. */
4510 ? arm_stub_long_branch_any_any
4511 /* V4T. */
4512 : arm_stub_long_branch_v4t_arm_thumb);
4515 else
4517 /* Arm to arm. */
4518 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4519 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4521 stub_type =
4522 (bfd_link_pic (info) | globals->pic_veneer)
4523 /* PIC stubs. */
4524 ? (r_type == R_ARM_TLS_CALL
4525 /* TLS PIC Stub. */
4526 ? arm_stub_long_branch_any_tls_pic
4527 : (globals->root.target_os == is_nacl
4528 ? arm_stub_long_branch_arm_nacl_pic
4529 : arm_stub_long_branch_any_arm_pic))
4530 /* non-PIC stubs. */
4531 : (globals->root.target_os == is_nacl
4532 ? arm_stub_long_branch_arm_nacl
4533 : arm_stub_long_branch_any_any);
4538 /* If a stub is needed, record the actual destination type. */
4539 if (stub_type != arm_stub_none)
4540 *actual_branch_type = branch_type;
4542 return stub_type;
4545 /* Build a name for an entry in the stub hash table. */
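/* For a global symbol the name has the form
   "<section id>_<symbol>+<addend>_<stub type>", for instance
   "0000002a_printf+0_6" (illustrative values); for a local symbol the
   defining section id and symbol index are used instead of the name. */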
4547 static char *
4548 elf32_arm_stub_name (const asection *input_section,
4549 const asection *sym_sec,
4550 const struct elf32_arm_link_hash_entry *hash,
4551 const Elf_Internal_Rela *rel,
4552 enum elf32_arm_stub_type stub_type)
4554 char *stub_name;
4555 bfd_size_type len;
4557 if (hash)
4559 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4560 stub_name = (char *) bfd_malloc (len);
4561 if (stub_name != NULL)
4562 sprintf (stub_name, "%08x_%s+%x_%d",
4563 input_section->id & 0xffffffff,
4564 hash->root.root.root.string,
4565 (int) rel->r_addend & 0xffffffff,
4566 (int) stub_type);
4568 else
4570 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4571 stub_name = (char *) bfd_malloc (len);
4572 if (stub_name != NULL)
4573 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4574 input_section->id & 0xffffffff,
4575 sym_sec->id & 0xffffffff,
4576 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4577 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4578 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4579 (int) rel->r_addend & 0xffffffff,
4580 (int) stub_type);
4583 return stub_name;
4586 /* Look up an entry in the stub hash. Stub entries are cached because
4587 creating the stub name takes a bit of time. */
4589 static struct elf32_arm_stub_hash_entry *
4590 elf32_arm_get_stub_entry (const asection *input_section,
4591 const asection *sym_sec,
4592 struct elf_link_hash_entry *hash,
4593 const Elf_Internal_Rela *rel,
4594 struct elf32_arm_link_hash_table *htab,
4595 enum elf32_arm_stub_type stub_type)
4597 struct elf32_arm_stub_hash_entry *stub_entry;
4598 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4599 const asection *id_sec;
4601 if ((input_section->flags & SEC_CODE) == 0)
4602 return NULL;
4604 /* If the input section is the CMSE stubs one and it needs a long
4605 branch stub to reach its final destination, give up with an
4606 error message: this is not supported. See PR ld/24709. */
4607 if (!strncmp (input_section->name, CMSE_STUB_NAME, strlen (CMSE_STUB_NAME)))
4609 bfd *output_bfd = htab->obfd;
4610 asection *out_sec = bfd_get_section_by_name (output_bfd, CMSE_STUB_NAME);
4612 _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
4613 "(%#" PRIx64 ") from destination (%#" PRIx64 ")"),
4614 CMSE_STUB_NAME,
4615 (uint64_t)out_sec->output_section->vma
4616 + out_sec->output_offset,
4617 (uint64_t)sym_sec->output_section->vma
4618 + sym_sec->output_offset
4619 + h->root.root.u.def.value);
4620 /* Exit, rather than leave incompletely processed
4621 relocations. */
4622 xexit (1);
4625 /* If this input section is part of a group of sections sharing one
4626 stub section, then use the id of the first section in the group.
4627 Stub names need to include a section id, as there may well be
4628 more than one stub used to reach say, printf, and we need to
4629 distinguish between them. */
4630 BFD_ASSERT (input_section->id <= htab->top_id);
4631 id_sec = htab->stub_group[input_section->id].link_sec;
4633 if (h != NULL && h->stub_cache != NULL
4634 && h->stub_cache->h == h
4635 && h->stub_cache->id_sec == id_sec
4636 && h->stub_cache->stub_type == stub_type)
4638 stub_entry = h->stub_cache;
4640 else
4642 char *stub_name;
4644 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4645 if (stub_name == NULL)
4646 return NULL;
4648 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4649 stub_name, false, false);
4650 if (h != NULL)
4651 h->stub_cache = stub_entry;
4653 free (stub_name);
4656 return stub_entry;
4659 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4660 section. */
4662 static bool
4663 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4665 if (stub_type >= max_stub_type)
4666 abort (); /* Should be unreachable. */
4668 switch (stub_type)
4670 case arm_stub_cmse_branch_thumb_only:
4671 return true;
4673 default:
4674 return false;
4677 abort (); /* Should be unreachable. */
4680 /* Required alignment (as a power of 2) for the dedicated section holding
4681 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4682 with input sections. */
4684 static int
4685 arm_dedicated_stub_output_section_required_alignment
4686 (enum elf32_arm_stub_type stub_type)
4688 if (stub_type >= max_stub_type)
4689 abort (); /* Should be unreachable. */
4691 switch (stub_type)
4693 /* Vectors of Secure Gateway veneers must be aligned on 32byte
4694 boundary. */
4695 case arm_stub_cmse_branch_thumb_only:
4696 return 5;
4698 default:
4699 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4700 return 0;
4703 abort (); /* Should be unreachable. */
4706 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4707 NULL if veneers of this type are interspersed with input sections. */
4709 static const char *
4710 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4712 if (stub_type >= max_stub_type)
4713 abort (); /* Should be unreachable. */
4715 switch (stub_type)
4717 case arm_stub_cmse_branch_thumb_only:
4718 return CMSE_STUB_NAME;
4720 default:
4721 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4722 return NULL;
4725 abort (); /* Should be unreachable. */
4728 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4729 returns the address of the hash table field in HTAB holding a pointer to the
4730 corresponding input section. Otherwise, returns NULL. */
4732 static asection **
4733 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4734 enum elf32_arm_stub_type stub_type)
4736 if (stub_type >= max_stub_type)
4737 abort (); /* Should be unreachable. */
4739 switch (stub_type)
4741 case arm_stub_cmse_branch_thumb_only:
4742 return &htab->cmse_stub_sec;
4744 default:
4745 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4746 return NULL;
4749 abort (); /* Should be unreachable. */
4752 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4753 is the section that branches into the veneer and can be NULL if the stub should go in
4754 a dedicated output section. Returns a pointer to the stub section, and the
4755 section to which the stub section will be attached (in *LINK_SEC_P).
4756 LINK_SEC_P may be NULL. */
4758 static asection *
4759 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4760 struct elf32_arm_link_hash_table *htab,
4761 enum elf32_arm_stub_type stub_type)
4763 asection *link_sec, *out_sec, **stub_sec_p;
4764 const char *stub_sec_prefix;
4765 bool dedicated_output_section =
4766 arm_dedicated_stub_output_section_required (stub_type);
4767 int align;
4769 if (dedicated_output_section)
4771 bfd *output_bfd = htab->obfd;
4772 const char *out_sec_name =
4773 arm_dedicated_stub_output_section_name (stub_type);
4774 link_sec = NULL;
4775 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
4776 stub_sec_prefix = out_sec_name;
4777 align = arm_dedicated_stub_output_section_required_alignment (stub_type);
4778 out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
4779 if (out_sec == NULL)
4781 _bfd_error_handler (_("no address assigned to the veneers output "
4782 "section %s"), out_sec_name);
4783 return NULL;
4786 else
4788 BFD_ASSERT (section->id <= htab->top_id);
4789 link_sec = htab->stub_group[section->id].link_sec;
4790 BFD_ASSERT (link_sec != NULL);
4791 stub_sec_p = &htab->stub_group[section->id].stub_sec;
4792 if (*stub_sec_p == NULL)
4793 stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
4794 stub_sec_prefix = link_sec->name;
4795 out_sec = link_sec->output_section;
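/* ALIGN is a log2 alignment: stub sections are 16-byte aligned for NaCl,
   8-byte aligned otherwise. */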
4796 align = htab->root.target_os == is_nacl ? 4 : 3;
4799 if (*stub_sec_p == NULL)
4801 size_t namelen;
4802 bfd_size_type len;
4803 char *s_name;
4805 namelen = strlen (stub_sec_prefix);
4806 len = namelen + sizeof (STUB_SUFFIX);
4807 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4808 if (s_name == NULL)
4809 return NULL;
4811 memcpy (s_name, stub_sec_prefix, namelen);
4812 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4813 *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
4814 align);
4815 if (*stub_sec_p == NULL)
4816 return NULL;
4818 out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
4819 | SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
4820 | SEC_KEEP;
4823 if (!dedicated_output_section)
4824 htab->stub_group[section->id].stub_sec = *stub_sec_p;
4826 if (link_sec_p)
4827 *link_sec_p = link_sec;
4829 return *stub_sec_p;
4832 /* Add a new stub entry to the stub hash. Not all fields of the new
4833 stub entry are initialised. */
4835 static struct elf32_arm_stub_hash_entry *
4836 elf32_arm_add_stub (const char *stub_name, asection *section,
4837 struct elf32_arm_link_hash_table *htab,
4838 enum elf32_arm_stub_type stub_type)
4840 asection *link_sec;
4841 asection *stub_sec;
4842 struct elf32_arm_stub_hash_entry *stub_entry;
4844 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4845 stub_type);
4846 if (stub_sec == NULL)
4847 return NULL;
4849 /* Enter this entry into the linker stub hash table. */
4850 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4851 true, false);
4852 if (stub_entry == NULL)
4854 if (section == NULL)
4855 section = stub_sec;
4856 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4857 section->owner, stub_name);
4858 return NULL;
4861 stub_entry->stub_sec = stub_sec;
4862 stub_entry->stub_offset = (bfd_vma) -1;
4863 stub_entry->id_sec = link_sec;
4865 return stub_entry;
4868 /* Store an Arm insn into an output section not processed by
4869 elf32_arm_write_section. */
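/* When HTAB->byteswap_code is set, instructions are stored in the byte
   order opposite to the output's data endianness (and in the data
   endianness otherwise); hence the test below. */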
4871 static void
4872 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4873 bfd * output_bfd, bfd_vma val, void * ptr)
4875 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4876 bfd_putl32 (val, ptr);
4877 else
4878 bfd_putb32 (val, ptr);
4881 /* Store a 16-bit Thumb insn into an output section not processed by
4882 elf32_arm_write_section. */
4884 static void
4885 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4886 bfd * output_bfd, bfd_vma val, void * ptr)
4888 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4889 bfd_putl16 (val, ptr);
4890 else
4891 bfd_putb16 (val, ptr);
4894 /* Store a Thumb2 insn into an output section not processed by
4895 elf32_arm_write_section. */
4897 static void
4898 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4899 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4901 /* Thumb-2 instructions are stored as two 16-bit halfwords. */
4902 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4904 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4905 bfd_putl16 ((val & 0xffff), ptr + 2);
4907 else
4909 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4910 bfd_putb16 ((val & 0xffff), ptr + 2);
4914 /* If it's possible to change R_TYPE to a more efficient access
4915 model, return the new reloc type. */
4917 static unsigned
4918 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4919 struct elf_link_hash_entry *h)
4921 int is_local = (h == NULL);
4923 if (bfd_link_dll (info)
4924 || (h && h->root.type == bfd_link_hash_undefweak))
4925 return r_type;
4927 /* We do not support relaxations for Old TLS models. */
4928 switch (r_type)
4930 case R_ARM_TLS_GOTDESC:
4931 case R_ARM_TLS_CALL:
4932 case R_ARM_THM_TLS_CALL:
4933 case R_ARM_TLS_DESCSEQ:
4934 case R_ARM_THM_TLS_DESCSEQ:
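/* A TLS descriptor or call sequence can be relaxed to the Local Exec
   model for local symbols, and to Initial Exec for global ones. */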
4935 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4938 return r_type;
4941 static bfd_reloc_status_type elf32_arm_final_link_relocate
4942 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4943 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4944 const char *, unsigned char, enum arm_st_branch_type,
4945 struct elf_link_hash_entry *, bool *, char **);
4947 static unsigned int
4948 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4950 switch (stub_type)
4952 case arm_stub_a8_veneer_b_cond:
4953 case arm_stub_a8_veneer_b:
4954 case arm_stub_a8_veneer_bl:
4955 return 2;
4957 case arm_stub_long_branch_any_any:
4958 case arm_stub_long_branch_v4t_arm_thumb:
4959 case arm_stub_long_branch_thumb_only:
4960 case arm_stub_long_branch_thumb2_only:
4961 case arm_stub_long_branch_thumb2_only_pure:
4962 case arm_stub_long_branch_v4t_thumb_thumb:
4963 case arm_stub_long_branch_v4t_thumb_arm:
4964 case arm_stub_short_branch_v4t_thumb_arm:
4965 case arm_stub_long_branch_any_arm_pic:
4966 case arm_stub_long_branch_any_thumb_pic:
4967 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4968 case arm_stub_long_branch_v4t_arm_thumb_pic:
4969 case arm_stub_long_branch_v4t_thumb_arm_pic:
4970 case arm_stub_long_branch_thumb_only_pic:
4971 case arm_stub_long_branch_any_tls_pic:
4972 case arm_stub_long_branch_v4t_thumb_tls_pic:
4973 case arm_stub_cmse_branch_thumb_only:
4974 case arm_stub_a8_veneer_blx:
4975 return 4;
4977 case arm_stub_long_branch_arm_nacl:
4978 case arm_stub_long_branch_arm_nacl_pic:
4979 return 16;
4981 default:
4982 abort (); /* Should be unreachable. */
4986 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4987 veneering (TRUE) or have their own symbol (FALSE). */
4989 static bool
4990 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4992 if (stub_type >= max_stub_type)
4993 abort (); /* Should be unreachable. */
4995 switch (stub_type)
4997 case arm_stub_cmse_branch_thumb_only:
4998 return true;
5000 default:
5001 return false;
5004 abort (); /* Should be unreachable. */
5007 /* Returns the padding needed for the dedicated section used by stubs of type
5008 STUB_TYPE. */
5010 static int
5011 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
5013 if (stub_type >= max_stub_type)
5014 abort (); /* Should be unreachable. */
5016 switch (stub_type)
5018 case arm_stub_cmse_branch_thumb_only:
5019 return 32;
5021 default:
5022 return 0;
5025 abort (); /* Should be unreachable. */
5028 /* If veneers of type STUB_TYPE should go in a dedicated output section,
5029 returns the address of the hash table field in HTAB holding the offset at
5030 which new veneers should be laid out in the stub section. */
5032 static bfd_vma*
5033 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
5034 enum elf32_arm_stub_type stub_type)
5036 switch (stub_type)
5038 case arm_stub_cmse_branch_thumb_only:
5039 return &htab->new_cmse_stub_offset;
5041 default:
5042 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
5043 return NULL;
5047 static bool
5048 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
5049 void * in_arg)
5051 #define MAXRELOCS 3
5052 bool removed_sg_veneer;
5053 struct elf32_arm_stub_hash_entry *stub_entry;
5054 struct elf32_arm_link_hash_table *globals;
5055 struct bfd_link_info *info;
5056 asection *stub_sec;
5057 bfd *stub_bfd;
5058 bfd_byte *loc;
5059 bfd_vma sym_value;
5060 int template_size;
5061 int size;
5062 const insn_sequence *template_sequence;
5063 int i;
5064 int stub_reloc_idx[MAXRELOCS] = {-1, -1, -1};
5065 int stub_reloc_offset[MAXRELOCS] = {0, 0, 0};
5066 int nrelocs = 0;
5067 int just_allocated = 0;
5069 /* Massage our args to the form they really have. */
5070 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5071 info = (struct bfd_link_info *) in_arg;
5073 /* Fail if the target section could not be assigned to an output
5074 section. The user should fix their linker script. */
5075 if (stub_entry->target_section->output_section == NULL
5076 && info->non_contiguous_regions)
5077 info->callbacks->einfo (_("%F%P: Could not assign `%pA' to an output section. "
5078 "Retry without --enable-non-contiguous-regions.\n"),
5079 stub_entry->target_section);
5081 globals = elf32_arm_hash_table (info);
5082 if (globals == NULL)
5083 return false;
5085 stub_sec = stub_entry->stub_sec;
5087 if ((globals->fix_cortex_a8 < 0)
5088 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
5089 /* We have to do less-strictly-aligned fixes last. */
5090 return true;
5092 /* Assign a slot at the end of section if none assigned yet. */
5093 if (stub_entry->stub_offset == (bfd_vma) -1)
5095 stub_entry->stub_offset = stub_sec->size;
5096 just_allocated = 1;
5098 loc = stub_sec->contents + stub_entry->stub_offset;
5100 stub_bfd = stub_sec->owner;
5102 /* This is the address of the stub destination. */
5103 sym_value = (stub_entry->target_value
5104 + stub_entry->target_section->output_offset
5105 + stub_entry->target_section->output_section->vma);
5107 template_sequence = stub_entry->stub_template;
5108 template_size = stub_entry->stub_template_size;
5110 size = 0;
5111 for (i = 0; i < template_size; i++)
5113 switch (template_sequence[i].type)
5115 case THUMB16_TYPE:
5117 bfd_vma data = (bfd_vma) template_sequence[i].data;
5118 if (template_sequence[i].reloc_addend != 0)
5120 /* We've borrowed the reloc_addend field to mean we should
5121 insert a condition code into this (Thumb-1 branch)
5122 instruction. See THUMB16_BCOND_INSN. */
5123 BFD_ASSERT ((data & 0xff00) == 0xd000);
5124 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
5126 bfd_put_16 (stub_bfd, data, loc + size);
5127 size += 2;
5129 break;
5131 case THUMB32_TYPE:
5132 bfd_put_16 (stub_bfd,
5133 (template_sequence[i].data >> 16) & 0xffff,
5134 loc + size);
5135 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
5136 loc + size + 2);
5137 if (template_sequence[i].r_type != R_ARM_NONE)
5139 stub_reloc_idx[nrelocs] = i;
5140 stub_reloc_offset[nrelocs++] = size;
5142 size += 4;
5143 break;
5145 case ARM_TYPE:
5146 bfd_put_32 (stub_bfd, template_sequence[i].data,
5147 loc + size);
5148 /* Handle cases where the target is encoded within the
5149 instruction. */
5150 if (template_sequence[i].r_type == R_ARM_JUMP24)
5152 stub_reloc_idx[nrelocs] = i;
5153 stub_reloc_offset[nrelocs++] = size;
5155 size += 4;
5156 break;
5158 case DATA_TYPE:
5159 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
5160 stub_reloc_idx[nrelocs] = i;
5161 stub_reloc_offset[nrelocs++] = size;
5162 size += 4;
5163 break;
5165 default:
5166 BFD_FAIL ();
5167 return false;
5171 if (just_allocated)
5172 stub_sec->size += size;
5174 /* Stub size has already been computed in arm_size_one_stub. Check
5175 consistency. */
5176 BFD_ASSERT (size == stub_entry->stub_size);
5178 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
5179 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
5180 sym_value |= 1;
5182 /* Assume non-empty slots have at least one and at most MAXRELOCS entries
5183 to relocate in each stub. */
5184 removed_sg_veneer =
5185 (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
5186 BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
5188 for (i = 0; i < nrelocs; i++)
5190 Elf_Internal_Rela rel;
5191 bool unresolved_reloc;
5192 char *error_message;
5193 bfd_vma points_to =
5194 sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
5196 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
5197 rel.r_info = ELF32_R_INFO (0,
5198 template_sequence[stub_reloc_idx[i]].r_type);
5199 rel.r_addend = 0;
5201 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
5202 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5203 template should refer back to the instruction after the original
5204 branch. We use target_section as Cortex-A8 erratum workaround stubs
5205 are only generated when both source and target are in the same
5206 section. */
5207 points_to = stub_entry->target_section->output_section->vma
5208 + stub_entry->target_section->output_offset
5209 + stub_entry->source_value;
5211 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5212 (template_sequence[stub_reloc_idx[i]].r_type),
5213 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
5214 points_to, info, stub_entry->target_section, "", STT_FUNC,
5215 stub_entry->branch_type,
5216 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
5217 &error_message);
5220 return true;
5221 #undef MAXRELOCS
5224 /* Calculate the template, template size and instruction size for a stub.
5225 Return value is the instruction size. */
5227 static unsigned int
5228 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5229 const insn_sequence **stub_template,
5230 int *stub_template_size)
5232 const insn_sequence *template_sequence = NULL;
5233 int template_size = 0, i;
5234 unsigned int size;
5236 template_sequence = stub_definitions[stub_type].template_sequence;
5237 if (stub_template)
5238 *stub_template = template_sequence;
5240 template_size = stub_definitions[stub_type].template_size;
5241 if (stub_template_size)
5242 *stub_template_size = template_size;
5244 size = 0;
5245 for (i = 0; i < template_size; i++)
5247 switch (template_sequence[i].type)
5249 case THUMB16_TYPE:
5250 size += 2;
5251 break;
5253 case ARM_TYPE:
5254 case THUMB32_TYPE:
5255 case DATA_TYPE:
5256 size += 4;
5257 break;
5259 default:
5260 BFD_FAIL ();
5261 return 0;
5265 return size;
5268 /* As above, but don't actually build the stub. Just bump offset so
5269 we know stub section sizes. */
5271 static bool
5272 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5273 void *in_arg ATTRIBUTE_UNUSED)
5275 struct elf32_arm_stub_hash_entry *stub_entry;
5276 const insn_sequence *template_sequence;
5277 int template_size, size;
5279 /* Massage our args to the form they really have. */
5280 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5282 BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
5283 && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
5285 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5286 &template_size);
5288 /* stub_template_size is initialised to -1; a zero size indicates an empty slot full of zeros, so leave it alone. */
5289 if (stub_entry->stub_template_size)
5291 stub_entry->stub_size = size;
5292 stub_entry->stub_template = template_sequence;
5293 stub_entry->stub_template_size = template_size;
5296 /* Already accounted for. */
5297 if (stub_entry->stub_offset != (bfd_vma) -1)
5298 return true;
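/* Round the stub size up to a multiple of 8 so that consecutive stubs
   stay 8-byte aligned within the stub section. */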
5300 size = (size + 7) & ~7;
5301 stub_entry->stub_sec->size += size;
5303 return true;
5306 /* External entry points for sizing and building linker stubs. */
5308 /* Set up various things so that we can make a list of input sections
5309 for each output section included in the link. Returns -1 on error,
5310 0 when no stubs will be needed, and 1 on success. */
5313 elf32_arm_setup_section_lists (bfd *output_bfd,
5314 struct bfd_link_info *info)
5316 bfd *input_bfd;
5317 unsigned int bfd_count;
5318 unsigned int top_id, top_index;
5319 asection *section;
5320 asection **input_list, **list;
5321 size_t amt;
5322 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5324 if (htab == NULL)
5325 return 0;
5327 /* Count the number of input BFDs and find the top input section id. */
5328 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
5329 input_bfd != NULL;
5330 input_bfd = input_bfd->link.next)
5332 bfd_count += 1;
5333 for (section = input_bfd->sections;
5334 section != NULL;
5335 section = section->next)
5337 if (top_id < section->id)
5338 top_id = section->id;
5341 htab->bfd_count = bfd_count;
5343 amt = sizeof (struct map_stub) * (top_id + 1);
5344 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
5345 if (htab->stub_group == NULL)
5346 return -1;
5347 htab->top_id = top_id;
5349 /* We can't use output_bfd->section_count here to find the top output
5350 section index as some sections may have been removed, and
5351 _bfd_strip_section_from_output doesn't renumber the indices. */
5352 for (section = output_bfd->sections, top_index = 0;
5353 section != NULL;
5354 section = section->next)
5356 if (top_index < section->index)
5357 top_index = section->index;
5360 htab->top_index = top_index;
5361 amt = sizeof (asection *) * (top_index + 1);
5362 input_list = (asection **) bfd_malloc (amt);
5363 htab->input_list = input_list;
5364 if (input_list == NULL)
5365 return -1;
5367 /* For sections we aren't interested in, mark their entries with a
5368 value we can check later. */
5369 list = input_list + top_index;
5371 *list = bfd_abs_section_ptr;
5372 while (list-- != input_list);
5374 for (section = output_bfd->sections;
5375 section != NULL;
5376 section = section->next)
5378 if ((section->flags & SEC_CODE) != 0)
5379 input_list[section->index] = NULL;
5382 return 1;
5385 /* The linker repeatedly calls this function for each input section,
5386 in the order that input sections are linked into output sections.
5387 Build lists of input sections to determine groupings between which
5388 we may insert linker stubs. */
5390 void
5391 elf32_arm_next_input_section (struct bfd_link_info *info,
5392 asection *isec)
5394 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5396 if (htab == NULL)
5397 return;
5399 if (isec->output_section->index <= htab->top_index)
5401 asection **list = htab->input_list + isec->output_section->index;
5403 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
5405 /* Steal the link_sec pointer for our list. */
5406 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
5407 /* This happens to make the list in reverse order,
5408 which we reverse later. */
5409 PREV_SEC (isec) = *list;
5410 *list = isec;
5415 /* See whether we can group stub sections together. Grouping stub
5416 sections may result in fewer stubs. More importantly, we need to
5417 put all .init* and .fini* stubs at the end of the .init or
5418 .fini output sections respectively, because glibc splits the
5419 _init and _fini functions into multiple parts. Putting a stub in
5420 the middle of a function is not a good idea. */
5422 static void
5423 group_sections (struct elf32_arm_link_hash_table *htab,
5424 bfd_size_type stub_group_size,
5425 bool stubs_always_after_branch)
5427 asection **list = htab->input_list;
5431 asection *tail = *list;
5432 asection *head;
5434 if (tail == bfd_abs_section_ptr)
5435 continue;
5437 /* Reverse the list: we must avoid placing stubs at the
5438 beginning of the section because the beginning of the text
5439 section may be required for an interrupt vector in bare metal
5440 code. */
5441 #define NEXT_SEC PREV_SEC
5442 head = NULL;
5443 while (tail != NULL)
5445 /* Pop from tail. */
5446 asection *item = tail;
5447 tail = PREV_SEC (item);
5449 /* Push on head. */
5450 NEXT_SEC (item) = head;
5451 head = item;
5454 while (head != NULL)
5456 asection *curr;
5457 asection *next;
5458 bfd_vma stub_group_start = head->output_offset;
5459 bfd_vma end_of_next;
5461 curr = head;
5462 while (NEXT_SEC (curr) != NULL)
5464 next = NEXT_SEC (curr);
5465 end_of_next = next->output_offset + next->size;
5466 if (end_of_next - stub_group_start >= stub_group_size)
5467 /* End of NEXT is too far from start, so stop. */
5468 break;
5469 /* Add NEXT to the group. */
5470 curr = next;
5473 /* OK, the size from the start to the start of CURR is less
5474 than stub_group_size and thus can be handled by one stub
5475 section. (Or the head section is itself larger than
5476 stub_group_size, in which case we may be toast.)
5477 We should really be keeping track of the total size of
5478 stubs added here, as stubs contribute to the final output
5479 section size. */
5482 next = NEXT_SEC (head);
5483 /* Set up this stub group. */
5484 htab->stub_group[head->id].link_sec = curr;
5486 while (head != curr && (head = next) != NULL);
5488 /* But wait, there's more! Input sections up to stub_group_size
5489 bytes after the stub section can be handled by it too. */
5490 if (!stubs_always_after_branch)
5492 stub_group_start = curr->output_offset + curr->size;
5494 while (next != NULL)
5496 end_of_next = next->output_offset + next->size;
5497 if (end_of_next - stub_group_start >= stub_group_size)
5498 /* End of NEXT is too far from stubs, so stop. */
5499 break;
5500 /* Add NEXT to the stub group. */
5501 head = next;
5502 next = NEXT_SEC (head);
5503 htab->stub_group[head->id].link_sec = curr;
5506 head = next;
5509 while (list++ != htab->input_list + htab->top_index);
5511 free (htab->input_list);
5512 #undef PREV_SEC
5513 #undef NEXT_SEC
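/* A minimal sketch of the grouping pass above, written with an ordinary
   singly linked list instead of the borrowed link_sec pointer.  It covers
   only the first half of group_sections (grouping the sections that come
   before the stub section); the "sections after the stub" extension and
   the struct/field names used here are illustrative only.  */
#if 0 /* Illustrative sketch only.  */
struct sec_node
{
  struct sec_node *next;      /* Next input section in link order.  */
  bfd_vma offset;             /* Offset within the output section.  */
  bfd_vma size;
  struct sec_node *link_sec;  /* Section whose stub section serves us.  */
};

static void
group_by_size (struct sec_node *head, bfd_vma group_size)
{
  while (head != NULL)
    {
      bfd_vma start = head->offset;
      struct sec_node *curr = head;
      struct sec_node *next, *stop;

      /* Extend the group while the end of the next section is still
	 within GROUP_SIZE of the start of the group.  */
      while ((next = curr->next) != NULL
	     && next->offset + next->size - start < group_size)
	curr = next;

      /* Every member of the group records CURR as its link section;
	 the group's stub section is placed after CURR.  */
      stop = curr->next;
      for (; head != stop; head = head->next)
	head->link_sec = curr;
    }
}
#endif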
5516 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5517 erratum fix. */
5519 static int
5520 a8_reloc_compare (const void *a, const void *b)
5522 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5523 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5525 if (ra->from < rb->from)
5526 return -1;
5527 else if (ra->from > rb->from)
5528 return 1;
5529 else
5530 return 0;
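/* A minimal usage sketch for the comparator above: the table of candidate
   relocations is first sorted by its 'from' field with qsort and then
   probed with bsearch using the same comparator, as done further down in
   elf32_arm_size_stubs and cortex_a8_erratum_scan.  The helper name is
   illustrative only.  */
#if 0 /* Illustrative sketch only.  */
static struct a8_erratum_reloc *
find_a8_reloc (struct a8_erratum_reloc *sorted_relocs, unsigned int count,
	       bfd_vma from)
{
  struct a8_erratum_reloc key;

  key.from = from;
  return (struct a8_erratum_reloc *)
    bsearch (&key, sorted_relocs, count, sizeof (key), &a8_reloc_compare);
}
#endif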
5533 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5534 const char *, char **);
5536 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5537 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5538 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5539 otherwise. */
5541 static bool
5542 cortex_a8_erratum_scan (bfd *input_bfd,
5543 struct bfd_link_info *info,
5544 struct a8_erratum_fix **a8_fixes_p,
5545 unsigned int *num_a8_fixes_p,
5546 unsigned int *a8_fix_table_size_p,
5547 struct a8_erratum_reloc *a8_relocs,
5548 unsigned int num_a8_relocs,
5549 unsigned prev_num_a8_fixes,
5550 bool *stub_changed_p)
5552 asection *section;
5553 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5554 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
5555 unsigned int num_a8_fixes = *num_a8_fixes_p;
5556 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
5558 if (htab == NULL)
5559 return false;
5561 for (section = input_bfd->sections;
5562 section != NULL;
5563 section = section->next)
5565 bfd_byte *contents = NULL;
5566 struct _arm_elf_section_data *sec_data;
5567 unsigned int span;
5568 bfd_vma base_vma;
5570 if (elf_section_type (section) != SHT_PROGBITS
5571 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
5572 || (section->flags & SEC_EXCLUDE) != 0
5573 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
5574 || (section->output_section == bfd_abs_section_ptr))
5575 continue;
5577 base_vma = section->output_section->vma + section->output_offset;
5579 if (elf_section_data (section)->this_hdr.contents != NULL)
5580 contents = elf_section_data (section)->this_hdr.contents;
5581 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
5582 return true;
5584 sec_data = elf32_arm_section_data (section);
5586 for (span = 0; span < sec_data->mapcount; span++)
5588 unsigned int span_start = sec_data->map[span].vma;
5589 unsigned int span_end = (span == sec_data->mapcount - 1)
5590 ? section->size : sec_data->map[span + 1].vma;
5591 unsigned int i;
5592 char span_type = sec_data->map[span].type;
5593 bool last_was_32bit = false, last_was_branch = false;
5595 if (span_type != 't')
5596 continue;
5598 /* Span is entirely within a single 4KB region: skip scanning. */
5599 if (((base_vma + span_start) & ~0xfff)
5600 == ((base_vma + span_end) & ~0xfff))
5601 continue;
5603 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5605 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5606 * The branch target is in the same 4KB region as the
5607 first half of the branch.
5608 * The instruction before the branch is a 32-bit
5609 length non-branch instruction. */
5610 for (i = span_start; i < span_end;)
5612 unsigned int insn = bfd_getl16 (&contents[i]);
5613 bool insn_32bit = false, is_blx = false, is_b = false;
5614 bool is_bl = false, is_bcc = false, is_32bit_branch;
5616 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5617 insn_32bit = true;
5619 if (insn_32bit)
5621 /* Load the rest of the insn (in manual-friendly order). */
5622 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5624 /* Encoding T4: B<c>.W. */
5625 is_b = (insn & 0xf800d000) == 0xf0009000;
5626 /* Encoding T1: BL<c>.W. */
5627 is_bl = (insn & 0xf800d000) == 0xf000d000;
5628 /* Encoding T2: BLX<c>.W. */
5629 is_blx = (insn & 0xf800d000) == 0xf000c000;
5630 /* Encoding T3: B<c>.W (not permitted in IT block). */
5631 is_bcc = (insn & 0xf800d000) == 0xf0008000
5632 && (insn & 0x07f00000) != 0x03800000;
5635 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5637 if (((base_vma + i) & 0xfff) == 0xffe
5638 && insn_32bit
5639 && is_32bit_branch
5640 && last_was_32bit
5641 && ! last_was_branch)
5643 bfd_signed_vma offset = 0;
5644 bool force_target_arm = false;
5645 bool force_target_thumb = false;
5646 bfd_vma target;
5647 enum elf32_arm_stub_type stub_type = arm_stub_none;
5648 struct a8_erratum_reloc key, *found;
5649 bool use_plt = false;
5651 key.from = base_vma + i;
5652 found = (struct a8_erratum_reloc *)
5653 bsearch (&key, a8_relocs, num_a8_relocs,
5654 sizeof (struct a8_erratum_reloc),
5655 &a8_reloc_compare);
5657 if (found)
5659 char *error_message = NULL;
5660 struct elf_link_hash_entry *entry;
5662 /* We don't care about the error returned from this
5663 function, only whether there is glue or not. */
5664 entry = find_thumb_glue (info, found->sym_name,
5665 &error_message);
5667 if (entry)
5668 found->non_a8_stub = true;
5670 /* Keep a simpler condition, for the sake of clarity. */
5671 if (htab->root.splt != NULL && found->hash != NULL
5672 && found->hash->root.plt.offset != (bfd_vma) -1)
5673 use_plt = true;
5675 if (found->r_type == R_ARM_THM_CALL)
5677 if (found->branch_type == ST_BRANCH_TO_ARM
5678 || use_plt)
5679 force_target_arm = true;
5680 else
5681 force_target_thumb = true;
5685 /* Check if we have an offending branch instruction. */
5687 if (found && found->non_a8_stub)
5688 /* We've already made a stub for this instruction, e.g.
5689 it's a long branch or a Thumb->ARM stub. Assume that
5690 stub will suffice to work around the A8 erratum (see
5691 setting of always_after_branch above). */
5693 else if (is_bcc)
5695 offset = (insn & 0x7ff) << 1;
5696 offset |= (insn & 0x3f0000) >> 4;
5697 offset |= (insn & 0x2000) ? 0x40000 : 0;
5698 offset |= (insn & 0x800) ? 0x80000 : 0;
5699 offset |= (insn & 0x4000000) ? 0x100000 : 0;
5700 if (offset & 0x100000)
5701 offset |= ~ ((bfd_signed_vma) 0xfffff);
5702 stub_type = arm_stub_a8_veneer_b_cond;
5704 else if (is_b || is_bl || is_blx)
5706 int s = (insn & 0x4000000) != 0;
5707 int j1 = (insn & 0x2000) != 0;
5708 int j2 = (insn & 0x800) != 0;
5709 int i1 = !(j1 ^ s);
5710 int i2 = !(j2 ^ s);
5712 offset = (insn & 0x7ff) << 1;
5713 offset |= (insn & 0x3ff0000) >> 4;
5714 offset |= i2 << 22;
5715 offset |= i1 << 23;
5716 offset |= s << 24;
5717 if (offset & 0x1000000)
5718 offset |= ~ ((bfd_signed_vma) 0xffffff);
5720 if (is_blx)
5721 offset &= ~ ((bfd_signed_vma) 3);
5723 stub_type = is_blx ? arm_stub_a8_veneer_blx :
5724 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5727 if (stub_type != arm_stub_none)
5729 bfd_vma pc_for_insn = base_vma + i + 4;
5731 /* The original instruction is a BL, but the target is
5732 an ARM instruction. If we were not making a stub,
5733 the BL would have been converted to a BLX. Use the
5734 BLX stub instead in that case. */
5735 if (htab->use_blx && force_target_arm
5736 && stub_type == arm_stub_a8_veneer_bl)
5738 stub_type = arm_stub_a8_veneer_blx;
5739 is_blx = true;
5740 is_bl = false;
5742 /* Conversely, if the original instruction was
5743 BLX but the target is Thumb mode, use the BL
5744 stub. */
5745 else if (force_target_thumb
5746 && stub_type == arm_stub_a8_veneer_blx)
5748 stub_type = arm_stub_a8_veneer_bl;
5749 is_blx = false;
5750 is_bl = true;
5753 if (is_blx)
5754 pc_for_insn &= ~ ((bfd_vma) 3);
5756 /* If we found a relocation, use the proper destination,
5757 not the offset in the (unrelocated) instruction.
5758 Note this is always done if we switched the stub type
5759 above. */
5760 if (found)
5761 offset =
5762 (bfd_signed_vma) (found->destination - pc_for_insn);
5764 /* If the stub will use a Thumb-mode branch to a
5765 PLT target, redirect it to the preceding Thumb
5766 entry point. */
5767 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5768 offset -= PLT_THUMB_STUB_SIZE;
5770 target = pc_for_insn + offset;
5772 /* The BLX stub is ARM-mode code. Adjust the offset to
5773 take the different PC value (+8 instead of +4) into
5774 account. */
5775 if (stub_type == arm_stub_a8_veneer_blx)
5776 offset += 4;
5778 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5780 char *stub_name = NULL;
5782 if (num_a8_fixes == a8_fix_table_size)
5784 a8_fix_table_size *= 2;
5785 a8_fixes = (struct a8_erratum_fix *)
5786 bfd_realloc (a8_fixes,
5787 sizeof (struct a8_erratum_fix)
5788 * a8_fix_table_size);
5791 if (num_a8_fixes < prev_num_a8_fixes)
5793 /* If we're doing a subsequent scan,
5794 check if we've found the same fix as
5795 before, and try and reuse the stub
5796 name. */
5797 stub_name = a8_fixes[num_a8_fixes].stub_name;
5798 if ((a8_fixes[num_a8_fixes].section != section)
5799 || (a8_fixes[num_a8_fixes].offset != i))
5801 free (stub_name);
5802 stub_name = NULL;
5803 *stub_changed_p = true;
5807 if (!stub_name)
5809 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5810 if (stub_name != NULL)
5811 sprintf (stub_name, "%x:%x", section->id, i);
5814 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5815 a8_fixes[num_a8_fixes].section = section;
5816 a8_fixes[num_a8_fixes].offset = i;
5817 a8_fixes[num_a8_fixes].target_offset =
5818 target - base_vma;
5819 a8_fixes[num_a8_fixes].orig_insn = insn;
5820 a8_fixes[num_a8_fixes].stub_name = stub_name;
5821 a8_fixes[num_a8_fixes].stub_type = stub_type;
5822 a8_fixes[num_a8_fixes].branch_type =
5823 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5825 num_a8_fixes++;
5830 i += insn_32bit ? 4 : 2;
5831 last_was_32bit = insn_32bit;
5832 last_was_branch = is_32bit_branch;
5836 if (elf_section_data (section)->this_hdr.contents == NULL)
5837 free (contents);
5840 *a8_fixes_p = a8_fixes;
5841 *num_a8_fixes_p = num_a8_fixes;
5842 *a8_fix_table_size_p = a8_fix_table_size;
5844 return false;
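/* A minimal sketch of the two checks at the heart of the scan above,
   pulled out as standalone helpers.  INSN is assumed to hold the two
   halfwords of a 32-bit Thumb instruction in "manual-friendly" order,
   i.e. the first halfword in bits 16-31; the function names are
   illustrative only.  */
#if 0 /* Illustrative sketch only.  */
/* True if a 32-bit Thumb instruction whose first halfword is at ADDR
   straddles a 4KB boundary, i.e. the first halfword occupies the last
   two bytes of a page.  */
static bool
straddles_4k_page (bfd_vma addr)
{
  return (addr & 0xfff) == 0xffe;
}

/* Decode the branch offset of a 32-bit Thumb BL/BLX/B.W instruction,
   mirroring the S/J1/J2 -> I1/I2 expansion used in the scanner.  */
static bfd_signed_vma
decode_thumb2_bl_offset (unsigned int insn)
{
  int s  = (insn & 0x4000000) != 0;	/* S bit.  */
  int j1 = (insn & 0x2000) != 0;	/* J1 bit.  */
  int j2 = (insn & 0x800) != 0;		/* J2 bit.  */
  int i1 = !(j1 ^ s);
  int i2 = !(j2 ^ s);
  bfd_signed_vma offset;

  offset = (insn & 0x7ff) << 1;		/* imm11 << 1.  */
  offset |= (insn & 0x3ff0000) >> 4;	/* imm10 placed at bit 12.  */
  offset |= (bfd_signed_vma) i2 << 22;
  offset |= (bfd_signed_vma) i1 << 23;
  offset |= (bfd_signed_vma) s << 24;

  /* Sign-extend from 25 bits.  */
  if (offset & 0x1000000)
    offset |= ~ ((bfd_signed_vma) 0xffffff);

  return offset;
}
#endif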
5847 /* Create or update a stub entry depending on whether the stub can already be
5848 found in HTAB. The stub is identified by:
5849 - its type STUB_TYPE
5850 - its source branch (note that several can share the same stub) whose
5851 section and relocation (if any) are given by SECTION and IRELA
5852 respectively
5853 - its target symbol whose input section, hash, name, value and branch type
5854 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5855 respectively
5857 If found, the value of the stub's target symbol is updated from SYM_VALUE
5858 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5859 TRUE and the stub entry is initialized.
5861 Returns the stub that was created or updated, or NULL if an error
5862 occurred. */
5864 static struct elf32_arm_stub_hash_entry *
5865 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5866 enum elf32_arm_stub_type stub_type, asection *section,
5867 Elf_Internal_Rela *irela, asection *sym_sec,
5868 struct elf32_arm_link_hash_entry *hash, char *sym_name,
5869 bfd_vma sym_value, enum arm_st_branch_type branch_type,
5870 bool *new_stub)
5872 const asection *id_sec;
5873 char *stub_name;
5874 struct elf32_arm_stub_hash_entry *stub_entry;
5875 unsigned int r_type;
5876 bool sym_claimed = arm_stub_sym_claimed (stub_type);
5878 BFD_ASSERT (stub_type != arm_stub_none);
5879 *new_stub = false;
5881 if (sym_claimed)
5882 stub_name = sym_name;
5883 else
5885 BFD_ASSERT (irela);
5886 BFD_ASSERT (section);
5887 BFD_ASSERT (section->id <= htab->top_id);
5889 /* Support for grouping stub sections. */
5890 id_sec = htab->stub_group[section->id].link_sec;
5892 /* Get the name of this stub. */
5893 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5894 stub_type);
5895 if (!stub_name)
5896 return NULL;
5899 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, false,
5900 false);
5901 /* The proper stub has already been created, just update its value. */
5902 if (stub_entry != NULL)
5904 if (!sym_claimed)
5905 free (stub_name);
5906 stub_entry->target_value = sym_value;
5907 return stub_entry;
5910 stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5911 if (stub_entry == NULL)
5913 if (!sym_claimed)
5914 free (stub_name);
5915 return NULL;
5918 stub_entry->target_value = sym_value;
5919 stub_entry->target_section = sym_sec;
5920 stub_entry->stub_type = stub_type;
5921 stub_entry->h = hash;
5922 stub_entry->branch_type = branch_type;
5924 if (sym_claimed)
5925 stub_entry->output_name = sym_name;
5926 else
5928 if (sym_name == NULL)
5929 sym_name = "unnamed";
5930 stub_entry->output_name = (char *)
5931 bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5932 + strlen (sym_name));
5933 if (stub_entry->output_name == NULL)
5935 free (stub_name);
5936 return NULL;
5939 /* For historical reasons, use the existing names for ARM-to-Thumb and
5940 Thumb-to-ARM stubs. */
5941 r_type = ELF32_R_TYPE (irela->r_info);
5942 if ((r_type == (unsigned int) R_ARM_THM_CALL
5943 || r_type == (unsigned int) R_ARM_THM_JUMP24
5944 || r_type == (unsigned int) R_ARM_THM_JUMP19)
5945 && branch_type == ST_BRANCH_TO_ARM)
5946 sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5947 else if ((r_type == (unsigned int) R_ARM_CALL
5948 || r_type == (unsigned int) R_ARM_JUMP24)
5949 && branch_type == ST_BRANCH_TO_THUMB)
5950 sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5951 else
5952 sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5955 *new_stub = true;
5956 return stub_entry;
5959 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5960 gateway veneer to transition from non-secure to secure state, and create them
5961 accordingly.
5963 "ARMv8-M Security Extensions: Requirements on Development Tools" document
5964 defines the conditions that govern Secure Gateway veneer creation for a
5965 given symbol <SYM> as follows:
5966 - it has function type
5967 - it has non local binding
5968 - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5969 same type, binding and value as <SYM> (called normal symbol).
5970 An entry function can handle secure state transition itself in which case
5971 its special symbol would have a different value from the normal symbol.
5973 OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5974 entry mapping while HTAB gives the name to hash entry mapping.
5975 *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
5976 created.
5978 The return value gives whether a stub failed to be allocated. */
5980 static bool
5981 cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
5982 obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
5983 int *cmse_stub_created)
5985 const struct elf_backend_data *bed;
5986 Elf_Internal_Shdr *symtab_hdr;
5987 unsigned i, j, sym_count, ext_start;
5988 Elf_Internal_Sym *cmse_sym, *local_syms;
5989 struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
5990 enum arm_st_branch_type branch_type;
5991 char *sym_name, *lsym_name;
5992 bfd_vma sym_value;
5993 asection *section;
5994 struct elf32_arm_stub_hash_entry *stub_entry;
5995 bool is_v8m, new_stub, cmse_invalid, ret = true;
5997 bed = get_elf_backend_data (input_bfd);
5998 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5999 sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
6000 ext_start = symtab_hdr->sh_info;
6001 is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
6002 && out_attr[Tag_CPU_arch_profile].i == 'M');
6004 local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
6005 if (local_syms == NULL)
6006 local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6007 symtab_hdr->sh_info, 0, NULL, NULL,
6008 NULL);
6009 if (symtab_hdr->sh_info && local_syms == NULL)
6010 return false;
6012 /* Scan symbols. */
6013 for (i = 0; i < sym_count; i++)
6015 cmse_invalid = false;
6017 if (i < ext_start)
6019 cmse_sym = &local_syms[i];
6020 sym_name = bfd_elf_string_from_elf_section (input_bfd,
6021 symtab_hdr->sh_link,
6022 cmse_sym->st_name);
6023 if (!sym_name || !startswith (sym_name, CMSE_PREFIX))
6024 continue;
6026 /* Special symbol with local binding. */
6027 cmse_invalid = true;
6029 else
6031 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
6032 if (cmse_hash == NULL)
6033 continue;
6035 sym_name = (char *) cmse_hash->root.root.root.string;
6036 if (!startswith (sym_name, CMSE_PREFIX))
6037 continue;
6039 /* Special symbol has incorrect binding or type. */
6040 if ((cmse_hash->root.root.type != bfd_link_hash_defined
6041 && cmse_hash->root.root.type != bfd_link_hash_defweak)
6042 || cmse_hash->root.type != STT_FUNC)
6043 cmse_invalid = true;
6046 if (!is_v8m)
6048 _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
6049 "ARMv8-M architecture or later"),
6050 input_bfd, sym_name);
6051 is_v8m = true; /* Avoid multiple warning. */
6052 ret = false;
6055 if (cmse_invalid)
6057 _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
6058 " a global or weak function symbol"),
6059 input_bfd, sym_name);
6060 ret = false;
6061 if (i < ext_start)
6062 continue;
6065 sym_name += strlen (CMSE_PREFIX);
6066 hash = (struct elf32_arm_link_hash_entry *)
6067 elf_link_hash_lookup (&(htab)->root, sym_name, false, false, true);
6069 /* No associated normal symbol or it is neither global nor weak. */
6070 if (!hash
6071 || (hash->root.root.type != bfd_link_hash_defined
6072 && hash->root.root.type != bfd_link_hash_defweak)
6073 || hash->root.type != STT_FUNC)
6075 /* Initialize here to avoid warning about use of possibly
6076 uninitialized variable. */
6077 j = 0;
6079 if (!hash)
6081 /* Searching for a normal symbol with local binding. */
6082 for (; j < ext_start; j++)
6084 lsym_name =
6085 bfd_elf_string_from_elf_section (input_bfd,
6086 symtab_hdr->sh_link,
6087 local_syms[j].st_name);
6088 if (!strcmp (sym_name, lsym_name))
6089 break;
6093 if (hash || j < ext_start)
6095 _bfd_error_handler
6096 (_("%pB: invalid standard symbol `%s'; it must be "
6097 "a global or weak function symbol"),
6098 input_bfd, sym_name);
6100 else
6101 _bfd_error_handler
6102 (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
6103 ret = false;
6104 if (!hash)
6105 continue;
6108 sym_value = hash->root.root.u.def.value;
6109 section = hash->root.root.u.def.section;
6111 if (cmse_hash->root.root.u.def.section != section)
6113 _bfd_error_handler
6114 (_("%pB: `%s' and its special symbol are in different sections"),
6115 input_bfd, sym_name);
6116 ret = false;
6118 if (cmse_hash->root.root.u.def.value != sym_value)
6119 continue; /* Ignore: could be an entry function starting with SG. */
6121 /* If this section is a link-once section that will be discarded, then
6122 don't create any stubs. */
6123 if (section->output_section == NULL)
6125 _bfd_error_handler
6126 (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
6127 continue;
6130 if (hash->root.size == 0)
6132 _bfd_error_handler
6133 (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
6134 ret = false;
6137 if (!ret)
6138 continue;
6139 branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6140 stub_entry
6141 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6142 NULL, NULL, section, hash, sym_name,
6143 sym_value, branch_type, &new_stub);
6145 if (stub_entry == NULL)
6146 ret = false;
6147 else
6149 BFD_ASSERT (new_stub);
6150 (*cmse_stub_created)++;
6154 if (!symtab_hdr->contents)
6155 free (local_syms);
6156 return ret;
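/* A minimal sketch of the special/normal symbol pairing checked above,
   using the "__acle_se_" prefix described in the comment before cmse_scan.
   The helper name and EXAMPLE_CMSE_PREFIX macro are illustrative stand-ins
   for the real CMSE_PREFIX/startswith pair.  */
#if 0 /* Illustrative sketch only.  */
#include <string.h>

#define EXAMPLE_CMSE_PREFIX "__acle_se_"

/* Return the name of the normal symbol paired with the special symbol
   SPECIAL_NAME, or NULL if SPECIAL_NAME is not a special symbol.  For
   instance, "__acle_se_foo" pairs with "foo"; cmse_scan then requires
   "foo" to be a global or weak STT_FUNC defined in the same section.  */
static const char *
cmse_normal_symbol_name (const char *special_name)
{
  size_t len = strlen (EXAMPLE_CMSE_PREFIX);

  if (strncmp (special_name, EXAMPLE_CMSE_PREFIX, len) != 0)
    return NULL;
  return special_name + len;
}
#endif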
6159 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6160 code entry function, i.e. one that can be called from non-secure code without using a
6161 veneer. */
6163 static bool
6164 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6166 bfd_byte contents[4];
6167 uint32_t first_insn;
6168 asection *section;
6169 file_ptr offset;
6170 bfd *abfd;
6172 /* Defined symbol of function type. */
6173 if (hash->root.root.type != bfd_link_hash_defined
6174 && hash->root.root.type != bfd_link_hash_defweak)
6175 return false;
6176 if (hash->root.type != STT_FUNC)
6177 return false;
6179 /* Read first instruction. */
6180 section = hash->root.root.u.def.section;
6181 abfd = section->owner;
6182 offset = hash->root.root.u.def.value - section->vma;
6183 if (!bfd_get_section_contents (abfd, section, contents, offset,
6184 sizeof (contents)))
6185 return false;
6187 first_insn = bfd_get_32 (abfd, contents);
6189 /* Starts with an SG instruction. */
6190 return first_insn == 0xe97fe97f;
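/* For reference: SG is encoded as two identical halfwords, 0xe97f 0xe97f,
   so the 32-bit value read above compares equal to 0xe97fe97f regardless
   of which halfword comes first in memory.  */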
6193 /* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
6194 secure gateway veneer (i.e. the veneer was not in the input import library)
6195 and there is no output import library (GEN_INFO->out_implib_bfd is NULL). */
6197 static bool
6198 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6200 struct elf32_arm_stub_hash_entry *stub_entry;
6201 struct bfd_link_info *info;
6203 /* Massage our args to the form they really have. */
6204 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6205 info = (struct bfd_link_info *) gen_info;
6207 if (info->out_implib_bfd)
6208 return true;
6210 if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6211 return true;
6213 if (stub_entry->stub_offset == (bfd_vma) -1)
6214 _bfd_error_handler (" %s", stub_entry->output_name);
6216 return true;
6219 /* Set the offset of each secure gateway veneer so that its address remains
6220 identical to the one in the input import library referred to by
6221 HTAB->in_implib_bfd. A warning is issued for veneers that disappeared
6222 (present in the input import library but absent from the executable being
6223 linked) or if new veneers appeared and there is no output import library
6224 (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6225 number of secure gateway veneers found in the input import library).
6227 The function returns whether an error occurred. If no error occurred,
6228 *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
6229 and this function, and HTAB->new_cmse_stub_offset is set to the offset just
6230 past the highest veneer observed, so that new veneers are laid out after it. */
6232 static bool
6233 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6234 struct elf32_arm_link_hash_table *htab,
6235 int *cmse_stub_created)
6237 long symsize;
6238 char *sym_name;
6239 flagword flags;
6240 long i, symcount;
6241 bfd *in_implib_bfd;
6242 asection *stub_out_sec;
6243 bool ret = true;
6244 Elf_Internal_Sym *intsym;
6245 const char *out_sec_name;
6246 bfd_size_type cmse_stub_size;
6247 asymbol **sympp = NULL, *sym;
6248 struct elf32_arm_link_hash_entry *hash;
6249 const insn_sequence *cmse_stub_template;
6250 struct elf32_arm_stub_hash_entry *stub_entry;
6251 int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6252 bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6253 bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6255 /* No input secure gateway import library. */
6256 if (!htab->in_implib_bfd)
6257 return true;
6259 in_implib_bfd = htab->in_implib_bfd;
6260 if (!htab->cmse_implib)
6262 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6263 "Gateway import libraries"), in_implib_bfd);
6264 return false;
6267 /* Get symbol table size. */
6268 symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6269 if (symsize < 0)
6270 return false;
6272 /* Read in the input secure gateway import library's symbol table. */
6273 sympp = (asymbol **) bfd_malloc (symsize);
6274 if (sympp == NULL)
6275 return false;
6277 symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6278 if (symcount < 0)
6280 ret = false;
6281 goto free_sym_buf;
6284 htab->new_cmse_stub_offset = 0;
6285 cmse_stub_size =
6286 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6287 &cmse_stub_template,
6288 &cmse_stub_template_size);
6289 out_sec_name =
6290 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6291 stub_out_sec =
6292 bfd_get_section_by_name (htab->obfd, out_sec_name);
6293 if (stub_out_sec != NULL)
6294 cmse_stub_sec_vma = stub_out_sec->vma;
6296 /* Set addresses of veneers mentioned in the input secure gateway import
6297 library's symbol table. */
6298 for (i = 0; i < symcount; i++)
6300 sym = sympp[i];
6301 flags = sym->flags;
6302 sym_name = (char *) bfd_asymbol_name (sym);
6303 intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6305 if (sym->section != bfd_abs_section_ptr
6306 || !(flags & (BSF_GLOBAL | BSF_WEAK))
6307 || (flags & BSF_FUNCTION) != BSF_FUNCTION
6308 || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6309 != ST_BRANCH_TO_THUMB))
6311 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6312 "symbol should be absolute, global and "
6313 "refer to Thumb functions"),
6314 in_implib_bfd, sym_name);
6315 ret = false;
6316 continue;
6319 veneer_value = bfd_asymbol_value (sym);
6320 stub_offset = veneer_value - cmse_stub_sec_vma;
6321 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6322 false, false);
6323 hash = (struct elf32_arm_link_hash_entry *)
6324 elf_link_hash_lookup (&(htab)->root, sym_name, false, false, true);
6326 /* A stub entry should have been created by cmse_scan, or the symbol should
6327 be that of a secure function callable from non-secure code. */
6328 if (!stub_entry && !hash)
6330 bool new_stub;
6332 _bfd_error_handler
6333 (_("entry function `%s' disappeared from secure code"), sym_name);
6334 hash = (struct elf32_arm_link_hash_entry *)
6335 elf_link_hash_lookup (&(htab)->root, sym_name, true, true, true);
6336 stub_entry
6337 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6338 NULL, NULL, bfd_abs_section_ptr, hash,
6339 sym_name, veneer_value,
6340 ST_BRANCH_TO_THUMB, &new_stub);
6341 if (stub_entry == NULL)
6342 ret = false;
6343 else
6345 BFD_ASSERT (new_stub);
6346 new_cmse_stubs_created++;
6347 (*cmse_stub_created)++;
6349 stub_entry->stub_template_size = stub_entry->stub_size = 0;
6350 stub_entry->stub_offset = stub_offset;
6352 /* Symbol found is not callable from non-secure code. */
6353 else if (!stub_entry)
6355 if (!cmse_entry_fct_p (hash))
6357 _bfd_error_handler (_("`%s' refers to a non entry function"),
6358 sym_name);
6359 ret = false;
6361 continue;
6363 else
6365 /* Only stubs for SG veneers should have been created. */
6366 BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6368 /* Check visibility hasn't changed. */
6369 if (!!(flags & BSF_GLOBAL)
6370 != (hash->root.root.type == bfd_link_hash_defined))
6371 _bfd_error_handler
6372 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6373 sym_name);
6375 stub_entry->stub_offset = stub_offset;
6378 /* Size should match that of a SG veneer. */
6379 if (intsym->st_size != cmse_stub_size)
6381 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6382 in_implib_bfd, sym_name);
6383 ret = false;
6386 /* Previous veneer address is before current SG veneer section. */
6387 if (veneer_value < cmse_stub_sec_vma)
6389 /* Avoid offset underflow. */
6390 if (stub_entry)
6391 stub_entry->stub_offset = 0;
6392 stub_offset = 0;
6393 ret = false;
6396 /* Complain if stub offset not a multiple of stub size. */
6397 if (stub_offset % cmse_stub_size)
6399 _bfd_error_handler
6400 (_("offset of veneer for entry function `%s' not a multiple of "
6401 "its size"), sym_name);
6402 ret = false;
6405 if (!ret)
6406 continue;
6408 new_cmse_stubs_created--;
6409 if (veneer_value < cmse_stub_array_start)
6410 cmse_stub_array_start = veneer_value;
6411 next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6412 if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6413 htab->new_cmse_stub_offset = next_cmse_stub_offset;
6416 if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6418 BFD_ASSERT (new_cmse_stubs_created > 0);
6419 _bfd_error_handler
6420 (_("new entry function(s) introduced but no output import library "
6421 "specified:"));
6422 bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6425 if (cmse_stub_array_start != cmse_stub_sec_vma)
6427 _bfd_error_handler
6428 (_("start address of `%s' is different from previous link"),
6429 out_sec_name);
6430 ret = false;
6433 free_sym_buf:
6434 free (sympp);
6435 return ret;
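/* A minimal sketch of the veneer spacing used above: each SG veneer
   occupies its size rounded up to a multiple of 8 bytes, and
   new_cmse_stub_offset ends up just past the highest such slot so that
   newly created veneers are laid out after it.  The helper name is
   illustrative only.  */
#if 0 /* Illustrative sketch only.  */
static bfd_vma
next_sg_veneer_offset (bfd_vma stub_offset, bfd_size_type stub_size)
{
  /* E.g. a 12-byte veneer at offset 0x20 yields a next offset of 0x30.  */
  return stub_offset + ((stub_size + 7) & ~(bfd_size_type) 7);
}
#endif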
6438 /* Determine and set the size of the stub section for a final link.
6440 The basic idea here is to examine all the relocations looking for
6441 PC-relative calls to a target that is unreachable with a "bl"
6442 instruction. */
6444 bool
6445 elf32_arm_size_stubs (bfd *output_bfd,
6446 bfd *stub_bfd,
6447 struct bfd_link_info *info,
6448 bfd_signed_vma group_size,
6449 asection * (*add_stub_section) (const char *, asection *,
6450 asection *,
6451 unsigned int),
6452 void (*layout_sections_again) (void))
6454 bool ret = true;
6455 obj_attribute *out_attr;
6456 int cmse_stub_created = 0;
6457 bfd_size_type stub_group_size;
6458 bool m_profile, stubs_always_after_branch, first_veneer_scan = true;
6459 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
6460 struct a8_erratum_fix *a8_fixes = NULL;
6461 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
6462 struct a8_erratum_reloc *a8_relocs = NULL;
6463 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
6465 if (htab == NULL)
6466 return false;
6468 if (htab->fix_cortex_a8)
6470 a8_fixes = (struct a8_erratum_fix *)
6471 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
6472 a8_relocs = (struct a8_erratum_reloc *)
6473 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
6476 /* Propagate mach to stub bfd, because it may not have been
6477 finalized when we created stub_bfd. */
6478 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
6479 bfd_get_mach (output_bfd));
6481 /* Stash our params away. */
6482 htab->stub_bfd = stub_bfd;
6483 htab->add_stub_section = add_stub_section;
6484 htab->layout_sections_again = layout_sections_again;
6485 stubs_always_after_branch = group_size < 0;
6487 out_attr = elf_known_obj_attributes_proc (output_bfd);
6488 m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
6490 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6491 as the first half of a 32-bit branch straddling two 4K pages. This is a
6492 crude way of enforcing that. */
6493 if (htab->fix_cortex_a8)
6494 stubs_always_after_branch = 1;
6496 if (group_size < 0)
6497 stub_group_size = -group_size;
6498 else
6499 stub_group_size = group_size;
6501 if (stub_group_size == 1)
6503 /* Default values. */
6504 /* The Thumb branch range of +-4MB has to be used as the default
6505 maximum size (a given section can contain both ARM and Thumb
6506 code, so the worst case has to be taken into account).
6508 This value is 24K less than that, which allows for 2025
6509 12-byte stubs. If we exceed that, then we will fail to link.
6510 The user will have to relink with an explicit group size
6511 option. */
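/* For reference: 4 MB is 4194304 bytes, and 4194304 - 4170000 = 24304,
   i.e. roughly 24K, which leaves room for about 2025 stubs of 12 bytes
   each. */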
6512 stub_group_size = 4170000;
6515 group_sections (htab, stub_group_size, stubs_always_after_branch);
6517 /* If we're applying the Cortex-A8 fix, we need to determine the
6518 program header size now, because we cannot change it later --
6519 that could alter section placements. Notice the A8 erratum fix
6520 ends up requiring the section addresses to remain unchanged
6521 modulo the page size. That's something we cannot represent
6522 inside BFD, and we don't want to force the section alignment to
6523 be the page size. */
6524 if (htab->fix_cortex_a8)
6525 (*htab->layout_sections_again) ();
6527 while (1)
6529 bfd *input_bfd;
6530 unsigned int bfd_indx;
6531 asection *stub_sec;
6532 enum elf32_arm_stub_type stub_type;
6533 bool stub_changed = false;
6534 unsigned prev_num_a8_fixes = num_a8_fixes;
6536 num_a8_fixes = 0;
6537 for (input_bfd = info->input_bfds, bfd_indx = 0;
6538 input_bfd != NULL;
6539 input_bfd = input_bfd->link.next, bfd_indx++)
6541 Elf_Internal_Shdr *symtab_hdr;
6542 asection *section;
6543 Elf_Internal_Sym *local_syms = NULL;
6545 if (!is_arm_elf (input_bfd))
6546 continue;
6547 if ((input_bfd->flags & DYNAMIC) != 0
6548 && (elf_sym_hashes (input_bfd) == NULL
6549 || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0))
6550 continue;
6552 num_a8_relocs = 0;
6554 /* We'll need the symbol table in a second. */
6555 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
6556 if (symtab_hdr->sh_info == 0)
6557 continue;
6559 /* Limit the scan of symbols to object files whose profile is
6560 Microcontroller, so as not to hinder performance in the general case. */
6561 if (m_profile && first_veneer_scan)
6563 struct elf_link_hash_entry **sym_hashes;
6565 sym_hashes = elf_sym_hashes (input_bfd);
6566 if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
6567 &cmse_stub_created))
6568 goto error_ret_free_local;
6570 if (cmse_stub_created != 0)
6571 stub_changed = true;
6574 /* Walk over each section attached to the input bfd. */
6575 for (section = input_bfd->sections;
6576 section != NULL;
6577 section = section->next)
6579 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
6581 /* If there aren't any relocs, then there's nothing more
6582 to do. */
6583 if ((section->flags & SEC_RELOC) == 0
6584 || section->reloc_count == 0
6585 || (section->flags & SEC_CODE) == 0)
6586 continue;
6588 /* If this section is a link-once section that will be
6589 discarded, then don't create any stubs. */
6590 if (section->output_section == NULL
6591 || section->output_section->owner != output_bfd)
6592 continue;
6594 /* Get the relocs. */
6595 internal_relocs
6596 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
6597 NULL, info->keep_memory);
6598 if (internal_relocs == NULL)
6599 goto error_ret_free_local;
6601 /* Now examine each relocation. */
6602 irela = internal_relocs;
6603 irelaend = irela + section->reloc_count;
6604 for (; irela < irelaend; irela++)
6606 unsigned int r_type, r_indx;
6607 asection *sym_sec;
6608 bfd_vma sym_value;
6609 bfd_vma destination;
6610 struct elf32_arm_link_hash_entry *hash;
6611 const char *sym_name;
6612 unsigned char st_type;
6613 enum arm_st_branch_type branch_type;
6614 bool created_stub = false;
6616 r_type = ELF32_R_TYPE (irela->r_info);
6617 r_indx = ELF32_R_SYM (irela->r_info);
6619 if (r_type >= (unsigned int) R_ARM_max)
6621 bfd_set_error (bfd_error_bad_value);
6622 error_ret_free_internal:
6623 if (elf_section_data (section)->relocs == NULL)
6624 free (internal_relocs);
6625 /* Fall through. */
6626 error_ret_free_local:
6627 if (symtab_hdr->contents != (unsigned char *) local_syms)
6628 free (local_syms);
6629 return false;
6632 hash = NULL;
6633 if (r_indx >= symtab_hdr->sh_info)
6634 hash = elf32_arm_hash_entry
6635 (elf_sym_hashes (input_bfd)
6636 [r_indx - symtab_hdr->sh_info]);
6638 /* Only look for stubs on branch instructions, or
6639 non-relaxed TLSCALL. */
6640 if ((r_type != (unsigned int) R_ARM_CALL)
6641 && (r_type != (unsigned int) R_ARM_THM_CALL)
6642 && (r_type != (unsigned int) R_ARM_JUMP24)
6643 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
6644 && (r_type != (unsigned int) R_ARM_THM_XPC22)
6645 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
6646 && (r_type != (unsigned int) R_ARM_PLT32)
6647 && !((r_type == (unsigned int) R_ARM_TLS_CALL
6648 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6649 && r_type == (elf32_arm_tls_transition
6650 (info, r_type,
6651 (struct elf_link_hash_entry *) hash))
6652 && ((hash ? hash->tls_type
6653 : (elf32_arm_local_got_tls_type
6654 (input_bfd)[r_indx]))
6655 & GOT_TLS_GDESC) != 0))
6656 continue;
6658 /* Now determine the call target, its name, value,
6659 section. */
6660 sym_sec = NULL;
6661 sym_value = 0;
6662 destination = 0;
6663 sym_name = NULL;
6665 if (r_type == (unsigned int) R_ARM_TLS_CALL
6666 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6668 /* A non-relaxed TLS call. The target is the
6669 plt-resident trampoline and has nothing to do
6670 with the symbol. */
6671 BFD_ASSERT (htab->tls_trampoline > 0);
6672 sym_sec = htab->root.splt;
6673 sym_value = htab->tls_trampoline;
6674 hash = 0;
6675 st_type = STT_FUNC;
6676 branch_type = ST_BRANCH_TO_ARM;
6678 else if (!hash)
6680 /* It's a local symbol. */
6681 Elf_Internal_Sym *sym;
6683 if (local_syms == NULL)
6685 local_syms
6686 = (Elf_Internal_Sym *) symtab_hdr->contents;
6687 if (local_syms == NULL)
6688 local_syms
6689 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6690 symtab_hdr->sh_info, 0,
6691 NULL, NULL, NULL);
6692 if (local_syms == NULL)
6693 goto error_ret_free_internal;
6696 sym = local_syms + r_indx;
6697 if (sym->st_shndx == SHN_UNDEF)
6698 sym_sec = bfd_und_section_ptr;
6699 else if (sym->st_shndx == SHN_ABS)
6700 sym_sec = bfd_abs_section_ptr;
6701 else if (sym->st_shndx == SHN_COMMON)
6702 sym_sec = bfd_com_section_ptr;
6703 else
6704 sym_sec =
6705 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
6707 if (!sym_sec)
6708 /* This is an undefined symbol. It can never
6709 be resolved. */
6710 continue;
6712 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
6713 sym_value = sym->st_value;
6714 destination = (sym_value + irela->r_addend
6715 + sym_sec->output_offset
6716 + sym_sec->output_section->vma);
6717 st_type = ELF_ST_TYPE (sym->st_info);
6718 branch_type =
6719 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
6720 sym_name
6721 = bfd_elf_string_from_elf_section (input_bfd,
6722 symtab_hdr->sh_link,
6723 sym->st_name);
6725 else
6727 /* It's an external symbol. */
6728 while (hash->root.root.type == bfd_link_hash_indirect
6729 || hash->root.root.type == bfd_link_hash_warning)
6730 hash = ((struct elf32_arm_link_hash_entry *)
6731 hash->root.root.u.i.link);
6733 if (hash->root.root.type == bfd_link_hash_defined
6734 || hash->root.root.type == bfd_link_hash_defweak)
6736 sym_sec = hash->root.root.u.def.section;
6737 sym_value = hash->root.root.u.def.value;
6739 struct elf32_arm_link_hash_table *globals =
6740 elf32_arm_hash_table (info);
6742 /* For a destination in a shared library,
6743 use the PLT stub as target address to
6744 decide whether a branch stub is
6745 needed. */
6746 if (globals != NULL
6747 && globals->root.splt != NULL
6748 && hash != NULL
6749 && hash->root.plt.offset != (bfd_vma) -1)
6751 sym_sec = globals->root.splt;
6752 sym_value = hash->root.plt.offset;
6753 if (sym_sec->output_section != NULL)
6754 destination = (sym_value
6755 + sym_sec->output_offset
6756 + sym_sec->output_section->vma);
6758 else if (sym_sec->output_section != NULL)
6759 destination = (sym_value + irela->r_addend
6760 + sym_sec->output_offset
6761 + sym_sec->output_section->vma);
6763 else if ((hash->root.root.type == bfd_link_hash_undefined)
6764 || (hash->root.root.type == bfd_link_hash_undefweak))
6766 /* For a shared library, use the PLT stub as
6767 target address to decide whether a long
6768 branch stub is needed.
6769 For absolute code, such branches cannot be handled. */
6770 struct elf32_arm_link_hash_table *globals =
6771 elf32_arm_hash_table (info);
6773 if (globals != NULL
6774 && globals->root.splt != NULL
6775 && hash != NULL
6776 && hash->root.plt.offset != (bfd_vma) -1)
6778 sym_sec = globals->root.splt;
6779 sym_value = hash->root.plt.offset;
6780 if (sym_sec->output_section != NULL)
6781 destination = (sym_value
6782 + sym_sec->output_offset
6783 + sym_sec->output_section->vma);
6785 else
6786 continue;
6788 else
6790 bfd_set_error (bfd_error_bad_value);
6791 goto error_ret_free_internal;
6793 st_type = hash->root.type;
6794 branch_type =
6795 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6796 sym_name = hash->root.root.root.string;
6801 bool new_stub;
6802 struct elf32_arm_stub_hash_entry *stub_entry;
6804 /* Determine what (if any) linker stub is needed. */
6805 stub_type = arm_type_of_stub (info, section, irela,
6806 st_type, &branch_type,
6807 hash, destination, sym_sec,
6808 input_bfd, sym_name);
6809 if (stub_type == arm_stub_none)
6810 break;
6812 /* We've either created a stub for this reloc already,
6813 or we are about to. */
6814 stub_entry =
6815 elf32_arm_create_stub (htab, stub_type, section, irela,
6816 sym_sec, hash,
6817 (char *) sym_name, sym_value,
6818 branch_type, &new_stub);
6820 created_stub = stub_entry != NULL;
6821 if (!created_stub)
6822 goto error_ret_free_internal;
6823 else if (!new_stub)
6824 break;
6825 else
6826 stub_changed = true;
6828 while (0);
6830 /* Look for relocations which might trigger Cortex-A8
6831 erratum. */
6832 if (htab->fix_cortex_a8
6833 && (r_type == (unsigned int) R_ARM_THM_JUMP24
6834 || r_type == (unsigned int) R_ARM_THM_JUMP19
6835 || r_type == (unsigned int) R_ARM_THM_CALL
6836 || r_type == (unsigned int) R_ARM_THM_XPC22))
6838 bfd_vma from = section->output_section->vma
6839 + section->output_offset
6840 + irela->r_offset;
6842 if ((from & 0xfff) == 0xffe)
6844 /* Found a candidate. Note we haven't checked the
6845 destination is within 4K here: if we do so (and
6846 don't create an entry in a8_relocs) we can't tell
6847 that a branch should have been relocated when
6848 scanning later. */
6849 if (num_a8_relocs == a8_reloc_table_size)
6851 a8_reloc_table_size *= 2;
6852 a8_relocs = (struct a8_erratum_reloc *)
6853 bfd_realloc (a8_relocs,
6854 sizeof (struct a8_erratum_reloc)
6855 * a8_reloc_table_size);
6858 a8_relocs[num_a8_relocs].from = from;
6859 a8_relocs[num_a8_relocs].destination = destination;
6860 a8_relocs[num_a8_relocs].r_type = r_type;
6861 a8_relocs[num_a8_relocs].branch_type = branch_type;
6862 a8_relocs[num_a8_relocs].sym_name = sym_name;
6863 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
6864 a8_relocs[num_a8_relocs].hash = hash;
6866 num_a8_relocs++;
6871 /* We're done with the internal relocs, free them. */
6872 if (elf_section_data (section)->relocs == NULL)
6873 free (internal_relocs);
6876 if (htab->fix_cortex_a8)
6878 /* Sort relocs which might apply to Cortex-A8 erratum. */
6879 qsort (a8_relocs, num_a8_relocs,
6880 sizeof (struct a8_erratum_reloc),
6881 &a8_reloc_compare);
6883 /* Scan for branches which might trigger Cortex-A8 erratum. */
6884 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
6885 &num_a8_fixes, &a8_fix_table_size,
6886 a8_relocs, num_a8_relocs,
6887 prev_num_a8_fixes, &stub_changed)
6888 != 0)
6889 goto error_ret_free_local;
6892 if (local_syms != NULL
6893 && symtab_hdr->contents != (unsigned char *) local_syms)
6895 if (!info->keep_memory)
6896 free (local_syms);
6897 else
6898 symtab_hdr->contents = (unsigned char *) local_syms;
6902 if (first_veneer_scan
6903 && !set_cmse_veneer_addr_from_implib (info, htab,
6904 &cmse_stub_created))
6905 ret = false;
6907 if (prev_num_a8_fixes != num_a8_fixes)
6908 stub_changed = true;
6910 if (!stub_changed)
6911 break;
6913 /* OK, we've added some stubs. Find out the new size of the
6914 stub sections. */
6915 for (stub_sec = htab->stub_bfd->sections;
6916 stub_sec != NULL;
6917 stub_sec = stub_sec->next)
6919 /* Ignore non-stub sections. */
6920 if (!strstr (stub_sec->name, STUB_SUFFIX))
6921 continue;
6923 stub_sec->size = 0;
6926 /* Add new SG veneers after those already in the input import
6927 library. */
6928 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6929 stub_type++)
6931 bfd_vma *start_offset_p;
6932 asection **stub_sec_p;
6934 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6935 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6936 if (start_offset_p == NULL)
6937 continue;
6939 BFD_ASSERT (stub_sec_p != NULL);
6940 if (*stub_sec_p != NULL)
6941 (*stub_sec_p)->size = *start_offset_p;
6944 /* Compute stub section size, considering padding. */
6945 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
6946 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6947 stub_type++)
6949 int size, padding;
6950 asection **stub_sec_p;
6952 padding = arm_dedicated_stub_section_padding (stub_type);
6953 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6954 /* Skip if no stub input section or no stub section padding
6955 required. */
6956 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
6957 continue;
6958 /* Stub section padding required but no dedicated section. */
6959 BFD_ASSERT (stub_sec_p);
6961 size = (*stub_sec_p)->size;
6962 size = (size + padding - 1) & ~(padding - 1);
6963 (*stub_sec_p)->size = size;
6966 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6967 if (htab->fix_cortex_a8)
6968 for (i = 0; i < num_a8_fixes; i++)
6970 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
6971 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
6973 if (stub_sec == NULL)
6974 return false;
6976 stub_sec->size
6977 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
6978 NULL);
6982 /* Ask the linker to do its stuff. */
6983 (*htab->layout_sections_again) ();
6984 first_veneer_scan = false;
6987 /* Add stubs for Cortex-A8 erratum fixes now. */
6988 if (htab->fix_cortex_a8)
6990 for (i = 0; i < num_a8_fixes; i++)
6992 struct elf32_arm_stub_hash_entry *stub_entry;
6993 char *stub_name = a8_fixes[i].stub_name;
6994 asection *section = a8_fixes[i].section;
6995 unsigned int section_id = a8_fixes[i].section->id;
6996 asection *link_sec = htab->stub_group[section_id].link_sec;
6997 asection *stub_sec = htab->stub_group[section_id].stub_sec;
6998 const insn_sequence *template_sequence;
6999 int template_size, size = 0;
7001 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
7002 true, false);
7003 if (stub_entry == NULL)
7005 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
7006 section->owner, stub_name);
7007 return false;
7010 stub_entry->stub_sec = stub_sec;
7011 stub_entry->stub_offset = (bfd_vma) -1;
7012 stub_entry->id_sec = link_sec;
7013 stub_entry->stub_type = a8_fixes[i].stub_type;
7014 stub_entry->source_value = a8_fixes[i].offset;
7015 stub_entry->target_section = a8_fixes[i].section;
7016 stub_entry->target_value = a8_fixes[i].target_offset;
7017 stub_entry->orig_insn = a8_fixes[i].orig_insn;
7018 stub_entry->branch_type = a8_fixes[i].branch_type;
7020 size = find_stub_size_and_template (a8_fixes[i].stub_type,
7021 &template_sequence,
7022 &template_size);
7024 stub_entry->stub_size = size;
7025 stub_entry->stub_template = template_sequence;
7026 stub_entry->stub_template_size = template_size;
7029 /* Stash the Cortex-A8 erratum fix array for use later in
7030 elf32_arm_write_section(). */
7031 htab->a8_erratum_fixes = a8_fixes;
7032 htab->num_a8_erratum_fixes = num_a8_fixes;
7034 else
7036 htab->a8_erratum_fixes = NULL;
7037 htab->num_a8_erratum_fixes = 0;
7039 return ret;
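/* A minimal sketch of the fixed-point iteration implemented above: keep
   rescanning and re-laying-out until a pass adds or changes no stubs.
   The callback parameters stand in for the real per-pass work and the
   layout_sections_again hook; the names are illustrative only.  */
#if 0 /* Illustrative sketch only.  */
static void
size_stubs_until_stable (bool (*scan_all_sections) (void),
			 void (*layout_sections_again) (void))
{
  bool stub_changed;

  do
    {
      /* Scanning may create new stubs or grow the stub sections...  */
      stub_changed = scan_all_sections ();

      /* ...which moves sections, which can in turn push other branches
	 out of range, so lay everything out again and rescan.  */
      if (stub_changed)
	layout_sections_again ();
    }
  while (stub_changed);
}
#endif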
7042 /* Build all the stubs associated with the current output file. The
7043 stubs are kept in a hash table attached to the main linker hash
7044 table. We also set up the .plt entries for statically linked PIC
7045 functions here. This function is called via arm_elf_finish in the
7046 linker. */
7048 bool
7049 elf32_arm_build_stubs (struct bfd_link_info *info)
7051 asection *stub_sec;
7052 struct bfd_hash_table *table;
7053 enum elf32_arm_stub_type stub_type;
7054 struct elf32_arm_link_hash_table *htab;
7056 htab = elf32_arm_hash_table (info);
7057 if (htab == NULL)
7058 return false;
7060 for (stub_sec = htab->stub_bfd->sections;
7061 stub_sec != NULL;
7062 stub_sec = stub_sec->next)
7064 bfd_size_type size;
7066 /* Ignore non-stub sections. */
7067 if (!strstr (stub_sec->name, STUB_SUFFIX))
7068 continue;
7070 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
7071 must at least be done for stub sections requiring padding and for SG
7072 veneers, to ensure that non-secure code branching to a removed SG
7073 veneer causes an error. */
7074 size = stub_sec->size;
7075 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
7076 if (stub_sec->contents == NULL && size != 0)
7077 return false;
7079 stub_sec->size = 0;
7082 /* Add new SG veneers after those already in the input import library. */
7083 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7085 bfd_vma *start_offset_p;
7086 asection **stub_sec_p;
7088 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
7089 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
7090 if (start_offset_p == NULL)
7091 continue;
7093 BFD_ASSERT (stub_sec_p != NULL);
7094 if (*stub_sec_p != NULL)
7095 (*stub_sec_p)->size = *start_offset_p;
7098 /* Build the stubs as directed by the stub hash table. */
7099 table = &htab->stub_hash_table;
7100 bfd_hash_traverse (table, arm_build_one_stub, info);
7101 if (htab->fix_cortex_a8)
7104 /* Place the Cortex-A8 stubs last. */
7104 htab->fix_cortex_a8 = -1;
7105 bfd_hash_traverse (table, arm_build_one_stub, info);
7108 return true;
7111 /* Locate the Thumb encoded calling stub for NAME. */
7113 static struct elf_link_hash_entry *
7114 find_thumb_glue (struct bfd_link_info *link_info,
7115 const char *name,
7116 char **error_message)
7118 char *tmp_name;
7119 struct elf_link_hash_entry *hash;
7120 struct elf32_arm_link_hash_table *hash_table;
7122 /* We need a pointer to the armelf specific hash table. */
7123 hash_table = elf32_arm_hash_table (link_info);
7124 if (hash_table == NULL)
7125 return NULL;
7127 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7128 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7130 BFD_ASSERT (tmp_name);
7132 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7134 hash = elf_link_hash_lookup
7135 (&(hash_table)->root, tmp_name, false, false, true);
7137 if (hash == NULL)
7139 *error_message = bfd_asprintf (_("unable to find %s glue '%s' for '%s'"),
7140 "Thumb", tmp_name, name);
7141 if (*error_message == NULL)
7142 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7145 free (tmp_name);
7147 return hash;
7150 /* Locate the ARM encoded calling stub for NAME. */
7152 static struct elf_link_hash_entry *
7153 find_arm_glue (struct bfd_link_info *link_info,
7154 const char *name,
7155 char **error_message)
7157 char *tmp_name;
7158 struct elf_link_hash_entry *myh;
7159 struct elf32_arm_link_hash_table *hash_table;
7161 /* We need a pointer to the elfarm specific hash table. */
7162 hash_table = elf32_arm_hash_table (link_info);
7163 if (hash_table == NULL)
7164 return NULL;
7166 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7167 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7168 BFD_ASSERT (tmp_name);
7170 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7172 myh = elf_link_hash_lookup
7173 (&(hash_table)->root, tmp_name, false, false, true);
7175 if (myh == NULL)
7177 *error_message = bfd_asprintf (_("unable to find %s glue '%s' for '%s'"),
7178 "ARM", tmp_name, name);
7179 if (*error_message == NULL)
7180 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7182 free (tmp_name);
7184 return myh;
7187 /* ARM->Thumb glue (static images):
7189 .arm
7190 __func_from_arm:
7191 ldr r12, __func_addr
7192 bx r12
7193 __func_addr:
7194 .word func @ behave as if you saw a ARM_32 reloc.
7196 (v5t static images)
7197 .arm
7198 __func_from_arm:
7199 ldr pc, __func_addr
7200 __func_addr:
7201 .word func @ behave as if you saw a ARM_32 reloc.
7203 (relocatable images)
7204 .arm
7205 __func_from_arm:
7206 ldr r12, __func_offset
7207 add r12, r12, pc
7208 bx r12
7209 __func_offset:
7210 .word func - . */
7212 #define ARM2THUMB_STATIC_GLUE_SIZE 12
7213 static const insn32 a2t1_ldr_insn = 0xe59fc000;
7214 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
7215 static const insn32 a2t3_func_addr_insn = 0x00000001;
7217 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
7218 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
7219 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
7221 #define ARM2THUMB_PIC_GLUE_SIZE 16
7222 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
7223 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
7224 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
7226 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
7228 .thumb .thumb
7229 .align 2 .align 2
7230 __func_from_thumb: __func_from_thumb:
7231 bx pc push {r6, lr}
7232 nop ldr r6, __func_addr
7233 .arm mov lr, pc
7234 b func bx r6
7235 .arm
7236 ;; back_to_thumb
7237 ldmia r13! {r6, lr}
7238 bx lr
7239 __func_addr:
7240 .word func */
7242 #define THUMB2ARM_GLUE_SIZE 8
7243 static const insn16 t2a1_bx_pc_insn = 0x4778;
7244 static const insn16 t2a2_noop_insn = 0x46c0;
7245 static const insn32 t2a3_b_insn = 0xea000000;
7247 #define VFP11_ERRATUM_VENEER_SIZE 8
7248 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
7249 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
7251 #define ARM_BX_VENEER_SIZE 12
7252 static const insn32 armbx1_tst_insn = 0xe3100001;
7253 static const insn32 armbx2_moveq_insn = 0x01a0f000;
7254 static const insn32 armbx3_bx_insn = 0xe12fff10;
7256 #ifndef ELFARM_NABI_C_INCLUDED
7257 static void
7258 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7260 asection * s;
7261 bfd_byte * contents;
7263 if (size == 0)
7265 /* Do not include empty glue sections in the output. */
7266 if (abfd != NULL)
7268 s = bfd_get_linker_section (abfd, name);
7269 if (s != NULL)
7270 s->flags |= SEC_EXCLUDE;
7272 return;
7275 BFD_ASSERT (abfd != NULL);
7277 s = bfd_get_linker_section (abfd, name);
7278 BFD_ASSERT (s != NULL);
7280 contents = (bfd_byte *) bfd_zalloc (abfd, size);
7282 BFD_ASSERT (s->size == size);
7283 s->contents = contents;
7286 bool
7287 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7289 struct elf32_arm_link_hash_table * globals;
7291 globals = elf32_arm_hash_table (info);
7292 BFD_ASSERT (globals != NULL);
7294 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7295 globals->arm_glue_size,
7296 ARM2THUMB_GLUE_SECTION_NAME);
7298 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7299 globals->thumb_glue_size,
7300 THUMB2ARM_GLUE_SECTION_NAME);
7302 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7303 globals->vfp11_erratum_glue_size,
7304 VFP11_ERRATUM_VENEER_SECTION_NAME);
7306 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7307 globals->stm32l4xx_erratum_glue_size,
7308 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7310 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7311 globals->bx_glue_size,
7312 ARM_BX_GLUE_SECTION_NAME);
7314 return true;
7317 /* Allocate space and symbols for calling a Thumb function from ARM mode.
7318 Returns the symbol identifying the stub. */
7320 static struct elf_link_hash_entry *
7321 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
7322 struct elf_link_hash_entry * h)
7324 const char * name = h->root.root.string;
7325 asection * s;
7326 char * tmp_name;
7327 struct elf_link_hash_entry * myh;
7328 struct bfd_link_hash_entry * bh;
7329 struct elf32_arm_link_hash_table * globals;
7330 bfd_vma val;
7331 bfd_size_type size;
7333 globals = elf32_arm_hash_table (link_info);
7334 BFD_ASSERT (globals != NULL);
7335 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7337 s = bfd_get_linker_section
7338 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
7340 BFD_ASSERT (s != NULL);
7342 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7343 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7344 BFD_ASSERT (tmp_name);
7346 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7348 myh = elf_link_hash_lookup
7349 (&(globals)->root, tmp_name, false, false, true);
7351 if (myh != NULL)
7353 /* We've already seen this guy. */
7354 free (tmp_name);
7355 return myh;
7358 /* The only trick here is using hash_table->arm_glue_size as the value.
7359 Even though the section isn't allocated yet, this is where we will be
7360 putting it. The +1 on the value marks that the stub has not been
7361 output yet - not that it is a Thumb function. */
7362 bh = NULL;
7363 val = globals->arm_glue_size + 1;
7364 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7365 tmp_name, BSF_GLOBAL, s, val,
7366 NULL, true, false, &bh);
7368 myh = (struct elf_link_hash_entry *) bh;
7369 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7370 myh->forced_local = 1;
7372 free (tmp_name);
7374 if (bfd_link_pic (link_info)
7375 || globals->pic_veneer)
7376 size = ARM2THUMB_PIC_GLUE_SIZE;
7377 else if (globals->use_blx)
7378 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
7379 else
7380 size = ARM2THUMB_STATIC_GLUE_SIZE;
7382 s->size += size;
7383 globals->arm_glue_size += size;
7385 return myh;
7388 /* Allocate space for ARMv4 BX veneers. */
7390 static void
7391 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
7393 asection * s;
7394 struct elf32_arm_link_hash_table *globals;
7395 char *tmp_name;
7396 struct elf_link_hash_entry *myh;
7397 struct bfd_link_hash_entry *bh;
7398 bfd_vma val;
7400 /* BX PC does not need a veneer. */
7401 if (reg == 15)
7402 return;
7404 globals = elf32_arm_hash_table (link_info);
7405 BFD_ASSERT (globals != NULL);
7406 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7408 /* Check if this veneer has already been allocated. */
7409 if (globals->bx_glue_offset[reg])
7410 return;
7412 s = bfd_get_linker_section
7413 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
7415 BFD_ASSERT (s != NULL);
7417 /* Add symbol for veneer. */
7418 tmp_name = (char *)
7419 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
7420 BFD_ASSERT (tmp_name);
7422 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
7424 myh = elf_link_hash_lookup
7425 (&(globals)->root, tmp_name, false, false, false);
7427 BFD_ASSERT (myh == NULL);
7429 bh = NULL;
7430 val = globals->bx_glue_size;
7431 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7432 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7433 NULL, true, false, &bh);
7435 myh = (struct elf_link_hash_entry *) bh;
7436 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7437 myh->forced_local = 1;
7439 s->size += ARM_BX_VENEER_SIZE;
7440 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
7441 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
7445 /* Add an entry to the code/data map for section SEC. */
7447 static void
7448 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7450 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7451 unsigned int newidx;
7453 if (sec_data->map == NULL)
7455 sec_data->map = (elf32_arm_section_map *)
7456 bfd_malloc (sizeof (elf32_arm_section_map));
7457 sec_data->mapcount = 0;
7458 sec_data->mapsize = 1;
7461 newidx = sec_data->mapcount++;
7463 if (sec_data->mapcount > sec_data->mapsize)
7465 sec_data->mapsize *= 2;
7466 sec_data->map = (elf32_arm_section_map *)
7467 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7468 * sizeof (elf32_arm_section_map));
7471 if (sec_data->map)
7473 sec_data->map[newidx].vma = vma;
7474 sec_data->map[newidx].type = type;
7479 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7480 veneers are handled for now. */
7482 static bfd_vma
7483 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7484 elf32_vfp11_erratum_list *branch,
7485 bfd *branch_bfd,
7486 asection *branch_sec,
7487 unsigned int offset)
7489 asection *s;
7490 struct elf32_arm_link_hash_table *hash_table;
7491 char *tmp_name;
7492 struct elf_link_hash_entry *myh;
7493 struct bfd_link_hash_entry *bh;
7494 bfd_vma val;
7495 struct _arm_elf_section_data *sec_data;
7496 elf32_vfp11_erratum_list *newerr;
7498 hash_table = elf32_arm_hash_table (link_info);
7499 BFD_ASSERT (hash_table != NULL);
7500 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7502 s = bfd_get_linker_section
7503 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7505 BFD_ASSERT (s != NULL);
7507 sec_data = elf32_arm_section_data (s);
7509 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7510 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7511 BFD_ASSERT (tmp_name);
7513 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7514 hash_table->num_vfp11_fixes);
7516 myh = elf_link_hash_lookup
7517 (&(hash_table)->root, tmp_name, false, false, false);
7519 BFD_ASSERT (myh == NULL);
7521 bh = NULL;
7522 val = hash_table->vfp11_erratum_glue_size;
7523 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7524 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7525 NULL, true, false, &bh);
7527 myh = (struct elf_link_hash_entry *) bh;
7528 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7529 myh->forced_local = 1;
7531 /* Link veneer back to calling location. */
7532 sec_data->erratumcount += 1;
7533 newerr = (elf32_vfp11_erratum_list *)
7534 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7536 newerr->type = VFP11_ERRATUM_ARM_VENEER;
7537 newerr->vma = -1;
7538 newerr->u.v.branch = branch;
7539 newerr->u.v.id = hash_table->num_vfp11_fixes;
7540 branch->u.b.veneer = newerr;
7542 newerr->next = sec_data->erratumlist;
7543 sec_data->erratumlist = newerr;
7545 /* A symbol for the return from the veneer. */
7546 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7547 hash_table->num_vfp11_fixes);
7549 myh = elf_link_hash_lookup
7550 (&(hash_table)->root, tmp_name, false, false, false);
7552 if (myh != NULL)
7553 abort ();
7555 bh = NULL;
7556 val = offset + 4;
7557 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7558 branch_sec, val, NULL, true, false, &bh);
7560 myh = (struct elf_link_hash_entry *) bh;
7561 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7562 myh->forced_local = 1;
7564 free (tmp_name);
7566 /* Generate a mapping symbol for the veneer section, and explicitly add an
7567 entry for that symbol to the code/data map for the section. */
7568 if (hash_table->vfp11_erratum_glue_size == 0)
7570 bh = NULL;
7571 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7572 ever requires this erratum fix. */
7573 _bfd_generic_link_add_one_symbol (link_info,
7574 hash_table->bfd_of_glue_owner, "$a",
7575 BSF_LOCAL, s, 0, NULL,
7576 true, false, &bh);
7578 myh = (struct elf_link_hash_entry *) bh;
7579 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7580 myh->forced_local = 1;
7582 /* The elf32_arm_init_maps function only cares about symbols from input
7583 BFDs. We must make a note of this generated mapping symbol
7584 ourselves so that code byteswapping works properly in
7585 elf32_arm_write_section. */
7586 elf32_arm_section_map_add (s, 'a', 0);
7589 s->size += VFP11_ERRATUM_VENEER_SIZE;
7590 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7591 hash_table->num_vfp11_fixes++;
7593 /* The offset of the veneer. */
7594 return val;
7597 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
7598 veneers need to be handled, because the erratum only affects Cortex-M (Thumb-only) cores. */
7600 static bfd_vma
7601 record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
7602 elf32_stm32l4xx_erratum_list *branch,
7603 bfd *branch_bfd,
7604 asection *branch_sec,
7605 unsigned int offset,
7606 bfd_size_type veneer_size)
7608 asection *s;
7609 struct elf32_arm_link_hash_table *hash_table;
7610 char *tmp_name;
7611 struct elf_link_hash_entry *myh;
7612 struct bfd_link_hash_entry *bh;
7613 bfd_vma val;
7614 struct _arm_elf_section_data *sec_data;
7615 elf32_stm32l4xx_erratum_list *newerr;
7617 hash_table = elf32_arm_hash_table (link_info);
7618 BFD_ASSERT (hash_table != NULL);
7619 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7621 s = bfd_get_linker_section
7622 (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7624 BFD_ASSERT (s != NULL);
7626 sec_data = elf32_arm_section_data (s);
7628 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7629 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7630 BFD_ASSERT (tmp_name);
7632 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7633 hash_table->num_stm32l4xx_fixes);
7635 myh = elf_link_hash_lookup
7636 (&(hash_table)->root, tmp_name, false, false, false);
7638 BFD_ASSERT (myh == NULL);
7640 bh = NULL;
7641 val = hash_table->stm32l4xx_erratum_glue_size;
7642 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7643 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7644 NULL, true, false, &bh);
7646 myh = (struct elf_link_hash_entry *) bh;
7647 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7648 myh->forced_local = 1;
7650 /* Link veneer back to calling location. */
7651 sec_data->stm32l4xx_erratumcount += 1;
7652 newerr = (elf32_stm32l4xx_erratum_list *)
7653 bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));
7655 newerr->type = STM32L4XX_ERRATUM_VENEER;
7656 newerr->vma = -1;
7657 newerr->u.v.branch = branch;
7658 newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
7659 branch->u.b.veneer = newerr;
7661 newerr->next = sec_data->stm32l4xx_erratumlist;
7662 sec_data->stm32l4xx_erratumlist = newerr;
7664 /* A symbol for the return from the veneer. */
7665 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7666 hash_table->num_stm32l4xx_fixes);
7668 myh = elf_link_hash_lookup
7669 (&(hash_table)->root, tmp_name, false, false, false);
7671 if (myh != NULL)
7672 abort ();
7674 bh = NULL;
7675 val = offset + 4;
7676 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7677 branch_sec, val, NULL, true, false, &bh);
7679 myh = (struct elf_link_hash_entry *) bh;
7680 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7681 myh->forced_local = 1;
7683 free (tmp_name);
7685 /* Generate a mapping symbol for the veneer section, and explicitly add an
7686 entry for that symbol to the code/data map for the section. */
7687 if (hash_table->stm32l4xx_erratum_glue_size == 0)
7689 bh = NULL;
7690 /* Creates a THUMB symbol since there is no other choice. */
7691 _bfd_generic_link_add_one_symbol (link_info,
7692 hash_table->bfd_of_glue_owner, "$t",
7693 BSF_LOCAL, s, 0, NULL,
7694 true, false, &bh);
7696 myh = (struct elf_link_hash_entry *) bh;
7697 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7698 myh->forced_local = 1;
7700 /* The elf32_arm_init_maps function only cares about symbols from input
7701 BFDs. We must make a note of this generated mapping symbol
7702 ourselves so that code byteswapping works properly in
7703 elf32_arm_write_section. */
7704 elf32_arm_section_map_add (s, 't', 0);
7707 s->size += veneer_size;
7708 hash_table->stm32l4xx_erratum_glue_size += veneer_size;
7709 hash_table->num_stm32l4xx_fixes++;
7711 /* The offset of the veneer. */
7712 return val;
7715 #define ARM_GLUE_SECTION_FLAGS \
7716 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7717 | SEC_READONLY | SEC_LINKER_CREATED)
7719 /* Create a fake section for use by the ARM backend of the linker. */
7721 static bool
7722 arm_make_glue_section (bfd * abfd, const char * name)
7724 asection * sec;
7726 sec = bfd_get_linker_section (abfd, name);
7727 if (sec != NULL)
7728 /* Already made. */
7729 return true;
7731 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7733 if (sec == NULL
7734 || !bfd_set_section_alignment (sec, 2))
7735 return false;
7737 /* Set the gc mark to prevent the section from being removed by garbage
7738 collection, despite the fact that no relocs refer to this section. */
7739 sec->gc_mark = 1;
7741 return true;
7744 /* Set size of .plt entries. This function is called from the
7745 linker scripts in ld/emultempl/{armelf}.em. */
7747 void
7748 bfd_elf32_arm_use_long_plt (void)
7750 elf32_arm_use_long_plt_entry = true;
7753 /* Add the glue sections to ABFD. This function is called from the
7754 linker scripts in ld/emultempl/{armelf}.em. */
7756 bool
7757 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7758 struct bfd_link_info *info)
7760 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7761 bool dostm32l4xx = globals
7762 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7763 bool addglue;
7765 /* If we are only performing a partial
7766 link do not bother adding the glue. */
7767 if (bfd_link_relocatable (info))
7768 return true;
7770 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7771 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7772 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7773 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7775 if (!dostm32l4xx)
7776 return addglue;
7778 return addglue
7779 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7782 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7783 ensures they are not marked for deletion by
7784 strip_excluded_output_sections () when veneers are going to be created
7785 later. Not doing so would trigger an assert on empty section size in
7786 lang_size_sections_1 (). */
7788 void
7789 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7791 enum elf32_arm_stub_type stub_type;
7793 /* If we are only performing a partial
7794 link do not bother adding the glue. */
7795 if (bfd_link_relocatable (info))
7796 return;
7798 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7800 asection *out_sec;
7801 const char *out_sec_name;
7803 if (!arm_dedicated_stub_output_section_required (stub_type))
7804 continue;
7806 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7807 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7808 if (out_sec != NULL)
7809 out_sec->flags |= SEC_KEEP;
7813 /* Select a BFD to be used to hold the sections used by the glue code.
7814 This function is called from the linker scripts in ld/emultempl/
7815 {armelf/pe}.em. */
7817 bool
7818 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7820 struct elf32_arm_link_hash_table *globals;
7822 /* If we are only performing a partial link
7823 do not bother getting a bfd to hold the glue. */
7824 if (bfd_link_relocatable (info))
7825 return true;
7827 /* Make sure we don't attach the glue sections to a dynamic object. */
7828 BFD_ASSERT (!(abfd->flags & DYNAMIC));
7830 globals = elf32_arm_hash_table (info);
7831 BFD_ASSERT (globals != NULL);
7833 if (globals->bfd_of_glue_owner != NULL)
7834 return true;
7836 /* Save the bfd for later use. */
7837 globals->bfd_of_glue_owner = abfd;
7839 return true;
7842 static void
7843 check_use_blx (struct elf32_arm_link_hash_table *globals)
7845 int cpu_arch;
7847 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7848 Tag_CPU_arch);
7850 if (globals->fix_arm1176)
7852 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7853 globals->use_blx = 1;
7855 else
7857 if (cpu_arch > TAG_CPU_ARCH_V4T)
7858 globals->use_blx = 1;
7862 bool
7863 bfd_elf32_arm_process_before_allocation (bfd *abfd,
7864 struct bfd_link_info *link_info)
7866 Elf_Internal_Shdr *symtab_hdr;
7867 Elf_Internal_Rela *internal_relocs = NULL;
7868 Elf_Internal_Rela *irel, *irelend;
7869 bfd_byte *contents = NULL;
7871 asection *sec;
7872 struct elf32_arm_link_hash_table *globals;
7874 /* If we are only performing a partial link do not bother
7875 to construct any glue. */
7876 if (bfd_link_relocatable (link_info))
7877 return true;
7879 /* Here we have a bfd that is to be included on the link. We have a
7880 hook to do reloc rummaging, before section sizes are nailed down. */
7881 globals = elf32_arm_hash_table (link_info);
7882 BFD_ASSERT (globals != NULL);
7884 check_use_blx (globals);
7886 if (globals->byteswap_code && !bfd_big_endian (abfd))
7888 _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
7889 abfd);
7890 return false;
7893 /* PR 5398: If we have not decided to include any loadable sections in
7894 the output then we will not have a glue owner bfd. This is OK, it
7895 just means that there is nothing else for us to do here. */
7896 if (globals->bfd_of_glue_owner == NULL)
7897 return true;
7899 /* Rummage around all the relocs and map the glue vectors. */
7900 sec = abfd->sections;
7902 if (sec == NULL)
7903 return true;
7905 for (; sec != NULL; sec = sec->next)
7907 if (sec->reloc_count == 0)
7908 continue;
7910 if ((sec->flags & SEC_EXCLUDE) != 0
7911 || (sec->flags & SEC_HAS_CONTENTS) == 0)
7912 continue;
7914 symtab_hdr = & elf_symtab_hdr (abfd);
7916 /* Load the relocs. */
7917 internal_relocs
7918 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, false);
7920 if (internal_relocs == NULL)
7921 goto error_return;
7923 irelend = internal_relocs + sec->reloc_count;
7924 for (irel = internal_relocs; irel < irelend; irel++)
7926 long r_type;
7927 unsigned long r_index;
7929 struct elf_link_hash_entry *h;
7931 r_type = ELF32_R_TYPE (irel->r_info);
7932 r_index = ELF32_R_SYM (irel->r_info);
7934 /* These are the only relocation types we care about. */
7935 if ( r_type != R_ARM_PC24
7936 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
7937 continue;
7939 /* Get the section contents if we haven't done so already. */
7940 if (contents == NULL)
7942 /* Get cached copy if it exists. */
7943 if (elf_section_data (sec)->this_hdr.contents != NULL)
7944 contents = elf_section_data (sec)->this_hdr.contents;
7945 else
7947 /* Go get them off disk. */
7948 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7949 goto error_return;
7953 if (r_type == R_ARM_V4BX)
7955 int reg;
7957 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
7958 record_arm_bx_glue (link_info, reg);
7959 continue;
7962 /* If the relocation is not against a symbol it cannot concern us. */
7963 h = NULL;
7965 /* We don't care about local symbols. */
7966 if (r_index < symtab_hdr->sh_info)
7967 continue;
7969 /* This is an external symbol. */
7970 r_index -= symtab_hdr->sh_info;
7971 h = (struct elf_link_hash_entry *)
7972 elf_sym_hashes (abfd)[r_index];
7974 /* If the relocation is against a static symbol it must be within
7975 the current section and so cannot be a cross ARM/Thumb relocation. */
7976 if (h == NULL)
7977 continue;
7979 /* If the call will go through a PLT entry then we do not need
7980 glue. */
7981 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
7982 continue;
7984 switch (r_type)
7986 case R_ARM_PC24:
7987 /* This one is a call from arm code. We need to look up
7988 the target of the call. If it is a thumb target, we
7989 insert glue. */
7990 if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
7991 == ST_BRANCH_TO_THUMB)
7992 record_arm_to_thumb_glue (link_info, h);
7993 break;
7995 default:
7996 abort ();
8000 if (elf_section_data (sec)->this_hdr.contents != contents)
8001 free (contents);
8002 contents = NULL;
8004 if (elf_section_data (sec)->relocs != internal_relocs)
8005 free (internal_relocs);
8006 internal_relocs = NULL;
8009 return true;
8011 error_return:
8012 if (elf_section_data (sec)->this_hdr.contents != contents)
8013 free (contents);
8014 if (elf_section_data (sec)->relocs != internal_relocs)
8015 free (internal_relocs);
8017 return false;
8019 #endif
8022 /* Initialise maps of ARM/Thumb/data for input BFDs. */
8024 void
8025 bfd_elf32_arm_init_maps (bfd *abfd)
8027 Elf_Internal_Sym *isymbuf;
8028 Elf_Internal_Shdr *hdr;
8029 unsigned int i, localsyms;
8031 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
8032 if (! is_arm_elf (abfd))
8033 return;
8035 if ((abfd->flags & DYNAMIC) != 0)
8036 return;
8038 hdr = & elf_symtab_hdr (abfd);
8039 localsyms = hdr->sh_info;
8041 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
8042 should contain the number of local symbols, which should come before any
8043 global symbols. Mapping symbols are always local. */
8044 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
8045 NULL);
8047 /* No internal symbols read? Skip this BFD. */
8048 if (isymbuf == NULL)
8049 return;
8051 for (i = 0; i < localsyms; i++)
8053 Elf_Internal_Sym *isym = &isymbuf[i];
8054 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
8055 const char *name;
8057 if (sec != NULL
8058 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
8060 name = bfd_elf_string_from_elf_section (abfd,
8061 hdr->sh_link, isym->st_name);
8063 if (bfd_is_arm_special_symbol_name (name,
8064 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
8065 elf32_arm_section_map_add (sec, name[1], isym->st_value);
8071 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
8072 say what they wanted. */
8074 void
8075 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
8077 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8078 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8080 if (globals == NULL)
8081 return;
8083 if (globals->fix_cortex_a8 == -1)
8085 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
8086 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
8087 && (out_attr[Tag_CPU_arch_profile].i == 'A'
8088 || out_attr[Tag_CPU_arch_profile].i == 0))
8089 globals->fix_cortex_a8 = 1;
8090 else
8091 globals->fix_cortex_a8 = 0;
8096 void
8097 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8099 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8100 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8102 if (globals == NULL)
8103 return;
8104 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8105 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8107 switch (globals->vfp11_fix)
8109 case BFD_ARM_VFP11_FIX_DEFAULT:
8110 case BFD_ARM_VFP11_FIX_NONE:
8111 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8112 break;
8114 default:
8115 /* Give a warning, but do as the user requests anyway. */
8116 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8117 "workaround is not necessary for target architecture"), obfd);
8120 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8121 /* For earlier architectures, we might need the workaround, but do not
8122 enable it by default. If the user is running with broken hardware, they
8123 must enable the erratum fix explicitly. */
8124 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8127 void
8128 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8130 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8131 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8133 if (globals == NULL)
8134 return;
8136 /* We assume only Cortex-M4 may require the fix. */
8137 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8138 || out_attr[Tag_CPU_arch_profile].i != 'M')
8140 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8141 /* Give a warning, but do as the user requests anyway. */
8142 _bfd_error_handler
8143 (_("%pB: warning: selected STM32L4XX erratum "
8144 "workaround is not necessary for target architecture"), obfd);
8148 enum bfd_arm_vfp11_pipe
8150 VFP11_FMAC,
8151 VFP11_LS,
8152 VFP11_DS,
8153 VFP11_BAD
8156 /* Return a VFP register number. This is encoded as RX:X for single-precision
8157 registers, or X:RX for double-precision registers, where RX is the group of
8158 four bits in the instruction encoding and X is the single extension bit.
8159 RX and X fields are specified using their lowest (starting) bit. The return
8160 value is:
8162 0...31: single-precision registers s0...s31
8163 32...63: double-precision registers d0...d31.
8165 Although X should be zero for VFP11 (encoding d0...d15 only), we might
8166 encounter VFP3 instructions, so we allow the full range for DP registers. */
8168 static unsigned int
8169 bfd_arm_vfp11_regno (unsigned int insn, bool is_double, unsigned int rx,
8170 unsigned int x)
8172 if (is_double)
8173 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8174 else
8175 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
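/* Worked example for bfd_arm_vfp11_regno (added for illustration; the
   field values are hypothetical): with RX = 0b0011 and X = 1, a
   single-precision operand decodes to (3 << 1) | 1 = 7, i.e. s7, while
   a double-precision operand decodes to 3 | (1 << 4) = 19 and is
   returned as 19 + 32 = 51, i.e. d19.  */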
8178 /* Set bits in *WMASK according to a register number REG as encoded by
8179 bfd_arm_vfp11_regno(). Ignore d16-d31. */
8181 static void
8182 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
8184 if (reg < 32)
8185 *wmask |= 1 << reg;
8186 else if (reg < 48)
8187 *wmask |= 3 << ((reg - 32) * 2);
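/* Example (illustrative only): bfd_arm_vfp11_write_mask (&w, 7) sets
   bit 7 of the mask (s7), while bfd_arm_vfp11_write_mask (&w, 33) sets
   bits 2 and 3, because d1 overlaps s2/s3; a later call to
   bfd_arm_vfp11_antidependency () with that mask therefore reports a
   conflict against an instruction that read s2.  */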
8190 /* Return TRUE if WMASK overwrites anything in REGS. */
8192 static bool
8193 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8195 int i;
8197 for (i = 0; i < numregs; i++)
8199 unsigned int reg = regs[i];
8201 if (reg < 32 && (wmask & (1 << reg)) != 0)
8202 return true;
8204 reg -= 32;
8206 if (reg >= 16)
8207 continue;
8209 if ((wmask & (3 << (reg * 2))) != 0)
8210 return true;
8213 return false;
8216 /* In this function, we're interested in two things: finding input registers
8217 for VFP data-processing instructions, and finding the set of registers which
8218 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
8219 hold the written set, so FLDM etc. are easy to deal with (we're only
8220 interested in 32 SP registers or 16 DP registers, due to the VFP version
8221 implemented by the chip in question). DP registers are marked by setting
8222 both SP registers in the write mask. */
8224 static enum bfd_arm_vfp11_pipe
8225 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
8226 int *numregs)
8228 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
8229 bool is_double = (insn & 0xf00) == 0xb00;
8231 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
8233 unsigned int pqrs;
8234 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8235 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8237 pqrs = ((insn & 0x00800000) >> 20)
8238 | ((insn & 0x00300000) >> 19)
8239 | ((insn & 0x00000040) >> 6);
8241 switch (pqrs)
8243 case 0: /* fmac[sd]. */
8244 case 1: /* fnmac[sd]. */
8245 case 2: /* fmsc[sd]. */
8246 case 3: /* fnmsc[sd]. */
8247 vpipe = VFP11_FMAC;
8248 bfd_arm_vfp11_write_mask (destmask, fd);
8249 regs[0] = fd;
8250 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
8251 regs[2] = fm;
8252 *numregs = 3;
8253 break;
8255 case 4: /* fmul[sd]. */
8256 case 5: /* fnmul[sd]. */
8257 case 6: /* fadd[sd]. */
8258 case 7: /* fsub[sd]. */
8259 vpipe = VFP11_FMAC;
8260 goto vfp_binop;
8262 case 8: /* fdiv[sd]. */
8263 vpipe = VFP11_DS;
8264 vfp_binop:
8265 bfd_arm_vfp11_write_mask (destmask, fd);
8266 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
8267 regs[1] = fm;
8268 *numregs = 2;
8269 break;
8271 case 15: /* extended opcode. */
8273 unsigned int extn = ((insn >> 15) & 0x1e)
8274 | ((insn >> 7) & 1);
8276 switch (extn)
8278 case 0: /* fcpy[sd]. */
8279 case 1: /* fabs[sd]. */
8280 case 2: /* fneg[sd]. */
8281 case 8: /* fcmp[sd]. */
8282 case 9: /* fcmpe[sd]. */
8283 case 10: /* fcmpz[sd]. */
8284 case 11: /* fcmpez[sd]. */
8285 case 16: /* fuito[sd]. */
8286 case 17: /* fsito[sd]. */
8287 case 24: /* ftoui[sd]. */
8288 case 25: /* ftouiz[sd]. */
8289 case 26: /* ftosi[sd]. */
8290 case 27: /* ftosiz[sd]. */
8291 /* These instructions will not bounce due to underflow. */
8292 *numregs = 0;
8293 vpipe = VFP11_FMAC;
8294 break;
8296 case 3: /* fsqrt[sd]. */
8297 /* fsqrt cannot underflow, but it can (perhaps) overwrite
8298 registers to cause the erratum in previous instructions. */
8299 bfd_arm_vfp11_write_mask (destmask, fd);
8300 vpipe = VFP11_DS;
8301 break;
8303 case 15: /* fcvt{ds,sd}. */
8305 int rnum = 0;
8307 bfd_arm_vfp11_write_mask (destmask, fd);
8309 /* Only FCVTSD can underflow. */
8310 if ((insn & 0x100) != 0)
8311 regs[rnum++] = fm;
8313 *numregs = rnum;
8315 vpipe = VFP11_FMAC;
8317 break;
8319 default:
8320 return VFP11_BAD;
8323 break;
8325 default:
8326 return VFP11_BAD;
8329 /* Two-register transfer. */
8330 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
8332 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8334 if ((insn & 0x100000) == 0)
8336 if (is_double)
8337 bfd_arm_vfp11_write_mask (destmask, fm);
8338 else
8340 bfd_arm_vfp11_write_mask (destmask, fm);
8341 bfd_arm_vfp11_write_mask (destmask, fm + 1);
8345 vpipe = VFP11_LS;
8347 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
8349 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8350 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
8352 switch (puw)
8354 case 0: /* Two-reg transfer. We should catch these above. */
8355 abort ();
8357 case 2: /* fldm[sdx]. */
8358 case 3:
8359 case 5:
8361 unsigned int i, offset = insn & 0xff;
8363 if (is_double)
8364 offset >>= 1;
8366 for (i = fd; i < fd + offset; i++)
8367 bfd_arm_vfp11_write_mask (destmask, i);
8369 break;
8371 case 4: /* fld[sd]. */
8372 case 6:
8373 bfd_arm_vfp11_write_mask (destmask, fd);
8374 break;
8376 default:
8377 return VFP11_BAD;
8380 vpipe = VFP11_LS;
8382 /* Single-register transfer. Note L==0. */
8383 else if ((insn & 0x0f100e10) == 0x0e000a10)
8385 unsigned int opcode = (insn >> 21) & 7;
8386 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
8388 switch (opcode)
8390 case 0: /* fmsr/fmdlr. */
8391 case 1: /* fmdhr. */
8392 /* Mark fmdhr and fmdlr as writing to the whole of the DP
8393 destination register. I don't know if this is exactly right,
8394 but it is the conservative choice. */
8395 bfd_arm_vfp11_write_mask (destmask, fn);
8396 break;
8398 case 7: /* fmxr. */
8399 break;
8402 vpipe = VFP11_LS;
8405 return vpipe;
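/* Note on the pqrs decoding above (added for clarity): the pqrs value
   packs instruction bit 23 into bit 3, bits 21:20 into bits 2:1 and
   bit 6 into bit 0.  So pqrs == 8 means only bit 23 is set, which is
   the fdiv[sd] encoding routed to the DS pipeline, while pqrs values
   0..3 are the FMAC-family encodings.  */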
8409 static int elf32_arm_compare_mapping (const void * a, const void * b);
8412 /* Look for potentially-troublesome code sequences which might trigger the
8413 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8414 (available from ARM) for details of the erratum. A short version is
8415 described in ld.texinfo. */
8417 bool
8418 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8420 asection *sec;
8421 bfd_byte *contents = NULL;
8422 int state = 0;
8423 int regs[3], numregs = 0;
8424 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8425 int use_vector = globals != NULL && globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR;
8427 if (globals == NULL)
8428 return false;
8430 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8431 The states transition as follows:
8433 0 -> 1 (vector) or 0 -> 2 (scalar)
8434 A VFP FMAC-pipeline instruction has been seen. Fill
8435 regs[0]..regs[numregs-1] with its input operands. Remember this
8436 instruction in 'first_fmac'.
8438 1 -> 2
8439 Any instruction, except for a VFP instruction which overwrites
8440 regs[*].
8442 1 -> 3 [ -> 0 ] or
8443 2 -> 3 [ -> 0 ]
8444 A VFP instruction has been seen which overwrites any of regs[*].
8445 We must make a veneer! Reset state to 0 before examining next
8446 instruction.
8448 2 -> 0
8449 If we fail to match anything in state 2, reset to state 0 and reset
8450 the instruction pointer to the instruction after 'first_fmac'.
8452 If the VFP11 vector mode is in use, there must be at least two unrelated
8453 instructions between anti-dependent VFP11 instructions to properly avoid
8454 triggering the erratum, hence the use of the extra state 1. */
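  /* Illustrative sequence (not taken from any real object file): in
     scalar mode, "fmacs s0, s1, s2" moves the FSM to state 2 with
     regs[] = {s0, s1, s2}; a following "fsubs s1, s3, s4" writes s1,
     one of the recorded inputs, so the anti-dependency test fires,
     state 3 is reached and a veneer is recorded for the fmacs.  */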
8456 /* If we are only performing a partial link do not bother
8457 to construct any glue. */
8458 if (bfd_link_relocatable (link_info))
8459 return true;
8461 /* Skip if this bfd does not correspond to an ELF image. */
8462 if (! is_arm_elf (abfd))
8463 return true;
8465 /* We should have chosen a fix type by the time we get here. */
8466 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8468 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8469 return true;
8471 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8472 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8473 return true;
8475 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8477 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8478 struct _arm_elf_section_data *sec_data;
8480 /* If we don't have executable progbits, we're not interested in this
8481 section. Also skip if section is to be excluded. */
8482 if (elf_section_type (sec) != SHT_PROGBITS
8483 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8484 || (sec->flags & SEC_EXCLUDE) != 0
8485 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8486 || sec->output_section == bfd_abs_section_ptr
8487 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8488 continue;
8490 sec_data = elf32_arm_section_data (sec);
8492 if (sec_data->mapcount == 0)
8493 continue;
8495 if (elf_section_data (sec)->this_hdr.contents != NULL)
8496 contents = elf_section_data (sec)->this_hdr.contents;
8497 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8498 goto error_return;
8500 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8501 elf32_arm_compare_mapping);
8503 for (span = 0; span < sec_data->mapcount; span++)
8505 unsigned int span_start = sec_data->map[span].vma;
8506 unsigned int span_end = (span == sec_data->mapcount - 1)
8507 ? sec->size : sec_data->map[span + 1].vma;
8508 char span_type = sec_data->map[span].type;
8510 /* FIXME: Only ARM mode is supported at present. We may need to
8511 support Thumb-2 mode also at some point. */
8512 if (span_type != 'a')
8513 continue;
8515 for (i = span_start; i < span_end;)
8517 unsigned int next_i = i + 4;
8518 unsigned int insn = bfd_big_endian (abfd)
8519 ? (((unsigned) contents[i] << 24)
8520 | (contents[i + 1] << 16)
8521 | (contents[i + 2] << 8)
8522 | contents[i + 3])
8523 : (((unsigned) contents[i + 3] << 24)
8524 | (contents[i + 2] << 16)
8525 | (contents[i + 1] << 8)
8526 | contents[i]);
8527 unsigned int writemask = 0;
8528 enum bfd_arm_vfp11_pipe vpipe;
8530 switch (state)
8532 case 0:
8533 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8534 &numregs);
8535 /* I'm assuming the VFP11 erratum can trigger with denorm
8536 operands on either the FMAC or the DS pipeline. This might
8537 lead to slightly overenthusiastic veneer insertion. */
8538 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8540 state = use_vector ? 1 : 2;
8541 first_fmac = i;
8542 veneer_of_insn = insn;
8544 break;
8546 case 1:
8548 int other_regs[3], other_numregs;
8549 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8550 other_regs,
8551 &other_numregs);
8552 if (vpipe != VFP11_BAD
8553 && bfd_arm_vfp11_antidependency (writemask, regs,
8554 numregs))
8555 state = 3;
8556 else
8557 state = 2;
8559 break;
8561 case 2:
8563 int other_regs[3], other_numregs;
8564 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8565 other_regs,
8566 &other_numregs);
8567 if (vpipe != VFP11_BAD
8568 && bfd_arm_vfp11_antidependency (writemask, regs,
8569 numregs))
8570 state = 3;
8571 else
8573 state = 0;
8574 next_i = first_fmac + 4;
8577 break;
8579 case 3:
8580 abort (); /* Should be unreachable. */
8583 if (state == 3)
8585 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8586 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8588 elf32_arm_section_data (sec)->erratumcount += 1;
8590 newerr->u.b.vfp_insn = veneer_of_insn;
8592 switch (span_type)
8594 case 'a':
8595 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8596 break;
8598 default:
8599 abort ();
8602 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8603 first_fmac);
8605 newerr->vma = -1;
8607 newerr->next = sec_data->erratumlist;
8608 sec_data->erratumlist = newerr;
8610 state = 0;
8613 i = next_i;
8617 if (elf_section_data (sec)->this_hdr.contents != contents)
8618 free (contents);
8619 contents = NULL;
8622 return true;
8624 error_return:
8625 if (elf_section_data (sec)->this_hdr.contents != contents)
8626 free (contents);
8628 return false;
8631 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8632 after sections have been laid out, using specially-named symbols. */
8634 void
8635 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8636 struct bfd_link_info *link_info)
8638 asection *sec;
8639 struct elf32_arm_link_hash_table *globals;
8640 char *tmp_name;
8642 if (bfd_link_relocatable (link_info))
8643 return;
8645 /* Skip if this bfd does not correspond to an ELF image. */
8646 if (! is_arm_elf (abfd))
8647 return;
8649 globals = elf32_arm_hash_table (link_info);
8650 if (globals == NULL)
8651 return;
8653 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8654 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8655 BFD_ASSERT (tmp_name);
8657 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8659 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8660 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8662 for (; errnode != NULL; errnode = errnode->next)
8664 struct elf_link_hash_entry *myh;
8665 bfd_vma vma;
8667 switch (errnode->type)
8669 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8670 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8671 /* Find veneer symbol. */
8672 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8673 errnode->u.b.veneer->u.v.id);
8675 myh = elf_link_hash_lookup
8676 (&(globals)->root, tmp_name, false, false, true);
8678 if (myh == NULL)
8679 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8680 abfd, "VFP11", tmp_name);
8682 vma = myh->root.u.def.section->output_section->vma
8683 + myh->root.u.def.section->output_offset
8684 + myh->root.u.def.value;
8686 errnode->u.b.veneer->vma = vma;
8687 break;
8689 case VFP11_ERRATUM_ARM_VENEER:
8690 case VFP11_ERRATUM_THUMB_VENEER:
8691 /* Find return location. */
8692 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8693 errnode->u.v.id);
8695 myh = elf_link_hash_lookup
8696 (&(globals)->root, tmp_name, false, false, true);
8698 if (myh == NULL)
8699 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8700 abfd, "VFP11", tmp_name);
8702 vma = myh->root.u.def.section->output_section->vma
8703 + myh->root.u.def.section->output_offset
8704 + myh->root.u.def.value;
8706 errnode->u.v.branch->vma = vma;
8707 break;
8709 default:
8710 abort ();
8715 free (tmp_name);
8718 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8719 return locations after sections have been laid out, using
8720 specially-named symbols. */
8722 void
8723 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8724 struct bfd_link_info *link_info)
8726 asection *sec;
8727 struct elf32_arm_link_hash_table *globals;
8728 char *tmp_name;
8730 if (bfd_link_relocatable (link_info))
8731 return;
8733 /* Skip if this bfd does not correspond to an ELF image. */
8734 if (! is_arm_elf (abfd))
8735 return;
8737 globals = elf32_arm_hash_table (link_info);
8738 if (globals == NULL)
8739 return;
8741 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8742 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8743 BFD_ASSERT (tmp_name);
8745 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8747 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8748 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8750 for (; errnode != NULL; errnode = errnode->next)
8752 struct elf_link_hash_entry *myh;
8753 bfd_vma vma;
8755 switch (errnode->type)
8757 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8758 /* Find veneer symbol. */
8759 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8760 errnode->u.b.veneer->u.v.id);
8762 myh = elf_link_hash_lookup
8763 (&(globals)->root, tmp_name, false, false, true);
8765 if (myh == NULL)
8766 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8767 abfd, "STM32L4XX", tmp_name);
8769 vma = myh->root.u.def.section->output_section->vma
8770 + myh->root.u.def.section->output_offset
8771 + myh->root.u.def.value;
8773 errnode->u.b.veneer->vma = vma;
8774 break;
8776 case STM32L4XX_ERRATUM_VENEER:
8777 /* Find return location. */
8778 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8779 errnode->u.v.id);
8781 myh = elf_link_hash_lookup
8782 (&(globals)->root, tmp_name, false, false, true);
8784 if (myh == NULL)
8785 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8786 abfd, "STM32L4XX", tmp_name);
8788 vma = myh->root.u.def.section->output_section->vma
8789 + myh->root.u.def.section->output_offset
8790 + myh->root.u.def.value;
8792 errnode->u.v.branch->vma = vma;
8793 break;
8795 default:
8796 abort ();
8801 free (tmp_name);
8804 static inline bool
8805 is_thumb2_ldmia (const insn32 insn)
8807 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8808 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
8809 return (insn & 0xffd02000) == 0xe8900000;
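/* Example (illustrative): the Thumb-2 word 0xe89000f0, i.e.
   "ldmia.w r0, {r4-r7}", satisfies (insn & 0xffd02000) == 0xe8900000,
   so is_thumb2_ldmia () accepts it.  */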
8812 static inline bool
8813 is_thumb2_ldmdb (const insn32 insn)
8815 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8816 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8817 return (insn & 0xffd02000) == 0xe9100000;
8820 static inline bool
8821 is_thumb2_vldm (const insn32 insn)
8823 /* A6.5 Extension register load or store instruction
8824 A7.7.229
8825 We look for SP 32-bit and DP 64-bit registers.
8826 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8827 <list> is consecutive 64-bit registers
8828 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8829 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8830 <list> is consecutive 32-bit registers
8831 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8832 if P==0 && U==1 && W==1 && Rn=1101 VPOP
8833 if PUW=010 || PUW=011 || PUW=101 VLDM. */
8834 return
8835 (((insn & 0xfe100f00) == 0xec100b00) ||
8836 ((insn & 0xfe100f00) == 0xec100a00))
8837 && /* (IA without !). */
8838 (((((insn << 7) >> 28) & 0xd) == 0x4)
8839 /* (IA with !), includes VPOP (when reg number is SP). */
8840 || ((((insn << 7) >> 28) & 0xd) == 0x5)
8841 /* (DB with !). */
8842 || ((((insn << 7) >> 28) & 0xd) == 0x9));
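/* Example (illustrative): 0xecbd8b10 is "vpop {d8-d15}", i.e. VLDMIA
   from SP with writeback.  It matches the 0xec100b00 pattern and its
   P:U:(D):W bits, extracted by ((insn << 7) >> 28) & 0xd, give 0x5,
   the "IA with !" case, so is_thumb2_vldm () returns true.  */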
8845 /* STM32L4XX STM erratum: this function assumes that it receives an LDM or
8846 VLDM opcode and:
8847 - computes the number and the mode of memory accesses;
8848 - decides whether the replacement should be done:
8849 . replace only if the access transfers more than 8 words,
8850 . or (for testing purposes only) replace all accesses. */
8852 static bool
8853 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8854 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8856 int nb_words = 0;
8858 /* The field encoding the register list is the same for both LDMIA
8859 and LDMDB encodings. */
8860 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8861 nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8862 else if (is_thumb2_vldm (insn))
8863 nb_words = (insn & 0xff);
8865 /* DEFAULT mode accounts for the real bug condition situation,
8866 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
8867 return (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT
8868 ? nb_words > 8
8869 : stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL);
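/* Example (illustrative): "ldmia.w r0, {r1-r9}" has nine bits set in
   its register list, so nb_words is 9 and a stub is created even in
   DEFAULT mode; for the vpop above, nb_words is the imm8 word count
   0x10 = 16, which also exceeds 8.  "ldmia.w r0, {r4-r7}" transfers
   only 4 words and is left alone unless the fix mode is
   BFD_ARM_STM32L4XX_FIX_ALL.  */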
8872 /* Look for potentially-troublesome code sequences which might trigger
8873 the STM STM32L4XX erratum. */
8875 bool
8876 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
8877 struct bfd_link_info *link_info)
8879 asection *sec;
8880 bfd_byte *contents = NULL;
8881 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8883 if (globals == NULL)
8884 return false;
8886 /* If we are only performing a partial link do not bother
8887 to construct any glue. */
8888 if (bfd_link_relocatable (link_info))
8889 return true;
8891 /* Skip if this bfd does not correspond to an ELF image. */
8892 if (! is_arm_elf (abfd))
8893 return true;
8895 if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
8896 return true;
8898 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8899 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8900 return true;
8902 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8904 unsigned int i, span;
8905 struct _arm_elf_section_data *sec_data;
8907 /* If we don't have executable progbits, we're not interested in this
8908 section. Also skip if section is to be excluded. */
8909 if (elf_section_type (sec) != SHT_PROGBITS
8910 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8911 || (sec->flags & SEC_EXCLUDE) != 0
8912 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8913 || sec->output_section == bfd_abs_section_ptr
8914 || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
8915 continue;
8917 sec_data = elf32_arm_section_data (sec);
8919 if (sec_data->mapcount == 0)
8920 continue;
8922 if (elf_section_data (sec)->this_hdr.contents != NULL)
8923 contents = elf_section_data (sec)->this_hdr.contents;
8924 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8925 goto error_return;
8927 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8928 elf32_arm_compare_mapping);
8930 for (span = 0; span < sec_data->mapcount; span++)
8932 unsigned int span_start = sec_data->map[span].vma;
8933 unsigned int span_end = (span == sec_data->mapcount - 1)
8934 ? sec->size : sec_data->map[span + 1].vma;
8935 char span_type = sec_data->map[span].type;
8936 int itblock_current_pos = 0;
8938 /* Only Thumb-2 mode needs to be supported by this CM4-specific
8939 code; we should not encounter any ARM-mode spans (i.e. span_type
8940 == 'a'). */
8941 if (span_type != 't')
8942 continue;
8944 for (i = span_start; i < span_end;)
8946 unsigned int insn = bfd_get_16 (abfd, &contents[i]);
8947 bool insn_32bit = false;
8948 bool is_ldm = false;
8949 bool is_vldm = false;
8950 bool is_not_last_in_it_block = false;
8952 /* The first 16 bits of every 32-bit Thumb-2 instruction have
8953 opcode[15..13]=0b111, and the encoded op1 (opcode[12..11]) can be
8954 anything except 0b00.
8955 See 32-bit Thumb instruction encoding. */
8956 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
8957 insn_32bit = true;
8959 /* Compute the predicate that tells whether the instruction
8960 is covered by an IT block:
8961 - an LDM that is not the last instruction of an IT block
8962 cannot be replaced, so we report an error for it;
8963 - otherwise we can create a branch at the end of the
8964 IT block; it will be controlled naturally by IT
8965 with the proper pseudo-predicate;
8966 - so the only interesting predicate is the one that
8967 tells that we are not on the last instruction of an IT
8968 block. */
8969 if (itblock_current_pos != 0)
8970 is_not_last_in_it_block = !!--itblock_current_pos;
8972 if (insn_32bit)
8974 /* Load the rest of the insn (in manual-friendly order). */
8975 insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
8976 is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
8977 is_vldm = is_thumb2_vldm (insn);
8979 /* Veneers are created for (v)ldm depending on
8980 option flags and memory accesses conditions; but
8981 if the instruction is not the last instruction of
8982 an IT block, we cannot create a jump there, so we
8983 bail out. */
8984 if ((is_ldm || is_vldm)
8985 && stm32l4xx_need_create_replacing_stub
8986 (insn, globals->stm32l4xx_fix))
8988 if (is_not_last_in_it_block)
8990 _bfd_error_handler
8991 /* xgettext:c-format */
8992 (_("%pB(%pA+%#x): error: multiple load detected"
8993 " in non-last IT block instruction:"
8994 " STM32L4XX veneer cannot be generated; "
8995 "use gcc option -mrestrict-it to generate"
8996 " only one instruction per IT block"),
8997 abfd, sec, i);
8999 else
9001 elf32_stm32l4xx_erratum_list *newerr =
9002 (elf32_stm32l4xx_erratum_list *)
9003 bfd_zmalloc
9004 (sizeof (elf32_stm32l4xx_erratum_list));
9006 elf32_arm_section_data (sec)
9007 ->stm32l4xx_erratumcount += 1;
9008 newerr->u.b.insn = insn;
9009 /* We create only thumb branches. */
9010 newerr->type =
9011 STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
9012 record_stm32l4xx_erratum_veneer
9013 (link_info, newerr, abfd, sec,
9015 is_ldm ?
9016 STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
9017 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
9018 newerr->vma = -1;
9019 newerr->next = sec_data->stm32l4xx_erratumlist;
9020 sec_data->stm32l4xx_erratumlist = newerr;
9024 else
9026 /* A7.7.37 IT p208
9027 IT blocks are only encoded in T1
9028 Encoding T1: IT{x{y{z}}} <firstcond>
9029 1 0 1 1 - 1 1 1 1 - firstcond - mask
9030 if mask = '0000' then see 'related encodings'
9031 We don't deal with UNPREDICTABLE, just ignore these.
9032 There can be no nested IT blocks so an IT block
9033 is naturally a new one for which it is worth
9034 computing its size. */
9035 bool is_newitblock = ((insn & 0xff00) == 0xbf00)
9036 && ((insn & 0x000f) != 0x0000);
9037 /* If we have a new IT block we compute its size. */
9038 if (is_newitblock)
9040 /* Compute the number of instructions controlled
9041 by the IT block, it will be used to decide
9042 whether we are inside an IT block or not. */
9043 unsigned int mask = insn & 0x000f;
9044 itblock_current_pos = 4 - ctz (mask);
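/* Illustrative values (added for clarity): a bare "IT <cond>" has
   mask 0b1000, so ctz (mask) is 3 and the block covers 4 - 3 = 1
   instruction; an "ITTT"-style block has bit 0 of the mask set,
   giving 4 - 0 = 4 conditional instructions in total.  */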
9048 i += insn_32bit ? 4 : 2;
9052 if (elf_section_data (sec)->this_hdr.contents != contents)
9053 free (contents);
9054 contents = NULL;
9057 return true;
9059 error_return:
9060 if (elf_section_data (sec)->this_hdr.contents != contents)
9061 free (contents);
9063 return false;
9066 /* Set target relocation values needed during linking. */
9068 void
9069 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
9070 struct bfd_link_info *link_info,
9071 struct elf32_arm_params *params)
9073 struct elf32_arm_link_hash_table *globals;
9075 globals = elf32_arm_hash_table (link_info);
9076 if (globals == NULL)
9077 return;
9079 globals->target1_is_rel = params->target1_is_rel;
9080 if (globals->fdpic_p)
9081 globals->target2_reloc = R_ARM_GOT32;
9082 else if (strcmp (params->target2_type, "rel") == 0)
9083 globals->target2_reloc = R_ARM_REL32;
9084 else if (strcmp (params->target2_type, "abs") == 0)
9085 globals->target2_reloc = R_ARM_ABS32;
9086 else if (strcmp (params->target2_type, "got-rel") == 0)
9087 globals->target2_reloc = R_ARM_GOT_PREL;
9088 else
9090 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9091 params->target2_type);
9093 globals->fix_v4bx = params->fix_v4bx;
9094 globals->use_blx |= params->use_blx;
9095 globals->vfp11_fix = params->vfp11_denorm_fix;
9096 globals->stm32l4xx_fix = params->stm32l4xx_fix;
9097 if (globals->fdpic_p)
9098 globals->pic_veneer = 1;
9099 else
9100 globals->pic_veneer = params->pic_veneer;
9101 globals->fix_cortex_a8 = params->fix_cortex_a8;
9102 globals->fix_arm1176 = params->fix_arm1176;
9103 globals->cmse_implib = params->cmse_implib;
9104 globals->in_implib_bfd = params->in_implib_bfd;
9106 BFD_ASSERT (is_arm_elf (output_bfd));
9107 elf_arm_tdata (output_bfd)->no_enum_size_warning
9108 = params->no_enum_size_warning;
9109 elf_arm_tdata (output_bfd)->no_wchar_size_warning
9110 = params->no_wchar_size_warning;
9113 /* Replace the target offset of a Thumb bl or b.w instruction. */
9115 static void
9116 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
9118 bfd_vma upper;
9119 bfd_vma lower;
9120 int reloc_sign;
9122 BFD_ASSERT ((offset & 1) == 0);
9124 upper = bfd_get_16 (abfd, insn);
9125 lower = bfd_get_16 (abfd, insn + 2);
9126 reloc_sign = (offset < 0) ? 1 : 0;
9127 upper = (upper & ~(bfd_vma) 0x7ff)
9128 | ((offset >> 12) & 0x3ff)
9129 | (reloc_sign << 10);
9130 lower = (lower & ~(bfd_vma) 0x2fff)
9131 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
9132 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
9133 | ((offset >> 1) & 0x7ff);
9134 bfd_put_16 (abfd, upper, insn);
9135 bfd_put_16 (abfd, lower, insn + 2);
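/* Worked example for insert_thumb_branch (illustrative): for a forward
   offset of 0x1000, S (reloc_sign) is 0, the upper halfword receives
   imm10 = (0x1000 >> 12) & 0x3ff = 1, and the lower halfword receives
   J1 = J2 = 1 (offset bits 23 and 22 are clear, then inverted and
   XORed with S) together with imm11 = (0x1000 >> 1) & 0x7ff = 0.  */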
9138 /* Thumb code calling an ARM function. */
9140 static int
9141 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
9142 const char * name,
9143 bfd * input_bfd,
9144 bfd * output_bfd,
9145 asection * input_section,
9146 bfd_byte * hit_data,
9147 asection * sym_sec,
9148 bfd_vma offset,
9149 bfd_signed_vma addend,
9150 bfd_vma val,
9151 char **error_message)
9153 asection * s = 0;
9154 bfd_vma my_offset;
9155 long int ret_offset;
9156 struct elf_link_hash_entry * myh;
9157 struct elf32_arm_link_hash_table * globals;
9159 myh = find_thumb_glue (info, name, error_message);
9160 if (myh == NULL)
9161 return false;
9163 globals = elf32_arm_hash_table (info);
9164 BFD_ASSERT (globals != NULL);
9165 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9167 my_offset = myh->root.u.def.value;
9169 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9170 THUMB2ARM_GLUE_SECTION_NAME);
9172 BFD_ASSERT (s != NULL);
9173 BFD_ASSERT (s->contents != NULL);
9174 BFD_ASSERT (s->output_section != NULL);
9176 if ((my_offset & 0x01) == 0x01)
9178 if (sym_sec != NULL
9179 && sym_sec->owner != NULL
9180 && !INTERWORK_FLAG (sym_sec->owner))
9182 _bfd_error_handler
9183 (_("%pB(%s): warning: interworking not enabled;"
9184 " first occurrence: %pB: %s call to %s"),
9185 sym_sec->owner, name, input_bfd, "Thumb", "ARM");
9187 return false;
9190 --my_offset;
9191 myh->root.u.def.value = my_offset;
9193 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
9194 s->contents + my_offset);
9196 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
9197 s->contents + my_offset + 2);
9199 ret_offset =
9200 /* Address of destination of the stub. */
9201 ((bfd_signed_vma) val)
9202 - ((bfd_signed_vma)
9203 /* Offset from the start of the current section
9204 to the start of the stubs. */
9205 (s->output_offset
9206 /* Offset of the start of this stub from the start of the stubs. */
9207 + my_offset
9208 /* Address of the start of the current section. */
9209 + s->output_section->vma)
9210 /* The branch instruction is 4 bytes into the stub. */
9212 /* ARM branches work from the pc of the instruction + 8. */
9213 + 8);
9215 put_arm_insn (globals, output_bfd,
9216 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
9217 s->contents + my_offset + 4);
9220 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
9222 /* Now go back and fix up the original BL insn to point to here. */
9223 ret_offset =
9224 /* Address of where the stub is located. */
9225 (s->output_section->vma + s->output_offset + my_offset)
9226 /* Address of where the BL is located. */
9227 - (input_section->output_section->vma + input_section->output_offset
9228 + offset)
9229 /* Addend in the relocation. */
9230 - addend
9231 /* Biasing for PC-relative addressing. */
9232 - 8;
9234 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
9236 return true;
9239 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
9241 static struct elf_link_hash_entry *
9242 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
9243 const char * name,
9244 bfd * input_bfd,
9245 bfd * output_bfd,
9246 asection * sym_sec,
9247 bfd_vma val,
9248 asection * s,
9249 char ** error_message)
9251 bfd_vma my_offset;
9252 long int ret_offset;
9253 struct elf_link_hash_entry * myh;
9254 struct elf32_arm_link_hash_table * globals;
9256 myh = find_arm_glue (info, name, error_message);
9257 if (myh == NULL)
9258 return NULL;
9260 globals = elf32_arm_hash_table (info);
9261 BFD_ASSERT (globals != NULL);
9262 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9264 my_offset = myh->root.u.def.value;
9266 if ((my_offset & 0x01) == 0x01)
9268 if (sym_sec != NULL
9269 && sym_sec->owner != NULL
9270 && !INTERWORK_FLAG (sym_sec->owner))
9272 _bfd_error_handler
9273 (_("%pB(%s): warning: interworking not enabled;"
9274 " first occurrence: %pB: %s call to %s"),
9275 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
9278 --my_offset;
9279 myh->root.u.def.value = my_offset;
9281 if (bfd_link_pic (info)
9282 || globals->pic_veneer)
9284 /* For relocatable objects we can't use absolute addresses,
9285 so construct the address from a relative offset. */
9286 /* TODO: If the offset is small it's probably worth
9287 constructing the address with adds. */
9288 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
9289 s->contents + my_offset);
9290 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
9291 s->contents + my_offset + 4);
9292 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
9293 s->contents + my_offset + 8);
9294 /* Adjust the offset by 4 for the position of the add,
9295 and 8 for the pipeline offset. */
9296 ret_offset = (val - (s->output_offset
9297 + s->output_section->vma
9298 + my_offset + 12))
9299 | 1;
9300 bfd_put_32 (output_bfd, ret_offset,
9301 s->contents + my_offset + 12);
9303 else if (globals->use_blx)
9305 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
9306 s->contents + my_offset);
9308 /* It's a thumb address. Add the low order bit. */
9309 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
9310 s->contents + my_offset + 4);
9312 else
9314 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
9315 s->contents + my_offset);
9317 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
9318 s->contents + my_offset + 4);
9320 /* It's a thumb address. Add the low order bit. */
9321 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
9322 s->contents + my_offset + 8);
9324 my_offset += 12;
9328 BFD_ASSERT (my_offset <= globals->arm_glue_size);
9330 return myh;
9333 /* Arm code calling a Thumb function. */
9335 static int
9336 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
9337 const char * name,
9338 bfd * input_bfd,
9339 bfd * output_bfd,
9340 asection * input_section,
9341 bfd_byte * hit_data,
9342 asection * sym_sec,
9343 bfd_vma offset,
9344 bfd_signed_vma addend,
9345 bfd_vma val,
9346 char **error_message)
9348 unsigned long int tmp;
9349 bfd_vma my_offset;
9350 asection * s;
9351 long int ret_offset;
9352 struct elf_link_hash_entry * myh;
9353 struct elf32_arm_link_hash_table * globals;
9355 globals = elf32_arm_hash_table (info);
9356 BFD_ASSERT (globals != NULL);
9357 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9359 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9360 ARM2THUMB_GLUE_SECTION_NAME);
9361 BFD_ASSERT (s != NULL);
9362 BFD_ASSERT (s->contents != NULL);
9363 BFD_ASSERT (s->output_section != NULL);
9365 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
9366 sym_sec, val, s, error_message);
9367 if (!myh)
9368 return false;
9370 my_offset = myh->root.u.def.value;
9371 tmp = bfd_get_32 (input_bfd, hit_data);
9372 tmp = tmp & 0xFF000000;
9374 /* ARM branches are relative to the PC of the instruction plus 8, so subtract 8. */
9375 ret_offset = (s->output_offset
9376 + my_offset
9377 + s->output_section->vma
9378 - (input_section->output_offset
9379 + input_section->output_section->vma
9380 + offset + addend)
9381 - 8);
9383 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
9385 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
9387 return true;
9390 /* Populate Arm stub for an exported Thumb function. */
9392 static bool
9393 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
9395 struct bfd_link_info * info = (struct bfd_link_info *) inf;
9396 asection * s;
9397 struct elf_link_hash_entry * myh;
9398 struct elf32_arm_link_hash_entry *eh;
9399 struct elf32_arm_link_hash_table * globals;
9400 asection *sec;
9401 bfd_vma val;
9402 char *error_message;
9404 eh = elf32_arm_hash_entry (h);
9405 /* Allocate stubs for exported Thumb functions on v4t. */
9406 if (eh->export_glue == NULL)
9407 return true;
9409 globals = elf32_arm_hash_table (info);
9410 BFD_ASSERT (globals != NULL);
9411 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9413 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9414 ARM2THUMB_GLUE_SECTION_NAME);
9415 BFD_ASSERT (s != NULL);
9416 BFD_ASSERT (s->contents != NULL);
9417 BFD_ASSERT (s->output_section != NULL);
9419 sec = eh->export_glue->root.u.def.section;
9421 BFD_ASSERT (sec->output_section != NULL);
9423 val = eh->export_glue->root.u.def.value + sec->output_offset
9424 + sec->output_section->vma;
9426 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
9427 h->root.u.def.section->owner,
9428 globals->obfd, sec, val, s,
9429 &error_message);
9430 BFD_ASSERT (myh);
9431 return true;
9434 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
9436 static bfd_vma
9437 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9439 bfd_byte *p;
9440 bfd_vma glue_addr;
9441 asection *s;
9442 struct elf32_arm_link_hash_table *globals;
9444 globals = elf32_arm_hash_table (info);
9445 BFD_ASSERT (globals != NULL);
9446 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9448 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9449 ARM_BX_GLUE_SECTION_NAME);
9450 BFD_ASSERT (s != NULL);
9451 BFD_ASSERT (s->contents != NULL);
9452 BFD_ASSERT (s->output_section != NULL);
9454 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9456 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9458 if ((globals->bx_glue_offset[reg] & 1) == 0)
9460 p = s->contents + glue_addr;
9461 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9462 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9463 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9464 globals->bx_glue_offset[reg] |= 1;
9467 return glue_addr + s->output_section->vma + s->output_offset;
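/* An informal sketch of the veneer written above.  The armbx* opcode
   constants are defined earlier in this file; the mnemonics are a
   reading of their usual values, not authoritative disassembly:

       tst   rN, #1     @ Thumb destination?
       moveq pc, rN     @ no: behaves like 'mov pc, rN'
       bx    rN         @ yes: real interworking branch

   One veneer is written per register, on first use only; the low bit
   of bx_glue_offset[reg] records that it has been populated.  */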
9470 /* Generate Arm stubs for exported Thumb symbols. */
9471 static void
9472 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9473 struct bfd_link_info *link_info)
9475 struct elf32_arm_link_hash_table * globals;
9477 if (link_info == NULL)
9478 /* Ignore this if we are not called by the ELF backend linker. */
9479 return;
9481 globals = elf32_arm_hash_table (link_info);
9482 if (globals == NULL)
9483 return;
9485 /* If blx is available then exported Thumb symbols are OK and there is
9486 nothing to do. */
9487 if (globals->use_blx)
9488 return;
9490 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9491 link_info);
9494 /* Reserve space for COUNT dynamic relocations in relocation section
9495 SRELOC. */
9497 static void
9498 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9499 bfd_size_type count)
9501 struct elf32_arm_link_hash_table *htab;
9503 htab = elf32_arm_hash_table (info);
9504 BFD_ASSERT (htab->root.dynamic_sections_created);
9505 if (sreloc == NULL)
9506 abort ();
9507 sreloc->size += RELOC_SIZE (htab) * count;
9510 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9511 dynamic, the relocations should go in SRELOC, otherwise they should
9512 go in the special .rel.iplt section. */
9514 static void
9515 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9516 bfd_size_type count)
9518 struct elf32_arm_link_hash_table *htab;
9520 htab = elf32_arm_hash_table (info);
9521 if (!htab->root.dynamic_sections_created)
9522 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9523 else
9525 BFD_ASSERT (sreloc != NULL);
9526 sreloc->size += RELOC_SIZE (htab) * count;
9530 /* Add relocation REL to the end of relocation section SRELOC. */
9532 static void
9533 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9534 asection *sreloc, Elf_Internal_Rela *rel)
9536 bfd_byte *loc;
9537 struct elf32_arm_link_hash_table *htab;
9539 htab = elf32_arm_hash_table (info);
9540 if (!htab->root.dynamic_sections_created
9541 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9542 sreloc = htab->root.irelplt;
9543 if (sreloc == NULL)
9544 abort ();
9545 loc = sreloc->contents;
9546 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9547 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9548 abort ();
9549 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9552 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9553 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
9554 to .plt. */
9556 static void
9557 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
9558 bool is_iplt_entry,
9559 union gotplt_union *root_plt,
9560 struct arm_plt_info *arm_plt)
9562 struct elf32_arm_link_hash_table *htab;
9563 asection *splt;
9564 asection *sgotplt;
9566 htab = elf32_arm_hash_table (info);
9568 if (is_iplt_entry)
9570 splt = htab->root.iplt;
9571 sgotplt = htab->root.igotplt;
9573 /* NaCl uses a special first entry in .iplt too. */
9574 if (htab->root.target_os == is_nacl && splt->size == 0)
9575 splt->size += htab->plt_header_size;
9577 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
9578 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
9580 else
9582 splt = htab->root.splt;
9583 sgotplt = htab->root.sgotplt;
9585 if (htab->fdpic_p)
9587 /* Allocate room for R_ARM_FUNCDESC_VALUE. */
9588 /* For lazy binding, relocations will be put into .rel.plt, in
9589 .rel.got otherwise. */
9590 /* FIXME: today we don't support lazy binding so put it in .rel.got */
9591 if (info->flags & DF_BIND_NOW)
9592 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
9593 else
9594 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9596 else
9598 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
9599 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9602 /* If this is the first .plt entry, make room for the special
9603 first entry. */
9604 if (splt->size == 0)
9605 splt->size += htab->plt_header_size;
9607 htab->next_tls_desc_index++;
9610 /* Allocate the PLT entry itself, including any leading Thumb stub. */
9611 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9612 splt->size += PLT_THUMB_STUB_SIZE;
9613 root_plt->offset = splt->size;
9614 splt->size += htab->plt_entry_size;
9616 /* We also need to make an entry in the .got.plt section, which
9617 will be placed in the .got section by the linker script. */
9618 if (is_iplt_entry)
9619 arm_plt->got_offset = sgotplt->size;
9620 else
9621 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
9622 if (htab->fdpic_p)
9623 /* Function descriptor takes 64 bits in GOT. */
9624 sgotplt->size += 8;
9625 else
9626 sgotplt->size += 4;
9629 static bfd_vma
9630 arm_movw_immediate (bfd_vma value)
9632 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9635 static bfd_vma
9636 arm_movt_immediate (bfd_vma value)
9638 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
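/* The two helpers above split a 16-bit immediate the way MOVW/MOVT
   encode it: imm4 lands in instruction bits 19-16 and imm12 in bits
   11-0.  arm_movw_immediate takes the low halfword of VALUE and
   arm_movt_immediate the high halfword, for example:

       arm_movw_immediate (0x1234abcd) == 0x000a0bcd
       arm_movt_immediate (0x1234abcd) == 0x00010234

   The results are simply ORed into the PLT entry templates below.  */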
9641 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
9642 the entry lives in .iplt and resolves to (*SYM_VALUE)().
9643 Otherwise, DYNINDX is the index of the symbol in the dynamic
9644 symbol table and SYM_VALUE is undefined.
9646 ROOT_PLT points to the offset of the PLT entry from the start of its
9647 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
9648 bookkeeping information.
9650 Returns FALSE if there was a problem. */
9652 static bool
9653 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
9654 union gotplt_union *root_plt,
9655 struct arm_plt_info *arm_plt,
9656 int dynindx, bfd_vma sym_value)
9658 struct elf32_arm_link_hash_table *htab;
9659 asection *sgot;
9660 asection *splt;
9661 asection *srel;
9662 bfd_byte *loc;
9663 bfd_vma plt_index;
9664 Elf_Internal_Rela rel;
9665 bfd_vma got_header_size;
9667 htab = elf32_arm_hash_table (info);
9669 /* Pick the appropriate sections and sizes. */
9670 if (dynindx == -1)
9672 splt = htab->root.iplt;
9673 sgot = htab->root.igotplt;
9674 srel = htab->root.irelplt;
9676 /* There are no reserved entries in .igot.plt, and no special
9677 first entry in .iplt. */
9678 got_header_size = 0;
9680 else
9682 splt = htab->root.splt;
9683 sgot = htab->root.sgotplt;
9684 srel = htab->root.srelplt;
9686 got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
9688 BFD_ASSERT (splt != NULL && srel != NULL);
9690 bfd_vma got_offset, got_address, plt_address;
9691 bfd_vma got_displacement, initial_got_entry;
9692 bfd_byte * ptr;
9694 BFD_ASSERT (sgot != NULL);
9696 /* Get the offset into the .(i)got.plt table of the entry that
9697 corresponds to this function. */
9698 got_offset = (arm_plt->got_offset & -2);
9700 /* Get the index in the procedure linkage table which
9701 corresponds to this symbol. This is the index of this symbol
9702 in all the symbols for which we are making plt entries.
9703 After the reserved .got.plt entries, all symbols appear in
9704 the same order as in .plt. */
9705 if (htab->fdpic_p)
9706 /* Function descriptor takes 8 bytes. */
9707 plt_index = (got_offset - got_header_size) / 8;
9708 else
9709 plt_index = (got_offset - got_header_size) / 4;
9711 /* Calculate the address of the GOT entry. */
9712 got_address = (sgot->output_section->vma
9713 + sgot->output_offset
9714 + got_offset);
9716 /* ...and the address of the PLT entry. */
9717 plt_address = (splt->output_section->vma
9718 + splt->output_offset
9719 + root_plt->offset);
9721 ptr = splt->contents + root_plt->offset;
9722 if (htab->root.target_os == is_vxworks && bfd_link_pic (info))
9724 unsigned int i;
9725 bfd_vma val;
9727 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9729 val = elf32_arm_vxworks_shared_plt_entry[i];
9730 if (i == 2)
9731 val |= got_address - sgot->output_section->vma;
9732 if (i == 5)
9733 val |= plt_index * RELOC_SIZE (htab);
9734 if (i == 2 || i == 5)
9735 bfd_put_32 (output_bfd, val, ptr);
9736 else
9737 put_arm_insn (htab, output_bfd, val, ptr);
9740 else if (htab->root.target_os == is_vxworks)
9742 unsigned int i;
9743 bfd_vma val;
9745 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9747 val = elf32_arm_vxworks_exec_plt_entry[i];
9748 if (i == 2)
9749 val |= got_address;
9750 if (i == 4)
9751 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
9752 if (i == 5)
9753 val |= plt_index * RELOC_SIZE (htab);
9754 if (i == 2 || i == 5)
9755 bfd_put_32 (output_bfd, val, ptr);
9756 else
9757 put_arm_insn (htab, output_bfd, val, ptr);
9760 loc = (htab->srelplt2->contents
9761 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
9763 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9764 referencing the GOT for this PLT entry. */
9765 rel.r_offset = plt_address + 8;
9766 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
9767 rel.r_addend = got_offset;
9768 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9769 loc += RELOC_SIZE (htab);
9771 /* Create the R_ARM_ABS32 relocation referencing the
9772 beginning of the PLT for this GOT entry. */
9773 rel.r_offset = got_address;
9774 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
9775 rel.r_addend = 0;
9776 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9778 else if (htab->root.target_os == is_nacl)
9780 /* Calculate the displacement between the PLT slot and the
9781 common tail that's part of the special initial PLT slot. */
9782 int32_t tail_displacement
9783 = ((splt->output_section->vma + splt->output_offset
9784 + ARM_NACL_PLT_TAIL_OFFSET)
9785 - (plt_address + htab->plt_entry_size + 4));
9786 BFD_ASSERT ((tail_displacement & 3) == 0);
9787 tail_displacement >>= 2;
9789 BFD_ASSERT ((tail_displacement & 0xff000000) == 0
9790 || (-tail_displacement & 0xff000000) == 0);
9792 /* Calculate the displacement between the PLT slot and the entry
9793 in the GOT. The offset accounts for the value produced by
9794 adding to pc in the penultimate instruction of the PLT stub. */
9795 got_displacement = (got_address
9796 - (plt_address + htab->plt_entry_size));
9798 /* NaCl does not support interworking at all. */
9799 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
9801 put_arm_insn (htab, output_bfd,
9802 elf32_arm_nacl_plt_entry[0]
9803 | arm_movw_immediate (got_displacement),
9804 ptr + 0);
9805 put_arm_insn (htab, output_bfd,
9806 elf32_arm_nacl_plt_entry[1]
9807 | arm_movt_immediate (got_displacement),
9808 ptr + 4);
9809 put_arm_insn (htab, output_bfd,
9810 elf32_arm_nacl_plt_entry[2],
9811 ptr + 8);
9812 put_arm_insn (htab, output_bfd,
9813 elf32_arm_nacl_plt_entry[3]
9814 | (tail_displacement & 0x00ffffff),
9815 ptr + 12);
9817 else if (htab->fdpic_p)
9819 const bfd_vma *plt_entry = using_thumb_only (htab)
9820 ? elf32_arm_fdpic_thumb_plt_entry
9821 : elf32_arm_fdpic_plt_entry;
9823 /* Fill-up Thumb stub if needed. */
9824 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9826 put_thumb_insn (htab, output_bfd,
9827 elf32_arm_plt_thumb_stub[0], ptr - 4);
9828 put_thumb_insn (htab, output_bfd,
9829 elf32_arm_plt_thumb_stub[1], ptr - 2);
9831 /* As we are using 32 bit instructions even for the Thumb
9832 version, we have to use 'put_arm_insn' instead of
9833 'put_thumb_insn'. */
9834 put_arm_insn (htab, output_bfd, plt_entry[0], ptr + 0);
9835 put_arm_insn (htab, output_bfd, plt_entry[1], ptr + 4);
9836 put_arm_insn (htab, output_bfd, plt_entry[2], ptr + 8);
9837 put_arm_insn (htab, output_bfd, plt_entry[3], ptr + 12);
9838 bfd_put_32 (output_bfd, got_offset, ptr + 16);
9840 if (!(info->flags & DF_BIND_NOW))
9842 /* funcdesc_value_reloc_offset. */
9843 bfd_put_32 (output_bfd,
9844 htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
9845 ptr + 20);
9846 put_arm_insn (htab, output_bfd, plt_entry[6], ptr + 24);
9847 put_arm_insn (htab, output_bfd, plt_entry[7], ptr + 28);
9848 put_arm_insn (htab, output_bfd, plt_entry[8], ptr + 32);
9849 put_arm_insn (htab, output_bfd, plt_entry[9], ptr + 36);
9852 else if (using_thumb_only (htab))
9854 /* PR ld/16017: Generate thumb only PLT entries. */
9855 if (!using_thumb2 (htab))
9857 /* FIXME: We ought to be able to generate thumb-1 PLT
9858 instructions... */
9859 _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9860 output_bfd);
9861 return false;
9864 /* Calculate the displacement between the PLT slot and the entry in
9865 the GOT. The 12-byte offset accounts for the value produced by
9866 adding to pc in the 3rd instruction of the PLT stub. */
9867 got_displacement = got_address - (plt_address + 12);
9869 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9870 instead of 'put_thumb_insn'. */
9871 put_arm_insn (htab, output_bfd,
9872 elf32_thumb2_plt_entry[0]
9873 | ((got_displacement & 0x000000ff) << 16)
9874 | ((got_displacement & 0x00000700) << 20)
9875 | ((got_displacement & 0x00000800) >> 1)
9876 | ((got_displacement & 0x0000f000) >> 12),
9877 ptr + 0);
9878 put_arm_insn (htab, output_bfd,
9879 elf32_thumb2_plt_entry[1]
9880 | ((got_displacement & 0x00ff0000) )
9881 | ((got_displacement & 0x07000000) << 4)
9882 | ((got_displacement & 0x08000000) >> 17)
9883 | ((got_displacement & 0xf0000000) >> 28),
9884 ptr + 4);
9885 put_arm_insn (htab, output_bfd,
9886 elf32_thumb2_plt_entry[2],
9887 ptr + 8);
9888 put_arm_insn (htab, output_bfd,
9889 elf32_thumb2_plt_entry[3],
9890 ptr + 12);
9892 else
9894 /* Calculate the displacement between the PLT slot and the
9895 entry in the GOT. The eight-byte offset accounts for the
9896 value produced by adding to pc in the first instruction
9897 of the PLT stub. */
9898 got_displacement = got_address - (plt_address + 8);
9900 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9902 put_thumb_insn (htab, output_bfd,
9903 elf32_arm_plt_thumb_stub[0], ptr - 4);
9904 put_thumb_insn (htab, output_bfd,
9905 elf32_arm_plt_thumb_stub[1], ptr - 2);
9908 if (!elf32_arm_use_long_plt_entry)
9910 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
9912 put_arm_insn (htab, output_bfd,
9913 elf32_arm_plt_entry_short[0]
9914 | ((got_displacement & 0x0ff00000) >> 20),
9915 ptr + 0);
9916 put_arm_insn (htab, output_bfd,
9917 elf32_arm_plt_entry_short[1]
9918 | ((got_displacement & 0x000ff000) >> 12),
9919 ptr + 4);
9920 put_arm_insn (htab, output_bfd,
9921 elf32_arm_plt_entry_short[2]
9922 | (got_displacement & 0x00000fff),
9923 ptr + 8);
9924 #ifdef FOUR_WORD_PLT
9925 bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
9926 #endif
9928 else
9930 put_arm_insn (htab, output_bfd,
9931 elf32_arm_plt_entry_long[0]
9932 | ((got_displacement & 0xf0000000) >> 28),
9933 ptr + 0);
9934 put_arm_insn (htab, output_bfd,
9935 elf32_arm_plt_entry_long[1]
9936 | ((got_displacement & 0x0ff00000) >> 20),
9937 ptr + 4);
9938 put_arm_insn (htab, output_bfd,
9939 elf32_arm_plt_entry_long[2]
9940 | ((got_displacement & 0x000ff000) >> 12),
9941 ptr + 8);
9942 put_arm_insn (htab, output_bfd,
9943 elf32_arm_plt_entry_long[3]
9944 | (got_displacement & 0x00000fff),
9945 ptr + 12);
9949 /* Fill in the entry in the .rel(a).(i)plt section. */
9950 rel.r_offset = got_address;
9951 rel.r_addend = 0;
9952 if (dynindx == -1)
9954 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9955 The dynamic linker or static executable then calls SYM_VALUE
9956 to determine the correct run-time value of the .igot.plt entry. */
9957 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9958 initial_got_entry = sym_value;
9960 else
9962 /* For FDPIC we will have to resolve an R_ARM_FUNCDESC_VALUE
9963 used by the PLT entry. */
9964 if (htab->fdpic_p)
9966 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
9967 initial_got_entry = 0;
9969 else
9971 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
9972 initial_got_entry = (splt->output_section->vma
9973 + splt->output_offset);
9975 /* PR ld/16017
9976 When thumb only we need to set the LSB for any address that
9977 will be used with an interworking branch instruction. */
9978 if (using_thumb_only (htab))
9979 initial_got_entry |= 1;
9983 /* Fill in the entry in the global offset table. */
9984 bfd_put_32 (output_bfd, initial_got_entry,
9985 sgot->contents + got_offset);
9987 if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
9989 /* Setup initial funcdesc value. */
9990 /* FIXME: we don't support lazy binding because there is a
9991 race condition between both words getting written and
9992 some other thread attempting to read them. The ARM
9993 architecture does not have an atomic 64 bit load/store
9994 instruction that could be used to prevent it; it is
9995 recommended that threaded FDPIC applications run with the
9996 LD_BIND_NOW environment variable set. */
9997 bfd_put_32 (output_bfd, plt_address + 0x18,
9998 sgot->contents + got_offset);
9999 bfd_put_32 (output_bfd, -1 /*TODO*/,
10000 sgot->contents + got_offset + 4);
10003 if (dynindx == -1)
10004 elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
10005 else
10007 if (htab->fdpic_p)
10009 /* For FDPIC we put PLT relocations into .rel.got when not
10010 lazy binding otherwise we put them in .rel.plt. For now,
10011 we don't support lazy binding so put it in .rel.got. */
10012 if (info->flags & DF_BIND_NOW)
10013 elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelgot, &rel);
10014 else
10015 elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelplt, &rel);
10017 else
10019 loc = srel->contents + plt_index * RELOC_SIZE (htab);
10020 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
10024 return true;
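/* For reference, a sketch of what the short (three instruction) ARM
   PLT entry above expands to once GOTDISP = got_address -
   (plt_address + 8) has been folded in.  The mnemonics are an informal
   reading of the usual elf32_arm_plt_entry_short template, not
   authoritative disassembly:

       add ip, pc, #(GOTDISP & 0x0ff00000)   @ pc reads as entry + 8
       add ip, ip, #(GOTDISP & 0x000ff000)
       ldr pc, [ip, #(GOTDISP & 0x00000fff)]!

   The BFD_ASSERT above guarantees the top nibble of GOTDISP is zero,
   so three chunks suffice; the long form uses a fourth instruction so
   that bits 31-28 can be carried as well.  */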
10027 /* Some relocations map to different relocations depending on the
10028 target. Return the real relocation. */
10030 static int
10031 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
10032 int r_type)
10034 switch (r_type)
10036 case R_ARM_TARGET1:
10037 if (globals->target1_is_rel)
10038 return R_ARM_REL32;
10039 else
10040 return R_ARM_ABS32;
10042 case R_ARM_TARGET2:
10043 return globals->target2_reloc;
10045 default:
10046 return r_type;
10050 /* Return the base VMA address which should be subtracted from real addresses
10051 when resolving @dtpoff relocation.
10052 This is PT_TLS segment p_vaddr. */
10054 static bfd_vma
10055 dtpoff_base (struct bfd_link_info *info)
10057 /* If tls_sec is NULL, we should have signalled an error already. */
10058 if (elf_hash_table (info)->tls_sec == NULL)
10059 return 0;
10060 return elf_hash_table (info)->tls_sec->vma;
10063 /* Return the relocation value for @tpoff relocation
10064 if STT_TLS virtual address is ADDRESS. */
10066 static bfd_vma
10067 tpoff (struct bfd_link_info *info, bfd_vma address)
10069 struct elf_link_hash_table *htab = elf_hash_table (info);
10070 bfd_vma base;
10072 /* If tls_sec is NULL, we should have signalled an error already. */
10073 if (htab->tls_sec == NULL)
10074 return 0;
10075 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10076 return address - htab->tls_sec->vma + base;
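/* Worked example (illustrative addresses, assuming the 8-byte TCB this
   target uses for TLS variant 1): with tls_sec->vma == 0x20000 and an
   8-byte-aligned TLS segment, a variable at 0x20010 has dtpoff 0x10
   and tpoff 0x10 + 8 = 0x18.  If the segment were 16-byte aligned, the
   TCB would be padded out to 16 and the same variable would get tpoff
   0x20.  */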
10079 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10080 VALUE is the relocation value. */
10082 static bfd_reloc_status_type
10083 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10085 if (value > 0xfff)
10086 return bfd_reloc_overflow;
10088 value |= bfd_get_32 (abfd, data) & 0xfffff000;
10089 bfd_put_32 (abfd, value, data);
10090 return bfd_reloc_ok;
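/* For instance (hypothetical word), applying VALUE = 0x123 to a word
   that currently reads 0xe59f0000 ('ldr r0, [pc, #0]') leaves
   0xe59f0123 in place; anything above 0xfff is reported as
   bfd_reloc_overflow before the store.  */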
10093 /* Handle TLS relaxations. Relaxing is possible for symbols that use
10094 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10095 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10097 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10098 is to then call final_link_relocate. Return other values in the
10099 case of error.
10101 FIXME: When --emit-relocs is in effect, we'll emit relocs describing
10102 the pre-relaxed code. It would be nice if the relocs were updated
10103 to match the optimization. */
10105 static bfd_reloc_status_type
10106 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
10107 bfd *input_bfd, asection *input_sec, bfd_byte *contents,
10108 Elf_Internal_Rela *rel, unsigned long is_local)
10110 unsigned long insn;
10112 switch (ELF32_R_TYPE (rel->r_info))
10114 default:
10115 return bfd_reloc_notsupported;
10117 case R_ARM_TLS_GOTDESC:
10118 if (is_local)
10119 insn = 0;
10120 else
10122 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10123 if (insn & 1)
10124 insn -= 5; /* THUMB */
10125 else
10126 insn -= 8; /* ARM */
10128 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10129 return bfd_reloc_continue;
10131 case R_ARM_THM_TLS_DESCSEQ:
10132 /* Thumb insn. */
10133 insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
10134 if ((insn & 0xff78) == 0x4478) /* add rx, pc */
10136 if (is_local)
10137 /* nop */
10138 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10140 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
10142 if (is_local)
10143 /* nop */
10144 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10145 else
10146 /* ldr rx,[ry] */
10147 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
10149 else if ((insn & 0xff87) == 0x4780) /* blx rx */
10151 if (is_local)
10152 /* nop */
10153 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10154 else
10155 /* mov r0, rx */
10156 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
10157 contents + rel->r_offset);
10159 else
10161 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10162 /* It's a 32 bit instruction, fetch the rest of it for
10163 error generation. */
10164 insn = (insn << 16)
10165 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
10166 _bfd_error_handler
10167 /* xgettext:c-format */
10168 (_("%pB(%pA+%#" PRIx64 "): "
10169 "unexpected %s instruction '%#lx' in TLS trampoline"),
10170 input_bfd, input_sec, (uint64_t) rel->r_offset,
10171 "Thumb", insn);
10172 return bfd_reloc_notsupported;
10174 break;
10176 case R_ARM_TLS_DESCSEQ:
10177 /* arm insn. */
10178 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10179 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
10181 if (is_local)
10182 /* mov rx, ry */
10183 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
10184 contents + rel->r_offset);
10186 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
10188 if (is_local)
10189 /* nop */
10190 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10191 else
10192 /* ldr rx,[ry] */
10193 bfd_put_32 (input_bfd, insn & 0xfffff000,
10194 contents + rel->r_offset);
10196 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
10198 if (is_local)
10199 /* nop */
10200 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10201 else
10202 /* mov r0, rx */
10203 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
10204 contents + rel->r_offset);
10206 else
10208 _bfd_error_handler
10209 /* xgettext:c-format */
10210 (_("%pB(%pA+%#" PRIx64 "): "
10211 "unexpected %s instruction '%#lx' in TLS trampoline"),
10212 input_bfd, input_sec, (uint64_t) rel->r_offset,
10213 "ARM", insn);
10214 return bfd_reloc_notsupported;
10216 break;
10218 case R_ARM_TLS_CALL:
10219 /* GD->IE relaxation, turn the instruction into 'nop' or
10220 'ldr r0, [pc,r0]' */
10221 insn = is_local ? 0xe1a00000 : 0xe79f0000;
10222 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10223 break;
10225 case R_ARM_THM_TLS_CALL:
10226 /* GD->IE relaxation. */
10227 if (!is_local)
10228 /* add r0,pc; ldr r0, [r0] */
10229 insn = 0x44786800;
10230 else if (using_thumb2 (globals))
10231 /* nop.w */
10232 insn = 0xf3af8000;
10233 else
10234 /* nop; nop */
10235 insn = 0xbf00bf00;
10237 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
10238 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
10239 break;
10241 return bfd_reloc_ok;
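/* A recap of the relaxation above, for orientation: when IS_LOCAL the
   general-dynamic sequence collapses to local-exec, i.e. the
   R_ARM_TLS_GOTDESC word becomes 0, the add/ldr/blx instructions of
   the R_ARM_{,THM_}TLS_DESCSEQ sequence become nops (or a plain
   register move), and R_ARM_{,THM_}TLS_CALL becomes a nop.  Otherwise
   the call is rewritten into a pc-relative load ('ldr r0, [pc, r0]'
   for ARM, 'add r0, pc; ldr r0, [r0]' for Thumb), giving the
   initial-exec form that reads the GOT entry directly.  */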
10244 /* For a given value of n, calculate the value of G_n as required to
10245 deal with group relocations. We return it in the form of an
10246 encoded constant-and-rotation, together with the final residual. If n is
10247 specified as less than zero, then final_residual is filled with the
10248 input value and no further action is performed. */
10250 static bfd_vma
10251 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10253 int current_n;
10254 bfd_vma g_n;
10255 bfd_vma encoded_g_n = 0;
10256 bfd_vma residual = value; /* Also known as Y_n. */
10258 for (current_n = 0; current_n <= n; current_n++)
10260 int shift;
10262 /* Calculate which part of the value to mask. */
10263 if (residual == 0)
10264 shift = 0;
10265 else
10267 int msb;
10269 /* Determine the most significant bit in the residual and
10270 align the resulting value to a 2-bit boundary. */
10271 for (msb = 30; msb >= 0; msb -= 2)
10272 if (residual & (3u << msb))
10273 break;
10275 /* The desired shift is now (msb - 6), or zero, whichever
10276 is the greater. */
10277 shift = msb - 6;
10278 if (shift < 0)
10279 shift = 0;
10282 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10283 g_n = residual & (0xff << shift);
10284 encoded_g_n = (g_n >> shift)
10285 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10287 /* Calculate the residual for the next time around. */
10288 residual &= ~g_n;
10291 *final_residual = residual;
10293 return encoded_g_n;
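/* Worked example: for VALUE = 0x12345678 the loop above produces

       g_0 = 0x12000000  (residual 0x00345678)
       g_1 = 0x00344000  (residual 0x00001678)
       g_2 = 0x00001640  (residual 0x00000038)

   each returned as an 8-bit constant plus an even rotation, i.e. the
   ARM modified-immediate form expected by the group relocations.  */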
10296 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10297 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
10299 static int
10300 identify_add_or_sub (bfd_vma insn)
10302 int opcode = insn & 0x1e00000;
10304 if (opcode == 1 << 23) /* ADD */
10305 return 1;
10307 if (opcode == 1 << 22) /* SUB */
10308 return -1;
10310 return 0;
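/* The 0x1e00000 mask selects the data-processing opcode field (bits
   24-21): ADD is 0b0100, which is 1 << 23 once shifted into place,
   and SUB is 0b0010, i.e. 1 << 22, hence the two comparisons above.  */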
10313 /* Perform a relocation as part of a final link. */
10315 static bfd_reloc_status_type
10316 elf32_arm_final_link_relocate (reloc_howto_type * howto,
10317 bfd * input_bfd,
10318 bfd * output_bfd,
10319 asection * input_section,
10320 bfd_byte * contents,
10321 Elf_Internal_Rela * rel,
10322 bfd_vma value,
10323 struct bfd_link_info * info,
10324 asection * sym_sec,
10325 const char * sym_name,
10326 unsigned char st_type,
10327 enum arm_st_branch_type branch_type,
10328 struct elf_link_hash_entry * h,
10329 bool * unresolved_reloc_p,
10330 char ** error_message)
10332 unsigned long r_type = howto->type;
10333 unsigned long r_symndx;
10334 bfd_byte * hit_data = contents + rel->r_offset;
10335 bfd_vma * local_got_offsets;
10336 bfd_vma * local_tlsdesc_gotents;
10337 asection * sgot;
10338 asection * splt;
10339 asection * sreloc = NULL;
10340 asection * srelgot;
10341 bfd_vma addend;
10342 bfd_signed_vma signed_addend;
10343 unsigned char dynreloc_st_type;
10344 bfd_vma dynreloc_value;
10345 struct elf32_arm_link_hash_table * globals;
10346 struct elf32_arm_link_hash_entry *eh;
10347 union gotplt_union *root_plt;
10348 struct arm_plt_info *arm_plt;
10349 bfd_vma plt_offset;
10350 bfd_vma gotplt_offset;
10351 bool has_iplt_entry;
10352 bool resolved_to_zero;
10354 globals = elf32_arm_hash_table (info);
10355 if (globals == NULL)
10356 return bfd_reloc_notsupported;
10358 BFD_ASSERT (is_arm_elf (input_bfd));
10359 BFD_ASSERT (howto != NULL);
10361 /* Some relocation types map to different relocations depending on the
10362 target. We pick the right one here. */
10363 r_type = arm_real_reloc_type (globals, r_type);
10365 /* It is possible to have linker relaxations on some TLS access
10366 models. Update our information here. */
10367 r_type = elf32_arm_tls_transition (info, r_type, h);
10369 if (r_type != howto->type)
10370 howto = elf32_arm_howto_from_type (r_type);
10372 eh = (struct elf32_arm_link_hash_entry *) h;
10373 sgot = globals->root.sgot;
10374 local_got_offsets = elf_local_got_offsets (input_bfd);
10375 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10377 if (globals->root.dynamic_sections_created)
10378 srelgot = globals->root.srelgot;
10379 else
10380 srelgot = NULL;
10382 r_symndx = ELF32_R_SYM (rel->r_info);
10384 if (globals->use_rel)
10386 bfd_vma sign;
10388 switch (bfd_get_reloc_size (howto))
10390 case 1: addend = bfd_get_8 (input_bfd, hit_data); break;
10391 case 2: addend = bfd_get_16 (input_bfd, hit_data); break;
10392 case 4: addend = bfd_get_32 (input_bfd, hit_data); break;
10393 default: addend = 0; break;
10395 /* Note: the addend and signed_addend calculated here are
10396 incorrect for any split field. */
10397 addend &= howto->src_mask;
10398 sign = howto->src_mask & ~(howto->src_mask >> 1);
10399 signed_addend = (addend ^ sign) - sign;
10400 signed_addend = (bfd_vma) signed_addend << howto->rightshift;
10401 addend <<= howto->rightshift;
10403 else
10404 addend = signed_addend = rel->r_addend;
10406 /* Record the symbol information that should be used in dynamic
10407 relocations. */
10408 dynreloc_st_type = st_type;
10409 dynreloc_value = value;
10410 if (branch_type == ST_BRANCH_TO_THUMB)
10411 dynreloc_value |= 1;
10413 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10414 VALUE appropriately for relocations that we resolve at link time. */
10415 has_iplt_entry = false;
10416 if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10417 &arm_plt)
10418 && root_plt->offset != (bfd_vma) -1)
10420 plt_offset = root_plt->offset;
10421 gotplt_offset = arm_plt->got_offset;
10423 if (h == NULL || eh->is_iplt)
10425 has_iplt_entry = true;
10426 splt = globals->root.iplt;
10428 /* Populate .iplt entries here, because not all of them will
10429 be seen by finish_dynamic_symbol. The lower bit is set if
10430 we have already populated the entry. */
10431 if (plt_offset & 1)
10432 plt_offset--;
10433 else
10435 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10436 -1, dynreloc_value))
10437 root_plt->offset |= 1;
10438 else
10439 return bfd_reloc_notsupported;
10442 /* Static relocations always resolve to the .iplt entry. */
10443 st_type = STT_FUNC;
10444 value = (splt->output_section->vma
10445 + splt->output_offset
10446 + plt_offset);
10447 branch_type = ST_BRANCH_TO_ARM;
10449 /* If there are non-call relocations that resolve to the .iplt
10450 entry, then all dynamic ones must too. */
10451 if (arm_plt->noncall_refcount != 0)
10453 dynreloc_st_type = st_type;
10454 dynreloc_value = value;
10457 else
10458 /* We populate the .plt entry in finish_dynamic_symbol. */
10459 splt = globals->root.splt;
10461 else
10463 splt = NULL;
10464 plt_offset = (bfd_vma) -1;
10465 gotplt_offset = (bfd_vma) -1;
10468 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we are
10469 resolving a function call relocation. We want to inform the user
10470 that something is wrong. */
10471 if (using_thumb_only (globals)
10472 && (r_type == R_ARM_THM_CALL
10473 || r_type == R_ARM_THM_JUMP24)
10474 && branch_type == ST_BRANCH_TO_ARM
10475 /* Calls through a PLT are special: the assembly source code
10476 cannot be annotated with '.type foo(PLT), %function', and
10477 they are handled specifically below anyway. */
10478 && splt == NULL)
10480 if (sym_sec == bfd_abs_section_ptr)
10482 /* As an exception, assume that absolute symbols are of the
10483 right kind (Thumb). They are presumably defined in the
10484 linker script, where it is not possible to declare them as
10485 Thumb (and thus are seen as Arm mode). Inform the user with
10486 a warning, though. */
10487 branch_type = ST_BRANCH_TO_THUMB;
10489 if (sym_sec->owner)
10490 _bfd_error_handler
10491 (_("warning: %pB(%s): Forcing branch to absolute symbol in Thumb mode (Thumb-only CPU)"
10492 " in %pB"),
10493 sym_sec->owner, sym_name, input_bfd);
10494 else
10495 _bfd_error_handler
10496 (_("warning: (%s): Forcing branch to absolute symbol in Thumb mode (Thumb-only CPU)"
10497 " in %pB"),
10498 sym_name, input_bfd);
10500 else
10501 /* Otherwise do not silently build a stub, and let the users
10502 know they have to fix their code. Indeed, we could decide
10503 to insert a stub involving Arm code and/or BLX, leading to
10504 a run-time crash. */
10505 branch_type = ST_BRANCH_UNKNOWN;
10508 /* Fail early if branch_type is ST_BRANCH_UNKNOWN and we target a
10509 Thumb-only CPU. We could emit a warning on Arm-capable targets
10510 too, but that would be too verbose (a lot of legacy code does not
10511 use the .type foo, %function directive). */
10512 if (using_thumb_only (globals)
10513 && (r_type == R_ARM_THM_CALL
10514 || r_type == R_ARM_THM_JUMP24)
10515 && branch_type == ST_BRANCH_UNKNOWN
10516 /* Exception to the rule above: a branch to an undefined weak
10517 symbol is turned into a jump to the next instruction unless a
10518 PLT entry will be created (see below). */
10519 && !(h && h->root.type == bfd_link_hash_undefweak
10520 && plt_offset == (bfd_vma) -1))
10522 if (sym_sec != NULL
10523 && sym_sec->owner != NULL)
10524 _bfd_error_handler
10525 (_("%pB(%s): Unknown destination type (ARM/Thumb) in %pB"),
10526 sym_sec->owner, sym_name, input_bfd);
10527 else
10528 _bfd_error_handler
10529 (_("(%s): Unknown destination type (ARM/Thumb) in %pB"),
10530 sym_name, input_bfd);
10532 return bfd_reloc_notsupported;
10535 resolved_to_zero = (h != NULL
10536 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10538 switch (r_type)
10540 case R_ARM_NONE:
10541 /* We don't need to find a value for this symbol. It's just a
10542 marker. */
10543 *unresolved_reloc_p = false;
10544 return bfd_reloc_ok;
10546 case R_ARM_ABS12:
10547 if (globals->root.target_os != is_vxworks)
10548 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10549 /* Fall through. */
10551 case R_ARM_PC24:
10552 case R_ARM_ABS32:
10553 case R_ARM_ABS32_NOI:
10554 case R_ARM_REL32:
10555 case R_ARM_REL32_NOI:
10556 case R_ARM_CALL:
10557 case R_ARM_JUMP24:
10558 case R_ARM_XPC25:
10559 case R_ARM_PREL31:
10560 case R_ARM_PLT32:
10561 /* Handle relocations which should use the PLT entry. ABS32/REL32
10562 will use the symbol's value, which may point to a PLT entry, but we
10563 don't need to handle that here. If we created a PLT entry, all
10564 branches in this object should go to it, except if the PLT is too
10565 far away, in which case a long branch stub should be inserted. */
10566 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10567 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10568 && r_type != R_ARM_CALL
10569 && r_type != R_ARM_JUMP24
10570 && r_type != R_ARM_PLT32)
10571 && plt_offset != (bfd_vma) -1)
10573 /* If we've created a .plt section, and assigned a PLT entry
10574 to this function, it must either be a STT_GNU_IFUNC reference
10575 or not be known to bind locally. In other cases, we should
10576 have cleared the PLT entry by now. */
10577 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10579 value = (splt->output_section->vma
10580 + splt->output_offset
10581 + plt_offset);
10582 *unresolved_reloc_p = false;
10583 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10584 contents, rel->r_offset, value,
10585 rel->r_addend);
10588 /* When generating a shared library or PIE, these relocations
10589 are copied into the output file to be resolved at run time. */
10590 if ((bfd_link_pic (info)
10591 || globals->fdpic_p)
10592 && (input_section->flags & SEC_ALLOC)
10593 && !(globals->root.target_os == is_vxworks
10594 && strcmp (input_section->output_section->name,
10595 ".tls_vars") == 0)
10596 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10597 || !SYMBOL_CALLS_LOCAL (info, h))
10598 && !(input_bfd == globals->stub_bfd
10599 && strstr (input_section->name, STUB_SUFFIX))
10600 && (h == NULL
10601 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10602 && !resolved_to_zero)
10603 || h->root.type != bfd_link_hash_undefweak)
10604 && r_type != R_ARM_PC24
10605 && r_type != R_ARM_CALL
10606 && r_type != R_ARM_JUMP24
10607 && r_type != R_ARM_PREL31
10608 && r_type != R_ARM_PLT32)
10610 Elf_Internal_Rela outrel;
10611 bool skip, relocate;
10612 int isrofixup = 0;
10614 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10615 && !h->def_regular)
10617 char *v = _("shared object");
10619 if (bfd_link_executable (info))
10620 v = _("PIE executable");
10622 _bfd_error_handler
10623 (_("%pB: relocation %s against external or undefined symbol `%s'"
10624 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10625 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10626 return bfd_reloc_notsupported;
10629 *unresolved_reloc_p = false;
10631 if (sreloc == NULL && globals->root.dynamic_sections_created)
10633 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10634 ! globals->use_rel);
10636 if (sreloc == NULL)
10637 return bfd_reloc_notsupported;
10640 skip = false;
10641 relocate = false;
10643 outrel.r_addend = addend;
10644 outrel.r_offset =
10645 _bfd_elf_section_offset (output_bfd, info, input_section,
10646 rel->r_offset);
10647 if (outrel.r_offset == (bfd_vma) -1)
10648 skip = true;
10649 else if (outrel.r_offset == (bfd_vma) -2)
10650 skip = true, relocate = true;
10651 outrel.r_offset += (input_section->output_section->vma
10652 + input_section->output_offset);
10654 if (skip)
10655 memset (&outrel, 0, sizeof outrel);
10656 else if (h != NULL
10657 && h->dynindx != -1
10658 && (!bfd_link_pic (info)
10659 || !(bfd_link_pie (info)
10660 || SYMBOLIC_BIND (info, h))
10661 || !h->def_regular))
10662 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10663 else
10665 int symbol;
10667 /* This symbol is local, or marked to become local. */
10668 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10669 || (globals->fdpic_p && !bfd_link_pic (info)));
10670 /* On SVR4-ish systems, the dynamic loader cannot
10671 relocate the text and data segments independently,
10672 so the symbol does not matter. */
10673 symbol = 0;
10674 if (dynreloc_st_type == STT_GNU_IFUNC)
10675 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10676 to the .iplt entry. Instead, every non-call reference
10677 must use an R_ARM_IRELATIVE relocation to obtain the
10678 correct run-time address. */
10679 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10680 else if (globals->fdpic_p && !bfd_link_pic (info))
10681 isrofixup = 1;
10682 else
10683 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10684 if (globals->use_rel)
10685 relocate = true;
10686 else
10687 outrel.r_addend += dynreloc_value;
10690 if (isrofixup)
10691 arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
10692 else
10693 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10695 /* If this reloc is against an external symbol, we do not want to
10696 fiddle with the addend. Otherwise, we need to include the symbol
10697 value so that it becomes an addend for the dynamic reloc. */
10698 if (! relocate)
10699 return bfd_reloc_ok;
10701 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10702 contents, rel->r_offset,
10703 dynreloc_value, (bfd_vma) 0);
10705 else switch (r_type)
10707 case R_ARM_ABS12:
10708 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10710 case R_ARM_XPC25: /* Arm BLX instruction. */
10711 case R_ARM_CALL:
10712 case R_ARM_JUMP24:
10713 case R_ARM_PC24: /* Arm B/BL instruction. */
10714 case R_ARM_PLT32:
10716 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10718 if (r_type == R_ARM_XPC25)
10720 /* Check for Arm calling Arm function. */
10721 /* FIXME: Should we translate the instruction into a BL
10722 instruction instead ? */
10723 if (branch_type != ST_BRANCH_TO_THUMB)
10724 _bfd_error_handler
10725 (_("%pB: warning: %s BLX instruction targets"
10726 " %s function '%s'"),
10727 input_bfd, "ARM",
10728 "ARM", h ? h->root.root.string : "(local)");
10730 else if (r_type == R_ARM_PC24)
10732 /* Check for Arm calling Thumb function. */
10733 if (branch_type == ST_BRANCH_TO_THUMB)
10735 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10736 output_bfd, input_section,
10737 hit_data, sym_sec, rel->r_offset,
10738 signed_addend, value,
10739 error_message))
10740 return bfd_reloc_ok;
10741 else
10742 return bfd_reloc_dangerous;
10746 /* Check if a stub has to be inserted because the
10747 destination is too far or we are changing mode. */
10748 if ( r_type == R_ARM_CALL
10749 || r_type == R_ARM_JUMP24
10750 || r_type == R_ARM_PLT32)
10752 enum elf32_arm_stub_type stub_type = arm_stub_none;
10753 struct elf32_arm_link_hash_entry *hash;
10755 hash = (struct elf32_arm_link_hash_entry *) h;
10756 stub_type = arm_type_of_stub (info, input_section, rel,
10757 st_type, &branch_type,
10758 hash, value, sym_sec,
10759 input_bfd, sym_name);
10761 if (stub_type != arm_stub_none)
10763 /* The target is out of reach, so redirect the
10764 branch to the local stub for this function. */
10765 stub_entry = elf32_arm_get_stub_entry (input_section,
10766 sym_sec, h,
10767 rel, globals,
10768 stub_type);
10770 if (stub_entry != NULL)
10771 value = (stub_entry->stub_offset
10772 + stub_entry->stub_sec->output_offset
10773 + stub_entry->stub_sec->output_section->vma);
10775 if (plt_offset != (bfd_vma) -1)
10776 *unresolved_reloc_p = false;
10779 else
10781 /* If the call goes through a PLT entry, make sure to
10782 check distance to the right destination address. */
10783 if (plt_offset != (bfd_vma) -1)
10785 value = (splt->output_section->vma
10786 + splt->output_offset
10787 + plt_offset);
10788 *unresolved_reloc_p = false;
10789 /* The PLT entry is in ARM mode, regardless of the
10790 target function. */
10791 branch_type = ST_BRANCH_TO_ARM;
10796 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10797 where:
10798 S is the address of the symbol in the relocation.
10799 P is address of the instruction being relocated.
10800 A is the addend (extracted from the instruction) in bytes.
10802 S is held in 'value'.
10803 P is the base address of the section containing the
10804 instruction plus the offset of the reloc into that
10805 section, ie:
10806 (input_section->output_section->vma +
10807 input_section->output_offset +
10808 rel->r_offset).
10809 A is the addend, converted into bytes, ie:
10810 (signed_addend * 4)
10812 Note: None of these operations have knowledge of the pipeline
10813 size of the processor, thus it is up to the assembler to
10814 encode this information into the addend. */
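/* For example (hypothetical numbers): with S = 0x10000, P = 0x8000
   and A = -8, the field stored below is
   (0x10000 - 0x8000 - 8) >> 2 = 0x1ffe.  */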
10815 value -= (input_section->output_section->vma
10816 + input_section->output_offset);
10817 value -= rel->r_offset;
10818 value += signed_addend;
10820 signed_addend = value;
10821 signed_addend >>= howto->rightshift;
10823 /* A branch to an undefined weak symbol is turned into a jump to
10824 the next instruction unless a PLT entry will be created.
10825 Do the same for local undefined symbols (but not for STN_UNDEF).
10826 The jump to the next instruction is optimized as a NOP depending
10827 on the architecture. */
10828 if (h ? (h->root.type == bfd_link_hash_undefweak
10829 && plt_offset == (bfd_vma) -1)
10830 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10832 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10834 if (arch_has_arm_nop (globals))
10835 value |= 0x0320f000;
10836 else
10837 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10839 else
10841 /* Perform a signed range check. */
10842 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
10843 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10844 return bfd_reloc_overflow;
10846 addend = (value & 2);
10848 value = (signed_addend & howto->dst_mask)
10849 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10851 if (r_type == R_ARM_CALL)
10853 /* Set the H bit in the BLX instruction. */
10854 if (branch_type == ST_BRANCH_TO_THUMB)
10856 if (addend)
10857 value |= (1 << 24);
10858 else
10859 value &= ~(bfd_vma)(1 << 24);
10862 /* Select the correct instruction (BL or BLX). */
10863 /* Only if we are not handling a BL to a stub. In this
10864 case, mode switching is performed by the stub. */
10865 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10866 value |= (1 << 28);
10867 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10869 value &= ~(bfd_vma)(1 << 28);
10870 value |= (1 << 24);
10875 break;
10877 case R_ARM_ABS32:
10878 value += addend;
10879 if (branch_type == ST_BRANCH_TO_THUMB)
10880 value |= 1;
10881 break;
10883 case R_ARM_ABS32_NOI:
10884 value += addend;
10885 break;
10887 case R_ARM_REL32:
10888 value += addend;
10889 if (branch_type == ST_BRANCH_TO_THUMB)
10890 value |= 1;
10891 value -= (input_section->output_section->vma
10892 + input_section->output_offset + rel->r_offset);
10893 break;
10895 case R_ARM_REL32_NOI:
10896 value += addend;
10897 value -= (input_section->output_section->vma
10898 + input_section->output_offset + rel->r_offset);
10899 break;
10901 case R_ARM_PREL31:
10902 value -= (input_section->output_section->vma
10903 + input_section->output_offset + rel->r_offset);
10904 value += signed_addend;
10905 if (! h || h->root.type != bfd_link_hash_undefweak)
10907 /* Check for overflow. */
10908 if ((value ^ (value >> 1)) & (1 << 30))
10909 return bfd_reloc_overflow;
10911 value &= 0x7fffffff;
10912 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10913 if (branch_type == ST_BRANCH_TO_THUMB)
10914 value |= 1;
10915 break;
10918 bfd_put_32 (input_bfd, value, hit_data);
10919 return bfd_reloc_ok;
10921 case R_ARM_ABS8:
10922 value += addend;
10924 /* There is no way to tell whether the user intended to use a signed or
10925 unsigned addend. When checking for overflow we accept either,
10926 as specified by the AAELF. */
10927 if ((long) value > 0xff || (long) value < -0x80)
10928 return bfd_reloc_overflow;
10930 bfd_put_8 (input_bfd, value, hit_data);
10931 return bfd_reloc_ok;
10933 case R_ARM_ABS16:
10934 value += addend;
10936 /* See comment for R_ARM_ABS8. */
10937 if ((long) value > 0xffff || (long) value < -0x8000)
10938 return bfd_reloc_overflow;
10940 bfd_put_16 (input_bfd, value, hit_data);
10941 return bfd_reloc_ok;
10943 case R_ARM_THM_ABS5:
10944 /* Support ldr and str instructions for the thumb. */
10945 if (globals->use_rel)
10947 /* Need to refetch addend. */
10948 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10949 /* ??? Need to determine shift amount from operand size. */
10950 addend >>= howto->rightshift;
10952 value += addend;
10954 /* ??? Isn't value unsigned? */
10955 if ((long) value > 0x1f || (long) value < -0x10)
10956 return bfd_reloc_overflow;
10958 /* ??? Value needs to be properly shifted into place first. */
10959 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10960 bfd_put_16 (input_bfd, value, hit_data);
10961 return bfd_reloc_ok;
10963 case R_ARM_THM_ALU_PREL_11_0:
10964 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
10966 bfd_vma insn;
10967 bfd_signed_vma relocation;
10969 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10970 | bfd_get_16 (input_bfd, hit_data + 2);
10972 if (globals->use_rel)
10974 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10975 | ((insn & (1 << 26)) >> 15);
10976 if (insn & 0xf00000)
10977 signed_addend = -signed_addend;
10980 relocation = value + signed_addend;
10981 relocation -= Pa (input_section->output_section->vma
10982 + input_section->output_offset
10983 + rel->r_offset);
10985 /* PR 21523: Use an absolute value. The user of this reloc will
10986 have already selected an ADD or SUB insn appropriately. */
10987 value = llabs (relocation);
10989 if (value >= 0x1000)
10990 return bfd_reloc_overflow;
10992 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10993 if (branch_type == ST_BRANCH_TO_THUMB)
10994 value |= 1;
10996 insn = (insn & 0xfb0f8f00) | (value & 0xff)
10997 | ((value & 0x700) << 4)
10998 | ((value & 0x800) << 15);
10999 if (relocation < 0)
11000 insn |= 0xa00000;
11002 bfd_put_16 (input_bfd, insn >> 16, hit_data);
11003 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
11005 return bfd_reloc_ok;
11008 case R_ARM_THM_PC8:
11009 /* PR 10073: This reloc is not generated by the GNU toolchain,
11010 but it is supported for compatibility with third party libraries
11011 generated by other compilers, specifically the ARM/IAR. */
11013 bfd_vma insn;
11014 bfd_signed_vma relocation;
11016 insn = bfd_get_16 (input_bfd, hit_data);
11018 if (globals->use_rel)
11019 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
11021 relocation = value + addend;
11022 relocation -= Pa (input_section->output_section->vma
11023 + input_section->output_offset
11024 + rel->r_offset);
11026 value = relocation;
11028 /* We do not check for overflow of this reloc. Although strictly
11029 speaking this is incorrect, it appears to be necessary in order
11030 to work with IAR generated relocs. Since GCC and GAS do not
11031 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
11032 a problem for them. */
11033 value &= 0x3fc;
11035 insn = (insn & 0xff00) | (value >> 2);
11037 bfd_put_16 (input_bfd, insn, hit_data);
11039 return bfd_reloc_ok;
11042 case R_ARM_THM_PC12:
11043 /* Corresponds to: ldr.w reg, [pc, #offset]. */
11045 bfd_vma insn;
11046 bfd_signed_vma relocation;
11048 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
11049 | bfd_get_16 (input_bfd, hit_data + 2);
11051 if (globals->use_rel)
11053 signed_addend = insn & 0xfff;
11054 if (!(insn & (1 << 23)))
11055 signed_addend = -signed_addend;
11058 relocation = value + signed_addend;
11059 relocation -= Pa (input_section->output_section->vma
11060 + input_section->output_offset
11061 + rel->r_offset);
11063 value = relocation;
11065 if (value >= 0x1000)
11066 return bfd_reloc_overflow;
11068 insn = (insn & 0xff7ff000) | value;
11069 if (relocation >= 0)
11070 insn |= (1 << 23);
11072 bfd_put_16 (input_bfd, insn >> 16, hit_data);
11073 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
11075 return bfd_reloc_ok;
11078 case R_ARM_THM_XPC22:
11079 case R_ARM_THM_CALL:
11080 case R_ARM_THM_JUMP24:
11081 /* Thumb BL (branch long instruction). */
11083 bfd_vma relocation;
11084 bfd_vma reloc_sign;
11085 bool overflow = false;
11086 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11087 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11088 bfd_signed_vma reloc_signed_max;
11089 bfd_signed_vma reloc_signed_min;
11090 bfd_vma check;
11091 bfd_signed_vma signed_check;
11092 int bitsize;
11093 const int thumb2 = using_thumb2 (globals);
11094 const int thumb2_bl = using_thumb2_bl (globals);
11096 /* A branch to an undefined weak symbol is turned into a jump to
11097 the next instruction unless a PLT entry will be created.
11098 The jump to the next instruction is optimized as a NOP.W for
11099 Thumb-2 enabled architectures. */
11100 if (h && h->root.type == bfd_link_hash_undefweak
11101 && plt_offset == (bfd_vma) -1)
11103 if (thumb2)
11105 bfd_put_16 (input_bfd, 0xf3af, hit_data);
11106 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
11108 else
11110 bfd_put_16 (input_bfd, 0xe000, hit_data);
11111 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
11113 return bfd_reloc_ok;
11116 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
11117 with Thumb-1) involving the J1 and J2 bits. */
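/* Concretely, the addend reconstructed below is laid out as
   S:I1:I2:imm10:imm11:0, where I1 = !(J1 ^ S) and I2 = !(J2 ^ S),
   sign-extended from the S bit (bit 24).  The same layout is used when
   the relocated value is packed back into the two halfwords further
   down.  */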
11118 if (globals->use_rel)
11120 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
11121 bfd_vma upper = upper_insn & 0x3ff;
11122 bfd_vma lower = lower_insn & 0x7ff;
11123 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
11124 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
11125 bfd_vma i1 = j1 ^ s ? 0 : 1;
11126 bfd_vma i2 = j2 ^ s ? 0 : 1;
11128 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
11129 /* Sign extend. */
11130 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
11132 signed_addend = addend;
11135 if (r_type == R_ARM_THM_XPC22)
11137 /* Check for Thumb to Thumb call. */
11138 /* FIXME: Should we translate the instruction into a BL
11139 instruction instead ? */
11140 if (branch_type == ST_BRANCH_TO_THUMB)
11141 _bfd_error_handler
11142 (_("%pB: warning: %s BLX instruction targets"
11143 " %s function '%s'"),
11144 input_bfd, "Thumb",
11145 "Thumb", h ? h->root.root.string : "(local)");
11147 else
11149 /* If it is not a call to Thumb, assume call to Arm.
11150 If it is a call relative to a section name, then it is not a
11151 function call at all, but rather a long jump. Calls through
11152 the PLT do not require stubs. */
11153 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
11155 if (globals->use_blx && r_type == R_ARM_THM_CALL)
11157 /* Convert BL to BLX. */
11158 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11160 else if (( r_type != R_ARM_THM_CALL)
11161 && (r_type != R_ARM_THM_JUMP24))
11163 if (elf32_thumb_to_arm_stub
11164 (info, sym_name, input_bfd, output_bfd, input_section,
11165 hit_data, sym_sec, rel->r_offset, signed_addend, value,
11166 error_message))
11167 return bfd_reloc_ok;
11168 else
11169 return bfd_reloc_dangerous;
11172 else if (branch_type == ST_BRANCH_TO_THUMB
11173 && globals->use_blx
11174 && r_type == R_ARM_THM_CALL)
11176 /* Make sure this is a BL. */
11177 lower_insn |= 0x1800;
11181 enum elf32_arm_stub_type stub_type = arm_stub_none;
11182 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
11184 /* Check if a stub has to be inserted because the destination
11185 is too far. */
11186 struct elf32_arm_stub_hash_entry *stub_entry;
11187 struct elf32_arm_link_hash_entry *hash;
11189 hash = (struct elf32_arm_link_hash_entry *) h;
11191 stub_type = arm_type_of_stub (info, input_section, rel,
11192 st_type, &branch_type,
11193 hash, value, sym_sec,
11194 input_bfd, sym_name);
11196 if (stub_type != arm_stub_none)
11198 /* The target is out of reach or we are changing modes, so
11199 redirect the branch to the local stub for this
11200 function. */
11201 stub_entry = elf32_arm_get_stub_entry (input_section,
11202 sym_sec, h,
11203 rel, globals,
11204 stub_type);
11205 if (stub_entry != NULL)
11207 value = (stub_entry->stub_offset
11208 + stub_entry->stub_sec->output_offset
11209 + stub_entry->stub_sec->output_section->vma);
11211 if (plt_offset != (bfd_vma) -1)
11212 *unresolved_reloc_p = false;
11215 /* If this call becomes a call to Arm, force BLX. */
11216 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
11218 if ((stub_entry
11219 && !arm_stub_is_thumb (stub_entry->stub_type))
11220 || branch_type != ST_BRANCH_TO_THUMB)
11221 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11226 /* Handle calls via the PLT. */
11227 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
11229 value = (splt->output_section->vma
11230 + splt->output_offset
11231 + plt_offset);
11233 if (globals->use_blx
11234 && r_type == R_ARM_THM_CALL
11235 && ! using_thumb_only (globals))
11237 /* If the Thumb BLX instruction is available, convert
11238 the BL to a BLX instruction to call the ARM-mode
11239 PLT entry. */
11240 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11241 branch_type = ST_BRANCH_TO_ARM;
11243 else
11245 if (! using_thumb_only (globals))
11246 /* Target the Thumb stub before the ARM PLT entry. */
11247 value -= PLT_THUMB_STUB_SIZE;
11248 branch_type = ST_BRANCH_TO_THUMB;
11250 *unresolved_reloc_p = false;
11253 relocation = value + signed_addend;
11255 relocation -= (input_section->output_section->vma
11256 + input_section->output_offset
11257 + rel->r_offset);
11259 check = relocation >> howto->rightshift;
11261 /* If this is a signed value, the rightshift just dropped
11262 leading 1 bits (assuming twos complement). */
11263 if ((bfd_signed_vma) relocation >= 0)
11264 signed_check = check;
11265 else
11266 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
11268 /* Calculate the permissible maximum and minimum values for
11269 this relocation according to whether we're relocating for
11270 Thumb-2 or not. */
11271 bitsize = howto->bitsize;
11272 if (!thumb2_bl)
11273 bitsize -= 2;
11274 reloc_signed_max = (1 << (bitsize - 1)) - 1;
11275 reloc_signed_min = ~reloc_signed_max;
11277 /* Assumes two's complement. */
11278 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11279 overflow = true;
11281 if ((lower_insn & 0x5000) == 0x4000)
11282 /* For a BLX instruction, make sure that the relocation is rounded up
11283 to a word boundary. This follows the semantics of the instruction
11284 which specifies that bit 1 of the target address will come from bit
11285 1 of the base address. */
11286 relocation = (relocation + 2) & ~ 3;
11288 /* Put RELOCATION back into the insn. Assumes two's complement.
11289 We use the Thumb-2 encoding, which is safe even if dealing with
11290 a Thumb-1 instruction by virtue of our overflow check above. */
11291 reloc_sign = (signed_check < 0) ? 1 : 0;
11292 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
11293 | ((relocation >> 12) & 0x3ff)
11294 | (reloc_sign << 10);
11295 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
11296 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
11297 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
11298 | ((relocation >> 1) & 0x7ff);
11300 /* Put the relocated value back in the object file: */
11301 bfd_put_16 (input_bfd, upper_insn, hit_data);
11302 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11304 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11306 break;
11308 case R_ARM_THM_JUMP19:
11309 /* Thumb32 conditional branch instruction. */
11311 bfd_vma relocation;
11312 bool overflow = false;
11313 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11314 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11315 bfd_signed_vma reloc_signed_max = 0xffffe;
11316 bfd_signed_vma reloc_signed_min = -0x100000;
11317 bfd_signed_vma signed_check;
11318 enum elf32_arm_stub_type stub_type = arm_stub_none;
11319 struct elf32_arm_stub_hash_entry *stub_entry;
11320 struct elf32_arm_link_hash_entry *hash;
11322 /* Need to refetch the addend, reconstruct the top three bits,
11323 and squish the two 11 bit pieces together. */
11324 if (globals->use_rel)
11326 bfd_vma S = (upper_insn & 0x0400) >> 10;
11327 bfd_vma upper = (upper_insn & 0x003f);
11328 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
11329 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
11330 bfd_vma lower = (lower_insn & 0x07ff);
11332 upper |= J1 << 6;
11333 upper |= J2 << 7;
11334 upper |= (!S) << 8;
11335 upper -= 0x0100; /* Sign extend. */
11337 addend = (upper << 12) | (lower << 1);
11338 signed_addend = addend;
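/* Sketch of the fields decoded above (Thumb-2 conditional branch,
   B<c>.W): the byte offset is SignExtend (S:J2:J1:imm6:imm11:0),
   giving the +/-1MB range reflected in reloc_signed_min/max.  */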
11341 /* Handle calls via the PLT. */
11342 if (plt_offset != (bfd_vma) -1)
11344 value = (splt->output_section->vma
11345 + splt->output_offset
11346 + plt_offset);
11347 /* Target the Thumb stub before the ARM PLT entry. */
11348 value -= PLT_THUMB_STUB_SIZE;
11349 *unresolved_reloc_p = false;
11352 hash = (struct elf32_arm_link_hash_entry *)h;
11354 stub_type = arm_type_of_stub (info, input_section, rel,
11355 st_type, &branch_type,
11356 hash, value, sym_sec,
11357 input_bfd, sym_name);
11358 if (stub_type != arm_stub_none)
11360 stub_entry = elf32_arm_get_stub_entry (input_section,
11361 sym_sec, h,
11362 rel, globals,
11363 stub_type);
11364 if (stub_entry != NULL)
11366 value = (stub_entry->stub_offset
11367 + stub_entry->stub_sec->output_offset
11368 + stub_entry->stub_sec->output_section->vma);
11372 relocation = value + signed_addend;
11373 relocation -= (input_section->output_section->vma
11374 + input_section->output_offset
11375 + rel->r_offset);
11376 signed_check = (bfd_signed_vma) relocation;
11378 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11379 overflow = true;
11381 /* Put RELOCATION back into the insn. */
11383 bfd_vma S = (relocation & 0x00100000) >> 20;
11384 bfd_vma J2 = (relocation & 0x00080000) >> 19;
11385 bfd_vma J1 = (relocation & 0x00040000) >> 18;
11386 bfd_vma hi = (relocation & 0x0003f000) >> 12;
11387 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
11389 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
11390 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
11393 /* Put the relocated value back in the object file: */
11394 bfd_put_16 (input_bfd, upper_insn, hit_data);
11395 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11397 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11400 case R_ARM_THM_JUMP11:
11401 case R_ARM_THM_JUMP8:
11402 case R_ARM_THM_JUMP6:
11403 /* Thumb B (branch) instruction. */
11405 bfd_signed_vma relocation;
11406 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
11407 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
11408 bfd_signed_vma signed_check;
11410 /* CBZ cannot jump backward. */
11411 if (r_type == R_ARM_THM_JUMP6)
11413 reloc_signed_min = 0;
11414 if (globals->use_rel)
11415 signed_addend = ((addend & 0x200) >> 3) | ((addend & 0xf8) >> 2);
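/* For R_ARM_THM_JUMP6 the instruction is CBZ/CBNZ: the offset is
   the 6-bit field i:imm5 (insn bits 9 and 7:3) scaled by 2, a
   forward-only range of 0..126 bytes, hence reloc_signed_min = 0
   above.  */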
11418 relocation = value + signed_addend;
11420 relocation -= (input_section->output_section->vma
11421 + input_section->output_offset
11422 + rel->r_offset);
11424 relocation >>= howto->rightshift;
11425 signed_check = relocation;
11427 if (r_type == R_ARM_THM_JUMP6)
11428 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
11429 else
11430 relocation &= howto->dst_mask;
11431 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
11433 bfd_put_16 (input_bfd, relocation, hit_data);
11435 /* Assumes two's complement. */
11436 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11437 return bfd_reloc_overflow;
11439 return bfd_reloc_ok;
11442 case R_ARM_ALU_PCREL7_0:
11443 case R_ARM_ALU_PCREL15_8:
11444 case R_ARM_ALU_PCREL23_15:
11446 bfd_vma insn;
11447 bfd_vma relocation;
11449 insn = bfd_get_32 (input_bfd, hit_data);
11450 if (globals->use_rel)
11452 /* Extract the addend. */
11453 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11454 signed_addend = addend;
11456 relocation = value + signed_addend;
11458 relocation -= (input_section->output_section->vma
11459 + input_section->output_offset
11460 + rel->r_offset);
11461 insn = (insn & ~0xfff)
11462 | ((howto->bitpos << 7) & 0xf00)
11463 | ((relocation >> howto->bitpos) & 0xff);
11464 bfd_put_32 (input_bfd, insn, hit_data);
11466 return bfd_reloc_ok;
11468 case R_ARM_GNU_VTINHERIT:
11469 case R_ARM_GNU_VTENTRY:
11470 return bfd_reloc_ok;
11472 case R_ARM_GOTOFF32:
11473 /* Relocation is relative to the start of the
11474 global offset table. */
11476 BFD_ASSERT (sgot != NULL);
11477 if (sgot == NULL)
11478 return bfd_reloc_notsupported;
11480 /* If we are addressing a Thumb function, we need to adjust the
11481 address by one, so that attempts to call the function pointer will
11482 correctly interpret it as Thumb code. */
11483 if (branch_type == ST_BRANCH_TO_THUMB)
11484 value += 1;
11486 /* Note that sgot->output_offset is not involved in this
11487 calculation. We always want the start of .got. If we
11488 define _GLOBAL_OFFSET_TABLE_ in a different way, as is
11489 permitted by the ABI, we might have to change this
11490 calculation. */
11491 value -= sgot->output_section->vma;
11492 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11493 contents, rel->r_offset, value,
11494 rel->r_addend);
11496 case R_ARM_GOTPC:
11497 /* Use global offset table as symbol value. */
11498 BFD_ASSERT (sgot != NULL);
11500 if (sgot == NULL)
11501 return bfd_reloc_notsupported;
11503 *unresolved_reloc_p = false;
11504 value = sgot->output_section->vma;
11505 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11506 contents, rel->r_offset, value,
11507 rel->r_addend);
11509 case R_ARM_GOT32:
11510 case R_ARM_GOT_PREL:
11511 /* Relocation is to the entry for this symbol in the
11512 global offset table. */
11513 if (sgot == NULL)
11514 return bfd_reloc_notsupported;
11516 if (dynreloc_st_type == STT_GNU_IFUNC
11517 && plt_offset != (bfd_vma) -1
11518 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
11520 /* We have a relocation against a locally-binding STT_GNU_IFUNC
11521 symbol, and the relocation resolves directly to the runtime
11522 target rather than to the .iplt entry. This means that any
11523 .got entry would be the same value as the .igot.plt entry,
11524 so there's no point creating both. */
11525 sgot = globals->root.igotplt;
11526 value = sgot->output_offset + gotplt_offset;
11528 else if (h != NULL)
11530 bfd_vma off;
11532 off = h->got.offset;
11533 BFD_ASSERT (off != (bfd_vma) -1);
11534 if ((off & 1) != 0)
11536 /* We have already processed one GOT relocation against
11537 this symbol. */
11538 off &= ~1;
11539 if (globals->root.dynamic_sections_created
11540 && !SYMBOL_REFERENCES_LOCAL (info, h))
11541 *unresolved_reloc_p = false;
11543 else
11545 Elf_Internal_Rela outrel;
11546 int isrofixup = 0;
11548 if (((h->dynindx != -1) || globals->fdpic_p)
11549 && !SYMBOL_REFERENCES_LOCAL (info, h))
11551 /* If the symbol doesn't resolve locally in a static
11552 object, we have an undefined reference. If the
11553 symbol doesn't resolve locally in a dynamic object,
11554 it should be resolved by the dynamic linker. */
11555 if (globals->root.dynamic_sections_created)
11557 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11558 *unresolved_reloc_p = false;
11560 else
11561 outrel.r_info = 0;
11562 outrel.r_addend = 0;
11564 else
11566 if (dynreloc_st_type == STT_GNU_IFUNC)
11567 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11568 else if (bfd_link_pic (info)
11569 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
11570 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11571 else
11573 outrel.r_info = 0;
11574 if (globals->fdpic_p)
11575 isrofixup = 1;
11577 outrel.r_addend = dynreloc_value;
11580 /* The GOT entry is initialized to zero by default.
11581 See if we should install a different value. */
11582 if (outrel.r_addend != 0
11583 && (globals->use_rel || outrel.r_info == 0))
11585 bfd_put_32 (output_bfd, outrel.r_addend,
11586 sgot->contents + off);
11587 outrel.r_addend = 0;
11590 if (isrofixup)
11591 arm_elf_add_rofixup (output_bfd,
11592 elf32_arm_hash_table (info)->srofixup,
11593 sgot->output_section->vma
11594 + sgot->output_offset + off);
11596 else if (outrel.r_info != 0)
11598 outrel.r_offset = (sgot->output_section->vma
11599 + sgot->output_offset
11600 + off);
11601 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11604 h->got.offset |= 1;
11606 value = sgot->output_offset + off;
11608 else
11610 bfd_vma off;
11612 BFD_ASSERT (local_got_offsets != NULL
11613 && local_got_offsets[r_symndx] != (bfd_vma) -1);
11615 off = local_got_offsets[r_symndx];
11617 /* The offset must always be a multiple of 4. We use the
11618 least significant bit to record whether we have already
11619 generated the necessary reloc. */
11620 if ((off & 1) != 0)
11621 off &= ~1;
11622 else
11624 Elf_Internal_Rela outrel;
11625 int isrofixup = 0;
11627 if (dynreloc_st_type == STT_GNU_IFUNC)
11628 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11629 else if (bfd_link_pic (info))
11630 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11631 else
11633 outrel.r_info = 0;
11634 if (globals->fdpic_p)
11635 isrofixup = 1;
11638 /* The GOT entry is initialized to zero by default.
11639 See if we should install a different value. */
11640 if (globals->use_rel || outrel.r_info == 0)
11641 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
11643 if (isrofixup)
11644 arm_elf_add_rofixup (output_bfd,
11645 globals->srofixup,
11646 sgot->output_section->vma
11647 + sgot->output_offset + off);
11649 else if (outrel.r_info != 0)
11651 outrel.r_addend = addend + dynreloc_value;
11652 outrel.r_offset = (sgot->output_section->vma
11653 + sgot->output_offset
11654 + off);
11655 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11658 local_got_offsets[r_symndx] |= 1;
11661 value = sgot->output_offset + off;
11663 if (r_type != R_ARM_GOT32)
11664 value += sgot->output_section->vma;
11666 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11667 contents, rel->r_offset, value,
11668 rel->r_addend);
11670 case R_ARM_TLS_LDO32:
11671 value = value - dtpoff_base (info);
11673 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11674 contents, rel->r_offset, value,
11675 rel->r_addend);
11677 case R_ARM_TLS_LDM32:
11678 case R_ARM_TLS_LDM32_FDPIC:
11680 bfd_vma off;
11682 if (sgot == NULL)
11683 abort ();
11685 off = globals->tls_ldm_got.offset;
11687 if ((off & 1) != 0)
11688 off &= ~1;
11689 else
11691 /* If we don't know the module number, create a relocation
11692 for it. */
11693 if (bfd_link_dll (info))
11695 Elf_Internal_Rela outrel;
11697 if (srelgot == NULL)
11698 abort ();
11700 outrel.r_addend = 0;
11701 outrel.r_offset = (sgot->output_section->vma
11702 + sgot->output_offset + off);
11703 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
11705 if (globals->use_rel)
11706 bfd_put_32 (output_bfd, outrel.r_addend,
11707 sgot->contents + off);
11709 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11711 else
11712 bfd_put_32 (output_bfd, 1, sgot->contents + off);
11714 globals->tls_ldm_got.offset |= 1;
11717 if (r_type == R_ARM_TLS_LDM32_FDPIC)
11719 bfd_put_32 (output_bfd,
11720 globals->root.sgot->output_offset + off,
11721 contents + rel->r_offset);
11723 return bfd_reloc_ok;
11725 else
11727 value = sgot->output_section->vma + sgot->output_offset + off
11728 - (input_section->output_section->vma
11729 + input_section->output_offset + rel->r_offset);
11731 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11732 contents, rel->r_offset, value,
11733 rel->r_addend);
11737 case R_ARM_TLS_CALL:
11738 case R_ARM_THM_TLS_CALL:
11739 case R_ARM_TLS_GD32:
11740 case R_ARM_TLS_GD32_FDPIC:
11741 case R_ARM_TLS_IE32:
11742 case R_ARM_TLS_IE32_FDPIC:
11743 case R_ARM_TLS_GOTDESC:
11744 case R_ARM_TLS_DESCSEQ:
11745 case R_ARM_THM_TLS_DESCSEQ:
11747 bfd_vma off, offplt;
11748 int indx = 0;
11749 char tls_type;
11751 BFD_ASSERT (sgot != NULL);
11753 if (h != NULL)
11755 bool dyn;
11756 dyn = globals->root.dynamic_sections_created;
11757 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
11758 bfd_link_pic (info),
11760 && (!bfd_link_pic (info)
11761 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11763 *unresolved_reloc_p = false;
11764 indx = h->dynindx;
11766 off = h->got.offset;
11767 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
11768 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
11770 else
11772 BFD_ASSERT (local_got_offsets != NULL);
11774 if (r_symndx >= elf32_arm_num_entries (input_bfd))
11776 _bfd_error_handler (_("\
11777 %pB: expected symbol index in range 0..%lu but found local symbol with index %lu"),
11778 input_bfd,
11779 (unsigned long) elf32_arm_num_entries (input_bfd),
11780 r_symndx);
11781 return false;
11783 off = local_got_offsets[r_symndx];
11784 offplt = local_tlsdesc_gotents[r_symndx];
11785 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
11788 /* Linker relaxation happens from one of the
11789 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
11790 if (ELF32_R_TYPE (rel->r_info) != r_type)
11791 tls_type = GOT_TLS_IE;
11793 BFD_ASSERT (tls_type != GOT_UNKNOWN);
11795 if ((off & 1) != 0)
11796 off &= ~1;
11797 else
11799 bool need_relocs = false;
11800 Elf_Internal_Rela outrel;
11801 int cur_off = off;
11803 /* The GOT entries have not been initialized yet. Do it
11804 now, and emit any relocations. If both an IE GOT and a
11805 GD GOT are necessary, we emit the GD first. */
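/* Layout sketch, assuming the usual ARM TLS conventions: a GD
   entry takes two GOT words (R_ARM_TLS_DTPMOD32 module id, then
   R_ARM_TLS_DTPOFF32 offset), an IE entry takes one word
   (R_ARM_TLS_TPOFF32), and GDESC entries live in .got.plt and
   are handled separately below.  */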
11807 if ((bfd_link_dll (info) || indx != 0)
11808 && (h == NULL
11809 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11810 && !resolved_to_zero)
11811 || h->root.type != bfd_link_hash_undefweak))
11813 need_relocs = true;
11814 BFD_ASSERT (srelgot != NULL);
11817 if (tls_type & GOT_TLS_GDESC)
11819 bfd_byte *loc;
11821 /* We should have relaxed, unless this is an undefined
11822 weak symbol. */
11823 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
11824 || bfd_link_dll (info));
11825 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
11826 <= globals->root.sgotplt->size);
11828 outrel.r_addend = 0;
11829 outrel.r_offset = (globals->root.sgotplt->output_section->vma
11830 + globals->root.sgotplt->output_offset
11831 + offplt
11832 + globals->sgotplt_jump_table_size);
11834 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
11835 sreloc = globals->root.srelplt;
11836 loc = sreloc->contents;
11837 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
11838 BFD_ASSERT (loc + RELOC_SIZE (globals)
11839 <= sreloc->contents + sreloc->size);
11841 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
11843 /* For globals, the first word in the relocation gets
11844 the relocation index and the top bit set, or zero,
11845 if we're binding now. For locals, it gets the
11846 symbol's offset in the tls section. */
11847 bfd_put_32 (output_bfd,
11848 !h ? value - elf_hash_table (info)->tls_sec->vma
11849 : info->flags & DF_BIND_NOW ? 0
11850 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
11851 globals->root.sgotplt->contents + offplt
11852 + globals->sgotplt_jump_table_size);
11854 /* Second word in the relocation is always zero. */
11855 bfd_put_32 (output_bfd, 0,
11856 globals->root.sgotplt->contents + offplt
11857 + globals->sgotplt_jump_table_size + 4);
11859 if (tls_type & GOT_TLS_GD)
11861 if (need_relocs)
11863 outrel.r_addend = 0;
11864 outrel.r_offset = (sgot->output_section->vma
11865 + sgot->output_offset
11866 + cur_off);
11867 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
11869 if (globals->use_rel)
11870 bfd_put_32 (output_bfd, outrel.r_addend,
11871 sgot->contents + cur_off);
11873 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11875 if (indx == 0)
11876 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11877 sgot->contents + cur_off + 4);
11878 else
11880 outrel.r_addend = 0;
11881 outrel.r_info = ELF32_R_INFO (indx,
11882 R_ARM_TLS_DTPOFF32);
11883 outrel.r_offset += 4;
11885 if (globals->use_rel)
11886 bfd_put_32 (output_bfd, outrel.r_addend,
11887 sgot->contents + cur_off + 4);
11889 elf32_arm_add_dynreloc (output_bfd, info,
11890 srelgot, &outrel);
11893 else
11895 /* If we are not emitting relocations for a
11896 general dynamic reference, then we must be in a
11897 static link or an executable link with the
11898 symbol binding locally. Mark it as belonging
11899 to module 1, the executable. */
11900 bfd_put_32 (output_bfd, 1,
11901 sgot->contents + cur_off);
11902 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11903 sgot->contents + cur_off + 4);
11906 cur_off += 8;
11909 if (tls_type & GOT_TLS_IE)
11911 if (need_relocs)
11913 if (indx == 0)
11914 outrel.r_addend = value - dtpoff_base (info);
11915 else
11916 outrel.r_addend = 0;
11917 outrel.r_offset = (sgot->output_section->vma
11918 + sgot->output_offset
11919 + cur_off);
11920 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
11922 if (globals->use_rel)
11923 bfd_put_32 (output_bfd, outrel.r_addend,
11924 sgot->contents + cur_off);
11926 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11928 else
11929 bfd_put_32 (output_bfd, tpoff (info, value),
11930 sgot->contents + cur_off);
11931 cur_off += 4;
11934 if (h != NULL)
11935 h->got.offset |= 1;
11936 else
11937 local_got_offsets[r_symndx] |= 1;
11940 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
11941 off += 8;
11942 else if (tls_type & GOT_TLS_GDESC)
11943 off = offplt;
11945 if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
11946 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL)
11948 bfd_signed_vma offset;
11949 /* TLS stubs are ARM mode. The original symbol is a
11950 data object, so branch_type is bogus. */
11951 branch_type = ST_BRANCH_TO_ARM;
11952 enum elf32_arm_stub_type stub_type
11953 = arm_type_of_stub (info, input_section, rel,
11954 st_type, &branch_type,
11955 (struct elf32_arm_link_hash_entry *)h,
11956 globals->tls_trampoline, globals->root.splt,
11957 input_bfd, sym_name);
11959 if (stub_type != arm_stub_none)
11961 struct elf32_arm_stub_hash_entry *stub_entry
11962 = elf32_arm_get_stub_entry
11963 (input_section, globals->root.splt, 0, rel,
11964 globals, stub_type);
11965 offset = (stub_entry->stub_offset
11966 + stub_entry->stub_sec->output_offset
11967 + stub_entry->stub_sec->output_section->vma);
11969 else
11970 offset = (globals->root.splt->output_section->vma
11971 + globals->root.splt->output_offset
11972 + globals->tls_trampoline);
11974 if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL)
11976 unsigned long inst;
11978 offset -= (input_section->output_section->vma
11979 + input_section->output_offset
11980 + rel->r_offset + 8);
11982 inst = offset >> 2;
11983 inst &= 0x00ffffff;
11984 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
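/* 0xeb000000 is an ARM BL and 0xfa000000 an ARM BLX; the 24-bit
   field holds a word offset relative to PC+8, which is why 8 was
   subtracted above.  */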
11986 else
11988 /* Thumb blx encodes the offset in a complicated
11989 fashion. */
11990 unsigned upper_insn, lower_insn;
11991 unsigned neg;
11993 offset -= (input_section->output_section->vma
11994 + input_section->output_offset
11995 + rel->r_offset + 4);
11997 if (stub_type != arm_stub_none
11998 && arm_stub_is_thumb (stub_type))
12000 lower_insn = 0xd000;
12002 else
12004 lower_insn = 0xc000;
12005 /* Round up the offset to a word boundary. */
12006 offset = (offset + 2) & ~2;
12009 neg = offset < 0;
12010 upper_insn = (0xf000
12011 | ((offset >> 12) & 0x3ff)
12012 | (neg << 10));
12013 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
12014 | (((!((offset >> 22) & 1)) ^ neg) << 11)
12015 | ((offset >> 1) & 0x7ff);
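/* Same S/J1/J2 scheme as the R_ARM_THM_CALL handling earlier in
   this function: NEG supplies the sign bit S, while bits 13 and 11
   of the lower halfword hold offset bits 23 and 22 inverted and
   XORed with S.  */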
12016 bfd_put_16 (input_bfd, upper_insn, hit_data);
12017 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12018 return bfd_reloc_ok;
12021 /* These relocations need special care: besides the fact that
12022 they point somewhere in .gotplt, the addend must be
12023 adjusted according to the type of instruction they
12024 refer back to. */
12025 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
12027 unsigned long data, insn;
12028 unsigned thumb;
12030 data = bfd_get_signed_32 (input_bfd, hit_data);
12031 thumb = data & 1;
12032 data &= ~1ul;
12034 if (thumb)
12036 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
12037 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
12038 insn = (insn << 16)
12039 | bfd_get_16 (input_bfd,
12040 contents + rel->r_offset - data + 2);
12041 if ((insn & 0xf800c000) == 0xf000c000)
12042 /* bl/blx */
12043 value = -6;
12044 else if ((insn & 0xffffff00) == 0x4400)
12045 /* add */
12046 value = -5;
12047 else
12049 _bfd_error_handler
12050 /* xgettext:c-format */
12051 (_("%pB(%pA+%#" PRIx64 "): "
12052 "unexpected %s instruction '%#lx' "
12053 "referenced by TLS_GOTDESC"),
12054 input_bfd, input_section, (uint64_t) rel->r_offset,
12055 "Thumb", insn);
12056 return bfd_reloc_notsupported;
12059 else
12061 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
12063 switch (insn >> 24)
12065 case 0xeb: /* bl */
12066 case 0xfa: /* blx */
12067 value = -4;
12068 break;
12070 case 0xe0: /* add */
12071 value = -8;
12072 break;
12074 default:
12075 _bfd_error_handler
12076 /* xgettext:c-format */
12077 (_("%pB(%pA+%#" PRIx64 "): "
12078 "unexpected %s instruction '%#lx' "
12079 "referenced by TLS_GOTDESC"),
12080 input_bfd, input_section, (uint64_t) rel->r_offset,
12081 "ARM", insn);
12082 return bfd_reloc_notsupported;
12086 value += ((globals->root.sgotplt->output_section->vma
12087 + globals->root.sgotplt->output_offset + off)
12088 - (input_section->output_section->vma
12089 + input_section->output_offset
12090 + rel->r_offset)
12091 + globals->sgotplt_jump_table_size);
12093 else
12094 value = ((globals->root.sgot->output_section->vma
12095 + globals->root.sgot->output_offset + off)
12096 - (input_section->output_section->vma
12097 + input_section->output_offset + rel->r_offset));
12099 if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
12100 r_type == R_ARM_TLS_IE32_FDPIC))
12102 /* For FDPIC relocations, resolve to the offset of the GOT
12103 entry from the start of GOT. */
12104 bfd_put_32 (output_bfd,
12105 globals->root.sgot->output_offset + off,
12106 contents + rel->r_offset);
12108 return bfd_reloc_ok;
12110 else
12112 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12113 contents, rel->r_offset, value,
12114 rel->r_addend);
12118 case R_ARM_TLS_LE32:
12119 if (bfd_link_dll (info))
12121 _bfd_error_handler
12122 /* xgettext:c-format */
12123 (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
12124 "in shared object"),
12125 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
12126 return bfd_reloc_notsupported;
12128 else
12129 value = tpoff (info, value);
12131 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12132 contents, rel->r_offset, value,
12133 rel->r_addend);
12135 case R_ARM_V4BX:
12136 if (globals->fix_v4bx)
12138 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12140 /* Ensure that we have a BX instruction. */
12141 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12143 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12145 /* Branch to veneer. */
12146 bfd_vma glue_addr;
12147 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12148 glue_addr -= input_section->output_section->vma
12149 + input_section->output_offset
12150 + rel->r_offset + 8;
12151 insn = (insn & 0xf0000000) | 0x0a000000
12152 | ((glue_addr >> 2) & 0x00ffffff);
12154 else
12156 /* Preserve Rm (lowest four bits) and the condition code
12157 (highest four bits). Other bits encode MOV PC,Rm. */
12158 insn = (insn & 0xf000000f) | 0x01a0f000;
12161 bfd_put_32 (input_bfd, insn, hit_data);
12163 return bfd_reloc_ok;
12165 case R_ARM_MOVW_ABS_NC:
12166 case R_ARM_MOVT_ABS:
12167 case R_ARM_MOVW_PREL_NC:
12168 case R_ARM_MOVT_PREL:
12169 /* Until we properly support segment-base-relative addressing,
12170 we assume the segment base to be zero, as for the group relocations.
12171 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12172 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
12173 case R_ARM_MOVW_BREL_NC:
12174 case R_ARM_MOVW_BREL:
12175 case R_ARM_MOVT_BREL:
12177 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12179 if (globals->use_rel)
12181 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
12182 signed_addend = (addend ^ 0x8000) - 0x8000;
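/* The ARM MOVW/MOVT immediate is split as imm4:imm12 (insn bits
   19:16 and 11:0); the XOR/subtract above sign-extends the
   reassembled 16-bit addend.  */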
12185 value += signed_addend;
12187 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12188 value -= (input_section->output_section->vma
12189 + input_section->output_offset + rel->r_offset);
12191 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12192 return bfd_reloc_overflow;
12194 if (branch_type == ST_BRANCH_TO_THUMB)
12195 value |= 1;
12197 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12198 || r_type == R_ARM_MOVT_BREL)
12199 value >>= 16;
12201 insn &= 0xfff0f000;
12202 insn |= value & 0xfff;
12203 insn |= (value & 0xf000) << 4;
12204 bfd_put_32 (input_bfd, insn, hit_data);
12206 return bfd_reloc_ok;
12208 case R_ARM_THM_MOVW_ABS_NC:
12209 case R_ARM_THM_MOVT_ABS:
12210 case R_ARM_THM_MOVW_PREL_NC:
12211 case R_ARM_THM_MOVT_PREL:
12212 /* Until we properly support segment-base-relative addressing,
12213 we assume the segment base to be zero, as for the above relocations.
12214 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12215 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12216 as R_ARM_THM_MOVT_ABS. */
12217 case R_ARM_THM_MOVW_BREL_NC:
12218 case R_ARM_THM_MOVW_BREL:
12219 case R_ARM_THM_MOVT_BREL:
12221 bfd_vma insn;
12223 insn = bfd_get_16 (input_bfd, hit_data) << 16;
12224 insn |= bfd_get_16 (input_bfd, hit_data + 2);
12226 if (globals->use_rel)
12228 addend = ((insn >> 4) & 0xf000)
12229 | ((insn >> 15) & 0x0800)
12230 | ((insn >> 4) & 0x0700)
12231 | (insn & 0x00ff);
12232 signed_addend = (addend ^ 0x8000) - 0x8000;
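/* In the Thumb-2 MOVW/MOVT encoding the 16-bit immediate is
   scattered as imm4:i:imm3:imm8 (bits 19:16, 26, 14:12 and 7:0 of
   the combined 32-bit instruction); the shifts above gather it
   back and the XOR/subtract sign-extends it.  */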
12235 value += signed_addend;
12237 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12238 value -= (input_section->output_section->vma
12239 + input_section->output_offset + rel->r_offset);
12241 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12242 return bfd_reloc_overflow;
12244 if (branch_type == ST_BRANCH_TO_THUMB)
12245 value |= 1;
12247 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12248 || r_type == R_ARM_THM_MOVT_BREL)
12249 value >>= 16;
12251 insn &= 0xfbf08f00;
12252 insn |= (value & 0xf000) << 4;
12253 insn |= (value & 0x0800) << 15;
12254 insn |= (value & 0x0700) << 4;
12255 insn |= (value & 0x00ff);
12257 bfd_put_16 (input_bfd, insn >> 16, hit_data);
12258 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12260 return bfd_reloc_ok;
12262 case R_ARM_ALU_PC_G0_NC:
12263 case R_ARM_ALU_PC_G1_NC:
12264 case R_ARM_ALU_PC_G0:
12265 case R_ARM_ALU_PC_G1:
12266 case R_ARM_ALU_PC_G2:
12267 case R_ARM_ALU_SB_G0_NC:
12268 case R_ARM_ALU_SB_G1_NC:
12269 case R_ARM_ALU_SB_G0:
12270 case R_ARM_ALU_SB_G1:
12271 case R_ARM_ALU_SB_G2:
12273 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12274 bfd_vma pc = input_section->output_section->vma
12275 + input_section->output_offset + rel->r_offset;
12276 /* sb is the origin of the *segment* containing the symbol. */
12277 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12278 bfd_vma residual;
12279 bfd_vma g_n;
12280 bfd_signed_vma signed_value;
12281 int group = 0;
12283 /* Determine which group of bits to select. */
12284 switch (r_type)
12286 case R_ARM_ALU_PC_G0_NC:
12287 case R_ARM_ALU_PC_G0:
12288 case R_ARM_ALU_SB_G0_NC:
12289 case R_ARM_ALU_SB_G0:
12290 group = 0;
12291 break;
12293 case R_ARM_ALU_PC_G1_NC:
12294 case R_ARM_ALU_PC_G1:
12295 case R_ARM_ALU_SB_G1_NC:
12296 case R_ARM_ALU_SB_G1:
12297 group = 1;
12298 break;
12300 case R_ARM_ALU_PC_G2:
12301 case R_ARM_ALU_SB_G2:
12302 group = 2;
12303 break;
12305 default:
12306 abort ();
12309 /* If REL, extract the addend from the insn. If RELA, it will
12310 have already been fetched for us. */
12311 if (globals->use_rel)
12313 int negative;
12314 bfd_vma constant = insn & 0xff;
12315 bfd_vma rotation = (insn & 0xf00) >> 8;
12317 if (rotation == 0)
12318 signed_addend = constant;
12319 else
12321 /* Compensate for the fact that in the instruction, the
12322 rotation is stored in multiples of 2 bits. */
12323 rotation *= 2;
12325 /* Rotate "constant" right by "rotation" bits. */
12326 signed_addend = (constant >> rotation) |
12327 (constant << (8 * sizeof (bfd_vma) - rotation));
12330 /* Determine if the instruction is an ADD or a SUB.
12331 (For REL, this determines the sign of the addend.) */
12332 negative = identify_add_or_sub (insn);
12333 if (negative == 0)
12335 _bfd_error_handler
12336 /* xgettext:c-format */
12337 (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12338 "are allowed for ALU group relocations"),
12339 input_bfd, input_section, (uint64_t) rel->r_offset);
12340 return bfd_reloc_overflow;
12343 signed_addend *= negative;
12346 /* Compute the value (X) to go in the place. */
12347 if (r_type == R_ARM_ALU_PC_G0_NC
12348 || r_type == R_ARM_ALU_PC_G1_NC
12349 || r_type == R_ARM_ALU_PC_G0
12350 || r_type == R_ARM_ALU_PC_G1
12351 || r_type == R_ARM_ALU_PC_G2)
12352 /* PC relative. */
12353 signed_value = value - pc + signed_addend;
12354 else
12355 /* Section base relative. */
12356 signed_value = value - sb + signed_addend;
12358 /* If the target symbol is a Thumb function, then set the
12359 Thumb bit in the address. */
12360 if (branch_type == ST_BRANCH_TO_THUMB)
12361 signed_value |= 1;
12363 /* Calculate the value of the relevant G_n, in encoded
12364 constant-with-rotation format. */
12365 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12366 group, &residual);
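/* Informally, G_n is the n-th 8-bit chunk (aligned so that it can
   be encoded as an ARM rotated immediate) peeled off the absolute
   value, starting from the most significant end, and RESIDUAL is
   what remains after removing chunks 0..n.  For the checked
   (non-_NC) variants that residual must be zero, which is what the
   test below enforces.  */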
12368 /* Check for overflow if required. */
12369 if ((r_type == R_ARM_ALU_PC_G0
12370 || r_type == R_ARM_ALU_PC_G1
12371 || r_type == R_ARM_ALU_PC_G2
12372 || r_type == R_ARM_ALU_SB_G0
12373 || r_type == R_ARM_ALU_SB_G1
12374 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12376 _bfd_error_handler
12377 /* xgettext:c-format */
12378 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12379 "splitting %#" PRIx64 " for group relocation %s"),
12380 input_bfd, input_section, (uint64_t) rel->r_offset,
12381 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12382 howto->name);
12383 return bfd_reloc_overflow;
12386 /* Mask out the value and the ADD/SUB part of the opcode; take care
12387 not to destroy the S bit. */
12388 insn &= 0xff1ff000;
12390 /* Set the opcode according to whether the value to go in the
12391 place is negative. */
12392 if (signed_value < 0)
12393 insn |= 1 << 22;
12394 else
12395 insn |= 1 << 23;
12397 /* Encode the offset. */
12398 insn |= g_n;
12400 bfd_put_32 (input_bfd, insn, hit_data);
12402 return bfd_reloc_ok;
12404 case R_ARM_LDR_PC_G0:
12405 case R_ARM_LDR_PC_G1:
12406 case R_ARM_LDR_PC_G2:
12407 case R_ARM_LDR_SB_G0:
12408 case R_ARM_LDR_SB_G1:
12409 case R_ARM_LDR_SB_G2:
12411 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12412 bfd_vma pc = input_section->output_section->vma
12413 + input_section->output_offset + rel->r_offset;
12414 /* sb is the origin of the *segment* containing the symbol. */
12415 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12416 bfd_vma residual;
12417 bfd_signed_vma signed_value;
12418 int group = 0;
12420 /* Determine which groups of bits to calculate. */
12421 switch (r_type)
12423 case R_ARM_LDR_PC_G0:
12424 case R_ARM_LDR_SB_G0:
12425 group = 0;
12426 break;
12428 case R_ARM_LDR_PC_G1:
12429 case R_ARM_LDR_SB_G1:
12430 group = 1;
12431 break;
12433 case R_ARM_LDR_PC_G2:
12434 case R_ARM_LDR_SB_G2:
12435 group = 2;
12436 break;
12438 default:
12439 abort ();
12442 /* If REL, extract the addend from the insn. If RELA, it will
12443 have already been fetched for us. */
12444 if (globals->use_rel)
12446 int negative = (insn & (1 << 23)) ? 1 : -1;
12447 signed_addend = negative * (insn & 0xfff);
12450 /* Compute the value (X) to go in the place. */
12451 if (r_type == R_ARM_LDR_PC_G0
12452 || r_type == R_ARM_LDR_PC_G1
12453 || r_type == R_ARM_LDR_PC_G2)
12454 /* PC relative. */
12455 signed_value = value - pc + signed_addend;
12456 else
12457 /* Section base relative. */
12458 signed_value = value - sb + signed_addend;
12460 /* Calculate the value of the relevant G_{n-1} to obtain
12461 the residual at that stage. */
12462 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12463 group - 1, &residual);
12465 /* Check for overflow. */
12466 if (residual >= 0x1000)
12468 _bfd_error_handler
12469 /* xgettext:c-format */
12470 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12471 "splitting %#" PRIx64 " for group relocation %s"),
12472 input_bfd, input_section, (uint64_t) rel->r_offset,
12473 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12474 howto->name);
12475 return bfd_reloc_overflow;
12478 /* Mask out the value and U bit. */
12479 insn &= 0xff7ff000;
12481 /* Set the U bit if the value to go in the place is non-negative. */
12482 if (signed_value >= 0)
12483 insn |= 1 << 23;
12485 /* Encode the offset. */
12486 insn |= residual;
12488 bfd_put_32 (input_bfd, insn, hit_data);
12490 return bfd_reloc_ok;
12492 case R_ARM_LDRS_PC_G0:
12493 case R_ARM_LDRS_PC_G1:
12494 case R_ARM_LDRS_PC_G2:
12495 case R_ARM_LDRS_SB_G0:
12496 case R_ARM_LDRS_SB_G1:
12497 case R_ARM_LDRS_SB_G2:
12499 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12500 bfd_vma pc = input_section->output_section->vma
12501 + input_section->output_offset + rel->r_offset;
12502 /* sb is the origin of the *segment* containing the symbol. */
12503 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12504 bfd_vma residual;
12505 bfd_signed_vma signed_value;
12506 int group = 0;
12508 /* Determine which groups of bits to calculate. */
12509 switch (r_type)
12511 case R_ARM_LDRS_PC_G0:
12512 case R_ARM_LDRS_SB_G0:
12513 group = 0;
12514 break;
12516 case R_ARM_LDRS_PC_G1:
12517 case R_ARM_LDRS_SB_G1:
12518 group = 1;
12519 break;
12521 case R_ARM_LDRS_PC_G2:
12522 case R_ARM_LDRS_SB_G2:
12523 group = 2;
12524 break;
12526 default:
12527 abort ();
12530 /* If REL, extract the addend from the insn. If RELA, it will
12531 have already been fetched for us. */
12532 if (globals->use_rel)
12534 int negative = (insn & (1 << 23)) ? 1 : -1;
12535 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
12538 /* Compute the value (X) to go in the place. */
12539 if (r_type == R_ARM_LDRS_PC_G0
12540 || r_type == R_ARM_LDRS_PC_G1
12541 || r_type == R_ARM_LDRS_PC_G2)
12542 /* PC relative. */
12543 signed_value = value - pc + signed_addend;
12544 else
12545 /* Section base relative. */
12546 signed_value = value - sb + signed_addend;
12548 /* Calculate the value of the relevant G_{n-1} to obtain
12549 the residual at that stage. */
12550 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12551 group - 1, &residual);
12553 /* Check for overflow. */
12554 if (residual >= 0x100)
12556 _bfd_error_handler
12557 /* xgettext:c-format */
12558 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12559 "splitting %#" PRIx64 " for group relocation %s"),
12560 input_bfd, input_section, (uint64_t) rel->r_offset,
12561 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12562 howto->name);
12563 return bfd_reloc_overflow;
12566 /* Mask out the value and U bit. */
12567 insn &= 0xff7ff0f0;
12569 /* Set the U bit if the value to go in the place is non-negative. */
12570 if (signed_value >= 0)
12571 insn |= 1 << 23;
12573 /* Encode the offset. */
12574 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12576 bfd_put_32 (input_bfd, insn, hit_data);
12578 return bfd_reloc_ok;
12580 case R_ARM_LDC_PC_G0:
12581 case R_ARM_LDC_PC_G1:
12582 case R_ARM_LDC_PC_G2:
12583 case R_ARM_LDC_SB_G0:
12584 case R_ARM_LDC_SB_G1:
12585 case R_ARM_LDC_SB_G2:
12587 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12588 bfd_vma pc = input_section->output_section->vma
12589 + input_section->output_offset + rel->r_offset;
12590 /* sb is the origin of the *segment* containing the symbol. */
12591 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12592 bfd_vma residual;
12593 bfd_signed_vma signed_value;
12594 int group = 0;
12596 /* Determine which groups of bits to calculate. */
12597 switch (r_type)
12599 case R_ARM_LDC_PC_G0:
12600 case R_ARM_LDC_SB_G0:
12601 group = 0;
12602 break;
12604 case R_ARM_LDC_PC_G1:
12605 case R_ARM_LDC_SB_G1:
12606 group = 1;
12607 break;
12609 case R_ARM_LDC_PC_G2:
12610 case R_ARM_LDC_SB_G2:
12611 group = 2;
12612 break;
12614 default:
12615 abort ();
12618 /* If REL, extract the addend from the insn. If RELA, it will
12619 have already been fetched for us. */
12620 if (globals->use_rel)
12622 int negative = (insn & (1 << 23)) ? 1 : -1;
12623 signed_addend = negative * ((insn & 0xff) << 2);
12626 /* Compute the value (X) to go in the place. */
12627 if (r_type == R_ARM_LDC_PC_G0
12628 || r_type == R_ARM_LDC_PC_G1
12629 || r_type == R_ARM_LDC_PC_G2)
12630 /* PC relative. */
12631 signed_value = value - pc + signed_addend;
12632 else
12633 /* Section base relative. */
12634 signed_value = value - sb + signed_addend;
12636 /* Calculate the value of the relevant G_{n-1} to obtain
12637 the residual at that stage. */
12638 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12639 group - 1, &residual);
12641 /* Check for overflow. (The absolute value to go in the place must be
12642 divisible by four and, after having been divided by four, must
12643 fit in eight bits.) */
12644 if ((residual & 0x3) != 0 || residual >= 0x400)
12646 _bfd_error_handler
12647 /* xgettext:c-format */
12648 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12649 "splitting %#" PRIx64 " for group relocation %s"),
12650 input_bfd, input_section, (uint64_t) rel->r_offset,
12651 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12652 howto->name);
12653 return bfd_reloc_overflow;
12656 /* Mask out the value and U bit. */
12657 insn &= 0xff7fff00;
12659 /* Set the U bit if the value to go in the place is non-negative. */
12660 if (signed_value >= 0)
12661 insn |= 1 << 23;
12663 /* Encode the offset. */
12664 insn |= residual >> 2;
12666 bfd_put_32 (input_bfd, insn, hit_data);
12668 return bfd_reloc_ok;
12670 case R_ARM_THM_ALU_ABS_G0_NC:
12671 case R_ARM_THM_ALU_ABS_G1_NC:
12672 case R_ARM_THM_ALU_ABS_G2_NC:
12673 case R_ARM_THM_ALU_ABS_G3_NC:
12675 const int shift_array[4] = {0, 8, 16, 24};
12676 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12677 bfd_vma addr = value;
12678 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
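/* Each of these relocations selects one byte of the address:
   G0_NC bits 7:0, G1_NC bits 15:8, G2_NC bits 23:16 and G3_NC
   bits 31:24, as encoded by shift_array above.  */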
12680 /* Compute address. */
12681 if (globals->use_rel)
12682 signed_addend = insn & 0xff;
12683 addr += signed_addend;
12684 if (branch_type == ST_BRANCH_TO_THUMB)
12685 addr |= 1;
12686 /* Clean imm8 insn. */
12687 insn &= 0xff00;
12688 /* And update with correct part of address. */
12689 insn |= (addr >> shift) & 0xff;
12690 /* Update insn. */
12691 bfd_put_16 (input_bfd, insn, hit_data);
12694 *unresolved_reloc_p = false;
12695 return bfd_reloc_ok;
12697 case R_ARM_GOTOFFFUNCDESC:
12699 if (h == NULL)
12701 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts (input_bfd);
12702 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12704 if (r_symndx >= elf32_arm_num_entries (input_bfd))
12706 * error_message = _("local symbol index too big");
12707 return bfd_reloc_dangerous;
12710 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12711 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12712 bfd_vma seg = -1;
12714 if (bfd_link_pic (info) && dynindx == 0)
12716 * error_message = _("no dynamic index information available");
12717 return bfd_reloc_dangerous;
12720 /* Resolve relocation. */
12721 bfd_put_32 (output_bfd, (offset + sgot->output_offset)
12722 , contents + rel->r_offset);
12723 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12724 not done yet. */
12725 arm_elf_fill_funcdesc (output_bfd, info,
12726 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12727 dynindx, offset, addr, dynreloc_value, seg);
12729 else
12731 int dynindx;
12732 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12733 bfd_vma addr;
12734 bfd_vma seg = -1;
12736 /* For static binaries, sym_sec can be null. */
12737 if (sym_sec)
12739 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12740 addr = dynreloc_value - sym_sec->output_section->vma;
12742 else
12744 dynindx = 0;
12745 addr = 0;
12748 if (bfd_link_pic (info) && dynindx == 0)
12750 * error_message = _("no dynamic index information available");
12751 return bfd_reloc_dangerous;
12754 /* This case cannot occur: the funcdesc is allocated by
12755 the dynamic loader, so we cannot resolve the relocation here. */
12756 if (h->dynindx != -1)
12758 * error_message = _("invalid dynamic index");
12759 return bfd_reloc_dangerous;
12762 /* Resolve relocation. */
12763 bfd_put_32 (output_bfd, (offset + sgot->output_offset),
12764 contents + rel->r_offset);
12765 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12766 arm_elf_fill_funcdesc (output_bfd, info,
12767 &eh->fdpic_cnts.funcdesc_offset,
12768 dynindx, offset, addr, dynreloc_value, seg);
12771 *unresolved_reloc_p = false;
12772 return bfd_reloc_ok;
12774 case R_ARM_GOTFUNCDESC:
12776 if (h != NULL)
12778 Elf_Internal_Rela outrel;
12780 /* Resolve relocation. */
12781 bfd_put_32 (output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12782 + sgot->output_offset),
12783 contents + rel->r_offset);
12784 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12785 if (h->dynindx == -1)
12787 int dynindx;
12788 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12789 bfd_vma addr;
12790 bfd_vma seg = -1;
12792 /* For static binaries sym_sec can be null. */
12793 if (sym_sec)
12795 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12796 addr = dynreloc_value - sym_sec->output_section->vma;
12798 else
12800 dynindx = 0;
12801 addr = 0;
12804 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12805 arm_elf_fill_funcdesc (output_bfd, info,
12806 &eh->fdpic_cnts.funcdesc_offset,
12807 dynindx, offset, addr, dynreloc_value, seg);
12810 /* Add a dynamic relocation on GOT entry if not already done. */
12811 if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12813 if (h->dynindx == -1)
12815 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12816 if (h->root.type == bfd_link_hash_undefweak)
12817 bfd_put_32 (output_bfd, 0, sgot->contents
12818 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12819 else
12820 bfd_put_32 (output_bfd, sgot->output_section->vma
12821 + sgot->output_offset
12822 + (eh->fdpic_cnts.funcdesc_offset & ~1),
12823 sgot->contents
12824 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12826 else
12828 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12830 outrel.r_offset = sgot->output_section->vma
12831 + sgot->output_offset
12832 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12833 outrel.r_addend = 0;
12834 if (h->dynindx == -1 && !bfd_link_pic (info))
12835 if (h->root.type == bfd_link_hash_undefweak)
12836 arm_elf_add_rofixup (output_bfd, globals->srofixup, -1);
12837 else
12838 arm_elf_add_rofixup (output_bfd, globals->srofixup,
12839 outrel.r_offset);
12840 else
12841 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12842 eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12845 else
12847 /* Such a relocation against a static function should not
12848 have been emitted by the compiler. */
12849 return bfd_reloc_notsupported;
12852 *unresolved_reloc_p = false;
12853 return bfd_reloc_ok;
12855 case R_ARM_FUNCDESC:
12857 if (h == NULL)
12859 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts (input_bfd);
12860 Elf_Internal_Rela outrel;
12861 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12863 if (r_symndx >= elf32_arm_num_entries (input_bfd))
12865 * error_message = _("local symbol index too big");
12866 return bfd_reloc_dangerous;
12869 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12870 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12871 bfd_vma seg = -1;
12873 if (bfd_link_pic (info) && dynindx == 0)
12875 * error_message = _("dynamic index information not available");
12876 return bfd_reloc_dangerous;
12879 /* Replace static FUNCDESC relocation with a
12880 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12881 executable. */
12882 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12883 outrel.r_offset = input_section->output_section->vma
12884 + input_section->output_offset + rel->r_offset;
12885 outrel.r_addend = 0;
12886 if (bfd_link_pic (info))
12887 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12888 else
12889 arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
12891 bfd_put_32 (input_bfd, sgot->output_section->vma
12892 + sgot->output_offset + offset, hit_data);
12894 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12895 arm_elf_fill_funcdesc (output_bfd, info,
12896 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12897 dynindx, offset, addr, dynreloc_value, seg);
12899 else
12901 if (h->dynindx == -1)
12903 int dynindx;
12904 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12905 bfd_vma addr;
12906 bfd_vma seg = -1;
12907 Elf_Internal_Rela outrel;
12909 /* For static binaries sym_sec can be null. */
12910 if (sym_sec)
12912 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12913 addr = dynreloc_value - sym_sec->output_section->vma;
12915 else
12917 dynindx = 0;
12918 addr = 0;
12921 if (bfd_link_pic (info) && dynindx == 0)
12922 abort ();
12924 /* Replace static FUNCDESC relocation with a
12925 R_ARM_RELATIVE dynamic relocation. */
12926 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12927 outrel.r_offset = input_section->output_section->vma
12928 + input_section->output_offset + rel->r_offset;
12929 outrel.r_addend = 0;
12930 if (bfd_link_pic (info))
12931 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12932 else
12933 arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
12935 bfd_put_32 (input_bfd, sgot->output_section->vma
12936 + sgot->output_offset + offset, hit_data);
12938 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12939 arm_elf_fill_funcdesc (output_bfd, info,
12940 &eh->fdpic_cnts.funcdesc_offset,
12941 dynindx, offset, addr, dynreloc_value, seg);
12943 else
12945 Elf_Internal_Rela outrel;
12947 /* Add a dynamic relocation. */
12948 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12949 outrel.r_offset = input_section->output_section->vma
12950 + input_section->output_offset + rel->r_offset;
12951 outrel.r_addend = 0;
12952 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12956 *unresolved_reloc_p = false;
12957 return bfd_reloc_ok;
12959 case R_ARM_THM_BF16:
12961 bfd_vma relocation;
12962 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12963 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12965 if (globals->use_rel)
12967 bfd_vma immA = (upper_insn & 0x001f);
12968 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12969 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12970 addend = (immA << 12);
12971 addend |= (immB << 2);
12972 addend |= (immC << 1);
12973 addend |= 1;
12974 /* Sign extend. */
12975 signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
12978 relocation = value + signed_addend;
12979 relocation -= (input_section->output_section->vma
12980 + input_section->output_offset
12981 + rel->r_offset);
12983 /* Put RELOCATION back into the insn. */
12985 bfd_vma immA = (relocation & 0x0001f000) >> 12;
12986 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12987 bfd_vma immC = (relocation & 0x00000002) >> 1;
12989 upper_insn = (upper_insn & 0xffe0) | immA;
12990 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12993 /* Put the relocated value back in the object file: */
12994 bfd_put_16 (input_bfd, upper_insn, hit_data);
12995 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12997 return bfd_reloc_ok;
13000 case R_ARM_THM_BF12:
13002 bfd_vma relocation;
13003 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
13004 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
13006 if (globals->use_rel)
13008 bfd_vma immA = (upper_insn & 0x0001);
13009 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
13010 bfd_vma immC = (lower_insn & 0x0800) >> 11;
13011 addend = (immA << 12);
13012 addend |= (immB << 2);
13013 addend |= (immC << 1);
13014 addend |= 1;
13015 /* Sign extend. */
13016 addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
13017 signed_addend = addend;
13020 relocation = value + signed_addend;
13021 relocation -= (input_section->output_section->vma
13022 + input_section->output_offset
13023 + rel->r_offset);
13025 /* Put RELOCATION back into the insn. */
13027 bfd_vma immA = (relocation & 0x00001000) >> 12;
13028 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
13029 bfd_vma immC = (relocation & 0x00000002) >> 1;
13031 upper_insn = (upper_insn & 0xfffe) | immA;
13032 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
13035 /* Put the relocated value back in the object file: */
13036 bfd_put_16 (input_bfd, upper_insn, hit_data);
13037 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
13039 return bfd_reloc_ok;
13042 case R_ARM_THM_BF18:
13044 bfd_vma relocation;
13045 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
13046 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
13048 if (globals->use_rel)
13050 bfd_vma immA = (upper_insn & 0x007f);
13051 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
13052 bfd_vma immC = (lower_insn & 0x0800) >> 11;
13053 addend = (immA << 12);
13054 addend |= (immB << 2);
13055 addend |= (immC << 1);
13056 addend |= 1;
13057 /* Sign extend. */
13058 addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
13059 signed_addend = addend;
13062 relocation = value + signed_addend;
13063 relocation -= (input_section->output_section->vma
13064 + input_section->output_offset
13065 + rel->r_offset);
13067 /* Put RELOCATION back into the insn. */
13069 bfd_vma immA = (relocation & 0x0007f000) >> 12;
13070 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
13071 bfd_vma immC = (relocation & 0x00000002) >> 1;
13073 upper_insn = (upper_insn & 0xff80) | immA;
13074 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
13077 /* Put the relocated value back in the object file: */
13078 bfd_put_16 (input_bfd, upper_insn, hit_data);
13079 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
13081 return bfd_reloc_ok;
13084 default:
13085 return bfd_reloc_notsupported;
13089 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
13090 static void
13091 arm_add_to_rel (bfd * abfd,
13092 bfd_byte * address,
13093 reloc_howto_type * howto,
13094 bfd_signed_vma increment)
13096 bfd_signed_vma addend;
13098 if (howto->type == R_ARM_THM_CALL
13099 || howto->type == R_ARM_THM_JUMP24)
13101 int upper_insn, lower_insn;
13102 int upper, lower;
13104 upper_insn = bfd_get_16 (abfd, address);
13105 lower_insn = bfd_get_16 (abfd, address + 2);
13106 upper = upper_insn & 0x7ff;
13107 lower = lower_insn & 0x7ff;
13109 addend = (upper << 12) | (lower << 1);
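/* This treats the branch as a Thumb-1 style pair in which the two
   halfwords hold the high and low 11 bits of the halfword offset;
   the Thumb-2 J1/J2 bits are preserved untouched by the 0xf800
   masks below, so only the 22-bit offset is adjusted here.  */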
13110 addend += increment;
13111 addend >>= 1;
13113 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
13114 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
13116 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
13117 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
13119 else
13121 bfd_vma contents;
13123 contents = bfd_get_32 (abfd, address);
13125 /* Get the (signed) value from the instruction. */
13126 addend = contents & howto->src_mask;
13127 if (addend & ((howto->src_mask + 1) >> 1))
13129 bfd_signed_vma mask;
13131 mask = -1;
13132 mask &= ~ howto->src_mask;
13133 addend |= mask;
13136 /* Add in the increment (which is a byte value). */
13137 switch (howto->type)
13139 default:
13140 addend += increment;
13141 break;
13143 case R_ARM_PC24:
13144 case R_ARM_PLT32:
13145 case R_ARM_CALL:
13146 case R_ARM_JUMP24:
13147 addend *= bfd_get_reloc_size (howto);
13148 addend += increment;
13150 /* Should we check for overflow here ? */
13152 /* Drop any undesired bits. */
13153 addend >>= howto->rightshift;
13154 break;
13157 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
13159 bfd_put_32 (abfd, contents, address);
13163 #define IS_ARM_TLS_RELOC(R_TYPE) \
13164 ((R_TYPE) == R_ARM_TLS_GD32 \
13165 || (R_TYPE) == R_ARM_TLS_GD32_FDPIC \
13166 || (R_TYPE) == R_ARM_TLS_LDO32 \
13167 || (R_TYPE) == R_ARM_TLS_LDM32 \
13168 || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC \
13169 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
13170 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
13171 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
13172 || (R_TYPE) == R_ARM_TLS_LE32 \
13173 || (R_TYPE) == R_ARM_TLS_IE32 \
13174 || (R_TYPE) == R_ARM_TLS_IE32_FDPIC \
13175 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
13177 /* Specific set of relocations for the GNU TLS dialect. */
13178 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
13179 ((R_TYPE) == R_ARM_TLS_GOTDESC \
13180 || (R_TYPE) == R_ARM_TLS_CALL \
13181 || (R_TYPE) == R_ARM_THM_TLS_CALL \
13182 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
13183 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
13185 /* Relocate an ARM ELF section. */
13187 static int
13188 elf32_arm_relocate_section (bfd * output_bfd,
13189 struct bfd_link_info * info,
13190 bfd * input_bfd,
13191 asection * input_section,
13192 bfd_byte * contents,
13193 Elf_Internal_Rela * relocs,
13194 Elf_Internal_Sym * local_syms,
13195 asection ** local_sections)
13197 Elf_Internal_Shdr *symtab_hdr;
13198 struct elf_link_hash_entry **sym_hashes;
13199 Elf_Internal_Rela *rel;
13200 Elf_Internal_Rela *relend;
13201 const char *name;
13202 struct elf32_arm_link_hash_table * globals;
13204 globals = elf32_arm_hash_table (info);
13205 if (globals == NULL)
13206 return false;
13208 symtab_hdr = & elf_symtab_hdr (input_bfd);
13209 sym_hashes = elf_sym_hashes (input_bfd);
13211 rel = relocs;
13212 relend = relocs + input_section->reloc_count;
13213 for (; rel < relend; rel++)
13215 int r_type;
13216 reloc_howto_type * howto;
13217 unsigned long r_symndx;
13218 Elf_Internal_Sym * sym;
13219 asection * sec;
13220 struct elf_link_hash_entry * h;
13221 bfd_vma relocation;
13222 bfd_reloc_status_type r;
13223 arelent bfd_reloc;
13224 char sym_type;
13225 bool unresolved_reloc = false;
13226 char *error_message = NULL;
13228 r_symndx = ELF32_R_SYM (rel->r_info);
13229 r_type = ELF32_R_TYPE (rel->r_info);
13230 r_type = arm_real_reloc_type (globals, r_type);
13232 if ( r_type == R_ARM_GNU_VTENTRY
13233 || r_type == R_ARM_GNU_VTINHERIT)
13234 continue;
13236 howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
13238 if (howto == NULL)
13239 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
13241 h = NULL;
13242 sym = NULL;
13243 sec = NULL;
13245 if (r_symndx < symtab_hdr->sh_info)
13247 sym = local_syms + r_symndx;
13248 sym_type = ELF32_ST_TYPE (sym->st_info);
13249 sec = local_sections[r_symndx];
13251 /* An object file might have a reference to a local
13252 undefined symbol. This is a daft object file, but we
13253 should at least do something about it. V4BX & NONE
13254 relocations do not use the symbol and are explicitly
13255 allowed to use the undefined symbol, so allow those.
13256 Likewise for relocations against STN_UNDEF. */
13257 if (r_type != R_ARM_V4BX
13258 && r_type != R_ARM_NONE
13259 && r_symndx != STN_UNDEF
13260 && bfd_is_und_section (sec)
13261 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
13262 (*info->callbacks->undefined_symbol)
13263 (info, bfd_elf_string_from_elf_section
13264 (input_bfd, symtab_hdr->sh_link, sym->st_name),
13265 input_bfd, input_section,
13266 rel->r_offset, true);
13268 if (globals->use_rel)
13270 relocation = (sec->output_section->vma
13271 + sec->output_offset
13272 + sym->st_value);
13273 if (!bfd_link_relocatable (info)
13274 && (sec->flags & SEC_MERGE)
13275 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13277 asection *msec;
13278 bfd_vma addend, value;
13280 switch (r_type)
13282 case R_ARM_MOVW_ABS_NC:
13283 case R_ARM_MOVT_ABS:
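/* The ARM MOVW/MOVT immediate is split into imm4 (instruction bits
[19:16]) and imm12 (bits [11:0]); recombine them into a 16-bit value
and sign-extend it to recover the addend. */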
13284 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13285 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
13286 addend = (addend ^ 0x8000) - 0x8000;
13287 break;
13289 case R_ARM_THM_MOVW_ABS_NC:
13290 case R_ARM_THM_MOVT_ABS:
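/* The Thumb-2 MOVW/MOVT immediate is scattered across the two
halfwords as imm4 (bits [19:16] of the combined word), i (bit 26),
imm3 (bits [14:12]) and imm8 (bits [7:0]); recombine them as
imm4:i:imm3:imm8 and sign-extend to recover the addend. */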
13291 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
13292 << 16;
13293 value |= bfd_get_16 (input_bfd,
13294 contents + rel->r_offset + 2);
13295 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
13296 | ((value & 0x04000000) >> 15);
13297 addend = (addend ^ 0x8000) - 0x8000;
13298 break;
13300 default:
13301 if (howto->rightshift
13302 || (howto->src_mask & (howto->src_mask + 1)))
13304 _bfd_error_handler
13305 /* xgettext:c-format */
13306 (_("%pB(%pA+%#" PRIx64 "): "
13307 "%s relocation against SEC_MERGE section"),
13308 input_bfd, input_section,
13309 (uint64_t) rel->r_offset, howto->name);
13310 return false;
13313 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13315 /* Get the (signed) value from the instruction. */
13316 addend = value & howto->src_mask;
13317 if (addend & ((howto->src_mask + 1) >> 1))
13319 bfd_signed_vma mask;
13321 mask = -1;
13322 mask &= ~ howto->src_mask;
13323 addend |= mask;
13325 break;
13328 msec = sec;
13329 addend =
13330 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
13331 - relocation;
13332 addend += msec->output_section->vma + msec->output_offset;
13334 /* Cases here must match those in the preceding
13335 switch statement. */
13336 switch (r_type)
13338 case R_ARM_MOVW_ABS_NC:
13339 case R_ARM_MOVT_ABS:
13340 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
13341 | (addend & 0xfff);
13342 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13343 break;
13345 case R_ARM_THM_MOVW_ABS_NC:
13346 case R_ARM_THM_MOVT_ABS:
13347 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
13348 | (addend & 0xff) | ((addend & 0x0800) << 15);
13349 bfd_put_16 (input_bfd, value >> 16,
13350 contents + rel->r_offset);
13351 bfd_put_16 (input_bfd, value,
13352 contents + rel->r_offset + 2);
13353 break;
13355 default:
13356 value = (value & ~ howto->dst_mask)
13357 | (addend & howto->dst_mask);
13358 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13359 break;
13363 else
13364 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
13366 else
13368 bool warned, ignored;
13370 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
13371 r_symndx, symtab_hdr, sym_hashes,
13372 h, sec, relocation,
13373 unresolved_reloc, warned, ignored);
13375 sym_type = h->type;
13378 if (sec != NULL && discarded_section (sec))
13379 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
13380 rel, 1, relend, howto, 0, contents);
13382 if (bfd_link_relocatable (info))
13384 /* This is a relocatable link. We don't have to change
13385 anything, unless the reloc is against a section symbol,
13386 in which case we have to adjust according to where the
13387 section symbol winds up in the output section. */
13388 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13390 if (globals->use_rel)
13391 arm_add_to_rel (input_bfd, contents + rel->r_offset,
13392 howto, (bfd_signed_vma) sec->output_offset);
13393 else
13394 rel->r_addend += sec->output_offset;
13396 continue;
13399 if (h != NULL)
13400 name = h->root.root.string;
13401 else
13403 name = (bfd_elf_string_from_elf_section
13404 (input_bfd, symtab_hdr->sh_link, sym->st_name));
13405 if (name == NULL || *name == '\0')
13406 name = bfd_section_name (sec);
13409 if (r_symndx != STN_UNDEF
13410 && r_type != R_ARM_NONE
13411 && (h == NULL
13412 || h->root.type == bfd_link_hash_defined
13413 || h->root.type == bfd_link_hash_defweak)
13414 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
13416 _bfd_error_handler
13417 ((sym_type == STT_TLS
13418 /* xgettext:c-format */
13419 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
13420 /* xgettext:c-format */
13421 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
13422 input_bfd,
13423 input_section,
13424 (uint64_t) rel->r_offset,
13425 howto->name,
13426 name);
13429 /* We call elf32_arm_final_link_relocate unless we're completely
13430 done, i.e., the relaxation produced the final output we want,
13431 and we won't let anybody mess with it. Also, we have to do
13432 addend adjustments in case of an R_ARM_TLS_GOTDESC relocation
13433 both in relaxed and non-relaxed cases. */
13434 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
13435 || (IS_ARM_TLS_GNU_RELOC (r_type)
13436 && !((h ? elf32_arm_hash_entry (h)->tls_type :
13437 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
13438 & GOT_TLS_GDESC)))
13440 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
13441 contents, rel, h == NULL);
13442 /* This may have been marked unresolved because it came from
13443 a shared library. But we've just dealt with that. */
13444 unresolved_reloc = 0;
13446 else
13447 r = bfd_reloc_continue;
13449 if (r == bfd_reloc_continue)
13451 unsigned char branch_type =
13452 h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
13453 : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
13455 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
13456 input_section, contents, rel,
13457 relocation, info, sec, name,
13458 sym_type, branch_type, h,
13459 &unresolved_reloc,
13460 &error_message);
13463 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13464 because such sections are not SEC_ALLOC and thus ld.so will
13465 not process them. */
13466 if (unresolved_reloc
13467 && !((input_section->flags & SEC_DEBUGGING) != 0
13468 && h->def_dynamic)
13469 && _bfd_elf_section_offset (output_bfd, info, input_section,
13470 rel->r_offset) != (bfd_vma) -1)
13472 _bfd_error_handler
13473 /* xgettext:c-format */
13474 (_("%pB(%pA+%#" PRIx64 "): "
13475 "unresolvable %s relocation against symbol `%s'"),
13476 input_bfd,
13477 input_section,
13478 (uint64_t) rel->r_offset,
13479 howto->name,
13480 h->root.root.string);
13481 return false;
13484 if (r != bfd_reloc_ok)
13486 switch (r)
13488 case bfd_reloc_overflow:
13489 /* If the overflowing reloc was to an undefined symbol,
13490 we have already printed one error message and there
13491 is no point complaining again. */
13492 if (!h || h->root.type != bfd_link_hash_undefined)
13493 (*info->callbacks->reloc_overflow)
13494 (info, (h ? &h->root : NULL), name, howto->name,
13495 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
13496 break;
13498 case bfd_reloc_undefined:
13499 (*info->callbacks->undefined_symbol)
13500 (info, name, input_bfd, input_section, rel->r_offset, true);
13501 break;
13503 case bfd_reloc_outofrange:
13504 error_message = _("out of range");
13505 goto common_error;
13507 case bfd_reloc_notsupported:
13508 error_message = _("unsupported relocation");
13509 goto common_error;
13511 case bfd_reloc_dangerous:
13512 /* error_message should already be set. */
13513 goto common_error;
13515 default:
13516 error_message = _("unknown error");
13517 /* Fall through. */
13519 common_error:
13520 BFD_ASSERT (error_message != NULL);
13521 (*info->callbacks->reloc_dangerous)
13522 (info, error_message, input_bfd, input_section, rel->r_offset);
13523 break;
13528 return true;
13531 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13532 adds the edit to the start of the list. (The list must be built in order of
13533 ascending TINDEX: the function's callers are primarily responsible for
13534 maintaining that condition). */
13536 static void
13537 add_unwind_table_edit (arm_unwind_table_edit **head,
13538 arm_unwind_table_edit **tail,
13539 arm_unwind_edit_type type,
13540 asection *linked_section,
13541 unsigned int tindex)
13543 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13544 xmalloc (sizeof (arm_unwind_table_edit));
13546 new_edit->type = type;
13547 new_edit->linked_section = linked_section;
13548 new_edit->index = tindex;
13550 if (tindex > 0)
13552 new_edit->next = NULL;
13554 if (*tail)
13555 (*tail)->next = new_edit;
13557 (*tail) = new_edit;
13559 if (!*head)
13560 (*head) = new_edit;
13562 else
13564 new_edit->next = *head;
13566 if (!*tail)
13567 *tail = new_edit;
13569 *head = new_edit;
13573 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13575 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
13577 static void
13578 adjust_exidx_size (asection *exidx_sec, int adjust)
13580 asection *out_sec;
13582 if (!exidx_sec->rawsize)
13583 exidx_sec->rawsize = exidx_sec->size;
13585 bfd_set_section_size (exidx_sec, exidx_sec->size + adjust);
13586 out_sec = exidx_sec->output_section;
13587 /* Adjust size of output section. */
13588 bfd_set_section_size (out_sec, out_sec->size + adjust);
13591 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13593 static void
13594 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
13596 struct _arm_elf_section_data *exidx_arm_data;
13598 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13599 add_unwind_table_edit
13600 (&exidx_arm_data->u.exidx.unwind_edit_list,
13601 &exidx_arm_data->u.exidx.unwind_edit_tail,
13602 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13604 exidx_arm_data->additional_reloc_count++;
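/* Each unwind index entry is two 32-bit words, so the table grows by
eight bytes. */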
13606 adjust_exidx_size (exidx_sec, 8);
13609 /* Scan .ARM.exidx tables, and create a list describing edits which should be
13610 made to those tables, such that:
13612 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
13613 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
13614 codes which have been inlined into the index).
13616 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
13618 The edits are applied when the tables are written
13619 (in elf32_arm_write_section). */
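/* For reference, a sketch of the .ARM.exidx entry format assumed by
the scan below (per the ARM EHABI): each entry is two words, the first
a prel31 offset to the function start, and the second one of:

1            EXIDX_CANTUNWIND - no unwind information
bit 31 set   unwind opcodes inlined in the remaining 31 bits
otherwise    a prel31 offset to a .ARM.extab entry

This is why the loop below walks the table in 8-byte steps and
classifies entries by their second word. */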
13621 bool
13622 elf32_arm_fix_exidx_coverage (asection **text_section_order,
13623 unsigned int num_text_sections,
13624 struct bfd_link_info *info,
13625 bool merge_exidx_entries)
13627 bfd *inp;
13628 unsigned int last_second_word = 0, i;
13629 asection *last_exidx_sec = NULL;
13630 asection *last_text_sec = NULL;
13631 int last_unwind_type = -1;
13633 /* Walk over all EXIDX sections, and create backlinks from the corresponding
13634 text sections. */
13635 for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
13637 asection *sec;
13639 for (sec = inp->sections; sec != NULL; sec = sec->next)
13641 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
13642 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
13644 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
13645 continue;
13647 if (elf_sec->linked_to)
13649 Elf_Internal_Shdr *linked_hdr
13650 = &elf_section_data (elf_sec->linked_to)->this_hdr;
13651 struct _arm_elf_section_data *linked_sec_arm_data
13652 = get_arm_elf_section_data (linked_hdr->bfd_section);
13654 if (linked_sec_arm_data == NULL)
13655 continue;
13657 /* Link this .ARM.exidx section back from the text section it
13658 describes. */
13659 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
13664 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
13665 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
13666 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
13668 for (i = 0; i < num_text_sections; i++)
13670 asection *sec = text_section_order[i];
13671 asection *exidx_sec;
13672 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
13673 struct _arm_elf_section_data *exidx_arm_data;
13674 bfd_byte *contents = NULL;
13675 int deleted_exidx_bytes = 0;
13676 bfd_vma j;
13677 arm_unwind_table_edit *unwind_edit_head = NULL;
13678 arm_unwind_table_edit *unwind_edit_tail = NULL;
13679 Elf_Internal_Shdr *hdr;
13680 bfd *ibfd;
13682 if (arm_data == NULL)
13683 continue;
13685 exidx_sec = arm_data->u.text.arm_exidx_sec;
13686 if (exidx_sec == NULL)
13688 /* Section has no unwind data. */
13689 if (last_unwind_type == 0 || !last_exidx_sec)
13690 continue;
13692 /* Ignore zero sized sections. */
13693 if (sec->size == 0)
13694 continue;
13696 insert_cantunwind_after (last_text_sec, last_exidx_sec);
13697 last_unwind_type = 0;
13698 continue;
13701 /* Skip /DISCARD/ sections. */
13702 if (bfd_is_abs_section (exidx_sec->output_section))
13703 continue;
13705 hdr = &elf_section_data (exidx_sec)->this_hdr;
13706 if (hdr->sh_type != SHT_ARM_EXIDX)
13707 continue;
13709 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13710 if (exidx_arm_data == NULL)
13711 continue;
13713 ibfd = exidx_sec->owner;
13715 if (hdr->contents != NULL)
13716 contents = hdr->contents;
13717 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
13718 /* An error? */
13719 continue;
13721 if (last_unwind_type > 0)
13723 unsigned int first_word = bfd_get_32 (ibfd, contents);
13724 /* Add cantunwind if first unwind item does not match section
13725 start. */
13726 if (first_word != sec->vma)
13728 insert_cantunwind_after (last_text_sec, last_exidx_sec);
13729 last_unwind_type = 0;
13733 for (j = 0; j < hdr->sh_size; j += 8)
13735 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
13736 int unwind_type;
13737 int elide = 0;
13739 /* An EXIDX_CANTUNWIND entry. */
13740 if (second_word == 1)
13742 if (last_unwind_type == 0)
13743 elide = 1;
13744 unwind_type = 0;
13746 /* Inlined unwinding data. Merge if equal to previous. */
13747 else if ((second_word & 0x80000000) != 0)
13749 if (merge_exidx_entries
13750 && last_second_word == second_word && last_unwind_type == 1)
13751 elide = 1;
13752 unwind_type = 1;
13753 last_second_word = second_word;
13755 /* Normal table entry. In theory we could merge these too,
13756 but duplicate entries are likely to be much less common. */
13757 else
13758 unwind_type = 2;
13760 if (elide && !bfd_link_relocatable (info))
13762 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
13763 DELETE_EXIDX_ENTRY, NULL, j / 8);
13765 deleted_exidx_bytes += 8;
13768 last_unwind_type = unwind_type;
13771 /* Free contents if we allocated it ourselves. */
13772 if (contents != hdr->contents)
13773 free (contents);
13775 /* Record edits to be applied later (in elf32_arm_write_section). */
13776 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
13777 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
13779 if (deleted_exidx_bytes > 0)
13780 adjust_exidx_size (exidx_sec, - deleted_exidx_bytes);
13782 last_exidx_sec = exidx_sec;
13783 last_text_sec = sec;
13786 /* Add terminating CANTUNWIND entry. */
13787 if (!bfd_link_relocatable (info) && last_exidx_sec
13788 && last_unwind_type != 0)
13789 insert_cantunwind_after (last_text_sec, last_exidx_sec);
13791 return true;
13794 static bool
13795 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13796 bfd *ibfd, const char *name)
13798 asection *sec, *osec;
13800 sec = bfd_get_linker_section (ibfd, name);
13801 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13802 return true;
13804 osec = sec->output_section;
13805 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13806 return true;
13808 if (! bfd_set_section_contents (obfd, osec, sec->contents,
13809 sec->output_offset, sec->size))
13810 return false;
13812 return true;
13815 static bool
13816 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13818 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13819 asection *sec, *osec;
13821 if (globals == NULL)
13822 return false;
13824 /* Invoke the regular ELF backend linker to do all the work. */
13825 if (!bfd_elf_final_link (abfd, info))
13826 return false;
13828 /* Process stub sections (e.g. BE8 encoding, ...). */
13829 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13830 unsigned int i;
13831 for (i=0; i<htab->top_id; i++)
13833 sec = htab->stub_group[i].stub_sec;
13834 /* Only process it once, in its link_sec slot. */
13835 if (sec && i == htab->stub_group[i].link_sec->id)
13837 osec = sec->output_section;
13838 elf32_arm_write_section (abfd, info, sec, sec->contents);
13839 if (! bfd_set_section_contents (abfd, osec, sec->contents,
13840 sec->output_offset, sec->size))
13841 return false;
13845 /* Write out any glue sections now that we have created all the
13846 stubs. */
13847 if (globals->bfd_of_glue_owner != NULL)
13849 if (! elf32_arm_output_glue_section (info, abfd,
13850 globals->bfd_of_glue_owner,
13851 ARM2THUMB_GLUE_SECTION_NAME))
13852 return false;
13854 if (! elf32_arm_output_glue_section (info, abfd,
13855 globals->bfd_of_glue_owner,
13856 THUMB2ARM_GLUE_SECTION_NAME))
13857 return false;
13859 if (! elf32_arm_output_glue_section (info, abfd,
13860 globals->bfd_of_glue_owner,
13861 VFP11_ERRATUM_VENEER_SECTION_NAME))
13862 return false;
13864 if (! elf32_arm_output_glue_section (info, abfd,
13865 globals->bfd_of_glue_owner,
13866 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13867 return false;
13869 if (! elf32_arm_output_glue_section (info, abfd,
13870 globals->bfd_of_glue_owner,
13871 ARM_BX_GLUE_SECTION_NAME))
13872 return false;
13875 return true;
13878 /* Return a best guess for the machine number based on the attributes. */
13880 static unsigned int
13881 bfd_arm_get_mach_from_attributes (bfd * abfd)
13883 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
13885 switch (arch)
13887 case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
13888 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
13889 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
13890 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
13892 case TAG_CPU_ARCH_V5TE:
13894 char * name;
13896 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
13897 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
13899 if (name)
13901 if (strcmp (name, "IWMMXT2") == 0)
13902 return bfd_mach_arm_iWMMXt2;
13904 if (strcmp (name, "IWMMXT") == 0)
13905 return bfd_mach_arm_iWMMXt;
13907 if (strcmp (name, "XSCALE") == 0)
13909 int wmmx;
13911 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
13912 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
13913 switch (wmmx)
13915 case 1: return bfd_mach_arm_iWMMXt;
13916 case 2: return bfd_mach_arm_iWMMXt2;
13917 default: return bfd_mach_arm_XScale;
13922 return bfd_mach_arm_5TE;
13925 case TAG_CPU_ARCH_V5TEJ:
13926 return bfd_mach_arm_5TEJ;
13927 case TAG_CPU_ARCH_V6:
13928 return bfd_mach_arm_6;
13929 case TAG_CPU_ARCH_V6KZ:
13930 return bfd_mach_arm_6KZ;
13931 case TAG_CPU_ARCH_V6T2:
13932 return bfd_mach_arm_6T2;
13933 case TAG_CPU_ARCH_V6K:
13934 return bfd_mach_arm_6K;
13935 case TAG_CPU_ARCH_V7:
13936 return bfd_mach_arm_7;
13937 case TAG_CPU_ARCH_V6_M:
13938 return bfd_mach_arm_6M;
13939 case TAG_CPU_ARCH_V6S_M:
13940 return bfd_mach_arm_6SM;
13941 case TAG_CPU_ARCH_V7E_M:
13942 return bfd_mach_arm_7EM;
13943 case TAG_CPU_ARCH_V8:
13944 return bfd_mach_arm_8;
13945 case TAG_CPU_ARCH_V8R:
13946 return bfd_mach_arm_8R;
13947 case TAG_CPU_ARCH_V8M_BASE:
13948 return bfd_mach_arm_8M_BASE;
13949 case TAG_CPU_ARCH_V8M_MAIN:
13950 return bfd_mach_arm_8M_MAIN;
13951 case TAG_CPU_ARCH_V8_1M_MAIN:
13952 return bfd_mach_arm_8_1M_MAIN;
13953 case TAG_CPU_ARCH_V9:
13954 return bfd_mach_arm_9;
13956 default:
13957 /* Force entry to be added for any new known Tag_CPU_arch value. */
13958 BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);
13960 /* Unknown Tag_CPU_arch value. */
13961 return bfd_mach_arm_unknown;
13965 /* Set the right machine number. */
13967 static bool
13968 elf32_arm_object_p (bfd *abfd)
13970 unsigned int mach;
13972 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13974 if (mach == bfd_mach_arm_unknown)
13975 mach = bfd_arm_get_mach_from_attributes (abfd);
13977 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13978 return true;
13981 /* Function to keep ARM specific flags in the ELF header. */
13983 static bool
13984 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13986 if (elf_flags_init (abfd)
13987 && elf_elfheader (abfd)->e_flags != flags)
13989 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13991 if (flags & EF_ARM_INTERWORK)
13992 _bfd_error_handler
13993 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13994 abfd);
13995 else
13996 _bfd_error_handler
13997 (_("warning: clearing the interworking flag of %pB due to outside request"),
13998 abfd);
14001 else
14003 elf_elfheader (abfd)->e_flags = flags;
14004 elf_flags_init (abfd) = true;
14007 return true;
14010 /* Copy backend specific data from one object module to another. */
14012 static bool
14013 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
14015 flagword in_flags;
14016 flagword out_flags;
14018 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
14019 return true;
14021 in_flags = elf_elfheader (ibfd)->e_flags;
14022 out_flags = elf_elfheader (obfd)->e_flags;
14024 if (elf_flags_init (obfd)
14025 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
14026 && in_flags != out_flags)
14028 /* Cannot mix APCS26 and APCS32 code. */
14029 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
14030 return false;
14032 /* Cannot mix float APCS and non-float APCS code. */
14033 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
14034 return false;
14036 /* If the src and dest have different interworking flags
14037 then turn off the interworking bit. */
14038 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
14040 if (out_flags & EF_ARM_INTERWORK)
14041 _bfd_error_handler
14042 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
14043 obfd, ibfd);
14045 in_flags &= ~EF_ARM_INTERWORK;
14048 /* Likewise for PIC, though don't warn for this case. */
14049 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
14050 in_flags &= ~EF_ARM_PIC;
14053 elf_elfheader (obfd)->e_flags = in_flags;
14054 elf_flags_init (obfd) = true;
14056 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
14059 /* Values for Tag_ABI_PCS_R9_use. */
14060 enum
14062 AEABI_R9_V6,
14063 AEABI_R9_SB,
14064 AEABI_R9_TLS,
14065 AEABI_R9_unused
14068 /* Values for Tag_ABI_PCS_RW_data. */
14069 enum
14071 AEABI_PCS_RW_data_absolute,
14072 AEABI_PCS_RW_data_PCrel,
14073 AEABI_PCS_RW_data_SBrel,
14074 AEABI_PCS_RW_data_unused
14077 /* Values for Tag_ABI_enum_size. */
14078 enum
14080 AEABI_enum_unused,
14081 AEABI_enum_short,
14082 AEABI_enum_wide,
14083 AEABI_enum_forced_wide
14086 /* Determine whether an object attribute tag takes an integer, a
14087 string or both. */
14089 static int
14090 elf32_arm_obj_attrs_arg_type (int tag)
14092 if (tag == Tag_compatibility)
14093 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
14094 else if (tag == Tag_nodefaults)
14095 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
14096 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
14097 return ATTR_TYPE_FLAG_STR_VAL;
14098 else if (tag < 32)
14099 return ATTR_TYPE_FLAG_INT_VAL;
14100 else
14101 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
14104 /* The ABI defines that Tag_conformance should be emitted first, and that
14105 Tag_nodefaults should be second (if either is defined). This sets those
14106 two positions, and bumps up the position of all the remaining tags to
14107 compensate. */
14108 static int
14109 elf32_arm_obj_attrs_order (int num)
14111 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
14112 return Tag_conformance;
14113 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
14114 return Tag_nodefaults;
14115 if ((num - 2) < Tag_nodefaults)
14116 return num - 2;
14117 if ((num - 1) < Tag_conformance)
14118 return num - 1;
14119 return num;
14122 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
14123 static bool
14124 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
14126 if ((tag & 127) < 64)
14128 _bfd_error_handler
14129 (_("%pB: unknown mandatory EABI object attribute %d"),
14130 abfd, tag);
14131 bfd_set_error (bfd_error_bad_value);
14132 return false;
14134 else
14136 _bfd_error_handler
14137 (_("warning: %pB: unknown EABI object attribute %d"),
14138 abfd, tag);
14139 return true;
14143 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
14144 Returns -1 if no architecture could be read. */
14146 static int
14147 get_secondary_compatible_arch (bfd *abfd)
14149 obj_attribute *attr =
14150 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14152 /* Note: the tag and its argument below are uleb128 values, though
14153 currently-defined values fit in one byte for each. */
14154 if (attr->s
14155 && attr->s[0] == Tag_CPU_arch
14156 && (attr->s[1] & 128) != 128
14157 && attr->s[2] == 0)
14158 return attr->s[1];
14160 /* This tag is "safely ignorable", so don't complain if it looks funny. */
14161 return -1;
14164 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
14165 The tag is removed if ARCH is -1. */
14167 static void
14168 set_secondary_compatible_arch (bfd *abfd, int arch)
14170 obj_attribute *attr =
14171 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14173 if (arch == -1)
14175 attr->s = NULL;
14176 return;
14179 /* Note: the tag and its argument below are uleb128 values, though
14180 currently-defined values fit in one byte for each. */
14181 if (!attr->s)
14182 attr->s = (char *) bfd_alloc (abfd, 3);
14183 attr->s[0] = Tag_CPU_arch;
14184 attr->s[1] = arch;
14185 attr->s[2] = '\0';
14188 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
14189 into account. */
14191 static int
14192 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
14193 int newtag, int secondary_compat, const char* name_table[])
14195 #define T(X) TAG_CPU_ARCH_##X
14196 int tagl, tagh, result;
14197 const int v6t2[] =
14199 T(V6T2), /* PRE_V4. */
14200 T(V6T2), /* V4. */
14201 T(V6T2), /* V4T. */
14202 T(V6T2), /* V5T. */
14203 T(V6T2), /* V5TE. */
14204 T(V6T2), /* V5TEJ. */
14205 T(V6T2), /* V6. */
14206 T(V7), /* V6KZ. */
14207 T(V6T2) /* V6T2. */
14209 const int v6k[] =
14211 T(V6K), /* PRE_V4. */
14212 T(V6K), /* V4. */
14213 T(V6K), /* V4T. */
14214 T(V6K), /* V5T. */
14215 T(V6K), /* V5TE. */
14216 T(V6K), /* V5TEJ. */
14217 T(V6K), /* V6. */
14218 T(V6KZ), /* V6KZ. */
14219 T(V7), /* V6T2. */
14220 T(V6K) /* V6K. */
14222 const int v7[] =
14224 T(V7), /* PRE_V4. */
14225 T(V7), /* V4. */
14226 T(V7), /* V4T. */
14227 T(V7), /* V5T. */
14228 T(V7), /* V5TE. */
14229 T(V7), /* V5TEJ. */
14230 T(V7), /* V6. */
14231 T(V7), /* V6KZ. */
14232 T(V7), /* V6T2. */
14233 T(V7), /* V6K. */
14234 T(V7) /* V7. */
14236 const int v6_m[] =
14238 -1, /* PRE_V4. */
14239 -1, /* V4. */
14240 T(V6K), /* V4T. */
14241 T(V6K), /* V5T. */
14242 T(V6K), /* V5TE. */
14243 T(V6K), /* V5TEJ. */
14244 T(V6K), /* V6. */
14245 T(V6KZ), /* V6KZ. */
14246 T(V7), /* V6T2. */
14247 T(V6K), /* V6K. */
14248 T(V7), /* V7. */
14249 T(V6_M) /* V6_M. */
14251 const int v6s_m[] =
14253 -1, /* PRE_V4. */
14254 -1, /* V4. */
14255 T(V6K), /* V4T. */
14256 T(V6K), /* V5T. */
14257 T(V6K), /* V5TE. */
14258 T(V6K), /* V5TEJ. */
14259 T(V6K), /* V6. */
14260 T(V6KZ), /* V6KZ. */
14261 T(V7), /* V6T2. */
14262 T(V6K), /* V6K. */
14263 T(V7), /* V7. */
14264 T(V6S_M), /* V6_M. */
14265 T(V6S_M) /* V6S_M. */
14267 const int v7e_m[] =
14269 -1, /* PRE_V4. */
14270 -1, /* V4. */
14271 T(V7E_M), /* V4T. */
14272 T(V7E_M), /* V5T. */
14273 T(V7E_M), /* V5TE. */
14274 T(V7E_M), /* V5TEJ. */
14275 T(V7E_M), /* V6. */
14276 T(V7E_M), /* V6KZ. */
14277 T(V7E_M), /* V6T2. */
14278 T(V7E_M), /* V6K. */
14279 T(V7E_M), /* V7. */
14280 T(V7E_M), /* V6_M. */
14281 T(V7E_M), /* V6S_M. */
14282 T(V7E_M) /* V7E_M. */
14284 const int v8[] =
14286 T(V8), /* PRE_V4. */
14287 T(V8), /* V4. */
14288 T(V8), /* V4T. */
14289 T(V8), /* V5T. */
14290 T(V8), /* V5TE. */
14291 T(V8), /* V5TEJ. */
14292 T(V8), /* V6. */
14293 T(V8), /* V6KZ. */
14294 T(V8), /* V6T2. */
14295 T(V8), /* V6K. */
14296 T(V8), /* V7. */
14297 T(V8), /* V6_M. */
14298 T(V8), /* V6S_M. */
14299 T(V8), /* V7E_M. */
14300 T(V8), /* V8. */
14301 T(V8), /* V8-R. */
14302 T(V8), /* V8-M.BASE. */
14303 T(V8), /* V8-M.MAIN. */
14304 T(V8), /* V8.1. */
14305 T(V8), /* V8.2. */
14306 T(V8), /* V8.3. */
14307 T(V8), /* V8.1-M.MAIN. */
14309 const int v8r[] =
14311 T(V8R), /* PRE_V4. */
14312 T(V8R), /* V4. */
14313 T(V8R), /* V4T. */
14314 T(V8R), /* V5T. */
14315 T(V8R), /* V5TE. */
14316 T(V8R), /* V5TEJ. */
14317 T(V8R), /* V6. */
14318 T(V8R), /* V6KZ. */
14319 T(V8R), /* V6T2. */
14320 T(V8R), /* V6K. */
14321 T(V8R), /* V7. */
14322 T(V8R), /* V6_M. */
14323 T(V8R), /* V6S_M. */
14324 T(V8R), /* V7E_M. */
14325 T(V8), /* V8. */
14326 T(V8R), /* V8R. */
14328 const int v8m_baseline[] =
14330 -1, /* PRE_V4. */
14331 -1, /* V4. */
14332 -1, /* V4T. */
14333 -1, /* V5T. */
14334 -1, /* V5TE. */
14335 -1, /* V5TEJ. */
14336 -1, /* V6. */
14337 -1, /* V6KZ. */
14338 -1, /* V6T2. */
14339 -1, /* V6K. */
14340 -1, /* V7. */
14341 T(V8M_BASE), /* V6_M. */
14342 T(V8M_BASE), /* V6S_M. */
14343 -1, /* V7E_M. */
14344 -1, /* V8. */
14345 -1, /* V8R. */
14346 T(V8M_BASE) /* V8-M BASELINE. */
14348 const int v8m_mainline[] =
14350 -1, /* PRE_V4. */
14351 -1, /* V4. */
14352 -1, /* V4T. */
14353 -1, /* V5T. */
14354 -1, /* V5TE. */
14355 -1, /* V5TEJ. */
14356 -1, /* V6. */
14357 -1, /* V6KZ. */
14358 -1, /* V6T2. */
14359 -1, /* V6K. */
14360 T(V8M_MAIN), /* V7. */
14361 T(V8M_MAIN), /* V6_M. */
14362 T(V8M_MAIN), /* V6S_M. */
14363 T(V8M_MAIN), /* V7E_M. */
14364 -1, /* V8. */
14365 -1, /* V8R. */
14366 T(V8M_MAIN), /* V8-M BASELINE. */
14367 T(V8M_MAIN) /* V8-M MAINLINE. */
14369 const int v8_1m_mainline[] =
14371 -1, /* PRE_V4. */
14372 -1, /* V4. */
14373 -1, /* V4T. */
14374 -1, /* V5T. */
14375 -1, /* V5TE. */
14376 -1, /* V5TEJ. */
14377 -1, /* V6. */
14378 -1, /* V6KZ. */
14379 -1, /* V6T2. */
14380 -1, /* V6K. */
14381 T(V8_1M_MAIN), /* V7. */
14382 T(V8_1M_MAIN), /* V6_M. */
14383 T(V8_1M_MAIN), /* V6S_M. */
14384 T(V8_1M_MAIN), /* V7E_M. */
14385 -1, /* V8. */
14386 -1, /* V8R. */
14387 T(V8_1M_MAIN), /* V8-M BASELINE. */
14388 T(V8_1M_MAIN), /* V8-M MAINLINE. */
14389 -1, /* Unused (18). */
14390 -1, /* Unused (19). */
14391 -1, /* Unused (20). */
14392 T(V8_1M_MAIN) /* V8.1-M MAINLINE. */
14394 const int v9[] =
14396 T(V9), /* PRE_V4. */
14397 T(V9), /* V4. */
14398 T(V9), /* V4T. */
14399 T(V9), /* V5T. */
14400 T(V9), /* V5TE. */
14401 T(V9), /* V5TEJ. */
14402 T(V9), /* V6. */
14403 T(V9), /* V6KZ. */
14404 T(V9), /* V6T2. */
14405 T(V9), /* V6K. */
14406 T(V9), /* V7. */
14407 T(V9), /* V6_M. */
14408 T(V9), /* V6S_M. */
14409 T(V9), /* V7E_M. */
14410 T(V9), /* V8. */
14411 T(V9), /* V8-R. */
14412 T(V9), /* V8-M.BASE. */
14413 T(V9), /* V8-M.MAIN. */
14414 T(V9), /* V8.1. */
14415 T(V9), /* V8.2. */
14416 T(V9), /* V8.3. */
14417 T(V9), /* V8.1-M.MAIN. */
14418 T(V9), /* V9. */
14420 const int v4t_plus_v6_m[] =
14422 -1, /* PRE_V4. */
14423 -1, /* V4. */
14424 T(V4T), /* V4T. */
14425 T(V5T), /* V5T. */
14426 T(V5TE), /* V5TE. */
14427 T(V5TEJ), /* V5TEJ. */
14428 T(V6), /* V6. */
14429 T(V6KZ), /* V6KZ. */
14430 T(V6T2), /* V6T2. */
14431 T(V6K), /* V6K. */
14432 T(V7), /* V7. */
14433 T(V6_M), /* V6_M. */
14434 T(V6S_M), /* V6S_M. */
14435 T(V7E_M), /* V7E_M. */
14436 T(V8), /* V8. */
14437 -1, /* V8R. */
14438 T(V8M_BASE), /* V8-M BASELINE. */
14439 T(V8M_MAIN), /* V8-M MAINLINE. */
14440 -1, /* Unused (18). */
14441 -1, /* Unused (19). */
14442 -1, /* Unused (20). */
14443 T(V8_1M_MAIN), /* V8.1-M MAINLINE. */
14444 T(V9), /* V9. */
14445 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
14447 const int *comb[] =
14449 v6t2,
14450 v6k,
14451 v7,
14452 v6_m,
14453 v6s_m,
14454 v7e_m,
14455 v8,
14456 v8r,
14457 v8m_baseline,
14458 v8m_mainline,
14459 NULL,
14460 NULL,
14461 NULL,
14462 v8_1m_mainline,
14463 v9,
14464 /* Pseudo-architecture. */
14465 v4t_plus_v6_m
14468 /* Check we've not got a higher architecture than we know about. */
14470 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
14472 _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
14473 return -1;
14476 /* Override old tag if we have a Tag_also_compatible_with on the output. */
14478 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
14479 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
14480 oldtag = T(V4T_PLUS_V6_M);
14482 /* And override the new tag if we have a Tag_also_compatible_with on the
14483 input. */
14485 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
14486 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
14487 newtag = T(V4T_PLUS_V6_M);
14489 tagl = (oldtag < newtag) ? oldtag : newtag;
14490 result = tagh = (oldtag > newtag) ? oldtag : newtag;
14492 /* Architectures before V6KZ add features monotonically. */
14493 if (tagh <= TAG_CPU_ARCH_V6KZ)
14494 return result;
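/* comb[] is indexed by the higher of the two tags, relative to V6T2;
each row maps the lower tag to the merged architecture, and -1 (or a
missing row) means the combination is incompatible. */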
14496 result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
14498 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
14499 as the canonical version. */
14500 if (result == T(V4T_PLUS_V6_M))
14502 result = T(V4T);
14503 *secondary_compat_out = T(V6_M);
14505 else
14506 *secondary_compat_out = -1;
14508 if (result == -1)
14510 _bfd_error_handler (_("error: conflicting CPU architectures %s vs %s in %pB"),
14511 name_table[oldtag], name_table[newtag], ibfd);
14512 return -1;
14515 return result;
14516 #undef T
14519 /* Query attributes object to see if integer divide instructions may be
14520 present in an object. */
14521 static bool
14522 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14524 int arch = attr[Tag_CPU_arch].i;
14525 int profile = attr[Tag_CPU_arch_profile].i;
14527 switch (attr[Tag_DIV_use].i)
14529 case 0:
14530 /* Integer divide allowed if instruction contained in architecture. */
14531 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14532 return true;
14533 else if (arch >= TAG_CPU_ARCH_V7E_M)
14534 return true;
14535 else
14536 return false;
14538 case 1:
14539 /* Integer divide explicitly prohibited. */
14540 return false;
14542 default:
14543 /* Unrecognised case - treat as allowing divide everywhere. */
14544 case 2:
14545 /* Integer divide allowed in ARM state. */
14546 return true;
14550 /* Query attributes object to see if integer divide instructions are
14551 forbidden to be in the object. This is not the inverse of
14552 elf32_arm_attributes_accept_div. */
14553 static bool
14554 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14556 return attr[Tag_DIV_use].i == 1;
14559 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14560 are conflicting attributes. */
14562 static bool
14563 elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14565 bfd *obfd = info->output_bfd;
14566 obj_attribute *in_attr;
14567 obj_attribute *out_attr;
14568 /* Some tags have 0 = don't care, 1 = strong requirement,
14569 2 = weak requirement. */
14570 static const int order_021[3] = {0, 2, 1};
14571 int i;
14572 bool result = true;
14573 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14575 /* Skip the linker stubs file. This preserves previous behavior
14576 of accepting unknown attributes in the first input file - but
14577 is that a bug? */
14578 if (ibfd->flags & BFD_LINKER_CREATED)
14579 return true;
14581 /* Skip any input that has no attribute section.
14582 This makes it possible to link object files without an attribute
14583 section with any others. */
14584 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14585 return true;
14587 if (!elf_known_obj_attributes_proc (obfd)[0].i)
14589 /* This is the first object. Copy the attributes. */
14590 _bfd_elf_copy_obj_attributes (ibfd, obfd);
14592 out_attr = elf_known_obj_attributes_proc (obfd);
14594 /* Use the Tag_null value to indicate the attributes have been
14595 initialized. */
14596 out_attr[0].i = 1;
14598 /* We do not output objects with Tag_MPextension_use_legacy - we move
14599 the attribute's value to Tag_MPextension_use. */
14600 if (out_attr[Tag_MPextension_use_legacy].i != 0)
14602 if (out_attr[Tag_MPextension_use].i != 0
14603 && out_attr[Tag_MPextension_use_legacy].i
14604 != out_attr[Tag_MPextension_use].i)
14606 _bfd_error_handler
14607 (_("Error: %pB has both the current and legacy "
14608 "Tag_MPextension_use attributes"), ibfd);
14609 result = false;
14612 out_attr[Tag_MPextension_use] =
14613 out_attr[Tag_MPextension_use_legacy];
14614 out_attr[Tag_MPextension_use_legacy].type = 0;
14615 out_attr[Tag_MPextension_use_legacy].i = 0;
14618 /* PR 28859 and 28848: Handle the case where the first input file,
14619 e.g. crti.o, has a Tag_ABI_HardFP_use of 3 but no Tag_FP_arch set.
14620 Using Tag_ABI_HardFP_use in this way is deprecated, so reset the
14621 attribute to zero.
14622 FIXME: Should we handle other non-zero values of Tag_ABI_HardFP_use? */
14623 if (out_attr[Tag_ABI_HardFP_use].i == 3 && out_attr[Tag_FP_arch].i == 0)
14624 out_attr[Tag_ABI_HardFP_use].i = 0;
14626 return result;
14629 in_attr = elf_known_obj_attributes_proc (ibfd);
14630 out_attr = elf_known_obj_attributes_proc (obfd);
14631 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14632 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14634 /* Ignore mismatches if the object doesn't use floating point or is
14635 floating point ABI independent. */
14636 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14637 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14638 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14639 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14640 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14641 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14643 _bfd_error_handler
14644 (_("error: %pB uses VFP register arguments, %pB does not"),
14645 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14646 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14647 result = false;
14651 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14653 /* Merge this attribute with existing attributes. */
14654 switch (i)
14656 case Tag_CPU_raw_name:
14657 case Tag_CPU_name:
14658 /* These are merged after Tag_CPU_arch. */
14659 break;
14661 case Tag_ABI_optimization_goals:
14662 case Tag_ABI_FP_optimization_goals:
14663 /* Use the first value seen. */
14664 break;
14666 case Tag_CPU_arch:
14668 int secondary_compat = -1, secondary_compat_out = -1;
14669 unsigned int saved_out_attr = out_attr[i].i;
14670 int arch_attr;
14671 static const char *name_table[] =
14673 /* These aren't real CPU names, but we can't guess
14674 that from the architecture version alone. */
14675 "Pre v4",
14676 "ARM v4",
14677 "ARM v4T",
14678 "ARM v5T",
14679 "ARM v5TE",
14680 "ARM v5TEJ",
14681 "ARM v6",
14682 "ARM v6KZ",
14683 "ARM v6T2",
14684 "ARM v6K",
14685 "ARM v7",
14686 "ARM v6-M",
14687 "ARM v6S-M",
14688 "ARM v7E-M",
14689 "ARM v8",
14690 "ARM v8-R",
14691 "ARM v8-M.baseline",
14692 "ARM v8-M.mainline",
14693 "ARM v8.1-A",
14694 "ARM v8.2-A",
14695 "ARM v8.3-A",
14696 "ARM v8.1-M.mainline",
14697 "ARM v9",
14700 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14701 secondary_compat = get_secondary_compatible_arch (ibfd);
14702 secondary_compat_out = get_secondary_compatible_arch (obfd);
14703 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14704 &secondary_compat_out,
14705 in_attr[i].i,
14706 secondary_compat,
14707 name_table);
14709 /* Return with error if failed to merge. */
14710 if (arch_attr == -1)
14711 return false;
14713 out_attr[i].i = arch_attr;
14715 set_secondary_compatible_arch (obfd, secondary_compat_out);
14717 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14718 if (out_attr[i].i == saved_out_attr)
14719 ; /* Leave the names alone. */
14720 else if (out_attr[i].i == in_attr[i].i)
14722 /* The output architecture has been changed to match the
14723 input architecture. Use the input names. */
14724 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14725 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14726 : NULL;
14727 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14728 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14729 : NULL;
14731 else
14733 out_attr[Tag_CPU_name].s = NULL;
14734 out_attr[Tag_CPU_raw_name].s = NULL;
14737 /* If we still don't have a value for Tag_CPU_name,
14738 make one up now. Tag_CPU_raw_name remains blank. */
14739 if (out_attr[Tag_CPU_name].s == NULL
14740 && out_attr[i].i < ARRAY_SIZE (name_table))
14741 out_attr[Tag_CPU_name].s =
14742 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14744 break;
14746 case Tag_ARM_ISA_use:
14747 case Tag_THUMB_ISA_use:
14748 case Tag_WMMX_arch:
14749 case Tag_Advanced_SIMD_arch:
14750 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14751 case Tag_ABI_FP_rounding:
14752 case Tag_ABI_FP_exceptions:
14753 case Tag_ABI_FP_user_exceptions:
14754 case Tag_ABI_FP_number_model:
14755 case Tag_FP_HP_extension:
14756 case Tag_CPU_unaligned_access:
14757 case Tag_T2EE_use:
14758 case Tag_MPextension_use:
14759 case Tag_MVE_arch:
14760 case Tag_PAC_extension:
14761 case Tag_BTI_extension:
14762 case Tag_BTI_use:
14763 case Tag_PACRET_use:
14764 /* Use the largest value specified. */
14765 if (in_attr[i].i > out_attr[i].i)
14766 out_attr[i].i = in_attr[i].i;
14767 break;
14769 case Tag_ABI_align_preserved:
14770 case Tag_ABI_PCS_RO_data:
14771 /* Use the smallest value specified. */
14772 if (in_attr[i].i < out_attr[i].i)
14773 out_attr[i].i = in_attr[i].i;
14774 break;
14776 case Tag_ABI_align_needed:
14777 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14778 && (in_attr[Tag_ABI_align_preserved].i == 0
14779 || out_attr[Tag_ABI_align_preserved].i == 0))
14781 /* This error message should be enabled once all non-conformant
14782 binaries in the toolchain have had the attributes set
14783 properly.
14784 _bfd_error_handler
14785 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14786 obfd, ibfd);
14787 result = false; */
14789 /* Fall through. */
14790 case Tag_ABI_FP_denormal:
14791 case Tag_ABI_PCS_GOT_use:
14792 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14793 value if greater than 2 (for future-proofing). */
14794 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14795 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14796 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14797 out_attr[i].i = in_attr[i].i;
14798 break;
14800 case Tag_Virtualization_use:
14801 /* The virtualization tag effectively stores two bits of
14802 information: the intended use of TrustZone (in bit 0), and the
14803 intended use of Virtualization (in bit 1). */
14804 if (out_attr[i].i == 0)
14805 out_attr[i].i = in_attr[i].i;
14806 else if (in_attr[i].i != 0
14807 && in_attr[i].i != out_attr[i].i)
14809 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14810 out_attr[i].i = 3;
14811 else
14813 _bfd_error_handler
14814 (_("error: %pB: unable to merge virtualization attributes "
14815 "with %pB"),
14816 obfd, ibfd);
14817 result = false;
14820 break;
14822 case Tag_CPU_arch_profile:
14823 if (out_attr[i].i != in_attr[i].i)
14825 /* 0 will merge with anything.
14826 'A' and 'S' merge to 'A'.
14827 'R' and 'S' merge to 'R'.
14828 'M' and 'A|R|S' is an error. */
14829 if (out_attr[i].i == 0
14830 || (out_attr[i].i == 'S'
14831 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14832 out_attr[i].i = in_attr[i].i;
14833 else if (in_attr[i].i == 0
14834 || (in_attr[i].i == 'S'
14835 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14836 ; /* Do nothing. */
14837 else
14839 _bfd_error_handler
14840 (_("error: %pB: conflicting architecture profiles %c/%c"),
14841 ibfd,
14842 in_attr[i].i ? in_attr[i].i : '0',
14843 out_attr[i].i ? out_attr[i].i : '0');
14844 result = false;
14847 break;
14849 case Tag_DSP_extension:
14850 /* No need to change output value if any of:
14851 - the input architecture is ARMv5T or earlier (it does not have DSP)
14852 - the input is an M profile other than ARMv7E-M and does not have DSP. */
14853 if (in_attr[Tag_CPU_arch].i <= 3
14854 || (in_attr[Tag_CPU_arch_profile].i == 'M'
14855 && in_attr[Tag_CPU_arch].i != 13
14856 && in_attr[i].i == 0))
14857 ; /* Do nothing. */
14858 /* Output value should be 0 if DSP is part of the architecture, i.e.
14859 - post (>=) ARMv5te architecture output
14860 - A, R or S profile output or ARMv7E-M output architecture. */
14861 else if (out_attr[Tag_CPU_arch].i >= 4
14862 && (out_attr[Tag_CPU_arch_profile].i == 'A'
14863 || out_attr[Tag_CPU_arch_profile].i == 'R'
14864 || out_attr[Tag_CPU_arch_profile].i == 'S'
14865 || out_attr[Tag_CPU_arch].i == 13))
14866 out_attr[i].i = 0;
14867 /* Otherwise, DSP instructions are added and not part of output
14868 architecture. */
14869 else
14870 out_attr[i].i = 1;
14871 break;
14873 case Tag_FP_arch:
14875 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14876 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14877 when it's 0. It might mean absence of FP hardware if
14878 Tag_FP_arch is zero. */
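/* A note on the table below (our reading of the AEABI Tag_FP_arch
encoding, values 0-8: none, VFPv1, VFPv2, VFPv3, VFPv3-D16, VFPv4,
VFPv4-D16, FP-ARMv8, FPv5-D16 for ARMv8): each entry records the FP
ISA version and the number of D registers, so that merging can take
the superset of both and map the result back to a tag value. */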
14880 #define VFP_VERSION_COUNT 9
14881 static const struct
14883 int ver;
14884 int regs;
14885 } vfp_versions[VFP_VERSION_COUNT] =
14887 {0, 0},
14888 {1, 16},
14889 {2, 16},
14890 {3, 32},
14891 {3, 16},
14892 {4, 32},
14893 {4, 16},
14894 {8, 32},
14895 {8, 16}
14897 int ver;
14898 int regs;
14899 int newval;
14901 /* If the output has no requirement about FP hardware,
14902 follow the requirement of the input. */
14903 if (out_attr[i].i == 0)
14905 /* This assert is still reasonable: we shouldn't
14906 produce the suspicious build attribute
14907 combination (see below for in_attr). */
14908 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14909 out_attr[i].i = in_attr[i].i;
14910 out_attr[Tag_ABI_HardFP_use].i
14911 = in_attr[Tag_ABI_HardFP_use].i;
14912 break;
14914 /* If the input has no requirement about FP hardware, do
14915 nothing. */
14916 else if (in_attr[i].i == 0)
14918 /* We used to assert that Tag_ABI_HardFP_use was
14919 zero here, but we should never assert when
14920 consuming an object file that has suspicious
14921 build attributes. The single precision variant
14922 of 'no FP architecture' is still 'no FP
14923 architecture', so we just ignore the tag in this
14924 case. */
14925 break;
14928 /* Both the input and the output have nonzero Tag_FP_arch.
14929 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14931 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14932 do nothing. */
14933 if (in_attr[Tag_ABI_HardFP_use].i == 0
14934 && out_attr[Tag_ABI_HardFP_use].i == 0)
14936 /* If the input and the output have different Tag_ABI_HardFP_use,
14937 the combination of them is 0 (implied by Tag_FP_arch). */
14938 else if (in_attr[Tag_ABI_HardFP_use].i
14939 != out_attr[Tag_ABI_HardFP_use].i)
14940 out_attr[Tag_ABI_HardFP_use].i = 0;
14942 /* Now we can handle Tag_FP_arch. */
14944 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14945 pick the biggest. */
14946 if (in_attr[i].i >= VFP_VERSION_COUNT
14947 && in_attr[i].i > out_attr[i].i)
14949 out_attr[i] = in_attr[i];
14950 break;
14952 /* The output uses the superset of input features
14953 (ISA version) and registers. */
14954 ver = vfp_versions[in_attr[i].i].ver;
14955 if (ver < vfp_versions[out_attr[i].i].ver)
14956 ver = vfp_versions[out_attr[i].i].ver;
14957 regs = vfp_versions[in_attr[i].i].regs;
14958 if (regs < vfp_versions[out_attr[i].i].regs)
14959 regs = vfp_versions[out_attr[i].i].regs;
14960 /* This assumes that all possible supersets are also valid
14961 options. */
14962 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14964 if (regs == vfp_versions[newval].regs
14965 && ver == vfp_versions[newval].ver)
14966 break;
14968 out_attr[i].i = newval;
14970 break;
14971 case Tag_PCS_config:
14972 if (out_attr[i].i == 0)
14973 out_attr[i].i = in_attr[i].i;
14974 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14976 /* It's sometimes ok to mix different configs, so this is only
14977 a warning. */
14978 _bfd_error_handler
14979 (_("warning: %pB: conflicting platform configuration"), ibfd);
14981 break;
14982 case Tag_ABI_PCS_R9_use:
14983 if (in_attr[i].i != out_attr[i].i
14984 && out_attr[i].i != AEABI_R9_unused
14985 && in_attr[i].i != AEABI_R9_unused)
14987 _bfd_error_handler
14988 (_("error: %pB: conflicting use of R9"), ibfd);
14989 result = false;
14991 if (out_attr[i].i == AEABI_R9_unused)
14992 out_attr[i].i = in_attr[i].i;
14993 break;
14994 case Tag_ABI_PCS_RW_data:
14995 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14996 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14997 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14999 _bfd_error_handler
15000 (_("error: %pB: SB relative addressing conflicts with use of R9"),
15001 ibfd);
15002 result = false;
15004 /* Use the smallest value specified. */
15005 if (in_attr[i].i < out_attr[i].i)
15006 out_attr[i].i = in_attr[i].i;
15007 break;
15008 case Tag_ABI_PCS_wchar_t:
15009 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
15010 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
15012 _bfd_error_handler
15013 (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
15014 ibfd, in_attr[i].i, out_attr[i].i);
15016 else if (in_attr[i].i && !out_attr[i].i)
15017 out_attr[i].i = in_attr[i].i;
15018 break;
15019 case Tag_ABI_enum_size:
15020 if (in_attr[i].i != AEABI_enum_unused)
15022 if (out_attr[i].i == AEABI_enum_unused
15023 || out_attr[i].i == AEABI_enum_forced_wide)
15025 /* The existing object is compatible with anything.
15026 Use whatever requirements the new object has. */
15027 out_attr[i].i = in_attr[i].i;
15029 else if (in_attr[i].i != AEABI_enum_forced_wide
15030 && out_attr[i].i != in_attr[i].i
15031 && !elf_arm_tdata (obfd)->no_enum_size_warning)
15033 static const char *aeabi_enum_names[] =
15034 { "", "variable-size", "32-bit", "" };
15035 const char *in_name =
15036 in_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
15037 ? aeabi_enum_names[in_attr[i].i]
15038 : "<unknown>";
15039 const char *out_name =
15040 out_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
15041 ? aeabi_enum_names[out_attr[i].i]
15042 : "<unknown>";
15043 _bfd_error_handler
15044 (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
15045 ibfd, in_name, out_name);
15048 break;
15049 case Tag_ABI_VFP_args:
15050 /* Already done. */
15051 break;
15052 case Tag_ABI_WMMX_args:
15053 if (in_attr[i].i != out_attr[i].i)
15055 _bfd_error_handler
15056 (_("error: %pB uses iWMMXt register arguments, %pB does not"),
15057 ibfd, obfd);
15058 result = false;
15060 break;
15061 case Tag_compatibility:
15062 /* Merged in target-independent code. */
15063 break;
15064 case Tag_ABI_HardFP_use:
15065 /* This is handled along with Tag_FP_arch. */
15066 break;
15067 case Tag_ABI_FP_16bit_format:
15068 if (in_attr[i].i != 0 && out_attr[i].i != 0)
15070 if (in_attr[i].i != out_attr[i].i)
15072 _bfd_error_handler
15073 (_("error: fp16 format mismatch between %pB and %pB"),
15074 ibfd, obfd);
15075 result = false;
15078 if (in_attr[i].i != 0)
15079 out_attr[i].i = in_attr[i].i;
15080 break;
15082 case Tag_DIV_use:
15083 /* A value of zero on input means that the divide instruction may
15084 be used if available in the base architecture as specified via
15085 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
15086 the user did not want divide instructions. A value of 2
15087 explicitly means that divide instructions were allowed in ARM
15088 and Thumb state. */
15089 if (in_attr[i].i == out_attr[i].i)
15090 /* Do nothing. */ ;
15091 else if (elf32_arm_attributes_forbid_div (in_attr)
15092 && !elf32_arm_attributes_accept_div (out_attr))
15093 out_attr[i].i = 1;
15094 else if (elf32_arm_attributes_forbid_div (out_attr)
15095 && elf32_arm_attributes_accept_div (in_attr))
15096 out_attr[i].i = in_attr[i].i;
15097 else if (in_attr[i].i == 2)
15098 out_attr[i].i = in_attr[i].i;
15099 break;
15101 case Tag_MPextension_use_legacy:
15102 /* We don't output objects with Tag_MPextension_use_legacy - we
15103 move the value to Tag_MPextension_use. */
15104 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
15106 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
15108 _bfd_error_handler
15109 (_("%pB has both the current and legacy "
15110 "Tag_MPextension_use attributes"),
15111 ibfd);
15112 result = false;
15116 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
15117 out_attr[Tag_MPextension_use] = in_attr[i];
15119 break;
15121 case Tag_nodefaults:
15122 /* This tag is set if it exists, but the value is unused (and is
15123 typically zero). We don't actually need to do anything here -
15124 the merge happens automatically when the type flags are merged
15125 below. */
15126 break;
15127 case Tag_also_compatible_with:
15128 /* Already done in Tag_CPU_arch. */
15129 break;
15130 case Tag_conformance:
15131 /* Keep the attribute if it matches. Throw it away otherwise.
15132 No attribute means no claim to conform. */
15133 if (!in_attr[i].s || !out_attr[i].s
15134 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
15135 out_attr[i].s = NULL;
15136 break;
15138 default:
15139 result
15140 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
15143 /* If out_attr was copied from in_attr then it won't have a type yet. */
15144 if (in_attr[i].type && !out_attr[i].type)
15145 out_attr[i].type = in_attr[i].type;
15148 /* Merge Tag_compatibility attributes and any common GNU ones. */
15149 if (!_bfd_elf_merge_object_attributes (ibfd, info))
15150 return false;
15152 /* Check for any attributes not known on ARM. */
15153 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
15155 return result;
15159 /* Return TRUE if the two EABI versions are compatible. */
15161 static bool
15162 elf32_arm_versions_compatible (unsigned iver, unsigned over)
15164 /* v4 and v5 are the same spec before and after it was released,
15165 so allow mixing them. */
15166 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
15167 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
15168 return true;
15170 return (iver == over);
15173 /* Merge backend specific data from an object file to the output
15174 object file when linking. */
15176 static bool
15177 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
15179 /* Display the flags field. */
15181 static bool
15182 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
15184 FILE * file = (FILE *) ptr;
15185 unsigned long flags;
15187 BFD_ASSERT (abfd != NULL && ptr != NULL);
15189 /* Print normal ELF private data. */
15190 _bfd_elf_print_private_bfd_data (abfd, ptr);
15192 flags = elf_elfheader (abfd)->e_flags;
15193 /* Ignore init flag - it may not be set, despite the flags field
15194 containing valid data. */
15196 fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);
15198 switch (EF_ARM_EABI_VERSION (flags))
15200 case EF_ARM_EABI_UNKNOWN:
15201 /* The following flag bits are GNU extensions and not part of the
15202 official ARM ELF extended ABI. Hence they are only decoded if
15203 the EABI version is not set. */
15204 if (flags & EF_ARM_INTERWORK)
15205 fprintf (file, _(" [interworking enabled]"));
15207 if (flags & EF_ARM_APCS_26)
15208 fprintf (file, " [APCS-26]");
15209 else
15210 fprintf (file, " [APCS-32]");
15212 if (flags & EF_ARM_VFP_FLOAT)
15213 fprintf (file, _(" [VFP float format]"));
15214 else
15215 fprintf (file, _(" [FPA float format]"));
15217 if (flags & EF_ARM_APCS_FLOAT)
15218 fprintf (file, _(" [floats passed in float registers]"));
15220 if (flags & EF_ARM_PIC)
15221 fprintf (file, _(" [position independent]"));
15223 if (flags & EF_ARM_NEW_ABI)
15224 fprintf (file, _(" [new ABI]"));
15226 if (flags & EF_ARM_OLD_ABI)
15227 fprintf (file, _(" [old ABI]"));
15229 if (flags & EF_ARM_SOFT_FLOAT)
15230 fprintf (file, _(" [software FP]"));
15232 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
15233 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
15234 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT);
15235 break;
15237 case EF_ARM_EABI_VER1:
15238 fprintf (file, _(" [Version1 EABI]"));
15240 if (flags & EF_ARM_SYMSARESORTED)
15241 fprintf (file, _(" [sorted symbol table]"));
15242 else
15243 fprintf (file, _(" [unsorted symbol table]"));
15245 flags &= ~ EF_ARM_SYMSARESORTED;
15246 break;
15248 case EF_ARM_EABI_VER2:
15249 fprintf (file, _(" [Version2 EABI]"));
15251 if (flags & EF_ARM_SYMSARESORTED)
15252 fprintf (file, _(" [sorted symbol table]"));
15253 else
15254 fprintf (file, _(" [unsorted symbol table]"));
15256 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
15257 fprintf (file, _(" [dynamic symbols use segment index]"));
15259 if (flags & EF_ARM_MAPSYMSFIRST)
15260 fprintf (file, _(" [mapping symbols precede others]"));
15262 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
15263 | EF_ARM_MAPSYMSFIRST);
15264 break;
15266 case EF_ARM_EABI_VER3:
15267 fprintf (file, _(" [Version3 EABI]"));
15268 break;
15270 case EF_ARM_EABI_VER4:
15271 fprintf (file, _(" [Version4 EABI]"));
15272 goto eabi;
15274 case EF_ARM_EABI_VER5:
15275 fprintf (file, _(" [Version5 EABI]"));
15277 if (flags & EF_ARM_ABI_FLOAT_SOFT)
15278 fprintf (file, _(" [soft-float ABI]"));
15280 if (flags & EF_ARM_ABI_FLOAT_HARD)
15281 fprintf (file, _(" [hard-float ABI]"));
15283 flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
15285 eabi:
15286 if (flags & EF_ARM_BE8)
15287 fprintf (file, _(" [BE8]"));
15289 if (flags & EF_ARM_LE8)
15290 fprintf (file, _(" [LE8]"));
15292 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
15293 break;
15295 default:
15296 fprintf (file, _(" <EABI version unrecognised>"));
15297 break;
15300 flags &= ~ EF_ARM_EABIMASK;
15302 if (flags & EF_ARM_RELEXEC)
15303 fprintf (file, _(" [relocatable executable]"));
15305 if (flags & EF_ARM_PIC)
15306 fprintf (file, _(" [position independent]"));
15308 if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
15309 fprintf (file, _(" [FDPIC ABI supplement]"));
15311 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);
15313 if (flags)
15314 fprintf (file, _(" <Unrecognised flag bits set>"));
15316 fputc ('\n', file);
15318 return true;
15321 static int
15322 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15324 switch (ELF_ST_TYPE (elf_sym->st_info))
15326 case STT_ARM_TFUNC:
15327 return ELF_ST_TYPE (elf_sym->st_info);
15329 case STT_ARM_16BIT:
15330 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15331 This allows us to distinguish between data used by Thumb instructions
15332 and non-data (which is probably code) inside Thumb regions of an
15333 executable. */
15334 if (type != STT_OBJECT && type != STT_TLS)
15335 return ELF_ST_TYPE (elf_sym->st_info);
15336 break;
15338 default:
15339 break;
15342 return type;
15345 static asection *
15346 elf32_arm_gc_mark_hook (asection *sec,
15347 struct bfd_link_info *info,
15348 Elf_Internal_Rela *rel,
15349 struct elf_link_hash_entry *h,
15350 Elf_Internal_Sym *sym)
15352 if (h != NULL)
15353 switch (ELF32_R_TYPE (rel->r_info))
15355 case R_ARM_GNU_VTINHERIT:
15356 case R_ARM_GNU_VTENTRY:
15357 return NULL;
15360 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15363 /* Look through the relocs for a section during the first phase. */
15365 static bool
15366 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15367 asection *sec, const Elf_Internal_Rela *relocs)
15369 Elf_Internal_Shdr *symtab_hdr;
15370 struct elf_link_hash_entry **sym_hashes;
15371 const Elf_Internal_Rela *rel;
15372 const Elf_Internal_Rela *rel_end;
15373 bfd *dynobj;
15374 asection *sreloc;
15375 struct elf32_arm_link_hash_table *htab;
15376 bool call_reloc_p;
15377 bool may_become_dynamic_p;
15378 bool may_need_local_target_p;
15379 unsigned long nsyms;
15381 if (bfd_link_relocatable (info))
15382 return true;
15384 BFD_ASSERT (is_arm_elf (abfd));
15386 htab = elf32_arm_hash_table (info);
15387 if (htab == NULL)
15388 return false;
15390 sreloc = NULL;
15392 if (htab->root.dynobj == NULL)
15393 htab->root.dynobj = abfd;
15394 if (!create_ifunc_sections (info))
15395 return false;
15397 dynobj = htab->root.dynobj;
15399 symtab_hdr = & elf_symtab_hdr (abfd);
15400 sym_hashes = elf_sym_hashes (abfd);
15401 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15403 rel_end = relocs + sec->reloc_count;
15404 for (rel = relocs; rel < rel_end; rel++)
15406 Elf_Internal_Sym *isym;
15407 struct elf_link_hash_entry *h;
15408 struct elf32_arm_link_hash_entry *eh;
15409 unsigned int r_symndx;
15410 int r_type;
15412 r_symndx = ELF32_R_SYM (rel->r_info);
15413 r_type = ELF32_R_TYPE (rel->r_info);
15414 r_type = arm_real_reloc_type (htab, r_type);
15416 if (r_symndx >= nsyms
15417 /* PR 9934: It is possible to have relocations that do not
15418 refer to symbols, thus it is also possible to have an
15419 object file containing relocations but no symbol table. */
15420 && (r_symndx > STN_UNDEF || nsyms > 0))
15422 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15423 r_symndx);
15424 return false;
15427 h = NULL;
15428 isym = NULL;
15429 if (nsyms > 0)
15431 if (r_symndx < symtab_hdr->sh_info)
15433 /* A local symbol. */
15434 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
15435 abfd, r_symndx);
15436 if (isym == NULL)
15437 return false;
15439 else
15441 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15442 while (h->root.type == bfd_link_hash_indirect
15443 || h->root.type == bfd_link_hash_warning)
15444 h = (struct elf_link_hash_entry *) h->root.u.i.link;
15448 eh = (struct elf32_arm_link_hash_entry *) h;
15450 call_reloc_p = false;
15451 may_become_dynamic_p = false;
15452 may_need_local_target_p = false;
15454 /* Could be done earlier, if h were already available. */
15455 r_type = elf32_arm_tls_transition (info, r_type, h);
15456 switch (r_type)
15458 case R_ARM_GOTOFFFUNCDESC:
15460 if (h == NULL)
15462 if (!elf32_arm_allocate_local_sym_info (abfd))
15463 return false;
15464 if (r_symndx >= elf32_arm_num_entries (abfd))
15465 return false;
15466 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].gotofffuncdesc_cnt += 1;
15467 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
15469 else
15471 eh->fdpic_cnts.gotofffuncdesc_cnt++;
15474 break;
15476 case R_ARM_GOTFUNCDESC:
15478 if (h == NULL)
15480 /* Such a relocation is not supposed to be generated
15481 by gcc for a static function. It could be handled
15482 here if that ever proved necessary. */
15483 return false;
15485 else
15487 eh->fdpic_cnts.gotfuncdesc_cnt++;
15490 break;
15492 case R_ARM_FUNCDESC:
15494 if (h == NULL)
15496 if (!elf32_arm_allocate_local_sym_info (abfd))
15497 return false;
15498 if (r_symndx >= elf32_arm_num_entries (abfd))
15499 return false;
15500 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_cnt += 1;
15501 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
15503 else
15505 eh->fdpic_cnts.funcdesc_cnt++;
15508 break;
15510 case R_ARM_GOT32:
15511 case R_ARM_GOT_PREL:
15512 case R_ARM_TLS_GD32:
15513 case R_ARM_TLS_GD32_FDPIC:
15514 case R_ARM_TLS_IE32:
15515 case R_ARM_TLS_IE32_FDPIC:
15516 case R_ARM_TLS_GOTDESC:
15517 case R_ARM_TLS_DESCSEQ:
15518 case R_ARM_THM_TLS_DESCSEQ:
15519 case R_ARM_TLS_CALL:
15520 case R_ARM_THM_TLS_CALL:
15521 /* This symbol requires a global offset table entry. */
15523 int tls_type, old_tls_type;
15525 switch (r_type)
15527 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15528 case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15530 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15531 case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15533 case R_ARM_TLS_GOTDESC:
15534 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15535 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15536 tls_type = GOT_TLS_GDESC; break;
15538 default: tls_type = GOT_NORMAL; break;
15541 if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15542 info->flags |= DF_STATIC_TLS;
15544 if (h != NULL)
15546 h->got.refcount++;
15547 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15549 else
15551 /* This is a global offset table entry for a local symbol. */
15552 if (!elf32_arm_allocate_local_sym_info (abfd))
15553 return false;
15554 if (r_symndx >= elf32_arm_num_entries (abfd))
15556 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15557 r_symndx);
15558 return false;
15561 elf_local_got_refcounts (abfd)[r_symndx] += 1;
15562 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15565 /* If a variable is accessed with both tls methods, two
15566 slots may be created. */
15567 if (GOT_TLS_GD_ANY_P (old_tls_type)
15568 && GOT_TLS_GD_ANY_P (tls_type))
15569 tls_type |= old_tls_type;
15571 /* We will already have issued an error message if there
15572 is a TLS/non-TLS mismatch, based on the symbol
15573 type. So just combine any TLS types needed. */
15574 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15575 && tls_type != GOT_NORMAL)
15576 tls_type |= old_tls_type;
15578 /* If the symbol is accessed with both the IE and GDESC
15579 methods, we can relax to IE. Turn off the GDESC flag
15580 without disturbing any other TLS types that may be
15581 involved. */
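/* For example, GOT_TLS_IE | GOT_TLS_GDESC collapses to plain GOT_TLS_IE,
   while a GD + GDESC combination is left untouched. */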
15582 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15583 tls_type &= ~GOT_TLS_GDESC;
15585 if (old_tls_type != tls_type)
15587 if (h != NULL)
15588 elf32_arm_hash_entry (h)->tls_type = tls_type;
15589 else
15590 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15593 /* Fall through. */
15595 case R_ARM_TLS_LDM32:
15596 case R_ARM_TLS_LDM32_FDPIC:
15597 if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15598 htab->tls_ldm_got.refcount++;
15599 /* Fall through. */
15601 case R_ARM_GOTOFF32:
15602 case R_ARM_GOTPC:
15603 if (htab->root.sgot == NULL
15604 && !create_got_section (htab->root.dynobj, info))
15605 return false;
15606 break;
15608 case R_ARM_PC24:
15609 case R_ARM_PLT32:
15610 case R_ARM_CALL:
15611 case R_ARM_JUMP24:
15612 case R_ARM_PREL31:
15613 case R_ARM_THM_CALL:
15614 case R_ARM_THM_JUMP24:
15615 case R_ARM_THM_JUMP19:
15616 call_reloc_p = true;
15617 may_need_local_target_p = true;
15618 break;
15620 case R_ARM_ABS12:
15621 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15622 ldr __GOTT_INDEX__ offsets. */
15623 if (htab->root.target_os != is_vxworks)
15625 may_need_local_target_p = true;
15626 break;
15628 else goto jump_over;
15630 /* Fall through. */
15632 case R_ARM_MOVW_ABS_NC:
15633 case R_ARM_MOVT_ABS:
15634 case R_ARM_THM_MOVW_ABS_NC:
15635 case R_ARM_THM_MOVT_ABS:
15636 if (bfd_link_pic (info))
15638 _bfd_error_handler
15639 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15640 abfd, elf32_arm_howto_table_1[r_type].name,
15641 (h) ? h->root.root.string : "a local symbol");
15642 bfd_set_error (bfd_error_bad_value);
15643 return false;
15646 /* Fall through. */
15647 case R_ARM_ABS32:
15648 case R_ARM_ABS32_NOI:
15649 jump_over:
15650 if (h != NULL && bfd_link_executable (info))
15652 h->pointer_equality_needed = 1;
15654 /* Fall through. */
15655 case R_ARM_REL32:
15656 case R_ARM_REL32_NOI:
15657 case R_ARM_MOVW_PREL_NC:
15658 case R_ARM_MOVT_PREL:
15659 case R_ARM_THM_MOVW_PREL_NC:
15660 case R_ARM_THM_MOVT_PREL:
15662 /* Should the interworking branches be listed here? */
15663 if ((bfd_link_pic (info)
15664 || htab->fdpic_p)
15665 && (sec->flags & SEC_ALLOC) != 0)
15667 if (h == NULL
15668 && elf32_arm_howto_from_type (r_type)->pc_relative)
15670 /* In shared libraries and relocatable executables,
15671 we treat local relative references as calls;
15672 see the related SYMBOL_CALLS_LOCAL code in
15673 allocate_dynrelocs. */
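/* A local PC-relative reference resolves at static link time, so no
   dynamic relocation has to be emitted for it; the pc_count handling in
   allocate_dynrelocs_for_symbol discards such relocs again. */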
15674 call_reloc_p = true;
15675 may_need_local_target_p = true;
15677 else
15678 /* We are creating a shared library or relocatable
15679 executable, and this is a reloc against a global symbol,
15680 or a non-PC-relative reloc against a local symbol.
15681 We may need to copy the reloc into the output. */
15682 may_become_dynamic_p = true;
15684 else
15685 may_need_local_target_p = true;
15686 break;
15688 /* This relocation describes the C++ object vtable hierarchy.
15689 Reconstruct it for later use during GC. */
15690 case R_ARM_GNU_VTINHERIT:
15691 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15692 return false;
15693 break;
15695 /* This relocation describes which C++ vtable entries are actually
15696 used. Record for later use during GC. */
15697 case R_ARM_GNU_VTENTRY:
15698 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15699 return false;
15700 break;
15703 if (h != NULL)
15705 if (call_reloc_p)
15706 /* We may need a .plt entry if the function this reloc
15707 refers to is in a different object, regardless of the
15708 symbol's type. We can't tell for sure yet, because
15709 something later might force the symbol local. */
15710 h->needs_plt = 1;
15711 else if (may_need_local_target_p)
15712 /* If this reloc is in a read-only section, we might
15713 need a copy reloc. We can't check reliably at this
15714 stage whether the section is read-only, as input
15715 sections have not yet been mapped to output sections.
15716 Tentatively set the flag for now, and correct in
15717 adjust_dynamic_symbol. */
15718 h->non_got_ref = 1;
15721 if (may_need_local_target_p
15722 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15724 union gotplt_union *root_plt;
15725 struct arm_plt_info *arm_plt;
15726 struct arm_local_iplt_info *local_iplt;
15728 if (h != NULL)
15730 root_plt = &h->plt;
15731 arm_plt = &eh->plt;
15733 else
15735 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15736 if (local_iplt == NULL)
15737 return false;
15738 root_plt = &local_iplt->root;
15739 arm_plt = &local_iplt->arm;
15742 /* If the symbol is a function that doesn't bind locally,
15743 this relocation will need a PLT entry. */
15744 if (root_plt->refcount != -1)
15745 root_plt->refcount += 1;
15747 if (!call_reloc_p)
15748 arm_plt->noncall_refcount++;
15750 /* It's too early to use htab->use_blx here, so we have to
15751 record possible blx references separately from
15752 relocs that definitely need a thumb stub. */
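/* R_ARM_THM_CALL can presumably be converted to BLX if the target allows
   it, hence the separate maybe_thumb_refcount; Thumb jumps
   (R_ARM_THM_JUMP24/JUMP19) have no BLX form and so count towards
   thumb_refcount, the relocs that definitely need a stub. */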
15754 if (r_type == R_ARM_THM_CALL)
15755 arm_plt->maybe_thumb_refcount += 1;
15757 if (r_type == R_ARM_THM_JUMP24
15758 || r_type == R_ARM_THM_JUMP19)
15759 arm_plt->thumb_refcount += 1;
15762 if (may_become_dynamic_p)
15764 struct elf_dyn_relocs *p, **head;
15766 /* Create a reloc section in dynobj. */
15767 if (sreloc == NULL)
15769 sreloc = _bfd_elf_make_dynamic_reloc_section
15770 (sec, dynobj, 2, abfd, ! htab->use_rel);
15772 if (sreloc == NULL)
15773 return false;
15776 /* If this is a global symbol, count the number of
15777 relocations we need for this symbol. */
15778 if (h != NULL)
15779 head = &h->dyn_relocs;
15780 else
15782 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15783 if (head == NULL)
15784 return false;
15787 p = *head;
15788 if (p == NULL || p->sec != sec)
15790 size_t amt = sizeof *p;
15792 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15793 if (p == NULL)
15794 return false;
15795 p->next = *head;
15796 *head = p;
15797 p->sec = sec;
15798 p->count = 0;
15799 p->pc_count = 0;
15802 if (elf32_arm_howto_from_type (r_type)->pc_relative)
15803 p->pc_count += 1;
15804 p->count += 1;
15805 if (h == NULL && htab->fdpic_p && !bfd_link_pic (info)
15806 && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI)
15808 /* Only R_ARM_ABS32 and R_ARM_ABS32_NOI are supported here,
15809 because we assume that every such relocation will become
15810 a rofixup. */
15811 _bfd_error_handler
15812 (_("FDPIC does not yet support %s relocation"
15813 " to become dynamic for executable"),
15814 elf32_arm_howto_table_1[r_type].name);
15815 abort ();
15820 return true;
15823 static void
15824 elf32_arm_update_relocs (asection *o,
15825 struct bfd_elf_section_reloc_data *reldata)
15827 void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
15828 void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
15829 const struct elf_backend_data *bed;
15830 _arm_elf_section_data *eado;
15831 struct bfd_link_order *p;
15832 bfd_byte *erela_head, *erela;
15833 Elf_Internal_Rela *irela_head, *irela;
15834 Elf_Internal_Shdr *rel_hdr;
15835 bfd *abfd;
15836 unsigned int count;
15838 eado = get_arm_elf_section_data (o);
15840 if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
15841 return;
15843 abfd = o->owner;
15844 bed = get_elf_backend_data (abfd);
15845 rel_hdr = reldata->hdr;
15847 if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
15849 swap_in = bed->s->swap_reloc_in;
15850 swap_out = bed->s->swap_reloc_out;
15852 else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
15854 swap_in = bed->s->swap_reloca_in;
15855 swap_out = bed->s->swap_reloca_out;
15857 else
15858 abort ();
15860 erela_head = rel_hdr->contents;
15861 irela_head = (Elf_Internal_Rela *) bfd_zmalloc
15862 ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));
15864 erela = erela_head;
15865 irela = irela_head;
15866 count = 0;
15868 for (p = o->map_head.link_order; p; p = p->next)
15870 if (p->type == bfd_section_reloc_link_order
15871 || p->type == bfd_symbol_reloc_link_order)
15873 (*swap_in) (abfd, erela, irela);
15874 erela += rel_hdr->sh_entsize;
15875 irela++;
15876 count++;
15878 else if (p->type == bfd_indirect_link_order)
15880 struct bfd_elf_section_reloc_data *input_reldata;
15881 arm_unwind_table_edit *edit_list, *edit_tail;
15882 _arm_elf_section_data *eadi;
15883 bfd_size_type j;
15884 bfd_vma offset;
15885 asection *i;
15887 i = p->u.indirect.section;
15889 eadi = get_arm_elf_section_data (i);
15890 edit_list = eadi->u.exidx.unwind_edit_list;
15891 edit_tail = eadi->u.exidx.unwind_edit_tail;
15892 offset = i->output_offset;
15894 if (eadi->elf.rel.hdr &&
15895 eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
15896 input_reldata = &eadi->elf.rel;
15897 else if (eadi->elf.rela.hdr &&
15898 eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
15899 input_reldata = &eadi->elf.rela;
15900 else
15901 abort ();
15903 if (edit_list)
15905 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15907 arm_unwind_table_edit *edit_node, *edit_next;
15908 bfd_vma bias;
15909 bfd_vma reloc_index;
15911 (*swap_in) (abfd, erela, irela);
15912 reloc_index = (irela->r_offset - offset) / 8;
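/* Each .ARM.exidx entry is 8 bytes, so reloc_index is the unwind-table
   entry this relocation applies to. The loop below counts the edit-list
   nodes at or before that index (bias); relocations for deleted entries
   are dropped, and the offsets of the rest are adjusted down by
   bias * 8. */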
15914 bias = 0;
15915 edit_node = edit_list;
15916 for (edit_next = edit_list;
15917 edit_next && edit_next->index <= reloc_index;
15918 edit_next = edit_node->next)
15920 bias++;
15921 edit_node = edit_next;
15924 if (edit_node->type != DELETE_EXIDX_ENTRY
15925 || edit_node->index != reloc_index)
15927 irela->r_offset -= bias * 8;
15928 irela++;
15929 count++;
15932 erela += rel_hdr->sh_entsize;
15935 if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
15938 /* New relocation entry. */
15938 asection *text_sec = edit_tail->linked_section;
15939 asection *text_out = text_sec->output_section;
15940 bfd_vma exidx_offset = offset + i->size - 8;
15942 irela->r_addend = 0;
15943 irela->r_offset = exidx_offset;
15944 irela->r_info = ELF32_R_INFO
15945 (text_out->target_index, R_ARM_PREL31);
15946 irela++;
15947 count++;
15950 else
15952 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15954 (*swap_in) (abfd, erela, irela);
15955 erela += rel_hdr->sh_entsize;
15956 irela++;
15959 count += NUM_SHDR_ENTRIES (input_reldata->hdr);
15964 reldata->count = count;
15965 rel_hdr->sh_size = count * rel_hdr->sh_entsize;
15967 erela = erela_head;
15968 irela = irela_head;
15969 while (count > 0)
15971 (*swap_out) (abfd, irela, erela);
15972 erela += rel_hdr->sh_entsize;
15973 irela++;
15974 count--;
15977 free (irela_head);
15979 /* Hashes are no longer valid. */
15980 free (reldata->hashes);
15981 reldata->hashes = NULL;
15984 /* Unwinding tables are not referenced directly. This pass marks them as
15985 required if the corresponding code section is marked. Similarly, ARMv8-M
15986 secure entry functions can only be referenced by SG veneers which are
15987 created after the GC process. They need to be marked in case they reside in
15988 their own section (as would be the case if code was compiled with
15989 -ffunction-sections). */
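/* Secure entry functions are located by scanning the global symbols for
   the CMSE special-symbol prefix; the sections defining them, and the
   debug sections of the objects that contain them, are then marked by
   hand below. */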
15991 static bool
15992 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
15993 elf_gc_mark_hook_fn gc_mark_hook)
15995 bfd *sub;
15996 Elf_Internal_Shdr **elf_shdrp;
15997 asection *cmse_sec;
15998 obj_attribute *out_attr;
15999 Elf_Internal_Shdr *symtab_hdr;
16000 unsigned i, sym_count, ext_start;
16001 const struct elf_backend_data *bed;
16002 struct elf_link_hash_entry **sym_hashes;
16003 struct elf32_arm_link_hash_entry *cmse_hash;
16004 bool again, is_v8m, first_bfd_browse = true;
16005 bool extra_marks_added = false;
16006 asection *isec;
16008 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
16010 out_attr = elf_known_obj_attributes_proc (info->output_bfd);
16011 is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
16012 && out_attr[Tag_CPU_arch_profile].i == 'M';
16014 /* Marking EH data may cause additional code sections to be marked,
16015 requiring multiple passes. */
16016 again = true;
16017 while (again)
16019 again = false;
16020 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
16022 asection *o;
16024 if (! is_arm_elf (sub))
16025 continue;
16027 elf_shdrp = elf_elfsections (sub);
16028 for (o = sub->sections; o != NULL; o = o->next)
16030 Elf_Internal_Shdr *hdr;
16032 hdr = &elf_section_data (o)->this_hdr;
16033 if (hdr->sh_type == SHT_ARM_EXIDX
16034 && hdr->sh_link
16035 && hdr->sh_link < elf_numsections (sub)
16036 && !o->gc_mark
16037 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
16039 again = true;
16040 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
16041 return false;
16045 /* Mark the sections holding ARMv8-M secure entry functions. We mark
16046 all of them, so there is no need for a second pass. */
16047 if (is_v8m && first_bfd_browse)
16049 bool debug_sec_need_to_be_marked = false;
16051 sym_hashes = elf_sym_hashes (sub);
16052 bed = get_elf_backend_data (sub);
16053 symtab_hdr = &elf_tdata (sub)->symtab_hdr;
16054 sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
16055 ext_start = symtab_hdr->sh_info;
16057 /* Scan symbols. */
16058 for (i = ext_start; i < sym_count; i++)
16060 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
16061 if (cmse_hash == NULL)
16062 continue;
16064 /* Assume it is a special symbol. If not, cmse_scan will
16065 warn about it and the user can do something about it. */
16066 if (startswith (cmse_hash->root.root.root.string,
16067 CMSE_PREFIX))
16069 cmse_sec = cmse_hash->root.root.u.def.section;
16070 if (!cmse_sec->gc_mark
16071 && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
16072 return false;
16073 /* The debug sections related to these secure entry
16074 functions are marked when the flag below is set. */
16075 debug_sec_need_to_be_marked = true;
16079 if (debug_sec_need_to_be_marked)
16081 /* Loop over all the sections of the object file containing
16082 Armv8-M secure entry functions and mark all of its debug
16083 sections. */
16084 for (isec = sub->sections; isec != NULL; isec = isec->next)
16086 /* If it is not a debug section (or is already marked), skip it. */
16087 if (!isec->gc_mark && (isec->flags & SEC_DEBUGGING))
16089 isec->gc_mark = 1;
16090 extra_marks_added = true;
16093 debug_sec_need_to_be_marked = false;
16098 first_bfd_browse = false;
16101 /* PR 30354: If we have added extra marks then make sure that any
16102 dependencies of the newly marked sections are also marked. */
16103 if (extra_marks_added)
16104 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
16106 return true;
16109 /* Treat mapping symbols as special target symbols. */
16111 static bool
16112 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
16114 return bfd_is_arm_special_symbol_name (sym->name,
16115 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
16118 /* If the ELF symbol SYM might be a function in SEC, return the
16119 function size and set *CODE_OFF to the function's entry point,
16120 otherwise return zero. */
16122 static bfd_size_type
16123 elf32_arm_maybe_function_sym (const asymbol *sym, asection *sec,
16124 bfd_vma *code_off)
16126 bfd_size_type size;
16127 elf_symbol_type * elf_sym = (elf_symbol_type *) sym;
16129 if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
16130 | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
16131 || sym->section != sec)
16132 return 0;
16134 size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size;
16136 if (!(sym->flags & BSF_SYNTHETIC))
16137 switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info))
16139 case STT_NOTYPE:
16140 /* Ignore symbols created by the annobin plugin for gcc and clang.
16141 These symbols are hidden, local, notype and have a size of 0. */
16142 if (size == 0
16143 && sym->flags & BSF_LOCAL
16144 && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN)
16145 return 0;
16146 /* Fall through. */
16147 case STT_FUNC:
16148 case STT_ARM_TFUNC:
16149 /* FIXME: Allow STT_GNU_IFUNC as well ? */
16150 break;
16151 default:
16152 return 0;
16155 if ((sym->flags & BSF_LOCAL)
16156 && bfd_is_arm_special_symbol_name (sym->name,
16157 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
16158 return 0;
16160 *code_off = sym->value;
16162 /* Do not return 0 for the function's size. */
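/* (A return value of zero would mean "not a function", so zero-sized
   function symbols are reported with a nominal size of 1.) */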
16163 return size ? size : 1;
16167 static bool
16168 elf32_arm_find_inliner_info (bfd * abfd,
16169 const char ** filename_ptr,
16170 const char ** functionname_ptr,
16171 unsigned int * line_ptr)
16173 bool found;
16174 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
16175 functionname_ptr, line_ptr,
16176 & elf_tdata (abfd)->dwarf2_find_line_info);
16177 return found;
16180 /* Adjust a symbol defined by a dynamic object and referenced by a
16181 regular object. The current definition is in some section of the
16182 dynamic object, but we're not including those sections. We have to
16183 change the definition to something the rest of the link can
16184 understand. */
16186 static bool
16187 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
16188 struct elf_link_hash_entry * h)
16190 bfd * dynobj;
16191 asection *s, *srel;
16192 struct elf32_arm_link_hash_entry * eh;
16193 struct elf32_arm_link_hash_table *globals;
16195 globals = elf32_arm_hash_table (info);
16196 if (globals == NULL)
16197 return false;
16199 dynobj = elf_hash_table (info)->dynobj;
16201 /* Make sure we know what is going on here. */
16202 BFD_ASSERT (dynobj != NULL
16203 && (h->needs_plt
16204 || h->type == STT_GNU_IFUNC
16205 || h->is_weakalias
16206 || (h->def_dynamic
16207 && h->ref_regular
16208 && !h->def_regular)));
16210 eh = (struct elf32_arm_link_hash_entry *) h;
16212 /* If this is a function, put it in the procedure linkage table. We
16213 will fill in the contents of the procedure linkage table later,
16214 when we know the address of the .got section. */
16215 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
16217 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
16218 symbol binds locally. */
16219 if (h->plt.refcount <= 0
16220 || (h->type != STT_GNU_IFUNC
16221 && (SYMBOL_CALLS_LOCAL (info, h)
16222 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16223 && h->root.type == bfd_link_hash_undefweak))))
16225 /* This case can occur if we saw a PLT32 reloc in an input
16226 file, but the symbol was never referred to by a dynamic
16227 object, or if all references were garbage collected. In
16228 such a case, we don't actually need to build a procedure
16229 linkage table, and we can just do a PC24 reloc instead. */
16230 h->plt.offset = (bfd_vma) -1;
16231 eh->plt.thumb_refcount = 0;
16232 eh->plt.maybe_thumb_refcount = 0;
16233 eh->plt.noncall_refcount = 0;
16234 h->needs_plt = 0;
16237 return true;
16239 else
16241 /* It's possible that we incorrectly decided a .plt reloc was
16242 needed for an R_ARM_PC24 or similar reloc to a non-function sym
16243 in check_relocs. We can't decide accurately between function
16244 and non-function syms in check_relocs; objects loaded later in
16245 the link may change h->type. So fix it now. */
16246 h->plt.offset = (bfd_vma) -1;
16247 eh->plt.thumb_refcount = 0;
16248 eh->plt.maybe_thumb_refcount = 0;
16249 eh->plt.noncall_refcount = 0;
16252 /* If this is a weak symbol, and there is a real definition, the
16253 processor independent code will have arranged for us to see the
16254 real definition first, and we can just use the same value. */
16255 if (h->is_weakalias)
16257 struct elf_link_hash_entry *def = weakdef (h);
16258 BFD_ASSERT (def->root.type == bfd_link_hash_defined);
16259 h->root.u.def.section = def->root.u.def.section;
16260 h->root.u.def.value = def->root.u.def.value;
16261 return true;
16264 /* If there are no non-GOT references, we do not need a copy
16265 relocation. */
16266 if (!h->non_got_ref)
16267 return true;
16269 /* This is a reference to a symbol defined by a dynamic object which
16270 is not a function. */
16272 /* If we are creating a shared library, we must presume that the
16273 only references to the symbol are via the global offset table.
16274 For such cases we need not do anything here; the relocations will
16275 be handled correctly by relocate_section. */
16276 if (bfd_link_pic (info))
16277 return true;
16279 /* We must allocate the symbol in our .dynbss section, which will
16280 become part of the .bss section of the executable. There will be
16281 an entry for this symbol in the .dynsym section. The dynamic
16282 object will contain position independent code, so all references
16283 from the dynamic object to this symbol will go through the global
16284 offset table. The dynamic linker will use the .dynsym entry to
16285 determine the address it must put in the global offset table, so
16286 both the dynamic object and the regular object will refer to the
16287 same memory location for the variable. */
16288 /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
16289 linker to copy the initial value out of the dynamic object and into
16290 the runtime process image. We need to remember the offset into the
16291 .rel(a).bss section we are going to use. */
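/* A definition that lives in a read-only section of the dynamic object
   is placed in sdynrelro and gets its copy reloc from sreldynrelro;
   writable definitions use sdynbss/srelbss instead (see below). */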
16292 if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
16294 s = globals->root.sdynrelro;
16295 srel = globals->root.sreldynrelro;
16297 else
16299 s = globals->root.sdynbss;
16300 srel = globals->root.srelbss;
16302 if (info->nocopyreloc == 0
16303 && (h->root.u.def.section->flags & SEC_ALLOC) != 0
16304 && h->size != 0)
16306 elf32_arm_allocate_dynrelocs (info, srel, 1);
16307 h->needs_copy = 1;
16310 return _bfd_elf_adjust_dynamic_copy (info, h, s);
16313 /* Allocate space in .plt, .got and associated reloc sections for
16314 dynamic relocs. */
16316 static bool
16317 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
16319 struct bfd_link_info *info;
16320 struct elf32_arm_link_hash_table *htab;
16321 struct elf32_arm_link_hash_entry *eh;
16322 struct elf_dyn_relocs *p;
16324 if (h->root.type == bfd_link_hash_indirect)
16325 return true;
16327 eh = (struct elf32_arm_link_hash_entry *) h;
16329 info = (struct bfd_link_info *) inf;
16330 htab = elf32_arm_hash_table (info);
16331 if (htab == NULL)
16332 return false;
16334 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
16335 && h->plt.refcount > 0)
16337 /* Make sure this symbol is output as a dynamic symbol.
16338 Undefined weak syms won't yet be marked as dynamic. */
16339 if (h->dynindx == -1 && !h->forced_local
16340 && h->root.type == bfd_link_hash_undefweak)
16342 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16343 return false;
16346 /* If the call in the PLT entry binds locally, the associated
16347 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16348 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
16349 than the .plt section. */
16350 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
16352 eh->is_iplt = 1;
16353 if (eh->plt.noncall_refcount == 0
16354 && SYMBOL_REFERENCES_LOCAL (info, h))
16355 /* All non-call references can be resolved directly.
16356 This means that they can (and in some cases, must)
16357 resolve directly to the run-time target, rather than
16358 to the PLT. That in turn means that any .got entry
16359 would be equal to the .igot.plt entry, so there's
16360 no point having both. */
16361 h->got.refcount = 0;
16364 if (bfd_link_pic (info)
16365 || eh->is_iplt
16366 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
16368 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
16370 /* If this symbol is not defined in a regular file, and we are
16371 not generating a shared library, then set the symbol to this
16372 location in the .plt. This is required to make function
16373 pointers compare as equal between the normal executable and
16374 the shared library. */
16375 if (! bfd_link_pic (info)
16376 && !h->def_regular)
16378 h->root.u.def.section = htab->root.splt;
16379 h->root.u.def.value = h->plt.offset;
16381 /* Make sure the function is not marked as Thumb, in case
16382 it is the target of an ABS32 relocation, which will
16383 point to the PLT entry. */
16384 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16387 /* VxWorks executables have a second set of relocations for
16388 each PLT entry. They go in a separate relocation section,
16389 which is processed by the kernel loader. */
16390 if (htab->root.target_os == is_vxworks && !bfd_link_pic (info))
16392 /* There is a relocation for the initial PLT entry:
16393 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
16394 if (h->plt.offset == htab->plt_header_size)
16395 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
16397 /* There are two extra relocations for each subsequent
16398 PLT entry: an R_ARM_32 relocation for the GOT entry,
16399 and an R_ARM_32 relocation for the PLT entry. */
16400 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
16403 else
16405 h->plt.offset = (bfd_vma) -1;
16406 h->needs_plt = 0;
16409 else
16411 h->plt.offset = (bfd_vma) -1;
16412 h->needs_plt = 0;
16415 eh = (struct elf32_arm_link_hash_entry *) h;
16416 eh->tlsdesc_got = (bfd_vma) -1;
16418 if (h->got.refcount > 0)
16420 asection *s;
16421 bool dyn;
16422 int tls_type = elf32_arm_hash_entry (h)->tls_type;
16423 int indx;
16425 /* Make sure this symbol is output as a dynamic symbol.
16426 Undefined weak syms won't yet be marked as dynamic. */
16427 if (htab->root.dynamic_sections_created
16428 && h->dynindx == -1
16429 && !h->forced_local
16430 && h->root.type == bfd_link_hash_undefweak)
16432 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16433 return false;
16436 s = htab->root.sgot;
16437 h->got.offset = s->size;
16439 if (tls_type == GOT_UNKNOWN)
16440 abort ();
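/* GOT space accounting, as below: a normal symbol takes one 4-byte slot,
   GD takes a pair of slots in .got, GDESC takes a pair of slots in
   .got.plt, and IE takes a single 4-byte slot. */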
16442 if (tls_type == GOT_NORMAL)
16443 /* Non-TLS symbols need one GOT slot. */
16444 s->size += 4;
16445 else
16447 if (tls_type & GOT_TLS_GDESC)
16449 /* R_ARM_TLS_DESC needs 2 GOT slots. */
16450 eh->tlsdesc_got
16451 = (htab->root.sgotplt->size
16452 - elf32_arm_compute_jump_table_size (htab));
16453 htab->root.sgotplt->size += 8;
16454 h->got.offset = (bfd_vma) -2;
16455 /* plt.got_offset needs to know there's a TLS_DESC
16456 reloc in the middle of .got.plt. */
16457 htab->num_tls_desc++;
16460 if (tls_type & GOT_TLS_GD)
16462 /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16463 consecutive GOT slots. If the symbol is both GD
16464 and GDESC, got.offset may have been
16465 overwritten. */
16466 h->got.offset = s->size;
16467 s->size += 8;
16470 if (tls_type & GOT_TLS_IE)
16471 /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16472 slot. */
16473 s->size += 4;
16476 dyn = htab->root.dynamic_sections_created;
16478 indx = 0;
16479 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
16480 && (!bfd_link_pic (info)
16481 || !SYMBOL_REFERENCES_LOCAL (info, h)))
16482 indx = h->dynindx;
16484 if (tls_type != GOT_NORMAL
16485 && (bfd_link_dll (info) || indx != 0)
16486 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16487 || h->root.type != bfd_link_hash_undefweak))
16489 if (tls_type & GOT_TLS_IE)
16490 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16492 if (tls_type & GOT_TLS_GD)
16493 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16495 if (tls_type & GOT_TLS_GDESC)
16497 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
16498 /* GDESC needs a trampoline to jump to. */
16499 htab->tls_trampoline = -1;
16502 /* Only GD needs it. GDESC just emits one relocation per
16503 2 entries. */
16504 if ((tls_type & GOT_TLS_GD) && indx != 0)
16505 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16507 else if (((indx != -1) || htab->fdpic_p)
16508 && !SYMBOL_REFERENCES_LOCAL (info, h))
16510 if (htab->root.dynamic_sections_created)
16511 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
16512 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16514 else if (h->type == STT_GNU_IFUNC
16515 && eh->plt.noncall_refcount == 0)
16516 /* No non-call references resolve to the STT_GNU_IFUNC's PLT entry;
16517 they all resolve dynamically instead. Reserve room for the
16518 GOT entry's R_ARM_IRELATIVE relocation. */
16519 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
16520 else if (bfd_link_pic (info)
16521 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16522 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
16523 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16524 else if (htab->fdpic_p && tls_type == GOT_NORMAL)
16525 /* Reserve room for rofixup for FDPIC executable. */
16526 /* TLS relocs do not need space since they are completely
16527 resolved. */
16528 htab->srofixup->size += 4;
16530 else
16531 h->got.offset = (bfd_vma) -1;
16533 /* FDPIC support. */
16534 if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
16536 /* The symbol mustn't be exported. */
16537 if (h->dynindx != -1)
16538 abort ();
16540 /* We only allocate one function descriptor with its associated
16541 relocation. */
16542 if (eh->fdpic_cnts.funcdesc_offset == -1)
16544 asection *s = htab->root.sgot;
16546 eh->fdpic_cnts.funcdesc_offset = s->size;
16547 s->size += 8;
16548 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16549 if (bfd_link_pic (info))
16550 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16551 else
16552 htab->srofixup->size += 8;
16556 if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
16558 asection *s = htab->root.sgot;
16560 if (htab->root.dynamic_sections_created && h->dynindx == -1
16561 && !h->forced_local)
16562 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16563 return false;
16565 if (h->dynindx == -1)
16567 /* We only allocate one function descriptor with its
16568 associated relocation. */
16569 if (eh->fdpic_cnts.funcdesc_offset == -1)
16572 eh->fdpic_cnts.funcdesc_offset = s->size;
16573 s->size += 8;
16574 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16575 rofixups. */
16576 if (bfd_link_pic (info))
16577 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16578 else
16579 htab->srofixup->size += 8;
16583 /* Add one entry into the GOT and a R_ARM_FUNCDESC or
16584 R_ARM_RELATIVE/rofixup relocation on it. */
16585 eh->fdpic_cnts.gotfuncdesc_offset = s->size;
16586 s->size += 4;
16587 if (h->dynindx == -1 && !bfd_link_pic (info))
16588 htab->srofixup->size += 4;
16589 else
16590 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16593 if (eh->fdpic_cnts.funcdesc_cnt > 0)
16595 if (htab->root.dynamic_sections_created && h->dynindx == -1
16596 && !h->forced_local)
16597 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16598 return false;
16600 if (h->dynindx == -1)
16602 /* We only allocate one function descriptor with its
16603 associated relocation. */
16604 if (eh->fdpic_cnts.funcdesc_offset == -1)
16606 asection *s = htab->root.sgot;
16608 eh->fdpic_cnts.funcdesc_offset = s->size;
16609 s->size += 8;
16610 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16611 rofixups. */
16612 if (bfd_link_pic (info))
16613 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16614 else
16615 htab->srofixup->size += 8;
16618 if (h->dynindx == -1 && !bfd_link_pic (info))
16620 /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
16621 htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
16623 else
16625 /* We will need one dynamic reloc per reference. It will be
16626 either R_ARM_FUNCDESC, or R_ARM_RELATIVE for hidden symbols. */
16627 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
16628 eh->fdpic_cnts.funcdesc_cnt);
16632 /* Allocate stubs for exported Thumb functions on v4t. */
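/* Without BLX, ARM callers in other objects cannot reach a Thumb
   definition directly, so the exported symbol is redirected to an
   ARM-state glue stub while a local __real_<name> alias (created below)
   keeps the original Thumb address. */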
16633 if (!htab->use_blx && h->dynindx != -1
16634 && h->def_regular
16635 && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
16636 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
16638 struct elf_link_hash_entry * th;
16639 struct bfd_link_hash_entry * bh;
16640 struct elf_link_hash_entry * myh;
16641 char name[1024];
16642 asection *s;
16643 bh = NULL;
16644 /* Create a new symbol to register the real location of the function. */
16645 s = h->root.u.def.section;
16646 sprintf (name, "__real_%s", h->root.root.string);
16647 _bfd_generic_link_add_one_symbol (info, s->owner,
16648 name, BSF_GLOBAL, s,
16649 h->root.u.def.value,
16650 NULL, true, false, &bh);
16652 myh = (struct elf_link_hash_entry *) bh;
16653 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16654 myh->forced_local = 1;
16655 ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
16656 eh->export_glue = myh;
16657 th = record_arm_to_thumb_glue (info, h);
16658 /* Point the symbol at the stub. */
16659 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
16660 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16661 h->root.u.def.section = th->root.u.def.section;
16662 h->root.u.def.value = th->root.u.def.value & ~1;
16665 if (h->dyn_relocs == NULL)
16666 return true;
16668 /* In the shared -Bsymbolic case, discard space allocated for
16669 dynamic pc-relative relocs against symbols which turn out to be
16670 defined in regular objects. For the normal shared case, discard
16671 space for pc-relative relocs that have become local due to symbol
16672 visibility changes. */
16674 if (bfd_link_pic (info)
16675 || htab->fdpic_p)
16677 /* Relocs that use pc_count are PC-relative forms, which will appear
16678 on something like ".long foo - ." or "movw REG, foo - .". We want
16679 calls to protected symbols to resolve directly to the function
16680 rather than going via the plt. If people want function pointer
16681 comparisons to work as expected then they should avoid writing
16682 assembly like ".long foo - .". */
16683 if (SYMBOL_CALLS_LOCAL (info, h))
16685 struct elf_dyn_relocs **pp;
16687 for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16689 p->count -= p->pc_count;
16690 p->pc_count = 0;
16691 if (p->count == 0)
16692 *pp = p->next;
16693 else
16694 pp = &p->next;
16698 if (htab->root.target_os == is_vxworks)
16700 struct elf_dyn_relocs **pp;
16702 for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16704 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
16705 *pp = p->next;
16706 else
16707 pp = &p->next;
16711 /* Also discard relocs on undefined weak syms with non-default
16712 visibility. */
16713 if (h->dyn_relocs != NULL
16714 && h->root.type == bfd_link_hash_undefweak)
16716 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16717 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16718 h->dyn_relocs = NULL;
16720 /* Make sure undefined weak symbols are output as a dynamic
16721 symbol in PIEs. */
16722 else if (htab->root.dynamic_sections_created && h->dynindx == -1
16723 && !h->forced_local)
16725 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16726 return false;
16730 else
16732 /* For the non-shared case, discard space for relocs against
16733 symbols which turn out to need copy relocs or are not
16734 dynamic. */
16736 if (!h->non_got_ref
16737 && ((h->def_dynamic
16738 && !h->def_regular)
16739 || (htab->root.dynamic_sections_created
16740 && (h->root.type == bfd_link_hash_undefweak
16741 || h->root.type == bfd_link_hash_undefined))))
16743 /* Make sure this symbol is output as a dynamic symbol.
16744 Undefined weak syms won't yet be marked as dynamic. */
16745 if (h->dynindx == -1 && !h->forced_local
16746 && h->root.type == bfd_link_hash_undefweak)
16748 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16749 return false;
16752 /* If that succeeded, we know we'll be keeping all the
16753 relocs. */
16754 if (h->dynindx != -1)
16755 goto keep;
16758 h->dyn_relocs = NULL;
16760 keep: ;
16763 /* Finally, allocate space. */
16764 for (p = h->dyn_relocs; p != NULL; p = p->next)
16766 asection *sreloc = elf_section_data (p->sec)->sreloc;
16768 if (h->type == STT_GNU_IFUNC
16769 && eh->plt.noncall_refcount == 0
16770 && SYMBOL_REFERENCES_LOCAL (info, h))
16771 elf32_arm_allocate_irelocs (info, sreloc, p->count);
16772 else if (h->dynindx != -1
16773 && (!bfd_link_pic (info) || !info->symbolic || !h->def_regular))
16774 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16775 else if (htab->fdpic_p && !bfd_link_pic (info))
16776 htab->srofixup->size += 4 * p->count;
16777 else
16778 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16781 return true;
16784 void
16785 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16786 int byteswap_code)
16788 struct elf32_arm_link_hash_table *globals;
16790 globals = elf32_arm_hash_table (info);
16791 if (globals == NULL)
16792 return;
16794 globals->byteswap_code = byteswap_code;
16797 /* Set the sizes of the dynamic sections. */
16799 static bool
16800 elf32_arm_late_size_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
16801 struct bfd_link_info * info)
16803 bfd * dynobj;
16804 asection * s;
16805 bool relocs;
16806 bfd *ibfd;
16807 struct elf32_arm_link_hash_table *htab;
16809 htab = elf32_arm_hash_table (info);
16810 if (htab == NULL)
16811 return false;
16813 dynobj = elf_hash_table (info)->dynobj;
16814 if (dynobj == NULL)
16815 return true;
16817 check_use_blx (htab);
16819 if (elf_hash_table (info)->dynamic_sections_created)
16821 /* Set the contents of the .interp section to the interpreter. */
16822 if (bfd_link_executable (info) && !info->nointerp)
16824 s = bfd_get_linker_section (dynobj, ".interp");
16825 BFD_ASSERT (s != NULL);
16826 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
16827 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
16831 /* Set up .got offsets for local syms, and space for local dynamic
16832 relocs. */
16833 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16835 bfd_signed_vma *local_got;
16836 bfd_signed_vma *end_local_got;
16837 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
16838 char *local_tls_type;
16839 bfd_vma *local_tlsdesc_gotent;
16840 bfd_size_type locsymcount;
16841 Elf_Internal_Shdr *symtab_hdr;
16842 asection *srel;
16843 unsigned int symndx;
16844 struct fdpic_local *local_fdpic_cnts;
16846 if (! is_arm_elf (ibfd))
16847 continue;
16849 for (s = ibfd->sections; s != NULL; s = s->next)
16851 struct elf_dyn_relocs *p;
16853 for (p = (struct elf_dyn_relocs *)
16854 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
16856 if (!bfd_is_abs_section (p->sec)
16857 && bfd_is_abs_section (p->sec->output_section))
16859 /* Input section has been discarded, either because
16860 it is a copy of a linkonce section or due to
16861 linker script /DISCARD/, so we'll be discarding
16862 the relocs too. */
16864 else if (htab->root.target_os == is_vxworks
16865 && strcmp (p->sec->output_section->name,
16866 ".tls_vars") == 0)
16868 /* Relocations in vxworks .tls_vars sections are
16869 handled specially by the loader. */
16871 else if (p->count != 0)
16873 srel = elf_section_data (p->sec)->sreloc;
16874 if (htab->fdpic_p && !bfd_link_pic (info))
16875 htab->srofixup->size += 4 * p->count;
16876 else
16877 elf32_arm_allocate_dynrelocs (info, srel, p->count);
16878 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
16879 info->flags |= DF_TEXTREL;
16884 local_got = elf_local_got_refcounts (ibfd);
16885 if (local_got == NULL)
16886 continue;
16888 symtab_hdr = & elf_symtab_hdr (ibfd);
16889 locsymcount = symtab_hdr->sh_info;
16890 end_local_got = local_got + locsymcount;
16891 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
16892 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
16893 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
16894 local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
16895 symndx = 0;
16896 s = htab->root.sgot;
16897 srel = htab->root.srelgot;
16898 for (; local_got < end_local_got;
16899 ++local_got, ++local_iplt_ptr, ++local_tls_type,
16900 ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
16902 if (symndx >= elf32_arm_num_entries (ibfd))
16903 return false;
16905 *local_tlsdesc_gotent = (bfd_vma) -1;
16906 local_iplt = *local_iplt_ptr;
16908 /* FDPIC support. */
16909 if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
16911 if (local_fdpic_cnts->funcdesc_offset == -1)
16913 local_fdpic_cnts->funcdesc_offset = s->size;
16914 s->size += 8;
16916 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16917 if (bfd_link_pic (info))
16918 elf32_arm_allocate_dynrelocs (info, srel, 1);
16919 else
16920 htab->srofixup->size += 8;
16924 if (local_fdpic_cnts->funcdesc_cnt > 0)
16926 if (local_fdpic_cnts->funcdesc_offset == -1)
16928 local_fdpic_cnts->funcdesc_offset = s->size;
16929 s->size += 8;
16931 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16932 if (bfd_link_pic (info))
16933 elf32_arm_allocate_dynrelocs (info, srel, 1);
16934 else
16935 htab->srofixup->size += 8;
16938 /* We will add n R_ARM_RELATIVE relocations or n rofixups. */
16939 if (bfd_link_pic (info))
16940 elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
16941 else
16942 htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
16945 if (local_iplt != NULL)
16947 struct elf_dyn_relocs *p;
16949 if (local_iplt->root.refcount > 0)
16951 elf32_arm_allocate_plt_entry (info, true,
16952 &local_iplt->root,
16953 &local_iplt->arm);
16954 if (local_iplt->arm.noncall_refcount == 0)
16955 /* All references to the PLT are calls, so all
16956 non-call references can resolve directly to the
16957 run-time target. This means that the .got entry
16958 would be the same as the .igot.plt entry, so there's
16959 no point creating both. */
16960 *local_got = 0;
16962 else
16964 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
16965 local_iplt->root.offset = (bfd_vma) -1;
16968 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
16970 asection *psrel;
16972 psrel = elf_section_data (p->sec)->sreloc;
16973 if (local_iplt->arm.noncall_refcount == 0)
16974 elf32_arm_allocate_irelocs (info, psrel, p->count);
16975 else
16976 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
16979 if (*local_got > 0)
16981 Elf_Internal_Sym *isym;
16983 *local_got = s->size;
16984 if (*local_tls_type & GOT_TLS_GD)
16985 /* TLS_GD relocs need an 8-byte structure in the GOT. */
16986 s->size += 8;
16987 if (*local_tls_type & GOT_TLS_GDESC)
16989 *local_tlsdesc_gotent = htab->root.sgotplt->size
16990 - elf32_arm_compute_jump_table_size (htab);
16991 htab->root.sgotplt->size += 8;
16992 *local_got = (bfd_vma) -2;
16993 /* plt.got_offset needs to know there's a TLS_DESC
16994 reloc in the middle of .got.plt. */
16995 htab->num_tls_desc++;
16997 if (*local_tls_type & GOT_TLS_IE)
16998 s->size += 4;
17000 if (*local_tls_type & GOT_NORMAL)
17002 /* If the symbol is both GD and GDESC, *local_got
17003 may have been overwritten. */
17004 *local_got = s->size;
17005 s->size += 4;
17008 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache, ibfd,
17009 symndx);
17010 if (isym == NULL)
17011 return false;
17013 /* If all references to an STT_GNU_IFUNC PLT are calls,
17014 then all non-call references, including this GOT entry,
17015 resolve directly to the run-time target. */
17016 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
17017 && (local_iplt == NULL
17018 || local_iplt->arm.noncall_refcount == 0))
17019 elf32_arm_allocate_irelocs (info, srel, 1);
17020 else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
17022 if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
17023 elf32_arm_allocate_dynrelocs (info, srel, 1);
17024 else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
17025 htab->srofixup->size += 4;
17027 if ((bfd_link_pic (info) || htab->fdpic_p)
17028 && *local_tls_type & GOT_TLS_GDESC)
17030 elf32_arm_allocate_dynrelocs (info,
17031 htab->root.srelplt, 1);
17032 htab->tls_trampoline = -1;
17036 else
17037 *local_got = (bfd_vma) -1;
17041 if (htab->tls_ldm_got.refcount > 0)
17043 /* Allocate two GOT entries and one dynamic relocation (if necessary)
17044 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
17045 htab->tls_ldm_got.offset = htab->root.sgot->size;
17046 htab->root.sgot->size += 8;
17047 if (bfd_link_pic (info))
17048 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
17050 else
17051 htab->tls_ldm_got.offset = -1;
17053 /* At the very end of the .rofixup section is a pointer to the
17054 GOT; reserve space for it. */
17055 if (htab->fdpic_p && htab->srofixup != NULL)
17056 htab->srofixup->size += 4;
17058 /* Allocate global sym .plt and .got entries, and space for global
17059 sym dynamic relocs. */
17060 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
17062 /* Here we rummage through the found bfds to collect glue information. */
17063 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
17065 if (! is_arm_elf (ibfd))
17066 continue;
17068 /* Initialise mapping tables for code/data. */
17069 bfd_elf32_arm_init_maps (ibfd);
17071 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
17072 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
17073 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
17074 _bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
17077 /* Allocate space for the glue sections now that we've sized them. */
17078 bfd_elf32_arm_allocate_interworking_sections (info);
17080 /* For every jump slot reserved in the sgotplt, reloc_count is
17081 incremented. However, when we reserve space for TLS descriptors,
17082 it's not incremented, so in order to compute the space reserved
17083 for them, it suffices to multiply the reloc count by the jump
17084 slot size. */
17085 if (htab->root.srelplt)
17086 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size (htab);
17088 if (htab->tls_trampoline)
17090 if (htab->root.splt->size == 0)
17091 htab->root.splt->size += htab->plt_header_size;
17093 htab->tls_trampoline = htab->root.splt->size;
17094 htab->root.splt->size += htab->plt_entry_size;
17096 /* If we're not using lazy TLS relocations, don't generate the
17097 PLT and GOT entries they require. */
17098 if ((info->flags & DF_BIND_NOW))
17099 htab->root.tlsdesc_plt = 0;
17100 else
17102 htab->root.tlsdesc_got = htab->root.sgot->size;
17103 htab->root.sgot->size += 4;
17105 htab->root.tlsdesc_plt = htab->root.splt->size;
17106 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
17110 /* The check_relocs and adjust_dynamic_symbol entry points have
17111 determined the sizes of the various dynamic sections. Allocate
17112 memory for them. */
17113 relocs = false;
17114 for (s = dynobj->sections; s != NULL; s = s->next)
17116 const char * name;
17118 if ((s->flags & SEC_LINKER_CREATED) == 0)
17119 continue;
17121 /* It's OK to base decisions on the section name, because none
17122 of the dynobj section names depend upon the input files. */
17123 name = bfd_section_name (s);
17125 if (s == htab->root.splt)
17127 /* Remember whether there is a PLT. */
17130 else if (startswith (name, ".rel"))
17132 if (s->size != 0)
17134 /* Remember whether there are any reloc sections other
17135 than .rel(a).plt and .rela.plt.unloaded. */
17136 if (s != htab->root.srelplt && s != htab->srelplt2)
17137 relocs = true;
17139 /* We use the reloc_count field as a counter if we need
17140 to copy relocs into the output file. */
17141 s->reloc_count = 0;
17144 else if (s != htab->root.sgot
17145 && s != htab->root.sgotplt
17146 && s != htab->root.iplt
17147 && s != htab->root.igotplt
17148 && s != htab->root.sdynbss
17149 && s != htab->root.sdynrelro
17150 && s != htab->srofixup)
17152 /* It's not one of our sections, so don't allocate space. */
17153 continue;
17156 if (s->size == 0)
17158 /* If we don't need this section, strip it from the
17159 output file. This is mostly to handle .rel(a).bss and
17160 .rel(a).plt. We must create both sections in
17161 create_dynamic_sections, because they must be created
17162 before the linker maps input sections to output
17163 sections. The linker does that before
17164 adjust_dynamic_symbol is called, and it is that
17165 function which decides whether anything needs to go
17166 into these sections. */
17167 s->flags |= SEC_EXCLUDE;
17168 continue;
17171 if ((s->flags & SEC_HAS_CONTENTS) == 0)
17172 continue;
17174 /* Allocate memory for the section contents. */
17175 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
17176 if (s->contents == NULL)
17177 return false;
17180 return _bfd_elf_maybe_vxworks_add_dynamic_tags (output_bfd, info,
17181 relocs);
17184 /* Size sections even though they're not dynamic. We use this to set up
17185 _TLS_MODULE_BASE_, if needed. */
17187 static bool
17188 elf32_arm_early_size_sections (bfd *output_bfd, struct bfd_link_info *info)
17190 asection *tls_sec;
17191 struct elf32_arm_link_hash_table *htab;
17193 htab = elf32_arm_hash_table (info);
17195 if (bfd_link_relocatable (info))
17196 return true;
17198 tls_sec = elf_hash_table (info)->tls_sec;
17200 if (tls_sec)
17202 struct elf_link_hash_entry *tlsbase;
17204 tlsbase = elf_link_hash_lookup
17205 (elf_hash_table (info), "_TLS_MODULE_BASE_", true, true, false);
17207 if (tlsbase)
17209 struct bfd_link_hash_entry *bh = NULL;
17210 const struct elf_backend_data *bed
17211 = get_elf_backend_data (output_bfd);
17213 if (!(_bfd_generic_link_add_one_symbol
17214 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
17215 tls_sec, 0, NULL, false,
17216 bed->collect, &bh)))
17217 return false;
17219 tlsbase->type = STT_TLS;
17220 tlsbase = (struct elf_link_hash_entry *)bh;
17221 tlsbase->def_regular = 1;
17222 tlsbase->other = STV_HIDDEN;
17223 (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
17227 if (htab->fdpic_p && !bfd_link_relocatable (info)
17228 && !bfd_elf_stack_segment_size (output_bfd, info,
17229 "__stacksize", DEFAULT_STACK_SIZE))
17230 return false;
17232 return true;
17235 /* Finish up dynamic symbol handling. We set the contents of various
17236 dynamic sections here. */
17238 static bool
17239 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
17240 struct bfd_link_info * info,
17241 struct elf_link_hash_entry * h,
17242 Elf_Internal_Sym * sym)
17244 struct elf32_arm_link_hash_table *htab;
17245 struct elf32_arm_link_hash_entry *eh;
17247 htab = elf32_arm_hash_table (info);
17249 eh = (struct elf32_arm_link_hash_entry *) h;
17251 if (h->plt.offset != (bfd_vma) -1)
17253 if (!eh->is_iplt)
17255 BFD_ASSERT (h->dynindx != -1);
17256 if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
17257 h->dynindx, 0))
17258 return false;
17261 if (!h->def_regular)
17263 /* Mark the symbol as undefined, rather than as defined in
17264 the .plt section. */
17265 sym->st_shndx = SHN_UNDEF;
17266 /* If the symbol is weak we need to clear the value.
17267 Otherwise, the PLT entry would provide a definition for
17268 the symbol even if the symbol wasn't defined anywhere,
17269 and so the symbol would never be NULL. Leave the value if
17270 there were any relocations where pointer equality matters
17271 (this is a clue for the dynamic linker, to make function
17272 pointer comparisons work between an application and shared
17273 library). */
17274 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
17275 sym->st_value = 0;
17277 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
17279 /* At least one non-call relocation references this .iplt entry,
17280 so the .iplt entry is the function's canonical address. */
17281 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
17282 ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
17283 sym->st_shndx = (_bfd_elf_section_from_bfd_section
17284 (output_bfd, htab->root.iplt->output_section));
17285 sym->st_value = (h->plt.offset
17286 + htab->root.iplt->output_section->vma
17287 + htab->root.iplt->output_offset);
17291 if (h->needs_copy)
17293 asection * s;
17294 Elf_Internal_Rela rel;
17296 /* This symbol needs a copy reloc. Set it up. */
17297 BFD_ASSERT (h->dynindx != -1
17298 && (h->root.type == bfd_link_hash_defined
17299 || h->root.type == bfd_link_hash_defweak));
17301 rel.r_addend = 0;
17302 rel.r_offset = (h->root.u.def.value
17303 + h->root.u.def.section->output_section->vma
17304 + h->root.u.def.section->output_offset);
17305 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
17306 if (h->root.u.def.section == htab->root.sdynrelro)
17307 s = htab->root.sreldynrelro;
17308 else
17309 s = htab->root.srelbss;
17310 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
17313 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
17314 and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17315 it is relative to the ".got" section. */
17316 if (h == htab->root.hdynamic
17317 || (!htab->fdpic_p
17318 && htab->root.target_os != is_vxworks
17319 && h == htab->root.hgot))
17320 sym->st_shndx = SHN_ABS;
17322 return true;
17325 static void
17326 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17327 void *contents,
17328 const unsigned long *template, unsigned count)
17330 unsigned ix;
17332 for (ix = 0; ix != count; ix++)
17334 unsigned long insn = template[ix];
17336 /* Emit mov pc,rx if bx is not permitted. */
17337 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17338 insn = (insn & 0xf000000f) | 0x01a0f000;
17339 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
17343 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17344 other variants, NaCl needs this entry in a static executable's
17345 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17346 zero. For .iplt really only the last bundle is useful, and .iplt
17347 could have a shorter first entry, with each individual PLT entry's
17348 relative branch calculated differently so it targets the last
17349 bundle instead of the instruction before it (labelled .Lplt_tail
17350 above). But it's simpler to keep the size and layout of PLT0
17351 consistent with the dynamic case, at the cost of some dead code at
17352 the start of .iplt and the one dead store to the stack at the start
17353 of .Lplt_tail. */
17354 static void
17355 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17356 asection *plt, bfd_vma got_displacement)
17358 unsigned int i;
17360 put_arm_insn (htab, output_bfd,
17361 elf32_arm_nacl_plt0_entry[0]
17362 | arm_movw_immediate (got_displacement),
17363 plt->contents + 0);
17364 put_arm_insn (htab, output_bfd,
17365 elf32_arm_nacl_plt0_entry[1]
17366 | arm_movt_immediate (got_displacement),
17367 plt->contents + 4);
17369 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17370 put_arm_insn (htab, output_bfd,
17371 elf32_arm_nacl_plt0_entry[i],
17372 plt->contents + (i * 4));
17375 /* Finish up the dynamic sections. */
17377 static bool
17378 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
17380 bfd * dynobj;
17381 asection * sgot;
17382 asection * sdyn;
17383 struct elf32_arm_link_hash_table *htab;
17385 htab = elf32_arm_hash_table (info);
17386 if (htab == NULL)
17387 return false;
17389 dynobj = elf_hash_table (info)->dynobj;
17391 sgot = htab->root.sgotplt;
17392 /* A broken linker script might have discarded the dynamic sections.
17393 Catch this here so that we do not seg-fault later on. */
17394 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
17395 return false;
17396 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
17398 if (elf_hash_table (info)->dynamic_sections_created)
17400 asection *splt;
17401 Elf32_External_Dyn *dyncon, *dynconend;
17403 splt = htab->root.splt;
17404 BFD_ASSERT (splt != NULL && sdyn != NULL);
17405 BFD_ASSERT (sgot != NULL);
17407 dyncon = (Elf32_External_Dyn *) sdyn->contents;
17408 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
17410 for (; dyncon < dynconend; dyncon++)
17412 Elf_Internal_Dyn dyn;
17413 const char * name;
17414 asection * s;
17416 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
17418 switch (dyn.d_tag)
17420 default:
17421 if (htab->root.target_os == is_vxworks
17422 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
17423 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17424 break;
17426 case DT_HASH:
17427 case DT_STRTAB:
17428 case DT_SYMTAB:
17429 case DT_VERSYM:
17430 case DT_VERDEF:
17431 case DT_VERNEED:
17432 break;
17434 case DT_PLTGOT:
17435 name = ".got.plt";
17436 goto get_vma;
17437 case DT_JMPREL:
17438 name = RELOC_SECTION (htab, ".plt");
17439 get_vma:
17440 s = bfd_get_linker_section (dynobj, name);
17441 if (s == NULL)
17443 _bfd_error_handler
17444 (_("could not find section %s"), name);
17445 bfd_set_error (bfd_error_invalid_operation);
17446 return false;
17448 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
17449 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17450 break;
17452 case DT_PLTRELSZ:
17453 s = htab->root.srelplt;
17454 BFD_ASSERT (s != NULL);
17455 dyn.d_un.d_val = s->size;
17456 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17457 break;
17459 case DT_RELSZ:
17460 case DT_RELASZ:
17461 case DT_REL:
17462 case DT_RELA:
17463 break;
17465 case DT_TLSDESC_PLT:
17466 s = htab->root.splt;
17467 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17468 + htab->root.tlsdesc_plt);
17469 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17470 break;
17472 case DT_TLSDESC_GOT:
17473 s = htab->root.sgot;
17474 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17475 + htab->root.tlsdesc_got);
17476 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17477 break;
17479 /* Set the bottom bit of DT_INIT/FINI if the
17480 corresponding function is Thumb. */
17481 case DT_INIT:
17482 name = info->init_function;
17483 goto get_sym;
17484 case DT_FINI:
17485 name = info->fini_function;
17486 get_sym:
17487 /* If it wasn't set by elf_bfd_final_link
17488 then there is nothing to adjust. */
17489 if (dyn.d_un.d_val != 0)
17491 struct elf_link_hash_entry * eh;
17493 eh = elf_link_hash_lookup (elf_hash_table (info), name,
17494 false, false, true);
17495 if (eh != NULL
17496 && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
17497 == ST_BRANCH_TO_THUMB)
17499 dyn.d_un.d_val |= 1;
17500 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17503 break;
17507 /* Fill in the first entry in the procedure linkage table. */
17508 if (splt->size > 0 && htab->plt_header_size)
17510 const bfd_vma *plt0_entry;
17511 bfd_vma got_address, plt_address, got_displacement;
17513 /* Calculate the addresses of the GOT and PLT. */
17514 got_address = sgot->output_section->vma + sgot->output_offset;
17515 plt_address = splt->output_section->vma + splt->output_offset;
17517 if (htab->root.target_os == is_vxworks)
17519 /* The VxWorks GOT is relocated by the dynamic linker.
17520 Therefore, we must emit relocations rather than simply
17521 computing the values now. */
17522 Elf_Internal_Rela rel;
17524 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
17525 put_arm_insn (htab, output_bfd, plt0_entry[0],
17526 splt->contents + 0);
17527 put_arm_insn (htab, output_bfd, plt0_entry[1],
17528 splt->contents + 4);
17529 put_arm_insn (htab, output_bfd, plt0_entry[2],
17530 splt->contents + 8);
17531 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
17533 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
17534 rel.r_offset = plt_address + 12;
17535 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17536 rel.r_addend = 0;
17537 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
17538 htab->srelplt2->contents);
17540 else if (htab->root.target_os == is_nacl)
17541 arm_nacl_put_plt0 (htab, output_bfd, splt,
17542 got_address + 8 - (plt_address + 16));
17543 else if (using_thumb_only (htab))
17545 got_displacement = got_address - (plt_address + 12);
17547 plt0_entry = elf32_thumb2_plt0_entry;
17548 put_arm_insn (htab, output_bfd, plt0_entry[0],
17549 splt->contents + 0);
17550 put_arm_insn (htab, output_bfd, plt0_entry[1],
17551 splt->contents + 4);
17552 put_arm_insn (htab, output_bfd, plt0_entry[2],
17553 splt->contents + 8);
17555 bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
17557 else
17559 got_displacement = got_address - (plt_address + 16);
17561 plt0_entry = elf32_arm_plt0_entry;
17562 put_arm_insn (htab, output_bfd, plt0_entry[0],
17563 splt->contents + 0);
17564 put_arm_insn (htab, output_bfd, plt0_entry[1],
17565 splt->contents + 4);
17566 put_arm_insn (htab, output_bfd, plt0_entry[2],
17567 splt->contents + 8);
17568 put_arm_insn (htab, output_bfd, plt0_entry[3],
17569 splt->contents + 12);
17571 #ifdef FOUR_WORD_PLT
17572 /* The displacement value goes in the otherwise-unused
17573 last word of the second entry. */
17574 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
17575 #else
17576 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
17577 #endif
17581 /* UnixWare sets the entsize of .plt to 4, although that doesn't
17582 really seem like the right value. */
17583 if (splt->output_section->owner == output_bfd)
17584 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
17586 if (htab->root.tlsdesc_plt)
17588 bfd_vma got_address
17589 = sgot->output_section->vma + sgot->output_offset;
17590 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
17591 + htab->root.sgot->output_offset);
17592 bfd_vma plt_address
17593 = splt->output_section->vma + splt->output_offset;
17595 arm_put_trampoline (htab, output_bfd,
17596 splt->contents + htab->root.tlsdesc_plt,
17597 dl_tlsdesc_lazy_trampoline, 6);
17599 bfd_put_32 (output_bfd,
17600 gotplt_address + htab->root.tlsdesc_got
17601 - (plt_address + htab->root.tlsdesc_plt)
17602 - dl_tlsdesc_lazy_trampoline[6],
17603 splt->contents + htab->root.tlsdesc_plt + 24);
17604 bfd_put_32 (output_bfd,
17605 got_address - (plt_address + htab->root.tlsdesc_plt)
17606 - dl_tlsdesc_lazy_trampoline[7],
17607 splt->contents + htab->root.tlsdesc_plt + 24 + 4);
17610 if (htab->tls_trampoline)
17612 arm_put_trampoline (htab, output_bfd,
17613 splt->contents + htab->tls_trampoline,
17614 tls_trampoline, 3);
17615 #ifdef FOUR_WORD_PLT
17616 bfd_put_32 (output_bfd, 0x00000000,
17617 splt->contents + htab->tls_trampoline + 12);
17618 #endif
17621 if (htab->root.target_os == is_vxworks
17622 && !bfd_link_pic (info)
17623 && htab->root.splt->size > 0)
17625 /* Correct the .rel(a).plt.unloaded relocations. They will have
17626 incorrect symbol indexes. */
17627 int num_plts;
17628 unsigned char *p;
17630 num_plts = ((htab->root.splt->size - htab->plt_header_size)
17631 / htab->plt_entry_size);
17632 p = htab->srelplt2->contents + RELOC_SIZE (htab);
17634 for (; num_plts; num_plts--)
17636 Elf_Internal_Rela rel;
17638 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17639 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17640 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17641 p += RELOC_SIZE (htab);
17643 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17644 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
17645 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17646 p += RELOC_SIZE (htab);
17651 if (htab->root.target_os == is_nacl
17652 && htab->root.iplt != NULL
17653 && htab->root.iplt->size > 0)
17654 /* NaCl uses a special first entry in .iplt too. */
17655 arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
17657 /* Fill in the first three entries in the global offset table. */
17658 if (sgot)
17660 if (sgot->size > 0)
17662 if (sdyn == NULL)
17663 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
17664 else
17665 bfd_put_32 (output_bfd,
17666 sdyn->output_section->vma + sdyn->output_offset,
17667 sgot->contents);
17668 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
17669 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
17672 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
17675 /* At the very end of the .rofixup section is a pointer to the GOT. */
17676 if (htab->fdpic_p && htab->srofixup != NULL)
17678 struct elf_link_hash_entry *hgot = htab->root.hgot;
17680 bfd_vma got_value = hgot->root.u.def.value
17681 + hgot->root.u.def.section->output_section->vma
17682 + hgot->root.u.def.section->output_offset;
17684 arm_elf_add_rofixup (output_bfd, htab->srofixup, got_value);
17686 /* Make sure we allocated and generated the same number of fixups. */
17687 BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
17690 return true;
17693 static bool
17694 elf32_arm_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
17696 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
17697 struct elf32_arm_link_hash_table *globals;
17698 struct elf_segment_map *m;
17700 if (!_bfd_elf_init_file_header (abfd, link_info))
17701 return false;
17703 i_ehdrp = elf_elfheader (abfd);
17705 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17706 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17707 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17709 if (link_info)
17711 globals = elf32_arm_hash_table (link_info);
17712 if (globals != NULL && globals->byteswap_code)
17713 i_ehdrp->e_flags |= EF_ARM_BE8;
17715 if (globals->fdpic_p)
17716 i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17719 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17720 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17722 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17723 if (abi == AEABI_VFP_args_vfp)
17724 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17725 else
17726 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17729 /* Scan each segment and set its p_flags attribute if it contains only
17730 sections with the SHF_ARM_PURECODE flag. */
17731 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17733 unsigned int j;
17735 if (m->count == 0)
17736 continue;
17737 for (j = 0; j < m->count; j++)
17739 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17740 break;
17742 if (j == m->count)
17744 m->p_flags = PF_X;
17745 m->p_flags_valid = 1;
17748 return true;
17751 static enum elf_reloc_type_class
17752 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17753 const asection *rel_sec ATTRIBUTE_UNUSED,
17754 const Elf_Internal_Rela *rela)
17756 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
17758 if (htab->root.dynsym != NULL
17759 && htab->root.dynsym->contents != NULL)
17761 /* Check relocation against STT_GNU_IFUNC symbol if there are
17762 dynamic symbols. */
17763 bfd *abfd = info->output_bfd;
17764 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
17765 unsigned long r_symndx = ELF32_R_SYM (rela->r_info);
17766 if (r_symndx != STN_UNDEF)
17768 Elf_Internal_Sym sym;
17769 if (!bed->s->swap_symbol_in (abfd,
17770 (htab->root.dynsym->contents
17771 + r_symndx * bed->s->sizeof_sym),
17772 0, &sym))
17774 /* xgettext:c-format */
17775 _bfd_error_handler (_("%pB symbol number %lu references"
17776 " nonexistent SHT_SYMTAB_SHNDX section"),
17777 abfd, r_symndx);
17778 /* Ideally an error class should be returned here. */
17780 else if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
17781 return reloc_class_ifunc;
17785 switch ((int) ELF32_R_TYPE (rela->r_info))
17787 case R_ARM_RELATIVE:
17788 return reloc_class_relative;
17789 case R_ARM_JUMP_SLOT:
17790 return reloc_class_plt;
17791 case R_ARM_COPY:
17792 return reloc_class_copy;
17793 case R_ARM_IRELATIVE:
17794 return reloc_class_ifunc;
17795 default:
17796 return reloc_class_normal;
17800 static void
17801 arm_final_write_processing (bfd *abfd)
17803 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
17806 static bool
17807 elf32_arm_final_write_processing (bfd *abfd)
17809 arm_final_write_processing (abfd);
17810 return _bfd_elf_final_write_processing (abfd);
17813 /* Return TRUE if this is an unwinding table entry. */
17815 static bool
17816 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17818 return (startswith (name, ELF_STRING_ARM_unwind)
17819 || startswith (name, ELF_STRING_ARM_unwind_once));
17823 /* Set the type and flags for an ARM section. We do this by
17824 the section name, which is a hack, but ought to work. */
17826 static bool
17827 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17829 const char * name;
17831 name = bfd_section_name (sec);
17833 if (is_arm_elf_unwind_section_name (abfd, name))
17835 hdr->sh_type = SHT_ARM_EXIDX;
17836 hdr->sh_flags |= SHF_LINK_ORDER;
17839 if (sec->flags & SEC_ELF_PURECODE)
17840 hdr->sh_flags |= SHF_ARM_PURECODE;
17842 return true;
17845 /* Handle an ARM specific section when reading an object file. This is
17846 called when bfd_section_from_shdr finds a section with an unknown
17847 type. */
17849 static bool
17850 elf32_arm_section_from_shdr (bfd *abfd,
17851 Elf_Internal_Shdr * hdr,
17852 const char *name,
17853 int shindex)
17855 /* There ought to be a place to keep ELF backend specific flags, but
17856 at the moment there isn't one. We just keep track of the
17857 sections by their name, instead. Fortunately, the ABI gives
17858 names for all the ARM specific sections, so we will probably get
17859 away with this. */
17860 switch (hdr->sh_type)
17862 case SHT_ARM_EXIDX:
17863 case SHT_ARM_PREEMPTMAP:
17864 case SHT_ARM_ATTRIBUTES:
17865 break;
17867 default:
17868 return false;
17871 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17872 return false;
17874 return true;
17877 static _arm_elf_section_data *
17878 get_arm_elf_section_data (asection * sec)
17880 if (sec && sec->owner && is_arm_elf (sec->owner))
17881 return elf32_arm_section_data (sec);
17882 else
17883 return NULL;
17886 typedef struct
17888 void *flaginfo;
17889 struct bfd_link_info *info;
17890 asection *sec;
17891 int sec_shndx;
17892 int (*func) (void *, const char *, Elf_Internal_Sym *,
17893 asection *, struct elf_link_hash_entry *);
17894 } output_arch_syminfo;
17896 enum map_symbol_type
17898 ARM_MAP_ARM,
17899 ARM_MAP_THUMB,
17900 ARM_MAP_DATA
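/* These values index the names[] table in elf32_arm_output_map_sym below
   and correspond to the AAELF mapping symbols "$a", "$t" and "$d", which
   mark the start of ARM code, Thumb code and literal data respectively.  */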
17904 /* Output a single mapping symbol. */
17906 static bool
17907 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17908 enum map_symbol_type type,
17909 bfd_vma offset)
17911 static const char *names[3] = {"$a", "$t", "$d"};
17912 Elf_Internal_Sym sym;
17914 sym.st_value = osi->sec->output_section->vma
17915 + osi->sec->output_offset
17916 + offset;
17917 sym.st_size = 0;
17918 sym.st_other = 0;
17919 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17920 sym.st_shndx = osi->sec_shndx;
17921 sym.st_target_internal = ST_BRANCH_TO_ARM;
17922 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17923 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
17926 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17927 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
17929 static bool
17930 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
17931 bool is_iplt_entry_p,
17932 union gotplt_union *root_plt,
17933 struct arm_plt_info *arm_plt)
17935 struct elf32_arm_link_hash_table *htab;
17936 bfd_vma addr, plt_header_size;
17938 if (root_plt->offset == (bfd_vma) -1)
17939 return true;
17941 htab = elf32_arm_hash_table (osi->info);
17942 if (htab == NULL)
17943 return false;
17945 if (is_iplt_entry_p)
17947 osi->sec = htab->root.iplt;
17948 plt_header_size = 0;
17950 else
17952 osi->sec = htab->root.splt;
17953 plt_header_size = htab->plt_header_size;
17955 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
17956 (osi->info->output_bfd, osi->sec->output_section));
17958 addr = root_plt->offset & -2;
17959 if (htab->root.target_os == is_vxworks)
17961 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17962 return false;
17963 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
17964 return false;
17965 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
17966 return false;
17967 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
17968 return false;
17970 else if (htab->root.target_os == is_nacl)
17972 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17973 return false;
17975 else if (htab->fdpic_p)
17977 enum map_symbol_type type = using_thumb_only (htab)
17978 ? ARM_MAP_THUMB
17979 : ARM_MAP_ARM;
17981 if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
17982 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
17983 return false;
17984 if (!elf32_arm_output_map_sym (osi, type, addr))
17985 return false;
17986 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
17987 return false;
17988 if (htab->plt_entry_size == 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry))
17989 if (!elf32_arm_output_map_sym (osi, type, addr + 24))
17990 return false;
17992 else if (using_thumb_only (htab))
17994 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
17995 return false;
17997 else
17999 bool thumb_stub_p;
18001 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
18002 if (thumb_stub_p)
18004 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
18005 return false;
18007 #ifdef FOUR_WORD_PLT
18008 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
18009 return false;
18010 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
18011 return false;
18012 #else
18013 /* A three-word PLT with no Thumb thunk contains only Arm code, so
18014 we only need to output a mapping symbol for the first PLT entry and
18015 for entries with Thumb thunks. */
18016 if (thumb_stub_p || addr == plt_header_size)
18018 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
18019 return false;
18021 #endif
18024 return true;
18027 /* Output mapping symbols for PLT entries associated with H. */
18029 static bool
18030 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
18032 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
18033 struct elf32_arm_link_hash_entry *eh;
18035 if (h->root.type == bfd_link_hash_indirect)
18036 return true;
18038 if (h->root.type == bfd_link_hash_warning)
18039 /* When warning symbols are created, they **replace** the "real"
18040 entry in the hash table, thus we never get to see the real
18041 symbol in a hash traversal. So look at it now. */
18042 h = (struct elf_link_hash_entry *) h->root.u.i.link;
18044 eh = (struct elf32_arm_link_hash_entry *) h;
18045 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
18046 &h->plt, &eh->plt);
18049 /* Bind a veneered symbol to its veneer identified by its hash entry
18050 STUB_ENTRY. The veneered location thus loses its symbol. */
18052 static void
18053 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
18055 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
18057 BFD_ASSERT (hash);
18058 hash->root.root.u.def.section = stub_entry->stub_sec;
18059 hash->root.root.u.def.value = stub_entry->stub_offset;
18060 hash->root.size = stub_entry->stub_size;
18063 /* Output a single local symbol for a generated stub. */
18065 static bool
18066 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
18067 bfd_vma offset, bfd_vma size)
18069 Elf_Internal_Sym sym;
18071 sym.st_value = osi->sec->output_section->vma
18072 + osi->sec->output_offset
18073 + offset;
18074 sym.st_size = size;
18075 sym.st_other = 0;
18076 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
18077 sym.st_shndx = osi->sec_shndx;
18078 sym.st_target_internal = ST_BRANCH_TO_ARM;
18079 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
18082 static bool
18083 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
18084 void * in_arg)
18086 struct elf32_arm_stub_hash_entry *stub_entry;
18087 asection *stub_sec;
18088 bfd_vma addr;
18089 char *stub_name;
18090 output_arch_syminfo *osi;
18091 const insn_sequence *template_sequence;
18092 enum stub_insn_type prev_type;
18093 int size;
18094 int i;
18095 enum map_symbol_type sym_type;
18097 /* Massage our args to the form they really have. */
18098 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
18099 osi = (output_arch_syminfo *) in_arg;
18101 stub_sec = stub_entry->stub_sec;
18103 /* Ensure this stub is attached to the current section being
18104 processed. */
18105 if (stub_sec != osi->sec)
18106 return true;
18108 addr = (bfd_vma) stub_entry->stub_offset;
18109 template_sequence = stub_entry->stub_template;
18111 if (arm_stub_sym_claimed (stub_entry->stub_type))
18112 arm_stub_claim_sym (stub_entry);
18113 else
18115 stub_name = stub_entry->output_name;
18116 switch (template_sequence[0].type)
18118 case ARM_TYPE:
18119 if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
18120 stub_entry->stub_size))
18121 return false;
18122 break;
18123 case THUMB16_TYPE:
18124 case THUMB32_TYPE:
18125 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
18126 stub_entry->stub_size))
18127 return false;
18128 break;
18129 default:
18130 BFD_FAIL ();
18131 return 0;
18135 prev_type = DATA_TYPE;
18136 size = 0;
18137 for (i = 0; i < stub_entry->stub_template_size; i++)
18139 switch (template_sequence[i].type)
18141 case ARM_TYPE:
18142 sym_type = ARM_MAP_ARM;
18143 break;
18145 case THUMB16_TYPE:
18146 case THUMB32_TYPE:
18147 sym_type = ARM_MAP_THUMB;
18148 break;
18150 case DATA_TYPE:
18151 sym_type = ARM_MAP_DATA;
18152 break;
18154 default:
18155 BFD_FAIL ();
18156 return false;
18159 if (template_sequence[i].type != prev_type)
18161 prev_type = template_sequence[i].type;
18162 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
18163 return false;
18166 switch (template_sequence[i].type)
18168 case ARM_TYPE:
18169 case THUMB32_TYPE:
18170 size += 4;
18171 break;
18173 case THUMB16_TYPE:
18174 size += 2;
18175 break;
18177 case DATA_TYPE:
18178 size += 4;
18179 break;
18181 default:
18182 BFD_FAIL ();
18183 return false;
18187 return true;
18190 /* Output mapping symbols for linker generated sections,
18191 and for those data-only sections that do not have a
18192 $d. */
18194 static bool
18195 elf32_arm_output_arch_local_syms (bfd *output_bfd,
18196 struct bfd_link_info *info,
18197 void *flaginfo,
18198 int (*func) (void *, const char *,
18199 Elf_Internal_Sym *,
18200 asection *,
18201 struct elf_link_hash_entry *))
18203 output_arch_syminfo osi;
18204 struct elf32_arm_link_hash_table *htab;
18205 bfd_vma offset;
18206 bfd_size_type size;
18207 bfd *input_bfd;
18209 if (info->strip == strip_all
18210 && !info->emitrelocations
18211 && !bfd_link_relocatable (info))
18212 return true;
18214 htab = elf32_arm_hash_table (info);
18215 if (htab == NULL)
18216 return false;
18218 check_use_blx (htab);
18220 osi.flaginfo = flaginfo;
18221 osi.info = info;
18222 osi.func = func;
18224 /* Add a $d mapping symbol to data-only sections that
18225 don't have any mapping symbol. This may result in (harmless) redundant
18226 mapping symbols. */
18227 for (input_bfd = info->input_bfds;
18228 input_bfd != NULL;
18229 input_bfd = input_bfd->link.next)
18231 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
18232 for (osi.sec = input_bfd->sections;
18233 osi.sec != NULL;
18234 osi.sec = osi.sec->next)
18236 if (osi.sec->output_section != NULL
18237 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
18238 != 0)
18239 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
18240 == SEC_HAS_CONTENTS
18241 && get_arm_elf_section_data (osi.sec) != NULL
18242 && get_arm_elf_section_data (osi.sec)->mapcount == 0
18243 && osi.sec->size > 0
18244 && (osi.sec->flags & SEC_EXCLUDE) == 0)
18246 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18247 (output_bfd, osi.sec->output_section);
18248 if (osi.sec_shndx != (int)SHN_BAD)
18249 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
18254 /* ARM->Thumb glue. */
18255 if (htab->arm_glue_size > 0)
18257 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18258 ARM2THUMB_GLUE_SECTION_NAME);
18260 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18261 (output_bfd, osi.sec->output_section);
18262 if (bfd_link_pic (info)
18263 || htab->pic_veneer)
18264 size = ARM2THUMB_PIC_GLUE_SIZE;
18265 else if (htab->use_blx)
18266 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
18267 else
18268 size = ARM2THUMB_STATIC_GLUE_SIZE;
18270 for (offset = 0; offset < htab->arm_glue_size; offset += size)
18272 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
18273 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
18277 /* Thumb->ARM glue. */
18278 if (htab->thumb_glue_size > 0)
18280 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18281 THUMB2ARM_GLUE_SECTION_NAME);
18283 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18284 (output_bfd, osi.sec->output_section);
18285 size = THUMB2ARM_GLUE_SIZE;
18287 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
18289 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
18290 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
18294 /* ARMv4 BX veneers. */
18295 if (htab->bx_glue_size > 0)
18297 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18298 ARM_BX_GLUE_SECTION_NAME);
18300 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18301 (output_bfd, osi.sec->output_section);
18303 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
18306 /* Long-call stubs. */
18307 if (htab->stub_bfd && htab->stub_bfd->sections)
18309 asection* stub_sec;
18311 for (stub_sec = htab->stub_bfd->sections;
18312 stub_sec != NULL;
18313 stub_sec = stub_sec->next)
18315 /* Ignore non-stub sections. */
18316 if (!strstr (stub_sec->name, STUB_SUFFIX))
18317 continue;
18319 osi.sec = stub_sec;
18321 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18322 (output_bfd, osi.sec->output_section);
18324 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
18328 /* Finally, output mapping symbols for the PLT. */
18329 if (htab->root.splt && htab->root.splt->size > 0)
18331 osi.sec = htab->root.splt;
18332 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18333 (output_bfd, osi.sec->output_section));
18335 /* Output mapping symbols for the plt header. */
18336 if (htab->root.target_os == is_vxworks)
18338 /* VxWorks shared libraries have no PLT header. */
18339 if (!bfd_link_pic (info))
18341 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18342 return false;
18343 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18344 return false;
18347 else if (htab->root.target_os == is_nacl)
18349 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18350 return false;
18352 else if (using_thumb_only (htab) && !htab->fdpic_p)
18354 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
18355 return false;
18356 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18357 return false;
18358 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
18359 return false;
18361 else if (!htab->fdpic_p)
18363 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18364 return false;
18365 #ifndef FOUR_WORD_PLT
18366 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
18367 return false;
18368 #endif
18371 if (htab->root.target_os == is_nacl
18372 && htab->root.iplt
18373 && htab->root.iplt->size > 0)
18375 /* NaCl uses a special first entry in .iplt too. */
18376 osi.sec = htab->root.iplt;
18377 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18378 (output_bfd, osi.sec->output_section));
18379 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18380 return false;
18382 if ((htab->root.splt && htab->root.splt->size > 0)
18383 || (htab->root.iplt && htab->root.iplt->size > 0))
18385 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
18386 for (input_bfd = info->input_bfds;
18387 input_bfd != NULL;
18388 input_bfd = input_bfd->link.next)
18390 struct arm_local_iplt_info **local_iplt;
18391 unsigned int i, num_syms;
18393 local_iplt = elf32_arm_local_iplt (input_bfd);
18394 if (local_iplt != NULL)
18396 num_syms = elf_symtab_hdr (input_bfd).sh_info;
18397 if (num_syms > elf32_arm_num_entries (input_bfd))
18399 _bfd_error_handler (_("\
18400 %pB: Number of symbols in input file has increased from %lu to %u\n"),
18401 input_bfd,
18402 (unsigned long) elf32_arm_num_entries (input_bfd),
18403 num_syms);
18404 return false;
18406 for (i = 0; i < num_syms; i++)
18407 if (local_iplt[i] != NULL
18408 && !elf32_arm_output_plt_map_1 (&osi, true,
18409 &local_iplt[i]->root,
18410 &local_iplt[i]->arm))
18411 return false;
18415 if (htab->root.tlsdesc_plt != 0)
18417 /* Mapping symbols for the lazy tls trampoline. */
18418 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM,
18419 htab->root.tlsdesc_plt))
18420 return false;
18422 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18423 htab->root.tlsdesc_plt + 24))
18424 return false;
18426 if (htab->tls_trampoline != 0)
18428 /* Mapping symbols for the tls trampoline. */
18429 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
18430 return false;
18431 #ifdef FOUR_WORD_PLT
18432 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18433 htab->tls_trampoline + 12))
18434 return false;
18435 #endif
18438 return true;
18441 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18442 the import library. All SYMCOUNT symbols of ABFD can be examined
18443 from their pointers in SYMS. Pointers of symbols to keep should be
18444 stored contiguously at the beginning of that array.
18446 Returns the number of symbols to keep. */
18448 static unsigned int
18449 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18450 struct bfd_link_info *info,
18451 asymbol **syms, long symcount)
18453 size_t maxnamelen;
18454 char *cmse_name;
18455 long src_count, dst_count = 0;
18456 struct elf32_arm_link_hash_table *htab;
18458 htab = elf32_arm_hash_table (info);
18459 if (!htab->stub_bfd || !htab->stub_bfd->sections)
18460 symcount = 0;
18462 maxnamelen = 128;
18463 cmse_name = (char *) bfd_malloc (maxnamelen);
18464 BFD_ASSERT (cmse_name);
18466 for (src_count = 0; src_count < symcount; src_count++)
18468 struct elf32_arm_link_hash_entry *cmse_hash;
18469 asymbol *sym;
18470 flagword flags;
18471 char *name;
18472 size_t namelen;
18474 sym = syms[src_count];
18475 flags = sym->flags;
18476 name = (char *) bfd_asymbol_name (sym);
18478 if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18479 continue;
18480 if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18481 continue;
18483 namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18484 if (namelen > maxnamelen)
18486 cmse_name = (char *)
18487 bfd_realloc (cmse_name, namelen);
18488 maxnamelen = namelen;
18490 snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18491 cmse_hash = (struct elf32_arm_link_hash_entry *)
18492 elf_link_hash_lookup (&(htab)->root, cmse_name, false, false, true);
18494 if (!cmse_hash
18495 || (cmse_hash->root.root.type != bfd_link_hash_defined
18496 && cmse_hash->root.root.type != bfd_link_hash_defweak)
18497 || cmse_hash->root.type != STT_FUNC)
18498 continue;
18500 syms[dst_count++] = sym;
18502 free (cmse_name);
18504 syms[dst_count] = NULL;
18506 return dst_count;
18509 /* Filter symbols of ABFD to include in the import library. All
18510 SYMCOUNT symbols of ABFD can be examined from their pointers in
18511 SYMS. Pointers of symbols to keep should be stored contiguously at
18512 the beginning of that array.
18514 Returns the number of symbols to keep. */
18516 static unsigned int
18517 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18518 struct bfd_link_info *info,
18519 asymbol **syms, long symcount)
18521 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18523 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18524 Development Tools" (ARM-ECM-0359818) mandates that the Secure Gateway
18525 import library be a relocatable object file. */
18526 BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18527 if (globals->cmse_implib)
18528 return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18529 else
18530 return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18533 /* Allocate target specific section data. */
18535 static bool
18536 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18538 if (!sec->used_by_bfd)
18540 _arm_elf_section_data *sdata;
18541 size_t amt = sizeof (*sdata);
18543 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18544 if (sdata == NULL)
18545 return false;
18546 sec->used_by_bfd = sdata;
18549 return _bfd_elf_new_section_hook (abfd, sec);
18553 /* Used to order a list of mapping symbols by address. */
18555 static int
18556 elf32_arm_compare_mapping (const void * a, const void * b)
18558 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18559 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18561 if (amap->vma > bmap->vma)
18562 return 1;
18563 else if (amap->vma < bmap->vma)
18564 return -1;
18565 else if (amap->type > bmap->type)
18566 /* Ensure results do not depend on the host qsort for objects with
18567 multiple mapping symbols at the same address by sorting on type
18568 after vma. */
18569 return 1;
18570 else if (amap->type < bmap->type)
18571 return -1;
18572 else
18573 return 0;
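/* A qsort-style comparator for elf32_arm_section_map entries: they are
   ordered by vma first and by type second, which keeps the final ordering
   independent of the host's qsort implementation.  */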
18576 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18578 static unsigned long
18579 offset_prel31 (unsigned long addr, bfd_vma offset)
18581 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
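/* Worked example: with ADDR = 0x80000010 (bit 31 set) and OFFSET = 8 the
   result is 0x80000018: the prel31 value in the low 31 bits advances by 8
   while bit 31 is left untouched.  */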
18584 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18585 relocations. */
18587 static void
18588 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18590 unsigned long first_word = bfd_get_32 (output_bfd, from);
18591 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18593 /* High bit of first word is supposed to be zero. */
18594 if ((first_word & 0x80000000ul) == 0)
18595 first_word = offset_prel31 (first_word, offset);
18597 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
18598 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
18599 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18600 second_word = offset_prel31 (second_word, offset);
18602 bfd_put_32 (output_bfd, first_word, to);
18603 bfd_put_32 (output_bfd, second_word, to + 4);
18606 /* Data for make_branch_to_a8_stub(). */
18608 struct a8_branch_to_stub_data
18610 asection *writing_section;
18611 bfd_byte *contents;
18615 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18616 places for a particular section. */
18618 static bool
18619 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
18620 void *in_arg)
18622 struct elf32_arm_stub_hash_entry *stub_entry;
18623 struct a8_branch_to_stub_data *data;
18624 bfd_byte *contents;
18625 unsigned long branch_insn;
18626 bfd_vma veneered_insn_loc, veneer_entry_loc;
18627 bfd_signed_vma branch_offset;
18628 bfd *abfd;
18629 unsigned int loc;
18631 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
18632 data = (struct a8_branch_to_stub_data *) in_arg;
18634 if (stub_entry->target_section != data->writing_section
18635 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
18636 return true;
18638 contents = data->contents;
18640 /* We use target_section as Cortex-A8 erratum workaround stubs are only
18641 generated when both source and target are in the same section. */
18642 veneered_insn_loc = stub_entry->target_section->output_section->vma
18643 + stub_entry->target_section->output_offset
18644 + stub_entry->source_value;
18646 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
18647 + stub_entry->stub_sec->output_offset
18648 + stub_entry->stub_offset;
18650 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
18651 veneered_insn_loc &= ~3u;
18653 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
18655 abfd = stub_entry->target_section->owner;
18656 loc = stub_entry->source_value;
18658 /* We attempt to avoid this condition by setting stubs_always_after_branch
18659 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
18660 This check is just to be on the safe side... */
18661 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
18663 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
18664 "allocated in unsafe location"), abfd);
18665 return false;
18668 switch (stub_entry->stub_type)
18670 case arm_stub_a8_veneer_b:
18671 case arm_stub_a8_veneer_b_cond:
18672 branch_insn = 0xf0009000;
18673 goto jump24;
18675 case arm_stub_a8_veneer_blx:
18676 branch_insn = 0xf000e800;
18677 goto jump24;
18679 case arm_stub_a8_veneer_bl:
18681 unsigned int i1, j1, i2, j2, s;
18683 branch_insn = 0xf000d000;
18685 jump24:
18686 if (branch_offset < -16777216 || branch_offset > 16777214)
18688 /* There's not much we can do apart from complain if this
18689 happens. */
18690 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
18691 "of range (input file too large)"), abfd);
18692 return false;
18695 /* i1 = not(j1 eor s), so:
18696 not i1 = j1 eor s
18697 j1 = (not i1) eor s. */
18699 branch_insn |= (branch_offset >> 1) & 0x7ff;
18700 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
18701 i2 = (branch_offset >> 22) & 1;
18702 i1 = (branch_offset >> 23) & 1;
18703 s = (branch_offset >> 24) & 1;
18704 j1 = (!i1) ^ s;
18705 j2 = (!i2) ^ s;
18706 branch_insn |= j2 << 11;
18707 branch_insn |= j1 << 13;
18708 branch_insn |= s << 26;
18710 break;
18712 default:
18713 BFD_FAIL ();
18714 return false;
18717 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
18718 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);
18720 return true;
18723 /* Beginning of stm32l4xx work-around. */
18725 /* Functions encoding instructions necessary for the emission of the
18726 fix-stm32l4xx-629360.
18727 Encoding is extracted from the
18728 ARM (C) Architecture Reference Manual
18729 ARMv7-A and ARMv7-R edition
18730 ARM DDI 0406C.b (ID072512). */
18732 static inline bfd_vma
18733 create_instruction_branch_absolute (int branch_offset)
18735 /* A8.8.18 B (A8-334)
18736 B target_address (Encoding T4). */
18737 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18738 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18739 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18741 int s = ((branch_offset & 0x1000000) >> 24);
18742 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18743 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18745 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18746 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18748 bfd_vma patched_inst = 0xf0009000
18749 | s << 26 /* S. */
18750 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
18751 | j1 << 13 /* J1. */
18752 | j2 << 11 /* J2. */
18753 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
18755 return patched_inst;
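/* As an illustration of the encoding above, a branch_offset of 0 gives
   S = 0 and J1 = J2 = 1, so the function returns 0xf000b800, a Thumb-2
   B.W with a zero immediate.  */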
18758 static inline bfd_vma
18759 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18761 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18762 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18763 bfd_vma patched_inst = 0xe8900000
18764 | (/*W=*/wback << 21)
18765 | (base_reg << 16)
18766 | (reg_mask & 0x0000ffff);
18768 return patched_inst;
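/* For example, create_instruction_ldmia (0, 1, 0x00fe), i.e. base register
   r0, writeback enabled and register list {r1-r7}, returns 0xe8b000fe,
   which encodes LDMIA r0!, {r1-r7}.  */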
18771 static inline bfd_vma
18772 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18774 /* A8.8.60 LDMDB/LDMEA (A8-402)
18775 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18776 bfd_vma patched_inst = 0xe9100000
18777 | (/*W=*/wback << 21)
18778 | (base_reg << 16)
18779 | (reg_mask & 0x0000ffff);
18781 return patched_inst;
18784 static inline bfd_vma
18785 create_instruction_mov (int target_reg, int source_reg)
18787 /* A8.8.103 MOV (register) (A8-486)
18788 MOV Rd, Rm (Encoding T1). */
18789 bfd_vma patched_inst = 0x4600
18790 | (target_reg & 0x7)
18791 | ((target_reg & 0x8) >> 3) << 7
18792 | (source_reg << 3);
18794 return patched_inst;
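/* For example, create_instruction_mov (8, 0) returns 0x4680, the 16-bit
   encoding of MOV r8, r0.  */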
18797 static inline bfd_vma
18798 create_instruction_sub (int target_reg, int source_reg, int value)
18800 /* A8.8.221 SUB (immediate) (A8-708)
18801 SUB Rd, Rn, #value (Encoding T3). */
18802 bfd_vma patched_inst = 0xf1a00000
18803 | (target_reg << 8)
18804 | (source_reg << 16)
18805 | (/*S=*/0 << 20)
18806 | ((value & 0x800) >> 11) << 26
18807 | ((value & 0x700) >> 8) << 12
18808 | (value & 0x0ff);
18810 return patched_inst;
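/* For example, create_instruction_sub (0, 1, 1) returns 0xf1a10001,
   i.e. SUB.W r0, r1, #1 (VALUE is split directly into the i:imm3:imm8
   fields of the T3 encoding).  */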
18813 static inline bfd_vma
18814 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18815 int first_reg)
18817 /* A8.8.332 VLDM (A8-922)
18818 VLDM{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18819 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18820 | (/*W=*/wback << 21)
18821 | (base_reg << 16)
18822 | (num_words & 0x000000ff)
18823 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18824 | (first_reg & 0x00000001) << 22;
18826 return patched_inst;
18829 static inline bfd_vma
18830 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18831 int first_reg)
18833 /* A8.8.332 VLDM (A8-922)
18834 VLDM{MODE} Rn!, {} (Encoding T1 or T2). */
18835 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18836 | (base_reg << 16)
18837 | (num_words & 0x000000ff)
18838 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
18839 | (first_reg & 0x00000001) << 22;
18841 return patched_inst;
18844 static inline bfd_vma
18845 create_instruction_udf_w (int value)
18847 /* A8.8.247 UDF (A8-758)
18848 Undefined (Encoding T2). */
18849 bfd_vma patched_inst = 0xf7f0a000
18850 | (value & 0x00000fff)
18851 | (value & 0x000f0000) << 16;
18853 return patched_inst;
18856 static inline bfd_vma
18857 create_instruction_udf (int value)
18859 /* A8.8.247 UDF (A8-758)
18860 Undefined (Encoding T1). */
18861 bfd_vma patched_inst = 0xde00
18862 | (value & 0xff);
18864 return patched_inst;
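/* For example, create_instruction_udf (0) returns 0xde00 and
   create_instruction_udf_w (0) returns 0xf7f0a000, the 16-bit and 32-bit
   permanently undefined instructions used below as stub filler.  */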
18867 /* Functions writing an instruction in memory, returning the next
18868 memory position to write to. */
18870 static inline bfd_byte *
18871 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18872 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18874 put_thumb2_insn (htab, output_bfd, insn, pt);
18875 return pt + 4;
18878 static inline bfd_byte *
18879 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18880 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18882 put_thumb_insn (htab, output_bfd, insn, pt);
18883 return pt + 2;
18886 /* Fill a region of memory with T1 and T2 UDF instructions, taking
18887 care of alignment. */
18889 static bfd_byte *
18890 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18891 bfd * output_bfd,
18892 const bfd_byte * const base_stub_contents,
18893 bfd_byte * const from_stub_contents,
18894 const bfd_byte * const end_stub_contents)
18896 bfd_byte *current_stub_contents = from_stub_contents;
18898 /* Fill the remainder of the stub with deterministic contents: UDF
18899 instructions.
18900 If realignment to a 4-byte boundary is needed, start with a T1 UDF so
18901 that T2 UDFs can be used for the rest. */
18902 if ((current_stub_contents < end_stub_contents)
18903 && !((current_stub_contents - base_stub_contents) % 2)
18904 && ((current_stub_contents - base_stub_contents) % 4))
18905 current_stub_contents =
18906 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18907 create_instruction_udf (0));
18909 for (; current_stub_contents < end_stub_contents;)
18910 current_stub_contents =
18911 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18912 create_instruction_udf_w (0));
18914 return current_stub_contents;
18917 /* Functions writing the stream of instructions equivalent to the
18918 derived sequence for ldmia, ldmdb, vldm respectively. */
18920 static void
18921 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
18922 bfd * output_bfd,
18923 const insn32 initial_insn,
18924 const bfd_byte *const initial_insn_addr,
18925 bfd_byte *const base_stub_contents)
18927 int wback = (initial_insn & 0x00200000) >> 21;
18928 int ri, rn = (initial_insn & 0x000F0000) >> 16;
18929 int insn_all_registers = initial_insn & 0x0000ffff;
18930 int insn_low_registers, insn_high_registers;
18931 int usable_register_mask;
18932 int nb_registers = elf32_arm_popcount (insn_all_registers);
18933 int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
18934 int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
18935 bfd_byte *current_stub_contents = base_stub_contents;
18937 BFD_ASSERT (is_thumb2_ldmia (initial_insn));
18939 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
18940 load sequences of 8 or fewer registers, which do not cause the
18941 hardware issue. */
18942 if (nb_registers <= 8)
18944 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
18945 current_stub_contents =
18946 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18947 initial_insn);
18949 /* B initial_insn_addr+4. */
18950 if (!restore_pc)
18951 current_stub_contents =
18952 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18953 create_instruction_branch_absolute
18954 (initial_insn_addr - current_stub_contents));
18956 /* Fill the remainder of the stub with deterministic contents. */
18957 current_stub_contents =
18958 stm32l4xx_fill_stub_udf (htab, output_bfd,
18959 base_stub_contents, current_stub_contents,
18960 base_stub_contents +
18961 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18963 return;
18966 /* - reg_list[13] == 0. */
18967 BFD_ASSERT ((insn_all_registers & (1 << 13))==0);
18969 /* - reg_list[14] & reg_list[15] != 1. */
18970 BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
18972 /* - if (wback==1) reg_list[rn] == 0. */
18973 BFD_ASSERT (!wback || !restore_rn);
18975 /* - nb_registers > 8. */
18976 BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
18978 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
18980 /* In the following algorithm, we split this wide LDM using 2 LDM insns:
18981 - One with the 7 lowest registers (register mask 0x007F)
18982 This LDM will finally contain between 2 and 7 registers
18983 - One with the 7 highest registers (register mask 0xDF80)
18984 This ldm will finally contain between 2 and 7 registers. */
18985 insn_low_registers = insn_all_registers & 0x007F;
18986 insn_high_registers = insn_all_registers & 0xDF80;
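/* For example, an original register list of {r1-r9, r12} (mask 0x13fe) is
   split into the low list {r1-r6} (mask 0x007e) and the high list
   {r7-r9, r12} (mask 0x1380).  */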
18988 /* A spare register may be needed during this veneer to temporarily
18989 handle the base register. This register will be restored with the
18990 last LDM operation.
18991 The usable register may be any general purpose register (that
18992 excludes PC, SP, LR : register mask is 0x1FFF). */
18993 usable_register_mask = 0x1FFF;
18995 /* Generate the stub function. */
18996 if (wback)
18998 /* LDMIA Rn!, {R-low-register-list} : (Encoding T2). */
18999 current_stub_contents =
19000 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19001 create_instruction_ldmia
19002 (rn, /*wback=*/1, insn_low_registers));
19004 /* LDMIA Rn!, {R-high-register-list} : (Encoding T2). */
19005 current_stub_contents =
19006 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19007 create_instruction_ldmia
19008 (rn, /*wback=*/1, insn_high_registers));
19009 if (!restore_pc)
19011 /* B initial_insn_addr+4. */
19012 current_stub_contents =
19013 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19014 create_instruction_branch_absolute
19015 (initial_insn_addr - current_stub_contents));
19018 else /* if (!wback). */
19020 ri = rn;
19022 /* If Rn is not part of the high-register-list, move it there. */
19023 if (!(insn_high_registers & (1 << rn)))
19025 /* Choose a Ri in the high-register-list that will be restored. */
19026 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19028 /* MOV Ri, Rn. */
19029 current_stub_contents =
19030 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19031 create_instruction_mov (ri, rn));
19034 /* LDMIA Ri!, {R-low-register-list} : (Encoding T2). */
19035 current_stub_contents =
19036 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19037 create_instruction_ldmia
19038 (ri, /*wback=*/1, insn_low_registers));
19040 /* LDMIA Ri, {R-high-register-list} : (Encoding T2). */
19041 current_stub_contents =
19042 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19043 create_instruction_ldmia
19044 (ri, /*wback=*/0, insn_high_registers));
19046 if (!restore_pc)
19048 /* B initial_insn_addr+4. */
19049 current_stub_contents =
19050 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19051 create_instruction_branch_absolute
19052 (initial_insn_addr - current_stub_contents));
19056 /* Fill the remainder of the stub with deterministic contents. */
19057 current_stub_contents =
19058 stm32l4xx_fill_stub_udf (htab, output_bfd,
19059 base_stub_contents, current_stub_contents,
19060 base_stub_contents +
19061 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19064 static void
19065 stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
19066 bfd * output_bfd,
19067 const insn32 initial_insn,
19068 const bfd_byte *const initial_insn_addr,
19069 bfd_byte *const base_stub_contents)
19071 int wback = (initial_insn & 0x00200000) >> 21;
19072 int ri, rn = (initial_insn & 0x000f0000) >> 16;
19073 int insn_all_registers = initial_insn & 0x0000ffff;
19074 int insn_low_registers, insn_high_registers;
19075 int usable_register_mask;
19076 int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
19077 int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
19078 int nb_registers = elf32_arm_popcount (insn_all_registers);
19079 bfd_byte *current_stub_contents = base_stub_contents;
19081 BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
19083 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
19084 load sequences of fewer than 8 registers, which do not trigger
19085 the hardware issue. */
19086 if (nb_registers <= 8)
19088 /* UNTOUCHED : LDMDB Rn{!}, {R-all-register-list}. */
19089 current_stub_contents =
19090 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19091 initial_insn);
19093 /* B initial_insn_addr+4. */
19094 current_stub_contents =
19095 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19096 create_instruction_branch_absolute
19097 (initial_insn_addr - current_stub_contents));
19099 /* Fill the remainder of the stub with deterministic contents. */
19100 current_stub_contents =
19101 stm32l4xx_fill_stub_udf (htab, output_bfd,
19102 base_stub_contents, current_stub_contents,
19103 base_stub_contents +
19104 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19106 return;
19109 /* - reg_list[13] == 0. */
19110 BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
19112 /* - reg_list[14] and reg_list[15] not both set. */
19113 BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
19115 /* - if (wback==1) reg_list[rn] == 0. */
19116 BFD_ASSERT (!wback || !restore_rn);
19118 /* - nb_registers > 8. */
19119 BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
19121 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
19123 /* In the following algorithm, we split this wide LDM using 2 LDM insns:
19124 - One with the 7 lowest registers (register mask 0x007F)
19125 This LDM will finally contain between 2 and 7 registers
19126 - One with the 7 highest registers (register mask 0xDF80)
19127 This LDM will finally contain between 2 and 7 registers. */
19128 insn_low_registers = insn_all_registers & 0x007F;
19129 insn_high_registers = insn_all_registers & 0xDF80;
19131 /* A spare register may be needed during this veneer to temporarily
19132 handle the base register. This register will be restored with
19133 the last LDM operation.
19134 The usable register may be any general-purpose register other than
19135 PC, SP and LR (register mask 0x1FFF). */
19136 usable_register_mask = 0x1FFF;
19138 /* Generate the stub function. */
19139 if (!wback && !restore_pc && !restore_rn)
19141 /* Choose a Ri in the low-register-list that will be restored. */
19142 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19144 /* MOV Ri, Rn. */
19145 current_stub_contents =
19146 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19147 create_instruction_mov (ri, rn));
19149 /* LDMDB Ri!, {R-high-register-list}. */
19150 current_stub_contents =
19151 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19152 create_instruction_ldmdb
19153 (ri, /*wback=*/1, insn_high_registers));
19155 /* LDMDB Ri, {R-low-register-list}. */
19156 current_stub_contents =
19157 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19158 create_instruction_ldmdb
19159 (ri, /*wback=*/0, insn_low_registers));
19161 /* B initial_insn_addr+4. */
19162 current_stub_contents =
19163 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19164 create_instruction_branch_absolute
19165 (initial_insn_addr - current_stub_contents));
19167 else if (wback && !restore_pc && !restore_rn)
19169 /* LDMDB Rn!, {R-high-register-list}. */
19170 current_stub_contents =
19171 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19172 create_instruction_ldmdb
19173 (rn, /*wback=*/1, insn_high_registers));
19175 /* LDMDB Rn!, {R-low-register-list}. */
19176 current_stub_contents =
19177 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19178 create_instruction_ldmdb
19179 (rn, /*wback=*/1, insn_low_registers));
19181 /* B initial_insn_addr+4. */
19182 current_stub_contents =
19183 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19184 create_instruction_branch_absolute
19185 (initial_insn_addr - current_stub_contents));
19187 else if (!wback && restore_pc && !restore_rn)
19189 /* Choose a Ri in the high-register-list that will be restored. */
19190 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19192 /* SUB Ri, Rn, #(4*nb_registers). */
19193 current_stub_contents =
19194 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19195 create_instruction_sub (ri, rn, (4 * nb_registers)));
19197 /* LDMIA Ri!, {R-low-register-list}. */
19198 current_stub_contents =
19199 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19200 create_instruction_ldmia
19201 (ri, /*wback=*/1, insn_low_registers));
19203 /* LDMIA Ri, {R-high-register-list}. */
19204 current_stub_contents =
19205 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19206 create_instruction_ldmia
19207 (ri, /*wback=*/0, insn_high_registers));
19209 else if (wback && restore_pc && !restore_rn)
19211 /* Choose a Ri in the high-register-list that will be restored. */
19212 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19214 /* SUB Rn, Rn, #(4*nb_registers) */
19215 current_stub_contents =
19216 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19217 create_instruction_sub (rn, rn, (4 * nb_registers)));
19219 /* MOV Ri, Rn. */
19220 current_stub_contents =
19221 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19222 create_instruction_mov (ri, rn));
19224 /* LDMIA Ri!, {R-low-register-list}. */
19225 current_stub_contents =
19226 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19227 create_instruction_ldmia
19228 (ri, /*wback=*/1, insn_low_registers));
19230 /* LDMIA Ri, {R-high-register-list}. */
19231 current_stub_contents =
19232 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19233 create_instruction_ldmia
19234 (ri, /*wback=*/0, insn_high_registers));
19236 else if (!wback && !restore_pc && restore_rn)
19238 ri = rn;
19239 if (!(insn_low_registers & (1 << rn)))
19241 /* Choose a Ri in the low-register-list that will be restored. */
19242 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19244 /* MOV Ri, Rn. */
19245 current_stub_contents =
19246 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19247 create_instruction_mov (ri, rn));
19250 /* LDMDB Ri!, {R-high-register-list}. */
19251 current_stub_contents =
19252 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19253 create_instruction_ldmdb
19254 (ri, /*wback=*/1, insn_high_registers));
19256 /* LDMDB Ri, {R-low-register-list}. */
19257 current_stub_contents =
19258 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19259 create_instruction_ldmdb
19260 (ri, /*wback=*/0, insn_low_registers));
19262 /* B initial_insn_addr+4. */
19263 current_stub_contents =
19264 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19265 create_instruction_branch_absolute
19266 (initial_insn_addr - current_stub_contents));
19268 else if (!wback && restore_pc && restore_rn)
19270 ri = rn;
19271 if (!(insn_high_registers & (1 << rn)))
19273 /* Choose a Ri in the high-register-list that will be restored. */
19274 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19277 /* SUB Ri, Rn, #(4*nb_registers). */
19278 current_stub_contents =
19279 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19280 create_instruction_sub (ri, rn, (4 * nb_registers)));
19282 /* LDMIA Ri!, {R-low-register-list}. */
19283 current_stub_contents =
19284 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19285 create_instruction_ldmia
19286 (ri, /*wback=*/1, insn_low_registers));
19288 /* LDMIA Ri, {R-high-register-list}. */
19289 current_stub_contents =
19290 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19291 create_instruction_ldmia
19292 (ri, /*wback=*/0, insn_high_registers));
19294 else if (wback && restore_rn)
19296 /* The assembler should not have accepted this encoding. */
19297 BFD_ASSERT (0 && "Cannot patch an instruction that has "
19298 "undefined behavior.\n");
19301 /* Fill the remainder of the stub with deterministic contents. */
19302 current_stub_contents =
19303 stm32l4xx_fill_stub_udf (htab, output_bfd,
19304 base_stub_contents, current_stub_contents,
19305 base_stub_contents +
19306 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
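/* For illustration only: the restore_pc branches above replace
   LDMDB Rn, {k regs} with SUB Ri, Rn, #4*k followed by LDMIA from Ri.
   A plain-C check (hypothetical example_* name, never built thanks to
   the "#if 0") that both forms touch exactly the same addresses.  */
#if 0
#include <assert.h>

static void
example_ldmdb_vs_ldmia (unsigned long rn, int k)
{
  int i;
  for (i = 0; i < k; i++)
    {
      /* LDMDB fills the i-th lowest register from the i-th lowest
         address; the block ends 4 bytes below Rn.  */
      unsigned long ldmdb_addr = rn - 4UL * (k - i);
      /* LDMIA from the adjusted base Rn - 4*k walks upwards.  */
      unsigned long ldmia_addr = (rn - 4UL * k) + 4UL * i;
      assert (ldmdb_addr == ldmia_addr);
    }
}

int
main (void)
{
  example_ldmdb_vs_ldmia (0x20001000UL, 13);
  return 0;
}
#endif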
19310 static void
19311 stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
19312 bfd * output_bfd,
19313 const insn32 initial_insn,
19314 const bfd_byte *const initial_insn_addr,
19315 bfd_byte *const base_stub_contents)
19317 int num_words = initial_insn & 0xff;
19318 bfd_byte *current_stub_contents = base_stub_contents;
19320 BFD_ASSERT (is_thumb2_vldm (initial_insn));
19322 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
19323 load sequences of fewer than 8 words, which do not trigger
19324 the hardware issue. */
19325 if (num_words <= 8)
19327 /* Untouched instruction. */
19328 current_stub_contents =
19329 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19330 initial_insn);
19332 /* B initial_insn_addr+4. */
19333 current_stub_contents =
19334 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19335 create_instruction_branch_absolute
19336 (initial_insn_addr - current_stub_contents));
19338 else
19340 bool is_dp = /* DP encoding. */
19341 (initial_insn & 0xfe100f00) == 0xec100b00;
19342 bool is_ia_nobang = /* (IA without !). */
19343 (((initial_insn << 7) >> 28) & 0xd) == 0x4;
19344 bool is_ia_bang = /* (IA with !) - includes VPOP. */
19345 (((initial_insn << 7) >> 28) & 0xd) == 0x5;
19346 bool is_db_bang = /* (DB with !). */
19347 (((initial_insn << 7) >> 28) & 0xd) == 0x9;
19348 int base_reg = ((unsigned int) initial_insn << 12) >> 28;
19349 /* d = UInt (Vd:D). */
19350 int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
19351 | (((unsigned int)initial_insn << 9) >> 31);
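/* For illustration only: with a 32-bit unsigned int, the shift pairs
   used just above are plain field extractions from the VLDM encoding;
   the masked forms below are equivalent (hypothetical example_* name,
   never built thanks to the "#if 0").  */
#if 0
#include <assert.h>

static void
example_decode_vldm_fields (unsigned int insn)
{
  unsigned int base_reg = (insn >> 16) & 0xf;  /* Rn,   bits [19:16].  */
  unsigned int vd = (insn >> 12) & 0xf;        /* Vd,   bits [15:12].  */
  unsigned int d_bit = (insn >> 22) & 0x1;     /* D,    bit  22.       */
  unsigned int num_words = insn & 0xff;        /* imm8, bits [7:0].    */
  unsigned int first_reg = (vd << 1) | d_bit;  /* UInt (Vd:D).         */

  assert (base_reg == (insn << 12) >> 28);
  assert (vd == (insn << 16) >> 28);
  assert (d_bit == (insn << 9) >> 31);
  assert (first_reg == ((((insn << 16) >> 28) << 1) | ((insn << 9) >> 31)));
  (void) num_words;
}

int
main (void)
{
  /* The identities hold for any 32-bit pattern, a real VLDM or not.  */
  example_decode_vldm_fields (0x12345678);
  example_decode_vldm_fields (0xfedcba98);
  return 0;
}
#endif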
19353 /* Compute the number of 8-word chunks needed for the split. */
19354 int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
19355 int chunk;
19357 /* The test coverage was done under the hypothesis that exactly one
19358 of the previous is_ predicates is true. */
19360 BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
19361 && !(is_ia_nobang & is_ia_bang & is_db_bang));
19363 /* We perform the splitting into 8-word chunks in one pass for all
19364 cases, then emit the adjustments:
19366 vldm rx, {...}
19367 -> vldm rx!, {8_words_or_less} for each needed 8_word
19368 -> sub rx, rx, #size (list)
19370 vldm rx!, {...}
19371 -> vldm rx!, {8_words_or_less} for each needed 8_word
19372 This also handles the vpop instruction (when rx is sp)
19374 vldmdb rx!, {...}
19375 -> vldmdb rx!, {8_words_or_less} for each needed 8_word. */
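/* For illustration only: how the loop below carves a wide transfer
   into 8-word chunks (plain C, hypothetical; the real loop emits
   create_instruction_vldmia/vldmdb instead of printing).  */
#if 0
#include <stdio.h>

int
main (void)
{
  int num_words = 13;   /* e.g. a VLDM with imm8 == 13.  */
  int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
  int chunk;

  for (chunk = 0; chunk < chunks; chunk++)
    {
      /* Every chunk but the last moves 8 words; the last one moves
         whatever remains (the same test as "chunks - (chunk + 1)").  */
      int words = (chunk < chunks - 1) ? 8 : num_words - chunk * 8;
      printf ("chunk %d: %d words, first word index %d\n",
              chunk, words, chunk * 8);
    }
  return 0;
}
#endif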
19376 for (chunk = 0; chunk < chunks; ++chunk)
19378 bfd_vma new_insn = 0;
19380 if (is_ia_nobang || is_ia_bang)
19382 new_insn = create_instruction_vldmia
19383 (base_reg,
19384 is_dp,
19385 /*wback=*/1,
19386 chunks - (chunk + 1) ?
19387 8 : num_words - chunk * 8,
19388 first_reg + chunk * 8);
19390 else if (is_db_bang)
19392 new_insn = create_instruction_vldmdb
19393 (base_reg,
19394 is_dp,
19395 chunks - (chunk + 1) ?
19396 8 : num_words - chunk * 8,
19397 first_reg + chunk * 8);
19400 if (new_insn)
19401 current_stub_contents =
19402 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19403 new_insn);
19406 /* Only this case requires the base register compensation
19407 subtract. */
19408 if (is_ia_nobang)
19410 current_stub_contents =
19411 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19412 create_instruction_sub
19413 (base_reg, base_reg, 4*num_words));
19416 /* B initial_insn_addr+4. */
19417 current_stub_contents =
19418 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19419 create_instruction_branch_absolute
19420 (initial_insn_addr - current_stub_contents));
19423 /* Fill the remainder of the stub with deterministic contents. */
19424 current_stub_contents =
19425 stm32l4xx_fill_stub_udf (htab, output_bfd,
19426 base_stub_contents, current_stub_contents,
19427 base_stub_contents +
19428 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
19431 static void
19432 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19433 bfd * output_bfd,
19434 const insn32 wrong_insn,
19435 const bfd_byte *const wrong_insn_addr,
19436 bfd_byte *const stub_contents)
19438 if (is_thumb2_ldmia (wrong_insn))
19439 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19440 wrong_insn, wrong_insn_addr,
19441 stub_contents);
19442 else if (is_thumb2_ldmdb (wrong_insn))
19443 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19444 wrong_insn, wrong_insn_addr,
19445 stub_contents);
19446 else if (is_thumb2_vldm (wrong_insn))
19447 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19448 wrong_insn, wrong_insn_addr,
19449 stub_contents);
19452 /* End of stm32l4xx work-around. */
19455 /* Do code byteswapping. Return FALSE afterwards so that the section is
19456 written out as normal. */
19458 static bool
19459 elf32_arm_write_section (bfd *output_bfd,
19460 struct bfd_link_info *link_info,
19461 asection *sec,
19462 bfd_byte *contents)
19464 unsigned int mapcount, errcount;
19465 _arm_elf_section_data *arm_data;
19466 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
19467 elf32_arm_section_map *map;
19468 elf32_vfp11_erratum_list *errnode;
19469 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
19470 bfd_vma ptr;
19471 bfd_vma end;
19472 bfd_vma offset = sec->output_section->vma + sec->output_offset;
19473 bfd_byte tmp;
19474 unsigned int i;
19476 if (globals == NULL)
19477 return false;
19479 /* If this section has not been allocated an _arm_elf_section_data
19480 structure then we cannot record anything. */
19481 arm_data = get_arm_elf_section_data (sec);
19482 if (arm_data == NULL)
19483 return false;
19485 mapcount = arm_data->mapcount;
19486 map = arm_data->map;
19487 errcount = arm_data->erratumcount;
19489 if (errcount != 0)
19491 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
19493 for (errnode = arm_data->erratumlist; errnode != 0;
19494 errnode = errnode->next)
19496 bfd_vma target = errnode->vma - offset;
19498 switch (errnode->type)
19500 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
19502 bfd_vma branch_to_veneer;
19503 /* Original condition code of instruction, plus bit mask for
19504 ARM B instruction. */
19505 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
19506 | 0x0a000000;
19508 /* The instruction is before the label. */
19509 target -= 4;
19511 /* Above offset included in -4 below. */
19512 branch_to_veneer = errnode->u.b.veneer->vma
19513 - errnode->vma - 4;
19515 if ((signed) branch_to_veneer < -(1 << 25)
19516 || (signed) branch_to_veneer >= (1 << 25))
19517 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19518 "range"), output_bfd);
19520 insn |= (branch_to_veneer >> 2) & 0xffffff;
19521 contents[endianflip ^ target] = insn & 0xff;
19522 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19523 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19524 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19526 break;
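/* For illustration only: byte-wise stores with "index ^ endianflip",
   as used just above.  With endianflip == 0 the 32-bit opcode lands
   little-endian, with endianflip == 3 it lands big-endian (plain C,
   hypothetical example_* name, never built).  */
#if 0
#include <stdio.h>

static void
example_put_word (unsigned char *buf, unsigned int target,
                  unsigned int endianflip, unsigned int insn)
{
  buf[endianflip ^ target] = insn & 0xff;
  buf[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
  buf[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
  buf[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
}

int
main (void)
{
  /* Unconditional ARM branch (cond 0xe, opcode 0x0a) with a small
     positive value in its 24-bit offset field.  */
  unsigned int insn = 0xe0000000 | 0x0a000000 | 0x000002;
  unsigned char le[4], be[4];

  example_put_word (le, 0, 0, insn);
  example_put_word (be, 0, 3, insn);
  printf ("LE image: %02x %02x %02x %02x\n", le[0], le[1], le[2], le[3]);
  printf ("BE image: %02x %02x %02x %02x\n", be[0], be[1], be[2], be[3]);
  return 0;
}
#endif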
19528 case VFP11_ERRATUM_ARM_VENEER:
19530 bfd_vma branch_from_veneer;
19531 unsigned int insn;
19533 /* Take size of veneer into account. */
19534 branch_from_veneer = errnode->u.v.branch->vma
19535 - errnode->vma - 12;
19537 if ((signed) branch_from_veneer < -(1 << 25)
19538 || (signed) branch_from_veneer >= (1 << 25))
19539 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19540 "range"), output_bfd);
19542 /* Original instruction. */
19543 insn = errnode->u.v.branch->u.b.vfp_insn;
19544 contents[endianflip ^ target] = insn & 0xff;
19545 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19546 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19547 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19549 /* Branch back to insn after original insn. */
19550 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
19551 contents[endianflip ^ (target + 4)] = insn & 0xff;
19552 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
19553 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
19554 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
19556 break;
19558 default:
19559 abort ();
19564 if (arm_data->stm32l4xx_erratumcount != 0)
19566 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
19567 stm32l4xx_errnode != 0;
19568 stm32l4xx_errnode = stm32l4xx_errnode->next)
19570 bfd_vma target = stm32l4xx_errnode->vma - offset;
19572 switch (stm32l4xx_errnode->type)
19574 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
19576 unsigned int insn;
19577 bfd_vma branch_to_veneer =
19578 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
19580 if ((signed) branch_to_veneer < -(1 << 24)
19581 || (signed) branch_to_veneer >= (1 << 24))
19583 bfd_vma out_of_range =
19584 ((signed) branch_to_veneer < -(1 << 24)) ?
19585 - branch_to_veneer - (1 << 24) :
19586 ((signed) branch_to_veneer >= (1 << 24)) ?
19587 branch_to_veneer - (1 << 24) : 0;
19589 _bfd_error_handler
19590 (_("%pB(%#" PRIx64 "): error: "
19591 "cannot create STM32L4XX veneer; "
19592 "jump out of range by %" PRId64 " bytes; "
19593 "cannot encode branch instruction"),
19594 output_bfd,
19595 (uint64_t) (stm32l4xx_errnode->vma - 4),
19596 (int64_t) out_of_range);
19597 continue;
19600 insn = create_instruction_branch_absolute
19601 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
19603 /* The instruction is before the label. */
19604 target -= 4;
19606 put_thumb2_insn (globals, output_bfd,
19607 (bfd_vma) insn, contents + target);
19609 break;
19611 case STM32L4XX_ERRATUM_VENEER:
19613 bfd_byte * veneer;
19614 bfd_byte * veneer_r;
19615 unsigned int insn;
19617 veneer = contents + target;
19618 veneer_r = veneer
19619 + stm32l4xx_errnode->u.b.veneer->vma
19620 - stm32l4xx_errnode->vma - 4;
19622 if ((signed) (veneer_r - veneer -
19623 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
19624 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
19625 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
19626 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
19627 || (signed) (veneer_r - veneer) >= (1 << 24))
19629 _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
19630 "veneer"), output_bfd);
19631 continue;
19634 /* Original instruction. */
19635 insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
19637 stm32l4xx_create_replacing_stub
19638 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
19640 break;
19642 default:
19643 abort ();
19648 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
19650 arm_unwind_table_edit *edit_node
19651 = arm_data->u.exidx.unwind_edit_list;
19652 /* Now, sec->size is the size of the section we will write. The original
19653 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
19654 markers) was sec->rawsize. (If we performed no edits, rawsize will be
19655 zero and we should use size instead.) */
19656 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
19657 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
19658 unsigned int in_index, out_index;
19659 bfd_vma add_to_offsets = 0;
19661 if (edited_contents == NULL)
19662 return false;
19663 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
19665 if (edit_node)
19667 unsigned int edit_index = edit_node->index;
19669 if (in_index < edit_index && in_index * 8 < input_size)
19671 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19672 contents + in_index * 8, add_to_offsets);
19673 out_index++;
19674 in_index++;
19676 else if (in_index == edit_index
19677 || (in_index * 8 >= input_size
19678 && edit_index == UINT_MAX))
19680 switch (edit_node->type)
19682 case DELETE_EXIDX_ENTRY:
19683 in_index++;
19684 add_to_offsets += 8;
19685 break;
19687 case INSERT_EXIDX_CANTUNWIND_AT_END:
19689 asection *text_sec = edit_node->linked_section;
19690 bfd_vma text_offset = text_sec->output_section->vma
19691 + text_sec->output_offset
19692 + text_sec->size;
19693 bfd_vma exidx_offset = offset + out_index * 8;
19694 unsigned long prel31_offset;
19696 /* Note: this is meant to be equivalent to an
19697 R_ARM_PREL31 relocation. These synthetic
19698 EXIDX_CANTUNWIND markers are not relocated by the
19699 usual BFD method. */
19700 prel31_offset = (text_offset - exidx_offset)
19701 & 0x7ffffffful;
19702 if (bfd_link_relocatable (link_info))
19704 /* A relocation is created here for the new
19705 EXIDX_CANTUNWIND entry, so there is no need to
19706 adjust the offset by hand. */
19707 prel31_offset = text_sec->output_offset
19708 + text_sec->size;
19711 /* First address we can't unwind. */
19712 bfd_put_32 (output_bfd, prel31_offset,
19713 &edited_contents[out_index * 8]);
19715 /* Code for EXIDX_CANTUNWIND. */
19716 bfd_put_32 (output_bfd, 0x1,
19717 &edited_contents[out_index * 8 + 4]);
19719 out_index++;
19720 add_to_offsets -= 8;
19722 break;
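/* For illustration only: the shape of the synthetic entry written
   just above.  Each .ARM.exidx entry is two 32-bit words: a prel31
   (31-bit self-relative) offset to the code it covers, then either
   unwind data or the value 1, EXIDX_CANTUNWIND (plain C, hypothetical
   addresses, never built).  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long text_offset = 0x00010400UL;  /* first address we cannot unwind */
  unsigned long exidx_offset = 0x00020000UL; /* where this entry will live     */
  unsigned long word0 = (text_offset - exidx_offset) & 0x7fffffffUL;
  unsigned long word1 = 0x1UL;               /* EXIDX_CANTUNWIND               */

  printf ("entry: %08lx %08lx\n", word0, word1);
  return 0;
}
#endif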
19725 edit_node = edit_node->next;
19728 else
19730 /* No more edits, copy remaining entries verbatim. */
19731 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19732 contents + in_index * 8, add_to_offsets);
19733 out_index++;
19734 in_index++;
19738 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
19739 bfd_set_section_contents (output_bfd, sec->output_section,
19740 edited_contents,
19741 (file_ptr) sec->output_offset, sec->size);
19743 return true;
19746 /* Fix code to point to Cortex-A8 erratum stubs. */
19747 if (globals->fix_cortex_a8)
19749 struct a8_branch_to_stub_data data;
19751 data.writing_section = sec;
19752 data.contents = contents;
19754 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
19755 & data);
19758 if (mapcount == 0)
19759 return false;
19761 if (globals->byteswap_code)
19763 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
19765 ptr = map[0].vma;
19766 for (i = 0; i < mapcount; i++)
19768 if (i == mapcount - 1)
19769 end = sec->size;
19770 else
19771 end = map[i + 1].vma;
19773 switch (map[i].type)
19775 case 'a':
19776 /* Byte swap code words. */
19777 while (ptr + 3 < end)
19779 tmp = contents[ptr];
19780 contents[ptr] = contents[ptr + 3];
19781 contents[ptr + 3] = tmp;
19782 tmp = contents[ptr + 1];
19783 contents[ptr + 1] = contents[ptr + 2];
19784 contents[ptr + 2] = tmp;
19785 ptr += 4;
19787 break;
19789 case 't':
19790 /* Byte swap code halfwords. */
19791 while (ptr + 1 < end)
19793 tmp = contents[ptr];
19794 contents[ptr] = contents[ptr + 1];
19795 contents[ptr + 1] = tmp;
19796 ptr += 2;
19798 break;
19800 case 'd':
19801 /* Leave data alone. */
19802 break;
19804 ptr = end;
19808 free (map);
19809 arm_data->mapcount = -1;
19810 arm_data->mapsize = 0;
19811 arm_data->map = NULL;
19813 return false;
19816 /* Mangle thumb function symbols as we read them in. */
19818 static bool
19819 elf32_arm_swap_symbol_in (bfd * abfd,
19820 const void *psrc,
19821 const void *pshn,
19822 Elf_Internal_Sym *dst)
19824 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19825 return false;
19826 dst->st_target_internal = ST_BRANCH_TO_ARM;
19828 /* New EABI objects mark thumb function symbols by setting the low bit of
19829 the address. */
19830 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19831 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19833 if (dst->st_value & 1)
19835 dst->st_value &= ~(bfd_vma) 1;
19836 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19837 ST_BRANCH_TO_THUMB);
19839 else
19840 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19842 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19844 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19845 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19847 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19848 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19849 else
19850 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19852 return true;
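/* For illustration only: the EABI convention handled above.  Thumb
   function symbols carry their Thumb-ness in bit 0 of st_value, so
   readers strip the bit and record the branch type separately (plain
   C, independent of the BFD structures, never built).  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long st_value = 0x00008001UL;   /* Thumb function at 0x8000.  */
  int is_thumb = (st_value & 1) != 0;
  unsigned long entry = st_value & ~1UL;

  printf ("entry %#lx, %s code\n", entry, is_thumb ? "Thumb" : "ARM");
  return 0;
}
#endif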
19856 /* Mangle thumb function symbols as we write them out. */
19858 static void
19859 elf32_arm_swap_symbol_out (bfd *abfd,
19860 const Elf_Internal_Sym *src,
19861 void *cdst,
19862 void *shndx)
19864 Elf_Internal_Sym newsym;
19866 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19867 of the address set, as per the new EABI. We do this unconditionally
19868 because objcopy does not set the elf header flags until after
19869 it writes out the symbol table. */
19870 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19872 newsym = *src;
19873 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19874 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19875 if (newsym.st_shndx != SHN_UNDEF)
19877 /* Do this only for defined symbols. At link time, the static
19878 linker will simulate the work of the dynamic linker in resolving
19879 symbols and will carry over the thumbness of found symbols to
19880 the output symbol table. It's not clear how it happens, but
19881 the thumbness of undefined symbols can well be different at
19882 runtime, and writing '1' for them would be confusing for users
19883 and possibly for the dynamic linker itself.
19885 newsym.st_value |= 1;
19888 src = &newsym;
19890 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19893 /* Add the PT_ARM_EXIDX program header. */
19895 static bool
19896 elf32_arm_modify_segment_map (bfd *abfd,
19897 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19899 struct elf_segment_map *m;
19900 asection *sec;
19902 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19903 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19905 /* If there is already a PT_ARM_EXIDX header, then we do not
19906 want to add another one. This situation arises when running
19907 "strip"; the input binary already has the header. */
19908 m = elf_seg_map (abfd);
19909 while (m && m->p_type != PT_ARM_EXIDX)
19910 m = m->next;
19911 if (!m)
19913 m = (struct elf_segment_map *)
19914 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19915 if (m == NULL)
19916 return false;
19917 m->p_type = PT_ARM_EXIDX;
19918 m->count = 1;
19919 m->sections[0] = sec;
19921 m->next = elf_seg_map (abfd);
19922 elf_seg_map (abfd) = m;
19926 return true;
19929 /* We may add a PT_ARM_EXIDX program header. */
19931 static int
19932 elf32_arm_additional_program_headers (bfd *abfd,
19933 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19935 asection *sec;
19937 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19938 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19939 return 1;
19940 else
19941 return 0;
19944 /* Hook called by the linker routine which adds symbols from an object
19945 file. */
19947 static bool
19948 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19949 Elf_Internal_Sym *sym, const char **namep,
19950 flagword *flagsp, asection **secp, bfd_vma *valp)
19952 if (elf32_arm_hash_table (info) == NULL)
19953 return false;
19955 if (elf32_arm_hash_table (info)->root.target_os == is_vxworks
19956 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19957 flagsp, secp, valp))
19958 return false;
19960 return true;
19963 /* We use this to override swap_symbol_in and swap_symbol_out. */
19964 const struct elf_size_info elf32_arm_size_info =
19966 sizeof (Elf32_External_Ehdr),
19967 sizeof (Elf32_External_Phdr),
19968 sizeof (Elf32_External_Shdr),
19969 sizeof (Elf32_External_Rel),
19970 sizeof (Elf32_External_Rela),
19971 sizeof (Elf32_External_Sym),
19972 sizeof (Elf32_External_Dyn),
19973 sizeof (Elf_External_Note),
19976 32, 2,
19977 ELFCLASS32, EV_CURRENT,
19978 bfd_elf32_write_out_phdrs,
19979 bfd_elf32_write_shdrs_and_ehdr,
19980 bfd_elf32_checksum_contents,
19981 bfd_elf32_write_relocs,
19982 elf32_arm_swap_symbol_in,
19983 elf32_arm_swap_symbol_out,
19984 bfd_elf32_slurp_reloc_table,
19985 bfd_elf32_slurp_symbol_table,
19986 bfd_elf32_swap_dyn_in,
19987 bfd_elf32_swap_dyn_out,
19988 bfd_elf32_swap_reloc_in,
19989 bfd_elf32_swap_reloc_out,
19990 bfd_elf32_swap_reloca_in,
19991 bfd_elf32_swap_reloca_out
19994 static bfd_vma
19995 read_code32 (const bfd *abfd, const bfd_byte *addr)
19997 /* V7 BE8 code is always little endian. */
19998 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19999 return bfd_getl32 (addr);
20001 return bfd_get_32 (abfd, addr);
20004 static bfd_vma
20005 read_code16 (const bfd *abfd, const bfd_byte *addr)
20007 /* V7 BE8 code is always little endian. */
20008 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
20009 return bfd_getl16 (addr);
20011 return bfd_get_16 (abfd, addr);
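/* For illustration only: in a BE8 image (EF_ARM_BE8) data is stored
   big-endian but instructions are stored little-endian, hence the
   forced little-endian fetches above.  A plain-C little-endian fetch
   looks like this (hypothetical helper; bfd_getl32 already does the
   same job, so this is never built).  */
#if 0
static unsigned long
example_getl32 (const unsigned char *p)
{
  return (unsigned long) p[0]
         | ((unsigned long) p[1] << 8)
         | ((unsigned long) p[2] << 16)
         | ((unsigned long) p[3] << 24);
}
#endif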
20014 /* Return the size of the plt0 entry starting at ADDR,
20015 or (bfd_vma) -1 if the size cannot be determined. */
20017 static bfd_vma
20018 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr,
20019 bfd_size_type data_size)
20021 bfd_vma first_word;
20022 bfd_vma plt0_size;
20024 if (data_size < 4)
20025 return (bfd_vma) -1;
20027 first_word = read_code32 (abfd, addr);
20029 if (first_word == elf32_arm_plt0_entry[0])
20030 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
20031 else if (first_word == elf32_thumb2_plt0_entry[0])
20032 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
20033 else
20034 /* We don't yet handle this PLT format. */
20035 return (bfd_vma) -1;
20037 return plt0_size;
20040 /* Return the size of the plt entry starting at offset OFFSET
20041 of the plt section located at address START,
20042 or (bfd_vma) -1 if the size cannot be determined. */
20044 static bfd_vma
20045 elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset,
20046 bfd_size_type data_size)
20048 bfd_vma first_insn;
20049 bfd_vma plt_size = 0;
20051 /* PLT entry size is fixed on Thumb-only platforms. */
20052 if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
20053 return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
20055 /* Respect Thumb stub if necessary. */
20056 if (offset + 2 > data_size)
20057 return (bfd_vma) -1;
20058 if (read_code16 (abfd, start + offset) == elf32_arm_plt_thumb_stub[0])
20060 plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);
20063 /* Strip immediate from first add. */
20064 if (offset + plt_size + 4 > data_size)
20065 return (bfd_vma) -1;
20066 first_insn = read_code32 (abfd, start + offset + plt_size) & 0xffffff00;
20068 #ifdef FOUR_WORD_PLT
20069 if (first_insn == elf32_arm_plt_entry[0])
20070 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
20071 #else
20072 if (first_insn == elf32_arm_plt_entry_long[0])
20073 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
20074 else if (first_insn == elf32_arm_plt_entry_short[0])
20075 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
20076 #endif
20077 else
20078 /* We don't yet handle this PLT format. */
20079 return (bfd_vma) -1;
20081 return plt_size;
20084 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
20086 static long
20087 elf32_arm_get_synthetic_symtab (bfd *abfd,
20088 long symcount ATTRIBUTE_UNUSED,
20089 asymbol **syms ATTRIBUTE_UNUSED,
20090 long dynsymcount,
20091 asymbol **dynsyms,
20092 asymbol **ret)
20094 asection *relplt;
20095 asymbol *s;
20096 arelent *p;
20097 long count, i, n;
20098 size_t size;
20099 Elf_Internal_Shdr *hdr;
20100 char *names;
20101 asection *plt;
20102 bfd_vma offset;
20103 bfd_byte *data;
20105 *ret = NULL;
20107 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
20108 return 0;
20110 if (dynsymcount <= 0)
20111 return 0;
20113 relplt = bfd_get_section_by_name (abfd, ".rel.plt");
20114 if (relplt == NULL)
20115 return 0;
20117 hdr = &elf_section_data (relplt)->this_hdr;
20118 if (hdr->sh_link != elf_dynsymtab (abfd)
20119 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
20120 return 0;
20122 plt = bfd_get_section_by_name (abfd, ".plt");
20123 if (plt == NULL)
20124 return 0;
20126 if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, true))
20127 return -1;
20129 data = NULL;
20130 if (!bfd_get_full_section_contents (abfd, plt, &data))
20131 return -1;
20133 count = NUM_SHDR_ENTRIES (hdr);
20134 size = count * sizeof (asymbol);
20135 p = relplt->relocation;
20136 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
20138 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
20139 if (p->addend != 0)
20140 size += sizeof ("+0x") - 1 + 8;
20143 offset = elf32_arm_plt0_size (abfd, data, plt->size);
20144 if (offset == (bfd_vma) -1
20145 || (s = *ret = (asymbol *) bfd_malloc (size)) == NULL)
20147 free (data);
20148 return -1;
20151 names = (char *) (s + count);
20152 p = relplt->relocation;
20153 n = 0;
20154 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
20156 size_t len;
20158 bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset, plt->size);
20159 if (plt_size == (bfd_vma) -1)
20160 break;
20162 *s = **p->sym_ptr_ptr;
20163 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
20164 we are defining a symbol, ensure one of them is set. */
20165 if ((s->flags & BSF_LOCAL) == 0)
20166 s->flags |= BSF_GLOBAL;
20167 s->flags |= BSF_SYNTHETIC;
20168 s->section = plt;
20169 s->value = offset;
20170 s->name = names;
20171 s->udata.p = NULL;
20172 len = strlen ((*p->sym_ptr_ptr)->name);
20173 memcpy (names, (*p->sym_ptr_ptr)->name, len);
20174 names += len;
20175 if (p->addend != 0)
20177 char buf[30], *a;
20179 memcpy (names, "+0x", sizeof ("+0x") - 1);
20180 names += sizeof ("+0x") - 1;
20181 bfd_sprintf_vma (abfd, buf, p->addend);
20182 for (a = buf; *a == '0'; ++a)
20184 len = strlen (a);
20185 memcpy (names, a, len);
20186 names += len;
20188 memcpy (names, "@plt", sizeof ("@plt"));
20189 names += sizeof ("@plt");
20190 ++s, ++n;
20191 offset += plt_size;
20194 free (data);
20195 return n;
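/* For illustration only: how a client can ask BFD for the synthetic
   FOO@plt symbols produced by the function above (objdump does much
   the same).  A hedged, minimal sketch: most error handling is
   omitted, the PACKAGE defines only exist to satisfy bfd.h's check
   that config.h was seen first, and the values printed are offsets
   within .plt because that is how the symbols are created above.
   Never built (under "#if 0").  */
#if 0
#define PACKAGE "example"
#define PACKAGE_VERSION "0"
#include <bfd.h>
#include <stdio.h>
#include <stdlib.h>

int
main (int argc, char **argv)
{
  bfd *abfd;
  asymbol **dynsyms, *synthsyms;
  long storage, dyncount, synthcount, i;

  if (argc != 2)
    return 1;
  bfd_init ();
  abfd = bfd_openr (argv[1], NULL);
  if (abfd == NULL || !bfd_check_format (abfd, bfd_object))
    return 1;

  storage = bfd_get_dynamic_symtab_upper_bound (abfd);
  if (storage <= 0)
    return 1;
  dynsyms = (asymbol **) malloc (storage);
  dyncount = bfd_canonicalize_dynamic_symtab (abfd, dynsyms);

  /* The ARM implementation above ignores the static symbol table
     (symcount/syms are ATTRIBUTE_UNUSED), so none is passed here.  */
  synthcount = bfd_get_synthetic_symtab (abfd, 0, NULL, dyncount,
                                         dynsyms, &synthsyms);
  for (i = 0; i < synthcount; i++)
    printf ("%#lx %s\n", (unsigned long) synthsyms[i].value,
            synthsyms[i].name);

  free (synthsyms);
  free (dynsyms);
  bfd_close (abfd);
  return 0;
}
#endif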
20198 static bool
20199 elf32_arm_section_flags (const Elf_Internal_Shdr *hdr)
20201 if (hdr->sh_flags & SHF_ARM_PURECODE)
20202 hdr->bfd_section->flags |= SEC_ELF_PURECODE;
20203 return true;
20206 static flagword
20207 elf32_arm_lookup_section_flags (char *flag_name)
20209 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
20210 return SHF_ARM_PURECODE;
20212 return SEC_NO_FLAGS;
20215 static unsigned int
20216 elf32_arm_count_additional_relocs (asection *sec)
20218 struct _arm_elf_section_data *arm_data;
20219 arm_data = get_arm_elf_section_data (sec);
20221 return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
20224 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION, which
20225 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised,
20226 FALSE otherwise. ISECTION is the best-guess matching section from the
20227 input bfd IBFD, but it might be NULL. */
20229 static bool
20230 elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
20231 bfd *obfd ATTRIBUTE_UNUSED,
20232 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
20233 Elf_Internal_Shdr *osection)
20235 switch (osection->sh_type)
20237 case SHT_ARM_EXIDX:
20239 Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
20240 Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
20241 unsigned i = 0;
20243 osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
20244 osection->sh_info = 0;
20246 /* The sh_link field must be set to the text section associated with
20247 this index section. Unfortunately the ARM EHABI does not specify
20248 exactly how to determine this association. Our caller does, however,
20249 try to match up OSECTION with its corresponding input section,
20250 so that is a good first guess. */
20251 if (isection != NULL
20252 && osection->bfd_section != NULL
20253 && isection->bfd_section != NULL
20254 && isection->bfd_section->output_section != NULL
20255 && isection->bfd_section->output_section == osection->bfd_section
20256 && iheaders != NULL
20257 && isection->sh_link > 0
20258 && isection->sh_link < elf_numsections (ibfd)
20259 && iheaders[isection->sh_link]->bfd_section != NULL
20260 && iheaders[isection->sh_link]->bfd_section->output_section != NULL
20263 for (i = elf_numsections (obfd); i-- > 0;)
20264 if (oheaders[i]->bfd_section
20265 == iheaders[isection->sh_link]->bfd_section->output_section)
20266 break;
20269 if (i == 0)
20271 /* Failing that we have to find a matching section ourselves. If
20272 we had the output section name available we could compare that
20273 with input section names. Unfortunately we don't. So instead
20274 we use a simple heuristic and look for the nearest executable
20275 section before this one. */
20276 for (i = elf_numsections (obfd); i-- > 0;)
20277 if (oheaders[i] == osection)
20278 break;
20279 if (i == 0)
20280 break;
20282 while (i-- > 0)
20283 if (oheaders[i]->sh_type == SHT_PROGBITS
20284 && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
20285 == (SHF_ALLOC | SHF_EXECINSTR))
20286 break;
20289 if (i)
20291 osection->sh_link = i;
20292 /* If the text section was part of a group
20293 then the index section should be too. */
20294 if (oheaders[i]->sh_flags & SHF_GROUP)
20295 osection->sh_flags |= SHF_GROUP;
20296 return true;
20299 break;
20301 case SHT_ARM_PREEMPTMAP:
20302 osection->sh_flags = SHF_ALLOC;
20303 break;
20305 case SHT_ARM_ATTRIBUTES:
20306 case SHT_ARM_DEBUGOVERLAY:
20307 case SHT_ARM_OVERLAYSECTION:
20308 default:
20309 break;
20312 return false;
20315 /* Returns TRUE if NAME is an ARM mapping symbol.
20316 Traditionally the symbols $a, $d and $t have been used.
20317 The ARM ELF standard also defines $x (for A64 code). It also allows a
20318 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20319 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20320 not support them here. $t.x indicates the start of ThumbEE instructions. */
20322 static bool
20323 is_arm_mapping_symbol (const char * name)
20325 return name != NULL /* Paranoia. */
20326 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20327 the mapping symbols could have acquired a prefix.
20328 We do not support this here, since such symbols no
20329 longer conform to the ARM ELF ABI. */
20330 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20331 && (name[2] == 0 || name[2] == '.');
20332 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20333 any characters that follow the period are legal characters for the body
20334 of a symbol's name. For now we just assume that this is the case. */
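/* For illustration only: names the predicate above accepts and
   rejects (plain C, hypothetical example_* helper mirroring the test,
   never built).  */
#if 0
#include <assert.h>
#include <string.h>

static int
example_is_mapping_symbol (const char *name)
{
  return name != NULL
         && name[0] == '$'
         && name[1] != '\0'
         && strchr ("adtx", name[1]) != NULL
         && (name[2] == '\0' || name[2] == '.');
}

int
main (void)
{
  assert (example_is_mapping_symbol ("$a"));      /* ARM code         */
  assert (example_is_mapping_symbol ("$d"));      /* data             */
  assert (example_is_mapping_symbol ("$t.x"));    /* ThumbEE start    */
  assert (!example_is_mapping_symbol ("$p"));     /* not supported    */
  assert (!example_is_mapping_symbol ("main"));   /* ordinary symbol  */
  return 0;
}
#endif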
20337 /* Make sure that mapping symbols in object files are not removed via the
20338 "strip --strip-unneeded" tool. These symbols are needed in order to
20339 correctly generate interworking veneers, and for byte swapping code
20340 regions. Once an object file has been linked, it is safe to remove the
20341 symbols as they will no longer be needed. */
20343 static void
20344 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20346 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20347 && sym->section != bfd_abs_section_ptr
20348 && is_arm_mapping_symbol (sym->name))
20349 sym->flags |= BSF_KEEP;
20352 #undef elf_backend_copy_special_section_fields
20353 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20355 #define ELF_ARCH bfd_arch_arm
20356 #define ELF_TARGET_ID ARM_ELF_DATA
20357 #define ELF_MACHINE_CODE EM_ARM
20358 #define ELF_MAXPAGESIZE 0x1000
20359 #define ELF_COMMONPAGESIZE 0x1000
20361 #define bfd_elf32_mkobject elf32_arm_mkobject
20363 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20364 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20365 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20366 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20367 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20368 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20369 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20370 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20371 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20372 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20373 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20374 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20376 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20377 #define elf_backend_maybe_function_sym elf32_arm_maybe_function_sym
20378 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
20379 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
20380 #define elf_backend_check_relocs elf32_arm_check_relocs
20381 #define elf_backend_update_relocs elf32_arm_update_relocs
20382 #define elf_backend_relocate_section elf32_arm_relocate_section
20383 #define elf_backend_write_section elf32_arm_write_section
20384 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
20385 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
20386 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
20387 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
20388 #define elf_backend_late_size_sections elf32_arm_late_size_sections
20389 #define elf_backend_early_size_sections elf32_arm_early_size_sections
20390 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
20391 #define elf_backend_init_file_header elf32_arm_init_file_header
20392 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
20393 #define elf_backend_object_p elf32_arm_object_p
20394 #define elf_backend_fake_sections elf32_arm_fake_sections
20395 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
20396 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20397 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
20398 #define elf_backend_size_info elf32_arm_size_info
20399 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20400 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
20401 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
20402 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
20403 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
20404 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
20405 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
20406 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
20408 #define elf_backend_can_refcount 1
20409 #define elf_backend_can_gc_sections 1
20410 #define elf_backend_plt_readonly 1
20411 #define elf_backend_want_got_plt 1
20412 #define elf_backend_want_plt_sym 0
20413 #define elf_backend_want_dynrelro 1
20414 #define elf_backend_may_use_rel_p 1
20415 #define elf_backend_may_use_rela_p 0
20416 #define elf_backend_default_use_rela_p 0
20417 #define elf_backend_dtrel_excludes_plt 1
20419 #define elf_backend_got_header_size 12
20420 #define elf_backend_extern_protected_data 0
20422 #undef elf_backend_obj_attrs_vendor
20423 #define elf_backend_obj_attrs_vendor "aeabi"
20424 #undef elf_backend_obj_attrs_section
20425 #define elf_backend_obj_attrs_section ".ARM.attributes"
20426 #undef elf_backend_obj_attrs_arg_type
20427 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
20428 #undef elf_backend_obj_attrs_section_type
20429 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
20430 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
20431 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
20433 #undef elf_backend_section_flags
20434 #define elf_backend_section_flags elf32_arm_section_flags
20435 #undef elf_backend_lookup_section_flags_hook
20436 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
20438 #define elf_backend_linux_prpsinfo32_ugid16 true
20440 #include "elf32-target.h"
20442 /* Native Client targets. */
20444 #undef TARGET_LITTLE_SYM
20445 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
20446 #undef TARGET_LITTLE_NAME
20447 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
20448 #undef TARGET_BIG_SYM
20449 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
20450 #undef TARGET_BIG_NAME
20451 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
20453 /* Like elf32_arm_link_hash_table_create -- but overrides
20454 appropriately for NaCl. */
20456 static struct bfd_link_hash_table *
20457 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20459 struct bfd_link_hash_table *ret;
20461 ret = elf32_arm_link_hash_table_create (abfd);
20462 if (ret)
20464 struct elf32_arm_link_hash_table *htab
20465 = (struct elf32_arm_link_hash_table *) ret;
20467 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20468 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20470 return ret;
20473 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20474 really need to use elf32_arm_modify_segment_map. But we do it
20475 anyway just to reduce gratuitous differences with the stock ARM backend. */
20477 static bool
20478 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20480 return (elf32_arm_modify_segment_map (abfd, info)
20481 && nacl_modify_segment_map (abfd, info));
20484 static bool
20485 elf32_arm_nacl_final_write_processing (bfd *abfd)
20487 arm_final_write_processing (abfd);
20488 return nacl_final_write_processing (abfd);
20491 static bfd_vma
20492 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20493 const arelent *rel ATTRIBUTE_UNUSED)
20495 return plt->vma
20496 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20497 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20500 #undef elf32_bed
20501 #define elf32_bed elf32_arm_nacl_bed
20502 #undef bfd_elf32_bfd_link_hash_table_create
20503 #define bfd_elf32_bfd_link_hash_table_create \
20504 elf32_arm_nacl_link_hash_table_create
20505 #undef elf_backend_plt_alignment
20506 #define elf_backend_plt_alignment 4
20507 #undef elf_backend_modify_segment_map
20508 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
20509 #undef elf_backend_modify_headers
20510 #define elf_backend_modify_headers nacl_modify_headers
20511 #undef elf_backend_final_write_processing
20512 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
20513 #undef bfd_elf32_get_synthetic_symtab
20514 #undef elf_backend_plt_sym_val
20515 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
20516 #undef elf_backend_copy_special_section_fields
20518 #undef ELF_MINPAGESIZE
20519 #undef ELF_COMMONPAGESIZE
20521 #undef ELF_TARGET_OS
20522 #define ELF_TARGET_OS is_nacl
20524 #include "elf32-target.h"
20526 /* Reset to defaults. */
20527 #undef elf_backend_plt_alignment
20528 #undef elf_backend_modify_segment_map
20529 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20530 #undef elf_backend_modify_headers
20531 #undef elf_backend_final_write_processing
20532 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20533 #undef ELF_MINPAGESIZE
20534 #undef ELF_COMMONPAGESIZE
20535 #define ELF_COMMONPAGESIZE 0x1000
20538 /* FDPIC Targets. */
20540 #undef TARGET_LITTLE_SYM
20541 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
20542 #undef TARGET_LITTLE_NAME
20543 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
20544 #undef TARGET_BIG_SYM
20545 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
20546 #undef TARGET_BIG_NAME
20547 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
20548 #undef elf_match_priority
20549 #define elf_match_priority 128
20550 #undef ELF_OSABI
20551 #define ELF_OSABI ELFOSABI_ARM_FDPIC
20553 /* Like elf32_arm_link_hash_table_create -- but overrides
20554 appropriately for FDPIC. */
20556 static struct bfd_link_hash_table *
20557 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20559 struct bfd_link_hash_table *ret;
20561 ret = elf32_arm_link_hash_table_create (abfd);
20562 if (ret)
20564 struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20566 htab->fdpic_p = 1;
20568 return ret;
20571 /* We need dynamic symbols for every section, since segments can
20572 relocate independently. */
20573 static bool
20574 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20575 struct bfd_link_info *info
20576 ATTRIBUTE_UNUSED,
20577 asection *p ATTRIBUTE_UNUSED)
20579 switch (elf_section_data (p)->this_hdr.sh_type)
20581 case SHT_PROGBITS:
20582 case SHT_NOBITS:
20583 /* If sh_type has not yet been decided, assume it could be
20584 SHT_PROGBITS/SHT_NOBITS. */
20585 case SHT_NULL:
20586 return false;
20588 /* There shouldn't be section relative relocations
20589 against any other section. */
20590 default:
20591 return true;
20595 #undef elf32_bed
20596 #define elf32_bed elf32_arm_fdpic_bed
20598 #undef bfd_elf32_bfd_link_hash_table_create
20599 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
20601 #undef elf_backend_omit_section_dynsym
20602 #define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
20604 #undef ELF_TARGET_OS
20606 #include "elf32-target.h"
20608 #undef elf_match_priority
20609 #undef ELF_OSABI
20610 #undef elf_backend_omit_section_dynsym
20612 /* VxWorks Targets. */
20614 #undef TARGET_LITTLE_SYM
20615 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
20616 #undef TARGET_LITTLE_NAME
20617 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
20618 #undef TARGET_BIG_SYM
20619 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
20620 #undef TARGET_BIG_NAME
20621 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
20623 /* Like elf32_arm_link_hash_table_create -- but overrides
20624 appropriately for VxWorks. */
20626 static struct bfd_link_hash_table *
20627 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20629 struct bfd_link_hash_table *ret;
20631 ret = elf32_arm_link_hash_table_create (abfd);
20632 if (ret)
20634 struct elf32_arm_link_hash_table *htab
20635 = (struct elf32_arm_link_hash_table *) ret;
20636 htab->use_rel = 0;
20638 return ret;
20641 static bool
20642 elf32_arm_vxworks_final_write_processing (bfd *abfd)
20644 arm_final_write_processing (abfd);
20645 return elf_vxworks_final_write_processing (abfd);
20648 #undef elf32_bed
20649 #define elf32_bed elf32_arm_vxworks_bed
20651 #undef bfd_elf32_bfd_link_hash_table_create
20652 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
20653 #undef elf_backend_final_write_processing
20654 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
20655 #undef elf_backend_emit_relocs
20656 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
20658 #undef elf_backend_may_use_rel_p
20659 #define elf_backend_may_use_rel_p 0
20660 #undef elf_backend_may_use_rela_p
20661 #define elf_backend_may_use_rela_p 1
20662 #undef elf_backend_default_use_rela_p
20663 #define elf_backend_default_use_rela_p 1
20664 #undef elf_backend_want_plt_sym
20665 #define elf_backend_want_plt_sym 1
20666 #undef ELF_MAXPAGESIZE
20667 #define ELF_MAXPAGESIZE 0x1000
20668 #undef ELF_TARGET_OS
20669 #define ELF_TARGET_OS is_vxworks
20671 #include "elf32-target.h"
20674 /* Merge backend specific data from an object file to the output
20675 object file when linking. */
20677 static bool
20678 elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
20680 bfd *obfd = info->output_bfd;
20681 flagword out_flags;
20682 flagword in_flags;
20683 bool flags_compatible = true;
20684 asection *sec;
20686 /* Check if we have the same endianness. */
20687 if (! _bfd_generic_verify_endian_match (ibfd, info))
20688 return false;
20690 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
20691 return true;
20693 if (!elf32_arm_merge_eabi_attributes (ibfd, info))
20694 return false;
20696 /* The input BFD must have had its flags initialised. */
20697 /* The following seems bogus to me -- The flags are initialized in
20698 the assembler but I don't think an elf_flags_init field is
20699 written into the object. */
20700 /* BFD_ASSERT (elf_flags_init (ibfd)); */
20702 in_flags = elf_elfheader (ibfd)->e_flags;
20703 out_flags = elf_elfheader (obfd)->e_flags;
20705 /* In theory there is no reason why we couldn't handle this. However
20706 in practice it isn't even close to working and there is no real
20707 reason to want it. */
20708 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
20709 && !(ibfd->flags & DYNAMIC)
20710 && (in_flags & EF_ARM_BE8))
20712 _bfd_error_handler (_("error: %pB is already in final BE8 format"),
20713 ibfd);
20714 return false;
20717 if (!elf_flags_init (obfd))
20719 /* If the input has no flags set, then do not set the output flags.
20720 This will allow future bfds to determine the desired output flags.
20721 If no input bfds have any flags set, then neither will the output bfd.
20723 Note - we used to restrict this test to when the input architecture
20724 variant was the default variant, but this does not allow for
20725 linker scripts which override the default. See PR 28910 for an
20726 example. */
20727 if (in_flags == 0)
20728 return true;
20730 elf_flags_init (obfd) = true;
20731 elf_elfheader (obfd)->e_flags = in_flags;
20733 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
20734 && bfd_get_arch_info (obfd)->the_default)
20735 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
20737 return true;
20740 /* Determine what should happen if the input ARM architecture
20741 does not match the output ARM architecture. */
20742 if (! bfd_arm_merge_machines (ibfd, obfd))
20743 return false;
20745 /* Identical flags must be compatible. */
20746 if (in_flags == out_flags)
20747 return true;
20749 /* Check to see if the input BFD actually contains any sections. If
20750 not, its flags may not have been initialised either, but it
20751 cannot actually cause any incompatibility. Do not short-circuit
20752 dynamic objects; their section list may be emptied by
20753 elf_link_add_object_symbols.
20755 Also check to see if there are no code sections in the input.
20756 In this case there is no need to check for code-specific flags.
20757 XXX - do we need to worry about floating-point format compatibility
20758 in data sections? */
20759 if (!(ibfd->flags & DYNAMIC))
20760 {
20761 bool null_input_bfd = true;
20762 bool only_data_sections = true;
20764 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
20765 {
20766 /* Ignore synthetic glue sections. */
20767 if (strcmp (sec->name, ".glue_7")
20768 && strcmp (sec->name, ".glue_7t"))
20769 {
20770 if ((bfd_section_flags (sec)
20771 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20772 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20773 only_data_sections = false;
20775 null_input_bfd = false;
20776 break;
20777 }
20778 }
20780 if (null_input_bfd || only_data_sections)
20781 return true;
20782 }
20784 /* Complain about various flag mismatches. */
20785 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
20786 EF_ARM_EABI_VERSION (out_flags)))
20787 {
20788 _bfd_error_handler
20789 (_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
20790 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
20791 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
20792 return false;
20793 }
20795 /* Not sure what needs to be checked for EABI versions >= 1. */
20796 /* VxWorks libraries do not use these flags. */
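/* APCS-26 code uses the old 26-bit address space, with the status
   flags packed into R15 alongside the PC, so it cannot safely be
   mixed with 32-bit APCS code; hence the hard error below.  */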
20797 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
20798 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
20799 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
20800 {
20801 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
20802 {
20803 _bfd_error_handler
20804 (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
20805 ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
20806 obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
20807 flags_compatible = false;
20808 }
20810 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
20811 {
20812 if (in_flags & EF_ARM_APCS_FLOAT)
20813 _bfd_error_handler
20814 (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
20815 ibfd, obfd);
20816 else
20817 _bfd_error_handler
20818 (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
20819 ibfd, obfd);
20821 flags_compatible = false;
20822 }
20824 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
20825 {
20826 if (in_flags & EF_ARM_VFP_FLOAT)
20827 _bfd_error_handler
20828 (_("error: %pB uses %s instructions, whereas %pB does not"),
20829 ibfd, "VFP", obfd);
20830 else
20831 _bfd_error_handler
20832 (_("error: %pB uses %s instructions, whereas %pB does not"),
20833 ibfd, "FPA", obfd);
20835 flags_compatible = false;
20836 }
20838 #ifdef EF_ARM_SOFT_FLOAT
20839 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
20840 {
20841 /* We can allow interworking between code that is VFP format
20842 layout, and uses either soft float or integer regs for
20843 passing floating point arguments and results. We already
20844 know that the APCS_FLOAT flags match; similarly for VFP
20845 flags. */
20846 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
20847 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
20848 {
20849 if (in_flags & EF_ARM_SOFT_FLOAT)
20850 _bfd_error_handler
20851 (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
20852 ibfd, obfd);
20853 else
20854 _bfd_error_handler
20855 (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
20856 ibfd, obfd);
20858 flags_compatible = false;
20859 }
20860 }
20861 #endif
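/* These legacy flag checks (and the interworking warning below) apply
   only to pre-EABI objects; EABI objects convey the equivalent
   information through the build attributes merged by
   elf32_arm_merge_eabi_attributes earlier in this function.  */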
20863 /* Interworking mismatch is only a warning. */
20864 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
20865 {
20866 if (in_flags & EF_ARM_INTERWORK)
20867 {
20868 _bfd_error_handler
20869 (_("warning: %pB supports interworking, whereas %pB does not"),
20870 ibfd, obfd);
20871 }
20872 else
20873 {
20874 _bfd_error_handler
20875 (_("warning: %pB does not support interworking, whereas %pB does"),
20876 ibfd, obfd);
20877 }
20878 }
20879 }
20881 return flags_compatible;