binutils-gdb: bfd/elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2021 Free Software Foundation, Inc.
4 This file is part of BFD, the Binary File Descriptor library.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
21 #include "sysdep.h"
22 #include <limits.h>
24 #include "bfd.h"
25 #include "libiberty.h"
26 #include "libbfd.h"
27 #include "elf-bfd.h"
28 #include "elf-nacl.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31 #include "elf32-arm.h"
32 #include "cpu-arm.h"
34 /* Return the relocation section associated with NAME. HTAB is the
35 bfd's elf32_arm_link_hash_table. */
36 #define RELOC_SECTION(HTAB, NAME) \
37 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
39 /* Return size of a relocation entry. HTAB is the bfd's
40 elf32_arm_link_hash_table. */
41 #define RELOC_SIZE(HTAB) \
42 ((HTAB)->use_rel \
43 ? sizeof (Elf32_External_Rel) \
44 : sizeof (Elf32_External_Rela))
46 /* Return function to swap relocations in. HTAB is the bfd's
47 elf32_arm_link_hash_table. */
48 #define SWAP_RELOC_IN(HTAB) \
49 ((HTAB)->use_rel \
50 ? bfd_elf32_swap_reloc_in \
51 : bfd_elf32_swap_reloca_in)
53 /* Return function to swap relocations out. HTAB is the bfd's
54 elf32_arm_link_hash_table. */
55 #define SWAP_RELOC_OUT(HTAB) \
56 ((HTAB)->use_rel \
57 ? bfd_elf32_swap_reloc_out \
58 : bfd_elf32_swap_reloca_out)
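/* Editorial sketch (not part of the original source): how the macros
   above are typically combined when the backend emits one dynamic
   relocation.  The function name and the `htab'/`sreloc' parameters are
   hypothetical; only RELOC_SECTION, RELOC_SIZE and SWAP_RELOC_OUT are
   taken from the definitions above.  */
#if 0
static void
example_emit_dyn_reloc (bfd *output_bfd,
                        struct elf32_arm_link_hash_table *htab,
                        asection *sreloc,
                        Elf_Internal_Rela *rel)
{
  /* ".rel.dyn" when the target uses REL (use_rel set), ".rela.dyn"
     otherwise.  */
  const char *relsec_name = RELOC_SECTION (htab, ".dyn");
  /* Each slot is sizeof (Elf32_External_Rel) or Elf32_External_Rela.  */
  bfd_byte *loc = sreloc->contents
                  + sreloc->reloc_count++ * RELOC_SIZE (htab);

  (void) relsec_name;
  SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
}
#endif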
60 #define elf_info_to_howto NULL
61 #define elf_info_to_howto_rel elf32_arm_info_to_howto
63 #define ARM_ELF_ABI_VERSION 0
64 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
66 /* The Adjusted Place, as defined by AAELF. */
67 #define Pa(X) ((X) & 0xfffffffc)
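/* Editorial note: Pa simply clears the low two bits of its argument, e.g.
   Pa (0x8005) == 0x8004.  This is the word-aligned "adjusted place" that
   AAELF uses for Thumb relocations where the PC reads as word-aligned,
   such as BLX branches and PC-relative loads.  */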
69 static bool elf32_arm_write_section (bfd *output_bfd,
70 struct bfd_link_info *link_info,
71 asection *sec,
72 bfd_byte *contents);
74 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
75 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
76 in that slot. */
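/* Editorial sketch (an assumption about usage, not the file's actual
   lookup routine): because the table below is indexed directly by
   relocation number, a lookup reduces to a bounds check plus an array
   access, roughly

     if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
       howto = &elf32_arm_howto_table_1[r_type];

   with a second table, elf32_arm_howto_table_2 further down, covering
   relocation numbers from R_ARM_IRELATIVE (160) onwards.  */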
78 static reloc_howto_type elf32_arm_howto_table_1[] =
79 {
80 /* No relocation. */
81 HOWTO (R_ARM_NONE, /* type */
82 0, /* rightshift */
83 3, /* size (0 = byte, 1 = short, 2 = long) */
84 0, /* bitsize */
85 false, /* pc_relative */
86 0, /* bitpos */
87 complain_overflow_dont,/* complain_on_overflow */
88 bfd_elf_generic_reloc, /* special_function */
89 "R_ARM_NONE", /* name */
90 false, /* partial_inplace */
91 0, /* src_mask */
92 0, /* dst_mask */
93 false), /* pcrel_offset */
95 HOWTO (R_ARM_PC24, /* type */
96 2, /* rightshift */
97 2, /* size (0 = byte, 1 = short, 2 = long) */
98 24, /* bitsize */
99 true, /* pc_relative */
100 0, /* bitpos */
101 complain_overflow_signed,/* complain_on_overflow */
102 bfd_elf_generic_reloc, /* special_function */
103 "R_ARM_PC24", /* name */
104 false, /* partial_inplace */
105 0x00ffffff, /* src_mask */
106 0x00ffffff, /* dst_mask */
107 true), /* pcrel_offset */
109 /* 32 bit absolute */
110 HOWTO (R_ARM_ABS32, /* type */
111 0, /* rightshift */
112 2, /* size (0 = byte, 1 = short, 2 = long) */
113 32, /* bitsize */
114 false, /* pc_relative */
115 0, /* bitpos */
116 complain_overflow_bitfield,/* complain_on_overflow */
117 bfd_elf_generic_reloc, /* special_function */
118 "R_ARM_ABS32", /* name */
119 false, /* partial_inplace */
120 0xffffffff, /* src_mask */
121 0xffffffff, /* dst_mask */
122 false), /* pcrel_offset */
124 /* standard 32bit pc-relative reloc */
125 HOWTO (R_ARM_REL32, /* type */
126 0, /* rightshift */
127 2, /* size (0 = byte, 1 = short, 2 = long) */
128 32, /* bitsize */
129 true, /* pc_relative */
130 0, /* bitpos */
131 complain_overflow_bitfield,/* complain_on_overflow */
132 bfd_elf_generic_reloc, /* special_function */
133 "R_ARM_REL32", /* name */
134 false, /* partial_inplace */
135 0xffffffff, /* src_mask */
136 0xffffffff, /* dst_mask */
137 true), /* pcrel_offset */
139 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140 HOWTO (R_ARM_LDR_PC_G0, /* type */
141 0, /* rightshift */
142 0, /* size (0 = byte, 1 = short, 2 = long) */
143 32, /* bitsize */
144 true, /* pc_relative */
145 0, /* bitpos */
146 complain_overflow_dont,/* complain_on_overflow */
147 bfd_elf_generic_reloc, /* special_function */
148 "R_ARM_LDR_PC_G0", /* name */
149 false, /* partial_inplace */
150 0xffffffff, /* src_mask */
151 0xffffffff, /* dst_mask */
152 true), /* pcrel_offset */
154 /* 16 bit absolute */
155 HOWTO (R_ARM_ABS16, /* type */
156 0, /* rightshift */
157 1, /* size (0 = byte, 1 = short, 2 = long) */
158 16, /* bitsize */
159 false, /* pc_relative */
160 0, /* bitpos */
161 complain_overflow_bitfield,/* complain_on_overflow */
162 bfd_elf_generic_reloc, /* special_function */
163 "R_ARM_ABS16", /* name */
164 false, /* partial_inplace */
165 0x0000ffff, /* src_mask */
166 0x0000ffff, /* dst_mask */
167 false), /* pcrel_offset */
169 /* 12 bit absolute */
170 HOWTO (R_ARM_ABS12, /* type */
171 0, /* rightshift */
172 2, /* size (0 = byte, 1 = short, 2 = long) */
173 12, /* bitsize */
174 false, /* pc_relative */
175 0, /* bitpos */
176 complain_overflow_bitfield,/* complain_on_overflow */
177 bfd_elf_generic_reloc, /* special_function */
178 "R_ARM_ABS12", /* name */
179 false, /* partial_inplace */
180 0x00000fff, /* src_mask */
181 0x00000fff, /* dst_mask */
182 false), /* pcrel_offset */
184 HOWTO (R_ARM_THM_ABS5, /* type */
185 6, /* rightshift */
186 1, /* size (0 = byte, 1 = short, 2 = long) */
187 5, /* bitsize */
188 false, /* pc_relative */
189 0, /* bitpos */
190 complain_overflow_bitfield,/* complain_on_overflow */
191 bfd_elf_generic_reloc, /* special_function */
192 "R_ARM_THM_ABS5", /* name */
193 false, /* partial_inplace */
194 0x000007e0, /* src_mask */
195 0x000007e0, /* dst_mask */
196 false), /* pcrel_offset */
198 /* 8 bit absolute */
199 HOWTO (R_ARM_ABS8, /* type */
200 0, /* rightshift */
201 0, /* size (0 = byte, 1 = short, 2 = long) */
202 8, /* bitsize */
203 false, /* pc_relative */
204 0, /* bitpos */
205 complain_overflow_bitfield,/* complain_on_overflow */
206 bfd_elf_generic_reloc, /* special_function */
207 "R_ARM_ABS8", /* name */
208 false, /* partial_inplace */
209 0x000000ff, /* src_mask */
210 0x000000ff, /* dst_mask */
211 false), /* pcrel_offset */
213 HOWTO (R_ARM_SBREL32, /* type */
214 0, /* rightshift */
215 2, /* size (0 = byte, 1 = short, 2 = long) */
216 32, /* bitsize */
217 false, /* pc_relative */
218 0, /* bitpos */
219 complain_overflow_dont,/* complain_on_overflow */
220 bfd_elf_generic_reloc, /* special_function */
221 "R_ARM_SBREL32", /* name */
222 false, /* partial_inplace */
223 0xffffffff, /* src_mask */
224 0xffffffff, /* dst_mask */
225 false), /* pcrel_offset */
227 HOWTO (R_ARM_THM_CALL, /* type */
228 1, /* rightshift */
229 2, /* size (0 = byte, 1 = short, 2 = long) */
230 24, /* bitsize */
231 true, /* pc_relative */
232 0, /* bitpos */
233 complain_overflow_signed,/* complain_on_overflow */
234 bfd_elf_generic_reloc, /* special_function */
235 "R_ARM_THM_CALL", /* name */
236 false, /* partial_inplace */
237 0x07ff2fff, /* src_mask */
238 0x07ff2fff, /* dst_mask */
239 true), /* pcrel_offset */
241 HOWTO (R_ARM_THM_PC8, /* type */
242 1, /* rightshift */
243 1, /* size (0 = byte, 1 = short, 2 = long) */
244 8, /* bitsize */
245 true, /* pc_relative */
246 0, /* bitpos */
247 complain_overflow_signed,/* complain_on_overflow */
248 bfd_elf_generic_reloc, /* special_function */
249 "R_ARM_THM_PC8", /* name */
250 false, /* partial_inplace */
251 0x000000ff, /* src_mask */
252 0x000000ff, /* dst_mask */
253 true), /* pcrel_offset */
255 HOWTO (R_ARM_BREL_ADJ, /* type */
256 1, /* rightshift */
257 1, /* size (0 = byte, 1 = short, 2 = long) */
258 32, /* bitsize */
259 false, /* pc_relative */
260 0, /* bitpos */
261 complain_overflow_signed,/* complain_on_overflow */
262 bfd_elf_generic_reloc, /* special_function */
263 "R_ARM_BREL_ADJ", /* name */
264 false, /* partial_inplace */
265 0xffffffff, /* src_mask */
266 0xffffffff, /* dst_mask */
267 false), /* pcrel_offset */
269 HOWTO (R_ARM_TLS_DESC, /* type */
270 0, /* rightshift */
271 2, /* size (0 = byte, 1 = short, 2 = long) */
272 32, /* bitsize */
273 false, /* pc_relative */
274 0, /* bitpos */
275 complain_overflow_bitfield,/* complain_on_overflow */
276 bfd_elf_generic_reloc, /* special_function */
277 "R_ARM_TLS_DESC", /* name */
278 false, /* partial_inplace */
279 0xffffffff, /* src_mask */
280 0xffffffff, /* dst_mask */
281 false), /* pcrel_offset */
283 HOWTO (R_ARM_THM_SWI8, /* type */
284 0, /* rightshift */
285 0, /* size (0 = byte, 1 = short, 2 = long) */
286 0, /* bitsize */
287 false, /* pc_relative */
288 0, /* bitpos */
289 complain_overflow_signed,/* complain_on_overflow */
290 bfd_elf_generic_reloc, /* special_function */
291 "R_ARM_SWI8", /* name */
292 false, /* partial_inplace */
293 0x00000000, /* src_mask */
294 0x00000000, /* dst_mask */
295 false), /* pcrel_offset */
297 /* BLX instruction for the ARM. */
298 HOWTO (R_ARM_XPC25, /* type */
299 2, /* rightshift */
300 2, /* size (0 = byte, 1 = short, 2 = long) */
301 24, /* bitsize */
302 true, /* pc_relative */
303 0, /* bitpos */
304 complain_overflow_signed,/* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_ARM_XPC25", /* name */
307 false, /* partial_inplace */
308 0x00ffffff, /* src_mask */
309 0x00ffffff, /* dst_mask */
310 true), /* pcrel_offset */
312 /* BLX instruction for the Thumb. */
313 HOWTO (R_ARM_THM_XPC22, /* type */
314 2, /* rightshift */
315 2, /* size (0 = byte, 1 = short, 2 = long) */
316 24, /* bitsize */
317 true, /* pc_relative */
318 0, /* bitpos */
319 complain_overflow_signed,/* complain_on_overflow */
320 bfd_elf_generic_reloc, /* special_function */
321 "R_ARM_THM_XPC22", /* name */
322 false, /* partial_inplace */
323 0x07ff2fff, /* src_mask */
324 0x07ff2fff, /* dst_mask */
325 true), /* pcrel_offset */
327 /* Dynamic TLS relocations. */
329 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
330 0, /* rightshift */
331 2, /* size (0 = byte, 1 = short, 2 = long) */
332 32, /* bitsize */
333 false, /* pc_relative */
334 0, /* bitpos */
335 complain_overflow_bitfield,/* complain_on_overflow */
336 bfd_elf_generic_reloc, /* special_function */
337 "R_ARM_TLS_DTPMOD32", /* name */
338 true, /* partial_inplace */
339 0xffffffff, /* src_mask */
340 0xffffffff, /* dst_mask */
341 false), /* pcrel_offset */
343 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
344 0, /* rightshift */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
346 32, /* bitsize */
347 false, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_bitfield,/* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 "R_ARM_TLS_DTPOFF32", /* name */
352 true, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 false), /* pcrel_offset */
357 HOWTO (R_ARM_TLS_TPOFF32, /* type */
358 0, /* rightshift */
359 2, /* size (0 = byte, 1 = short, 2 = long) */
360 32, /* bitsize */
361 false, /* pc_relative */
362 0, /* bitpos */
363 complain_overflow_bitfield,/* complain_on_overflow */
364 bfd_elf_generic_reloc, /* special_function */
365 "R_ARM_TLS_TPOFF32", /* name */
366 true, /* partial_inplace */
367 0xffffffff, /* src_mask */
368 0xffffffff, /* dst_mask */
369 false), /* pcrel_offset */
371 /* Relocs used in ARM Linux */
373 HOWTO (R_ARM_COPY, /* type */
374 0, /* rightshift */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
376 32, /* bitsize */
377 false, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_bitfield,/* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_ARM_COPY", /* name */
382 true, /* partial_inplace */
383 0xffffffff, /* src_mask */
384 0xffffffff, /* dst_mask */
385 false), /* pcrel_offset */
387 HOWTO (R_ARM_GLOB_DAT, /* type */
388 0, /* rightshift */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
390 32, /* bitsize */
391 false, /* pc_relative */
392 0, /* bitpos */
393 complain_overflow_bitfield,/* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_ARM_GLOB_DAT", /* name */
396 true, /* partial_inplace */
397 0xffffffff, /* src_mask */
398 0xffffffff, /* dst_mask */
399 false), /* pcrel_offset */
401 HOWTO (R_ARM_JUMP_SLOT, /* type */
402 0, /* rightshift */
403 2, /* size (0 = byte, 1 = short, 2 = long) */
404 32, /* bitsize */
405 false, /* pc_relative */
406 0, /* bitpos */
407 complain_overflow_bitfield,/* complain_on_overflow */
408 bfd_elf_generic_reloc, /* special_function */
409 "R_ARM_JUMP_SLOT", /* name */
410 true, /* partial_inplace */
411 0xffffffff, /* src_mask */
412 0xffffffff, /* dst_mask */
413 false), /* pcrel_offset */
415 HOWTO (R_ARM_RELATIVE, /* type */
416 0, /* rightshift */
417 2, /* size (0 = byte, 1 = short, 2 = long) */
418 32, /* bitsize */
419 false, /* pc_relative */
420 0, /* bitpos */
421 complain_overflow_bitfield,/* complain_on_overflow */
422 bfd_elf_generic_reloc, /* special_function */
423 "R_ARM_RELATIVE", /* name */
424 true, /* partial_inplace */
425 0xffffffff, /* src_mask */
426 0xffffffff, /* dst_mask */
427 false), /* pcrel_offset */
429 HOWTO (R_ARM_GOTOFF32, /* type */
430 0, /* rightshift */
431 2, /* size (0 = byte, 1 = short, 2 = long) */
432 32, /* bitsize */
433 false, /* pc_relative */
434 0, /* bitpos */
435 complain_overflow_bitfield,/* complain_on_overflow */
436 bfd_elf_generic_reloc, /* special_function */
437 "R_ARM_GOTOFF32", /* name */
438 true, /* partial_inplace */
439 0xffffffff, /* src_mask */
440 0xffffffff, /* dst_mask */
441 false), /* pcrel_offset */
443 HOWTO (R_ARM_GOTPC, /* type */
444 0, /* rightshift */
445 2, /* size (0 = byte, 1 = short, 2 = long) */
446 32, /* bitsize */
447 true, /* pc_relative */
448 0, /* bitpos */
449 complain_overflow_bitfield,/* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_ARM_GOTPC", /* name */
452 true, /* partial_inplace */
453 0xffffffff, /* src_mask */
454 0xffffffff, /* dst_mask */
455 true), /* pcrel_offset */
457 HOWTO (R_ARM_GOT32, /* type */
458 0, /* rightshift */
459 2, /* size (0 = byte, 1 = short, 2 = long) */
460 32, /* bitsize */
461 false, /* pc_relative */
462 0, /* bitpos */
463 complain_overflow_bitfield,/* complain_on_overflow */
464 bfd_elf_generic_reloc, /* special_function */
465 "R_ARM_GOT32", /* name */
466 true, /* partial_inplace */
467 0xffffffff, /* src_mask */
468 0xffffffff, /* dst_mask */
469 false), /* pcrel_offset */
471 HOWTO (R_ARM_PLT32, /* type */
472 2, /* rightshift */
473 2, /* size (0 = byte, 1 = short, 2 = long) */
474 24, /* bitsize */
475 true, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_bitfield,/* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_ARM_PLT32", /* name */
480 false, /* partial_inplace */
481 0x00ffffff, /* src_mask */
482 0x00ffffff, /* dst_mask */
483 true), /* pcrel_offset */
485 HOWTO (R_ARM_CALL, /* type */
486 2, /* rightshift */
487 2, /* size (0 = byte, 1 = short, 2 = long) */
488 24, /* bitsize */
489 true, /* pc_relative */
490 0, /* bitpos */
491 complain_overflow_signed,/* complain_on_overflow */
492 bfd_elf_generic_reloc, /* special_function */
493 "R_ARM_CALL", /* name */
494 false, /* partial_inplace */
495 0x00ffffff, /* src_mask */
496 0x00ffffff, /* dst_mask */
497 true), /* pcrel_offset */
499 HOWTO (R_ARM_JUMP24, /* type */
500 2, /* rightshift */
501 2, /* size (0 = byte, 1 = short, 2 = long) */
502 24, /* bitsize */
503 true, /* pc_relative */
504 0, /* bitpos */
505 complain_overflow_signed,/* complain_on_overflow */
506 bfd_elf_generic_reloc, /* special_function */
507 "R_ARM_JUMP24", /* name */
508 false, /* partial_inplace */
509 0x00ffffff, /* src_mask */
510 0x00ffffff, /* dst_mask */
511 true), /* pcrel_offset */
513 HOWTO (R_ARM_THM_JUMP24, /* type */
514 1, /* rightshift */
515 2, /* size (0 = byte, 1 = short, 2 = long) */
516 24, /* bitsize */
517 true, /* pc_relative */
518 0, /* bitpos */
519 complain_overflow_signed,/* complain_on_overflow */
520 bfd_elf_generic_reloc, /* special_function */
521 "R_ARM_THM_JUMP24", /* name */
522 false, /* partial_inplace */
523 0x07ff2fff, /* src_mask */
524 0x07ff2fff, /* dst_mask */
525 true), /* pcrel_offset */
527 HOWTO (R_ARM_BASE_ABS, /* type */
528 0, /* rightshift */
529 2, /* size (0 = byte, 1 = short, 2 = long) */
530 32, /* bitsize */
531 false, /* pc_relative */
532 0, /* bitpos */
533 complain_overflow_dont,/* complain_on_overflow */
534 bfd_elf_generic_reloc, /* special_function */
535 "R_ARM_BASE_ABS", /* name */
536 false, /* partial_inplace */
537 0xffffffff, /* src_mask */
538 0xffffffff, /* dst_mask */
539 false), /* pcrel_offset */
541 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
542 0, /* rightshift */
543 2, /* size (0 = byte, 1 = short, 2 = long) */
544 12, /* bitsize */
545 true, /* pc_relative */
546 0, /* bitpos */
547 complain_overflow_dont,/* complain_on_overflow */
548 bfd_elf_generic_reloc, /* special_function */
549 "R_ARM_ALU_PCREL_7_0", /* name */
550 false, /* partial_inplace */
551 0x00000fff, /* src_mask */
552 0x00000fff, /* dst_mask */
553 true), /* pcrel_offset */
555 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
556 0, /* rightshift */
557 2, /* size (0 = byte, 1 = short, 2 = long) */
558 12, /* bitsize */
559 true, /* pc_relative */
560 8, /* bitpos */
561 complain_overflow_dont,/* complain_on_overflow */
562 bfd_elf_generic_reloc, /* special_function */
563 "R_ARM_ALU_PCREL_15_8",/* name */
564 false, /* partial_inplace */
565 0x00000fff, /* src_mask */
566 0x00000fff, /* dst_mask */
567 true), /* pcrel_offset */
569 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
570 0, /* rightshift */
571 2, /* size (0 = byte, 1 = short, 2 = long) */
572 12, /* bitsize */
573 true, /* pc_relative */
574 16, /* bitpos */
575 complain_overflow_dont,/* complain_on_overflow */
576 bfd_elf_generic_reloc, /* special_function */
577 "R_ARM_ALU_PCREL_23_15",/* name */
578 false, /* partial_inplace */
579 0x00000fff, /* src_mask */
580 0x00000fff, /* dst_mask */
581 true), /* pcrel_offset */
583 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
584 0, /* rightshift */
585 2, /* size (0 = byte, 1 = short, 2 = long) */
586 12, /* bitsize */
587 false, /* pc_relative */
588 0, /* bitpos */
589 complain_overflow_dont,/* complain_on_overflow */
590 bfd_elf_generic_reloc, /* special_function */
591 "R_ARM_LDR_SBREL_11_0",/* name */
592 false, /* partial_inplace */
593 0x00000fff, /* src_mask */
594 0x00000fff, /* dst_mask */
595 false), /* pcrel_offset */
597 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
598 0, /* rightshift */
599 2, /* size (0 = byte, 1 = short, 2 = long) */
600 8, /* bitsize */
601 false, /* pc_relative */
602 12, /* bitpos */
603 complain_overflow_dont,/* complain_on_overflow */
604 bfd_elf_generic_reloc, /* special_function */
605 "R_ARM_ALU_SBREL_19_12",/* name */
606 false, /* partial_inplace */
607 0x000ff000, /* src_mask */
608 0x000ff000, /* dst_mask */
609 false), /* pcrel_offset */
611 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
612 0, /* rightshift */
613 2, /* size (0 = byte, 1 = short, 2 = long) */
614 8, /* bitsize */
615 false, /* pc_relative */
616 20, /* bitpos */
617 complain_overflow_dont,/* complain_on_overflow */
618 bfd_elf_generic_reloc, /* special_function */
619 "R_ARM_ALU_SBREL_27_20",/* name */
620 false, /* partial_inplace */
621 0x0ff00000, /* src_mask */
622 0x0ff00000, /* dst_mask */
623 false), /* pcrel_offset */
625 HOWTO (R_ARM_TARGET1, /* type */
626 0, /* rightshift */
627 2, /* size (0 = byte, 1 = short, 2 = long) */
628 32, /* bitsize */
629 false, /* pc_relative */
630 0, /* bitpos */
631 complain_overflow_dont,/* complain_on_overflow */
632 bfd_elf_generic_reloc, /* special_function */
633 "R_ARM_TARGET1", /* name */
634 false, /* partial_inplace */
635 0xffffffff, /* src_mask */
636 0xffffffff, /* dst_mask */
637 false), /* pcrel_offset */
639 HOWTO (R_ARM_ROSEGREL32, /* type */
640 0, /* rightshift */
641 2, /* size (0 = byte, 1 = short, 2 = long) */
642 32, /* bitsize */
643 false, /* pc_relative */
644 0, /* bitpos */
645 complain_overflow_dont,/* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 "R_ARM_ROSEGREL32", /* name */
648 false, /* partial_inplace */
649 0xffffffff, /* src_mask */
650 0xffffffff, /* dst_mask */
651 false), /* pcrel_offset */
653 HOWTO (R_ARM_V4BX, /* type */
654 0, /* rightshift */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
656 32, /* bitsize */
657 false, /* pc_relative */
658 0, /* bitpos */
659 complain_overflow_dont,/* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 "R_ARM_V4BX", /* name */
662 false, /* partial_inplace */
663 0xffffffff, /* src_mask */
664 0xffffffff, /* dst_mask */
665 false), /* pcrel_offset */
667 HOWTO (R_ARM_TARGET2, /* type */
668 0, /* rightshift */
669 2, /* size (0 = byte, 1 = short, 2 = long) */
670 32, /* bitsize */
671 false, /* pc_relative */
672 0, /* bitpos */
673 complain_overflow_signed,/* complain_on_overflow */
674 bfd_elf_generic_reloc, /* special_function */
675 "R_ARM_TARGET2", /* name */
676 false, /* partial_inplace */
677 0xffffffff, /* src_mask */
678 0xffffffff, /* dst_mask */
679 true), /* pcrel_offset */
681 HOWTO (R_ARM_PREL31, /* type */
682 0, /* rightshift */
683 2, /* size (0 = byte, 1 = short, 2 = long) */
684 31, /* bitsize */
685 true, /* pc_relative */
686 0, /* bitpos */
687 complain_overflow_signed,/* complain_on_overflow */
688 bfd_elf_generic_reloc, /* special_function */
689 "R_ARM_PREL31", /* name */
690 false, /* partial_inplace */
691 0x7fffffff, /* src_mask */
692 0x7fffffff, /* dst_mask */
693 true), /* pcrel_offset */
695 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
696 0, /* rightshift */
697 2, /* size (0 = byte, 1 = short, 2 = long) */
698 16, /* bitsize */
699 false, /* pc_relative */
700 0, /* bitpos */
701 complain_overflow_dont,/* complain_on_overflow */
702 bfd_elf_generic_reloc, /* special_function */
703 "R_ARM_MOVW_ABS_NC", /* name */
704 false, /* partial_inplace */
705 0x000f0fff, /* src_mask */
706 0x000f0fff, /* dst_mask */
707 false), /* pcrel_offset */
709 HOWTO (R_ARM_MOVT_ABS, /* type */
710 0, /* rightshift */
711 2, /* size (0 = byte, 1 = short, 2 = long) */
712 16, /* bitsize */
713 false, /* pc_relative */
714 0, /* bitpos */
715 complain_overflow_bitfield,/* complain_on_overflow */
716 bfd_elf_generic_reloc, /* special_function */
717 "R_ARM_MOVT_ABS", /* name */
718 false, /* partial_inplace */
719 0x000f0fff, /* src_mask */
720 0x000f0fff, /* dst_mask */
721 false), /* pcrel_offset */
723 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
724 0, /* rightshift */
725 2, /* size (0 = byte, 1 = short, 2 = long) */
726 16, /* bitsize */
727 true, /* pc_relative */
728 0, /* bitpos */
729 complain_overflow_dont,/* complain_on_overflow */
730 bfd_elf_generic_reloc, /* special_function */
731 "R_ARM_MOVW_PREL_NC", /* name */
732 false, /* partial_inplace */
733 0x000f0fff, /* src_mask */
734 0x000f0fff, /* dst_mask */
735 true), /* pcrel_offset */
737 HOWTO (R_ARM_MOVT_PREL, /* type */
738 0, /* rightshift */
739 2, /* size (0 = byte, 1 = short, 2 = long) */
740 16, /* bitsize */
741 true, /* pc_relative */
742 0, /* bitpos */
743 complain_overflow_bitfield,/* complain_on_overflow */
744 bfd_elf_generic_reloc, /* special_function */
745 "R_ARM_MOVT_PREL", /* name */
746 false, /* partial_inplace */
747 0x000f0fff, /* src_mask */
748 0x000f0fff, /* dst_mask */
749 true), /* pcrel_offset */
751 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
752 0, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 16, /* bitsize */
755 false, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont,/* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 false, /* partial_inplace */
761 0x040f70ff, /* src_mask */
762 0x040f70ff, /* dst_mask */
763 false), /* pcrel_offset */
765 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
766 0, /* rightshift */
767 2, /* size (0 = byte, 1 = short, 2 = long) */
768 16, /* bitsize */
769 false, /* pc_relative */
770 0, /* bitpos */
771 complain_overflow_bitfield,/* complain_on_overflow */
772 bfd_elf_generic_reloc, /* special_function */
773 "R_ARM_THM_MOVT_ABS", /* name */
774 false, /* partial_inplace */
775 0x040f70ff, /* src_mask */
776 0x040f70ff, /* dst_mask */
777 false), /* pcrel_offset */
779 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
780 0, /* rightshift */
781 2, /* size (0 = byte, 1 = short, 2 = long) */
782 16, /* bitsize */
783 true, /* pc_relative */
784 0, /* bitpos */
785 complain_overflow_dont,/* complain_on_overflow */
786 bfd_elf_generic_reloc, /* special_function */
787 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 false, /* partial_inplace */
789 0x040f70ff, /* src_mask */
790 0x040f70ff, /* dst_mask */
791 true), /* pcrel_offset */
793 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
794 0, /* rightshift */
795 2, /* size (0 = byte, 1 = short, 2 = long) */
796 16, /* bitsize */
797 true, /* pc_relative */
798 0, /* bitpos */
799 complain_overflow_bitfield,/* complain_on_overflow */
800 bfd_elf_generic_reloc, /* special_function */
801 "R_ARM_THM_MOVT_PREL", /* name */
802 false, /* partial_inplace */
803 0x040f70ff, /* src_mask */
804 0x040f70ff, /* dst_mask */
805 true), /* pcrel_offset */
807 HOWTO (R_ARM_THM_JUMP19, /* type */
808 1, /* rightshift */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
810 19, /* bitsize */
811 true, /* pc_relative */
812 0, /* bitpos */
813 complain_overflow_signed,/* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 "R_ARM_THM_JUMP19", /* name */
816 false, /* partial_inplace */
817 0x043f2fff, /* src_mask */
818 0x043f2fff, /* dst_mask */
819 true), /* pcrel_offset */
821 HOWTO (R_ARM_THM_JUMP6, /* type */
822 1, /* rightshift */
823 1, /* size (0 = byte, 1 = short, 2 = long) */
824 6, /* bitsize */
825 true, /* pc_relative */
826 0, /* bitpos */
827 complain_overflow_unsigned,/* complain_on_overflow */
828 bfd_elf_generic_reloc, /* special_function */
829 "R_ARM_THM_JUMP6", /* name */
830 false, /* partial_inplace */
831 0x02f8, /* src_mask */
832 0x02f8, /* dst_mask */
833 true), /* pcrel_offset */
835 /* These are declared as 13-bit signed relocations because we can
836 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837 versa. */
838 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
839 0, /* rightshift */
840 2, /* size (0 = byte, 1 = short, 2 = long) */
841 13, /* bitsize */
842 true, /* pc_relative */
843 0, /* bitpos */
844 complain_overflow_dont,/* complain_on_overflow */
845 bfd_elf_generic_reloc, /* special_function */
846 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 false, /* partial_inplace */
848 0xffffffff, /* src_mask */
849 0xffffffff, /* dst_mask */
850 true), /* pcrel_offset */
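/* Editorial note on the ADDW/SUBW remark above: the instruction encoding
   itself carries only a 12-bit unsigned immediate, but a negative
   displacement (say -12) can still be applied by rewriting the ADDW into
   SUBW with immediate 12, which is why the relocation is described as a
   13-bit signed value with a reach of -4095..+4095.  */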
852 HOWTO (R_ARM_THM_PC12, /* type */
853 0, /* rightshift */
854 2, /* size (0 = byte, 1 = short, 2 = long) */
855 13, /* bitsize */
856 true, /* pc_relative */
857 0, /* bitpos */
858 complain_overflow_dont,/* complain_on_overflow */
859 bfd_elf_generic_reloc, /* special_function */
860 "R_ARM_THM_PC12", /* name */
861 false, /* partial_inplace */
862 0xffffffff, /* src_mask */
863 0xffffffff, /* dst_mask */
864 true), /* pcrel_offset */
866 HOWTO (R_ARM_ABS32_NOI, /* type */
867 0, /* rightshift */
868 2, /* size (0 = byte, 1 = short, 2 = long) */
869 32, /* bitsize */
870 false, /* pc_relative */
871 0, /* bitpos */
872 complain_overflow_dont,/* complain_on_overflow */
873 bfd_elf_generic_reloc, /* special_function */
874 "R_ARM_ABS32_NOI", /* name */
875 false, /* partial_inplace */
876 0xffffffff, /* src_mask */
877 0xffffffff, /* dst_mask */
878 false), /* pcrel_offset */
880 HOWTO (R_ARM_REL32_NOI, /* type */
881 0, /* rightshift */
882 2, /* size (0 = byte, 1 = short, 2 = long) */
883 32, /* bitsize */
884 true, /* pc_relative */
885 0, /* bitpos */
886 complain_overflow_dont,/* complain_on_overflow */
887 bfd_elf_generic_reloc, /* special_function */
888 "R_ARM_REL32_NOI", /* name */
889 false, /* partial_inplace */
890 0xffffffff, /* src_mask */
891 0xffffffff, /* dst_mask */
892 false), /* pcrel_offset */
894 /* Group relocations. */
896 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
897 0, /* rightshift */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
899 32, /* bitsize */
900 true, /* pc_relative */
901 0, /* bitpos */
902 complain_overflow_dont,/* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 "R_ARM_ALU_PC_G0_NC", /* name */
905 false, /* partial_inplace */
906 0xffffffff, /* src_mask */
907 0xffffffff, /* dst_mask */
908 true), /* pcrel_offset */
910 HOWTO (R_ARM_ALU_PC_G0, /* type */
911 0, /* rightshift */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
913 32, /* bitsize */
914 true, /* pc_relative */
915 0, /* bitpos */
916 complain_overflow_dont,/* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 "R_ARM_ALU_PC_G0", /* name */
919 false, /* partial_inplace */
920 0xffffffff, /* src_mask */
921 0xffffffff, /* dst_mask */
922 true), /* pcrel_offset */
924 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
925 0, /* rightshift */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
927 32, /* bitsize */
928 true, /* pc_relative */
929 0, /* bitpos */
930 complain_overflow_dont,/* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 "R_ARM_ALU_PC_G1_NC", /* name */
933 false, /* partial_inplace */
934 0xffffffff, /* src_mask */
935 0xffffffff, /* dst_mask */
936 true), /* pcrel_offset */
938 HOWTO (R_ARM_ALU_PC_G1, /* type */
939 0, /* rightshift */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
941 32, /* bitsize */
942 true, /* pc_relative */
943 0, /* bitpos */
944 complain_overflow_dont,/* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 "R_ARM_ALU_PC_G1", /* name */
947 false, /* partial_inplace */
948 0xffffffff, /* src_mask */
949 0xffffffff, /* dst_mask */
950 true), /* pcrel_offset */
952 HOWTO (R_ARM_ALU_PC_G2, /* type */
953 0, /* rightshift */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
955 32, /* bitsize */
956 true, /* pc_relative */
957 0, /* bitpos */
958 complain_overflow_dont,/* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 "R_ARM_ALU_PC_G2", /* name */
961 false, /* partial_inplace */
962 0xffffffff, /* src_mask */
963 0xffffffff, /* dst_mask */
964 true), /* pcrel_offset */
966 HOWTO (R_ARM_LDR_PC_G1, /* type */
967 0, /* rightshift */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
969 32, /* bitsize */
970 true, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_dont,/* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 "R_ARM_LDR_PC_G1", /* name */
975 false, /* partial_inplace */
976 0xffffffff, /* src_mask */
977 0xffffffff, /* dst_mask */
978 true), /* pcrel_offset */
980 HOWTO (R_ARM_LDR_PC_G2, /* type */
981 0, /* rightshift */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
983 32, /* bitsize */
984 true, /* pc_relative */
985 0, /* bitpos */
986 complain_overflow_dont,/* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 "R_ARM_LDR_PC_G2", /* name */
989 false, /* partial_inplace */
990 0xffffffff, /* src_mask */
991 0xffffffff, /* dst_mask */
992 true), /* pcrel_offset */
994 HOWTO (R_ARM_LDRS_PC_G0, /* type */
995 0, /* rightshift */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
997 32, /* bitsize */
998 true, /* pc_relative */
999 0, /* bitpos */
1000 complain_overflow_dont,/* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 "R_ARM_LDRS_PC_G0", /* name */
1003 false, /* partial_inplace */
1004 0xffffffff, /* src_mask */
1005 0xffffffff, /* dst_mask */
1006 true), /* pcrel_offset */
1008 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1009 0, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 32, /* bitsize */
1012 true, /* pc_relative */
1013 0, /* bitpos */
1014 complain_overflow_dont,/* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 "R_ARM_LDRS_PC_G1", /* name */
1017 false, /* partial_inplace */
1018 0xffffffff, /* src_mask */
1019 0xffffffff, /* dst_mask */
1020 true), /* pcrel_offset */
1022 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1023 0, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 32, /* bitsize */
1026 true, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont,/* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_ARM_LDRS_PC_G2", /* name */
1031 false, /* partial_inplace */
1032 0xffffffff, /* src_mask */
1033 0xffffffff, /* dst_mask */
1034 true), /* pcrel_offset */
1036 HOWTO (R_ARM_LDC_PC_G0, /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 32, /* bitsize */
1040 true, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont,/* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_ARM_LDC_PC_G0", /* name */
1045 false, /* partial_inplace */
1046 0xffffffff, /* src_mask */
1047 0xffffffff, /* dst_mask */
1048 true), /* pcrel_offset */
1050 HOWTO (R_ARM_LDC_PC_G1, /* type */
1051 0, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 32, /* bitsize */
1054 true, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_dont,/* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_ARM_LDC_PC_G1", /* name */
1059 false, /* partial_inplace */
1060 0xffffffff, /* src_mask */
1061 0xffffffff, /* dst_mask */
1062 true), /* pcrel_offset */
1064 HOWTO (R_ARM_LDC_PC_G2, /* type */
1065 0, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 32, /* bitsize */
1068 true, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont,/* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_ARM_LDC_PC_G2", /* name */
1073 false, /* partial_inplace */
1074 0xffffffff, /* src_mask */
1075 0xffffffff, /* dst_mask */
1076 true), /* pcrel_offset */
1078 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1079 0, /* rightshift */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 32, /* bitsize */
1082 true, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont,/* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_ARM_ALU_SB_G0_NC", /* name */
1087 false, /* partial_inplace */
1088 0xffffffff, /* src_mask */
1089 0xffffffff, /* dst_mask */
1090 true), /* pcrel_offset */
1092 HOWTO (R_ARM_ALU_SB_G0, /* type */
1093 0, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 32, /* bitsize */
1096 true, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont,/* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_ARM_ALU_SB_G0", /* name */
1101 false, /* partial_inplace */
1102 0xffffffff, /* src_mask */
1103 0xffffffff, /* dst_mask */
1104 true), /* pcrel_offset */
1106 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1107 0, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 32, /* bitsize */
1110 true, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont,/* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_ARM_ALU_SB_G1_NC", /* name */
1115 false, /* partial_inplace */
1116 0xffffffff, /* src_mask */
1117 0xffffffff, /* dst_mask */
1118 true), /* pcrel_offset */
1120 HOWTO (R_ARM_ALU_SB_G1, /* type */
1121 0, /* rightshift */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 32, /* bitsize */
1124 true, /* pc_relative */
1125 0, /* bitpos */
1126 complain_overflow_dont,/* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_ARM_ALU_SB_G1", /* name */
1129 false, /* partial_inplace */
1130 0xffffffff, /* src_mask */
1131 0xffffffff, /* dst_mask */
1132 true), /* pcrel_offset */
1134 HOWTO (R_ARM_ALU_SB_G2, /* type */
1135 0, /* rightshift */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 32, /* bitsize */
1138 true, /* pc_relative */
1139 0, /* bitpos */
1140 complain_overflow_dont,/* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_ARM_ALU_SB_G2", /* name */
1143 false, /* partial_inplace */
1144 0xffffffff, /* src_mask */
1145 0xffffffff, /* dst_mask */
1146 true), /* pcrel_offset */
1148 HOWTO (R_ARM_LDR_SB_G0, /* type */
1149 0, /* rightshift */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 32, /* bitsize */
1152 true, /* pc_relative */
1153 0, /* bitpos */
1154 complain_overflow_dont,/* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_ARM_LDR_SB_G0", /* name */
1157 false, /* partial_inplace */
1158 0xffffffff, /* src_mask */
1159 0xffffffff, /* dst_mask */
1160 true), /* pcrel_offset */
1162 HOWTO (R_ARM_LDR_SB_G1, /* type */
1163 0, /* rightshift */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 32, /* bitsize */
1166 true, /* pc_relative */
1167 0, /* bitpos */
1168 complain_overflow_dont,/* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_ARM_LDR_SB_G1", /* name */
1171 false, /* partial_inplace */
1172 0xffffffff, /* src_mask */
1173 0xffffffff, /* dst_mask */
1174 true), /* pcrel_offset */
1176 HOWTO (R_ARM_LDR_SB_G2, /* type */
1177 0, /* rightshift */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 32, /* bitsize */
1180 true, /* pc_relative */
1181 0, /* bitpos */
1182 complain_overflow_dont,/* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_ARM_LDR_SB_G2", /* name */
1185 false, /* partial_inplace */
1186 0xffffffff, /* src_mask */
1187 0xffffffff, /* dst_mask */
1188 true), /* pcrel_offset */
1190 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1191 0, /* rightshift */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 32, /* bitsize */
1194 true, /* pc_relative */
1195 0, /* bitpos */
1196 complain_overflow_dont,/* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_ARM_LDRS_SB_G0", /* name */
1199 false, /* partial_inplace */
1200 0xffffffff, /* src_mask */
1201 0xffffffff, /* dst_mask */
1202 true), /* pcrel_offset */
1204 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1205 0, /* rightshift */
1206 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 32, /* bitsize */
1208 true, /* pc_relative */
1209 0, /* bitpos */
1210 complain_overflow_dont,/* complain_on_overflow */
1211 bfd_elf_generic_reloc, /* special_function */
1212 "R_ARM_LDRS_SB_G1", /* name */
1213 false, /* partial_inplace */
1214 0xffffffff, /* src_mask */
1215 0xffffffff, /* dst_mask */
1216 true), /* pcrel_offset */
1218 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1219 0, /* rightshift */
1220 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 32, /* bitsize */
1222 true, /* pc_relative */
1223 0, /* bitpos */
1224 complain_overflow_dont,/* complain_on_overflow */
1225 bfd_elf_generic_reloc, /* special_function */
1226 "R_ARM_LDRS_SB_G2", /* name */
1227 false, /* partial_inplace */
1228 0xffffffff, /* src_mask */
1229 0xffffffff, /* dst_mask */
1230 true), /* pcrel_offset */
1232 HOWTO (R_ARM_LDC_SB_G0, /* type */
1233 0, /* rightshift */
1234 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 32, /* bitsize */
1236 true, /* pc_relative */
1237 0, /* bitpos */
1238 complain_overflow_dont,/* complain_on_overflow */
1239 bfd_elf_generic_reloc, /* special_function */
1240 "R_ARM_LDC_SB_G0", /* name */
1241 false, /* partial_inplace */
1242 0xffffffff, /* src_mask */
1243 0xffffffff, /* dst_mask */
1244 true), /* pcrel_offset */
1246 HOWTO (R_ARM_LDC_SB_G1, /* type */
1247 0, /* rightshift */
1248 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 32, /* bitsize */
1250 true, /* pc_relative */
1251 0, /* bitpos */
1252 complain_overflow_dont,/* complain_on_overflow */
1253 bfd_elf_generic_reloc, /* special_function */
1254 "R_ARM_LDC_SB_G1", /* name */
1255 false, /* partial_inplace */
1256 0xffffffff, /* src_mask */
1257 0xffffffff, /* dst_mask */
1258 true), /* pcrel_offset */
1260 HOWTO (R_ARM_LDC_SB_G2, /* type */
1261 0, /* rightshift */
1262 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 32, /* bitsize */
1264 true, /* pc_relative */
1265 0, /* bitpos */
1266 complain_overflow_dont,/* complain_on_overflow */
1267 bfd_elf_generic_reloc, /* special_function */
1268 "R_ARM_LDC_SB_G2", /* name */
1269 false, /* partial_inplace */
1270 0xffffffff, /* src_mask */
1271 0xffffffff, /* dst_mask */
1272 true), /* pcrel_offset */
1274 /* End of group relocations. */
1276 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1277 0, /* rightshift */
1278 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 16, /* bitsize */
1280 false, /* pc_relative */
1281 0, /* bitpos */
1282 complain_overflow_dont,/* complain_on_overflow */
1283 bfd_elf_generic_reloc, /* special_function */
1284 "R_ARM_MOVW_BREL_NC", /* name */
1285 false, /* partial_inplace */
1286 0x0000ffff, /* src_mask */
1287 0x0000ffff, /* dst_mask */
1288 false), /* pcrel_offset */
1290 HOWTO (R_ARM_MOVT_BREL, /* type */
1291 0, /* rightshift */
1292 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 16, /* bitsize */
1294 false, /* pc_relative */
1295 0, /* bitpos */
1296 complain_overflow_bitfield,/* complain_on_overflow */
1297 bfd_elf_generic_reloc, /* special_function */
1298 "R_ARM_MOVT_BREL", /* name */
1299 false, /* partial_inplace */
1300 0x0000ffff, /* src_mask */
1301 0x0000ffff, /* dst_mask */
1302 false), /* pcrel_offset */
1304 HOWTO (R_ARM_MOVW_BREL, /* type */
1305 0, /* rightshift */
1306 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 16, /* bitsize */
1308 false, /* pc_relative */
1309 0, /* bitpos */
1310 complain_overflow_dont,/* complain_on_overflow */
1311 bfd_elf_generic_reloc, /* special_function */
1312 "R_ARM_MOVW_BREL", /* name */
1313 false, /* partial_inplace */
1314 0x0000ffff, /* src_mask */
1315 0x0000ffff, /* dst_mask */
1316 false), /* pcrel_offset */
1318 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1319 0, /* rightshift */
1320 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 16, /* bitsize */
1322 false, /* pc_relative */
1323 0, /* bitpos */
1324 complain_overflow_dont,/* complain_on_overflow */
1325 bfd_elf_generic_reloc, /* special_function */
1326 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 false, /* partial_inplace */
1328 0x040f70ff, /* src_mask */
1329 0x040f70ff, /* dst_mask */
1330 false), /* pcrel_offset */
1332 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1333 0, /* rightshift */
1334 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 16, /* bitsize */
1336 false, /* pc_relative */
1337 0, /* bitpos */
1338 complain_overflow_bitfield,/* complain_on_overflow */
1339 bfd_elf_generic_reloc, /* special_function */
1340 "R_ARM_THM_MOVT_BREL", /* name */
1341 false, /* partial_inplace */
1342 0x040f70ff, /* src_mask */
1343 0x040f70ff, /* dst_mask */
1344 false), /* pcrel_offset */
1346 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1347 0, /* rightshift */
1348 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 16, /* bitsize */
1350 false, /* pc_relative */
1351 0, /* bitpos */
1352 complain_overflow_dont,/* complain_on_overflow */
1353 bfd_elf_generic_reloc, /* special_function */
1354 "R_ARM_THM_MOVW_BREL", /* name */
1355 false, /* partial_inplace */
1356 0x040f70ff, /* src_mask */
1357 0x040f70ff, /* dst_mask */
1358 false), /* pcrel_offset */
1360 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1361 0, /* rightshift */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 32, /* bitsize */
1364 false, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_bitfield,/* complain_on_overflow */
1367 NULL, /* special_function */
1368 "R_ARM_TLS_GOTDESC", /* name */
1369 true, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 false), /* pcrel_offset */
1374 HOWTO (R_ARM_TLS_CALL, /* type */
1375 0, /* rightshift */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 24, /* bitsize */
1378 false, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_TLS_CALL", /* name */
1383 false, /* partial_inplace */
1384 0x00ffffff, /* src_mask */
1385 0x00ffffff, /* dst_mask */
1386 false), /* pcrel_offset */
1388 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1389 0, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 0, /* bitsize */
1392 false, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_dont,/* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_TLS_DESCSEQ", /* name */
1397 false, /* partial_inplace */
1398 0x00000000, /* src_mask */
1399 0x00000000, /* dst_mask */
1400 false), /* pcrel_offset */
1402 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1403 0, /* rightshift */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 24, /* bitsize */
1406 false, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_dont,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_THM_TLS_CALL", /* name */
1411 false, /* partial_inplace */
1412 0x07ff07ff, /* src_mask */
1413 0x07ff07ff, /* dst_mask */
1414 false), /* pcrel_offset */
1416 HOWTO (R_ARM_PLT32_ABS, /* type */
1417 0, /* rightshift */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 32, /* bitsize */
1420 false, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_dont,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_PLT32_ABS", /* name */
1425 false, /* partial_inplace */
1426 0xffffffff, /* src_mask */
1427 0xffffffff, /* dst_mask */
1428 false), /* pcrel_offset */
1430 HOWTO (R_ARM_GOT_ABS, /* type */
1431 0, /* rightshift */
1432 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 32, /* bitsize */
1434 false, /* pc_relative */
1435 0, /* bitpos */
1436 complain_overflow_dont,/* complain_on_overflow */
1437 bfd_elf_generic_reloc, /* special_function */
1438 "R_ARM_GOT_ABS", /* name */
1439 false, /* partial_inplace */
1440 0xffffffff, /* src_mask */
1441 0xffffffff, /* dst_mask */
1442 false), /* pcrel_offset */
1444 HOWTO (R_ARM_GOT_PREL, /* type */
1445 0, /* rightshift */
1446 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 32, /* bitsize */
1448 true, /* pc_relative */
1449 0, /* bitpos */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 "R_ARM_GOT_PREL", /* name */
1453 false, /* partial_inplace */
1454 0xffffffff, /* src_mask */
1455 0xffffffff, /* dst_mask */
1456 true), /* pcrel_offset */
1458 HOWTO (R_ARM_GOT_BREL12, /* type */
1459 0, /* rightshift */
1460 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 12, /* bitsize */
1462 false, /* pc_relative */
1463 0, /* bitpos */
1464 complain_overflow_bitfield,/* complain_on_overflow */
1465 bfd_elf_generic_reloc, /* special_function */
1466 "R_ARM_GOT_BREL12", /* name */
1467 false, /* partial_inplace */
1468 0x00000fff, /* src_mask */
1469 0x00000fff, /* dst_mask */
1470 false), /* pcrel_offset */
1472 HOWTO (R_ARM_GOTOFF12, /* type */
1473 0, /* rightshift */
1474 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 12, /* bitsize */
1476 false, /* pc_relative */
1477 0, /* bitpos */
1478 complain_overflow_bitfield,/* complain_on_overflow */
1479 bfd_elf_generic_reloc, /* special_function */
1480 "R_ARM_GOTOFF12", /* name */
1481 false, /* partial_inplace */
1482 0x00000fff, /* src_mask */
1483 0x00000fff, /* dst_mask */
1484 false), /* pcrel_offset */
1486 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1488 /* GNU extension to record C++ vtable member usage */
1489 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1490 0, /* rightshift */
1491 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 0, /* bitsize */
1493 false, /* pc_relative */
1494 0, /* bitpos */
1495 complain_overflow_dont, /* complain_on_overflow */
1496 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1497 "R_ARM_GNU_VTENTRY", /* name */
1498 false, /* partial_inplace */
1499 0, /* src_mask */
1500 0, /* dst_mask */
1501 false), /* pcrel_offset */
1503 /* GNU extension to record C++ vtable hierarchy */
1504 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1505 0, /* rightshift */
1506 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 0, /* bitsize */
1508 false, /* pc_relative */
1509 0, /* bitpos */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 NULL, /* special_function */
1512 "R_ARM_GNU_VTINHERIT", /* name */
1513 false, /* partial_inplace */
1514 0, /* src_mask */
1515 0, /* dst_mask */
1516 false), /* pcrel_offset */
1518 HOWTO (R_ARM_THM_JUMP11, /* type */
1519 1, /* rightshift */
1520 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 11, /* bitsize */
1522 true, /* pc_relative */
1523 0, /* bitpos */
1524 complain_overflow_signed, /* complain_on_overflow */
1525 bfd_elf_generic_reloc, /* special_function */
1526 "R_ARM_THM_JUMP11", /* name */
1527 false, /* partial_inplace */
1528 0x000007ff, /* src_mask */
1529 0x000007ff, /* dst_mask */
1530 true), /* pcrel_offset */
1532 HOWTO (R_ARM_THM_JUMP8, /* type */
1533 1, /* rightshift */
1534 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 8, /* bitsize */
1536 true, /* pc_relative */
1537 0, /* bitpos */
1538 complain_overflow_signed, /* complain_on_overflow */
1539 bfd_elf_generic_reloc, /* special_function */
1540 "R_ARM_THM_JUMP8", /* name */
1541 false, /* partial_inplace */
1542 0x000000ff, /* src_mask */
1543 0x000000ff, /* dst_mask */
1544 true), /* pcrel_offset */
1546 /* TLS relocations */
1547 HOWTO (R_ARM_TLS_GD32, /* type */
1548 0, /* rightshift */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 32, /* bitsize */
1551 false, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 NULL, /* special_function */
1555 "R_ARM_TLS_GD32", /* name */
1556 true, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 false), /* pcrel_offset */
1561 HOWTO (R_ARM_TLS_LDM32, /* type */
1562 0, /* rightshift */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 32, /* bitsize */
1565 false, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDM32", /* name */
1570 true, /* partial_inplace */
1571 0xffffffff, /* src_mask */
1572 0xffffffff, /* dst_mask */
1573 false), /* pcrel_offset */
1575 HOWTO (R_ARM_TLS_LDO32, /* type */
1576 0, /* rightshift */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 32, /* bitsize */
1579 false, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LDO32", /* name */
1584 true, /* partial_inplace */
1585 0xffffffff, /* src_mask */
1586 0xffffffff, /* dst_mask */
1587 false), /* pcrel_offset */
1589 HOWTO (R_ARM_TLS_IE32, /* type */
1590 0, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 32, /* bitsize */
1593 false, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 NULL, /* special_function */
1597 "R_ARM_TLS_IE32", /* name */
1598 true, /* partial_inplace */
1599 0xffffffff, /* src_mask */
1600 0xffffffff, /* dst_mask */
1601 false), /* pcrel_offset */
1603 HOWTO (R_ARM_TLS_LE32, /* type */
1604 0, /* rightshift */
1605 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 32, /* bitsize */
1607 false, /* pc_relative */
1608 0, /* bitpos */
1609 complain_overflow_bitfield,/* complain_on_overflow */
1610 NULL, /* special_function */
1611 "R_ARM_TLS_LE32", /* name */
1612 true, /* partial_inplace */
1613 0xffffffff, /* src_mask */
1614 0xffffffff, /* dst_mask */
1615 false), /* pcrel_offset */
1617 HOWTO (R_ARM_TLS_LDO12, /* type */
1618 0, /* rightshift */
1619 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 12, /* bitsize */
1621 false, /* pc_relative */
1622 0, /* bitpos */
1623 complain_overflow_bitfield,/* complain_on_overflow */
1624 bfd_elf_generic_reloc, /* special_function */
1625 "R_ARM_TLS_LDO12", /* name */
1626 false, /* partial_inplace */
1627 0x00000fff, /* src_mask */
1628 0x00000fff, /* dst_mask */
1629 false), /* pcrel_offset */
1631 HOWTO (R_ARM_TLS_LE12, /* type */
1632 0, /* rightshift */
1633 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 12, /* bitsize */
1635 false, /* pc_relative */
1636 0, /* bitpos */
1637 complain_overflow_bitfield,/* complain_on_overflow */
1638 bfd_elf_generic_reloc, /* special_function */
1639 "R_ARM_TLS_LE12", /* name */
1640 false, /* partial_inplace */
1641 0x00000fff, /* src_mask */
1642 0x00000fff, /* dst_mask */
1643 false), /* pcrel_offset */
1645 HOWTO (R_ARM_TLS_IE12GP, /* type */
1646 0, /* rightshift */
1647 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 12, /* bitsize */
1649 false, /* pc_relative */
1650 0, /* bitpos */
1651 complain_overflow_bitfield,/* complain_on_overflow */
1652 bfd_elf_generic_reloc, /* special_function */
1653 "R_ARM_TLS_IE12GP", /* name */
1654 false, /* partial_inplace */
1655 0x00000fff, /* src_mask */
1656 0x00000fff, /* dst_mask */
1657 false), /* pcrel_offset */
1659 /* 112-127 private relocations. */
1660 EMPTY_HOWTO (112),
1661 EMPTY_HOWTO (113),
1662 EMPTY_HOWTO (114),
1663 EMPTY_HOWTO (115),
1664 EMPTY_HOWTO (116),
1665 EMPTY_HOWTO (117),
1666 EMPTY_HOWTO (118),
1667 EMPTY_HOWTO (119),
1668 EMPTY_HOWTO (120),
1669 EMPTY_HOWTO (121),
1670 EMPTY_HOWTO (122),
1671 EMPTY_HOWTO (123),
1672 EMPTY_HOWTO (124),
1673 EMPTY_HOWTO (125),
1674 EMPTY_HOWTO (126),
1675 EMPTY_HOWTO (127),
1677 /* R_ARM_ME_TOO, obsolete. */
1678 EMPTY_HOWTO (128),
1680 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1681 0, /* rightshift */
1682 1, /* size (0 = byte, 1 = short, 2 = long) */
1683 0, /* bitsize */
1684 false, /* pc_relative */
1685 0, /* bitpos */
1686 complain_overflow_dont,/* complain_on_overflow */
1687 bfd_elf_generic_reloc, /* special_function */
1688 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 false, /* partial_inplace */
1690 0x00000000, /* src_mask */
1691 0x00000000, /* dst_mask */
1692 false), /* pcrel_offset */
1693 EMPTY_HOWTO (130),
1694 EMPTY_HOWTO (131),
1695 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1696 0, /* rightshift. */
1697 1, /* size (0 = byte, 1 = short, 2 = long). */
1698 16, /* bitsize. */
1699 false, /* pc_relative. */
1700 0, /* bitpos. */
1701 complain_overflow_bitfield,/* complain_on_overflow. */
1702 bfd_elf_generic_reloc, /* special_function. */
1703 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1704 false, /* partial_inplace. */
1705 0x00000000, /* src_mask. */
1706 0x00000000, /* dst_mask. */
1707 false), /* pcrel_offset. */
1708 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1709 0, /* rightshift. */
1710 1, /* size (0 = byte, 1 = short, 2 = long). */
1711 16, /* bitsize. */
1712 false, /* pc_relative. */
1713 0, /* bitpos. */
1714 complain_overflow_bitfield,/* complain_on_overflow. */
1715 bfd_elf_generic_reloc, /* special_function. */
1716 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1717 false, /* partial_inplace. */
1718 0x00000000, /* src_mask. */
1719 0x00000000, /* dst_mask. */
1720 false), /* pcrel_offset. */
1721 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1722 0, /* rightshift. */
1723 1, /* size (0 = byte, 1 = short, 2 = long). */
1724 16, /* bitsize. */
1725 false, /* pc_relative. */
1726 0, /* bitpos. */
1727 complain_overflow_bitfield,/* complain_on_overflow. */
1728 bfd_elf_generic_reloc, /* special_function. */
1729 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1730 false, /* partial_inplace. */
1731 0x00000000, /* src_mask. */
1732 0x00000000, /* dst_mask. */
1733 false), /* pcrel_offset. */
1734 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1735 0, /* rightshift. */
1736 1, /* size (0 = byte, 1 = short, 2 = long). */
1737 16, /* bitsize. */
1738 false, /* pc_relative. */
1739 0, /* bitpos. */
1740 complain_overflow_bitfield,/* complain_on_overflow. */
1741 bfd_elf_generic_reloc, /* special_function. */
1742 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1743 false, /* partial_inplace. */
1744 0x00000000, /* src_mask. */
1745 0x00000000, /* dst_mask. */
1746 false), /* pcrel_offset. */
1747 /* Relocations for Armv8.1-M Mainline. */
1748 HOWTO (R_ARM_THM_BF16, /* type. */
1749 0, /* rightshift. */
1750 1, /* size (0 = byte, 1 = short, 2 = long). */
1751 16, /* bitsize. */
1752 true, /* pc_relative. */
1753 0, /* bitpos. */
1754 complain_overflow_dont,/* do not complain_on_overflow. */
1755 bfd_elf_generic_reloc, /* special_function. */
1756 "R_ARM_THM_BF16", /* name. */
1757 false, /* partial_inplace. */
1758 0x001f0ffe, /* src_mask. */
1759 0x001f0ffe, /* dst_mask. */
1760 true), /* pcrel_offset. */
1761 HOWTO (R_ARM_THM_BF12, /* type. */
1762 0, /* rightshift. */
1763 1, /* size (0 = byte, 1 = short, 2 = long). */
1764 12, /* bitsize. */
1765 true, /* pc_relative. */
1766 0, /* bitpos. */
1767 complain_overflow_dont,/* do not complain_on_overflow. */
1768 bfd_elf_generic_reloc, /* special_function. */
1769 "R_ARM_THM_BF12", /* name. */
1770 false, /* partial_inplace. */
1771 0x00010ffe, /* src_mask. */
1772 0x00010ffe, /* dst_mask. */
1773 true), /* pcrel_offset. */
1774 HOWTO (R_ARM_THM_BF18, /* type. */
1775 0, /* rightshift. */
1776 1, /* size (0 = byte, 1 = short, 2 = long). */
1777 18, /* bitsize. */
1778 true, /* pc_relative. */
1779 0, /* bitpos. */
1780 complain_overflow_dont,/* do not complain_on_overflow. */
1781 bfd_elf_generic_reloc, /* special_function. */
1782 "R_ARM_THM_BF18", /* name. */
1783 false, /* partial_inplace. */
1784 0x007f0ffe, /* src_mask. */
1785 0x007f0ffe, /* dst_mask. */
1786 true), /* pcrel_offset. */
1789 /* 160 onwards: */
1790 static reloc_howto_type elf32_arm_howto_table_2[8] =
1792 HOWTO (R_ARM_IRELATIVE, /* type */
1793 0, /* rightshift */
1794 2, /* size (0 = byte, 1 = short, 2 = long) */
1795 32, /* bitsize */
1796 false, /* pc_relative */
1797 0, /* bitpos */
1798 complain_overflow_bitfield,/* complain_on_overflow */
1799 bfd_elf_generic_reloc, /* special_function */
1800 "R_ARM_IRELATIVE", /* name */
1801 true, /* partial_inplace */
1802 0xffffffff, /* src_mask */
1803 0xffffffff, /* dst_mask */
1804 false), /* pcrel_offset */
1805 HOWTO (R_ARM_GOTFUNCDESC, /* type */
1806 0, /* rightshift */
1807 2, /* size (0 = byte, 1 = short, 2 = long) */
1808 32, /* bitsize */
1809 false, /* pc_relative */
1810 0, /* bitpos */
1811 complain_overflow_bitfield,/* complain_on_overflow */
1812 bfd_elf_generic_reloc, /* special_function */
1813 "R_ARM_GOTFUNCDESC", /* name */
1814 false, /* partial_inplace */
1815 0, /* src_mask */
1816 0xffffffff, /* dst_mask */
1817 false), /* pcrel_offset */
1818 HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
1819 0, /* rightshift */
1820 2, /* size (0 = byte, 1 = short, 2 = long) */
1821 32, /* bitsize */
1822 false, /* pc_relative */
1823 0, /* bitpos */
1824 complain_overflow_bitfield,/* complain_on_overflow */
1825 bfd_elf_generic_reloc, /* special_function */
1826 "R_ARM_GOTOFFFUNCDESC",/* name */
1827 false, /* partial_inplace */
1828 0, /* src_mask */
1829 0xffffffff, /* dst_mask */
1830 false), /* pcrel_offset */
1831 HOWTO (R_ARM_FUNCDESC, /* type */
1832 0, /* rightshift */
1833 2, /* size (0 = byte, 1 = short, 2 = long) */
1834 32, /* bitsize */
1835 false, /* pc_relative */
1836 0, /* bitpos */
1837 complain_overflow_bitfield,/* complain_on_overflow */
1838 bfd_elf_generic_reloc, /* special_function */
1839 "R_ARM_FUNCDESC", /* name */
1840 false, /* partial_inplace */
1841 0, /* src_mask */
1842 0xffffffff, /* dst_mask */
1843 false), /* pcrel_offset */
1844 HOWTO (R_ARM_FUNCDESC_VALUE, /* type */
1845 0, /* rightshift */
1846 2, /* size (0 = byte, 1 = short, 2 = long) */
1847 64, /* bitsize */
1848 false, /* pc_relative */
1849 0, /* bitpos */
1850 complain_overflow_bitfield,/* complain_on_overflow */
1851 bfd_elf_generic_reloc, /* special_function */
1852 "R_ARM_FUNCDESC_VALUE",/* name */
1853 false, /* partial_inplace */
1854 0, /* src_mask */
1855 0xffffffff, /* dst_mask */
1856 false), /* pcrel_offset */
1857 HOWTO (R_ARM_TLS_GD32_FDPIC, /* type */
1858 0, /* rightshift */
1859 2, /* size (0 = byte, 1 = short, 2 = long) */
1860 32, /* bitsize */
1861 false, /* pc_relative */
1862 0, /* bitpos */
1863 complain_overflow_bitfield,/* complain_on_overflow */
1864 bfd_elf_generic_reloc, /* special_function */
1865 "R_ARM_TLS_GD32_FDPIC",/* name */
1866 false, /* partial_inplace */
1867 0, /* src_mask */
1868 0xffffffff, /* dst_mask */
1869 false), /* pcrel_offset */
1870 HOWTO (R_ARM_TLS_LDM32_FDPIC, /* type */
1871 0, /* rightshift */
1872 2, /* size (0 = byte, 1 = short, 2 = long) */
1873 32, /* bitsize */
1874 false, /* pc_relative */
1875 0, /* bitpos */
1876 complain_overflow_bitfield,/* complain_on_overflow */
1877 bfd_elf_generic_reloc, /* special_function */
1878 "R_ARM_TLS_LDM32_FDPIC",/* name */
1879 false, /* partial_inplace */
1880 0, /* src_mask */
1881 0xffffffff, /* dst_mask */
1882 false), /* pcrel_offset */
1883 HOWTO (R_ARM_TLS_IE32_FDPIC, /* type */
1884 0, /* rightshift */
1885 2, /* size (0 = byte, 1 = short, 2 = long) */
1886 32, /* bitsize */
1887 false, /* pc_relative */
1888 0, /* bitpos */
1889 complain_overflow_bitfield,/* complain_on_overflow */
1890 bfd_elf_generic_reloc, /* special_function */
1891 "R_ARM_TLS_IE32_FDPIC",/* name */
1892 false, /* partial_inplace */
1893 0, /* src_mask */
1894 0xffffffff, /* dst_mask */
1895 false), /* pcrel_offset */
1898 /* 249-255 extended, currently unused, relocations: */
1899 static reloc_howto_type elf32_arm_howto_table_3[4] =
1901 HOWTO (R_ARM_RREL32, /* type */
1902 0, /* rightshift */
1903 0, /* size (0 = byte, 1 = short, 2 = long) */
1904 0, /* bitsize */
1905 false, /* pc_relative */
1906 0, /* bitpos */
1907 complain_overflow_dont,/* complain_on_overflow */
1908 bfd_elf_generic_reloc, /* special_function */
1909 "R_ARM_RREL32", /* name */
1910 false, /* partial_inplace */
1911 0, /* src_mask */
1912 0, /* dst_mask */
1913 false), /* pcrel_offset */
1915 HOWTO (R_ARM_RABS32, /* type */
1916 0, /* rightshift */
1917 0, /* size (0 = byte, 1 = short, 2 = long) */
1918 0, /* bitsize */
1919 false, /* pc_relative */
1920 0, /* bitpos */
1921 complain_overflow_dont,/* complain_on_overflow */
1922 bfd_elf_generic_reloc, /* special_function */
1923 "R_ARM_RABS32", /* name */
1924 false, /* partial_inplace */
1925 0, /* src_mask */
1926 0, /* dst_mask */
1927 false), /* pcrel_offset */
1929 HOWTO (R_ARM_RPC24, /* type */
1930 0, /* rightshift */
1931 0, /* size (0 = byte, 1 = short, 2 = long) */
1932 0, /* bitsize */
1933 false, /* pc_relative */
1934 0, /* bitpos */
1935 complain_overflow_dont,/* complain_on_overflow */
1936 bfd_elf_generic_reloc, /* special_function */
1937 "R_ARM_RPC24", /* name */
1938 false, /* partial_inplace */
1939 0, /* src_mask */
1940 0, /* dst_mask */
1941 false), /* pcrel_offset */
1943 HOWTO (R_ARM_RBASE, /* type */
1944 0, /* rightshift */
1945 0, /* size (0 = byte, 1 = short, 2 = long) */
1946 0, /* bitsize */
1947 false, /* pc_relative */
1948 0, /* bitpos */
1949 complain_overflow_dont,/* complain_on_overflow */
1950 bfd_elf_generic_reloc, /* special_function */
1951 "R_ARM_RBASE", /* name */
1952 false, /* partial_inplace */
1953 0, /* src_mask */
1954 0, /* dst_mask */
1955 false) /* pcrel_offset */
1958 static reloc_howto_type *
1959 elf32_arm_howto_from_type (unsigned int r_type)
1961 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1962 return &elf32_arm_howto_table_1[r_type];
1964 if (r_type >= R_ARM_IRELATIVE
1965 && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1966 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1968 if (r_type >= R_ARM_RREL32
1969 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1970 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1972 return NULL;
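/* Note: the r_type space is sparse, so the HOWTOs live in three tables:
   elf32_arm_howto_table_1 is indexed directly by r_type for the low-numbered
   relocations, table_2 starts at R_ARM_IRELATIVE (160) and table_3 at
   R_ARM_RREL32 (249).  For example, R_ARM_GOTFUNCDESC (161) maps to
   elf32_arm_howto_table_2[1].  */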
1975 static bool
1976 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1977 Elf_Internal_Rela * elf_reloc)
1979 unsigned int r_type;
1981 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1982 if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1984 /* xgettext:c-format */
1985 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1986 abfd, r_type);
1987 bfd_set_error (bfd_error_bad_value);
1988 return false;
1990 return true;
1993 struct elf32_arm_reloc_map
1995 bfd_reloc_code_real_type bfd_reloc_val;
1996 unsigned char elf_reloc_val;
1999 /* All entries in this list must also be present in one of the
2000 elf32_arm_howto tables above. */
2000 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
2002 {BFD_RELOC_NONE, R_ARM_NONE},
2003 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
2004 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
2005 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
2006 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
2007 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
2008 {BFD_RELOC_32, R_ARM_ABS32},
2009 {BFD_RELOC_32_PCREL, R_ARM_REL32},
2010 {BFD_RELOC_8, R_ARM_ABS8},
2011 {BFD_RELOC_16, R_ARM_ABS16},
2012 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
2013 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
2014 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
2015 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
2016 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
2017 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
2018 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
2019 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
2020 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
2021 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
2022 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
2023 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
2024 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
2025 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
2026 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
2027 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2028 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
2029 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
2030 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
2031 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
2032 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
2033 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2034 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
2035 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
2036 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
2037 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
2038 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
2039 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
2040 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
2041 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
2042 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
2043 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
2044 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
2045 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
2046 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
2047 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
2048 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
2049 {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
2050 {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
2051 {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
2052 {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
2053 {BFD_RELOC_ARM_TLS_GD32_FDPIC, R_ARM_TLS_GD32_FDPIC},
2054 {BFD_RELOC_ARM_TLS_LDM32_FDPIC, R_ARM_TLS_LDM32_FDPIC},
2055 {BFD_RELOC_ARM_TLS_IE32_FDPIC, R_ARM_TLS_IE32_FDPIC},
2056 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
2057 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
2058 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
2059 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
2060 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
2061 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
2062 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
2063 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
2064 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2065 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2066 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2067 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2068 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2069 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2070 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2071 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2072 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2073 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2074 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2075 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2076 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2077 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2078 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2079 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2080 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2081 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2082 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2083 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2084 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2085 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2086 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2087 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2088 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2089 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2090 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2091 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2092 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2093 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2094 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
2095 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2096 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2097 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2098 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC},
2099 {BFD_RELOC_ARM_THUMB_BF17, R_ARM_THM_BF16},
2100 {BFD_RELOC_ARM_THUMB_BF13, R_ARM_THM_BF12},
2101 {BFD_RELOC_ARM_THUMB_BF19, R_ARM_THM_BF18}
2104 static reloc_howto_type *
2105 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2106 bfd_reloc_code_real_type code)
2108 unsigned int i;
2110 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2111 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2112 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2114 return NULL;
2117 static reloc_howto_type *
2118 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2119 const char *r_name)
2121 unsigned int i;
2123 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2124 if (elf32_arm_howto_table_1[i].name != NULL
2125 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2126 return &elf32_arm_howto_table_1[i];
2128 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2129 if (elf32_arm_howto_table_2[i].name != NULL
2130 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2131 return &elf32_arm_howto_table_2[i];
2133 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2134 if (elf32_arm_howto_table_3[i].name != NULL
2135 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2136 return &elf32_arm_howto_table_3[i];
2138 return NULL;
2141 /* Support for core dump NOTE sections. */
2143 static bool
2144 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2146 int offset;
2147 size_t size;
2149 switch (note->descsz)
2151 default:
2152 return false;
2154 case 148: /* Linux/ARM 32-bit. */
2155 /* pr_cursig */
2156 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2158 /* pr_pid */
2159 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2161 /* pr_reg */
2162 offset = 72;
2163 size = 72;
2165 break;
2168 /* Make a ".reg/999" section. */
2169 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2170 size, note->descpos + offset);
2173 static bool
2174 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2176 switch (note->descsz)
2178 default:
2179 return false;
2181 case 124: /* Linux/ARM elf_prpsinfo. */
2182 elf_tdata (abfd)->core->pid
2183 = bfd_get_32 (abfd, note->descdata + 12);
2184 elf_tdata (abfd)->core->program
2185 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2186 elf_tdata (abfd)->core->command
2187 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2190 /* Note that for some reason, a spurious space is tacked
2191 onto the end of the args in some implementations (at least
2192 one, anyway), so strip it off if it exists. */
2194 char *command = elf_tdata (abfd)->core->command;
2195 int n = strlen (command);
2197 if (0 < n && command[n - 1] == ' ')
2198 command[n - 1] = '\0';
2201 return true;
2204 static char *
2205 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2206 int note_type, ...)
2208 switch (note_type)
2210 default:
2211 return NULL;
2213 case NT_PRPSINFO:
2215 char data[124] ATTRIBUTE_NONSTRING;
2216 va_list ap;
2218 va_start (ap, note_type);
2219 memset (data, 0, sizeof (data));
2220 strncpy (data + 28, va_arg (ap, const char *), 16);
2221 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2222 DIAGNOSTIC_PUSH;
2223 /* GCC 8.0 and 8.1 warn about 80 equals destination size with
2224 -Wstringop-truncation:
2225 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
2227 DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
2228 #endif
2229 strncpy (data + 44, va_arg (ap, const char *), 80);
2230 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2231 DIAGNOSTIC_POP;
2232 #endif
2233 va_end (ap);
2235 return elfcore_write_note (abfd, buf, bufsiz,
2236 "CORE", note_type, data, sizeof (data));
2239 case NT_PRSTATUS:
2241 char data[148];
2242 va_list ap;
2243 long pid;
2244 int cursig;
2245 const void *greg;
2247 va_start (ap, note_type);
2248 memset (data, 0, sizeof (data));
2249 pid = va_arg (ap, long);
2250 bfd_put_32 (abfd, pid, data + 24);
2251 cursig = va_arg (ap, int);
2252 bfd_put_16 (abfd, cursig, data + 12);
2253 greg = va_arg (ap, const void *);
2254 memcpy (data + 72, greg, 72);
2255 va_end (ap);
2257 return elfcore_write_note (abfd, buf, bufsiz,
2258 "CORE", note_type, data, sizeof (data));
2263 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2264 #define TARGET_LITTLE_NAME "elf32-littlearm"
2265 #define TARGET_BIG_SYM arm_elf32_be_vec
2266 #define TARGET_BIG_NAME "elf32-bigarm"
2268 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2269 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2270 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2272 typedef unsigned long int insn32;
2273 typedef unsigned short int insn16;
2275 /* In lieu of proper flags, assume all EABIv4 or later objects are
2276 interworkable. */
2277 #define INTERWORK_FLAG(abfd) \
2278 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2279 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2280 || ((abfd)->flags & BFD_LINKER_CREATED))
2282 /* The linker script knows the section names for placement.
2283 The entry_names are used to do simple name mangling on the stubs.
2284 Given a function name and its type, the stub can be found. The
2285 name can be changed. The only requirement is that the %s be present. */
2286 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2287 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2289 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2290 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2292 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2293 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2295 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2296 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2298 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2299 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2301 #define STUB_ENTRY_NAME "__%s_veneer"
2303 #define CMSE_PREFIX "__acle_se_"
2305 #define CMSE_STUB_NAME ".gnu.sgstubs"
2307 /* The name of the dynamic interpreter. This is put in the .interp
2308 section. */
2309 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2311 /* FDPIC default stack size. */
2312 #define DEFAULT_STACK_SIZE 0x8000
2314 static const unsigned long tls_trampoline [] =
2316 0xe08e0000, /* add r0, lr, r0 */
2317 0xe5901004, /* ldr r1, [r0,#4] */
2318 0xe12fff11, /* bx r1 */
2321 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2323 0xe52d2004, /* push {r2} */
2324 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2325 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2326 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2327 0xe081100f, /* 2: add r1, pc */
2328 0xe12fff12, /* bx r2 */
2329 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2330 + dl_tlsdesc_lazy_resolver(GOT) */
2331 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
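/* Roughly, the lazy trampoline works as follows: the two trailing words are
   GOT-relative offsets, so "ldr r2, [pc, r2]" at 1: fetches the address of
   dl_tlsdesc_lazy_resolver from the GOT, "add r1, pc" at 2: leaves the GOT
   address itself in r1, and "bx r2" enters the resolver with the caller's
   r2 preserved on the stack.  */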
2334 /* NOTE: [Thumb nop sequence]
2335 When adding code that transitions from Thumb to Arm, the instruction used
2336 for the alignment padding should be 0xe7fd (b .-2) instead of a nop, for
2337 performance reasons. */
2339 /* ARM FDPIC PLT entry. */
2340 /* The last 5 words contain PLT lazy fragment code and data. */
2341 static const bfd_vma elf32_arm_fdpic_plt_entry [] =
2343 0xe59fc008, /* ldr r12, .L1 */
2344 0xe08cc009, /* add r12, r12, r9 */
2345 0xe59c9004, /* ldr r9, [r12, #4] */
2346 0xe59cf000, /* ldr pc, [r12] */
2347 0x00000000, /* L1. .word foo(GOTOFFFUNCDESC) */
2348 0x00000000, /* L2. .word foo(funcdesc_value_reloc_offset) */
2349 0xe51fc00c, /* ldr r12, [pc, #-12] */
2350 0xe92d1000, /* push {r12} */
2351 0xe599c004, /* ldr r12, [r9, #4] */
2352 0xe599f000, /* ldr pc, [r9] */
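/* Roughly: the first four words load the GOTOFFFUNCDESC offset from .L1, add
   the FDPIC GOT pointer held in r9 to reach the callee's function descriptor,
   load the descriptor's second word (the callee's GOT value) into r9, and
   jump through its first word (the entry point).  */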
2355 /* Thumb FDPIC PLT entry. */
2356 /* The last 5 words contain PLT lazy fragment code and data. */
2357 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
2359 0xc00cf8df, /* ldr.w r12, .L1 */
2360 0x0c09eb0c, /* add.w r12, r12, r9 */
2361 0x9004f8dc, /* ldr.w r9, [r12, #4] */
2362 0xf000f8dc, /* ldr.w pc, [r12] */
2363 0x00000000, /* .L1 .word foo(GOTOFFFUNCDESC) */
2364 0x00000000, /* .L2 .word foo(funcdesc_value_reloc_offset) */
2365 0xc008f85f, /* ldr.w r12, .L2 */
2366 0xcd04f84d, /* push {r12} */
2367 0xc004f8d9, /* ldr.w r12, [r9, #4] */
2368 0xf000f8d9, /* ldr.w pc, [r9] */
2371 #ifdef FOUR_WORD_PLT
2373 /* The first entry in a procedure linkage table looks like
2374 this. It is set up so that any shared library function that is
2375 called before the relocation has been set up calls the dynamic
2376 linker first. */
2377 static const bfd_vma elf32_arm_plt0_entry [] =
2379 0xe52de004, /* str lr, [sp, #-4]! */
2380 0xe59fe010, /* ldr lr, [pc, #16] */
2381 0xe08fe00e, /* add lr, pc, lr */
2382 0xe5bef008, /* ldr pc, [lr, #8]! */
2385 /* Subsequent entries in a procedure linkage table look like
2386 this. */
2387 static const bfd_vma elf32_arm_plt_entry [] =
2389 0xe28fc600, /* add ip, pc, #NN */
2390 0xe28cca00, /* add ip, ip, #NN */
2391 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2392 0x00000000, /* unused */
2395 #else /* not FOUR_WORD_PLT */
2397 /* The first entry in a procedure linkage table looks like
2398 this. It is set up so that any shared library function that is
2399 called before the relocation has been set up calls the dynamic
2400 linker first. */
2401 static const bfd_vma elf32_arm_plt0_entry [] =
2403 0xe52de004, /* str lr, [sp, #-4]! */
2404 0xe59fe004, /* ldr lr, [pc, #4] */
2405 0xe08fe00e, /* add lr, pc, lr */
2406 0xe5bef008, /* ldr pc, [lr, #8]! */
2407 0x00000000, /* &GOT[0] - . */
2410 /* By default subsequent entries in a procedure linkage table look like
2411 this. Offsets that don't fit into 28 bits will cause a link error. */
2412 static const bfd_vma elf32_arm_plt_entry_short [] =
2414 0xe28fc600, /* add ip, pc, #0xNN00000 */
2415 0xe28cca00, /* add ip, ip, #0xNN000 */
2416 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2419 /* When explicitly asked, we'll use this "long" entry format
2420 which can cope with arbitrary displacements. */
2421 static const bfd_vma elf32_arm_plt_entry_long [] =
2423 0xe28fc200, /* add ip, pc, #0xN0000000 */
2424 0xe28cc600, /* add ip, ip, #0xNN00000 */
2425 0xe28cca00, /* add ip, ip, #0xNN000 */
2426 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
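/* The 0xNN fields above are filled in when the PLT is built: the displacement
   from the PLT entry to its GOT slot is split into rotated 8-bit immediate
   chunks, one per "add ip, ..." instruction, with the low 12 bits going into
   the final "ldr pc, [ip, #0xNNN]!".  The short form therefore covers a
   28-bit displacement, the long form a full 32 bits.  */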
2429 static bool elf32_arm_use_long_plt_entry = false;
2431 #endif /* not FOUR_WORD_PLT */
2433 /* The first entry in a procedure linkage table looks like this.
2434 It is set up so that any shared library function that is called before the
2435 relocation has been set up calls the dynamic linker first. */
2436 static const bfd_vma elf32_thumb2_plt0_entry [] =
2438 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2439 an instruction may be encoded as one or two array elements. */
2440 0xf8dfb500, /* push {lr} */
2441 0x44fee008, /* ldr.w lr, [pc, #8] */
2442 /* add lr, pc */
2443 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2444 0x00000000, /* &GOT[0] - . */
2447 /* Subsequent entries in a procedure linkage table for a Thumb-only target
2448 look like this. */
2449 static const bfd_vma elf32_thumb2_plt_entry [] =
2451 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2452 an instruction may be encoded as one or two array elements. */
2453 0x0c00f240, /* movw ip, #0xNNNN */
2454 0x0c00f2c0, /* movt ip, #0xNNNN */
2455 0xf8dc44fc, /* add ip, pc */
2456 0xe7fcf000 /* ldr.w pc, [ip] */
2457 /* b .-4 */
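/* Roughly: the movw/movt pair materialise in ip the 32-bit offset from this
   PLT entry to its GOT slot, "add ip, pc" converts that into the slot's
   address, and the final ldr.w loads the target address straight into pc.
   The trailing "b .-4" halfword only pads the entry to a whole number of
   words.  */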
2460 /* The format of the first entry in the procedure linkage table
2461 for a VxWorks executable. */
2462 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2464 0xe52dc008, /* str ip,[sp,#-8]! */
2465 0xe59fc000, /* ldr ip,[pc] */
2466 0xe59cf008, /* ldr pc,[ip,#8] */
2467 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2470 /* The format of subsequent entries in a VxWorks executable. */
2471 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2473 0xe59fc000, /* ldr ip,[pc] */
2474 0xe59cf000, /* ldr pc,[ip] */
2475 0x00000000, /* .long @got */
2476 0xe59fc000, /* ldr ip,[pc] */
2477 0xea000000, /* b _PLT */
2478 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2481 /* The format of entries in a VxWorks shared library. */
2482 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2484 0xe59fc000, /* ldr ip,[pc] */
2485 0xe79cf009, /* ldr pc,[ip,r9] */
2486 0x00000000, /* .long @got */
2487 0xe59fc000, /* ldr ip,[pc] */
2488 0xe599f008, /* ldr pc,[r9,#8] */
2489 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2492 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2493 #define PLT_THUMB_STUB_SIZE 4
2494 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2496 0x4778, /* bx pc */
2497 0xe7fd /* b .-2 */
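/* From Thumb state "bx pc" reads the PC as the address of the word-aligned
   code four bytes ahead with bit zero clear, so it switches to ARM state and
   enters the ARM PLT entry that immediately follows this stub; the "b .-2"
   is never executed and only pads the stub to PLT_THUMB_STUB_SIZE.  */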
2500 /* The first entry in a procedure linkage table looks like
2501 this. It is set up so that any shared library function that is
2502 called before the relocation has been set up calls the dynamic
2503 linker first. */
2504 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2506 /* First bundle: */
2507 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2508 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2509 0xe08cc00f, /* add ip, ip, pc */
2510 0xe52dc008, /* str ip, [sp, #-8]! */
2511 /* Second bundle: */
2512 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2513 0xe59cc000, /* ldr ip, [ip] */
2514 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2515 0xe12fff1c, /* bx ip */
2516 /* Third bundle: */
2517 0xe320f000, /* nop */
2518 0xe320f000, /* nop */
2519 0xe320f000, /* nop */
2520 /* .Lplt_tail: */
2521 0xe50dc004, /* str ip, [sp, #-4] */
2522 /* Fourth bundle: */
2523 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2524 0xe59cc000, /* ldr ip, [ip] */
2525 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2526 0xe12fff1c, /* bx ip */
2528 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
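/* The "bic ip, ip, #0xc000000f" instructions above implement the Native
   Client sandbox rules: an indirect branch target must be masked to clear
   the low four bits (16-byte bundle alignment) and the top two bits (the
   sandbox's address range) before the "bx ip".  */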
2530 /* Subsequent entries in a procedure linkage table look like this. */
2531 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2533 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2534 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2535 0xe08cc00f, /* add ip, ip, pc */
2536 0xea000000, /* b .Lplt_tail */
2539 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2540 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2541 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2542 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2543 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2544 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2545 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) - 2) + 4)
2546 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
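/* These bounds fold in the PC-relative base of the branch instructions: an
   ARM B/BL adds a signed 24-bit word offset to the PC, which reads as the
   instruction address plus 8, giving for example
   ARM_MAX_FWD_BRANCH_OFFSET = ((1 << 23) - 1) * 4 + 8; the Thumb and Thumb-2
   forms use a PC base of the instruction address plus 4.  */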
2548 enum stub_insn_type
2550 THUMB16_TYPE = 1,
2551 THUMB32_TYPE,
2552 ARM_TYPE,
2553 DATA_TYPE
2556 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2557 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2558 is inserted in arm_build_one_stub(). */
2559 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2560 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2561 #define THUMB32_MOVT(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2562 #define THUMB32_MOVW(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2563 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2564 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2565 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2566 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2568 typedef struct
2570 bfd_vma data;
2571 enum stub_insn_type type;
2572 unsigned int r_type;
2573 int reloc_addend;
2574 } insn_sequence;
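/* A stub template is simply an array of insn_sequence entries: THUMB16,
   THUMB32 and ARM entries carry fixed instruction encodings (optionally with
   a branch relocation such as R_ARM_THM_JUMP24 applied to them), while
   DATA_TYPE entries emit literal words relocated with the given r_type and
   addend when the stub is built.  */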
2576 /* See note [Thumb nop sequence] when adding a veneer. */
2578 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2579 to reach the stub if necessary. */
2580 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2582 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2583 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2586 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2587 available. */
2588 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2590 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2591 ARM_INSN (0xe12fff1c), /* bx ip */
2592 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2595 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2596 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2598 THUMB16_INSN (0xb401), /* push {r0} */
2599 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2600 THUMB16_INSN (0x4684), /* mov ip, r0 */
2601 THUMB16_INSN (0xbc01), /* pop {r0} */
2602 THUMB16_INSN (0x4760), /* bx ip */
2603 THUMB16_INSN (0xbf00), /* nop */
2604 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2607 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */
2608 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2610 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */
2611 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(x) */
2614 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2615 M-profile architectures. */
2616 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
2618 THUMB32_MOVW (0xf2400c00), /* mov.w ip, R_ARM_MOVW_ABS_NC */
2619 THUMB32_MOVT (0xf2c00c00), /* movt ip, R_ARM_MOVT_ABS << 16 */
2620 THUMB16_INSN (0x4760), /* bx ip */
2623 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2624 allowed. */
2625 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2627 THUMB16_INSN (0x4778), /* bx pc */
2628 THUMB16_INSN (0xe7fd), /* b .-2 */
2629 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2630 ARM_INSN (0xe12fff1c), /* bx ip */
2631 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2634 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2635 available. */
2636 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2638 THUMB16_INSN (0x4778), /* bx pc */
2639 THUMB16_INSN (0xe7fd), /* b .-2 */
2640 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2641 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2644 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2645 one, when the destination is close enough. */
2646 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2648 THUMB16_INSN (0x4778), /* bx pc */
2649 THUMB16_INSN (0xe7fd), /* b .-2 */
2650 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2653 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2654 blx to reach the stub if necessary. */
2655 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2657 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2658 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2659 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2662 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2663 blx to reach the stub if necessary. We cannot add into pc;
2664 it is not guaranteed to mode switch (different in ARMv6 and
2665 ARMv7). */
2666 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2668 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2669 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2670 ARM_INSN (0xe12fff1c), /* bx ip */
2671 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2674 /* V4T ARM -> Thumb long branch stub, PIC. */
2675 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2677 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2678 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2679 ARM_INSN (0xe12fff1c), /* bx ip */
2680 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2683 /* V4T Thumb -> ARM long branch stub, PIC. */
2684 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2686 THUMB16_INSN (0x4778), /* bx pc */
2687 THUMB16_INSN (0xe7fd), /* b .-2 */
2688 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2689 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2690 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2693 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2694 architectures. */
2695 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2697 THUMB16_INSN (0xb401), /* push {r0} */
2698 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2699 THUMB16_INSN (0x46fc), /* mov ip, pc */
2700 THUMB16_INSN (0x4484), /* add ip, r0 */
2701 THUMB16_INSN (0xbc01), /* pop {r0} */
2702 THUMB16_INSN (0x4760), /* bx ip */
2703 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X+4) */
2706 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2707 allowed. */
2708 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2710 THUMB16_INSN (0x4778), /* bx pc */
2711 THUMB16_INSN (0xe7fd), /* b .-2 */
2712 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2713 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2714 ARM_INSN (0xe12fff1c), /* bx ip */
2715 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2718 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2719 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2720 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2722 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2723 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2724 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2727 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a
2728 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2729 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2731 THUMB16_INSN (0x4778), /* bx pc */
2732 THUMB16_INSN (0xe7fd), /* b .-2 */
2733 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2734 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2735 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2738 /* NaCl ARM -> ARM long branch stub. */
2739 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2741 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2742 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2743 ARM_INSN (0xe12fff1c), /* bx ip */
2744 ARM_INSN (0xe320f000), /* nop */
2745 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2746 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2747 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2748 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2751 /* NaCl ARM -> ARM long branch stub, PIC. */
2752 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2754 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2755 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2756 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2757 ARM_INSN (0xe12fff1c), /* bx ip */
2758 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2759 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
2760 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2761 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2764 /* Stub used for transition to secure state (aka SG veneer). */
2765 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
2767 THUMB32_INSN (0xe97fe97f), /* sg. */
2768 THUMB32_B_INSN (0xf000b800, -4), /* b.w original_branch_dest. */
2772 /* Cortex-A8 erratum-workaround stubs. */
2774 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2775 can't use a conditional branch to reach this stub). */
2777 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2779 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2780 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2781 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2784 /* Stub used for b.w and bl.w instructions. */
2786 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2788 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2791 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2793 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2796 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2797 instruction (which switches to ARM mode) to point to this stub. Jump to the
2798 real destination using an ARM-mode branch. */
2800 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2802 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2805 /* For each section group there can be a specially created linker section
2806 to hold the stubs for that group. The name of the stub section is based
2807 upon the name of another section within that group with the suffix below
2808 applied.
2810 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2811 create what appeared to be a linker stub section when it actually
2812 contained user code/data. For example, consider this fragment:
2814 const char * stubborn_problems[] = { "np" };
2816 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2817 section called:
2819 .data.rel.local.stubborn_problems
2821 This then causes problems in elf32_arm_build_stubs() as it triggers:
2823 // Ignore non-stub sections.
2824 if (!strstr (stub_sec->name, STUB_SUFFIX))
2825 continue;
2827 And so the section would be ignored instead of being processed. Hence
2828 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2829 C identifier. */
2830 #define STUB_SUFFIX ".__stub"
2832 /* One entry per long/short branch stub defined above. */
2833 #define DEF_STUBS \
2834 DEF_STUB (long_branch_any_any) \
2835 DEF_STUB (long_branch_v4t_arm_thumb) \
2836 DEF_STUB (long_branch_thumb_only) \
2837 DEF_STUB (long_branch_v4t_thumb_thumb) \
2838 DEF_STUB (long_branch_v4t_thumb_arm) \
2839 DEF_STUB (short_branch_v4t_thumb_arm) \
2840 DEF_STUB (long_branch_any_arm_pic) \
2841 DEF_STUB (long_branch_any_thumb_pic) \
2842 DEF_STUB (long_branch_v4t_thumb_thumb_pic) \
2843 DEF_STUB (long_branch_v4t_arm_thumb_pic) \
2844 DEF_STUB (long_branch_v4t_thumb_arm_pic) \
2845 DEF_STUB (long_branch_thumb_only_pic) \
2846 DEF_STUB (long_branch_any_tls_pic) \
2847 DEF_STUB (long_branch_v4t_thumb_tls_pic) \
2848 DEF_STUB (long_branch_arm_nacl) \
2849 DEF_STUB (long_branch_arm_nacl_pic) \
2850 DEF_STUB (cmse_branch_thumb_only) \
2851 DEF_STUB (a8_veneer_b_cond) \
2852 DEF_STUB (a8_veneer_b) \
2853 DEF_STUB (a8_veneer_bl) \
2854 DEF_STUB (a8_veneer_blx) \
2855 DEF_STUB (long_branch_thumb2_only) \
2856 DEF_STUB (long_branch_thumb2_only_pure)
2858 #define DEF_STUB(x) arm_stub_##x,
2859 enum elf32_arm_stub_type
2861 arm_stub_none,
2862 DEF_STUBS
2863 max_stub_type
2865 #undef DEF_STUB
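/* DEF_STUBS is expanded twice with different definitions of DEF_STUB: once
   above to create one arm_stub_* enumerator per template, and once below to
   build the matching stub_definitions[] array, so the enum and the table
   cannot get out of sync.  */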
2867 /* Note the first a8_veneer type. */
2868 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2870 typedef struct
2872 const insn_sequence* template_sequence;
2873 int template_size;
2874 } stub_def;
2876 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2877 static const stub_def stub_definitions[] =
2879 {NULL, 0},
2880 DEF_STUBS
2883 struct elf32_arm_stub_hash_entry
2885 /* Base hash table entry structure. */
2886 struct bfd_hash_entry root;
2888 /* The stub section. */
2889 asection *stub_sec;
2891 /* Offset within stub_sec of the beginning of this stub. */
2892 bfd_vma stub_offset;
2894 /* Given the symbol's value and its section we can determine its final
2895 value when building the stubs (so the stub knows where to jump). */
2896 bfd_vma target_value;
2897 asection *target_section;
2899 /* Same as above but for the source of the branch to the stub. Used by
2900 the Cortex-A8 erratum workaround to patch the branch so that it targets
2901 the stub. As such, the source section does not need to be recorded, since
2902 Cortex-A8 erratum workaround stubs are only generated when both source and
2903 target are in the same section. */
2904 bfd_vma source_value;
2906 /* The instruction which caused this stub to be generated (only valid for
2907 Cortex-A8 erratum workaround stubs at present). */
2908 unsigned long orig_insn;
2910 /* The stub type. */
2911 enum elf32_arm_stub_type stub_type;
2912 /* Its encoding size in bytes. */
2913 int stub_size;
2914 /* Its template. */
2915 const insn_sequence *stub_template;
2916 /* The size of the template (number of entries). */
2917 int stub_template_size;
2919 /* The symbol table entry, if any, that this was derived from. */
2920 struct elf32_arm_link_hash_entry *h;
2922 /* Type of branch. */
2923 enum arm_st_branch_type branch_type;
2925 /* Where this stub is being called from, or, in the case of combined
2926 stub sections, the first input section in the group. */
2927 asection *id_sec;
2929 /* The name for the local symbol at the start of this stub. The
2930 stub name in the hash table has to be unique; this does not, so
2931 it can be friendlier. */
2932 char *output_name;
2935 /* Used to build a map of a section. This is required for mixed-endian
2936 code/data. */
2938 typedef struct elf32_elf_section_map
2940 bfd_vma vma;
2941 char type;
2943 elf32_arm_section_map;
2945 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2947 typedef enum
2949 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2950 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2951 VFP11_ERRATUM_ARM_VENEER,
2952 VFP11_ERRATUM_THUMB_VENEER
2954 elf32_vfp11_erratum_type;
2956 typedef struct elf32_vfp11_erratum_list
2958 struct elf32_vfp11_erratum_list *next;
2959 bfd_vma vma;
2960 union
2962 struct
2964 struct elf32_vfp11_erratum_list *veneer;
2965 unsigned int vfp_insn;
2966 } b;
2967 struct
2969 struct elf32_vfp11_erratum_list *branch;
2970 unsigned int id;
2971 } v;
2972 } u;
2973 elf32_vfp11_erratum_type type;
2975 elf32_vfp11_erratum_list;
2977 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2978 veneer. */
2979 typedef enum
2981 STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2982 STM32L4XX_ERRATUM_VENEER
2984 elf32_stm32l4xx_erratum_type;
2986 typedef struct elf32_stm32l4xx_erratum_list
2988 struct elf32_stm32l4xx_erratum_list *next;
2989 bfd_vma vma;
2990 union
2992 struct
2994 struct elf32_stm32l4xx_erratum_list *veneer;
2995 unsigned int insn;
2996 } b;
2997 struct
2999 struct elf32_stm32l4xx_erratum_list *branch;
3000 unsigned int id;
3001 } v;
3002 } u;
3003 elf32_stm32l4xx_erratum_type type;
3005 elf32_stm32l4xx_erratum_list;
3007 typedef enum
3009 DELETE_EXIDX_ENTRY,
3010 INSERT_EXIDX_CANTUNWIND_AT_END
3012 arm_unwind_edit_type;
3014 /* A (sorted) list of edits to apply to an unwind table. */
3015 typedef struct arm_unwind_table_edit
3017 arm_unwind_edit_type type;
3018 /* Note: we sometimes want to insert an unwind entry corresponding to a
3019 section different from the one we're currently writing out, so record the
3020 (text) section this edit relates to here. */
3021 asection *linked_section;
3022 unsigned int index;
3023 struct arm_unwind_table_edit *next;
3025 arm_unwind_table_edit;
3027 typedef struct _arm_elf_section_data
3029 /* Information about mapping symbols. */
3030 struct bfd_elf_section_data elf;
3031 unsigned int mapcount;
3032 unsigned int mapsize;
3033 elf32_arm_section_map *map;
3034 /* Information about CPU errata. */
3035 unsigned int erratumcount;
3036 elf32_vfp11_erratum_list *erratumlist;
3037 unsigned int stm32l4xx_erratumcount;
3038 elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
3039 unsigned int additional_reloc_count;
3040 /* Information about unwind tables. */
3041 union
3043 /* Unwind info attached to a text section. */
3044 struct
3046 asection *arm_exidx_sec;
3047 } text;
3049 /* Unwind info attached to an .ARM.exidx section. */
3050 struct
3052 arm_unwind_table_edit *unwind_edit_list;
3053 arm_unwind_table_edit *unwind_edit_tail;
3054 } exidx;
3055 } u;
3057 _arm_elf_section_data;
3059 #define elf32_arm_section_data(sec) \
3060 ((_arm_elf_section_data *) elf_section_data (sec))
3062 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3063 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3064 so may be created multiple times: we use an array of these entries whilst
3065 relaxing which we can refresh easily, then create stubs for each potentially
3066 erratum-triggering instruction once we've settled on a solution. */
3068 struct a8_erratum_fix
3070 bfd *input_bfd;
3071 asection *section;
3072 bfd_vma offset;
3073 bfd_vma target_offset;
3074 unsigned long orig_insn;
3075 char *stub_name;
3076 enum elf32_arm_stub_type stub_type;
3077 enum arm_st_branch_type branch_type;
3080 /* A table of relocs applied to branches which might trigger Cortex-A8
3081 erratum. */
3083 struct a8_erratum_reloc
3085 bfd_vma from;
3086 bfd_vma destination;
3087 struct elf32_arm_link_hash_entry *hash;
3088 const char *sym_name;
3089 unsigned int r_type;
3090 enum arm_st_branch_type branch_type;
3091 bool non_a8_stub;
3094 /* The size of the thread control block. */
3095 #define TCB_SIZE 8
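/* ARM uses TLS variant 1: the thread pointer addresses this 8-byte thread
   control block, with the static TLS blocks laid out after it, so
   TP-relative offsets are biased by TCB_SIZE.  */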
3097 /* ARM-specific information about a PLT entry, over and above the usual
3098 gotplt_union. */
3099 struct arm_plt_info
3101 /* We reference count Thumb references to a PLT entry separately,
3102 so that we can emit the Thumb trampoline only if needed. */
3103 bfd_signed_vma thumb_refcount;
3105 /* Some references from Thumb code may be eliminated by BL->BLX
3106 conversion, so record them separately. */
3107 bfd_signed_vma maybe_thumb_refcount;
3109 /* How many of the recorded PLT accesses were from non-call relocations.
3110 This information is useful when deciding whether anything takes the
3111 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
3112 non-call references to the function should resolve directly to the
3113 real runtime target. */
3114 unsigned int noncall_refcount;
3116 /* Since PLT entries have variable size if the Thumb prologue is
3117 used, we need to record the index into .got.plt instead of
3118 recomputing it from the PLT offset. */
3119 bfd_signed_vma got_offset;
3122 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
3123 struct arm_local_iplt_info
3125 /* The information that is usually found in the generic ELF part of
3126 the hash table entry. */
3127 union gotplt_union root;
3129 /* The information that is usually found in the ARM-specific part of
3130 the hash table entry. */
3131 struct arm_plt_info arm;
3133 /* A list of all potential dynamic relocations against this symbol. */
3134 struct elf_dyn_relocs *dyn_relocs;
3137 /* Structure to handle FDPIC support for local functions. */
3138 struct fdpic_local
3140 unsigned int funcdesc_cnt;
3141 unsigned int gotofffuncdesc_cnt;
3142 int funcdesc_offset;
3145 struct elf_arm_obj_tdata
3147 struct elf_obj_tdata root;
3149 /* Zero to warn when linking objects with incompatible enum sizes. */
3150 int no_enum_size_warning;
3152 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
3153 int no_wchar_size_warning;
3155 /* The number of entries in each of the arrays in this structure.
3156 Used to avoid buffer overruns. */
3157 bfd_size_type num_entries;
3159 /* tls_type for each local got entry. */
3160 char *local_got_tls_type;
3162 /* GOTPLT entries for TLS descriptors. */
3163 bfd_vma *local_tlsdesc_gotent;
3165 /* Information for local symbols that need entries in .iplt. */
3166 struct arm_local_iplt_info **local_iplt;
3168 /* Maintains FDPIC counters and funcdesc info. */
3169 struct fdpic_local *local_fdpic_cnts;
3172 #define elf_arm_tdata(bfd) \
3173 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3175 #define elf32_arm_num_entries(bfd) \
3176 (elf_arm_tdata (bfd)->num_entries)
3178 #define elf32_arm_local_got_tls_type(bfd) \
3179 (elf_arm_tdata (bfd)->local_got_tls_type)
3181 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3182 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3184 #define elf32_arm_local_iplt(bfd) \
3185 (elf_arm_tdata (bfd)->local_iplt)
3187 #define elf32_arm_local_fdpic_cnts(bfd) \
3188 (elf_arm_tdata (bfd)->local_fdpic_cnts)
3190 #define is_arm_elf(bfd) \
3191 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3192 && elf_tdata (bfd) != NULL \
3193 && elf_object_id (bfd) == ARM_ELF_DATA)
3195 static bool
3196 elf32_arm_mkobject (bfd *abfd)
3198 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
3199 ARM_ELF_DATA);
3202 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3204 /* Structure to handle FDPIC support for extern functions. */
3205 struct fdpic_global {
3206 unsigned int gotofffuncdesc_cnt;
3207 unsigned int gotfuncdesc_cnt;
3208 unsigned int funcdesc_cnt;
3209 int funcdesc_offset;
3210 int gotfuncdesc_offset;
3213 /* Arm ELF linker hash entry. */
3214 struct elf32_arm_link_hash_entry
3216 struct elf_link_hash_entry root;
3218 /* ARM-specific PLT information. */
3219 struct arm_plt_info plt;
3221 #define GOT_UNKNOWN 0
3222 #define GOT_NORMAL 1
3223 #define GOT_TLS_GD 2
3224 #define GOT_TLS_IE 4
3225 #define GOT_TLS_GDESC 8
3226 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
3227 unsigned int tls_type : 8;
3229 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
3230 unsigned int is_iplt : 1;
3232 unsigned int unused : 23;
3234 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3235 starting at the end of the jump table. */
3236 bfd_vma tlsdesc_got;
3238 /* The symbol marking the real symbol location for exported thumb
3239 symbols with Arm stubs. */
3240 struct elf_link_hash_entry *export_glue;
3242 /* A pointer to the most recently used stub hash entry against this
3243 symbol. */
3244 struct elf32_arm_stub_hash_entry *stub_cache;
3246 /* Counter for FDPIC relocations against this symbol. */
3247 struct fdpic_global fdpic_cnts;
3250 /* Traverse an arm ELF linker hash table. */
3251 #define elf32_arm_link_hash_traverse(table, func, info) \
3252 (elf_link_hash_traverse \
3253 (&(table)->root, \
3254 (bool (*) (struct elf_link_hash_entry *, void *)) (func), \
3255 (info)))
3257 /* Get the ARM elf linker hash table from a link_info structure. */
3258 #define elf32_arm_hash_table(p) \
3259 ((is_elf_hash_table ((p)->hash) \
3260 && elf_hash_table_id (elf_hash_table (p)) == ARM_ELF_DATA) \
3261 ? (struct elf32_arm_link_hash_table *) (p)->hash : NULL)
3263 #define arm_stub_hash_lookup(table, string, create, copy) \
3264 ((struct elf32_arm_stub_hash_entry *) \
3265 bfd_hash_lookup ((table), (string), (create), (copy)))
3267 /* Array to keep track of which stub sections have been created, and
3268 information on stub grouping. */
3269 struct map_stub
3271 /* This is the section to which stubs in the group will be
3272 attached. */
3273 asection *link_sec;
3274 /* The stub section. */
3275 asection *stub_sec;
3278 #define elf32_arm_compute_jump_table_size(htab) \
3279 ((htab)->next_tls_desc_index * 4)
3281 /* ARM ELF linker hash table. */
3282 struct elf32_arm_link_hash_table
3284 /* The main hash table. */
3285 struct elf_link_hash_table root;
3287 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3288 bfd_size_type thumb_glue_size;
3290 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3291 bfd_size_type arm_glue_size;
3293 /* The size in bytes of section containing the ARMv4 BX veneers. */
3294 bfd_size_type bx_glue_size;
3296 /* Offsets of ARMv4 BX veneers. Bit 1 is set if present, and bit 0 is set
3297 when the veneer has been populated. */
3298 bfd_vma bx_glue_offset[15];
3300 /* The size in bytes of the section containing glue for VFP11 erratum
3301 veneers. */
3302 bfd_size_type vfp11_erratum_glue_size;
3304 /* The size in bytes of the section containing glue for STM32L4XX erratum
3305 veneers. */
3306 bfd_size_type stm32l4xx_erratum_glue_size;
3308 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3309 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3310 elf32_arm_write_section(). */
3311 struct a8_erratum_fix *a8_erratum_fixes;
3312 unsigned int num_a8_erratum_fixes;
3314 /* An arbitrary input BFD chosen to hold the glue sections. */
3315 bfd * bfd_of_glue_owner;
3317 /* Nonzero to output a BE8 image. */
3318 int byteswap_code;
3320 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3321 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3322 int target1_is_rel;
3324 /* The relocation to use for R_ARM_TARGET2 relocations. */
3325 int target2_reloc;
3327 /* 0 = Ignore R_ARM_V4BX.
3328 1 = Convert BX to MOV PC.
3329 2 = Generate v4 interworking stubs. */
3330 int fix_v4bx;
3332 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3333 int fix_cortex_a8;
3335 /* Whether we should fix the ARM1176 BLX immediate issue. */
3336 int fix_arm1176;
3338 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3339 int use_blx;
3341 /* What sort of code sequences we should look for which may trigger the
3342 VFP11 denorm erratum. */
3343 bfd_arm_vfp11_fix vfp11_fix;
3345 /* Global counter for the number of fixes we have emitted. */
3346 int num_vfp11_fixes;
3348 /* What sort of code sequences we should look for which may trigger the
3349 STM32L4XX erratum. */
3350 bfd_arm_stm32l4xx_fix stm32l4xx_fix;
3352 /* Global counter for the number of fixes we have emitted. */
3353 int num_stm32l4xx_fixes;
3355 /* Nonzero to force PIC branch veneers. */
3356 int pic_veneer;
3358 /* The number of bytes in the initial entry in the PLT. */
3359 bfd_size_type plt_header_size;
3361 /* The number of bytes in the subsequent PLT entries. */
3362 bfd_size_type plt_entry_size;
3364 /* True if the target uses REL relocations. */
3365 bool use_rel;
3367 /* Nonzero if import library must be a secure gateway import library
3368 as per ARMv8-M Security Extensions. */
3369 int cmse_implib;
3371 /* The import library whose symbols' addresses must remain stable in
3372 the generated import library. */
3373 bfd *in_implib_bfd;
3375 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3376 bfd_vma next_tls_desc_index;
3378 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3379 bfd_vma num_tls_desc;
3381 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3382 asection *srelplt2;
3384 /* Offset in .plt section of tls_arm_trampoline. */
3385 bfd_vma tls_trampoline;
3387 /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
3388 union
3390 bfd_signed_vma refcount;
3391 bfd_vma offset;
3392 } tls_ldm_got;
3394 /* For convenience in allocate_dynrelocs. */
3395 bfd * obfd;
3397 /* The amount of space used by the reserved portion of the sgotplt
3398 section, plus whatever space is used by the jump slots. */
3399 bfd_vma sgotplt_jump_table_size;
3401 /* The stub hash table. */
3402 struct bfd_hash_table stub_hash_table;
3404 /* Linker stub bfd. */
3405 bfd *stub_bfd;
3407 /* Linker call-backs. */
3408 asection * (*add_stub_section) (const char *, asection *, asection *,
3409 unsigned int);
3410 void (*layout_sections_again) (void);
3412 /* Array to keep track of which stub sections have been created, and
3413 information on stub grouping. */
3414 struct map_stub *stub_group;
3416 /* Input stub section holding secure gateway veneers. */
3417 asection *cmse_stub_sec;
3419 /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3420 start to be allocated. */
3421 bfd_vma new_cmse_stub_offset;
3423 /* Number of elements in stub_group. */
3424 unsigned int top_id;
3426 /* Assorted information used by elf32_arm_size_stubs. */
3427 unsigned int bfd_count;
3428 unsigned int top_index;
3429 asection **input_list;
3431 /* True if the target system uses FDPIC. */
3432 int fdpic_p;
3434 /* Fixup section. Used for FDPIC. */
3435 asection *srofixup;
3438 /* Add an FDPIC read-only fixup. */
3439 static void
3440 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3442 bfd_vma fixup_offset;
3444 fixup_offset = srofixup->reloc_count++ * 4;
3445 BFD_ASSERT (fixup_offset < srofixup->size);
3446 bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
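/* Each rofixup entry is simply the absolute address of a word that the FDPIC
   loader must relocate at program startup; entries are emitted consecutively
   into the .rofixup section, with reloc_count reused as the running index.  */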
3449 static inline int
3450 ctz (unsigned int mask)
3452 #if GCC_VERSION >= 3004
3453 return __builtin_ctz (mask);
3454 #else
3455 unsigned int i;
3457 for (i = 0; i < 8 * sizeof (mask); i++)
3459 if (mask & 0x1)
3460 break;
3461 mask = (mask >> 1);
3463 return i;
3464 #endif
3467 static inline int
3468 elf32_arm_popcount (unsigned int mask)
3470 #if GCC_VERSION >= 3004
3471 return __builtin_popcount (mask);
3472 #else
3473 unsigned int i;
3474 int sum = 0;
3476 for (i = 0; i < 8 * sizeof (mask); i++)
3478 if (mask & 0x1)
3479 sum++;
3480 mask = (mask >> 1);
3482 return sum;
3483 #endif
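/* Illustrative example (not part of the original source): both fallback
   loops above mirror the GCC builtins they stand in for.  With a 32-bit
   unsigned int,

	ctz (0x28)                == 3	(0b101000 has three trailing zeros)
	elf32_arm_popcount (0x28) == 2	(two bits set)

   These helpers are used later in this file, for instance to count the
   registers in LDM/VLDM register lists when applying the STM32L4XX
   erratum workaround.  */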
3486 static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3487 asection *sreloc, Elf_Internal_Rela *rel);
3489 static void
3490 arm_elf_fill_funcdesc (bfd *output_bfd,
3491 struct bfd_link_info *info,
3492 int *funcdesc_offset,
3493 int dynindx,
3494 int offset,
3495 bfd_vma addr,
3496 bfd_vma dynreloc_value,
3497 bfd_vma seg)
3499 if ((*funcdesc_offset & 1) == 0)
3501 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
3502 asection *sgot = globals->root.sgot;
3504 if (bfd_link_pic (info))
3506 asection *srelgot = globals->root.srelgot;
3507 Elf_Internal_Rela outrel;
3509 outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
3510 outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
3511 outrel.r_addend = 0;
3513 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
3514 bfd_put_32 (output_bfd, addr, sgot->contents + offset);
3515 bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
3517 else
3519 struct elf_link_hash_entry *hgot = globals->root.hgot;
3520 bfd_vma got_value = hgot->root.u.def.value
3521 + hgot->root.u.def.section->output_section->vma
3522 + hgot->root.u.def.section->output_offset;
3524 arm_elf_add_rofixup (output_bfd, globals->srofixup,
3525 sgot->output_section->vma + sgot->output_offset
3526 + offset);
3527 arm_elf_add_rofixup (output_bfd, globals->srofixup,
3528 sgot->output_section->vma + sgot->output_offset
3529 + offset + 4);
3530 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
3531 bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
3533 *funcdesc_offset |= 1;
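/* Summary sketch (added for illustration, not in the original source): a
   function descriptor is a pair of consecutive words in .got at OFFSET.
   For PIC links the pair (ADDR, SEG) is emitted together with an
   R_ARM_FUNCDESC_VALUE dynamic relocation; for static FDPIC links the
   pair is (DYNRELOC_VALUE, GOT value) and two .rofixup entries are
   recorded so the loader can relocate both words.  The low bit of
   *FUNCDESC_OFFSET is then set to mark the descriptor as initialised.  */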
3537 /* Create an entry in an ARM ELF linker hash table. */
3539 static struct bfd_hash_entry *
3540 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3541 struct bfd_hash_table * table,
3542 const char * string)
3544 struct elf32_arm_link_hash_entry * ret =
3545 (struct elf32_arm_link_hash_entry *) entry;
3547 /* Allocate the structure if it has not already been allocated by a
3548 subclass. */
3549 if (ret == NULL)
3550 ret = (struct elf32_arm_link_hash_entry *)
3551 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3552 if (ret == NULL)
3553 return (struct bfd_hash_entry *) ret;
3555 /* Call the allocation method of the superclass. */
3556 ret = ((struct elf32_arm_link_hash_entry *)
3557 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3558 table, string));
3559 if (ret != NULL)
3561 ret->tls_type = GOT_UNKNOWN;
3562 ret->tlsdesc_got = (bfd_vma) -1;
3563 ret->plt.thumb_refcount = 0;
3564 ret->plt.maybe_thumb_refcount = 0;
3565 ret->plt.noncall_refcount = 0;
3566 ret->plt.got_offset = -1;
3567 ret->is_iplt = false;
3568 ret->export_glue = NULL;
3570 ret->stub_cache = NULL;
3572 ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
3573 ret->fdpic_cnts.gotfuncdesc_cnt = 0;
3574 ret->fdpic_cnts.funcdesc_cnt = 0;
3575 ret->fdpic_cnts.funcdesc_offset = -1;
3576 ret->fdpic_cnts.gotfuncdesc_offset = -1;
3579 return (struct bfd_hash_entry *) ret;
3582 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3583 symbols. */
3585 static bool
3586 elf32_arm_allocate_local_sym_info (bfd *abfd)
3588 if (elf_local_got_refcounts (abfd) == NULL)
3590 bfd_size_type num_syms;
3592 elf32_arm_num_entries (abfd) = 0;
3594 /* Whilst it might be tempting to allocate a single block of memory and
3595 then divide it up amongst the arrays in the elf_arm_obj_tdata
3596 structure, this interferes with the work of memory checkers looking
3597 for buffer overruns. So allocate each array individually. */
3599 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3601 elf_local_got_refcounts (abfd) = bfd_zalloc
3602 (abfd, num_syms * sizeof (* elf_local_got_refcounts (abfd)));
3604 if (elf_local_got_refcounts (abfd) == NULL)
3605 return false;
3607 elf32_arm_local_tlsdesc_gotent (abfd) = bfd_zalloc
3608 (abfd, num_syms * sizeof (* elf32_arm_local_tlsdesc_gotent (abfd)));
3610 if (elf32_arm_local_tlsdesc_gotent (abfd) == NULL)
3611 return false;
3613 elf32_arm_local_iplt (abfd) = bfd_zalloc
3614 (abfd, num_syms * sizeof (* elf32_arm_local_iplt (abfd)));
3616 if (elf32_arm_local_iplt (abfd) == NULL)
3617 return false;
3619 elf32_arm_local_fdpic_cnts (abfd) = bfd_zalloc
3620 (abfd, num_syms * sizeof (* elf32_arm_local_fdpic_cnts (abfd)));
3622 if (elf32_arm_local_fdpic_cnts (abfd) == NULL)
3623 return false;
3625 elf32_arm_local_got_tls_type (abfd) = bfd_zalloc
3626 (abfd, num_syms * sizeof (* elf32_arm_local_got_tls_type (abfd)));
3628 if (elf32_arm_local_got_tls_type (abfd) == NULL)
3629 return false;
3631 elf32_arm_num_entries (abfd) = num_syms;
3633 #if GCC_VERSION >= 3000
3634 BFD_ASSERT (__alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd))
3635 <= __alignof__ (*elf_local_got_refcounts (abfd)));
3636 BFD_ASSERT (__alignof__ (*elf32_arm_local_iplt (abfd))
3637 <= __alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd)));
3638 BFD_ASSERT (__alignof__ (*elf32_arm_local_fdpic_cnts (abfd))
3639 <= __alignof__ (*elf32_arm_local_iplt (abfd)));
3640 BFD_ASSERT (__alignof__ (*elf32_arm_local_got_tls_type (abfd))
3641 <= __alignof__ (*elf32_arm_local_fdpic_cnts (abfd)));
3642 #endif
3644 return true;
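/* Illustrative note (not part of the original source): after a successful
   call, every local symbol index N of ABFD has one slot in each parallel
   array, e.g.

	elf_local_got_refcounts (abfd)[N]
	elf32_arm_local_iplt (abfd)[N]
	elf32_arm_local_fdpic_cnts (abfd)[N]

   each array being sized from symtab_hdr.sh_info, the number of local
   symbols.  */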
3647 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3648 to input bfd ABFD. Create the information if it doesn't already exist.
3649 Return null if an allocation fails. */
3651 static struct arm_local_iplt_info *
3652 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3654 struct arm_local_iplt_info **ptr;
3656 if (!elf32_arm_allocate_local_sym_info (abfd))
3657 return NULL;
3659 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3660 BFD_ASSERT (r_symndx < elf32_arm_num_entries (abfd));
3661 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3662 if (*ptr == NULL)
3663 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3664 return *ptr;
3667 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3668 in ABFD's symbol table. If the symbol is global, H points to its
3669 hash table entry, otherwise H is null.
3671 Return true if the symbol does have PLT information. When returning
3672 true, point *ROOT_PLT at the target-independent reference count/offset
3673 union and *ARM_PLT at the ARM-specific information. */
3675 static bool
3676 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3677 struct elf32_arm_link_hash_entry *h,
3678 unsigned long r_symndx, union gotplt_union **root_plt,
3679 struct arm_plt_info **arm_plt)
3681 struct arm_local_iplt_info *local_iplt;
3683 if (globals->root.splt == NULL && globals->root.iplt == NULL)
3684 return false;
3686 if (h != NULL)
3688 *root_plt = &h->root.plt;
3689 *arm_plt = &h->plt;
3690 return true;
3693 if (elf32_arm_local_iplt (abfd) == NULL)
3694 return false;
3696 if (r_symndx >= elf32_arm_num_entries (abfd))
3697 return false;
3699 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3700 if (local_iplt == NULL)
3701 return false;
3703 *root_plt = &local_iplt->root;
3704 *arm_plt = &local_iplt->arm;
3705 return true;
3708 static bool using_thumb_only (struct elf32_arm_link_hash_table *globals);
3710 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3711 before it. */
3713 static bool
3714 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3715 struct arm_plt_info *arm_plt)
3717 struct elf32_arm_link_hash_table *htab;
3719 htab = elf32_arm_hash_table (info);
3721 return (!using_thumb_only (htab) && (arm_plt->thumb_refcount != 0
3722 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3725 /* Return a pointer to the head of the dynamic reloc list that should
3726 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3727 ABFD's symbol table. Return null if an error occurs. */
3729 static struct elf_dyn_relocs **
3730 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3731 Elf_Internal_Sym *isym)
3733 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3735 struct arm_local_iplt_info *local_iplt;
3737 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3738 if (local_iplt == NULL)
3739 return NULL;
3740 return &local_iplt->dyn_relocs;
3742 else
3744 /* Track dynamic relocs needed for local syms too.
3745 We really need local syms available to do this
3746 easily. Oh well. */
3747 asection *s;
3748 void *vpp;
3750 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3751 if (s == NULL)
3752 return NULL;
3754 vpp = &elf_section_data (s)->local_dynrel;
3755 return (struct elf_dyn_relocs **) vpp;
3759 /* Initialize an entry in the stub hash table. */
3761 static struct bfd_hash_entry *
3762 stub_hash_newfunc (struct bfd_hash_entry *entry,
3763 struct bfd_hash_table *table,
3764 const char *string)
3766 /* Allocate the structure if it has not already been allocated by a
3767 subclass. */
3768 if (entry == NULL)
3770 entry = (struct bfd_hash_entry *)
3771 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3772 if (entry == NULL)
3773 return entry;
3776 /* Call the allocation method of the superclass. */
3777 entry = bfd_hash_newfunc (entry, table, string);
3778 if (entry != NULL)
3780 struct elf32_arm_stub_hash_entry *eh;
3782 /* Initialize the local fields. */
3783 eh = (struct elf32_arm_stub_hash_entry *) entry;
3784 eh->stub_sec = NULL;
3785 eh->stub_offset = (bfd_vma) -1;
3786 eh->source_value = 0;
3787 eh->target_value = 0;
3788 eh->target_section = NULL;
3789 eh->orig_insn = 0;
3790 eh->stub_type = arm_stub_none;
3791 eh->stub_size = 0;
3792 eh->stub_template = NULL;
3793 eh->stub_template_size = -1;
3794 eh->h = NULL;
3795 eh->id_sec = NULL;
3796 eh->output_name = NULL;
3799 return entry;
3802 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3803 shortcuts to them in our hash table. */
3805 static bool
3806 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3808 struct elf32_arm_link_hash_table *htab;
3810 htab = elf32_arm_hash_table (info);
3811 if (htab == NULL)
3812 return false;
3814 if (! _bfd_elf_create_got_section (dynobj, info))
3815 return false;
3817 /* Also create .rofixup. */
3818 if (htab->fdpic_p)
3820 htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
3821 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
3822 | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
3823 if (htab->srofixup == NULL
3824 || !bfd_set_section_alignment (htab->srofixup, 2))
3825 return false;
3828 return true;
3831 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3833 static bool
3834 create_ifunc_sections (struct bfd_link_info *info)
3836 struct elf32_arm_link_hash_table *htab;
3837 const struct elf_backend_data *bed;
3838 bfd *dynobj;
3839 asection *s;
3840 flagword flags;
3842 htab = elf32_arm_hash_table (info);
3843 dynobj = htab->root.dynobj;
3844 bed = get_elf_backend_data (dynobj);
3845 flags = bed->dynamic_sec_flags;
3847 if (htab->root.iplt == NULL)
3849 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3850 flags | SEC_READONLY | SEC_CODE);
3851 if (s == NULL
3852 || !bfd_set_section_alignment (s, bed->plt_alignment))
3853 return false;
3854 htab->root.iplt = s;
3857 if (htab->root.irelplt == NULL)
3859 s = bfd_make_section_anyway_with_flags (dynobj,
3860 RELOC_SECTION (htab, ".iplt"),
3861 flags | SEC_READONLY);
3862 if (s == NULL
3863 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3864 return false;
3865 htab->root.irelplt = s;
3868 if (htab->root.igotplt == NULL)
3870 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3871 if (s == NULL
3872 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3873 return false;
3874 htab->root.igotplt = s;
3876 return true;
3879 /* Determine if we're dealing with a Thumb only architecture. */
3881 static bool
3882 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3884 int arch;
3885 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3886 Tag_CPU_arch_profile);
3888 if (profile)
3889 return profile == 'M';
3891 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3893 /* Force return logic to be reviewed for each new architecture. */
3894 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3896 if (arch == TAG_CPU_ARCH_V6_M
3897 || arch == TAG_CPU_ARCH_V6S_M
3898 || arch == TAG_CPU_ARCH_V7E_M
3899 || arch == TAG_CPU_ARCH_V8M_BASE
3900 || arch == TAG_CPU_ARCH_V8M_MAIN
3901 || arch == TAG_CPU_ARCH_V8_1M_MAIN)
3902 return true;
3904 return false;
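/* Example (illustrative only): an object built for Cortex-M4 carries
   roughly Tag_CPU_arch_profile = 'M' and Tag_CPU_arch = TAG_CPU_ARCH_V7E_M,
   so the profile check alone makes using_thumb_only() return true; a
   Cortex-A9 object ('A' profile, TAG_CPU_ARCH_V7) yields false.  */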
3907 /* Determine if we're dealing with a Thumb-2 object. */
3909 static bool
3910 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3912 int arch;
3913 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3914 Tag_THUMB_ISA_use);
3916 /* No use of thumb permitted, or a legacy thumb-1/2 definition. */
3917 if (thumb_isa < 3)
3918 return thumb_isa == 2;
3920 /* Variant of thumb is described by the architecture tag. */
3921 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3923 /* Force return logic to be reviewed for each new architecture. */
3924 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3926 return (arch == TAG_CPU_ARCH_V6T2
3927 || arch == TAG_CPU_ARCH_V7
3928 || arch == TAG_CPU_ARCH_V7E_M
3929 || arch == TAG_CPU_ARCH_V8
3930 || arch == TAG_CPU_ARCH_V8R
3931 || arch == TAG_CPU_ARCH_V8M_MAIN
3932 || arch == TAG_CPU_ARCH_V8_1M_MAIN);
3935 /* Determine whether Thumb-2 BL instruction is available. */
3937 static bool
3938 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3940 int arch =
3941 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3943 /* Force return logic to be reviewed for each new architecture. */
3944 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3946 /* Architecture was introduced after ARMv6T2 (e.g. ARMv6-M). */
3947 return (arch == TAG_CPU_ARCH_V6T2
3948 || arch >= TAG_CPU_ARCH_V7);
3951 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3952 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3953 hash table. */
3955 static bool
3956 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3958 struct elf32_arm_link_hash_table *htab;
3960 htab = elf32_arm_hash_table (info);
3961 if (htab == NULL)
3962 return false;
3964 if (!htab->root.sgot && !create_got_section (dynobj, info))
3965 return false;
3967 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3968 return false;
3970 if (htab->root.target_os == is_vxworks)
3972 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3973 return false;
3975 if (bfd_link_pic (info))
3977 htab->plt_header_size = 0;
3978 htab->plt_entry_size
3979 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3981 else
3983 htab->plt_header_size
3984 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3985 htab->plt_entry_size
3986 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3989 if (elf_elfheader (dynobj))
3990 elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
3992 else
3994 /* PR ld/16017
3995 Test for thumb only architectures. Note - we cannot just call
3996 using_thumb_only() as the attributes in the output bfd have not been
3997 initialised at this point, so instead we use the input bfd. */
3998 bfd * saved_obfd = htab->obfd;
4000 htab->obfd = dynobj;
4001 if (using_thumb_only (htab))
4003 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
4004 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
4006 htab->obfd = saved_obfd;
4009 if (htab->fdpic_p) {
4010 htab->plt_header_size = 0;
4011 if (info->flags & DF_BIND_NOW)
4012 htab->plt_entry_size = 4 * (ARRAY_SIZE (elf32_arm_fdpic_plt_entry) - 5);
4013 else
4014 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry);
4017 if (!htab->root.splt
4018 || !htab->root.srelplt
4019 || !htab->root.sdynbss
4020 || (!bfd_link_pic (info) && !htab->root.srelbss))
4021 abort ();
4023 return true;
4026 /* Copy the extra info we tack onto an elf_link_hash_entry. */
4028 static void
4029 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
4030 struct elf_link_hash_entry *dir,
4031 struct elf_link_hash_entry *ind)
4033 struct elf32_arm_link_hash_entry *edir, *eind;
4035 edir = (struct elf32_arm_link_hash_entry *) dir;
4036 eind = (struct elf32_arm_link_hash_entry *) ind;
4038 if (ind->root.type == bfd_link_hash_indirect)
4040 /* Copy over PLT info. */
4041 edir->plt.thumb_refcount += eind->plt.thumb_refcount;
4042 eind->plt.thumb_refcount = 0;
4043 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
4044 eind->plt.maybe_thumb_refcount = 0;
4045 edir->plt.noncall_refcount += eind->plt.noncall_refcount;
4046 eind->plt.noncall_refcount = 0;
4048 /* Copy FDPIC counters. */
4049 edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
4050 edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
4051 edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;
4053 /* We should only allocate a function to .iplt once the final
4054 symbol information is known. */
4055 BFD_ASSERT (!eind->is_iplt);
4057 if (dir->got.refcount <= 0)
4059 edir->tls_type = eind->tls_type;
4060 eind->tls_type = GOT_UNKNOWN;
4064 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
4067 /* Destroy an ARM elf linker hash table. */
4069 static void
4070 elf32_arm_link_hash_table_free (bfd *obfd)
4072 struct elf32_arm_link_hash_table *ret
4073 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4075 bfd_hash_table_free (&ret->stub_hash_table);
4076 _bfd_elf_link_hash_table_free (obfd);
4079 /* Create an ARM elf linker hash table. */
4081 static struct bfd_link_hash_table *
4082 elf32_arm_link_hash_table_create (bfd *abfd)
4084 struct elf32_arm_link_hash_table *ret;
4085 size_t amt = sizeof (struct elf32_arm_link_hash_table);
4087 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
4088 if (ret == NULL)
4089 return NULL;
4091 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
4092 elf32_arm_link_hash_newfunc,
4093 sizeof (struct elf32_arm_link_hash_entry),
4094 ARM_ELF_DATA))
4096 free (ret);
4097 return NULL;
4100 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
4101 ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
4102 #ifdef FOUR_WORD_PLT
4103 ret->plt_header_size = 16;
4104 ret->plt_entry_size = 16;
4105 #else
4106 ret->plt_header_size = 20;
4107 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
4108 #endif
4109 ret->use_rel = true;
4110 ret->obfd = abfd;
4111 ret->fdpic_p = 0;
4113 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4114 sizeof (struct elf32_arm_stub_hash_entry)))
4116 _bfd_elf_link_hash_table_free (abfd);
4117 return NULL;
4119 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4121 return &ret->root.root;
4124 /* Determine what kind of NOPs are available. */
4126 static bool
4127 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4129 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4130 Tag_CPU_arch);
4132 /* Force return logic to be reviewed for each new architecture. */
4133 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
4135 return (arch == TAG_CPU_ARCH_V6T2
4136 || arch == TAG_CPU_ARCH_V6K
4137 || arch == TAG_CPU_ARCH_V7
4138 || arch == TAG_CPU_ARCH_V8
4139 || arch == TAG_CPU_ARCH_V8R);
4142 static bool
4143 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4145 switch (stub_type)
4147 case arm_stub_long_branch_thumb_only:
4148 case arm_stub_long_branch_thumb2_only:
4149 case arm_stub_long_branch_thumb2_only_pure:
4150 case arm_stub_long_branch_v4t_thumb_arm:
4151 case arm_stub_short_branch_v4t_thumb_arm:
4152 case arm_stub_long_branch_v4t_thumb_arm_pic:
4153 case arm_stub_long_branch_v4t_thumb_tls_pic:
4154 case arm_stub_long_branch_thumb_only_pic:
4155 case arm_stub_cmse_branch_thumb_only:
4156 return true;
4157 case arm_stub_none:
4158 BFD_FAIL ();
4159 return false;
4160 break;
4161 default:
4162 return false;
4166 /* Determine the type of stub needed, if any, for a call. */
4168 static enum elf32_arm_stub_type
4169 arm_type_of_stub (struct bfd_link_info *info,
4170 asection *input_sec,
4171 const Elf_Internal_Rela *rel,
4172 unsigned char st_type,
4173 enum arm_st_branch_type *actual_branch_type,
4174 struct elf32_arm_link_hash_entry *hash,
4175 bfd_vma destination,
4176 asection *sym_sec,
4177 bfd *input_bfd,
4178 const char *name)
4180 bfd_vma location;
4181 bfd_signed_vma branch_offset;
4182 unsigned int r_type;
4183 struct elf32_arm_link_hash_table * globals;
4184 bool thumb2, thumb2_bl, thumb_only;
4185 enum elf32_arm_stub_type stub_type = arm_stub_none;
4186 int use_plt = 0;
4187 enum arm_st_branch_type branch_type = *actual_branch_type;
4188 union gotplt_union *root_plt;
4189 struct arm_plt_info *arm_plt;
4190 int arch;
4191 int thumb2_movw;
4193 if (branch_type == ST_BRANCH_LONG)
4194 return stub_type;
4196 globals = elf32_arm_hash_table (info);
4197 if (globals == NULL)
4198 return stub_type;
4200 thumb_only = using_thumb_only (globals);
4201 thumb2 = using_thumb2 (globals);
4202 thumb2_bl = using_thumb2_bl (globals);
4204 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
4206 /* True for architectures that implement the thumb2 movw instruction. */
4207 thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);
4209 /* Determine where the call point is. */
4210 location = (input_sec->output_offset
4211 + input_sec->output_section->vma
4212 + rel->r_offset);
4214 r_type = ELF32_R_TYPE (rel->r_info);
4216 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
4217 are considering a function call relocation. */
4218 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4219 || r_type == R_ARM_THM_JUMP19)
4220 && branch_type == ST_BRANCH_TO_ARM)
4221 branch_type = ST_BRANCH_TO_THUMB;
4223 /* For TLS call relocs, it is the caller's responsibility to provide
4224 the address of the appropriate trampoline. */
4225 if (r_type != R_ARM_TLS_CALL
4226 && r_type != R_ARM_THM_TLS_CALL
4227 && elf32_arm_get_plt_info (input_bfd, globals, hash,
4228 ELF32_R_SYM (rel->r_info), &root_plt,
4229 &arm_plt)
4230 && root_plt->offset != (bfd_vma) -1)
4232 asection *splt;
4234 if (hash == NULL || hash->is_iplt)
4235 splt = globals->root.iplt;
4236 else
4237 splt = globals->root.splt;
4238 if (splt != NULL)
4240 use_plt = 1;
4242 /* Note when dealing with PLT entries: the main PLT stub is in
4243 ARM mode, so if the branch is in Thumb mode, another
4244 Thumb->ARM stub will be inserted later just before the ARM
4245 PLT stub. If a long branch stub is needed, we'll add a
4246 Thumb->Arm one and branch directly to the ARM PLT entry.
4247 Here, we have to check if a pre-PLT Thumb->ARM stub
4248 is needed and if it will be close enough. */
4250 destination = (splt->output_section->vma
4251 + splt->output_offset
4252 + root_plt->offset);
4253 st_type = STT_FUNC;
4255 /* Thumb branch/call to PLT: it can become a branch to ARM
4256 or to Thumb. We must perform the same checks and
4257 corrections as in elf32_arm_final_link_relocate. */
4258 if ((r_type == R_ARM_THM_CALL)
4259 || (r_type == R_ARM_THM_JUMP24))
4261 if (globals->use_blx
4262 && r_type == R_ARM_THM_CALL
4263 && !thumb_only)
4265 /* If the Thumb BLX instruction is available, convert
4266 the BL to a BLX instruction to call the ARM-mode
4267 PLT entry. */
4268 branch_type = ST_BRANCH_TO_ARM;
4270 else
4272 if (!thumb_only)
4273 /* Target the Thumb stub before the ARM PLT entry. */
4274 destination -= PLT_THUMB_STUB_SIZE;
4275 branch_type = ST_BRANCH_TO_THUMB;
4278 else
4280 branch_type = ST_BRANCH_TO_ARM;
4284 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
4285 BFD_ASSERT (st_type != STT_GNU_IFUNC);
4287 branch_offset = (bfd_signed_vma)(destination - location);
4289 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4290 || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
4292 /* Handle cases where:
4293 - this call goes too far (different Thumb/Thumb2 max
4294 distance)
4295 - it's a Thumb->Arm call and blx is not available, or it's a
4296 Thumb->Arm branch (not bl). A stub is needed in this case,
4297 but only if this call is not through a PLT entry. Indeed,
4298 PLT stubs handle mode switching already. */
4299 if ((!thumb2_bl
4300 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
4301 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
4302 || (thumb2_bl
4303 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
4304 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
4305 || (thumb2
4306 && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
4307 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
4308 && (r_type == R_ARM_THM_JUMP19))
4309 || (branch_type == ST_BRANCH_TO_ARM
4310 && (((r_type == R_ARM_THM_CALL
4311 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
4312 || (r_type == R_ARM_THM_JUMP24)
4313 || (r_type == R_ARM_THM_JUMP19))
4314 && !use_plt))
4316 /* If we need to insert a Thumb-Thumb long branch stub to a
4317 PLT, use one that branches directly to the ARM PLT
4318 stub. If we pretended we'd use the pre-PLT Thumb->ARM
4319 stub, undo this now. */
4320 if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
4322 branch_type = ST_BRANCH_TO_ARM;
4323 branch_offset += PLT_THUMB_STUB_SIZE;
4326 if (branch_type == ST_BRANCH_TO_THUMB)
4328 /* Thumb to thumb. */
4329 if (!thumb_only)
4331 if (input_sec->flags & SEC_ELF_PURECODE)
4332 _bfd_error_handler
4333 (_("%pB(%pA): warning: long branch veneers used in"
4334 " section with SHF_ARM_PURECODE section"
4335 " attribute is only supported for M-profile"
4336 " targets that implement the movw instruction"),
4337 input_bfd, input_sec);
4339 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4340 /* PIC stubs. */
4341 ? ((globals->use_blx
4342 && (r_type == R_ARM_THM_CALL))
4343 /* V5T and above. Stub starts with ARM code, so
4344 we must be able to switch mode before
4345 reaching it, which is only possible for 'bl'
4346 (ie R_ARM_THM_CALL relocation). */
4347 ? arm_stub_long_branch_any_thumb_pic
4348 /* On V4T, use Thumb code only. */
4349 : arm_stub_long_branch_v4t_thumb_thumb_pic)
4351 /* non-PIC stubs. */
4352 : ((globals->use_blx
4353 && (r_type == R_ARM_THM_CALL))
4354 /* V5T and above. */
4355 ? arm_stub_long_branch_any_any
4356 /* V4T. */
4357 : arm_stub_long_branch_v4t_thumb_thumb);
4359 else
4361 if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
4362 stub_type = arm_stub_long_branch_thumb2_only_pure;
4363 else
4365 if (input_sec->flags & SEC_ELF_PURECODE)
4366 _bfd_error_handler
4367 (_("%pB(%pA): warning: long branch veneers used in"
4368 " section with SHF_ARM_PURECODE section"
4369 " attribute is only supported for M-profile"
4370 " targets that implement the movw instruction"),
4371 input_bfd, input_sec);
4373 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4374 /* PIC stub. */
4375 ? arm_stub_long_branch_thumb_only_pic
4376 /* non-PIC stub. */
4377 : (thumb2 ? arm_stub_long_branch_thumb2_only
4378 : arm_stub_long_branch_thumb_only);
4382 else
4384 if (input_sec->flags & SEC_ELF_PURECODE)
4385 _bfd_error_handler
4386 (_("%pB(%pA): warning: long branch veneers used in"
4387 " section with SHF_ARM_PURECODE section"
4388 " attribute is only supported" " for M-profile"
4389 " targets that implement the movw instruction"),
4390 input_bfd, input_sec);
4392 /* Thumb to arm. */
4393 if (sym_sec != NULL
4394 && sym_sec->owner != NULL
4395 && !INTERWORK_FLAG (sym_sec->owner))
4397 _bfd_error_handler
4398 (_("%pB(%s): warning: interworking not enabled;"
4399 " first occurrence: %pB: %s call to %s"),
4400 sym_sec->owner, name, input_bfd, "Thumb", "ARM");
4403 stub_type =
4404 (bfd_link_pic (info) | globals->pic_veneer)
4405 /* PIC stubs. */
4406 ? (r_type == R_ARM_THM_TLS_CALL
4407 /* TLS PIC stubs. */
4408 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
4409 : arm_stub_long_branch_v4t_thumb_tls_pic)
4410 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4411 /* V5T PIC and above. */
4412 ? arm_stub_long_branch_any_arm_pic
4413 /* V4T PIC stub. */
4414 : arm_stub_long_branch_v4t_thumb_arm_pic))
4416 /* non-PIC stubs. */
4417 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4418 /* V5T and above. */
4419 ? arm_stub_long_branch_any_any
4420 /* V4T. */
4421 : arm_stub_long_branch_v4t_thumb_arm);
4423 /* Handle v4t short branches. */
4424 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
4425 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
4426 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
4427 stub_type = arm_stub_short_branch_v4t_thumb_arm;
4431 else if (r_type == R_ARM_CALL
4432 || r_type == R_ARM_JUMP24
4433 || r_type == R_ARM_PLT32
4434 || r_type == R_ARM_TLS_CALL)
4436 if (input_sec->flags & SEC_ELF_PURECODE)
4437 _bfd_error_handler
4438 (_("%pB(%pA): warning: long branch veneers used in"
4439 " section with SHF_ARM_PURECODE section"
4440 " attribute is only supported for M-profile"
4441 " targets that implement the movw instruction"),
4442 input_bfd, input_sec);
4443 if (branch_type == ST_BRANCH_TO_THUMB)
4445 /* Arm to thumb. */
4447 if (sym_sec != NULL
4448 && sym_sec->owner != NULL
4449 && !INTERWORK_FLAG (sym_sec->owner))
4451 _bfd_error_handler
4452 (_("%pB(%s): warning: interworking not enabled;"
4453 " first occurrence: %pB: %s call to %s"),
4454 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
4457 /* We have an extra 2 bytes of reach because of
4458 the mode change (bit 24 (H) of BLX encoding). */
4459 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4460 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4461 || (r_type == R_ARM_CALL && !globals->use_blx)
4462 || (r_type == R_ARM_JUMP24)
4463 || (r_type == R_ARM_PLT32))
4465 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4466 /* PIC stubs. */
4467 ? ((globals->use_blx)
4468 /* V5T and above. */
4469 ? arm_stub_long_branch_any_thumb_pic
4470 /* V4T stub. */
4471 : arm_stub_long_branch_v4t_arm_thumb_pic)
4473 /* non-PIC stubs. */
4474 : ((globals->use_blx)
4475 /* V5T and above. */
4476 ? arm_stub_long_branch_any_any
4477 /* V4T. */
4478 : arm_stub_long_branch_v4t_arm_thumb);
4481 else
4483 /* Arm to arm. */
4484 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4485 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4487 stub_type =
4488 (bfd_link_pic (info) | globals->pic_veneer)
4489 /* PIC stubs. */
4490 ? (r_type == R_ARM_TLS_CALL
4491 /* TLS PIC Stub. */
4492 ? arm_stub_long_branch_any_tls_pic
4493 : (globals->root.target_os == is_nacl
4494 ? arm_stub_long_branch_arm_nacl_pic
4495 : arm_stub_long_branch_any_arm_pic))
4496 /* non-PIC stubs. */
4497 : (globals->root.target_os == is_nacl
4498 ? arm_stub_long_branch_arm_nacl
4499 : arm_stub_long_branch_any_any);
4504 /* If a stub is needed, record the actual destination type. */
4505 if (stub_type != arm_stub_none)
4506 *actual_branch_type = branch_type;
4508 return stub_type;
4511 /* Build a name for an entry in the stub hash table. */
4513 static char *
4514 elf32_arm_stub_name (const asection *input_section,
4515 const asection *sym_sec,
4516 const struct elf32_arm_link_hash_entry *hash,
4517 const Elf_Internal_Rela *rel,
4518 enum elf32_arm_stub_type stub_type)
4520 char *stub_name;
4521 bfd_size_type len;
4523 if (hash)
4525 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4526 stub_name = (char *) bfd_malloc (len);
4527 if (stub_name != NULL)
4528 sprintf (stub_name, "%08x_%s+%x_%d",
4529 input_section->id & 0xffffffff,
4530 hash->root.root.root.string,
4531 (int) rel->r_addend & 0xffffffff,
4532 (int) stub_type);
4534 else
4536 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4537 stub_name = (char *) bfd_malloc (len);
4538 if (stub_name != NULL)
4539 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4540 input_section->id & 0xffffffff,
4541 sym_sec->id & 0xffffffff,
4542 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4543 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4544 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4545 (int) rel->r_addend & 0xffffffff,
4546 (int) stub_type);
4549 return stub_name;
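/* Example (illustrative): a call from the section with id 0x2a to the
   global symbol "printf", addend 0, needing stub type 3 produces the key
   "0000002a_printf+0_3"; for local symbols the "%08x_%x:%x+%x_%d" form is
   used instead, keyed on the input and symbol section ids.  */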
4552 /* Look up an entry in the stub hash. Stub entries are cached because
4553 creating the stub name takes a bit of time. */
4555 static struct elf32_arm_stub_hash_entry *
4556 elf32_arm_get_stub_entry (const asection *input_section,
4557 const asection *sym_sec,
4558 struct elf_link_hash_entry *hash,
4559 const Elf_Internal_Rela *rel,
4560 struct elf32_arm_link_hash_table *htab,
4561 enum elf32_arm_stub_type stub_type)
4563 struct elf32_arm_stub_hash_entry *stub_entry;
4564 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4565 const asection *id_sec;
4567 if ((input_section->flags & SEC_CODE) == 0)
4568 return NULL;
4570 /* If the input section is the CMSE stubs one and it needs a long
4571 branch stub to reach its final destination, give up with an
4572 error message: this is not supported. See PR ld/24709. */
4573 if (!strncmp (input_section->name, CMSE_STUB_NAME, strlen (CMSE_STUB_NAME)))
4575 bfd *output_bfd = htab->obfd;
4576 asection *out_sec = bfd_get_section_by_name (output_bfd, CMSE_STUB_NAME);
4578 _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
4579 "(%#" PRIx64 ") from destination (%#" PRIx64 ")"),
4580 CMSE_STUB_NAME,
4581 (uint64_t)out_sec->output_section->vma
4582 + out_sec->output_offset,
4583 (uint64_t)sym_sec->output_section->vma
4584 + sym_sec->output_offset
4585 + h->root.root.u.def.value);
4586 /* Exit, rather than leave incompletely processed
4587 relocations. */
4588 xexit (1);
4591 /* If this input section is part of a group of sections sharing one
4592 stub section, then use the id of the first section in the group.
4593 Stub names need to include a section id, as there may well be
4594 more than one stub used to reach, say, printf, and we need to
4595 distinguish between them. */
4596 BFD_ASSERT (input_section->id <= htab->top_id);
4597 id_sec = htab->stub_group[input_section->id].link_sec;
4599 if (h != NULL && h->stub_cache != NULL
4600 && h->stub_cache->h == h
4601 && h->stub_cache->id_sec == id_sec
4602 && h->stub_cache->stub_type == stub_type)
4604 stub_entry = h->stub_cache;
4606 else
4608 char *stub_name;
4610 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4611 if (stub_name == NULL)
4612 return NULL;
4614 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4615 stub_name, false, false);
4616 if (h != NULL)
4617 h->stub_cache = stub_entry;
4619 free (stub_name);
4622 return stub_entry;
4625 /* Whether veneers of type STUB_TYPE need to be in a dedicated output
4626 section. */
4628 static bool
4629 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4631 if (stub_type >= max_stub_type)
4632 abort (); /* Should be unreachable. */
4634 switch (stub_type)
4636 case arm_stub_cmse_branch_thumb_only:
4637 return true;
4639 default:
4640 return false;
4643 abort (); /* Should be unreachable. */
4646 /* Required alignment (as a power of 2) for the dedicated section holding
4647 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4648 with input sections. */
4650 static int
4651 arm_dedicated_stub_output_section_required_alignment
4652 (enum elf32_arm_stub_type stub_type)
4654 if (stub_type >= max_stub_type)
4655 abort (); /* Should be unreachable. */
4657 switch (stub_type)
4659 /* Vectors of Secure Gateway veneers must be aligned on a 32-byte
4660 boundary. */
4661 case arm_stub_cmse_branch_thumb_only:
4662 return 5;
4664 default:
4665 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4666 return 0;
4669 abort (); /* Should be unreachable. */
4672 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4673 NULL if veneers of this type are interspersed with input sections. */
4675 static const char *
4676 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4678 if (stub_type >= max_stub_type)
4679 abort (); /* Should be unreachable. */
4681 switch (stub_type)
4683 case arm_stub_cmse_branch_thumb_only:
4684 return CMSE_STUB_NAME;
4686 default:
4687 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4688 return NULL;
4691 abort (); /* Should be unreachable. */
4694 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4695 returns the address of the hash table field in HTAB holding a pointer to the
4696 corresponding input section. Otherwise, returns NULL. */
4698 static asection **
4699 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4700 enum elf32_arm_stub_type stub_type)
4702 if (stub_type >= max_stub_type)
4703 abort (); /* Should be unreachable. */
4705 switch (stub_type)
4707 case arm_stub_cmse_branch_thumb_only:
4708 return &htab->cmse_stub_sec;
4710 default:
4711 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4712 return NULL;
4715 abort (); /* Should be unreachable. */
4718 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4719 is the section that branches into the veneer; it can be NULL if the stub should go in
4720 a dedicated output section. Returns a pointer to the stub section, and the
4721 section to which the stub section will be attached (in *LINK_SEC_P).
4722 LINK_SEC_P may be NULL. */
4724 static asection *
4725 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4726 struct elf32_arm_link_hash_table *htab,
4727 enum elf32_arm_stub_type stub_type)
4729 asection *link_sec, *out_sec, **stub_sec_p;
4730 const char *stub_sec_prefix;
4731 bool dedicated_output_section =
4732 arm_dedicated_stub_output_section_required (stub_type);
4733 int align;
4735 if (dedicated_output_section)
4737 bfd *output_bfd = htab->obfd;
4738 const char *out_sec_name =
4739 arm_dedicated_stub_output_section_name (stub_type);
4740 link_sec = NULL;
4741 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
4742 stub_sec_prefix = out_sec_name;
4743 align = arm_dedicated_stub_output_section_required_alignment (stub_type);
4744 out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
4745 if (out_sec == NULL)
4747 _bfd_error_handler (_("no address assigned to the veneers output "
4748 "section %s"), out_sec_name);
4749 return NULL;
4752 else
4754 BFD_ASSERT (section->id <= htab->top_id);
4755 link_sec = htab->stub_group[section->id].link_sec;
4756 BFD_ASSERT (link_sec != NULL);
4757 stub_sec_p = &htab->stub_group[section->id].stub_sec;
4758 if (*stub_sec_p == NULL)
4759 stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
4760 stub_sec_prefix = link_sec->name;
4761 out_sec = link_sec->output_section;
4762 align = htab->root.target_os == is_nacl ? 4 : 3;
4765 if (*stub_sec_p == NULL)
4767 size_t namelen;
4768 bfd_size_type len;
4769 char *s_name;
4771 namelen = strlen (stub_sec_prefix);
4772 len = namelen + sizeof (STUB_SUFFIX);
4773 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4774 if (s_name == NULL)
4775 return NULL;
4777 memcpy (s_name, stub_sec_prefix, namelen);
4778 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4779 *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
4780 align);
4781 if (*stub_sec_p == NULL)
4782 return NULL;
4784 out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
4785 | SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
4786 | SEC_KEEP;
4789 if (!dedicated_output_section)
4790 htab->stub_group[section->id].stub_sec = *stub_sec_p;
4792 if (link_sec_p)
4793 *link_sec_p = link_sec;
4795 return *stub_sec_p;
4798 /* Add a new stub entry to the stub hash. Not all fields of the new
4799 stub entry are initialised. */
4801 static struct elf32_arm_stub_hash_entry *
4802 elf32_arm_add_stub (const char *stub_name, asection *section,
4803 struct elf32_arm_link_hash_table *htab,
4804 enum elf32_arm_stub_type stub_type)
4806 asection *link_sec;
4807 asection *stub_sec;
4808 struct elf32_arm_stub_hash_entry *stub_entry;
4810 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4811 stub_type);
4812 if (stub_sec == NULL)
4813 return NULL;
4815 /* Enter this entry into the linker stub hash table. */
4816 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4817 true, false);
4818 if (stub_entry == NULL)
4820 if (section == NULL)
4821 section = stub_sec;
4822 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4823 section->owner, stub_name);
4824 return NULL;
4827 stub_entry->stub_sec = stub_sec;
4828 stub_entry->stub_offset = (bfd_vma) -1;
4829 stub_entry->id_sec = link_sec;
4831 return stub_entry;
4834 /* Store an Arm insn into an output section not processed by
4835 elf32_arm_write_section. */
4837 static void
4838 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4839 bfd * output_bfd, bfd_vma val, void * ptr)
4841 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4842 bfd_putl32 (val, ptr);
4843 else
4844 bfd_putb32 (val, ptr);
4847 /* Store a 16-bit Thumb insn into an output section not processed by
4848 elf32_arm_write_section. */
4850 static void
4851 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4852 bfd * output_bfd, bfd_vma val, void * ptr)
4854 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4855 bfd_putl16 (val, ptr);
4856 else
4857 bfd_putb16 (val, ptr);
4860 /* Store a Thumb2 insn into an output section not processed by
4861 elf32_arm_write_section. */
4863 static void
4864 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4865 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4867 /* Thumb-2 instructions are streamed as two 16-bit halfwords. */
4868 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4870 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4871 bfd_putl16 ((val & 0xffff), ptr + 2);
4873 else
4875 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4876 bfd_putb16 ((val & 0xffff), ptr + 2);
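/* Example (illustrative): a 32-bit Thumb-2 encoding such as the BL
   placeholder VAL = 0xf000f800 is written as the halfword 0xf000 followed
   by 0xf800, each halfword in the appropriate code endianness (see
   byteswap_code above); it is never stored as a single 32-bit word.  */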
4880 /* If it's possible to change R_TYPE to a more efficient access
4881 model, return the new reloc type. */
4883 static unsigned
4884 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4885 struct elf_link_hash_entry *h)
4887 int is_local = (h == NULL);
4889 if (bfd_link_dll (info)
4890 || (h && h->root.type == bfd_link_hash_undefweak))
4891 return r_type;
4893 /* We do not support relaxations for Old TLS models. */
4894 switch (r_type)
4896 case R_ARM_TLS_GOTDESC:
4897 case R_ARM_TLS_CALL:
4898 case R_ARM_THM_TLS_CALL:
4899 case R_ARM_TLS_DESCSEQ:
4900 case R_ARM_THM_TLS_DESCSEQ:
4901 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4904 return r_type;
4907 static bfd_reloc_status_type elf32_arm_final_link_relocate
4908 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4909 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4910 const char *, unsigned char, enum arm_st_branch_type,
4911 struct elf_link_hash_entry *, bool *, char **);
4913 static unsigned int
4914 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4916 switch (stub_type)
4918 case arm_stub_a8_veneer_b_cond:
4919 case arm_stub_a8_veneer_b:
4920 case arm_stub_a8_veneer_bl:
4921 return 2;
4923 case arm_stub_long_branch_any_any:
4924 case arm_stub_long_branch_v4t_arm_thumb:
4925 case arm_stub_long_branch_thumb_only:
4926 case arm_stub_long_branch_thumb2_only:
4927 case arm_stub_long_branch_thumb2_only_pure:
4928 case arm_stub_long_branch_v4t_thumb_thumb:
4929 case arm_stub_long_branch_v4t_thumb_arm:
4930 case arm_stub_short_branch_v4t_thumb_arm:
4931 case arm_stub_long_branch_any_arm_pic:
4932 case arm_stub_long_branch_any_thumb_pic:
4933 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4934 case arm_stub_long_branch_v4t_arm_thumb_pic:
4935 case arm_stub_long_branch_v4t_thumb_arm_pic:
4936 case arm_stub_long_branch_thumb_only_pic:
4937 case arm_stub_long_branch_any_tls_pic:
4938 case arm_stub_long_branch_v4t_thumb_tls_pic:
4939 case arm_stub_cmse_branch_thumb_only:
4940 case arm_stub_a8_veneer_blx:
4941 return 4;
4943 case arm_stub_long_branch_arm_nacl:
4944 case arm_stub_long_branch_arm_nacl_pic:
4945 return 16;
4947 default:
4948 abort (); /* Should be unreachable. */
4952 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4953 veneering (TRUE) or have their own symbol (FALSE). */
4955 static bool
4956 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4958 if (stub_type >= max_stub_type)
4959 abort (); /* Should be unreachable. */
4961 switch (stub_type)
4963 case arm_stub_cmse_branch_thumb_only:
4964 return true;
4966 default:
4967 return false;
4970 abort (); /* Should be unreachable. */
4973 /* Returns the padding needed for the dedicated section used by stubs of type
4974 STUB_TYPE. */
4976 static int
4977 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4979 if (stub_type >= max_stub_type)
4980 abort (); /* Should be unreachable. */
4982 switch (stub_type)
4984 case arm_stub_cmse_branch_thumb_only:
4985 return 32;
4987 default:
4988 return 0;
4991 abort (); /* Should be unreachable. */
4994 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4995 returns the address of the hash table field in HTAB holding the offset at
4996 which new veneers should be laid out in the stub section. */
4998 static bfd_vma*
4999 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
5000 enum elf32_arm_stub_type stub_type)
5002 switch (stub_type)
5004 case arm_stub_cmse_branch_thumb_only:
5005 return &htab->new_cmse_stub_offset;
5007 default:
5008 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
5009 return NULL;
5013 static bool
5014 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
5015 void * in_arg)
5017 #define MAXRELOCS 3
5018 bool removed_sg_veneer;
5019 struct elf32_arm_stub_hash_entry *stub_entry;
5020 struct elf32_arm_link_hash_table *globals;
5021 struct bfd_link_info *info;
5022 asection *stub_sec;
5023 bfd *stub_bfd;
5024 bfd_byte *loc;
5025 bfd_vma sym_value;
5026 int template_size;
5027 int size;
5028 const insn_sequence *template_sequence;
5029 int i;
5030 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
5031 int stub_reloc_offset[MAXRELOCS] = {0, 0};
5032 int nrelocs = 0;
5033 int just_allocated = 0;
5035 /* Massage our args to the form they really have. */
5036 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5037 info = (struct bfd_link_info *) in_arg;
5039 /* Fail if the target section could not be assigned to an output
5040 section. The user should fix his linker script. */
5041 if (stub_entry->target_section->output_section == NULL
5042 && info->non_contiguous_regions)
5043 info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. "
5044 "Retry without --enable-non-contiguous-regions.\n"),
5045 stub_entry->target_section);
5047 globals = elf32_arm_hash_table (info);
5048 if (globals == NULL)
5049 return false;
5051 stub_sec = stub_entry->stub_sec;
5053 if ((globals->fix_cortex_a8 < 0)
5054 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
5055 /* We have to do less-strictly-aligned fixes last. */
5056 return true;
5058 /* Assign a slot at the end of section if none assigned yet. */
5059 if (stub_entry->stub_offset == (bfd_vma) -1)
5061 stub_entry->stub_offset = stub_sec->size;
5062 just_allocated = 1;
5064 loc = stub_sec->contents + stub_entry->stub_offset;
5066 stub_bfd = stub_sec->owner;
5068 /* This is the address of the stub destination. */
5069 sym_value = (stub_entry->target_value
5070 + stub_entry->target_section->output_offset
5071 + stub_entry->target_section->output_section->vma);
5073 template_sequence = stub_entry->stub_template;
5074 template_size = stub_entry->stub_template_size;
5076 size = 0;
5077 for (i = 0; i < template_size; i++)
5079 switch (template_sequence[i].type)
5081 case THUMB16_TYPE:
5083 bfd_vma data = (bfd_vma) template_sequence[i].data;
5084 if (template_sequence[i].reloc_addend != 0)
5086 /* We've borrowed the reloc_addend field to mean we should
5087 insert a condition code into this (Thumb-1 branch)
5088 instruction. See THUMB16_BCOND_INSN. */
5089 BFD_ASSERT ((data & 0xff00) == 0xd000);
5090 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
5092 bfd_put_16 (stub_bfd, data, loc + size);
5093 size += 2;
5095 break;
5097 case THUMB32_TYPE:
5098 bfd_put_16 (stub_bfd,
5099 (template_sequence[i].data >> 16) & 0xffff,
5100 loc + size);
5101 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
5102 loc + size + 2);
5103 if (template_sequence[i].r_type != R_ARM_NONE)
5105 stub_reloc_idx[nrelocs] = i;
5106 stub_reloc_offset[nrelocs++] = size;
5108 size += 4;
5109 break;
5111 case ARM_TYPE:
5112 bfd_put_32 (stub_bfd, template_sequence[i].data,
5113 loc + size);
5114 /* Handle cases where the target is encoded within the
5115 instruction. */
5116 if (template_sequence[i].r_type == R_ARM_JUMP24)
5118 stub_reloc_idx[nrelocs] = i;
5119 stub_reloc_offset[nrelocs++] = size;
5121 size += 4;
5122 break;
5124 case DATA_TYPE:
5125 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
5126 stub_reloc_idx[nrelocs] = i;
5127 stub_reloc_offset[nrelocs++] = size;
5128 size += 4;
5129 break;
5131 default:
5132 BFD_FAIL ();
5133 return false;
5137 if (just_allocated)
5138 stub_sec->size += size;
5140 /* Stub size has already been computed in arm_size_one_stub. Check
5141 consistency. */
5142 BFD_ASSERT (size == stub_entry->stub_size);
5144 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
5145 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
5146 sym_value |= 1;
5148 /* Assume non-empty slots have at least one and at most MAXRELOCS entries
5149 to relocate in each stub. */
5150 removed_sg_veneer =
5151 (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
5152 BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
5154 for (i = 0; i < nrelocs; i++)
5156 Elf_Internal_Rela rel;
5157 bool unresolved_reloc;
5158 char *error_message;
5159 bfd_vma points_to =
5160 sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
5162 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
5163 rel.r_info = ELF32_R_INFO (0,
5164 template_sequence[stub_reloc_idx[i]].r_type);
5165 rel.r_addend = 0;
5167 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
5168 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5169 template should refer back to the instruction after the original
5170 branch. We use target_section as Cortex-A8 erratum workaround stubs
5171 are only generated when both source and target are in the same
5172 section. */
5173 points_to = stub_entry->target_section->output_section->vma
5174 + stub_entry->target_section->output_offset
5175 + stub_entry->source_value;
5177 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5178 (template_sequence[stub_reloc_idx[i]].r_type),
5179 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
5180 points_to, info, stub_entry->target_section, "", STT_FUNC,
5181 stub_entry->branch_type,
5182 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
5183 &error_message);
5186 return true;
5187 #undef MAXRELOCS
5190 /* Calculate the template, template size and instruction size for a stub.
5191 Return value is the instruction size. */
5193 static unsigned int
5194 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5195 const insn_sequence **stub_template,
5196 int *stub_template_size)
5198 const insn_sequence *template_sequence = NULL;
5199 int template_size = 0, i;
5200 unsigned int size;
5202 template_sequence = stub_definitions[stub_type].template_sequence;
5203 if (stub_template)
5204 *stub_template = template_sequence;
5206 template_size = stub_definitions[stub_type].template_size;
5207 if (stub_template_size)
5208 *stub_template_size = template_size;
5210 size = 0;
5211 for (i = 0; i < template_size; i++)
5213 switch (template_sequence[i].type)
5215 case THUMB16_TYPE:
5216 size += 2;
5217 break;
5219 case ARM_TYPE:
5220 case THUMB32_TYPE:
5221 case DATA_TYPE:
5222 size += 4;
5223 break;
5225 default:
5226 BFD_FAIL ();
5227 return 0;
5231 return size;
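/* Worked example (hypothetical template, for illustration only): a
   sequence of two THUMB16_TYPE entries followed by one DATA_TYPE entry
   would yield 2 + 2 + 4 = 8 bytes.  The real sequences come from
   stub_definitions[], filled in from the stub templates defined earlier
   in this file.  */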
5234 /* As above, but don't actually build the stub. Just bump offset so
5235 we know stub section sizes. */
5237 static bool
5238 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5239 void *in_arg ATTRIBUTE_UNUSED)
5241 struct elf32_arm_stub_hash_entry *stub_entry;
5242 const insn_sequence *template_sequence;
5243 int template_size, size;
5245 /* Massage our args to the form they really have. */
5246 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5248 BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
5249 && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
5251 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5252 &template_size);
5254 /* Initialized to -1. Null size indicates an empty slot full of zeros. */
5255 if (stub_entry->stub_template_size)
5257 stub_entry->stub_size = size;
5258 stub_entry->stub_template = template_sequence;
5259 stub_entry->stub_template_size = template_size;
5262 /* Already accounted for. */
5263 if (stub_entry->stub_offset != (bfd_vma) -1)
5264 return true;
5266 size = (size + 7) & ~7;
5267 stub_entry->stub_sec->size += size;
5269 return true;
5272 /* External entry points for sizing and building linker stubs. */
5274 /* Set up various things so that we can make a list of input sections
5275 for each output section included in the link. Returns -1 on error,
5276 0 when no stubs will be needed, and 1 on success. */
5278 int
5279 elf32_arm_setup_section_lists (bfd *output_bfd,
5280 struct bfd_link_info *info)
5282 bfd *input_bfd;
5283 unsigned int bfd_count;
5284 unsigned int top_id, top_index;
5285 asection *section;
5286 asection **input_list, **list;
5287 size_t amt;
5288 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5290 if (htab == NULL)
5291 return 0;
5293 /* Count the number of input BFDs and find the top input section id. */
5294 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
5295 input_bfd != NULL;
5296 input_bfd = input_bfd->link.next)
5298 bfd_count += 1;
5299 for (section = input_bfd->sections;
5300 section != NULL;
5301 section = section->next)
5303 if (top_id < section->id)
5304 top_id = section->id;
5307 htab->bfd_count = bfd_count;
5309 amt = sizeof (struct map_stub) * (top_id + 1);
5310 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
5311 if (htab->stub_group == NULL)
5312 return -1;
5313 htab->top_id = top_id;
5315 /* We can't use output_bfd->section_count here to find the top output
5316 section index as some sections may have been removed, and
5317 _bfd_strip_section_from_output doesn't renumber the indices. */
5318 for (section = output_bfd->sections, top_index = 0;
5319 section != NULL;
5320 section = section->next)
5322 if (top_index < section->index)
5323 top_index = section->index;
5326 htab->top_index = top_index;
5327 amt = sizeof (asection *) * (top_index + 1);
5328 input_list = (asection **) bfd_malloc (amt);
5329 htab->input_list = input_list;
5330 if (input_list == NULL)
5331 return -1;
5333 /* For sections we aren't interested in, mark their entries with a
5334 value we can check later. */
5335 list = input_list + top_index;
5336 do
5337 *list = bfd_abs_section_ptr;
5338 while (list-- != input_list);
5340 for (section = output_bfd->sections;
5341 section != NULL;
5342 section = section->next)
5344 if ((section->flags & SEC_CODE) != 0)
5345 input_list[section->index] = NULL;
5348 return 1;
5351 /* The linker repeatedly calls this function for each input section,
5352 in the order that input sections are linked into output sections.
5353 Build lists of input sections to determine groupings between which
5354 we may insert linker stubs. */
5356 void
5357 elf32_arm_next_input_section (struct bfd_link_info *info,
5358 asection *isec)
5360 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5362 if (htab == NULL)
5363 return;
5365 if (isec->output_section->index <= htab->top_index)
5367 asection **list = htab->input_list + isec->output_section->index;
5369 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
5371 /* Steal the link_sec pointer for our list. */
5372 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
5373 /* This happens to make the list in reverse order,
5374 which we reverse later. */
5375 PREV_SEC (isec) = *list;
5376 *list = isec;
5381 /* See whether we can group stub sections together. Grouping stub
5382 sections may result in fewer stubs. More importantly, we need to
5383 put all .init* and .fini* stubs at the end of the .init or
5384 .fini output sections respectively, because glibc splits the
5385 _init and _fini functions into multiple parts. Putting a stub in
5386 the middle of a function is not a good idea. */
5388 static void
5389 group_sections (struct elf32_arm_link_hash_table *htab,
5390 bfd_size_type stub_group_size,
5391 bool stubs_always_after_branch)
5393 asection **list = htab->input_list;
5397 asection *tail = *list;
5398 asection *head;
5400 if (tail == bfd_abs_section_ptr)
5401 continue;
5403 /* Reverse the list: we must avoid placing stubs at the
5404 beginning of the section because the beginning of the text
5405 section may be required for an interrupt vector in bare metal
5406 code. */
5407 #define NEXT_SEC PREV_SEC
5408 head = NULL;
5409 while (tail != NULL)
5411 /* Pop from tail. */
5412 asection *item = tail;
5413 tail = PREV_SEC (item);
5415 /* Push on head. */
5416 NEXT_SEC (item) = head;
5417 head = item;
5420 while (head != NULL)
5422 asection *curr;
5423 asection *next;
5424 bfd_vma stub_group_start = head->output_offset;
5425 bfd_vma end_of_next;
5427 curr = head;
5428 while (NEXT_SEC (curr) != NULL)
5430 next = NEXT_SEC (curr);
5431 end_of_next = next->output_offset + next->size;
5432 if (end_of_next - stub_group_start >= stub_group_size)
5433 /* End of NEXT is too far from start, so stop. */
5434 break;
5435 /* Add NEXT to the group. */
5436 curr = next;
5439 /* OK, the size from the start to the start of CURR is less
5440 than stub_group_size and thus can be handled by one stub
5441 section. (Or the head section is itself larger than
5442 stub_group_size, in which case we may be toast.)
5443 We should really be keeping track of the total size of
5444 stubs added here, as stubs contribute to the final output
5445 section size. */
5448 next = NEXT_SEC (head);
5449 /* Set up this stub group. */
5450 htab->stub_group[head->id].link_sec = curr;
5452 while (head != curr && (head = next) != NULL);
5454 /* But wait, there's more! Input sections up to stub_group_size
5455 bytes after the stub section can be handled by it too. */
5456 if (!stubs_always_after_branch)
5458 stub_group_start = curr->output_offset + curr->size;
5460 while (next != NULL)
5462 end_of_next = next->output_offset + next->size;
5463 if (end_of_next - stub_group_start >= stub_group_size)
5464 /* End of NEXT is too far from stubs, so stop. */
5465 break;
5466 /* Add NEXT to the stub group. */
5467 head = next;
5468 next = NEXT_SEC (head);
5469 htab->stub_group[head->id].link_sec = curr;
5472 head = next;
5475 while (list++ != htab->input_list + htab->top_index);
5477 free (htab->input_list);
5478 #undef PREV_SEC
5479 #undef NEXT_SEC
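/* Added illustration (standalone sketch, not part of the original file):
   the first grouping rule above, restated on a toy array.  A group is cut
   as soon as the end of the next section lies stub_group_size or more
   bytes past the start of the current group; the real code additionally
   extends a group forward when stubs are allowed after the branch.  */

struct toy_section { unsigned long output_offset, size; };

static int
toy_count_stub_groups (const struct toy_section *secs, int count,
		       unsigned long stub_group_size)
{
  int i, groups = count ? 1 : 0;
  unsigned long group_start = count ? secs[0].output_offset : 0;

  for (i = 1; i < count; i++)
    if (secs[i].output_offset + secs[i].size - group_start >= stub_group_size)
      {
	/* End of this section is too far from the group start: start a
	   new group here.  */
	groups++;
	group_start = secs[i].output_offset;
      }
  return groups;
}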
5482 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5483 erratum fix. */
5485 static int
5486 a8_reloc_compare (const void *a, const void *b)
5488 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5489 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5491 if (ra->from < rb->from)
5492 return -1;
5493 else if (ra->from > rb->from)
5494 return 1;
5495 else
5496 return 0;
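/* Added note (not in the original source): the a8_erratum_reloc table is
   sorted with this comparator (see the qsort call in elf32_arm_size_stubs
   below) so that cortex_a8_erratum_scan can bsearch it using the branch
   address as the key.  */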
5499 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5500 const char *, char **);
5502 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5503 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5504 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5505 otherwise. */
5507 static bool
5508 cortex_a8_erratum_scan (bfd *input_bfd,
5509 struct bfd_link_info *info,
5510 struct a8_erratum_fix **a8_fixes_p,
5511 unsigned int *num_a8_fixes_p,
5512 unsigned int *a8_fix_table_size_p,
5513 struct a8_erratum_reloc *a8_relocs,
5514 unsigned int num_a8_relocs,
5515 unsigned prev_num_a8_fixes,
5516 bool *stub_changed_p)
5518 asection *section;
5519 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5520 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
5521 unsigned int num_a8_fixes = *num_a8_fixes_p;
5522 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
5524 if (htab == NULL)
5525 return false;
5527 for (section = input_bfd->sections;
5528 section != NULL;
5529 section = section->next)
5531 bfd_byte *contents = NULL;
5532 struct _arm_elf_section_data *sec_data;
5533 unsigned int span;
5534 bfd_vma base_vma;
5536 if (elf_section_type (section) != SHT_PROGBITS
5537 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
5538 || (section->flags & SEC_EXCLUDE) != 0
5539 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
5540 || (section->output_section == bfd_abs_section_ptr))
5541 continue;
5543 base_vma = section->output_section->vma + section->output_offset;
5545 if (elf_section_data (section)->this_hdr.contents != NULL)
5546 contents = elf_section_data (section)->this_hdr.contents;
5547 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
5548 return true;
5550 sec_data = elf32_arm_section_data (section);
5552 for (span = 0; span < sec_data->mapcount; span++)
5554 unsigned int span_start = sec_data->map[span].vma;
5555 unsigned int span_end = (span == sec_data->mapcount - 1)
5556 ? section->size : sec_data->map[span + 1].vma;
5557 unsigned int i;
5558 char span_type = sec_data->map[span].type;
5559 bool last_was_32bit = false, last_was_branch = false;
5561 if (span_type != 't')
5562 continue;
5564 /* Span is entirely within a single 4KB region: skip scanning. */
5565 if (((base_vma + span_start) & ~0xfff)
5566 == ((base_vma + span_end) & ~0xfff))
5567 continue;
5569 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5571 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5572 * The branch target is in the same 4KB region as the
5573 first half of the branch.
5574 * The instruction before the branch is a 32-bit
5575 length non-branch instruction. */
5576 for (i = span_start; i < span_end;)
5578 unsigned int insn = bfd_getl16 (&contents[i]);
5579 bool insn_32bit = false, is_blx = false, is_b = false;
5580 bool is_bl = false, is_bcc = false, is_32bit_branch;
5582 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5583 insn_32bit = true;
5585 if (insn_32bit)
5587 /* Load the rest of the insn (in manual-friendly order). */
5588 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5590 /* Encoding T4: B<c>.W. */
5591 is_b = (insn & 0xf800d000) == 0xf0009000;
5592 /* Encoding T1: BL<c>.W. */
5593 is_bl = (insn & 0xf800d000) == 0xf000d000;
5594 /* Encoding T2: BLX<c>.W. */
5595 is_blx = (insn & 0xf800d000) == 0xf000c000;
5596 /* Encoding T3: B<c>.W (not permitted in IT block). */
5597 is_bcc = (insn & 0xf800d000) == 0xf0008000
5598 && (insn & 0x07f00000) != 0x03800000;
5601 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5603 if (((base_vma + i) & 0xfff) == 0xffe
5604 && insn_32bit
5605 && is_32bit_branch
5606 && last_was_32bit
5607 && ! last_was_branch)
5609 bfd_signed_vma offset = 0;
5610 bool force_target_arm = false;
5611 bool force_target_thumb = false;
5612 bfd_vma target;
5613 enum elf32_arm_stub_type stub_type = arm_stub_none;
5614 struct a8_erratum_reloc key, *found;
5615 bool use_plt = false;
5617 key.from = base_vma + i;
5618 found = (struct a8_erratum_reloc *)
5619 bsearch (&key, a8_relocs, num_a8_relocs,
5620 sizeof (struct a8_erratum_reloc),
5621 &a8_reloc_compare);
5623 if (found)
5625 char *error_message = NULL;
5626 struct elf_link_hash_entry *entry;
5628 /* We don't care about the error returned from this
5629 function, only if there is glue or not. */
5630 entry = find_thumb_glue (info, found->sym_name,
5631 &error_message);
5633 if (entry)
5634 found->non_a8_stub = true;
5636 /* Keep a simpler condition, for the sake of clarity. */
5637 if (htab->root.splt != NULL && found->hash != NULL
5638 && found->hash->root.plt.offset != (bfd_vma) -1)
5639 use_plt = true;
5641 if (found->r_type == R_ARM_THM_CALL)
5643 if (found->branch_type == ST_BRANCH_TO_ARM
5644 || use_plt)
5645 force_target_arm = true;
5646 else
5647 force_target_thumb = true;
5651 /* Check if we have an offending branch instruction. */
5653 if (found && found->non_a8_stub)
5654 /* We've already made a stub for this instruction, e.g.
5655 it's a long branch or a Thumb->ARM stub. Assume that
5656 stub will suffice to work around the A8 erratum (see
5657 setting of always_after_branch above). */
5659 else if (is_bcc)
5661 offset = (insn & 0x7ff) << 1;
5662 offset |= (insn & 0x3f0000) >> 4;
5663 offset |= (insn & 0x2000) ? 0x40000 : 0;
5664 offset |= (insn & 0x800) ? 0x80000 : 0;
5665 offset |= (insn & 0x4000000) ? 0x100000 : 0;
5666 if (offset & 0x100000)
5667 offset |= ~ ((bfd_signed_vma) 0xfffff);
5668 stub_type = arm_stub_a8_veneer_b_cond;
5670 else if (is_b || is_bl || is_blx)
5672 int s = (insn & 0x4000000) != 0;
5673 int j1 = (insn & 0x2000) != 0;
5674 int j2 = (insn & 0x800) != 0;
5675 int i1 = !(j1 ^ s);
5676 int i2 = !(j2 ^ s);
5678 offset = (insn & 0x7ff) << 1;
5679 offset |= (insn & 0x3ff0000) >> 4;
5680 offset |= i2 << 22;
5681 offset |= i1 << 23;
5682 offset |= s << 24;
5683 if (offset & 0x1000000)
5684 offset |= ~ ((bfd_signed_vma) 0xffffff);
5686 if (is_blx)
5687 offset &= ~ ((bfd_signed_vma) 3);
5689 stub_type = is_blx ? arm_stub_a8_veneer_blx :
5690 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
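/* Added worked example (not in the original source): for the smallest BL
   encoding, hw1 = 0xf000 and hw2 = 0xf800, i.e. insn = 0xf000f800:
   s = 0, imm10 = 0, j1 = 1, j2 = 1 and imm11 = 0, so i1 = !(j1 ^ s) = 0
   and i2 = !(j2 ^ s) = 0, giving offset = 0; bit 24 (0x1000000) is clear,
   so no sign extension is applied and the branch target is simply
   pc_for_insn, i.e. the address of the instruction plus 4.  */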
5693 if (stub_type != arm_stub_none)
5695 bfd_vma pc_for_insn = base_vma + i + 4;
5697 /* The original instruction is a BL, but the target is
5698 an ARM instruction. If we were not making a stub,
5699 the BL would have been converted to a BLX. Use the
5700 BLX stub instead in that case. */
5701 if (htab->use_blx && force_target_arm
5702 && stub_type == arm_stub_a8_veneer_bl)
5704 stub_type = arm_stub_a8_veneer_blx;
5705 is_blx = true;
5706 is_bl = false;
5708 /* Conversely, if the original instruction was
5709 BLX but the target is Thumb mode, use the BL
5710 stub. */
5711 else if (force_target_thumb
5712 && stub_type == arm_stub_a8_veneer_blx)
5714 stub_type = arm_stub_a8_veneer_bl;
5715 is_blx = false;
5716 is_bl = true;
5719 if (is_blx)
5720 pc_for_insn &= ~ ((bfd_vma) 3);
5722 /* If we found a relocation, use the proper destination,
5723 not the offset in the (unrelocated) instruction.
5724 Note this is always done if we switched the stub type
5725 above. */
5726 if (found)
5727 offset =
5728 (bfd_signed_vma) (found->destination - pc_for_insn);
5730 /* If the stub will use a Thumb-mode branch to a
5731 PLT target, redirect it to the preceding Thumb
5732 entry point. */
5733 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5734 offset -= PLT_THUMB_STUB_SIZE;
5736 target = pc_for_insn + offset;
5738 /* The BLX stub is ARM-mode code. Adjust the offset to
5739 take the different PC value (+8 instead of +4) into
5740 account. */
5741 if (stub_type == arm_stub_a8_veneer_blx)
5742 offset += 4;
5744 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5746 char *stub_name = NULL;
5748 if (num_a8_fixes == a8_fix_table_size)
5750 a8_fix_table_size *= 2;
5751 a8_fixes = (struct a8_erratum_fix *)
5752 bfd_realloc (a8_fixes,
5753 sizeof (struct a8_erratum_fix)
5754 * a8_fix_table_size);
5757 if (num_a8_fixes < prev_num_a8_fixes)
5759 /* If we're doing a subsequent scan,
5760 check if we've found the same fix as
5761 before, and try and reuse the stub
5762 name. */
5763 stub_name = a8_fixes[num_a8_fixes].stub_name;
5764 if ((a8_fixes[num_a8_fixes].section != section)
5765 || (a8_fixes[num_a8_fixes].offset != i))
5767 free (stub_name);
5768 stub_name = NULL;
5769 *stub_changed_p = true;
5773 if (!stub_name)
5775 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5776 if (stub_name != NULL)
5777 sprintf (stub_name, "%x:%x", section->id, i);
5780 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5781 a8_fixes[num_a8_fixes].section = section;
5782 a8_fixes[num_a8_fixes].offset = i;
5783 a8_fixes[num_a8_fixes].target_offset =
5784 target - base_vma;
5785 a8_fixes[num_a8_fixes].orig_insn = insn;
5786 a8_fixes[num_a8_fixes].stub_name = stub_name;
5787 a8_fixes[num_a8_fixes].stub_type = stub_type;
5788 a8_fixes[num_a8_fixes].branch_type =
5789 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5791 num_a8_fixes++;
5796 i += insn_32bit ? 4 : 2;
5797 last_was_32bit = insn_32bit;
5798 last_was_branch = is_32bit_branch;
5802 if (elf_section_data (section)->this_hdr.contents == NULL)
5803 free (contents);
5806 *a8_fixes_p = a8_fixes;
5807 *num_a8_fixes_p = num_a8_fixes;
5808 *a8_fix_table_size_p = a8_fix_table_size;
5810 return false;
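/* Added sketch (hypothetical helper, not in the original source): the two
   address checks used by the scan above, restated as a standalone
   predicate.  A fix candidate is a 32-bit Thumb-2 branch whose first
   halfword occupies the last two bytes of a 4KB page (its address ends in
   0xffe, so the instruction straddles the page boundary) and whose target
   lies in the same 4KB page as that first halfword.  */

static int
a8_erratum_candidate_p (unsigned long branch_address,
			unsigned long target_address)
{
  return (branch_address & 0xfff) == 0xffe
	 && (branch_address & ~0xfffUL) == (target_address & ~0xfffUL);
}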
5813 /* Create or update a stub entry depending on whether the stub can already be
5814 found in HTAB. The stub is identified by:
5815 - its type STUB_TYPE
5816 - its source branch (note that several can share the same stub) whose
5817 section and relocation (if any) are given by SECTION and IRELA
5818 respectively
5819 - its target symbol whose input section, hash, name, value and branch type
5820 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5821 respectively
5823 If found, the value of the stub's target symbol is updated from SYM_VALUE
5824 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5825 TRUE and the stub entry is initialized.
5827 Returns the stub that was created or updated, or NULL if an error
5828 occurred. */
5830 static struct elf32_arm_stub_hash_entry *
5831 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5832 enum elf32_arm_stub_type stub_type, asection *section,
5833 Elf_Internal_Rela *irela, asection *sym_sec,
5834 struct elf32_arm_link_hash_entry *hash, char *sym_name,
5835 bfd_vma sym_value, enum arm_st_branch_type branch_type,
5836 bool *new_stub)
5838 const asection *id_sec;
5839 char *stub_name;
5840 struct elf32_arm_stub_hash_entry *stub_entry;
5841 unsigned int r_type;
5842 bool sym_claimed = arm_stub_sym_claimed (stub_type);
5844 BFD_ASSERT (stub_type != arm_stub_none);
5845 *new_stub = false;
5847 if (sym_claimed)
5848 stub_name = sym_name;
5849 else
5851 BFD_ASSERT (irela);
5852 BFD_ASSERT (section);
5853 BFD_ASSERT (section->id <= htab->top_id);
5855 /* Support for grouping stub sections. */
5856 id_sec = htab->stub_group[section->id].link_sec;
5858 /* Get the name of this stub. */
5859 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5860 stub_type);
5861 if (!stub_name)
5862 return NULL;
5865 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, false,
5866 false);
5867 /* The proper stub has already been created, just update its value. */
5868 if (stub_entry != NULL)
5870 if (!sym_claimed)
5871 free (stub_name);
5872 stub_entry->target_value = sym_value;
5873 return stub_entry;
5876 stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5877 if (stub_entry == NULL)
5879 if (!sym_claimed)
5880 free (stub_name);
5881 return NULL;
5884 stub_entry->target_value = sym_value;
5885 stub_entry->target_section = sym_sec;
5886 stub_entry->stub_type = stub_type;
5887 stub_entry->h = hash;
5888 stub_entry->branch_type = branch_type;
5890 if (sym_claimed)
5891 stub_entry->output_name = sym_name;
5892 else
5894 if (sym_name == NULL)
5895 sym_name = "unnamed";
5896 stub_entry->output_name = (char *)
5897 bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5898 + strlen (sym_name));
5899 if (stub_entry->output_name == NULL)
5901 free (stub_name);
5902 return NULL;
5905 /* For historical reasons, use the existing names for ARM-to-Thumb and
5906 Thumb-to-ARM stubs. */
5907 r_type = ELF32_R_TYPE (irela->r_info);
5908 if ((r_type == (unsigned int) R_ARM_THM_CALL
5909 || r_type == (unsigned int) R_ARM_THM_JUMP24
5910 || r_type == (unsigned int) R_ARM_THM_JUMP19)
5911 && branch_type == ST_BRANCH_TO_ARM)
5912 sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5913 else if ((r_type == (unsigned int) R_ARM_CALL
5914 || r_type == (unsigned int) R_ARM_JUMP24)
5915 && branch_type == ST_BRANCH_TO_THUMB)
5916 sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5917 else
5918 sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5921 *new_stub = true;
5922 return stub_entry;
5925 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5926 gateway veneer to transition from non-secure to secure state and create them
5927 accordingly.
5929 "ARMv8-M Security Extensions: Requirements on Development Tools" document
5930 defines the conditions that govern Secure Gateway veneer creation for a
5931 given symbol <SYM> as follows:
5932 - it has function type
5933 - it has non-local binding
5934 - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5935 same type, binding and value as <SYM> (called normal symbol).
5936 An entry function can handle secure state transition itself in which case
5937 its special symbol would have a different value from the normal symbol.
5939 OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5940 entry mapping while HTAB gives the name to hash entry mapping.
5941 *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
5942 created.
5944 The return value is FALSE if an error occurred, TRUE otherwise. */
5946 static bool
5947 cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
5948 obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
5949 int *cmse_stub_created)
5951 const struct elf_backend_data *bed;
5952 Elf_Internal_Shdr *symtab_hdr;
5953 unsigned i, j, sym_count, ext_start;
5954 Elf_Internal_Sym *cmse_sym, *local_syms;
5955 struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
5956 enum arm_st_branch_type branch_type;
5957 char *sym_name, *lsym_name;
5958 bfd_vma sym_value;
5959 asection *section;
5960 struct elf32_arm_stub_hash_entry *stub_entry;
5961 bool is_v8m, new_stub, cmse_invalid, ret = true;
5963 bed = get_elf_backend_data (input_bfd);
5964 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5965 sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
5966 ext_start = symtab_hdr->sh_info;
5967 is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
5968 && out_attr[Tag_CPU_arch_profile].i == 'M');
5970 local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
5971 if (local_syms == NULL)
5972 local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5973 symtab_hdr->sh_info, 0, NULL, NULL,
5974 NULL);
5975 if (symtab_hdr->sh_info && local_syms == NULL)
5976 return false;
5978 /* Scan symbols. */
5979 for (i = 0; i < sym_count; i++)
5981 cmse_invalid = false;
5983 if (i < ext_start)
5985 cmse_sym = &local_syms[i];
5986 sym_name = bfd_elf_string_from_elf_section (input_bfd,
5987 symtab_hdr->sh_link,
5988 cmse_sym->st_name);
5989 if (!sym_name || !startswith (sym_name, CMSE_PREFIX))
5990 continue;
5992 /* Special symbol with local binding. */
5993 cmse_invalid = true;
5995 else
5997 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
5998 sym_name = (char *) cmse_hash->root.root.root.string;
5999 if (!startswith (sym_name, CMSE_PREFIX))
6000 continue;
6002 /* Special symbol has incorrect binding or type. */
6003 if ((cmse_hash->root.root.type != bfd_link_hash_defined
6004 && cmse_hash->root.root.type != bfd_link_hash_defweak)
6005 || cmse_hash->root.type != STT_FUNC)
6006 cmse_invalid = true;
6009 if (!is_v8m)
6011 _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
6012 "ARMv8-M architecture or later"),
6013 input_bfd, sym_name);
6014 is_v8m = true; /* Avoid multiple warnings. */
6015 ret = false;
6018 if (cmse_invalid)
6020 _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
6021 " a global or weak function symbol"),
6022 input_bfd, sym_name);
6023 ret = false;
6024 if (i < ext_start)
6025 continue;
6028 sym_name += strlen (CMSE_PREFIX);
6029 hash = (struct elf32_arm_link_hash_entry *)
6030 elf_link_hash_lookup (&(htab)->root, sym_name, false, false, true);
6032 /* No associated normal symbol or it is neither global nor weak. */
6033 if (!hash
6034 || (hash->root.root.type != bfd_link_hash_defined
6035 && hash->root.root.type != bfd_link_hash_defweak)
6036 || hash->root.type != STT_FUNC)
6038 /* Initialize here to avoid warning about use of possibly
6039 uninitialized variable. */
6040 j = 0;
6042 if (!hash)
6044 /* Searching for a normal symbol with local binding. */
6045 for (; j < ext_start; j++)
6047 lsym_name =
6048 bfd_elf_string_from_elf_section (input_bfd,
6049 symtab_hdr->sh_link,
6050 local_syms[j].st_name);
6051 if (!strcmp (sym_name, lsym_name))
6052 break;
6056 if (hash || j < ext_start)
6058 _bfd_error_handler
6059 (_("%pB: invalid standard symbol `%s'; it must be "
6060 "a global or weak function symbol"),
6061 input_bfd, sym_name);
6063 else
6064 _bfd_error_handler
6065 (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
6066 ret = false;
6067 if (!hash)
6068 continue;
6071 sym_value = hash->root.root.u.def.value;
6072 section = hash->root.root.u.def.section;
6074 if (cmse_hash->root.root.u.def.section != section)
6076 _bfd_error_handler
6077 (_("%pB: `%s' and its special symbol are in different sections"),
6078 input_bfd, sym_name);
6079 ret = false;
6081 if (cmse_hash->root.root.u.def.value != sym_value)
6082 continue; /* Ignore: could be an entry function starting with SG. */
6084 /* If this section is a link-once section that will be discarded, then
6085 don't create any stubs. */
6086 if (section->output_section == NULL)
6088 _bfd_error_handler
6089 (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
6090 continue;
6093 if (hash->root.size == 0)
6095 _bfd_error_handler
6096 (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
6097 ret = false;
6100 if (!ret)
6101 continue;
6102 branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6103 stub_entry
6104 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6105 NULL, NULL, section, hash, sym_name,
6106 sym_value, branch_type, &new_stub);
6108 if (stub_entry == NULL)
6109 ret = false;
6110 else
6112 BFD_ASSERT (new_stub);
6113 (*cmse_stub_created)++;
6117 if (!symtab_hdr->contents)
6118 free (local_syms);
6119 return ret;
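/* Added illustration (not part of elf32-arm.c): the special/normal symbol
   pair described above is what a CMSE-aware compiler emits.  For example,
   compiling the following with GCC's -mcmse option:

     int __attribute__ ((cmse_nonsecure_entry))
     secure_add (int a, int b)
     {
       return a + b;
     }

   should produce both `secure_add' and `__acle_se_secure_add' as global
   function symbols with the same value, which is the pattern cmse_scan
   keys on to create a secure gateway veneer.  */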
6122 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6123 code entry function, i.e. can be called from non-secure code without using a
6124 veneer. */
6126 static bool
6127 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6129 bfd_byte contents[4];
6130 uint32_t first_insn;
6131 asection *section;
6132 file_ptr offset;
6133 bfd *abfd;
6135 /* Defined symbol of function type. */
6136 if (hash->root.root.type != bfd_link_hash_defined
6137 && hash->root.root.type != bfd_link_hash_defweak)
6138 return false;
6139 if (hash->root.type != STT_FUNC)
6140 return false;
6142 /* Read first instruction. */
6143 section = hash->root.root.u.def.section;
6144 abfd = section->owner;
6145 offset = hash->root.root.u.def.value - section->vma;
6146 if (!bfd_get_section_contents (abfd, section, contents, offset,
6147 sizeof (contents)))
6148 return false;
6150 first_insn = bfd_get_32 (abfd, contents);
6152 /* Starts with an SG instruction. */
6153 return first_insn == 0xe97fe97f;
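/* Added note (not in the original source): SG is encoded as two identical
   halfwords, 0xe97f 0xe97f, so the 32-bit word read above compares equal
   to 0xe97fe97f on both little-endian and big-endian targets.  */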
6156 /* Output the name (in the symbol table) of the veneer GEN_ENTRY if it is a new
6157 secure gateway veneer (i.e. the veneer was not in the input import library)
6158 and there is no output import library (GEN_INFO->out_implib_bfd is NULL). */
6160 static bool
6161 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6163 struct elf32_arm_stub_hash_entry *stub_entry;
6164 struct bfd_link_info *info;
6166 /* Massage our args to the form they really have. */
6167 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6168 info = (struct bfd_link_info *) gen_info;
6170 if (info->out_implib_bfd)
6171 return true;
6173 if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6174 return true;
6176 if (stub_entry->stub_offset == (bfd_vma) -1)
6177 _bfd_error_handler (" %s", stub_entry->output_name);
6179 return true;
6182 /* Set the offset of each secure gateway veneer so that its address remains
6183 identical to the one in the input import library referred to by
6184 HTAB->in_implib_bfd. A warning is issued for veneers that disappeared
6185 (present in the input import library but absent from the executable being
6186 linked) or if new veneers appeared and there is no output import library
6187 (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6188 number of secure gateway veneers found in the input import library).
6190 The function returns FALSE if an error occurred, TRUE otherwise. If no error
6191 occurred, *CMSE_STUB_CREATED gives the number of SG veneers created by both
6192 cmse_scan and this function, and HTAB->new_cmse_stub_offset is set just past
6193 the highest veneer offset observed, so that new veneers are laid out after it. */
6195 static bool
6196 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6197 struct elf32_arm_link_hash_table *htab,
6198 int *cmse_stub_created)
6200 long symsize;
6201 char *sym_name;
6202 flagword flags;
6203 long i, symcount;
6204 bfd *in_implib_bfd;
6205 asection *stub_out_sec;
6206 bool ret = true;
6207 Elf_Internal_Sym *intsym;
6208 const char *out_sec_name;
6209 bfd_size_type cmse_stub_size;
6210 asymbol **sympp = NULL, *sym;
6211 struct elf32_arm_link_hash_entry *hash;
6212 const insn_sequence *cmse_stub_template;
6213 struct elf32_arm_stub_hash_entry *stub_entry;
6214 int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6215 bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6216 bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6218 /* No input secure gateway import library. */
6219 if (!htab->in_implib_bfd)
6220 return true;
6222 in_implib_bfd = htab->in_implib_bfd;
6223 if (!htab->cmse_implib)
6225 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6226 "Gateway import libraries"), in_implib_bfd);
6227 return false;
6230 /* Get symbol table size. */
6231 symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6232 if (symsize < 0)
6233 return false;
6235 /* Read in the input secure gateway import library's symbol table. */
6236 sympp = (asymbol **) bfd_malloc (symsize);
6237 if (sympp == NULL)
6238 return false;
6240 symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6241 if (symcount < 0)
6243 ret = false;
6244 goto free_sym_buf;
6247 htab->new_cmse_stub_offset = 0;
6248 cmse_stub_size =
6249 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6250 &cmse_stub_template,
6251 &cmse_stub_template_size);
6252 out_sec_name =
6253 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6254 stub_out_sec =
6255 bfd_get_section_by_name (htab->obfd, out_sec_name);
6256 if (stub_out_sec != NULL)
6257 cmse_stub_sec_vma = stub_out_sec->vma;
6259 /* Set addresses of veneers mentioned in the input secure gateway import
6260 library's symbol table. */
6261 for (i = 0; i < symcount; i++)
6263 sym = sympp[i];
6264 flags = sym->flags;
6265 sym_name = (char *) bfd_asymbol_name (sym);
6266 intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6268 if (sym->section != bfd_abs_section_ptr
6269 || !(flags & (BSF_GLOBAL | BSF_WEAK))
6270 || (flags & BSF_FUNCTION) != BSF_FUNCTION
6271 || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6272 != ST_BRANCH_TO_THUMB))
6274 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6275 "symbol should be absolute, global and "
6276 "refer to Thumb functions"),
6277 in_implib_bfd, sym_name);
6278 ret = false;
6279 continue;
6282 veneer_value = bfd_asymbol_value (sym);
6283 stub_offset = veneer_value - cmse_stub_sec_vma;
6284 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6285 false, false);
6286 hash = (struct elf32_arm_link_hash_entry *)
6287 elf_link_hash_lookup (&(htab)->root, sym_name, false, false, true);
6289 /* The stub entry should have been created by cmse_scan, or the symbol should
6290 be that of a secure function callable from non-secure code. */
6291 if (!stub_entry && !hash)
6293 bool new_stub;
6295 _bfd_error_handler
6296 (_("entry function `%s' disappeared from secure code"), sym_name);
6297 hash = (struct elf32_arm_link_hash_entry *)
6298 elf_link_hash_lookup (&(htab)->root, sym_name, true, true, true);
6299 stub_entry
6300 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6301 NULL, NULL, bfd_abs_section_ptr, hash,
6302 sym_name, veneer_value,
6303 ST_BRANCH_TO_THUMB, &new_stub);
6304 if (stub_entry == NULL)
6305 ret = false;
6306 else
6308 BFD_ASSERT (new_stub);
6309 new_cmse_stubs_created++;
6310 (*cmse_stub_created)++;
6312 stub_entry->stub_template_size = stub_entry->stub_size = 0;
6313 stub_entry->stub_offset = stub_offset;
6315 /* Symbol found is not callable from non-secure code. */
6316 else if (!stub_entry)
6318 if (!cmse_entry_fct_p (hash))
6320 _bfd_error_handler (_("`%s' refers to a non entry function"),
6321 sym_name);
6322 ret = false;
6324 continue;
6326 else
6328 /* Only stubs for SG veneers should have been created. */
6329 BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6331 /* Check visibility hasn't changed. */
6332 if (!!(flags & BSF_GLOBAL)
6333 != (hash->root.root.type == bfd_link_hash_defined))
6334 _bfd_error_handler
6335 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6336 sym_name);
6338 stub_entry->stub_offset = stub_offset;
6341 /* Size should match that of a SG veneer. */
6342 if (intsym->st_size != cmse_stub_size)
6344 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6345 in_implib_bfd, sym_name);
6346 ret = false;
6349 /* Previous veneer address is before current SG veneer section. */
6350 if (veneer_value < cmse_stub_sec_vma)
6352 /* Avoid offset underflow. */
6353 if (stub_entry)
6354 stub_entry->stub_offset = 0;
6355 stub_offset = 0;
6356 ret = false;
6359 /* Complain if stub offset not a multiple of stub size. */
6360 if (stub_offset % cmse_stub_size)
6362 _bfd_error_handler
6363 (_("offset of veneer for entry function `%s' not a multiple of "
6364 "its size"), sym_name);
6365 ret = false;
6368 if (!ret)
6369 continue;
6371 new_cmse_stubs_created--;
6372 if (veneer_value < cmse_stub_array_start)
6373 cmse_stub_array_start = veneer_value;
6374 next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6375 if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6376 htab->new_cmse_stub_offset = next_cmse_stub_offset;
6379 if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6381 BFD_ASSERT (new_cmse_stubs_created > 0);
6382 _bfd_error_handler
6383 (_("new entry function(s) introduced but no output import library "
6384 "specified:"));
6385 bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6388 if (cmse_stub_array_start != cmse_stub_sec_vma)
6390 _bfd_error_handler
6391 (_("start address of `%s' is different from previous link"),
6392 out_sec_name);
6393 ret = false;
6396 free_sym_buf:
6397 free (sympp);
6398 return ret;
6401 /* Determine and set the size of the stub section for a final link.
6403 The basic idea here is to examine all the relocations looking for
6404 PC-relative calls to a target that is unreachable with a "bl"
6405 instruction. */
6407 bool
6408 elf32_arm_size_stubs (bfd *output_bfd,
6409 bfd *stub_bfd,
6410 struct bfd_link_info *info,
6411 bfd_signed_vma group_size,
6412 asection * (*add_stub_section) (const char *, asection *,
6413 asection *,
6414 unsigned int),
6415 void (*layout_sections_again) (void))
6417 bool ret = true;
6418 obj_attribute *out_attr;
6419 int cmse_stub_created = 0;
6420 bfd_size_type stub_group_size;
6421 bool m_profile, stubs_always_after_branch, first_veneer_scan = true;
6422 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
6423 struct a8_erratum_fix *a8_fixes = NULL;
6424 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
6425 struct a8_erratum_reloc *a8_relocs = NULL;
6426 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
6428 if (htab == NULL)
6429 return false;
6431 if (htab->fix_cortex_a8)
6433 a8_fixes = (struct a8_erratum_fix *)
6434 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
6435 a8_relocs = (struct a8_erratum_reloc *)
6436 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
6439 /* Propagate mach to stub bfd, because it may not have been
6440 finalized when we created stub_bfd. */
6441 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
6442 bfd_get_mach (output_bfd));
6444 /* Stash our params away. */
6445 htab->stub_bfd = stub_bfd;
6446 htab->add_stub_section = add_stub_section;
6447 htab->layout_sections_again = layout_sections_again;
6448 stubs_always_after_branch = group_size < 0;
6450 out_attr = elf_known_obj_attributes_proc (output_bfd);
6451 m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
6453 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6454 as the first half of a 32-bit branch straddling two 4K pages. This is a
6455 crude way of enforcing that. */
6456 if (htab->fix_cortex_a8)
6457 stubs_always_after_branch = 1;
6459 if (group_size < 0)
6460 stub_group_size = -group_size;
6461 else
6462 stub_group_size = group_size;
6464 if (stub_group_size == 1)
6466 /* Default values. */
6467 /* The Thumb branch range of +-4MB has to be used as the default
6468 maximum size (a given section can contain both ARM and Thumb
6469 code, so the worst case has to be taken into account).
6471 This value is 24K less than that, which allows for 2025
6472 12-byte stubs. If we exceed that, then we will fail to link.
6473 The user will have to relink with an explicit group size
6474 option. */
6475 stub_group_size = 4170000;
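/* Added note (not in the original source): 4MB is 4194304 bytes and
   4194304 - 4170000 = 24304, i.e. the "24K less" margin mentioned above,
   which corresponds to 24304 / 12 = ~2025 twelve-byte stubs.  */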
6478 group_sections (htab, stub_group_size, stubs_always_after_branch);
6480 /* If we're applying the cortex A8 fix, we need to determine the
6481 program header size now, because we cannot change it later --
6482 that could alter section placements. Notice the A8 erratum fix
6483 ends up requiring the section addresses to remain unchanged
6484 modulo the page size. That's something we cannot represent
6485 inside BFD, and we don't want to force the section alignment to
6486 be the page size. */
6487 if (htab->fix_cortex_a8)
6488 (*htab->layout_sections_again) ();
6490 while (1)
6492 bfd *input_bfd;
6493 unsigned int bfd_indx;
6494 asection *stub_sec;
6495 enum elf32_arm_stub_type stub_type;
6496 bool stub_changed = false;
6497 unsigned prev_num_a8_fixes = num_a8_fixes;
6499 num_a8_fixes = 0;
6500 for (input_bfd = info->input_bfds, bfd_indx = 0;
6501 input_bfd != NULL;
6502 input_bfd = input_bfd->link.next, bfd_indx++)
6504 Elf_Internal_Shdr *symtab_hdr;
6505 asection *section;
6506 Elf_Internal_Sym *local_syms = NULL;
6508 if (!is_arm_elf (input_bfd))
6509 continue;
6510 if ((input_bfd->flags & DYNAMIC) != 0
6511 && (elf_sym_hashes (input_bfd) == NULL
6512 || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0))
6513 continue;
6515 num_a8_relocs = 0;
6517 /* We'll need the symbol table in a second. */
6518 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
6519 if (symtab_hdr->sh_info == 0)
6520 continue;
6522 /* Limit the scan of symbols to object files whose profile is
6523 Microcontroller, so as not to hinder performance in the general case. */
6524 if (m_profile && first_veneer_scan)
6526 struct elf_link_hash_entry **sym_hashes;
6528 sym_hashes = elf_sym_hashes (input_bfd);
6529 if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
6530 &cmse_stub_created))
6531 goto error_ret_free_local;
6533 if (cmse_stub_created != 0)
6534 stub_changed = true;
6537 /* Walk over each section attached to the input bfd. */
6538 for (section = input_bfd->sections;
6539 section != NULL;
6540 section = section->next)
6542 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
6544 /* If there aren't any relocs, then there's nothing more
6545 to do. */
6546 if ((section->flags & SEC_RELOC) == 0
6547 || section->reloc_count == 0
6548 || (section->flags & SEC_CODE) == 0)
6549 continue;
6551 /* If this section is a link-once section that will be
6552 discarded, then don't create any stubs. */
6553 if (section->output_section == NULL
6554 || section->output_section->owner != output_bfd)
6555 continue;
6557 /* Get the relocs. */
6558 internal_relocs
6559 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
6560 NULL, info->keep_memory);
6561 if (internal_relocs == NULL)
6562 goto error_ret_free_local;
6564 /* Now examine each relocation. */
6565 irela = internal_relocs;
6566 irelaend = irela + section->reloc_count;
6567 for (; irela < irelaend; irela++)
6569 unsigned int r_type, r_indx;
6570 asection *sym_sec;
6571 bfd_vma sym_value;
6572 bfd_vma destination;
6573 struct elf32_arm_link_hash_entry *hash;
6574 const char *sym_name;
6575 unsigned char st_type;
6576 enum arm_st_branch_type branch_type;
6577 bool created_stub = false;
6579 r_type = ELF32_R_TYPE (irela->r_info);
6580 r_indx = ELF32_R_SYM (irela->r_info);
6582 if (r_type >= (unsigned int) R_ARM_max)
6584 bfd_set_error (bfd_error_bad_value);
6585 error_ret_free_internal:
6586 if (elf_section_data (section)->relocs == NULL)
6587 free (internal_relocs);
6588 /* Fall through. */
6589 error_ret_free_local:
6590 if (symtab_hdr->contents != (unsigned char *) local_syms)
6591 free (local_syms);
6592 return false;
6595 hash = NULL;
6596 if (r_indx >= symtab_hdr->sh_info)
6597 hash = elf32_arm_hash_entry
6598 (elf_sym_hashes (input_bfd)
6599 [r_indx - symtab_hdr->sh_info]);
6601 /* Only look for stubs on branch instructions, or
6602 non-relaxed TLSCALL. */
6603 if ((r_type != (unsigned int) R_ARM_CALL)
6604 && (r_type != (unsigned int) R_ARM_THM_CALL)
6605 && (r_type != (unsigned int) R_ARM_JUMP24)
6606 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
6607 && (r_type != (unsigned int) R_ARM_THM_XPC22)
6608 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
6609 && (r_type != (unsigned int) R_ARM_PLT32)
6610 && !((r_type == (unsigned int) R_ARM_TLS_CALL
6611 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6612 && r_type == (elf32_arm_tls_transition
6613 (info, r_type,
6614 (struct elf_link_hash_entry *) hash))
6615 && ((hash ? hash->tls_type
6616 : (elf32_arm_local_got_tls_type
6617 (input_bfd)[r_indx]))
6618 & GOT_TLS_GDESC) != 0))
6619 continue;
6621 /* Now determine the call target, its name, value,
6622 section. */
6623 sym_sec = NULL;
6624 sym_value = 0;
6625 destination = 0;
6626 sym_name = NULL;
6628 if (r_type == (unsigned int) R_ARM_TLS_CALL
6629 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6631 /* A non-relaxed TLS call. The target is the
6632 plt-resident trampoline and has nothing to do
6633 with the symbol. */
6634 BFD_ASSERT (htab->tls_trampoline > 0);
6635 sym_sec = htab->root.splt;
6636 sym_value = htab->tls_trampoline;
6637 hash = 0;
6638 st_type = STT_FUNC;
6639 branch_type = ST_BRANCH_TO_ARM;
6641 else if (!hash)
6643 /* It's a local symbol. */
6644 Elf_Internal_Sym *sym;
6646 if (local_syms == NULL)
6648 local_syms
6649 = (Elf_Internal_Sym *) symtab_hdr->contents;
6650 if (local_syms == NULL)
6651 local_syms
6652 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6653 symtab_hdr->sh_info, 0,
6654 NULL, NULL, NULL);
6655 if (local_syms == NULL)
6656 goto error_ret_free_internal;
6659 sym = local_syms + r_indx;
6660 if (sym->st_shndx == SHN_UNDEF)
6661 sym_sec = bfd_und_section_ptr;
6662 else if (sym->st_shndx == SHN_ABS)
6663 sym_sec = bfd_abs_section_ptr;
6664 else if (sym->st_shndx == SHN_COMMON)
6665 sym_sec = bfd_com_section_ptr;
6666 else
6667 sym_sec =
6668 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
6670 if (!sym_sec)
6671 /* This is an undefined symbol. It can never
6672 be resolved. */
6673 continue;
6675 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
6676 sym_value = sym->st_value;
6677 destination = (sym_value + irela->r_addend
6678 + sym_sec->output_offset
6679 + sym_sec->output_section->vma);
6680 st_type = ELF_ST_TYPE (sym->st_info);
6681 branch_type =
6682 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
6683 sym_name
6684 = bfd_elf_string_from_elf_section (input_bfd,
6685 symtab_hdr->sh_link,
6686 sym->st_name);
6688 else
6690 /* It's an external symbol. */
6691 while (hash->root.root.type == bfd_link_hash_indirect
6692 || hash->root.root.type == bfd_link_hash_warning)
6693 hash = ((struct elf32_arm_link_hash_entry *)
6694 hash->root.root.u.i.link);
6696 if (hash->root.root.type == bfd_link_hash_defined
6697 || hash->root.root.type == bfd_link_hash_defweak)
6699 sym_sec = hash->root.root.u.def.section;
6700 sym_value = hash->root.root.u.def.value;
6702 struct elf32_arm_link_hash_table *globals =
6703 elf32_arm_hash_table (info);
6705 /* For a destination in a shared library,
6706 use the PLT stub as target address to
6707 decide whether a branch stub is
6708 needed. */
6709 if (globals != NULL
6710 && globals->root.splt != NULL
6711 && hash != NULL
6712 && hash->root.plt.offset != (bfd_vma) -1)
6714 sym_sec = globals->root.splt;
6715 sym_value = hash->root.plt.offset;
6716 if (sym_sec->output_section != NULL)
6717 destination = (sym_value
6718 + sym_sec->output_offset
6719 + sym_sec->output_section->vma);
6721 else if (sym_sec->output_section != NULL)
6722 destination = (sym_value + irela->r_addend
6723 + sym_sec->output_offset
6724 + sym_sec->output_section->vma);
6726 else if ((hash->root.root.type == bfd_link_hash_undefined)
6727 || (hash->root.root.type == bfd_link_hash_undefweak))
6729 /* For a shared library, use the PLT stub as
6730 target address to decide whether a long
6731 branch stub is needed.
6732 For absolute code, they cannot be handled. */
6733 struct elf32_arm_link_hash_table *globals =
6734 elf32_arm_hash_table (info);
6736 if (globals != NULL
6737 && globals->root.splt != NULL
6738 && hash != NULL
6739 && hash->root.plt.offset != (bfd_vma) -1)
6741 sym_sec = globals->root.splt;
6742 sym_value = hash->root.plt.offset;
6743 if (sym_sec->output_section != NULL)
6744 destination = (sym_value
6745 + sym_sec->output_offset
6746 + sym_sec->output_section->vma);
6748 else
6749 continue;
6751 else
6753 bfd_set_error (bfd_error_bad_value);
6754 goto error_ret_free_internal;
6756 st_type = hash->root.type;
6757 branch_type =
6758 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6759 sym_name = hash->root.root.root.string;
6764 bool new_stub;
6765 struct elf32_arm_stub_hash_entry *stub_entry;
6767 /* Determine what (if any) linker stub is needed. */
6768 stub_type = arm_type_of_stub (info, section, irela,
6769 st_type, &branch_type,
6770 hash, destination, sym_sec,
6771 input_bfd, sym_name);
6772 if (stub_type == arm_stub_none)
6773 break;
6775 /* We've either created a stub for this reloc already,
6776 or we are about to. */
6777 stub_entry =
6778 elf32_arm_create_stub (htab, stub_type, section, irela,
6779 sym_sec, hash,
6780 (char *) sym_name, sym_value,
6781 branch_type, &new_stub);
6783 created_stub = stub_entry != NULL;
6784 if (!created_stub)
6785 goto error_ret_free_internal;
6786 else if (!new_stub)
6787 break;
6788 else
6789 stub_changed = true;
6791 while (0);
6793 /* Look for relocations which might trigger Cortex-A8
6794 erratum. */
6795 if (htab->fix_cortex_a8
6796 && (r_type == (unsigned int) R_ARM_THM_JUMP24
6797 || r_type == (unsigned int) R_ARM_THM_JUMP19
6798 || r_type == (unsigned int) R_ARM_THM_CALL
6799 || r_type == (unsigned int) R_ARM_THM_XPC22))
6801 bfd_vma from = section->output_section->vma
6802 + section->output_offset
6803 + irela->r_offset;
6805 if ((from & 0xfff) == 0xffe)
6807 /* Found a candidate. Note we haven't checked the
6808 destination is within 4K here: if we do so (and
6809 don't create an entry in a8_relocs) we can't tell
6810 that a branch should have been relocated when
6811 scanning later. */
6812 if (num_a8_relocs == a8_reloc_table_size)
6814 a8_reloc_table_size *= 2;
6815 a8_relocs = (struct a8_erratum_reloc *)
6816 bfd_realloc (a8_relocs,
6817 sizeof (struct a8_erratum_reloc)
6818 * a8_reloc_table_size);
6821 a8_relocs[num_a8_relocs].from = from;
6822 a8_relocs[num_a8_relocs].destination = destination;
6823 a8_relocs[num_a8_relocs].r_type = r_type;
6824 a8_relocs[num_a8_relocs].branch_type = branch_type;
6825 a8_relocs[num_a8_relocs].sym_name = sym_name;
6826 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
6827 a8_relocs[num_a8_relocs].hash = hash;
6829 num_a8_relocs++;
6834 /* We're done with the internal relocs, free them. */
6835 if (elf_section_data (section)->relocs == NULL)
6836 free (internal_relocs);
6839 if (htab->fix_cortex_a8)
6841 /* Sort relocs which might apply to Cortex-A8 erratum. */
6842 qsort (a8_relocs, num_a8_relocs,
6843 sizeof (struct a8_erratum_reloc),
6844 &a8_reloc_compare);
6846 /* Scan for branches which might trigger Cortex-A8 erratum. */
6847 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
6848 &num_a8_fixes, &a8_fix_table_size,
6849 a8_relocs, num_a8_relocs,
6850 prev_num_a8_fixes, &stub_changed)
6851 != 0)
6852 goto error_ret_free_local;
6855 if (local_syms != NULL
6856 && symtab_hdr->contents != (unsigned char *) local_syms)
6858 if (!info->keep_memory)
6859 free (local_syms);
6860 else
6861 symtab_hdr->contents = (unsigned char *) local_syms;
6865 if (first_veneer_scan
6866 && !set_cmse_veneer_addr_from_implib (info, htab,
6867 &cmse_stub_created))
6868 ret = false;
6870 if (prev_num_a8_fixes != num_a8_fixes)
6871 stub_changed = true;
6873 if (!stub_changed)
6874 break;
6876 /* OK, we've added some stubs. Find out the new size of the
6877 stub sections. */
6878 for (stub_sec = htab->stub_bfd->sections;
6879 stub_sec != NULL;
6880 stub_sec = stub_sec->next)
6882 /* Ignore non-stub sections. */
6883 if (!strstr (stub_sec->name, STUB_SUFFIX))
6884 continue;
6886 stub_sec->size = 0;
6889 /* Add new SG veneers after those already in the input import
6890 library. */
6891 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6892 stub_type++)
6894 bfd_vma *start_offset_p;
6895 asection **stub_sec_p;
6897 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6898 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6899 if (start_offset_p == NULL)
6900 continue;
6902 BFD_ASSERT (stub_sec_p != NULL);
6903 if (*stub_sec_p != NULL)
6904 (*stub_sec_p)->size = *start_offset_p;
6907 /* Compute stub section size, considering padding. */
6908 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
6909 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6910 stub_type++)
6912 int size, padding;
6913 asection **stub_sec_p;
6915 padding = arm_dedicated_stub_section_padding (stub_type);
6916 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6917 /* Skip if no stub input section or no stub section padding
6918 required. */
6919 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
6920 continue;
6921 /* Stub section padding required but no dedicated section. */
6922 BFD_ASSERT (stub_sec_p);
6924 size = (*stub_sec_p)->size;
6925 size = (size + padding - 1) & ~(padding - 1);
6926 (*stub_sec_p)->size = size;
6929 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6930 if (htab->fix_cortex_a8)
6931 for (i = 0; i < num_a8_fixes; i++)
6933 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
6934 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
6936 if (stub_sec == NULL)
6937 return false;
6939 stub_sec->size
6940 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
6941 NULL);
6945 /* Ask the linker to do its stuff. */
6946 (*htab->layout_sections_again) ();
6947 first_veneer_scan = false;
6950 /* Add stubs for Cortex-A8 erratum fixes now. */
6951 if (htab->fix_cortex_a8)
6953 for (i = 0; i < num_a8_fixes; i++)
6955 struct elf32_arm_stub_hash_entry *stub_entry;
6956 char *stub_name = a8_fixes[i].stub_name;
6957 asection *section = a8_fixes[i].section;
6958 unsigned int section_id = a8_fixes[i].section->id;
6959 asection *link_sec = htab->stub_group[section_id].link_sec;
6960 asection *stub_sec = htab->stub_group[section_id].stub_sec;
6961 const insn_sequence *template_sequence;
6962 int template_size, size = 0;
6964 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
6965 true, false);
6966 if (stub_entry == NULL)
6968 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6969 section->owner, stub_name);
6970 return false;
6973 stub_entry->stub_sec = stub_sec;
6974 stub_entry->stub_offset = (bfd_vma) -1;
6975 stub_entry->id_sec = link_sec;
6976 stub_entry->stub_type = a8_fixes[i].stub_type;
6977 stub_entry->source_value = a8_fixes[i].offset;
6978 stub_entry->target_section = a8_fixes[i].section;
6979 stub_entry->target_value = a8_fixes[i].target_offset;
6980 stub_entry->orig_insn = a8_fixes[i].orig_insn;
6981 stub_entry->branch_type = a8_fixes[i].branch_type;
6983 size = find_stub_size_and_template (a8_fixes[i].stub_type,
6984 &template_sequence,
6985 &template_size);
6987 stub_entry->stub_size = size;
6988 stub_entry->stub_template = template_sequence;
6989 stub_entry->stub_template_size = template_size;
6992 /* Stash the Cortex-A8 erratum fix array for use later in
6993 elf32_arm_write_section(). */
6994 htab->a8_erratum_fixes = a8_fixes;
6995 htab->num_a8_erratum_fixes = num_a8_fixes;
6997 else
6999 htab->a8_erratum_fixes = NULL;
7000 htab->num_a8_erratum_fixes = 0;
7002 return ret;
7005 /* Build all the stubs associated with the current output file. The
7006 stubs are kept in a hash table attached to the main linker hash
7007 table. We also set up the .plt entries for statically linked PIC
7008 functions here. This function is called via arm_elf_finish in the
7009 linker. */
7011 bool
7012 elf32_arm_build_stubs (struct bfd_link_info *info)
7014 asection *stub_sec;
7015 struct bfd_hash_table *table;
7016 enum elf32_arm_stub_type stub_type;
7017 struct elf32_arm_link_hash_table *htab;
7019 htab = elf32_arm_hash_table (info);
7020 if (htab == NULL)
7021 return false;
7023 for (stub_sec = htab->stub_bfd->sections;
7024 stub_sec != NULL;
7025 stub_sec = stub_sec->next)
7027 bfd_size_type size;
7029 /* Ignore non-stub sections. */
7030 if (!strstr (stub_sec->name, STUB_SUFFIX))
7031 continue;
7033 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
7034 must at least be done for stub sections requiring padding and for SG
7035 veneers, to ensure that non-secure code branching to a removed SG
7036 veneer causes an error. */
7037 size = stub_sec->size;
7038 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
7039 if (stub_sec->contents == NULL && size != 0)
7040 return false;
7042 stub_sec->size = 0;
7045 /* Add new SG veneers after those already in the input import library. */
7046 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7048 bfd_vma *start_offset_p;
7049 asection **stub_sec_p;
7051 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
7052 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
7053 if (start_offset_p == NULL)
7054 continue;
7056 BFD_ASSERT (stub_sec_p != NULL);
7057 if (*stub_sec_p != NULL)
7058 (*stub_sec_p)->size = *start_offset_p;
7061 /* Build the stubs as directed by the stub hash table. */
7062 table = &htab->stub_hash_table;
7063 bfd_hash_traverse (table, arm_build_one_stub, info);
7064 if (htab->fix_cortex_a8)
7066 /* Place the Cortex-A8 stubs last. */
7067 htab->fix_cortex_a8 = -1;
7068 bfd_hash_traverse (table, arm_build_one_stub, info);
7071 return true;
7074 /* Locate the Thumb encoded calling stub for NAME. */
7076 static struct elf_link_hash_entry *
7077 find_thumb_glue (struct bfd_link_info *link_info,
7078 const char *name,
7079 char **error_message)
7081 char *tmp_name;
7082 struct elf_link_hash_entry *hash;
7083 struct elf32_arm_link_hash_table *hash_table;
7085 /* We need a pointer to the armelf specific hash table. */
7086 hash_table = elf32_arm_hash_table (link_info);
7087 if (hash_table == NULL)
7088 return NULL;
7090 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7091 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7093 BFD_ASSERT (tmp_name);
7095 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7097 hash = elf_link_hash_lookup
7098 (&(hash_table)->root, tmp_name, false, false, true);
7100 if (hash == NULL
7101 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7102 "Thumb", tmp_name, name) == -1)
7103 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7105 free (tmp_name);
7107 return hash;
7110 /* Locate the ARM encoded calling stub for NAME. */
7112 static struct elf_link_hash_entry *
7113 find_arm_glue (struct bfd_link_info *link_info,
7114 const char *name,
7115 char **error_message)
7117 char *tmp_name;
7118 struct elf_link_hash_entry *myh;
7119 struct elf32_arm_link_hash_table *hash_table;
7121 /* We need a pointer to the elfarm specific hash table. */
7122 hash_table = elf32_arm_hash_table (link_info);
7123 if (hash_table == NULL)
7124 return NULL;
7126 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7127 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7128 BFD_ASSERT (tmp_name);
7130 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7132 myh = elf_link_hash_lookup
7133 (&(hash_table)->root, tmp_name, false, false, true);
7135 if (myh == NULL
7136 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7137 "ARM", tmp_name, name) == -1)
7138 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7140 free (tmp_name);
7142 return myh;
7145 /* ARM->Thumb glue (static images):
7147 .arm
7148 __func_from_arm:
7149 ldr r12, __func_addr
7150 bx r12
7151 __func_addr:
7152 .word func @ behave as if you saw an ARM_32 reloc.
7154 (v5t static images)
7155 .arm
7156 __func_from_arm:
7157 ldr pc, __func_addr
7158 __func_addr:
7159 .word func @ behave as if you saw an ARM_32 reloc.
7161 (relocatable images)
7162 .arm
7163 __func_from_arm:
7164 ldr r12, __func_offset
7165 add r12, r12, pc
7166 bx r12
7167 __func_offset:
7168 .word func - . */
7170 #define ARM2THUMB_STATIC_GLUE_SIZE 12
7171 static const insn32 a2t1_ldr_insn = 0xe59fc000;
7172 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
7173 static const insn32 a2t3_func_addr_insn = 0x00000001;
7175 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
7176 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
7177 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
7179 #define ARM2THUMB_PIC_GLUE_SIZE 16
7180 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
7181 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
7182 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
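/* Added note (not in the original source): decoded, the constants above are
	a2t1_ldr_insn		ldr   r12, [pc, #0]
	a2t2_bx_r12_insn	bx    r12
	a2t1v5_ldr_insn		ldr   pc, [pc, #-4]
	a2t1p_ldr_insn		ldr   r12, [pc, #4]
	a2t2p_add_pc_insn	add   r12, r12, pc
	a2t3p_bx_r12_insn	bx    r12
   while a2t3_func_addr_insn and a2t2v5_func_addr_insn are placeholders for
   the __func_addr data word that follows the code in each glue sequence.  */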
7184 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
7186 .thumb .thumb
7187 .align 2 .align 2
7188 __func_from_thumb: __func_from_thumb:
7189 bx pc push {r6, lr}
7190 nop ldr r6, __func_addr
7191 .arm mov lr, pc
7192 b func bx r6
7193 .arm
7194 ;; back_to_thumb
7195 ldmia r13! {r6, lr}
7196 bx lr
7197 __func_addr:
7198 .word func */
7200 #define THUMB2ARM_GLUE_SIZE 8
7201 static const insn16 t2a1_bx_pc_insn = 0x4778;
7202 static const insn16 t2a2_noop_insn = 0x46c0;
7203 static const insn32 t2a3_b_insn = 0xea000000;
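/* Added note (not in the original source): decoded, the constants above are
	t2a1_bx_pc_insn		bx    pc
	t2a2_noop_insn		mov   r8, r8	(the canonical Thumb NOP)
	t2a3_b_insn		b     <...>	(ARM branch, offset field zero)
   matching the "bx pc / nop / .arm / b func" sequence shown in the comment
   above.  */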
7205 #define VFP11_ERRATUM_VENEER_SIZE 8
7206 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
7207 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
7209 #define ARM_BX_VENEER_SIZE 12
7210 static const insn32 armbx1_tst_insn = 0xe3100001;
7211 static const insn32 armbx2_moveq_insn = 0x01a0f000;
7212 static const insn32 armbx3_bx_insn = 0xe12fff10;
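/* Added note (not in the original source): with r0 as the register operand,
   the veneer template above decodes as
	tst    r0, #1
	moveq  pc, r0
	bx     r0
   so a destination with bit 0 clear (an ARM address) is reached via
   MOVEQ pc, while anything else falls through to BX; the register field is
   presumably substituted with the actual operand when the veneer is
   written out.  */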
7214 #ifndef ELFARM_NABI_C_INCLUDED
7215 static void
7216 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7218 asection * s;
7219 bfd_byte * contents;
7221 if (size == 0)
7223 /* Do not include empty glue sections in the output. */
7224 if (abfd != NULL)
7226 s = bfd_get_linker_section (abfd, name);
7227 if (s != NULL)
7228 s->flags |= SEC_EXCLUDE;
7230 return;
7233 BFD_ASSERT (abfd != NULL);
7235 s = bfd_get_linker_section (abfd, name);
7236 BFD_ASSERT (s != NULL);
7238 contents = (bfd_byte *) bfd_zalloc (abfd, size);
7240 BFD_ASSERT (s->size == size);
7241 s->contents = contents;
7244 bool
7245 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7247 struct elf32_arm_link_hash_table * globals;
7249 globals = elf32_arm_hash_table (info);
7250 BFD_ASSERT (globals != NULL);
7252 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7253 globals->arm_glue_size,
7254 ARM2THUMB_GLUE_SECTION_NAME);
7256 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7257 globals->thumb_glue_size,
7258 THUMB2ARM_GLUE_SECTION_NAME);
7260 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7261 globals->vfp11_erratum_glue_size,
7262 VFP11_ERRATUM_VENEER_SECTION_NAME);
7264 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7265 globals->stm32l4xx_erratum_glue_size,
7266 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7268 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7269 globals->bx_glue_size,
7270 ARM_BX_GLUE_SECTION_NAME);
7272 return true;
7275 /* Allocate space and symbols for calling a Thumb function from ARM mode.
7276 Returns the symbol identifying the stub. */
7278 static struct elf_link_hash_entry *
7279 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
7280 struct elf_link_hash_entry * h)
7282 const char * name = h->root.root.string;
7283 asection * s;
7284 char * tmp_name;
7285 struct elf_link_hash_entry * myh;
7286 struct bfd_link_hash_entry * bh;
7287 struct elf32_arm_link_hash_table * globals;
7288 bfd_vma val;
7289 bfd_size_type size;
7291 globals = elf32_arm_hash_table (link_info);
7292 BFD_ASSERT (globals != NULL);
7293 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7295 s = bfd_get_linker_section
7296 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
7298 BFD_ASSERT (s != NULL);
7300 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7301 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7302 BFD_ASSERT (tmp_name);
7304 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7306 myh = elf_link_hash_lookup
7307 (&(globals)->root, tmp_name, false, false, true);
7309 if (myh != NULL)
7311 /* We've already seen this guy. */
7312 free (tmp_name);
7313 return myh;
7316 /* The only trick here is using hash_table->arm_glue_size as the value.
7317 Even though the section isn't allocated yet, this is where we will be
7318 putting it. The +1 on the value marks that the stub has not been
7319 output yet - not that it is a Thumb function. */
7320 bh = NULL;
7321 val = globals->arm_glue_size + 1;
7322 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7323 tmp_name, BSF_GLOBAL, s, val,
7324 NULL, true, false, &bh);
7326 myh = (struct elf_link_hash_entry *) bh;
7327 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7328 myh->forced_local = 1;
7330 free (tmp_name);
7332 if (bfd_link_pic (link_info)
7333 || globals->root.is_relocatable_executable
7334 || globals->pic_veneer)
7335 size = ARM2THUMB_PIC_GLUE_SIZE;
7336 else if (globals->use_blx)
7337 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
7338 else
7339 size = ARM2THUMB_STATIC_GLUE_SIZE;
7341 s->size += size;
7342 globals->arm_glue_size += size;
7344 return myh;
7347 /* Allocate space for ARMv4 BX veneers. */
7349 static void
7350 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
7352 asection * s;
7353 struct elf32_arm_link_hash_table *globals;
7354 char *tmp_name;
7355 struct elf_link_hash_entry *myh;
7356 struct bfd_link_hash_entry *bh;
7357 bfd_vma val;
7359 /* BX PC does not need a veneer. */
7360 if (reg == 15)
7361 return;
7363 globals = elf32_arm_hash_table (link_info);
7364 BFD_ASSERT (globals != NULL);
7365 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7367 /* Check if this veneer has already been allocated. */
7368 if (globals->bx_glue_offset[reg])
7369 return;
7371 s = bfd_get_linker_section
7372 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
7374 BFD_ASSERT (s != NULL);
7376 /* Add symbol for veneer. */
7377 tmp_name = (char *)
7378 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
7379 BFD_ASSERT (tmp_name);
7381 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
7383 myh = elf_link_hash_lookup
7384 (&(globals)->root, tmp_name, false, false, false);
7386 BFD_ASSERT (myh == NULL);
7388 bh = NULL;
7389 val = globals->bx_glue_size;
7390 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7391 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7392 NULL, true, false, &bh);
7394 myh = (struct elf_link_hash_entry *) bh;
7395 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7396 myh->forced_local = 1;
7398 s->size += ARM_BX_VENEER_SIZE;
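/* Record the veneer's offset for this register.  Bit 1 appears to act as
   an "allocated" marker, in the same spirit as the +1 trick used for the
   ARM->Thumb glue above; since ARM_BX_VENEER_SIZE is a multiple of 4, the
   low bits can be masked off wherever the real offset is needed.  */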
7399 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
7400 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
7404 /* Add an entry to the code/data map for section SEC. */
7406 static void
7407 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7409 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7410 unsigned int newidx;
7412 if (sec_data->map == NULL)
7414 sec_data->map = (elf32_arm_section_map *)
7415 bfd_malloc (sizeof (elf32_arm_section_map));
7416 sec_data->mapcount = 0;
7417 sec_data->mapsize = 1;
7420 newidx = sec_data->mapcount++;
7422 if (sec_data->mapcount > sec_data->mapsize)
7424 sec_data->mapsize *= 2;
7425 sec_data->map = (elf32_arm_section_map *)
7426 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7427 * sizeof (elf32_arm_section_map));
7430 if (sec_data->map)
7432 sec_data->map[newidx].vma = vma;
7433 sec_data->map[newidx].type = type;
7438 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7439 veneers are handled for now. */
7441 static bfd_vma
7442 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7443 elf32_vfp11_erratum_list *branch,
7444 bfd *branch_bfd,
7445 asection *branch_sec,
7446 unsigned int offset)
7448 asection *s;
7449 struct elf32_arm_link_hash_table *hash_table;
7450 char *tmp_name;
7451 struct elf_link_hash_entry *myh;
7452 struct bfd_link_hash_entry *bh;
7453 bfd_vma val;
7454 struct _arm_elf_section_data *sec_data;
7455 elf32_vfp11_erratum_list *newerr;
7457 hash_table = elf32_arm_hash_table (link_info);
7458 BFD_ASSERT (hash_table != NULL);
7459 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7461 s = bfd_get_linker_section
7462 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7464 BFD_ASSERT (s != NULL);
7466 sec_data = elf32_arm_section_data (s);
7468 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7469 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7470 BFD_ASSERT (tmp_name);
7472 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7473 hash_table->num_vfp11_fixes);
7475 myh = elf_link_hash_lookup
7476 (&(hash_table)->root, tmp_name, false, false, false);
7478 BFD_ASSERT (myh == NULL);
7480 bh = NULL;
7481 val = hash_table->vfp11_erratum_glue_size;
7482 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7483 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7484 NULL, true, false, &bh);
7486 myh = (struct elf_link_hash_entry *) bh;
7487 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7488 myh->forced_local = 1;
7490 /* Link veneer back to calling location. */
7491 sec_data->erratumcount += 1;
7492 newerr = (elf32_vfp11_erratum_list *)
7493 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7495 newerr->type = VFP11_ERRATUM_ARM_VENEER;
7496 newerr->vma = -1;
7497 newerr->u.v.branch = branch;
7498 newerr->u.v.id = hash_table->num_vfp11_fixes;
7499 branch->u.b.veneer = newerr;
7501 newerr->next = sec_data->erratumlist;
7502 sec_data->erratumlist = newerr;
7504 /* A symbol for the return from the veneer. */
7505 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7506 hash_table->num_vfp11_fixes);
7508 myh = elf_link_hash_lookup
7509 (&(hash_table)->root, tmp_name, false, false, false);
7511 if (myh != NULL)
7512 abort ();
7514 bh = NULL;
7515 val = offset + 4;
7516 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7517 branch_sec, val, NULL, true, false, &bh);
7519 myh = (struct elf_link_hash_entry *) bh;
7520 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7521 myh->forced_local = 1;
7523 free (tmp_name);
7525 /* Generate a mapping symbol for the veneer section, and explicitly add an
7526 entry for that symbol to the code/data map for the section. */
7527 if (hash_table->vfp11_erratum_glue_size == 0)
7529 bh = NULL;
7530 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7531 ever requires this erratum fix. */
7532 _bfd_generic_link_add_one_symbol (link_info,
7533 hash_table->bfd_of_glue_owner, "$a",
7534 BSF_LOCAL, s, 0, NULL,
7535 true, false, &bh);
7537 myh = (struct elf_link_hash_entry *) bh;
7538 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7539 myh->forced_local = 1;
7541 /* The elf32_arm_init_maps function only cares about symbols from input
7542 BFDs. We must make a note of this generated mapping symbol
7543 ourselves so that code byteswapping works properly in
7544 elf32_arm_write_section. */
7545 elf32_arm_section_map_add (s, 'a', 0);
7548 s->size += VFP11_ERRATUM_VENEER_SIZE;
7549 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7550 hash_table->num_vfp11_fixes++;
7552 /* The offset of the veneer. */
7553 return val;
7556 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
7557 veneers need to be handled because they are only used on Cortex-M. */
7559 static bfd_vma
7560 record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
7561 elf32_stm32l4xx_erratum_list *branch,
7562 bfd *branch_bfd,
7563 asection *branch_sec,
7564 unsigned int offset,
7565 bfd_size_type veneer_size)
7567 asection *s;
7568 struct elf32_arm_link_hash_table *hash_table;
7569 char *tmp_name;
7570 struct elf_link_hash_entry *myh;
7571 struct bfd_link_hash_entry *bh;
7572 bfd_vma val;
7573 struct _arm_elf_section_data *sec_data;
7574 elf32_stm32l4xx_erratum_list *newerr;
7576 hash_table = elf32_arm_hash_table (link_info);
7577 BFD_ASSERT (hash_table != NULL);
7578 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7580 s = bfd_get_linker_section
7581 (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7583 BFD_ASSERT (s != NULL);
7585 sec_data = elf32_arm_section_data (s);
7587 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7588 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7589 BFD_ASSERT (tmp_name);
7591 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7592 hash_table->num_stm32l4xx_fixes);
7594 myh = elf_link_hash_lookup
7595 (&(hash_table)->root, tmp_name, false, false, false);
7597 BFD_ASSERT (myh == NULL);
7599 bh = NULL;
7600 val = hash_table->stm32l4xx_erratum_glue_size;
7601 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7602 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7603 NULL, true, false, &bh);
7605 myh = (struct elf_link_hash_entry *) bh;
7606 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7607 myh->forced_local = 1;
7609 /* Link veneer back to calling location. */
7610 sec_data->stm32l4xx_erratumcount += 1;
7611 newerr = (elf32_stm32l4xx_erratum_list *)
7612 bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));
7614 newerr->type = STM32L4XX_ERRATUM_VENEER;
7615 newerr->vma = -1;
7616 newerr->u.v.branch = branch;
7617 newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
7618 branch->u.b.veneer = newerr;
7620 newerr->next = sec_data->stm32l4xx_erratumlist;
7621 sec_data->stm32l4xx_erratumlist = newerr;
7623 /* A symbol for the return from the veneer. */
7624 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7625 hash_table->num_stm32l4xx_fixes);
7627 myh = elf_link_hash_lookup
7628 (&(hash_table)->root, tmp_name, false, false, false);
7630 if (myh != NULL)
7631 abort ();
7633 bh = NULL;
7634 val = offset + 4;
7635 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7636 branch_sec, val, NULL, true, false, &bh);
7638 myh = (struct elf_link_hash_entry *) bh;
7639 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7640 myh->forced_local = 1;
7642 free (tmp_name);
7644 /* Generate a mapping symbol for the veneer section, and explicitly add an
7645 entry for that symbol to the code/data map for the section. */
7646 if (hash_table->stm32l4xx_erratum_glue_size == 0)
7648 bh = NULL;
7649 /* Creates a THUMB symbol since there is no other choice. */
7650 _bfd_generic_link_add_one_symbol (link_info,
7651 hash_table->bfd_of_glue_owner, "$t",
7652 BSF_LOCAL, s, 0, NULL,
7653 true, false, &bh);
7655 myh = (struct elf_link_hash_entry *) bh;
7656 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7657 myh->forced_local = 1;
7659 /* The elf32_arm_init_maps function only cares about symbols from input
7660 BFDs. We must make a note of this generated mapping symbol
7661 ourselves so that code byteswapping works properly in
7662 elf32_arm_write_section. */
7663 elf32_arm_section_map_add (s, 't', 0);
7666 s->size += veneer_size;
7667 hash_table->stm32l4xx_erratum_glue_size += veneer_size;
7668 hash_table->num_stm32l4xx_fixes++;
7670 /* The offset of the veneer. */
7671 return val;
7674 #define ARM_GLUE_SECTION_FLAGS \
7675 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7676 | SEC_READONLY | SEC_LINKER_CREATED)
7678 /* Create a fake section for use by the ARM backend of the linker. */
7680 static bool
7681 arm_make_glue_section (bfd * abfd, const char * name)
7683 asection * sec;
7685 sec = bfd_get_linker_section (abfd, name);
7686 if (sec != NULL)
7687 /* Already made. */
7688 return true;
7690 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7692 if (sec == NULL
7693 || !bfd_set_section_alignment (sec, 2))
7694 return false;
7696 /* Set the gc mark to prevent the section from being removed by garbage
7697 collection, despite the fact that no relocs refer to this section. */
7698 sec->gc_mark = 1;
7700 return true;
7703 /* Set size of .plt entries. This function is called from the
7704 linker scripts in ld/emultempl/{armelf}.em. */
7706 void
7707 bfd_elf32_arm_use_long_plt (void)
7709 elf32_arm_use_long_plt_entry = true;
7712 /* Add the glue sections to ABFD. This function is called from the
7713 linker scripts in ld/emultempl/{armelf}.em. */
7715 bool
7716 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7717 struct bfd_link_info *info)
7719 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7720 bool dostm32l4xx = globals
7721 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7722 bool addglue;
7724 /* If we are only performing a partial
7725 link do not bother adding the glue. */
7726 if (bfd_link_relocatable (info))
7727 return true;
7729 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7730 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7731 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7732 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7734 if (!dostm32l4xx)
7735 return addglue;
7737 return addglue
7738 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7741 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7742 ensures they are not marked for deletion by
7743 strip_excluded_output_sections () when veneers are going to be created
7744 later. Not doing so would trigger an assert on empty section size in
7745 lang_size_sections_1 (). */
7747 void
7748 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7750 enum elf32_arm_stub_type stub_type;
7752 /* If we are only performing a partial
7753 link do not bother keeping the stub output sections. */
7754 if (bfd_link_relocatable (info))
7755 return;
7757 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7759 asection *out_sec;
7760 const char *out_sec_name;
7762 if (!arm_dedicated_stub_output_section_required (stub_type))
7763 continue;
7765 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7766 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7767 if (out_sec != NULL)
7768 out_sec->flags |= SEC_KEEP;
7772 /* Select a BFD to be used to hold the sections used by the glue code.
7773 This function is called from the linker scripts in ld/emultempl/
7774 {armelf/pe}.em. */
7776 bool
7777 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7779 struct elf32_arm_link_hash_table *globals;
7781 /* If we are only performing a partial link
7782 do not bother getting a bfd to hold the glue. */
7783 if (bfd_link_relocatable (info))
7784 return true;
7786 /* Make sure we don't attach the glue sections to a dynamic object. */
7787 BFD_ASSERT (!(abfd->flags & DYNAMIC));
7789 globals = elf32_arm_hash_table (info);
7790 BFD_ASSERT (globals != NULL);
7792 if (globals->bfd_of_glue_owner != NULL)
7793 return true;
7795 /* Save the bfd for later use. */
7796 globals->bfd_of_glue_owner = abfd;
7798 return true;
7801 static void
7802 check_use_blx (struct elf32_arm_link_hash_table *globals)
7804 int cpu_arch;
7806 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7807 Tag_CPU_arch);
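/* Heuristic, mirroring the checks below: when the ARM1176 BLX erratum
   workaround is enabled, only use BLX on architectures where an ARM1176
   cannot be the target (ARMv6T2, or anything newer than ARMv6K);
   otherwise any architecture newer than ARMv4T is assumed to provide a
   usable BLX.  */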
7809 if (globals->fix_arm1176)
7811 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7812 globals->use_blx = 1;
7814 else
7816 if (cpu_arch > TAG_CPU_ARCH_V4T)
7817 globals->use_blx = 1;
7821 bool
7822 bfd_elf32_arm_process_before_allocation (bfd *abfd,
7823 struct bfd_link_info *link_info)
7825 Elf_Internal_Shdr *symtab_hdr;
7826 Elf_Internal_Rela *internal_relocs = NULL;
7827 Elf_Internal_Rela *irel, *irelend;
7828 bfd_byte *contents = NULL;
7830 asection *sec;
7831 struct elf32_arm_link_hash_table *globals;
7833 /* If we are only performing a partial link do not bother
7834 to construct any glue. */
7835 if (bfd_link_relocatable (link_info))
7836 return true;
7838 /* Here we have a bfd that is to be included in the link. We have a
7839 hook to do reloc rummaging, before section sizes are nailed down. */
7840 globals = elf32_arm_hash_table (link_info);
7841 BFD_ASSERT (globals != NULL);
7843 check_use_blx (globals);
7845 if (globals->byteswap_code && !bfd_big_endian (abfd))
7847 _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
7848 abfd);
7849 return false;
7852 /* PR 5398: If we have not decided to include any loadable sections in
7853 the output then we will not have a glue owner bfd. This is OK, it
7854 just means that there is nothing else for us to do here. */
7855 if (globals->bfd_of_glue_owner == NULL)
7856 return true;
7858 /* Rummage around all the relocs and map the glue vectors. */
7859 sec = abfd->sections;
7861 if (sec == NULL)
7862 return true;
7864 for (; sec != NULL; sec = sec->next)
7866 if (sec->reloc_count == 0)
7867 continue;
7869 if ((sec->flags & SEC_EXCLUDE) != 0)
7870 continue;
7872 symtab_hdr = & elf_symtab_hdr (abfd);
7874 /* Load the relocs. */
7875 internal_relocs
7876 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, false);
7878 if (internal_relocs == NULL)
7879 goto error_return;
7881 irelend = internal_relocs + sec->reloc_count;
7882 for (irel = internal_relocs; irel < irelend; irel++)
7884 long r_type;
7885 unsigned long r_index;
7887 struct elf_link_hash_entry *h;
7889 r_type = ELF32_R_TYPE (irel->r_info);
7890 r_index = ELF32_R_SYM (irel->r_info);
7892 /* These are the only relocation types we care about. */
7893 if ( r_type != R_ARM_PC24
7894 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
7895 continue;
7897 /* Get the section contents if we haven't done so already. */
7898 if (contents == NULL)
7900 /* Get cached copy if it exists. */
7901 if (elf_section_data (sec)->this_hdr.contents != NULL)
7902 contents = elf_section_data (sec)->this_hdr.contents;
7903 else
7905 /* Go get them off disk. */
7906 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7907 goto error_return;
7911 if (r_type == R_ARM_V4BX)
7913 int reg;
7915 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
7916 record_arm_bx_glue (link_info, reg);
7917 continue;
7920 /* If the relocation is not against a symbol it cannot concern us. */
7921 h = NULL;
7923 /* We don't care about local symbols. */
7924 if (r_index < symtab_hdr->sh_info)
7925 continue;
7927 /* This is an external symbol. */
7928 r_index -= symtab_hdr->sh_info;
7929 h = (struct elf_link_hash_entry *)
7930 elf_sym_hashes (abfd)[r_index];
7932 /* If the relocation is against a static symbol it must be within
7933 the current section and so cannot be a cross ARM/Thumb relocation. */
7934 if (h == NULL)
7935 continue;
7937 /* If the call will go through a PLT entry then we do not need
7938 glue. */
7939 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
7940 continue;
7942 switch (r_type)
7944 case R_ARM_PC24:
7945 /* This one is a call from arm code. We need to look up
7946 the target of the call. If it is a thumb target, we
7947 insert glue. */
7948 if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
7949 == ST_BRANCH_TO_THUMB)
7950 record_arm_to_thumb_glue (link_info, h);
7951 break;
7953 default:
7954 abort ();
7958 if (elf_section_data (sec)->this_hdr.contents != contents)
7959 free (contents);
7960 contents = NULL;
7962 if (elf_section_data (sec)->relocs != internal_relocs)
7963 free (internal_relocs);
7964 internal_relocs = NULL;
7967 return true;
7969 error_return:
7970 if (elf_section_data (sec)->this_hdr.contents != contents)
7971 free (contents);
7972 if (elf_section_data (sec)->relocs != internal_relocs)
7973 free (internal_relocs);
7975 return false;
7977 #endif
7980 /* Initialise maps of ARM/Thumb/data for input BFDs. */
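/* Mapping symbols follow the AAELF convention: "$a" starts a run of ARM
   code, "$t" Thumb code and "$d" data.  Only the second character of the
   symbol name is stored as the span type (hence name[1] below).  */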
7982 void
7983 bfd_elf32_arm_init_maps (bfd *abfd)
7985 Elf_Internal_Sym *isymbuf;
7986 Elf_Internal_Shdr *hdr;
7987 unsigned int i, localsyms;
7989 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
7990 if (! is_arm_elf (abfd))
7991 return;
7993 if ((abfd->flags & DYNAMIC) != 0)
7994 return;
7996 hdr = & elf_symtab_hdr (abfd);
7997 localsyms = hdr->sh_info;
7999 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
8000 should contain the number of local symbols, which should come before any
8001 global symbols. Mapping symbols are always local. */
8002 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
8003 NULL);
8005 /* No internal symbols read? Skip this BFD. */
8006 if (isymbuf == NULL)
8007 return;
8009 for (i = 0; i < localsyms; i++)
8011 Elf_Internal_Sym *isym = &isymbuf[i];
8012 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
8013 const char *name;
8015 if (sec != NULL
8016 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
8018 name = bfd_elf_string_from_elf_section (abfd,
8019 hdr->sh_link, isym->st_name);
8021 if (bfd_is_arm_special_symbol_name (name,
8022 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
8023 elf32_arm_section_map_add (sec, name[1], isym->st_value);
8029 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
8030 say what they wanted. */
8032 void
8033 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
8035 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8036 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8038 if (globals == NULL)
8039 return;
8041 if (globals->fix_cortex_a8 == -1)
8043 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
8044 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
8045 && (out_attr[Tag_CPU_arch_profile].i == 'A'
8046 || out_attr[Tag_CPU_arch_profile].i == 0))
8047 globals->fix_cortex_a8 = 1;
8048 else
8049 globals->fix_cortex_a8 = 0;
8054 void
8055 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8057 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8058 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8060 if (globals == NULL)
8061 return;
8062 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8063 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8065 switch (globals->vfp11_fix)
8067 case BFD_ARM_VFP11_FIX_DEFAULT:
8068 case BFD_ARM_VFP11_FIX_NONE:
8069 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8070 break;
8072 default:
8073 /* Give a warning, but do as the user requests anyway. */
8074 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8075 "workaround is not necessary for target architecture"), obfd);
8078 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8079 /* For earlier architectures, we might need the workaround, but do not
8080 enable it by default. If the user is running with broken hardware, they
8081 must enable the erratum fix explicitly. */
8082 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8085 void
8086 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8088 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8089 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8091 if (globals == NULL)
8092 return;
8094 /* We assume only Cortex-M4 may require the fix. */
8095 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8096 || out_attr[Tag_CPU_arch_profile].i != 'M')
8098 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8099 /* Give a warning, but do as the user requests anyway. */
8100 _bfd_error_handler
8101 (_("%pB: warning: selected STM32L4XX erratum "
8102 "workaround is not necessary for target architecture"), obfd);
8106 enum bfd_arm_vfp11_pipe
8108 VFP11_FMAC,
8109 VFP11_LS,
8110 VFP11_DS,
8111 VFP11_BAD
8114 /* Return a VFP register number. This is encoded as RX:X for single-precision
8115 registers, or X:RX for double-precision registers, where RX is the group of
8116 four bits in the instruction encoding and X is the single extension bit.
8117 RX and X fields are specified using their lowest (starting) bit. The return
8118 value is:
8120 0...31: single-precision registers s0...s31
8121 32...63: double-precision registers d0...d31.
8123 Although X should be zero for VFP11 (encoding d0...d15 only), we might
8124 encounter VFP3 instructions, so we allow the full range for DP registers. */
8126 static unsigned int
8127 bfd_arm_vfp11_regno (unsigned int insn, bool is_double, unsigned int rx,
8128 unsigned int x)
8130 if (is_double)
8131 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8132 else
8133 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
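/* Worked example: with RX=12 and X=22 (the Fd/D fields of a data-processing
   instruction), Fd=0b1010 and D=1 give (0b1010 << 1) | 1 = 21, i.e. s21, in
   single precision, and (0b1010 | (1 << 4)) + 32 = 58, i.e. d26 in the
   32...63 numbering used here, in double precision.  */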
8136 /* Set bits in *WMASK according to a register number REG as encoded by
8137 bfd_arm_vfp11_regno(). Ignore d16-d31. */
8139 static void
8140 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
8142 if (reg < 32)
8143 *wmask |= 1 << reg;
8144 else if (reg < 48)
8145 *wmask |= 3 << ((reg - 32) * 2);
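/* For instance, d3 (REG == 35) sets bits 6 and 7, i.e. the two
   single-precision registers s6/s7 that alias it; d16-d31 (REG >= 48) fall
   through and are ignored, as VFP11 does not implement them.  */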
8148 /* Return TRUE if WMASK overwrites anything in REGS. */
8150 static bool
8151 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8153 int i;
8155 for (i = 0; i < numregs; i++)
8157 unsigned int reg = regs[i];
8159 if (reg < 32 && (wmask & (1 << reg)) != 0)
8160 return true;
8162 reg -= 32;
8164 if (reg >= 16)
8165 continue;
8167 if ((wmask & (3 << (reg * 2))) != 0)
8168 return true;
8171 return false;
8174 /* In this function, we're interested in two things: finding input registers
8175 for VFP data-processing instructions, and finding the set of registers which
8176 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
8177 hold the written set, so FLDM etc. are easy to deal with (we're only
8178 interested in 32 SP registers or 16 DP registers, due to the VFP version
8179 implemented by the chip in question). DP registers are marked by setting
8180 both SP registers in the write mask. */
8182 static enum bfd_arm_vfp11_pipe
8183 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
8184 int *numregs)
8186 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
8187 bool is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
8189 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
8191 unsigned int pqrs;
8192 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8193 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8195 pqrs = ((insn & 0x00800000) >> 20)
8196 | ((insn & 0x00300000) >> 19)
8197 | ((insn & 0x00000040) >> 6);
8199 switch (pqrs)
8201 case 0: /* fmac[sd]. */
8202 case 1: /* fnmac[sd]. */
8203 case 2: /* fmsc[sd]. */
8204 case 3: /* fnmsc[sd]. */
8205 vpipe = VFP11_FMAC;
8206 bfd_arm_vfp11_write_mask (destmask, fd);
8207 regs[0] = fd;
8208 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
8209 regs[2] = fm;
8210 *numregs = 3;
8211 break;
8213 case 4: /* fmul[sd]. */
8214 case 5: /* fnmul[sd]. */
8215 case 6: /* fadd[sd]. */
8216 case 7: /* fsub[sd]. */
8217 vpipe = VFP11_FMAC;
8218 goto vfp_binop;
8220 case 8: /* fdiv[sd]. */
8221 vpipe = VFP11_DS;
8222 vfp_binop:
8223 bfd_arm_vfp11_write_mask (destmask, fd);
8224 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
8225 regs[1] = fm;
8226 *numregs = 2;
8227 break;
8229 case 15: /* extended opcode. */
8231 unsigned int extn = ((insn >> 15) & 0x1e)
8232 | ((insn >> 7) & 1);
8234 switch (extn)
8236 case 0: /* fcpy[sd]. */
8237 case 1: /* fabs[sd]. */
8238 case 2: /* fneg[sd]. */
8239 case 8: /* fcmp[sd]. */
8240 case 9: /* fcmpe[sd]. */
8241 case 10: /* fcmpz[sd]. */
8242 case 11: /* fcmpez[sd]. */
8243 case 16: /* fuito[sd]. */
8244 case 17: /* fsito[sd]. */
8245 case 24: /* ftoui[sd]. */
8246 case 25: /* ftouiz[sd]. */
8247 case 26: /* ftosi[sd]. */
8248 case 27: /* ftosiz[sd]. */
8249 /* These instructions will not bounce due to underflow. */
8250 *numregs = 0;
8251 vpipe = VFP11_FMAC;
8252 break;
8254 case 3: /* fsqrt[sd]. */
8255 /* fsqrt cannot underflow, but it can (perhaps) overwrite
8256 registers to cause the erratum in previous instructions. */
8257 bfd_arm_vfp11_write_mask (destmask, fd);
8258 vpipe = VFP11_DS;
8259 break;
8261 case 15: /* fcvt{ds,sd}. */
8263 int rnum = 0;
8265 bfd_arm_vfp11_write_mask (destmask, fd);
8267 /* Only FCVTSD can underflow. */
8268 if ((insn & 0x100) != 0)
8269 regs[rnum++] = fm;
8271 *numregs = rnum;
8273 vpipe = VFP11_FMAC;
8275 break;
8277 default:
8278 return VFP11_BAD;
8281 break;
8283 default:
8284 return VFP11_BAD;
8287 /* Two-register transfer. */
8288 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
8290 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8292 if ((insn & 0x100000) == 0)
8294 if (is_double)
8295 bfd_arm_vfp11_write_mask (destmask, fm);
8296 else
8298 bfd_arm_vfp11_write_mask (destmask, fm);
8299 bfd_arm_vfp11_write_mask (destmask, fm + 1);
8303 vpipe = VFP11_LS;
8305 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
8307 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8308 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
8310 switch (puw)
8312 case 0: /* Two-reg transfer. We should catch these above. */
8313 abort ();
8315 case 2: /* fldm[sdx]. */
8316 case 3:
8317 case 5:
8319 unsigned int i, offset = insn & 0xff;
8321 if (is_double)
8322 offset >>= 1;
8324 for (i = fd; i < fd + offset; i++)
8325 bfd_arm_vfp11_write_mask (destmask, i);
8327 break;
8329 case 4: /* fld[sd]. */
8330 case 6:
8331 bfd_arm_vfp11_write_mask (destmask, fd);
8332 break;
8334 default:
8335 return VFP11_BAD;
8338 vpipe = VFP11_LS;
8340 /* Single-register transfer. Note L==0. */
8341 else if ((insn & 0x0f100e10) == 0x0e000a10)
8343 unsigned int opcode = (insn >> 21) & 7;
8344 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
8346 switch (opcode)
8348 case 0: /* fmsr/fmdlr. */
8349 case 1: /* fmdhr. */
8350 /* Mark fmdhr and fmdlr as writing to the whole of the DP
8351 destination register. I don't know if this is exactly right,
8352 but it is the conservative choice. */
8353 bfd_arm_vfp11_write_mask (destmask, fn);
8354 break;
8356 case 7: /* fmxr. */
8357 break;
8360 vpipe = VFP11_LS;
8363 return vpipe;
8367 static int elf32_arm_compare_mapping (const void * a, const void * b);
8370 /* Look for potentially-troublesome code sequences which might trigger the
8371 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8372 (available from ARM) for details of the erratum. A short version is
8373 described in ld.texinfo. */
8375 bool
8376 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8378 asection *sec;
8379 bfd_byte *contents = NULL;
8380 int state = 0;
8381 int regs[3], numregs = 0;
8382 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8383 int use_vector;
8385 if (globals == NULL)
8386 return false;
use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8388 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8389 The states transition as follows:
8391 0 -> 1 (vector) or 0 -> 2 (scalar)
8392 A VFP FMAC-pipeline instruction has been seen. Fill
8393 regs[0]..regs[numregs-1] with its input operands. Remember this
8394 instruction in 'first_fmac'.
8396 1 -> 2
8397 Any instruction, except for a VFP instruction which overwrites
8398 regs[*].
8400 1 -> 3 [ -> 0 ] or
8401 2 -> 3 [ -> 0 ]
8402 A VFP instruction has been seen which overwrites any of regs[*].
8403 We must make a veneer! Reset state to 0 before examining next
8404 instruction.
8406 2 -> 0
8407 If we fail to match anything in state 2, reset to state 0 and reset
8408 the instruction pointer to the instruction after 'first_fmac'.
8410 If the VFP11 vector mode is in use, there must be at least two unrelated
8411 instructions between anti-dependent VFP11 instructions to properly avoid
8412 triggering the erratum, hence the use of the extra state 1. */
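/* As a sketch, in scalar mode the following pair would be flagged and get
   a veneer, because the second instruction overwrites an input register of
   the first:

	fmacs	s4, s1, s2	@ state 0 -> 2, inputs {s4, s1, s2}
	fsubs	s1, s5, s6	@ writes s1 -> state 3, veneer needed  */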
8414 /* If we are only performing a partial link do not bother
8415 to construct any glue. */
8416 if (bfd_link_relocatable (link_info))
8417 return true;
8419 /* Skip if this bfd does not correspond to an ELF image. */
8420 if (! is_arm_elf (abfd))
8421 return true;
8423 /* We should have chosen a fix type by the time we get here. */
8424 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8426 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8427 return true;
8429 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8430 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8431 return true;
8433 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8435 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8436 struct _arm_elf_section_data *sec_data;
8438 /* If we don't have executable progbits, we're not interested in this
8439 section. Also skip if section is to be excluded. */
8440 if (elf_section_type (sec) != SHT_PROGBITS
8441 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8442 || (sec->flags & SEC_EXCLUDE) != 0
8443 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8444 || sec->output_section == bfd_abs_section_ptr
8445 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8446 continue;
8448 sec_data = elf32_arm_section_data (sec);
8450 if (sec_data->mapcount == 0)
8451 continue;
8453 if (elf_section_data (sec)->this_hdr.contents != NULL)
8454 contents = elf_section_data (sec)->this_hdr.contents;
8455 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8456 goto error_return;
8458 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8459 elf32_arm_compare_mapping);
8461 for (span = 0; span < sec_data->mapcount; span++)
8463 unsigned int span_start = sec_data->map[span].vma;
8464 unsigned int span_end = (span == sec_data->mapcount - 1)
8465 ? sec->size : sec_data->map[span + 1].vma;
8466 char span_type = sec_data->map[span].type;
8468 /* FIXME: Only ARM mode is supported at present. We may need to
8469 support Thumb-2 mode also at some point. */
8470 if (span_type != 'a')
8471 continue;
8473 for (i = span_start; i < span_end;)
8475 unsigned int next_i = i + 4;
8476 unsigned int insn = bfd_big_endian (abfd)
8477 ? (((unsigned) contents[i] << 24)
8478 | (contents[i + 1] << 16)
8479 | (contents[i + 2] << 8)
8480 | contents[i + 3])
8481 : (((unsigned) contents[i + 3] << 24)
8482 | (contents[i + 2] << 16)
8483 | (contents[i + 1] << 8)
8484 | contents[i]);
8485 unsigned int writemask = 0;
8486 enum bfd_arm_vfp11_pipe vpipe;
8488 switch (state)
8490 case 0:
8491 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8492 &numregs);
8493 /* I'm assuming the VFP11 erratum can trigger with denorm
8494 operands on either the FMAC or the DS pipeline. This might
8495 lead to slightly overenthusiastic veneer insertion. */
8496 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8498 state = use_vector ? 1 : 2;
8499 first_fmac = i;
8500 veneer_of_insn = insn;
8502 break;
8504 case 1:
8506 int other_regs[3], other_numregs;
8507 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8508 other_regs,
8509 &other_numregs);
8510 if (vpipe != VFP11_BAD
8511 && bfd_arm_vfp11_antidependency (writemask, regs,
8512 numregs))
8513 state = 3;
8514 else
8515 state = 2;
8517 break;
8519 case 2:
8521 int other_regs[3], other_numregs;
8522 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8523 other_regs,
8524 &other_numregs);
8525 if (vpipe != VFP11_BAD
8526 && bfd_arm_vfp11_antidependency (writemask, regs,
8527 numregs))
8528 state = 3;
8529 else
8531 state = 0;
8532 next_i = first_fmac + 4;
8535 break;
8537 case 3:
8538 abort (); /* Should be unreachable. */
8541 if (state == 3)
8543 elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
8544 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8546 elf32_arm_section_data (sec)->erratumcount += 1;
8548 newerr->u.b.vfp_insn = veneer_of_insn;
8550 switch (span_type)
8552 case 'a':
8553 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8554 break;
8556 default:
8557 abort ();
8560 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8561 first_fmac);
8563 newerr->vma = -1;
8565 newerr->next = sec_data->erratumlist;
8566 sec_data->erratumlist = newerr;
8568 state = 0;
8571 i = next_i;
8575 if (elf_section_data (sec)->this_hdr.contents != contents)
8576 free (contents);
8577 contents = NULL;
8580 return true;
8582 error_return:
8583 if (elf_section_data (sec)->this_hdr.contents != contents)
8584 free (contents);
8586 return false;
8589 /* Find virtual-memory addresses for VFP11 erratum veneers and their return
8590 locations after sections have been laid out, using specially-named symbols. */
8592 void
8593 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8594 struct bfd_link_info *link_info)
8596 asection *sec;
8597 struct elf32_arm_link_hash_table *globals;
8598 char *tmp_name;
8600 if (bfd_link_relocatable (link_info))
8601 return;
8603 /* Skip if this bfd does not correspond to an ELF image. */
8604 if (! is_arm_elf (abfd))
8605 return;
8607 globals = elf32_arm_hash_table (link_info);
8608 if (globals == NULL)
8609 return;
8611 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8612 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8613 BFD_ASSERT (tmp_name);
8615 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8617 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8618 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8620 for (; errnode != NULL; errnode = errnode->next)
8622 struct elf_link_hash_entry *myh;
8623 bfd_vma vma;
8625 switch (errnode->type)
8627 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8628 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8629 /* Find veneer symbol. */
8630 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8631 errnode->u.b.veneer->u.v.id);
8633 myh = elf_link_hash_lookup
8634 (&(globals)->root, tmp_name, false, false, true);
8636 if (myh == NULL)
8637 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8638 abfd, "VFP11", tmp_name);
8640 vma = myh->root.u.def.section->output_section->vma
8641 + myh->root.u.def.section->output_offset
8642 + myh->root.u.def.value;
8644 errnode->u.b.veneer->vma = vma;
8645 break;
8647 case VFP11_ERRATUM_ARM_VENEER:
8648 case VFP11_ERRATUM_THUMB_VENEER:
8649 /* Find return location. */
8650 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8651 errnode->u.v.id);
8653 myh = elf_link_hash_lookup
8654 (&(globals)->root, tmp_name, false, false, true);
8656 if (myh == NULL)
8657 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8658 abfd, "VFP11", tmp_name);
8660 vma = myh->root.u.def.section->output_section->vma
8661 + myh->root.u.def.section->output_offset
8662 + myh->root.u.def.value;
8664 errnode->u.v.branch->vma = vma;
8665 break;
8667 default:
8668 abort ();
8673 free (tmp_name);
8676 /* Find virtual-memory addresses for STM32L4XX erratum veneers and their
8677 return locations after sections have been laid out, using
8678 specially-named symbols. */
8680 void
8681 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8682 struct bfd_link_info *link_info)
8684 asection *sec;
8685 struct elf32_arm_link_hash_table *globals;
8686 char *tmp_name;
8688 if (bfd_link_relocatable (link_info))
8689 return;
8691 /* Skip if this bfd does not correspond to an ELF image. */
8692 if (! is_arm_elf (abfd))
8693 return;
8695 globals = elf32_arm_hash_table (link_info);
8696 if (globals == NULL)
8697 return;
8699 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8700 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8701 BFD_ASSERT (tmp_name);
8703 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8705 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8706 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8708 for (; errnode != NULL; errnode = errnode->next)
8710 struct elf_link_hash_entry *myh;
8711 bfd_vma vma;
8713 switch (errnode->type)
8715 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8716 /* Find veneer symbol. */
8717 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8718 errnode->u.b.veneer->u.v.id);
8720 myh = elf_link_hash_lookup
8721 (&(globals)->root, tmp_name, false, false, true);
8723 if (myh == NULL)
8724 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8725 abfd, "STM32L4XX", tmp_name);
8727 vma = myh->root.u.def.section->output_section->vma
8728 + myh->root.u.def.section->output_offset
8729 + myh->root.u.def.value;
8731 errnode->u.b.veneer->vma = vma;
8732 break;
8734 case STM32L4XX_ERRATUM_VENEER:
8735 /* Find return location. */
8736 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8737 errnode->u.v.id);
8739 myh = elf_link_hash_lookup
8740 (&(globals)->root, tmp_name, false, false, true);
8742 if (myh == NULL)
8743 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8744 abfd, "STM32L4XX", tmp_name);
8746 vma = myh->root.u.def.section->output_section->vma
8747 + myh->root.u.def.section->output_offset
8748 + myh->root.u.def.value;
8750 errnode->u.v.branch->vma = vma;
8751 break;
8753 default:
8754 abort ();
8759 free (tmp_name);
8762 static inline bool
8763 is_thumb2_ldmia (const insn32 insn)
8765 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8766 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
8767 return (insn & 0xffd02000) == 0xe8900000;
8770 static inline bool
8771 is_thumb2_ldmdb (const insn32 insn)
8773 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8774 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8775 return (insn & 0xffd02000) == 0xe9100000;
8778 static inline bool
8779 is_thumb2_vldm (const insn32 insn)
8781 /* A6.5 Extension register load or store instruction
8782 A7.7.229
8783 We look for SP 32-bit and DP 64-bit registers.
8784 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8785 <list> is consecutive 64-bit registers
8786 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8787 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8788 <list> is consecutive 32-bit registers
8789 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8790 if P==0 && U==1 && W==1 && Rn=1101 VPOP
8791 if PUW=010 || PUW=011 || PUW=101 VLDM. */
8792 return
8793 (((insn & 0xfe100f00) == 0xec100b00) ||
8794 ((insn & 0xfe100f00) == 0xec100a00))
8795 && /* (IA without !). */
8796 (((((insn << 7) >> 28) & 0xd) == 0x4)
8797 /* (IA with !), includes VPOP (when reg number is SP). */
8798 || ((((insn << 7) >> 28) & 0xd) == 0x5)
8799 /* (DB with !). */
8800 || ((((insn << 7) >> 28) & 0xd) == 0x9));
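/* The (insn << 7) >> 28 trick above extracts the P-U-D-W nibble (bits
   24..21); masking with 0xd drops D, so the comparisons test PUW = 010
   (IA without writeback), 011 (IA with writeback, which includes VPOP)
   and 101 (DB with writeback), matching the inline comments.  */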
8803 /* STM STM32L4XX erratum: This function assumes that it receives an LDM or
8804 VLDM opcode and:
8805 - computes the number and the mode of memory accesses
8806 - decides if the replacement should be done:
8807 . replaces only accesses of more than 8 words
8808 . or (testing purposes only) replaces all accesses. */
8810 static bool
8811 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8812 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8814 int nb_words = 0;
8816 /* The field encoding the register list is the same for both LDMIA
8817 and LDMDB encodings. */
8818 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8819 nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8820 else if (is_thumb2_vldm (insn))
8821 nb_words = (insn & 0xff);
8823 /* DEFAULT mode covers the real erratum conditions,
8824 ALL mode inserts stubs for every LDM/VLDM instruction (testing). */
8825 return (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT
8826 ? nb_words > 8
8827 : stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL);
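/* For example, with the DEFAULT fix "ldmia.w r0!, {r1-r9}" (9 registers,
   hence 9 words) gets a stub, while an 8-register LDM does not; with the
   ALL fix every LDM/VLDM is replaced regardless of size.  */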
8830 /* Look for potentially-troublesome code sequences which might trigger
8831 the STM STM32L4XX erratum. */
8833 bool
8834 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
8835 struct bfd_link_info *link_info)
8837 asection *sec;
8838 bfd_byte *contents = NULL;
8839 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8841 if (globals == NULL)
8842 return false;
8844 /* If we are only performing a partial link do not bother
8845 to construct any glue. */
8846 if (bfd_link_relocatable (link_info))
8847 return true;
8849 /* Skip if this bfd does not correspond to an ELF image. */
8850 if (! is_arm_elf (abfd))
8851 return true;
8853 if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
8854 return true;
8856 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8857 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8858 return true;
8860 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8862 unsigned int i, span;
8863 struct _arm_elf_section_data *sec_data;
8865 /* If we don't have executable progbits, we're not interested in this
8866 section. Also skip if section is to be excluded. */
8867 if (elf_section_type (sec) != SHT_PROGBITS
8868 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8869 || (sec->flags & SEC_EXCLUDE) != 0
8870 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8871 || sec->output_section == bfd_abs_section_ptr
8872 || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
8873 continue;
8875 sec_data = elf32_arm_section_data (sec);
8877 if (sec_data->mapcount == 0)
8878 continue;
8880 if (elf_section_data (sec)->this_hdr.contents != NULL)
8881 contents = elf_section_data (sec)->this_hdr.contents;
8882 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8883 goto error_return;
8885 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8886 elf32_arm_compare_mapping);
8888 for (span = 0; span < sec_data->mapcount; span++)
8890 unsigned int span_start = sec_data->map[span].vma;
8891 unsigned int span_end = (span == sec_data->mapcount - 1)
8892 ? sec->size : sec_data->map[span + 1].vma;
8893 char span_type = sec_data->map[span].type;
8894 int itblock_current_pos = 0;
8896 /* Only Thumb-2 mode needs to be supported with this Cortex-M4 specific
8897 code; we should not encounter any ARM-mode spans
8898 (span_type == 'a'). */
8899 if (span_type != 't')
8900 continue;
8902 for (i = span_start; i < span_end;)
8904 unsigned int insn = bfd_get_16 (abfd, &contents[i]);
8905 bool insn_32bit = false;
8906 bool is_ldm = false;
8907 bool is_vldm = false;
8908 bool is_not_last_in_it_block = false;
8910 /* The first 16 bits of every 32-bit Thumb-2 instruction start
8911 with opcode[15..13]=0b111 and op1 (opcode[12..11]) can be
8912 anything except 0b00.
8913 See the 32-bit Thumb instruction encoding. */
8914 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
8915 insn_32bit = true;
8917 /* Compute the predicate that tells whether the instruction
8918 is covered by an IT block
8919 - Raise an error if there is an ldm that is not
8920 last in the IT block and thus cannot be replaced
8921 - Otherwise we can create a branch at the end of the
8922 IT block, it will be controlled naturally by IT
8923 with the proper pseudo-predicate
8924 - So the only interesting predicate is the one that
8925 tells that we are not on the last item of an IT
8926 block. */
8927 if (itblock_current_pos != 0)
8928 is_not_last_in_it_block = !!--itblock_current_pos;
8930 if (insn_32bit)
8932 /* Load the rest of the insn (in manual-friendly order). */
8933 insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
8934 is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
8935 is_vldm = is_thumb2_vldm (insn);
8937 /* Veneers are created for (v)ldm depending on
8938 option flags and memory access conditions; but
8939 if the instruction is not the last instruction of
8940 an IT block, we cannot create a jump there, so we
8941 bail out. */
8942 if ((is_ldm || is_vldm)
8943 && stm32l4xx_need_create_replacing_stub
8944 (insn, globals->stm32l4xx_fix))
8946 if (is_not_last_in_it_block)
8948 _bfd_error_handler
8949 /* xgettext:c-format */
8950 (_("%pB(%pA+%#x): error: multiple load detected"
8951 " in non-last IT block instruction:"
8952 " STM32L4XX veneer cannot be generated; "
8953 "use gcc option -mrestrict-it to generate"
8954 " only one instruction per IT block"),
8955 abfd, sec, i);
8957 else
8959 elf32_stm32l4xx_erratum_list *newerr =
8960 (elf32_stm32l4xx_erratum_list *)
8961 bfd_zmalloc
8962 (sizeof (elf32_stm32l4xx_erratum_list));
8964 elf32_arm_section_data (sec)
8965 ->stm32l4xx_erratumcount += 1;
8966 newerr->u.b.insn = insn;
8967 /* We create only thumb branches. */
8968 newerr->type =
8969 STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
8970 record_stm32l4xx_erratum_veneer
8971 (link_info, newerr, abfd, sec,
8973 is_ldm ?
8974 STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
8975 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
8976 newerr->vma = -1;
8977 newerr->next = sec_data->stm32l4xx_erratumlist;
8978 sec_data->stm32l4xx_erratumlist = newerr;
8982 else
8984 /* A7.7.37 IT p208
8985 IT blocks are only encoded in T1
8986 Encoding T1: IT{x{y{z}}} <firstcond>
8987 1 0 1 1 - 1 1 1 1 - firstcond - mask
8988 if mask = '0000' then see 'related encodings'
8989 We don't deal with UNPREDICTABLE, just ignore these.
8990 There can be no nested IT blocks so an IT block
8991 is naturally a new one for which it is worth
8992 computing its size. */
8993 bool is_newitblock = ((insn & 0xff00) == 0xbf00)
8994 && ((insn & 0x000f) != 0x0000);
8995 /* If we have a new IT block we compute its size. */
8996 if (is_newitblock)
8998 /* Compute the number of instructions controlled
8999 by the IT block, it will be used to decide
9000 whether we are inside an IT block or not. */
9001 unsigned int mask = insn & 0x000f;
9002 itblock_current_pos = 4 - ctz (mask);
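/* The mask holds the remaining then/else condition bits followed by a
   terminating 1, so 4 - ctz (mask) is the number of instructions covered:
   a bare IT has mask 0b1000 (one instruction), while an ITTT-style mask
   with a 1 in bit 0 covers four.  */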
9006 i += insn_32bit ? 4 : 2;
9010 if (elf_section_data (sec)->this_hdr.contents != contents)
9011 free (contents);
9012 contents = NULL;
9015 return true;
9017 error_return:
9018 if (elf_section_data (sec)->this_hdr.contents != contents)
9019 free (contents);
9021 return false;
9024 /* Set target relocation values needed during linking. */
9026 void
9027 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
9028 struct bfd_link_info *link_info,
9029 struct elf32_arm_params *params)
9031 struct elf32_arm_link_hash_table *globals;
9033 globals = elf32_arm_hash_table (link_info);
9034 if (globals == NULL)
9035 return;
9037 globals->target1_is_rel = params->target1_is_rel;
9038 if (globals->fdpic_p)
9039 globals->target2_reloc = R_ARM_GOT32;
9040 else if (strcmp (params->target2_type, "rel") == 0)
9041 globals->target2_reloc = R_ARM_REL32;
9042 else if (strcmp (params->target2_type, "abs") == 0)
9043 globals->target2_reloc = R_ARM_ABS32;
9044 else if (strcmp (params->target2_type, "got-rel") == 0)
9045 globals->target2_reloc = R_ARM_GOT_PREL;
9046 else
9048 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9049 params->target2_type);
9051 globals->fix_v4bx = params->fix_v4bx;
9052 globals->use_blx |= params->use_blx;
9053 globals->vfp11_fix = params->vfp11_denorm_fix;
9054 globals->stm32l4xx_fix = params->stm32l4xx_fix;
9055 if (globals->fdpic_p)
9056 globals->pic_veneer = 1;
9057 else
9058 globals->pic_veneer = params->pic_veneer;
9059 globals->fix_cortex_a8 = params->fix_cortex_a8;
9060 globals->fix_arm1176 = params->fix_arm1176;
9061 globals->cmse_implib = params->cmse_implib;
9062 globals->in_implib_bfd = params->in_implib_bfd;
9064 BFD_ASSERT (is_arm_elf (output_bfd));
9065 elf_arm_tdata (output_bfd)->no_enum_size_warning
9066 = params->no_enum_size_warning;
9067 elf_arm_tdata (output_bfd)->no_wchar_size_warning
9068 = params->no_wchar_size_warning;
9071 /* Replace the target offset of a Thumb bl or b.w instruction. */
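/* Roughly, the 25-bit halfword-aligned offset is split across the two
   halfwords of the encoding: S and imm10 go into the first halfword, J1/J2
   and imm11 into the second, with J1 = NOT(I1) EOR S and J2 = NOT(I2) EOR S,
   which is what the reloc_sign XORs below reconstruct.  */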
9073 static void
9074 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
9076 bfd_vma upper;
9077 bfd_vma lower;
9078 int reloc_sign;
9080 BFD_ASSERT ((offset & 1) == 0);
9082 upper = bfd_get_16 (abfd, insn);
9083 lower = bfd_get_16 (abfd, insn + 2);
9084 reloc_sign = (offset < 0) ? 1 : 0;
9085 upper = (upper & ~(bfd_vma) 0x7ff)
9086 | ((offset >> 12) & 0x3ff)
9087 | (reloc_sign << 10);
9088 lower = (lower & ~(bfd_vma) 0x2fff)
9089 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
9090 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
9091 | ((offset >> 1) & 0x7ff);
9092 bfd_put_16 (abfd, upper, insn);
9093 bfd_put_16 (abfd, lower, insn + 2);
9096 /* Thumb code calling an ARM function. */
9098 static int
9099 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
9100 const char * name,
9101 bfd * input_bfd,
9102 bfd * output_bfd,
9103 asection * input_section,
9104 bfd_byte * hit_data,
9105 asection * sym_sec,
9106 bfd_vma offset,
9107 bfd_signed_vma addend,
9108 bfd_vma val,
9109 char **error_message)
9111 asection * s = 0;
9112 bfd_vma my_offset;
9113 long int ret_offset;
9114 struct elf_link_hash_entry * myh;
9115 struct elf32_arm_link_hash_table * globals;
9117 myh = find_thumb_glue (info, name, error_message);
9118 if (myh == NULL)
9119 return false;
9121 globals = elf32_arm_hash_table (info);
9122 BFD_ASSERT (globals != NULL);
9123 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9125 my_offset = myh->root.u.def.value;
9127 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9128 THUMB2ARM_GLUE_SECTION_NAME);
9130 BFD_ASSERT (s != NULL);
9131 BFD_ASSERT (s->contents != NULL);
9132 BFD_ASSERT (s->output_section != NULL);
9134 if ((my_offset & 0x01) == 0x01)
9136 if (sym_sec != NULL
9137 && sym_sec->owner != NULL
9138 && !INTERWORK_FLAG (sym_sec->owner))
9140 _bfd_error_handler
9141 (_("%pB(%s): warning: interworking not enabled;"
9142 " first occurrence: %pB: %s call to %s"),
9143 sym_sec->owner, name, input_bfd, "Thumb", "ARM");
9145 return false;
9148 --my_offset;
9149 myh->root.u.def.value = my_offset;
9151 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
9152 s->contents + my_offset);
9154 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
9155 s->contents + my_offset + 2);
9157 ret_offset =
9158 /* Address of destination of the stub. */
9159 ((bfd_signed_vma) val)
9160 - ((bfd_signed_vma)
9161 /* Offset from the start of the current section
9162 to the start of the stubs. */
9163 (s->output_offset
9164 /* Offset of the start of this stub from the start of the stubs. */
9165 + my_offset
9166 /* Address of the start of the current section. */
9167 + s->output_section->vma)
9168 /* The branch instruction is 4 bytes into the stub. */
9170 /* ARM branches work from the pc of the instruction + 8. */
9171 + 8);
9173 put_arm_insn (globals, output_bfd,
9174 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
9175 s->contents + my_offset + 4);
9178 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
9180 /* Now go back and fix up the original BL insn to point to here. */
9181 ret_offset =
9182 /* Address of where the stub is located. */
9183 (s->output_section->vma + s->output_offset + my_offset)
9184 /* Address of where the BL is located. */
9185 - (input_section->output_section->vma + input_section->output_offset
9186 + offset)
9187 /* Addend in the relocation. */
9188 - addend
9189 /* Biassing for PC-relative addressing. */
9190 - 8;
9192 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
9194 return true;
9197 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
9199 static struct elf_link_hash_entry *
9200 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
9201 const char * name,
9202 bfd * input_bfd,
9203 bfd * output_bfd,
9204 asection * sym_sec,
9205 bfd_vma val,
9206 asection * s,
9207 char ** error_message)
9209 bfd_vma my_offset;
9210 long int ret_offset;
9211 struct elf_link_hash_entry * myh;
9212 struct elf32_arm_link_hash_table * globals;
9214 myh = find_arm_glue (info, name, error_message);
9215 if (myh == NULL)
9216 return NULL;
9218 globals = elf32_arm_hash_table (info);
9219 BFD_ASSERT (globals != NULL);
9220 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9222 my_offset = myh->root.u.def.value;
9224 if ((my_offset & 0x01) == 0x01)
9226 if (sym_sec != NULL
9227 && sym_sec->owner != NULL
9228 && !INTERWORK_FLAG (sym_sec->owner))
9230 _bfd_error_handler
9231 (_("%pB(%s): warning: interworking not enabled;"
9232 " first occurrence: %pB: %s call to %s"),
9233 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
9236 --my_offset;
9237 myh->root.u.def.value = my_offset;
9239 if (bfd_link_pic (info)
9240 || globals->root.is_relocatable_executable
9241 || globals->pic_veneer)
9243 /* For relocatable objects we can't use absolute addresses,
9244 so construct the address from a relative offset. */
9245 /* TODO: If the offset is small it's probably worth
9246 constructing the address with adds. */
9247 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
9248 s->contents + my_offset);
9249 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
9250 s->contents + my_offset + 4);
9251 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
9252 s->contents + my_offset + 8);
9253 /* Adjust the offset by 4 for the position of the add,
9254 and 8 for the pipeline offset. */
9255 ret_offset = (val - (s->output_offset
9256 + s->output_section->vma
9257 + my_offset + 12))
9258 | 1;
9259 bfd_put_32 (output_bfd, ret_offset,
9260 s->contents + my_offset + 12);
9262 else if (globals->use_blx)
9264 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
9265 s->contents + my_offset);
9267 /* It's a thumb address. Add the low order bit. */
9268 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
9269 s->contents + my_offset + 4);
9271 else
9273 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
9274 s->contents + my_offset);
9276 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
9277 s->contents + my_offset + 4);
9279 /* It's a thumb address. Add the low order bit. */
9280 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
9281 s->contents + my_offset + 8);
9283 my_offset += 12;
9287 BFD_ASSERT (my_offset <= globals->arm_glue_size);
9289 return myh;
9292 /* Arm code calling a Thumb function. */
9294 static int
9295 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
9296 const char * name,
9297 bfd * input_bfd,
9298 bfd * output_bfd,
9299 asection * input_section,
9300 bfd_byte * hit_data,
9301 asection * sym_sec,
9302 bfd_vma offset,
9303 bfd_signed_vma addend,
9304 bfd_vma val,
9305 char **error_message)
9307 unsigned long int tmp;
9308 bfd_vma my_offset;
9309 asection * s;
9310 long int ret_offset;
9311 struct elf_link_hash_entry * myh;
9312 struct elf32_arm_link_hash_table * globals;
9314 globals = elf32_arm_hash_table (info);
9315 BFD_ASSERT (globals != NULL);
9316 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9318 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9319 ARM2THUMB_GLUE_SECTION_NAME);
9320 BFD_ASSERT (s != NULL);
9321 BFD_ASSERT (s->contents != NULL);
9322 BFD_ASSERT (s->output_section != NULL);
9324 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
9325 sym_sec, val, s, error_message);
9326 if (!myh)
9327 return false;
9329 my_offset = myh->root.u.def.value;
9330 tmp = bfd_get_32 (input_bfd, hit_data);
9331 tmp = tmp & 0xFF000000;
9333 /* Somehow these are both 4 too far, so subtract 8. */
9334 ret_offset = (s->output_offset
9335 + my_offset
9336 + s->output_section->vma
9337 - (input_section->output_offset
9338 + input_section->output_section->vma
9339 + offset + addend)
9340 - 8);
9342 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
9344 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
9346 return true;
9349 /* Populate Arm stub for an exported Thumb function. */
9351 static bool
9352 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
9354 struct bfd_link_info * info = (struct bfd_link_info *) inf;
9355 asection * s;
9356 struct elf_link_hash_entry * myh;
9357 struct elf32_arm_link_hash_entry *eh;
9358 struct elf32_arm_link_hash_table * globals;
9359 asection *sec;
9360 bfd_vma val;
9361 char *error_message;
9363 eh = elf32_arm_hash_entry (h);
9364 /* Allocate stubs for exported Thumb functions on v4t. */
9365 if (eh->export_glue == NULL)
9366 return true;
9368 globals = elf32_arm_hash_table (info);
9369 BFD_ASSERT (globals != NULL);
9370 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9372 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9373 ARM2THUMB_GLUE_SECTION_NAME);
9374 BFD_ASSERT (s != NULL);
9375 BFD_ASSERT (s->contents != NULL);
9376 BFD_ASSERT (s->output_section != NULL);
9378 sec = eh->export_glue->root.u.def.section;
9380 BFD_ASSERT (sec->output_section != NULL);
9382 val = eh->export_glue->root.u.def.value + sec->output_offset
9383 + sec->output_section->vma;
9385 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
9386 h->root.u.def.section->owner,
9387 globals->obfd, sec, val, s,
9388 &error_message);
9389 BFD_ASSERT (myh);
9390 return true;
9393 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
9395 static bfd_vma
9396 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9398 bfd_byte *p;
9399 bfd_vma glue_addr;
9400 asection *s;
9401 struct elf32_arm_link_hash_table *globals;
9403 globals = elf32_arm_hash_table (info);
9404 BFD_ASSERT (globals != NULL);
9405 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9407 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9408 ARM_BX_GLUE_SECTION_NAME);
9409 BFD_ASSERT (s != NULL);
9410 BFD_ASSERT (s->contents != NULL);
9411 BFD_ASSERT (s->output_section != NULL);
9413 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
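/* A note on the encoding (inferred from the surrounding code rather than
   stated explicitly): the low two bits of bx_glue_offset[reg] are flags,
   not part of the offset -- bit 1 records that space for this register's
   veneer has been reserved, bit 0 that its contents have been written --
   so both are masked off below and bit 0 is set once the veneer has been
   emitted. */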
9415 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9417 if ((globals->bx_glue_offset[reg] & 1) == 0)
9419 p = s->contents + glue_addr;
9420 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9421 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9422 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9423 globals->bx_glue_offset[reg] |= 1;
9426 return glue_addr + s->output_section->vma + s->output_offset;
9429 /* Generate Arm stubs for exported Thumb symbols. */
9430 static void
9431 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9432 struct bfd_link_info *link_info)
9434 struct elf32_arm_link_hash_table * globals;
9436 if (link_info == NULL)
9437 /* Ignore this if we are not called by the ELF backend linker. */
9438 return;
9440 globals = elf32_arm_hash_table (link_info);
9441 if (globals == NULL)
9442 return;
9444 /* If blx is available then exported Thumb symbols are OK and there is
9445 nothing to do. */
9446 if (globals->use_blx)
9447 return;
9449 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9450 link_info);
9453 /* Reserve space for COUNT dynamic relocations in relocation section
9454 SRELOC. */
9456 static void
9457 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9458 bfd_size_type count)
9460 struct elf32_arm_link_hash_table *htab;
9462 htab = elf32_arm_hash_table (info);
9463 BFD_ASSERT (htab->root.dynamic_sections_created);
9464 if (sreloc == NULL)
9465 abort ();
9466 sreloc->size += RELOC_SIZE (htab) * count;
9469 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9470 dynamic, the relocations should go in SRELOC, otherwise they should
9471 go in the special .rel.iplt section. */
9473 static void
9474 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9475 bfd_size_type count)
9477 struct elf32_arm_link_hash_table *htab;
9479 htab = elf32_arm_hash_table (info);
9480 if (!htab->root.dynamic_sections_created)
9481 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9482 else
9484 BFD_ASSERT (sreloc != NULL);
9485 sreloc->size += RELOC_SIZE (htab) * count;
9489 /* Add relocation REL to the end of relocation section SRELOC. */
9491 static void
9492 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9493 asection *sreloc, Elf_Internal_Rela *rel)
9495 bfd_byte *loc;
9496 struct elf32_arm_link_hash_table *htab;
9498 htab = elf32_arm_hash_table (info);
9499 if (!htab->root.dynamic_sections_created
9500 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9501 sreloc = htab->root.irelplt;
9502 if (sreloc == NULL)
9503 abort ();
9504 loc = sreloc->contents;
9505 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
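/* The slot consumed here is expected to have been reserved earlier by
   elf32_arm_allocate_dynrelocs or elf32_arm_allocate_irelocs; running
   past that reservation indicates a sizing bug, hence the abort below. */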
9506 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9507 abort ();
9508 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9511 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9512 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
9513 to .plt. */
9515 static void
9516 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
9517 bool is_iplt_entry,
9518 union gotplt_union *root_plt,
9519 struct arm_plt_info *arm_plt)
9521 struct elf32_arm_link_hash_table *htab;
9522 asection *splt;
9523 asection *sgotplt;
9525 htab = elf32_arm_hash_table (info);
9527 if (is_iplt_entry)
9529 splt = htab->root.iplt;
9530 sgotplt = htab->root.igotplt;
9532 /* NaCl uses a special first entry in .iplt too. */
9533 if (htab->root.target_os == is_nacl && splt->size == 0)
9534 splt->size += htab->plt_header_size;
9536 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
9537 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
9539 else
9541 splt = htab->root.splt;
9542 sgotplt = htab->root.sgotplt;
9544 if (htab->fdpic_p)
9546 /* Allocate room for R_ARM_FUNCDESC_VALUE. */
9547 /* For lazy binding, relocations will be put into .rel.plt, in
9548 .rel.got otherwise. */
9549 /* FIXME: we don't currently support lazy binding, so put it in .rel.got. */
9550 if (info->flags & DF_BIND_NOW)
9551 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
9552 else
9553 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9555 else
9557 /* Allocate room for an R_ARM_JUMP_SLOT relocation in .rel.plt. */
9558 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9561 /* If this is the first .plt entry, make room for the special
9562 first entry. */
9563 if (splt->size == 0)
9564 splt->size += htab->plt_header_size;
9566 htab->next_tls_desc_index++;
9569 /* Allocate the PLT entry itself, including any leading Thumb stub. */
9570 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9571 splt->size += PLT_THUMB_STUB_SIZE;
9572 root_plt->offset = splt->size;
9573 splt->size += htab->plt_entry_size;
9575 /* We also need to make an entry in the .got.plt section, which
9576 will be placed in the .got section by the linker script. */
9577 if (is_iplt_entry)
9578 arm_plt->got_offset = sgotplt->size;
9579 else
9580 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
9581 if (htab->fdpic_p)
9582 /* Function descriptor takes 64 bits in GOT. */
9583 sgotplt->size += 8;
9584 else
9585 sgotplt->size += 4;
9588 static bfd_vma
9589 arm_movw_immediate (bfd_vma value)
9591 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9594 static bfd_vma
9595 arm_movt_immediate (bfd_vma value)
9597 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
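/* As a rough illustration (values invented): the A32 MOVW/MOVT encodings
   keep imm4 in instruction bits [19:16] and imm12 in bits [11:0], so for
   value 0x12345678:
     arm_movw_immediate (0x12345678) == 0x00050678  (low half 0x5678)
     arm_movt_immediate (0x12345678) == 0x00010234  (high half 0x1234)
   ORing these into the MOVW and MOVT opcodes materialises the full
   32-bit value in two instructions. */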
9600 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
9601 the entry lives in .iplt and resolves to (*SYM_VALUE)().
9602 Otherwise, DYNINDX is the index of the symbol in the dynamic
9603 symbol table and SYM_VALUE is undefined.
9605 ROOT_PLT points to the offset of the PLT entry from the start of its
9606 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
9607 bookkeeping information.
9609 Returns FALSE if there was a problem. */
9611 static bool
9612 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
9613 union gotplt_union *root_plt,
9614 struct arm_plt_info *arm_plt,
9615 int dynindx, bfd_vma sym_value)
9617 struct elf32_arm_link_hash_table *htab;
9618 asection *sgot;
9619 asection *splt;
9620 asection *srel;
9621 bfd_byte *loc;
9622 bfd_vma plt_index;
9623 Elf_Internal_Rela rel;
9624 bfd_vma got_header_size;
9626 htab = elf32_arm_hash_table (info);
9628 /* Pick the appropriate sections and sizes. */
9629 if (dynindx == -1)
9631 splt = htab->root.iplt;
9632 sgot = htab->root.igotplt;
9633 srel = htab->root.irelplt;
9635 /* There are no reserved entries in .igot.plt, and no special
9636 first entry in .iplt. */
9637 got_header_size = 0;
9639 else
9641 splt = htab->root.splt;
9642 sgot = htab->root.sgotplt;
9643 srel = htab->root.srelplt;
9645 got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
9647 BFD_ASSERT (splt != NULL && srel != NULL);
9649 bfd_vma got_offset, got_address, plt_address;
9650 bfd_vma got_displacement, initial_got_entry;
9651 bfd_byte * ptr;
9653 BFD_ASSERT (sgot != NULL);
9655 /* Get the offset into the .(i)got.plt table of the entry that
9656 corresponds to this function. */
9657 got_offset = (arm_plt->got_offset & -2);
9659 /* Get the index in the procedure linkage table which
9660 corresponds to this symbol. This is the index of this symbol
9661 in all the symbols for which we are making plt entries.
9662 After the reserved .got.plt entries, all symbols appear in
9663 the same order as in .plt. */
9664 if (htab->fdpic_p)
9665 /* Function descriptor takes 8 bytes. */
9666 plt_index = (got_offset - got_header_size) / 8;
9667 else
9668 plt_index = (got_offset - got_header_size) / 4;
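/* For example (illustrative numbers only): with the usual three reserved
   .got.plt words (got_header_size == 12) and a 4-byte slot per symbol,
   got_offset 0x14 gives plt_index (0x14 - 12) / 4 == 2, i.e. the third
   PLT entry after the PLT header. */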
9670 /* Calculate the address of the GOT entry. */
9671 got_address = (sgot->output_section->vma
9672 + sgot->output_offset
9673 + got_offset);
9675 /* ...and the address of the PLT entry. */
9676 plt_address = (splt->output_section->vma
9677 + splt->output_offset
9678 + root_plt->offset);
9680 ptr = splt->contents + root_plt->offset;
9681 if (htab->root.target_os == is_vxworks && bfd_link_pic (info))
9683 unsigned int i;
9684 bfd_vma val;
9686 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9688 val = elf32_arm_vxworks_shared_plt_entry[i];
9689 if (i == 2)
9690 val |= got_address - sgot->output_section->vma;
9691 if (i == 5)
9692 val |= plt_index * RELOC_SIZE (htab);
9693 if (i == 2 || i == 5)
9694 bfd_put_32 (output_bfd, val, ptr);
9695 else
9696 put_arm_insn (htab, output_bfd, val, ptr);
9699 else if (htab->root.target_os == is_vxworks)
9701 unsigned int i;
9702 bfd_vma val;
9704 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9706 val = elf32_arm_vxworks_exec_plt_entry[i];
9707 if (i == 2)
9708 val |= got_address;
9709 if (i == 4)
9710 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
9711 if (i == 5)
9712 val |= plt_index * RELOC_SIZE (htab);
9713 if (i == 2 || i == 5)
9714 bfd_put_32 (output_bfd, val, ptr);
9715 else
9716 put_arm_insn (htab, output_bfd, val, ptr);
9719 loc = (htab->srelplt2->contents
9720 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
9722 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9723 referencing the GOT for this PLT entry. */
9724 rel.r_offset = plt_address + 8;
9725 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
9726 rel.r_addend = got_offset;
9727 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9728 loc += RELOC_SIZE (htab);
9730 /* Create the R_ARM_ABS32 relocation referencing the
9731 beginning of the PLT for this GOT entry. */
9732 rel.r_offset = got_address;
9733 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
9734 rel.r_addend = 0;
9735 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9737 else if (htab->root.target_os == is_nacl)
9739 /* Calculate the displacement between the PLT slot and the
9740 common tail that's part of the special initial PLT slot. */
9741 int32_t tail_displacement
9742 = ((splt->output_section->vma + splt->output_offset
9743 + ARM_NACL_PLT_TAIL_OFFSET)
9744 - (plt_address + htab->plt_entry_size + 4));
9745 BFD_ASSERT ((tail_displacement & 3) == 0);
9746 tail_displacement >>= 2;
9748 BFD_ASSERT ((tail_displacement & 0xff000000) == 0
9749 || (-tail_displacement & 0xff000000) == 0);
9751 /* Calculate the displacement between the PLT slot and the entry
9752 in the GOT. The offset accounts for the value produced by
9753 adding to pc in the penultimate instruction of the PLT stub. */
9754 got_displacement = (got_address
9755 - (plt_address + htab->plt_entry_size));
9757 /* NaCl does not support interworking at all. */
9758 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
9760 put_arm_insn (htab, output_bfd,
9761 elf32_arm_nacl_plt_entry[0]
9762 | arm_movw_immediate (got_displacement),
9763 ptr + 0);
9764 put_arm_insn (htab, output_bfd,
9765 elf32_arm_nacl_plt_entry[1]
9766 | arm_movt_immediate (got_displacement),
9767 ptr + 4);
9768 put_arm_insn (htab, output_bfd,
9769 elf32_arm_nacl_plt_entry[2],
9770 ptr + 8);
9771 put_arm_insn (htab, output_bfd,
9772 elf32_arm_nacl_plt_entry[3]
9773 | (tail_displacement & 0x00ffffff),
9774 ptr + 12);
9776 else if (htab->fdpic_p)
9778 const bfd_vma *plt_entry = using_thumb_only (htab)
9779 ? elf32_arm_fdpic_thumb_plt_entry
9780 : elf32_arm_fdpic_plt_entry;
9782 /* Fill in the Thumb stub if needed. */
9783 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9785 put_thumb_insn (htab, output_bfd,
9786 elf32_arm_plt_thumb_stub[0], ptr - 4);
9787 put_thumb_insn (htab, output_bfd,
9788 elf32_arm_plt_thumb_stub[1], ptr - 2);
9790 /* As we are using 32 bit instructions even for the Thumb
9791 version, we have to use 'put_arm_insn' instead of
9792 'put_thumb_insn'. */
9793 put_arm_insn (htab, output_bfd, plt_entry[0], ptr + 0);
9794 put_arm_insn (htab, output_bfd, plt_entry[1], ptr + 4);
9795 put_arm_insn (htab, output_bfd, plt_entry[2], ptr + 8);
9796 put_arm_insn (htab, output_bfd, plt_entry[3], ptr + 12);
9797 bfd_put_32 (output_bfd, got_offset, ptr + 16);
9799 if (!(info->flags & DF_BIND_NOW))
9801 /* funcdesc_value_reloc_offset. */
9802 bfd_put_32 (output_bfd,
9803 htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
9804 ptr + 20);
9805 put_arm_insn (htab, output_bfd, plt_entry[6], ptr + 24);
9806 put_arm_insn (htab, output_bfd, plt_entry[7], ptr + 28);
9807 put_arm_insn (htab, output_bfd, plt_entry[8], ptr + 32);
9808 put_arm_insn (htab, output_bfd, plt_entry[9], ptr + 36);
9811 else if (using_thumb_only (htab))
9813 /* PR ld/16017: Generate thumb only PLT entries. */
9814 if (!using_thumb2 (htab))
9816 /* FIXME: We ought to be able to generate thumb-1 PLT
9817 instructions... */
9818 _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9819 output_bfd);
9820 return false;
9823 /* Calculate the displacement between the PLT slot and the entry in
9824 the GOT. The 12-byte offset accounts for the value produced by
9825 adding to pc in the 3rd instruction of the PLT stub. */
9826 got_displacement = got_address - (plt_address + 12);
9828 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9829 instead of 'put_thumb_insn'. */
9830 put_arm_insn (htab, output_bfd,
9831 elf32_thumb2_plt_entry[0]
9832 | ((got_displacement & 0x000000ff) << 16)
9833 | ((got_displacement & 0x00000700) << 20)
9834 | ((got_displacement & 0x00000800) >> 1)
9835 | ((got_displacement & 0x0000f000) >> 12),
9836 ptr + 0);
9837 put_arm_insn (htab, output_bfd,
9838 elf32_thumb2_plt_entry[1]
9839 | ((got_displacement & 0x00ff0000) )
9840 | ((got_displacement & 0x07000000) << 4)
9841 | ((got_displacement & 0x08000000) >> 17)
9842 | ((got_displacement & 0xf0000000) >> 28),
9843 ptr + 4);
9844 put_arm_insn (htab, output_bfd,
9845 elf32_thumb2_plt_entry[2],
9846 ptr + 8);
9847 put_arm_insn (htab, output_bfd,
9848 elf32_thumb2_plt_entry[3],
9849 ptr + 12);
9851 else
9853 /* Calculate the displacement between the PLT slot and the
9854 entry in the GOT. The eight-byte offset accounts for the
9855 value produced by adding to pc in the first instruction
9856 of the PLT stub. */
9857 got_displacement = got_address - (plt_address + 8);
9859 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9861 put_thumb_insn (htab, output_bfd,
9862 elf32_arm_plt_thumb_stub[0], ptr - 4);
9863 put_thumb_insn (htab, output_bfd,
9864 elf32_arm_plt_thumb_stub[1], ptr - 2);
9867 if (!elf32_arm_use_long_plt_entry)
9869 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
9871 put_arm_insn (htab, output_bfd,
9872 elf32_arm_plt_entry_short[0]
9873 | ((got_displacement & 0x0ff00000) >> 20),
9874 ptr + 0);
9875 put_arm_insn (htab, output_bfd,
9876 elf32_arm_plt_entry_short[1]
9877 | ((got_displacement & 0x000ff000) >> 12),
9878 ptr + 4);
9879 put_arm_insn (htab, output_bfd,
9880 elf32_arm_plt_entry_short[2]
9881 | (got_displacement & 0x00000fff),
9882 ptr + 8);
9883 #ifdef FOUR_WORD_PLT
9884 bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
9885 #endif
9887 else
9889 put_arm_insn (htab, output_bfd,
9890 elf32_arm_plt_entry_long[0]
9891 | ((got_displacement & 0xf0000000) >> 28),
9892 ptr + 0);
9893 put_arm_insn (htab, output_bfd,
9894 elf32_arm_plt_entry_long[1]
9895 | ((got_displacement & 0x0ff00000) >> 20),
9896 ptr + 4);
9897 put_arm_insn (htab, output_bfd,
9898 elf32_arm_plt_entry_long[2]
9899 | ((got_displacement & 0x000ff000) >> 12),
9900 ptr + 8);
9901 put_arm_insn (htab, output_bfd,
9902 elf32_arm_plt_entry_long[3]
9903 | (got_displacement & 0x00000fff),
9904 ptr + 12);
9908 /* Fill in the entry in the .rel(a).(i)plt section. */
9909 rel.r_offset = got_address;
9910 rel.r_addend = 0;
9911 if (dynindx == -1)
9913 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9914 The dynamic linker or static executable then calls SYM_VALUE
9915 to determine the correct run-time value of the .igot.plt entry. */
9916 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9917 initial_got_entry = sym_value;
9919 else
9921 /* For FDPIC we will have to resolve an R_ARM_FUNCDESC_VALUE
9922 relocation used by the PLT entry. */
9923 if (htab->fdpic_p)
9925 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
9926 initial_got_entry = 0;
9928 else
9930 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
9931 initial_got_entry = (splt->output_section->vma
9932 + splt->output_offset);
9934 /* PR ld/16017
9935 When targeting Thumb only, we need to set the LSB for any address that
9936 will be used with an interworking branch instruction. */
9937 if (using_thumb_only (htab))
9938 initial_got_entry |= 1;
9942 /* Fill in the entry in the global offset table. */
9943 bfd_put_32 (output_bfd, initial_got_entry,
9944 sgot->contents + got_offset);
9946 if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
9948 /* Setup initial funcdesc value. */
9949 /* FIXME: we don't support lazy binding because there is a
9950 race condition between both words getting written and
9951 some other thread attempting to read them. The ARM
9952 architecture does not have an atomic 64 bit load/store
9953 instruction that could be used to prevent it; it is
9954 recommended that threaded FDPIC applications run with the
9955 LD_BIND_NOW environment variable set. */
9956 bfd_put_32 (output_bfd, plt_address + 0x18,
9957 sgot->contents + got_offset);
9958 bfd_put_32 (output_bfd, -1 /*TODO*/,
9959 sgot->contents + got_offset + 4);
9962 if (dynindx == -1)
9963 elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
9964 else
9966 if (htab->fdpic_p)
9968 /* For FDPIC we put PLT relocations into .rel.got when not
9969 lazy binding, otherwise we put them in .rel.plt. For now,
9970 we don't support lazy binding, so put them in .rel.got. */
9971 if (info->flags & DF_BIND_NOW)
9972 elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelgot, &rel);
9973 else
9974 elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelplt, &rel);
9976 else
9978 loc = srel->contents + plt_index * RELOC_SIZE (htab);
9979 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9983 return true;
9986 /* Some relocations map to different relocations depending on the
9987 target. Return the real relocation. */
9989 static int
9990 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
9991 int r_type)
9993 switch (r_type)
9995 case R_ARM_TARGET1:
9996 if (globals->target1_is_rel)
9997 return R_ARM_REL32;
9998 else
9999 return R_ARM_ABS32;
10001 case R_ARM_TARGET2:
10002 return globals->target2_reloc;
10004 default:
10005 return r_type;
10009 /* Return the base VMA address which should be subtracted from real addresses
10010 when resolving @dtpoff relocation.
10011 This is PT_TLS segment p_vaddr. */
10013 static bfd_vma
10014 dtpoff_base (struct bfd_link_info *info)
10016 /* If tls_sec is NULL, we should have signalled an error already. */
10017 if (elf_hash_table (info)->tls_sec == NULL)
10018 return 0;
10019 return elf_hash_table (info)->tls_sec->vma;
10022 /* Return the relocation value for @tpoff relocation
10023 if STT_TLS virtual address is ADDRESS. */
10025 static bfd_vma
10026 tpoff (struct bfd_link_info *info, bfd_vma address)
10028 struct elf_link_hash_table *htab = elf_hash_table (info);
10029 bfd_vma base;
10031 /* If tls_sec is NULL, we should have signalled an error already. */
10032 if (htab->tls_sec == NULL)
10033 return 0;
10034 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10035 return address - htab->tls_sec->vma + base;
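/* A rough example (addresses invented): with the 8-byte ARM TCB and a
   PT_TLS segment at vma 0x11000 aligned to 4 bytes, align_power (8, 2)
   is still 8, so a TLS variable at address 0x11010 gets a tpoff of
   0x11010 - 0x11000 + 8 == 0x18.  This matches the variant 1 TLS layout,
   in which the thread pointer addresses the TCB and the TLS block
   immediately follows it. */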
10038 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10039 VALUE is the relocation value. */
10041 static bfd_reloc_status_type
10042 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10044 if (value > 0xfff)
10045 return bfd_reloc_overflow;
10047 value |= bfd_get_32 (abfd, data) & 0xfffff000;
10048 bfd_put_32 (abfd, value, data);
10049 return bfd_reloc_ok;
10052 /* Handle TLS relaxations. Relaxing is possible for symbols that use
10053 R_ARM_TLS_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10054 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10056 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10057 is to then call final_link_relocate. Return other values in the
10058 case of error.
10060 FIXME: When --emit-relocs is in effect, we'll emit relocs describing
10061 the pre-relaxed code. It would be nice if the relocs were updated
10062 to match the optimization. */
10064 static bfd_reloc_status_type
10065 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
10066 bfd *input_bfd, asection *input_sec, bfd_byte *contents,
10067 Elf_Internal_Rela *rel, unsigned long is_local)
10069 unsigned long insn;
10071 switch (ELF32_R_TYPE (rel->r_info))
10073 default:
10074 return bfd_reloc_notsupported;
10076 case R_ARM_TLS_GOTDESC:
10077 if (is_local)
10078 insn = 0;
10079 else
10081 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10082 if (insn & 1)
10083 insn -= 5; /* THUMB */
10084 else
10085 insn -= 8; /* ARM */
10087 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10088 return bfd_reloc_continue;
10090 case R_ARM_THM_TLS_DESCSEQ:
10091 /* Thumb insn. */
10092 insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
10093 if ((insn & 0xff78) == 0x4478) /* add rx, pc */
10095 if (is_local)
10096 /* nop */
10097 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10099 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
10101 if (is_local)
10102 /* nop */
10103 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10104 else
10105 /* ldr rx,[ry] */
10106 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
10108 else if ((insn & 0xff87) == 0x4780) /* blx rx */
10110 if (is_local)
10111 /* nop */
10112 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10113 else
10114 /* mov r0, rx */
10115 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
10116 contents + rel->r_offset);
10118 else
10120 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10121 /* It's a 32 bit instruction, fetch the rest of it for
10122 error generation. */
10123 insn = (insn << 16)
10124 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
10125 _bfd_error_handler
10126 /* xgettext:c-format */
10127 (_("%pB(%pA+%#" PRIx64 "): "
10128 "unexpected %s instruction '%#lx' in TLS trampoline"),
10129 input_bfd, input_sec, (uint64_t) rel->r_offset,
10130 "Thumb", insn);
10131 return bfd_reloc_notsupported;
10133 break;
10135 case R_ARM_TLS_DESCSEQ:
10136 /* arm insn. */
10137 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10138 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
10140 if (is_local)
10141 /* mov rx, ry */
10142 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
10143 contents + rel->r_offset);
10145 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
10147 if (is_local)
10148 /* nop */
10149 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10150 else
10151 /* ldr rx,[ry] */
10152 bfd_put_32 (input_bfd, insn & 0xfffff000,
10153 contents + rel->r_offset);
10155 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
10157 if (is_local)
10158 /* nop */
10159 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10160 else
10161 /* mov r0, rx */
10162 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
10163 contents + rel->r_offset);
10165 else
10167 _bfd_error_handler
10168 /* xgettext:c-format */
10169 (_("%pB(%pA+%#" PRIx64 "): "
10170 "unexpected %s instruction '%#lx' in TLS trampoline"),
10171 input_bfd, input_sec, (uint64_t) rel->r_offset,
10172 "ARM", insn);
10173 return bfd_reloc_notsupported;
10175 break;
10177 case R_ARM_TLS_CALL:
10178 /* GD->IE relaxation, turn the instruction into 'nop' or
10179 'ldr r0, [pc,r0]' */
10180 insn = is_local ? 0xe1a00000 : 0xe79f0000;
10181 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10182 break;
10184 case R_ARM_THM_TLS_CALL:
10185 /* GD->IE relaxation. */
10186 if (!is_local)
10187 /* add r0,pc; ldr r0, [r0] */
10188 insn = 0x44786800;
10189 else if (using_thumb2 (globals))
10190 /* nop.w */
10191 insn = 0xf3af8000;
10192 else
10193 /* nop; nop */
10194 insn = 0xbf00bf00;
10196 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
10197 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
10198 break;
10200 return bfd_reloc_ok;
10203 /* For a given value of n, calculate the value of G_n as required to
10204 deal with group relocations. We return it in the form of an
10205 encoded constant-and-rotation, together with the final residual. If n is
10206 specified as less than zero, then final_residual is filled with the
10207 input value and no further action is performed. */
10209 static bfd_vma
10210 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10212 int current_n;
10213 bfd_vma g_n;
10214 bfd_vma encoded_g_n = 0;
10215 bfd_vma residual = value; /* Also known as Y_n. */
10217 for (current_n = 0; current_n <= n; current_n++)
10219 int shift;
10221 /* Calculate which part of the value to mask. */
10222 if (residual == 0)
10223 shift = 0;
10224 else
10226 int msb;
10228 /* Determine the most significant bit in the residual and
10229 align the resulting value to a 2-bit boundary. */
10230 for (msb = 30; msb >= 0; msb -= 2)
10231 if (residual & (3u << msb))
10232 break;
10234 /* The desired shift is now (msb - 6), or zero, whichever
10235 is the greater. */
10236 shift = msb - 6;
10237 if (shift < 0)
10238 shift = 0;
10241 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10242 g_n = residual & (0xff << shift);
10243 encoded_g_n = (g_n >> shift)
10244 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10246 /* Calculate the residual for the next time around. */
10247 residual &= ~g_n;
10250 *final_residual = residual;
10252 return encoded_g_n;
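/* A short worked example (value chosen for illustration): for value
   0x1234 the successive groups are
     n == 0: g_0 == 0x1200, residual 0x34,
             encoded as 0x48 with rotation 13 (0x48 ror 26 == 0x1200);
     n == 1: g_1 == 0x34, residual 0, encoded as 0x34 with rotation 0.
   Each G_n is thus representable as an ARM modified-immediate constant,
   which is what the group relocations rely on. */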
10255 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10256 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
10258 static int
10259 identify_add_or_sub (bfd_vma insn)
10261 int opcode = insn & 0x1e00000;
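/* Illustrative encodings: 0xe28f0010 (add r0, pc, #16) has opcode field
   0b0100 and yields 1, 0xe24f0010 (sub r0, pc, #16) has opcode field
   0b0010 and yields -1; anything else yields 0. */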
10263 if (opcode == 1 << 23) /* ADD */
10264 return 1;
10266 if (opcode == 1 << 22) /* SUB */
10267 return -1;
10269 return 0;
10272 /* Perform a relocation as part of a final link. */
10274 static bfd_reloc_status_type
10275 elf32_arm_final_link_relocate (reloc_howto_type * howto,
10276 bfd * input_bfd,
10277 bfd * output_bfd,
10278 asection * input_section,
10279 bfd_byte * contents,
10280 Elf_Internal_Rela * rel,
10281 bfd_vma value,
10282 struct bfd_link_info * info,
10283 asection * sym_sec,
10284 const char * sym_name,
10285 unsigned char st_type,
10286 enum arm_st_branch_type branch_type,
10287 struct elf_link_hash_entry * h,
10288 bool * unresolved_reloc_p,
10289 char ** error_message)
10291 unsigned long r_type = howto->type;
10292 unsigned long r_symndx;
10293 bfd_byte * hit_data = contents + rel->r_offset;
10294 bfd_vma * local_got_offsets;
10295 bfd_vma * local_tlsdesc_gotents;
10296 asection * sgot;
10297 asection * splt;
10298 asection * sreloc = NULL;
10299 asection * srelgot;
10300 bfd_vma addend;
10301 bfd_signed_vma signed_addend;
10302 unsigned char dynreloc_st_type;
10303 bfd_vma dynreloc_value;
10304 struct elf32_arm_link_hash_table * globals;
10305 struct elf32_arm_link_hash_entry *eh;
10306 union gotplt_union *root_plt;
10307 struct arm_plt_info *arm_plt;
10308 bfd_vma plt_offset;
10309 bfd_vma gotplt_offset;
10310 bool has_iplt_entry;
10311 bool resolved_to_zero;
10313 globals = elf32_arm_hash_table (info);
10314 if (globals == NULL)
10315 return bfd_reloc_notsupported;
10317 BFD_ASSERT (is_arm_elf (input_bfd));
10318 BFD_ASSERT (howto != NULL);
10320 /* Some relocation types map to different relocations depending on the
10321 target. We pick the right one here. */
10322 r_type = arm_real_reloc_type (globals, r_type);
10324 /* It is possible to have linker relaxations on some TLS access
10325 models. Update our information here. */
10326 r_type = elf32_arm_tls_transition (info, r_type, h);
10328 if (r_type != howto->type)
10329 howto = elf32_arm_howto_from_type (r_type);
10331 eh = (struct elf32_arm_link_hash_entry *) h;
10332 sgot = globals->root.sgot;
10333 local_got_offsets = elf_local_got_offsets (input_bfd);
10334 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10336 if (globals->root.dynamic_sections_created)
10337 srelgot = globals->root.srelgot;
10338 else
10339 srelgot = NULL;
10341 r_symndx = ELF32_R_SYM (rel->r_info);
10343 if (globals->use_rel)
10345 bfd_vma sign;
10347 switch (howto->size)
10349 case 0: addend = bfd_get_8 (input_bfd, hit_data); break;
10350 case 1: addend = bfd_get_16 (input_bfd, hit_data); break;
10351 case 2: addend = bfd_get_32 (input_bfd, hit_data); break;
10352 default: addend = 0; break;
10354 /* Note: the addend and signed_addend calculated here are
10355 incorrect for any split field. */
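/* For instance, for an R_ARM_PC24-style field (src_mask 0x00ffffff,
   rightshift 2): "sign" below is 0x00800000, an extracted field of
   0xfffffe becomes (0xfffffe ^ 0x800000) - 0x800000 == -2, and the
   final shift by howto->rightshift turns that into the byte addend -8,
   the pipeline bias held by a freshly assembled BL. */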
10356 addend &= howto->src_mask;
10357 sign = howto->src_mask & ~(howto->src_mask >> 1);
10358 signed_addend = (addend ^ sign) - sign;
10359 signed_addend = (bfd_vma) signed_addend << howto->rightshift;
10360 addend <<= howto->rightshift;
10362 else
10363 addend = signed_addend = rel->r_addend;
10365 /* ST_BRANCH_TO_ARM makes no sense for Thumb-only targets when we
10366 are resolving a function call relocation. */
10367 if (using_thumb_only (globals)
10368 && (r_type == R_ARM_THM_CALL
10369 || r_type == R_ARM_THM_JUMP24)
10370 && branch_type == ST_BRANCH_TO_ARM)
10371 branch_type = ST_BRANCH_TO_THUMB;
10373 /* Record the symbol information that should be used in dynamic
10374 relocations. */
10375 dynreloc_st_type = st_type;
10376 dynreloc_value = value;
10377 if (branch_type == ST_BRANCH_TO_THUMB)
10378 dynreloc_value |= 1;
10380 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10381 VALUE appropriately for relocations that we resolve at link time. */
10382 has_iplt_entry = false;
10383 if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10384 &arm_plt)
10385 && root_plt->offset != (bfd_vma) -1)
10387 plt_offset = root_plt->offset;
10388 gotplt_offset = arm_plt->got_offset;
10390 if (h == NULL || eh->is_iplt)
10392 has_iplt_entry = true;
10393 splt = globals->root.iplt;
10395 /* Populate .iplt entries here, because not all of them will
10396 be seen by finish_dynamic_symbol. The lower bit is set if
10397 we have already populated the entry. */
10398 if (plt_offset & 1)
10399 plt_offset--;
10400 else
10402 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10403 -1, dynreloc_value))
10404 root_plt->offset |= 1;
10405 else
10406 return bfd_reloc_notsupported;
10409 /* Static relocations always resolve to the .iplt entry. */
10410 st_type = STT_FUNC;
10411 value = (splt->output_section->vma
10412 + splt->output_offset
10413 + plt_offset);
10414 branch_type = ST_BRANCH_TO_ARM;
10416 /* If there are non-call relocations that resolve to the .iplt
10417 entry, then all dynamic ones must too. */
10418 if (arm_plt->noncall_refcount != 0)
10420 dynreloc_st_type = st_type;
10421 dynreloc_value = value;
10424 else
10425 /* We populate the .plt entry in finish_dynamic_symbol. */
10426 splt = globals->root.splt;
10428 else
10430 splt = NULL;
10431 plt_offset = (bfd_vma) -1;
10432 gotplt_offset = (bfd_vma) -1;
10435 resolved_to_zero = (h != NULL
10436 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10438 switch (r_type)
10440 case R_ARM_NONE:
10441 /* We don't need to find a value for this symbol. It's just a
10442 marker. */
10443 *unresolved_reloc_p = false;
10444 return bfd_reloc_ok;
10446 case R_ARM_ABS12:
10447 if (globals->root.target_os != is_vxworks)
10448 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10449 /* Fall through. */
10451 case R_ARM_PC24:
10452 case R_ARM_ABS32:
10453 case R_ARM_ABS32_NOI:
10454 case R_ARM_REL32:
10455 case R_ARM_REL32_NOI:
10456 case R_ARM_CALL:
10457 case R_ARM_JUMP24:
10458 case R_ARM_XPC25:
10459 case R_ARM_PREL31:
10460 case R_ARM_PLT32:
10461 /* Handle relocations which should use the PLT entry. ABS32/REL32
10462 will use the symbol's value, which may point to a PLT entry, but we
10463 don't need to handle that here. If we created a PLT entry, all
10464 branches in this object should go to it, except if the PLT is too
10465 far away, in which case a long branch stub should be inserted. */
10466 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10467 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10468 && r_type != R_ARM_CALL
10469 && r_type != R_ARM_JUMP24
10470 && r_type != R_ARM_PLT32)
10471 && plt_offset != (bfd_vma) -1)
10473 /* If we've created a .plt section, and assigned a PLT entry
10474 to this function, it must either be a STT_GNU_IFUNC reference
10475 or not be known to bind locally. In other cases, we should
10476 have cleared the PLT entry by now. */
10477 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10479 value = (splt->output_section->vma
10480 + splt->output_offset
10481 + plt_offset);
10482 *unresolved_reloc_p = false;
10483 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10484 contents, rel->r_offset, value,
10485 rel->r_addend);
10488 /* When generating a shared object or relocatable executable, these
10489 relocations are copied into the output file to be resolved at
10490 run time. */
10491 if ((bfd_link_pic (info)
10492 || globals->root.is_relocatable_executable
10493 || globals->fdpic_p)
10494 && (input_section->flags & SEC_ALLOC)
10495 && !(globals->root.target_os == is_vxworks
10496 && strcmp (input_section->output_section->name,
10497 ".tls_vars") == 0)
10498 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10499 || !SYMBOL_CALLS_LOCAL (info, h))
10500 && !(input_bfd == globals->stub_bfd
10501 && strstr (input_section->name, STUB_SUFFIX))
10502 && (h == NULL
10503 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10504 && !resolved_to_zero)
10505 || h->root.type != bfd_link_hash_undefweak)
10506 && r_type != R_ARM_PC24
10507 && r_type != R_ARM_CALL
10508 && r_type != R_ARM_JUMP24
10509 && r_type != R_ARM_PREL31
10510 && r_type != R_ARM_PLT32)
10512 Elf_Internal_Rela outrel;
10513 bool skip, relocate;
10514 int isrofixup = 0;
10516 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10517 && !h->def_regular)
10519 char *v = _("shared object");
10521 if (bfd_link_executable (info))
10522 v = _("PIE executable");
10524 _bfd_error_handler
10525 (_("%pB: relocation %s against external or undefined symbol `%s'"
10526 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10527 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10528 return bfd_reloc_notsupported;
10531 *unresolved_reloc_p = false;
10533 if (sreloc == NULL && globals->root.dynamic_sections_created)
10535 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10536 ! globals->use_rel);
10538 if (sreloc == NULL)
10539 return bfd_reloc_notsupported;
10542 skip = false;
10543 relocate = false;
10545 outrel.r_addend = addend;
10546 outrel.r_offset =
10547 _bfd_elf_section_offset (output_bfd, info, input_section,
10548 rel->r_offset);
10549 if (outrel.r_offset == (bfd_vma) -1)
10550 skip = true;
10551 else if (outrel.r_offset == (bfd_vma) -2)
10552 skip = true, relocate = true;
10553 outrel.r_offset += (input_section->output_section->vma
10554 + input_section->output_offset);
10556 if (skip)
10557 memset (&outrel, 0, sizeof outrel);
10558 else if (h != NULL
10559 && h->dynindx != -1
10560 && (!bfd_link_pic (info)
10561 || !(bfd_link_pie (info)
10562 || SYMBOLIC_BIND (info, h))
10563 || !h->def_regular))
10564 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10565 else
10567 int symbol;
10569 /* This symbol is local, or marked to become local. */
10570 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10571 || (globals->fdpic_p && !bfd_link_pic (info)));
10572 /* On SVR4-ish systems, the dynamic loader cannot
10573 relocate the text and data segments independently,
10574 so the symbol does not matter. */
10575 symbol = 0;
10576 if (dynreloc_st_type == STT_GNU_IFUNC)
10577 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10578 to the .iplt entry. Instead, every non-call reference
10579 must use an R_ARM_IRELATIVE relocation to obtain the
10580 correct run-time address. */
10581 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10582 else if (globals->fdpic_p && !bfd_link_pic (info))
10583 isrofixup = 1;
10584 else
10585 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10586 if (globals->use_rel)
10587 relocate = true;
10588 else
10589 outrel.r_addend += dynreloc_value;
10592 if (isrofixup)
10593 arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
10594 else
10595 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10597 /* If this reloc is against an external symbol, we do not want to
10598 fiddle with the addend. Otherwise, we need to include the symbol
10599 value so that it becomes an addend for the dynamic reloc. */
10600 if (! relocate)
10601 return bfd_reloc_ok;
10603 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10604 contents, rel->r_offset,
10605 dynreloc_value, (bfd_vma) 0);
10607 else switch (r_type)
10609 case R_ARM_ABS12:
10610 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10612 case R_ARM_XPC25: /* Arm BLX instruction. */
10613 case R_ARM_CALL:
10614 case R_ARM_JUMP24:
10615 case R_ARM_PC24: /* Arm B/BL instruction. */
10616 case R_ARM_PLT32:
10618 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10620 if (r_type == R_ARM_XPC25)
10622 /* Check for Arm calling Arm function. */
10623 /* FIXME: Should we translate the instruction into a BL
10624 instruction instead? */
10625 if (branch_type != ST_BRANCH_TO_THUMB)
10626 _bfd_error_handler
10627 (_("\%pB: warning: %s BLX instruction targets"
10628 " %s function '%s'"),
10629 input_bfd, "ARM",
10630 "ARM", h ? h->root.root.string : "(local)");
10632 else if (r_type == R_ARM_PC24)
10634 /* Check for Arm calling Thumb function. */
10635 if (branch_type == ST_BRANCH_TO_THUMB)
10637 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10638 output_bfd, input_section,
10639 hit_data, sym_sec, rel->r_offset,
10640 signed_addend, value,
10641 error_message))
10642 return bfd_reloc_ok;
10643 else
10644 return bfd_reloc_dangerous;
10648 /* Check if a stub has to be inserted because the
10649 destination is too far or we are changing mode. */
10650 if ( r_type == R_ARM_CALL
10651 || r_type == R_ARM_JUMP24
10652 || r_type == R_ARM_PLT32)
10654 enum elf32_arm_stub_type stub_type = arm_stub_none;
10655 struct elf32_arm_link_hash_entry *hash;
10657 hash = (struct elf32_arm_link_hash_entry *) h;
10658 stub_type = arm_type_of_stub (info, input_section, rel,
10659 st_type, &branch_type,
10660 hash, value, sym_sec,
10661 input_bfd, sym_name);
10663 if (stub_type != arm_stub_none)
10665 /* The target is out of reach, so redirect the
10666 branch to the local stub for this function. */
10667 stub_entry = elf32_arm_get_stub_entry (input_section,
10668 sym_sec, h,
10669 rel, globals,
10670 stub_type);
10672 if (stub_entry != NULL)
10673 value = (stub_entry->stub_offset
10674 + stub_entry->stub_sec->output_offset
10675 + stub_entry->stub_sec->output_section->vma);
10677 if (plt_offset != (bfd_vma) -1)
10678 *unresolved_reloc_p = false;
10681 else
10683 /* If the call goes through a PLT entry, make sure to
10684 check distance to the right destination address. */
10685 if (plt_offset != (bfd_vma) -1)
10687 value = (splt->output_section->vma
10688 + splt->output_offset
10689 + plt_offset);
10690 *unresolved_reloc_p = false;
10691 /* The PLT entry is in ARM mode, regardless of the
10692 target function. */
10693 branch_type = ST_BRANCH_TO_ARM;
10698 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10699 where:
10700 S is the address of the symbol in the relocation.
10701 P is address of the instruction being relocated.
10702 A is the addend (extracted from the instruction) in bytes.
10704 S is held in 'value'.
10705 P is the base address of the section containing the
10706 instruction plus the offset of the reloc into that
10707 section, ie:
10708 (input_section->output_section->vma +
10709 input_section->output_offset +
10710 rel->r_offset).
10711 A is the addend, converted into bytes, ie:
10712 (signed_addend * 4)
10714 Note: None of these operations have knowledge of the pipeline
10715 size of the processor, thus it is up to the assembler to
10716 encode this information into the addend. */
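/* A small worked example (addresses invented): a BL at P == 0x8000 whose
   field already encodes the usual -8 pipeline adjustment, targeting
   S == 0x9000, gives value == 0x9000 - 0x8000 - 8 == 0xff8; after the
   2-bit right shift below, 0x3fe is what ends up in the 24-bit branch
   field. */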
10717 value -= (input_section->output_section->vma
10718 + input_section->output_offset);
10719 value -= rel->r_offset;
10720 value += signed_addend;
10722 signed_addend = value;
10723 signed_addend >>= howto->rightshift;
10725 /* A branch to an undefined weak symbol is turned into a jump to
10726 the next instruction unless a PLT entry will be created.
10727 Do the same for local undefined symbols (but not for STN_UNDEF).
10728 The jump to the next instruction is optimized as a NOP depending
10729 on the architecture. */
10730 if (h ? (h->root.type == bfd_link_hash_undefweak
10731 && plt_offset == (bfd_vma) -1)
10732 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10734 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10736 if (arch_has_arm_nop (globals))
10737 value |= 0x0320f000;
10738 else
10739 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10741 else
10743 /* Perform a signed range check. */
10744 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
10745 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10746 return bfd_reloc_overflow;
10748 addend = (value & 2);
10750 value = (signed_addend & howto->dst_mask)
10751 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10753 if (r_type == R_ARM_CALL)
10755 /* Set the H bit in the BLX instruction. */
10756 if (branch_type == ST_BRANCH_TO_THUMB)
10758 if (addend)
10759 value |= (1 << 24);
10760 else
10761 value &= ~(bfd_vma)(1 << 24);
10764 /* Select the correct instruction (BL or BLX). */
10765 /* Only if we are not handling a BL to a stub. In this
10766 case, mode switching is performed by the stub. */
10767 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10768 value |= (1 << 28);
10769 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10771 value &= ~(bfd_vma)(1 << 28);
10772 value |= (1 << 24);
10777 break;
10779 case R_ARM_ABS32:
10780 value += addend;
10781 if (branch_type == ST_BRANCH_TO_THUMB)
10782 value |= 1;
10783 break;
10785 case R_ARM_ABS32_NOI:
10786 value += addend;
10787 break;
10789 case R_ARM_REL32:
10790 value += addend;
10791 if (branch_type == ST_BRANCH_TO_THUMB)
10792 value |= 1;
10793 value -= (input_section->output_section->vma
10794 + input_section->output_offset + rel->r_offset);
10795 break;
10797 case R_ARM_REL32_NOI:
10798 value += addend;
10799 value -= (input_section->output_section->vma
10800 + input_section->output_offset + rel->r_offset);
10801 break;
10803 case R_ARM_PREL31:
10804 value -= (input_section->output_section->vma
10805 + input_section->output_offset + rel->r_offset);
10806 value += signed_addend;
10807 if (! h || h->root.type != bfd_link_hash_undefweak)
10809 /* Check for overflow. */
10810 if ((value ^ (value >> 1)) & (1 << 30))
10811 return bfd_reloc_overflow;
10813 value &= 0x7fffffff;
10814 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10815 if (branch_type == ST_BRANCH_TO_THUMB)
10816 value |= 1;
10817 break;
10820 bfd_put_32 (input_bfd, value, hit_data);
10821 return bfd_reloc_ok;
10823 case R_ARM_ABS8:
10824 value += addend;
10826 /* There is no way to tell whether the user intended to use a signed or
10827 unsigned addend. When checking for overflow we accept either,
10828 as specified by the AAELF. */
10829 if ((long) value > 0xff || (long) value < -0x80)
10830 return bfd_reloc_overflow;
10832 bfd_put_8 (input_bfd, value, hit_data);
10833 return bfd_reloc_ok;
10835 case R_ARM_ABS16:
10836 value += addend;
10838 /* See comment for R_ARM_ABS8. */
10839 if ((long) value > 0xffff || (long) value < -0x8000)
10840 return bfd_reloc_overflow;
10842 bfd_put_16 (input_bfd, value, hit_data);
10843 return bfd_reloc_ok;
10845 case R_ARM_THM_ABS5:
10846 /* Support ldr and str instructions for the thumb. */
10847 if (globals->use_rel)
10849 /* Need to refetch addend. */
10850 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10851 /* ??? Need to determine shift amount from operand size. */
10852 addend >>= howto->rightshift;
10854 value += addend;
10856 /* ??? Isn't value unsigned? */
10857 if ((long) value > 0x1f || (long) value < -0x10)
10858 return bfd_reloc_overflow;
10860 /* ??? Value needs to be properly shifted into place first. */
10861 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10862 bfd_put_16 (input_bfd, value, hit_data);
10863 return bfd_reloc_ok;
10865 case R_ARM_THM_ALU_PREL_11_0:
10866 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
10868 bfd_vma insn;
10869 bfd_signed_vma relocation;
10871 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10872 | bfd_get_16 (input_bfd, hit_data + 2);
10874 if (globals->use_rel)
10876 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10877 | ((insn & (1 << 26)) >> 15);
10878 if (insn & 0xf00000)
10879 signed_addend = -signed_addend;
10882 relocation = value + signed_addend;
10883 relocation -= Pa (input_section->output_section->vma
10884 + input_section->output_offset
10885 + rel->r_offset);
10887 /* PR 21523: Use an absolute value. The user of this reloc will
10888 have already selected an ADD or SUB insn appropriately. */
10889 value = llabs (relocation);
10891 if (value >= 0x1000)
10892 return bfd_reloc_overflow;
10894 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10895 if (branch_type == ST_BRANCH_TO_THUMB)
10896 value |= 1;
10898 insn = (insn & 0xfb0f8f00) | (value & 0xff)
10899 | ((value & 0x700) << 4)
10900 | ((value & 0x800) << 15);
10901 if (relocation < 0)
10902 insn |= 0xa00000;
10904 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10905 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10907 return bfd_reloc_ok;
10910 case R_ARM_THM_PC8:
10911 /* PR 10073: This reloc is not generated by the GNU toolchain,
10912 but it is supported for compatibility with third party libraries
10913 generated by other compilers, specifically the ARM and IAR compilers. */
10915 bfd_vma insn;
10916 bfd_signed_vma relocation;
10918 insn = bfd_get_16 (input_bfd, hit_data);
10920 if (globals->use_rel)
10921 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) - 4;
10923 relocation = value + addend;
10924 relocation -= Pa (input_section->output_section->vma
10925 + input_section->output_offset
10926 + rel->r_offset);
10928 value = relocation;
10930 /* We do not check for overflow of this reloc. Although strictly
10931 speaking this is incorrect, it appears to be necessary in order
10932 to work with IAR generated relocs. Since GCC and GAS do not
10933 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
10934 a problem for them. */
10935 value &= 0x3fc;
10937 insn = (insn & 0xff00) | (value >> 2);
10939 bfd_put_16 (input_bfd, insn, hit_data);
10941 return bfd_reloc_ok;
10944 case R_ARM_THM_PC12:
10945 /* Corresponds to: ldr.w reg, [pc, #offset]. */
10947 bfd_vma insn;
10948 bfd_signed_vma relocation;
10950 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10951 | bfd_get_16 (input_bfd, hit_data + 2);
10953 if (globals->use_rel)
10955 signed_addend = insn & 0xfff;
10956 if (!(insn & (1 << 23)))
10957 signed_addend = -signed_addend;
10960 relocation = value + signed_addend;
10961 relocation -= Pa (input_section->output_section->vma
10962 + input_section->output_offset
10963 + rel->r_offset);
10965 value = relocation;
10967 if (value >= 0x1000)
10968 return bfd_reloc_overflow;
10970 insn = (insn & 0xff7ff000) | value;
10971 if (relocation >= 0)
10972 insn |= (1 << 23);
10974 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10975 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10977 return bfd_reloc_ok;
10980 case R_ARM_THM_XPC22:
10981 case R_ARM_THM_CALL:
10982 case R_ARM_THM_JUMP24:
10983 /* Thumb BL (branch long instruction). */
10985 bfd_vma relocation;
10986 bfd_vma reloc_sign;
10987 bool overflow = false;
10988 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
10989 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
10990 bfd_signed_vma reloc_signed_max;
10991 bfd_signed_vma reloc_signed_min;
10992 bfd_vma check;
10993 bfd_signed_vma signed_check;
10994 int bitsize;
10995 const int thumb2 = using_thumb2 (globals);
10996 const int thumb2_bl = using_thumb2_bl (globals);
10998 /* A branch to an undefined weak symbol is turned into a jump to
10999 the next instruction unless a PLT entry will be created.
11000 The jump to the next instruction is optimized as a NOP.W for
11001 Thumb-2 enabled architectures. */
11002 if (h && h->root.type == bfd_link_hash_undefweak
11003 && plt_offset == (bfd_vma) -1)
11005 if (thumb2)
11007 bfd_put_16 (input_bfd, 0xf3af, hit_data);
11008 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
11010 else
11012 bfd_put_16 (input_bfd, 0xe000, hit_data);
11013 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
11015 return bfd_reloc_ok;
11018 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
11019 with Thumb-1) involving the J1 and J2 bits. */
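/* Roughly, the branch offset is reassembled below as
     SignExtend (S:I1:I2:imm10:imm11:'0')
   with I1 == NOT (J1 XOR S) and I2 == NOT (J2 XOR S); S and imm10 live
   in the first halfword, J1, J2 and imm11 in the second.  Thumb-1 BL
   pairs always have J1 == J2 == 1, which reduces I1 and I2 to copies of
   the sign bit and is what makes the encoding backwards compatible. */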
11020 if (globals->use_rel)
11022 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
11023 bfd_vma upper = upper_insn & 0x3ff;
11024 bfd_vma lower = lower_insn & 0x7ff;
11025 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
11026 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
11027 bfd_vma i1 = j1 ^ s ? 0 : 1;
11028 bfd_vma i2 = j2 ^ s ? 0 : 1;
11030 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
11031 /* Sign extend. */
11032 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
11034 signed_addend = addend;
11037 if (r_type == R_ARM_THM_XPC22)
11039 /* Check for Thumb to Thumb call. */
11040 /* FIXME: Should we translate the instruction into a BL
11041 instruction instead? */
11042 if (branch_type == ST_BRANCH_TO_THUMB)
11043 _bfd_error_handler
11044 (_("%pB: warning: %s BLX instruction targets"
11045 " %s function '%s'"),
11046 input_bfd, "Thumb",
11047 "Thumb", h ? h->root.root.string : "(local)");
11049 else
11051 /* If it is not a call to Thumb, assume call to Arm.
11052 If it is a call relative to a section name, then it is not a
11053 function call at all, but rather a long jump. Calls through
11054 the PLT do not require stubs. */
11055 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
11057 if (globals->use_blx && r_type == R_ARM_THM_CALL)
11059 /* Convert BL to BLX. */
11060 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11062 else if (( r_type != R_ARM_THM_CALL)
11063 && (r_type != R_ARM_THM_JUMP24))
11065 if (elf32_thumb_to_arm_stub
11066 (info, sym_name, input_bfd, output_bfd, input_section,
11067 hit_data, sym_sec, rel->r_offset, signed_addend, value,
11068 error_message))
11069 return bfd_reloc_ok;
11070 else
11071 return bfd_reloc_dangerous;
11074 else if (branch_type == ST_BRANCH_TO_THUMB
11075 && globals->use_blx
11076 && r_type == R_ARM_THM_CALL)
11078 /* Make sure this is a BL. */
11079 lower_insn |= 0x1800;
11083 enum elf32_arm_stub_type stub_type = arm_stub_none;
11084 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
11086 /* Check if a stub has to be inserted because the destination
11087 is too far. */
11088 struct elf32_arm_stub_hash_entry *stub_entry;
11089 struct elf32_arm_link_hash_entry *hash;
11091 hash = (struct elf32_arm_link_hash_entry *) h;
11093 stub_type = arm_type_of_stub (info, input_section, rel,
11094 st_type, &branch_type,
11095 hash, value, sym_sec,
11096 input_bfd, sym_name);
11098 if (stub_type != arm_stub_none)
11100 /* The target is out of reach or we are changing modes, so
11101 redirect the branch to the local stub for this
11102 function. */
11103 stub_entry = elf32_arm_get_stub_entry (input_section,
11104 sym_sec, h,
11105 rel, globals,
11106 stub_type);
11107 if (stub_entry != NULL)
11109 value = (stub_entry->stub_offset
11110 + stub_entry->stub_sec->output_offset
11111 + stub_entry->stub_sec->output_section->vma);
11113 if (plt_offset != (bfd_vma) -1)
11114 *unresolved_reloc_p = false;
11117 /* If this call becomes a call to Arm, force BLX. */
11118 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
11120 if ((stub_entry
11121 && !arm_stub_is_thumb (stub_entry->stub_type))
11122 || branch_type != ST_BRANCH_TO_THUMB)
11123 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11128 /* Handle calls via the PLT. */
11129 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
11131 value = (splt->output_section->vma
11132 + splt->output_offset
11133 + plt_offset);
11135 if (globals->use_blx
11136 && r_type == R_ARM_THM_CALL
11137 && ! using_thumb_only (globals))
11139 /* If the Thumb BLX instruction is available, convert
11140 the BL to a BLX instruction to call the ARM-mode
11141 PLT entry. */
11142 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11143 branch_type = ST_BRANCH_TO_ARM;
11145 else
11147 if (! using_thumb_only (globals))
11148 /* Target the Thumb stub before the ARM PLT entry. */
11149 value -= PLT_THUMB_STUB_SIZE;
11150 branch_type = ST_BRANCH_TO_THUMB;
11152 *unresolved_reloc_p = false;
11155 relocation = value + signed_addend;
11157 relocation -= (input_section->output_section->vma
11158 + input_section->output_offset
11159 + rel->r_offset);
11161 check = relocation >> howto->rightshift;
11163 /* If this is a signed value, the rightshift just dropped
11164 leading 1 bits (assuming two's complement). */
11165 if ((bfd_signed_vma) relocation >= 0)
11166 signed_check = check;
11167 else
11168 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
11170 /* Calculate the permissible maximum and minimum values for
11171 this relocation according to whether we're relocating for
11172 Thumb-2 or not. */
11173 bitsize = howto->bitsize;
11174 if (!thumb2_bl)
11175 bitsize -= 2;
11176 reloc_signed_max = (1 << (bitsize - 1)) - 1;
11177 reloc_signed_min = ~reloc_signed_max;
11179 /* Assumes two's complement. */
11180 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11181 overflow = true;
11183 if ((lower_insn & 0x5000) == 0x4000)
11184 /* For a BLX instruction, make sure that the relocation is rounded up
11185 to a word boundary. This follows the semantics of the instruction
11186 which specifies that bit 1 of the target address will come from bit
11187 1 of the base address. */
11188 relocation = (relocation + 2) & ~ 3;
11190 /* Put RELOCATION back into the insn. Assumes two's complement.
11191 We use the Thumb-2 encoding, which is safe even if dealing with
11192 a Thumb-1 instruction by virtue of our overflow check above. */
11193 reloc_sign = (signed_check < 0) ? 1 : 0;
11194 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
11195 | ((relocation >> 12) & 0x3ff)
11196 | (reloc_sign << 10);
11197 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
11198 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
11199 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
11200 | ((relocation >> 1) & 0x7ff);
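 /* Here J1 = NOT(I1) EOR S and J2 = NOT(I2) EOR S, with I1 and I2
 taken from bits 23 and 22 of the computed offset. */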
11202 /* Put the relocated value back in the object file: */
11203 bfd_put_16 (input_bfd, upper_insn, hit_data);
11204 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11206 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11208 break;
11210 case R_ARM_THM_JUMP19:
11211 /* Thumb32 conditional branch instruction. */
11213 bfd_vma relocation;
11214 bool overflow = false;
11215 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11216 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11217 bfd_signed_vma reloc_signed_max = 0xffffe;
11218 bfd_signed_vma reloc_signed_min = -0x100000;
11219 bfd_signed_vma signed_check;
11220 enum elf32_arm_stub_type stub_type = arm_stub_none;
11221 struct elf32_arm_stub_hash_entry *stub_entry;
11222 struct elf32_arm_link_hash_entry *hash;
11224 /* Need to refetch the addend, reconstruct the top three bits,
11225 and squish the two 11 bit pieces together. */
11226 if (globals->use_rel)
11228 bfd_vma S = (upper_insn & 0x0400) >> 10;
11229 bfd_vma upper = (upper_insn & 0x003f);
11230 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
11231 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
11232 bfd_vma lower = (lower_insn & 0x07ff);
11234 upper |= J1 << 6;
11235 upper |= J2 << 7;
11236 upper |= (!S) << 8;
11237 upper -= 0x0100; /* Sign extend. */
11239 addend = (upper << 12) | (lower << 1);
11240 signed_addend = addend;
11243 /* Handle calls via the PLT. */
11244 if (plt_offset != (bfd_vma) -1)
11246 value = (splt->output_section->vma
11247 + splt->output_offset
11248 + plt_offset);
11249 /* Target the Thumb stub before the ARM PLT entry. */
11250 value -= PLT_THUMB_STUB_SIZE;
11251 *unresolved_reloc_p = false;
11254 hash = (struct elf32_arm_link_hash_entry *)h;
11256 stub_type = arm_type_of_stub (info, input_section, rel,
11257 st_type, &branch_type,
11258 hash, value, sym_sec,
11259 input_bfd, sym_name);
11260 if (stub_type != arm_stub_none)
11262 stub_entry = elf32_arm_get_stub_entry (input_section,
11263 sym_sec, h,
11264 rel, globals,
11265 stub_type);
11266 if (stub_entry != NULL)
11268 value = (stub_entry->stub_offset
11269 + stub_entry->stub_sec->output_offset
11270 + stub_entry->stub_sec->output_section->vma);
11274 relocation = value + signed_addend;
11275 relocation -= (input_section->output_section->vma
11276 + input_section->output_offset
11277 + rel->r_offset);
11278 signed_check = (bfd_signed_vma) relocation;
11280 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11281 overflow = true;
11283 /* Put RELOCATION back into the insn. */
11285 bfd_vma S = (relocation & 0x00100000) >> 20;
11286 bfd_vma J2 = (relocation & 0x00080000) >> 19;
11287 bfd_vma J1 = (relocation & 0x00040000) >> 18;
11288 bfd_vma hi = (relocation & 0x0003f000) >> 12;
11289 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
11291 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
11292 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
11295 /* Put the relocated value back in the object file: */
11296 bfd_put_16 (input_bfd, upper_insn, hit_data);
11297 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11299 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11302 case R_ARM_THM_JUMP11:
11303 case R_ARM_THM_JUMP8:
11304 case R_ARM_THM_JUMP6:
11305 /* Thumb B (branch) instruction. */
11307 bfd_signed_vma relocation;
11308 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
11309 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
11310 bfd_signed_vma signed_check;
11312 /* CBZ/CBNZ cannot jump backward. */
11313 if (r_type == R_ARM_THM_JUMP6)
11315 reloc_signed_min = 0;
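 /* The CBZ/CBNZ offset is i:imm5:'0', with i in bit 9 and imm5 in
 bits 7:3 of the instruction, giving a forward range of 0-126 bytes. */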
11316 if (globals->use_rel)
11317 signed_addend = ((addend & 0x200) >> 3) | ((addend & 0xf8) >> 2);
11320 relocation = value + signed_addend;
11322 relocation -= (input_section->output_section->vma
11323 + input_section->output_offset
11324 + rel->r_offset);
11326 relocation >>= howto->rightshift;
11327 signed_check = relocation;
11329 if (r_type == R_ARM_THM_JUMP6)
11330 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
11331 else
11332 relocation &= howto->dst_mask;
11333 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
11335 bfd_put_16 (input_bfd, relocation, hit_data);
11337 /* Assumes two's complement. */
11338 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11339 return bfd_reloc_overflow;
11341 return bfd_reloc_ok;
11344 case R_ARM_ALU_PCREL7_0:
11345 case R_ARM_ALU_PCREL15_8:
11346 case R_ARM_ALU_PCREL23_15:
11348 bfd_vma insn;
11349 bfd_vma relocation;
11351 insn = bfd_get_32 (input_bfd, hit_data);
11352 if (globals->use_rel)
11354 /* Extract the addend. */
11355 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11356 signed_addend = addend;
11358 relocation = value + signed_addend;
11360 relocation -= (input_section->output_section->vma
11361 + input_section->output_offset
11362 + rel->r_offset);
11363 insn = (insn & ~0xfff)
11364 | ((howto->bitpos << 7) & 0xf00)
11365 | ((relocation >> howto->bitpos) & 0xff);
11366 bfd_put_32 (input_bfd, insn, hit_data);
11368 return bfd_reloc_ok;
11370 case R_ARM_GNU_VTINHERIT:
11371 case R_ARM_GNU_VTENTRY:
11372 return bfd_reloc_ok;
11374 case R_ARM_GOTOFF32:
11375 /* Relocation is relative to the start of the
11376 global offset table. */
11378 BFD_ASSERT (sgot != NULL);
11379 if (sgot == NULL)
11380 return bfd_reloc_notsupported;
11382 /* If we are addressing a Thumb function, we need to adjust the
11383 address by one, so that attempts to call the function pointer will
11384 correctly interpret it as Thumb code. */
11385 if (branch_type == ST_BRANCH_TO_THUMB)
11386 value += 1;
11388 /* Note that sgot->output_offset is not involved in this
11389 calculation. We always want the start of .got. If we
11390 define _GLOBAL_OFFSET_TABLE in a different way, as is
11391 permitted by the ABI, we might have to change this
11392 calculation. */
11393 value -= sgot->output_section->vma;
11394 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11395 contents, rel->r_offset, value,
11396 rel->r_addend);
11398 case R_ARM_GOTPC:
11399 /* Use global offset table as symbol value. */
11400 BFD_ASSERT (sgot != NULL);
11402 if (sgot == NULL)
11403 return bfd_reloc_notsupported;
11405 *unresolved_reloc_p = false;
11406 value = sgot->output_section->vma;
11407 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11408 contents, rel->r_offset, value,
11409 rel->r_addend);
11411 case R_ARM_GOT32:
11412 case R_ARM_GOT_PREL:
11413 /* Relocation is to the entry for this symbol in the
11414 global offset table. */
11415 if (sgot == NULL)
11416 return bfd_reloc_notsupported;
11418 if (dynreloc_st_type == STT_GNU_IFUNC
11419 && plt_offset != (bfd_vma) -1
11420 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
11422 /* We have a relocation against a locally-binding STT_GNU_IFUNC
11423 symbol, and the relocation resolves directly to the runtime
11424 target rather than to the .iplt entry. This means that any
11425 .got entry would be the same value as the .igot.plt entry,
11426 so there's no point creating both. */
11427 sgot = globals->root.igotplt;
11428 value = sgot->output_offset + gotplt_offset;
11430 else if (h != NULL)
11432 bfd_vma off;
11434 off = h->got.offset;
11435 BFD_ASSERT (off != (bfd_vma) -1);
11436 if ((off & 1) != 0)
11438 /* We have already processed one GOT relocation against
11439 this symbol. */
11440 off &= ~1;
11441 if (globals->root.dynamic_sections_created
11442 && !SYMBOL_REFERENCES_LOCAL (info, h))
11443 *unresolved_reloc_p = false;
11445 else
11447 Elf_Internal_Rela outrel;
11448 int isrofixup = 0;
11450 if (((h->dynindx != -1) || globals->fdpic_p)
11451 && !SYMBOL_REFERENCES_LOCAL (info, h))
11453 /* If the symbol doesn't resolve locally in a static
11454 object, we have an undefined reference. If the
11455 symbol doesn't resolve locally in a dynamic object,
11456 it should be resolved by the dynamic linker. */
11457 if (globals->root.dynamic_sections_created)
11459 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11460 *unresolved_reloc_p = false;
11462 else
11463 outrel.r_info = 0;
11464 outrel.r_addend = 0;
11466 else
11468 if (dynreloc_st_type == STT_GNU_IFUNC)
11469 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11470 else if (bfd_link_pic (info)
11471 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
11472 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11473 else
11475 outrel.r_info = 0;
11476 if (globals->fdpic_p)
11477 isrofixup = 1;
11479 outrel.r_addend = dynreloc_value;
11482 /* The GOT entry is initialized to zero by default.
11483 See if we should install a different value. */
11484 if (outrel.r_addend != 0
11485 && (globals->use_rel || outrel.r_info == 0))
11487 bfd_put_32 (output_bfd, outrel.r_addend,
11488 sgot->contents + off);
11489 outrel.r_addend = 0;
11492 if (isrofixup)
11493 arm_elf_add_rofixup (output_bfd,
11494 elf32_arm_hash_table (info)->srofixup,
11495 sgot->output_section->vma
11496 + sgot->output_offset + off);
11498 else if (outrel.r_info != 0)
11500 outrel.r_offset = (sgot->output_section->vma
11501 + sgot->output_offset
11502 + off);
11503 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11506 h->got.offset |= 1;
11508 value = sgot->output_offset + off;
11510 else
11512 bfd_vma off;
11514 BFD_ASSERT (local_got_offsets != NULL
11515 && local_got_offsets[r_symndx] != (bfd_vma) -1);
11517 off = local_got_offsets[r_symndx];
11519 /* The offset must always be a multiple of 4. We use the
11520 least significant bit to record whether we have already
11521 generated the necessary reloc. */
11522 if ((off & 1) != 0)
11523 off &= ~1;
11524 else
11526 Elf_Internal_Rela outrel;
11527 int isrofixup = 0;
11529 if (dynreloc_st_type == STT_GNU_IFUNC)
11530 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11531 else if (bfd_link_pic (info))
11532 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11533 else
11535 outrel.r_info = 0;
11536 if (globals->fdpic_p)
11537 isrofixup = 1;
11540 /* The GOT entry is initialized to zero by default.
11541 See if we should install a different value. */
11542 if (globals->use_rel || outrel.r_info == 0)
11543 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
11545 if (isrofixup)
11546 arm_elf_add_rofixup (output_bfd,
11547 globals->srofixup,
11548 sgot->output_section->vma
11549 + sgot->output_offset + off);
11551 else if (outrel.r_info != 0)
11553 outrel.r_addend = addend + dynreloc_value;
11554 outrel.r_offset = (sgot->output_section->vma
11555 + sgot->output_offset
11556 + off);
11557 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11560 local_got_offsets[r_symndx] |= 1;
11563 value = sgot->output_offset + off;
11565 if (r_type != R_ARM_GOT32)
11566 value += sgot->output_section->vma;
11568 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11569 contents, rel->r_offset, value,
11570 rel->r_addend);
11572 case R_ARM_TLS_LDO32:
11573 value = value - dtpoff_base (info);
11575 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11576 contents, rel->r_offset, value,
11577 rel->r_addend);
11579 case R_ARM_TLS_LDM32:
11580 case R_ARM_TLS_LDM32_FDPIC:
11582 bfd_vma off;
11584 if (sgot == NULL)
11585 abort ();
11587 off = globals->tls_ldm_got.offset;
11589 if ((off & 1) != 0)
11590 off &= ~1;
11591 else
11593 /* If we don't know the module number, create a relocation
11594 for it. */
11595 if (bfd_link_dll (info))
11597 Elf_Internal_Rela outrel;
11599 if (srelgot == NULL)
11600 abort ();
11602 outrel.r_addend = 0;
11603 outrel.r_offset = (sgot->output_section->vma
11604 + sgot->output_offset + off);
11605 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
11607 if (globals->use_rel)
11608 bfd_put_32 (output_bfd, outrel.r_addend,
11609 sgot->contents + off);
11611 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11613 else
11614 bfd_put_32 (output_bfd, 1, sgot->contents + off);
11616 globals->tls_ldm_got.offset |= 1;
11619 if (r_type == R_ARM_TLS_LDM32_FDPIC)
11621 bfd_put_32 (output_bfd,
11622 globals->root.sgot->output_offset + off,
11623 contents + rel->r_offset);
11625 return bfd_reloc_ok;
11627 else
11629 value = sgot->output_section->vma + sgot->output_offset + off
11630 - (input_section->output_section->vma
11631 + input_section->output_offset + rel->r_offset);
11633 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11634 contents, rel->r_offset, value,
11635 rel->r_addend);
11639 case R_ARM_TLS_CALL:
11640 case R_ARM_THM_TLS_CALL:
11641 case R_ARM_TLS_GD32:
11642 case R_ARM_TLS_GD32_FDPIC:
11643 case R_ARM_TLS_IE32:
11644 case R_ARM_TLS_IE32_FDPIC:
11645 case R_ARM_TLS_GOTDESC:
11646 case R_ARM_TLS_DESCSEQ:
11647 case R_ARM_THM_TLS_DESCSEQ:
11649 bfd_vma off, offplt;
11650 int indx = 0;
11651 char tls_type;
11653 BFD_ASSERT (sgot != NULL);
11655 if (h != NULL)
11657 bool dyn;
11658 dyn = globals->root.dynamic_sections_created;
11659 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
11660 bfd_link_pic (info),
11662 && (!bfd_link_pic (info)
11663 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11665 *unresolved_reloc_p = false;
11666 indx = h->dynindx;
11668 off = h->got.offset;
11669 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
11670 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
11672 else
11674 BFD_ASSERT (local_got_offsets != NULL);
11676 if (r_symndx >= elf32_arm_num_entries (input_bfd))
11678 _bfd_error_handler (_("\
11679 %pB: expected symbol index in range 0..%lu but found local symbol with index %lu"),
11680 input_bfd,
11681 (unsigned long) elf32_arm_num_entries (input_bfd),
11682 r_symndx);
11683 return false;
11685 off = local_got_offsets[r_symndx];
11686 offplt = local_tlsdesc_gotents[r_symndx];
11687 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
11690 /* Linker relaxation happens from one of the
11691 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
11692 if (ELF32_R_TYPE (rel->r_info) != r_type)
11693 tls_type = GOT_TLS_IE;
11695 BFD_ASSERT (tls_type != GOT_UNKNOWN);
11697 if ((off & 1) != 0)
11698 off &= ~1;
11699 else
11701 bool need_relocs = false;
11702 Elf_Internal_Rela outrel;
11703 int cur_off = off;
11705 /* The GOT entries have not been initialized yet. Do it
11706 now, and emit any relocations. If both an IE GOT and a
11707 GD GOT are necessary, we emit the GD first. */
11709 if ((bfd_link_dll (info) || indx != 0)
11710 && (h == NULL
11711 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11712 && !resolved_to_zero)
11713 || h->root.type != bfd_link_hash_undefweak))
11715 need_relocs = true;
11716 BFD_ASSERT (srelgot != NULL);
11719 if (tls_type & GOT_TLS_GDESC)
11721 bfd_byte *loc;
11723 /* We should have relaxed, unless this is an undefined
11724 weak symbol. */
11725 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
11726 || bfd_link_dll (info));
11727 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
11728 <= globals->root.sgotplt->size);
11730 outrel.r_addend = 0;
11731 outrel.r_offset = (globals->root.sgotplt->output_section->vma
11732 + globals->root.sgotplt->output_offset
11733 + offplt
11734 + globals->sgotplt_jump_table_size);
11736 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
11737 sreloc = globals->root.srelplt;
11738 loc = sreloc->contents;
11739 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
11740 BFD_ASSERT (loc + RELOC_SIZE (globals)
11741 <= sreloc->contents + sreloc->size);
11743 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
11745 /* For globals, the first word in the relocation gets
11746 the relocation index and the top bit set, or zero,
11747 if we're binding now. For locals, it gets the
11748 symbol's offset in the tls section. */
11749 bfd_put_32 (output_bfd,
11750 !h ? value - elf_hash_table (info)->tls_sec->vma
11751 : info->flags & DF_BIND_NOW ? 0
11752 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
11753 globals->root.sgotplt->contents + offplt
11754 + globals->sgotplt_jump_table_size);
11756 /* Second word in the relocation is always zero. */
11757 bfd_put_32 (output_bfd, 0,
11758 globals->root.sgotplt->contents + offplt
11759 + globals->sgotplt_jump_table_size + 4);
11761 if (tls_type & GOT_TLS_GD)
11763 if (need_relocs)
11765 outrel.r_addend = 0;
11766 outrel.r_offset = (sgot->output_section->vma
11767 + sgot->output_offset
11768 + cur_off);
11769 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
11771 if (globals->use_rel)
11772 bfd_put_32 (output_bfd, outrel.r_addend,
11773 sgot->contents + cur_off);
11775 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11777 if (indx == 0)
11778 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11779 sgot->contents + cur_off + 4);
11780 else
11782 outrel.r_addend = 0;
11783 outrel.r_info = ELF32_R_INFO (indx,
11784 R_ARM_TLS_DTPOFF32);
11785 outrel.r_offset += 4;
11787 if (globals->use_rel)
11788 bfd_put_32 (output_bfd, outrel.r_addend,
11789 sgot->contents + cur_off + 4);
11791 elf32_arm_add_dynreloc (output_bfd, info,
11792 srelgot, &outrel);
11795 else
11797 /* If we are not emitting relocations for a
11798 general dynamic reference, then we must be in a
11799 static link or an executable link with the
11800 symbol binding locally. Mark it as belonging
11801 to module 1, the executable. */
11802 bfd_put_32 (output_bfd, 1,
11803 sgot->contents + cur_off);
11804 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11805 sgot->contents + cur_off + 4);
11808 cur_off += 8;
11811 if (tls_type & GOT_TLS_IE)
11813 if (need_relocs)
11815 if (indx == 0)
11816 outrel.r_addend = value - dtpoff_base (info);
11817 else
11818 outrel.r_addend = 0;
11819 outrel.r_offset = (sgot->output_section->vma
11820 + sgot->output_offset
11821 + cur_off);
11822 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
11824 if (globals->use_rel)
11825 bfd_put_32 (output_bfd, outrel.r_addend,
11826 sgot->contents + cur_off);
11828 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11830 else
11831 bfd_put_32 (output_bfd, tpoff (info, value),
11832 sgot->contents + cur_off);
11833 cur_off += 4;
11836 if (h != NULL)
11837 h->got.offset |= 1;
11838 else
11839 local_got_offsets[r_symndx] |= 1;
11842 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
11843 off += 8;
11844 else if (tls_type & GOT_TLS_GDESC)
11845 off = offplt;
11847 if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
11848 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL)
11850 bfd_signed_vma offset;
11851 /* TLS stubs are ARM mode. The original symbol is a
11852 data object, so branch_type is bogus. */
11853 branch_type = ST_BRANCH_TO_ARM;
11854 enum elf32_arm_stub_type stub_type
11855 = arm_type_of_stub (info, input_section, rel,
11856 st_type, &branch_type,
11857 (struct elf32_arm_link_hash_entry *)h,
11858 globals->tls_trampoline, globals->root.splt,
11859 input_bfd, sym_name);
11861 if (stub_type != arm_stub_none)
11863 struct elf32_arm_stub_hash_entry *stub_entry
11864 = elf32_arm_get_stub_entry
11865 (input_section, globals->root.splt, 0, rel,
11866 globals, stub_type);
11867 offset = (stub_entry->stub_offset
11868 + stub_entry->stub_sec->output_offset
11869 + stub_entry->stub_sec->output_section->vma);
11871 else
11872 offset = (globals->root.splt->output_section->vma
11873 + globals->root.splt->output_offset
11874 + globals->tls_trampoline);
11876 if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL)
11878 unsigned long inst;
11880 offset -= (input_section->output_section->vma
11881 + input_section->output_offset
11882 + rel->r_offset + 8);
11884 inst = offset >> 2;
11885 inst &= 0x00ffffff;
11886 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
11888 else
11890 /* Thumb blx encodes the offset in a complicated
11891 fashion. */
11892 unsigned upper_insn, lower_insn;
11893 unsigned neg;
11895 offset -= (input_section->output_section->vma
11896 + input_section->output_offset
11897 + rel->r_offset + 4);
11899 if (stub_type != arm_stub_none
11900 && arm_stub_is_thumb (stub_type))
11902 lower_insn = 0xd000;
11904 else
11906 lower_insn = 0xc000;
11907 /* Round up the offset to a word boundary. */
11908 offset = (offset + 2) & ~2;
11911 neg = offset < 0;
11912 upper_insn = (0xf000
11913 | ((offset >> 12) & 0x3ff)
11914 | (neg << 10));
11915 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
11916 | (((!((offset >> 22) & 1)) ^ neg) << 11)
11917 | ((offset >> 1) & 0x7ff);
11918 bfd_put_16 (input_bfd, upper_insn, hit_data);
11919 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11920 return bfd_reloc_ok;
11923 /* These relocations need special care: besides pointing
11924 somewhere in .gotplt, the addend must be adjusted
11925 according to the type of instruction that refers
11926 to it. */
11927 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
11929 unsigned long data, insn;
11930 unsigned thumb;
11932 data = bfd_get_signed_32 (input_bfd, hit_data);
11933 thumb = data & 1;
11934 data &= ~1ul;
11936 if (thumb)
11938 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
11939 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
11940 insn = (insn << 16)
11941 | bfd_get_16 (input_bfd,
11942 contents + rel->r_offset - data + 2);
11943 if ((insn & 0xf800c000) == 0xf000c000)
11944 /* bl/blx */
11945 value = -6;
11946 else if ((insn & 0xffffff00) == 0x4400)
11947 /* add */
11948 value = -5;
11949 else
11951 _bfd_error_handler
11952 /* xgettext:c-format */
11953 (_("%pB(%pA+%#" PRIx64 "): "
11954 "unexpected %s instruction '%#lx' "
11955 "referenced by TLS_GOTDESC"),
11956 input_bfd, input_section, (uint64_t) rel->r_offset,
11957 "Thumb", insn);
11958 return bfd_reloc_notsupported;
11961 else
11963 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
11965 switch (insn >> 24)
11967 case 0xeb: /* bl */
11968 case 0xfa: /* blx */
11969 value = -4;
11970 break;
11972 case 0xe0: /* add */
11973 value = -8;
11974 break;
11976 default:
11977 _bfd_error_handler
11978 /* xgettext:c-format */
11979 (_("%pB(%pA+%#" PRIx64 "): "
11980 "unexpected %s instruction '%#lx' "
11981 "referenced by TLS_GOTDESC"),
11982 input_bfd, input_section, (uint64_t) rel->r_offset,
11983 "ARM", insn);
11984 return bfd_reloc_notsupported;
11988 value += ((globals->root.sgotplt->output_section->vma
11989 + globals->root.sgotplt->output_offset + off)
11990 - (input_section->output_section->vma
11991 + input_section->output_offset
11992 + rel->r_offset)
11993 + globals->sgotplt_jump_table_size);
11995 else
11996 value = ((globals->root.sgot->output_section->vma
11997 + globals->root.sgot->output_offset + off)
11998 - (input_section->output_section->vma
11999 + input_section->output_offset + rel->r_offset));
12001 if (globals->fdpic_p
12002 && (r_type == R_ARM_TLS_GD32_FDPIC || r_type == R_ARM_TLS_IE32_FDPIC))
12004 /* For FDPIC relocations, resolve to the offset of the GOT
12005 entry from the start of GOT. */
12006 bfd_put_32 (output_bfd,
12007 globals->root.sgot->output_offset + off,
12008 contents + rel->r_offset);
12010 return bfd_reloc_ok;
12012 else
12014 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12015 contents, rel->r_offset, value,
12016 rel->r_addend);
12020 case R_ARM_TLS_LE32:
12021 if (bfd_link_dll (info))
12023 _bfd_error_handler
12024 /* xgettext:c-format */
12025 (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
12026 "in shared object"),
12027 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
12028 return bfd_reloc_notsupported;
12030 else
12031 value = tpoff (info, value);
12033 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12034 contents, rel->r_offset, value,
12035 rel->r_addend);
12037 case R_ARM_V4BX:
12038 if (globals->fix_v4bx)
12040 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12042 /* Ensure that we have a BX instruction. */
12043 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12045 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12047 /* Branch to veneer. */
12048 bfd_vma glue_addr;
12049 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12050 glue_addr -= input_section->output_section->vma
12051 + input_section->output_offset
12052 + rel->r_offset + 8;
12053 insn = (insn & 0xf0000000) | 0x0a000000
12054 | ((glue_addr >> 2) & 0x00ffffff);
12056 else
12058 /* Preserve Rm (lowest four bits) and the condition code
12059 (highest four bits). Other bits encode MOV PC,Rm. */
12060 insn = (insn & 0xf000000f) | 0x01a0f000;
12063 bfd_put_32 (input_bfd, insn, hit_data);
12065 return bfd_reloc_ok;
12067 case R_ARM_MOVW_ABS_NC:
12068 case R_ARM_MOVT_ABS:
12069 case R_ARM_MOVW_PREL_NC:
12070 case R_ARM_MOVT_PREL:
12071 /* Until we properly support segment-base-relative addressing,
12072 we assume the segment base to be zero, as for the group relocations.
12073 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12074 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
12075 case R_ARM_MOVW_BREL_NC:
12076 case R_ARM_MOVW_BREL:
12077 case R_ARM_MOVT_BREL:
12079 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12081 if (globals->use_rel)
12083 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
12084 signed_addend = (addend ^ 0x8000) - 0x8000;
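 /* The MOVW/MOVT immediate is imm4:imm12 (bits 19:16 and 11:0);
 XORing with 0x8000 and subtracting 0x8000 sign-extends the
 reassembled 16-bit value. */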
12087 value += signed_addend;
12089 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12090 value -= (input_section->output_section->vma
12091 + input_section->output_offset + rel->r_offset);
12093 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12094 return bfd_reloc_overflow;
12096 if (branch_type == ST_BRANCH_TO_THUMB)
12097 value |= 1;
12099 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12100 || r_type == R_ARM_MOVT_BREL)
12101 value >>= 16;
12103 insn &= 0xfff0f000;
12104 insn |= value & 0xfff;
12105 insn |= (value & 0xf000) << 4;
12106 bfd_put_32 (input_bfd, insn, hit_data);
12108 return bfd_reloc_ok;
12110 case R_ARM_THM_MOVW_ABS_NC:
12111 case R_ARM_THM_MOVT_ABS:
12112 case R_ARM_THM_MOVW_PREL_NC:
12113 case R_ARM_THM_MOVT_PREL:
12114 /* Until we properly support segment-base-relative addressing,
12115 we assume the segment base to be zero, as for the above relocations.
12116 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12117 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12118 as R_ARM_THM_MOVT_ABS. */
12119 case R_ARM_THM_MOVW_BREL_NC:
12120 case R_ARM_THM_MOVW_BREL:
12121 case R_ARM_THM_MOVT_BREL:
12123 bfd_vma insn;
12125 insn = bfd_get_16 (input_bfd, hit_data) << 16;
12126 insn |= bfd_get_16 (input_bfd, hit_data + 2);
12128 if (globals->use_rel)
12130 addend = ((insn >> 4) & 0xf000)
12131 | ((insn >> 15) & 0x0800)
12132 | ((insn >> 4) & 0x0700)
12133 | (insn & 0x00ff);
12134 signed_addend = (addend ^ 0x8000) - 0x8000;
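 /* The Thumb MOVW/MOVT immediate is imm4:i:imm3:imm8, taken from
 bits 19:16, 26, 14:12 and 7:0 of the combined 32-bit encoding;
 the XOR/subtract then sign-extends the 16-bit result. */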
12137 value += signed_addend;
12139 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12140 value -= (input_section->output_section->vma
12141 + input_section->output_offset + rel->r_offset);
12143 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12144 return bfd_reloc_overflow;
12146 if (branch_type == ST_BRANCH_TO_THUMB)
12147 value |= 1;
12149 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12150 || r_type == R_ARM_THM_MOVT_BREL)
12151 value >>= 16;
12153 insn &= 0xfbf08f00;
12154 insn |= (value & 0xf000) << 4;
12155 insn |= (value & 0x0800) << 15;
12156 insn |= (value & 0x0700) << 4;
12157 insn |= (value & 0x00ff);
12159 bfd_put_16 (input_bfd, insn >> 16, hit_data);
12160 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12162 return bfd_reloc_ok;
12164 case R_ARM_ALU_PC_G0_NC:
12165 case R_ARM_ALU_PC_G1_NC:
12166 case R_ARM_ALU_PC_G0:
12167 case R_ARM_ALU_PC_G1:
12168 case R_ARM_ALU_PC_G2:
12169 case R_ARM_ALU_SB_G0_NC:
12170 case R_ARM_ALU_SB_G1_NC:
12171 case R_ARM_ALU_SB_G0:
12172 case R_ARM_ALU_SB_G1:
12173 case R_ARM_ALU_SB_G2:
12175 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12176 bfd_vma pc = input_section->output_section->vma
12177 + input_section->output_offset + rel->r_offset;
12178 /* sb is the origin of the *segment* containing the symbol. */
12179 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12180 bfd_vma residual;
12181 bfd_vma g_n;
12182 bfd_signed_vma signed_value;
12183 int group = 0;
12185 /* Determine which group of bits to select. */
12186 switch (r_type)
12188 case R_ARM_ALU_PC_G0_NC:
12189 case R_ARM_ALU_PC_G0:
12190 case R_ARM_ALU_SB_G0_NC:
12191 case R_ARM_ALU_SB_G0:
12192 group = 0;
12193 break;
12195 case R_ARM_ALU_PC_G1_NC:
12196 case R_ARM_ALU_PC_G1:
12197 case R_ARM_ALU_SB_G1_NC:
12198 case R_ARM_ALU_SB_G1:
12199 group = 1;
12200 break;
12202 case R_ARM_ALU_PC_G2:
12203 case R_ARM_ALU_SB_G2:
12204 group = 2;
12205 break;
12207 default:
12208 abort ();
12211 /* If REL, extract the addend from the insn. If RELA, it will
12212 have already been fetched for us. */
12213 if (globals->use_rel)
12215 int negative;
12216 bfd_vma constant = insn & 0xff;
12217 bfd_vma rotation = (insn & 0xf00) >> 8;
12219 if (rotation == 0)
12220 signed_addend = constant;
12221 else
12223 /* Compensate for the fact that in the instruction, the
12224 rotation is stored in multiples of 2 bits. */
12225 rotation *= 2;
12227 /* Rotate "constant" right by "rotation" bits. */
12228 signed_addend = (constant >> rotation) |
12229 (constant << (8 * sizeof (bfd_vma) - rotation));
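 /* This performs the rotate-right of the ARM modified immediate
 (an 8-bit constant rotated right by twice the 4-bit rotation
 field) within a full bfd_vma. */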
12232 /* Determine if the instruction is an ADD or a SUB.
12233 (For REL, this determines the sign of the addend.) */
12234 negative = identify_add_or_sub (insn);
12235 if (negative == 0)
12237 _bfd_error_handler
12238 /* xgettext:c-format */
12239 (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12240 "are allowed for ALU group relocations"),
12241 input_bfd, input_section, (uint64_t) rel->r_offset);
12242 return bfd_reloc_overflow;
12245 signed_addend *= negative;
12248 /* Compute the value (X) to go in the place. */
12249 if (r_type == R_ARM_ALU_PC_G0_NC
12250 || r_type == R_ARM_ALU_PC_G1_NC
12251 || r_type == R_ARM_ALU_PC_G0
12252 || r_type == R_ARM_ALU_PC_G1
12253 || r_type == R_ARM_ALU_PC_G2)
12254 /* PC relative. */
12255 signed_value = value - pc + signed_addend;
12256 else
12257 /* Section base relative. */
12258 signed_value = value - sb + signed_addend;
12260 /* If the target symbol is a Thumb function, then set the
12261 Thumb bit in the address. */
12262 if (branch_type == ST_BRANCH_TO_THUMB)
12263 signed_value |= 1;
12265 /* Calculate the value of the relevant G_n, in encoded
12266 constant-with-rotation format. */
12267 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12268 group, &residual);
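 /* Each G_n is the next most-significant chunk of the value that
 can be encoded as a rotated 8-bit constant; RESIDUAL receives
 whatever remains once G_0 through G_n have been removed. */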
12270 /* Check for overflow if required. */
12271 if ((r_type == R_ARM_ALU_PC_G0
12272 || r_type == R_ARM_ALU_PC_G1
12273 || r_type == R_ARM_ALU_PC_G2
12274 || r_type == R_ARM_ALU_SB_G0
12275 || r_type == R_ARM_ALU_SB_G1
12276 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12278 _bfd_error_handler
12279 /* xgettext:c-format */
12280 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12281 "splitting %#" PRIx64 " for group relocation %s"),
12282 input_bfd, input_section, (uint64_t) rel->r_offset,
12283 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12284 howto->name);
12285 return bfd_reloc_overflow;
12288 /* Mask out the value and the ADD/SUB part of the opcode; take care
12289 not to destroy the S bit. */
12290 insn &= 0xff1ff000;
12292 /* Set the opcode according to whether the value to go in the
12293 place is negative. */
12294 if (signed_value < 0)
12295 insn |= 1 << 22;
12296 else
12297 insn |= 1 << 23;
12299 /* Encode the offset. */
12300 insn |= g_n;
12302 bfd_put_32 (input_bfd, insn, hit_data);
12304 return bfd_reloc_ok;
12306 case R_ARM_LDR_PC_G0:
12307 case R_ARM_LDR_PC_G1:
12308 case R_ARM_LDR_PC_G2:
12309 case R_ARM_LDR_SB_G0:
12310 case R_ARM_LDR_SB_G1:
12311 case R_ARM_LDR_SB_G2:
12313 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12314 bfd_vma pc = input_section->output_section->vma
12315 + input_section->output_offset + rel->r_offset;
12316 /* sb is the origin of the *segment* containing the symbol. */
12317 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12318 bfd_vma residual;
12319 bfd_signed_vma signed_value;
12320 int group = 0;
12322 /* Determine which groups of bits to calculate. */
12323 switch (r_type)
12325 case R_ARM_LDR_PC_G0:
12326 case R_ARM_LDR_SB_G0:
12327 group = 0;
12328 break;
12330 case R_ARM_LDR_PC_G1:
12331 case R_ARM_LDR_SB_G1:
12332 group = 1;
12333 break;
12335 case R_ARM_LDR_PC_G2:
12336 case R_ARM_LDR_SB_G2:
12337 group = 2;
12338 break;
12340 default:
12341 abort ();
12344 /* If REL, extract the addend from the insn. If RELA, it will
12345 have already been fetched for us. */
12346 if (globals->use_rel)
12348 int negative = (insn & (1 << 23)) ? 1 : -1;
12349 signed_addend = negative * (insn & 0xfff);
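 /* Bit 23 is the U (add/subtract) bit of the LDR/STR addressing
 mode: when set, the 12-bit immediate is added to the base;
 when clear, it is subtracted. */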
12352 /* Compute the value (X) to go in the place. */
12353 if (r_type == R_ARM_LDR_PC_G0
12354 || r_type == R_ARM_LDR_PC_G1
12355 || r_type == R_ARM_LDR_PC_G2)
12356 /* PC relative. */
12357 signed_value = value - pc + signed_addend;
12358 else
12359 /* Section base relative. */
12360 signed_value = value - sb + signed_addend;
12362 /* Calculate the value of the relevant G_{n-1} to obtain
12363 the residual at that stage. */
12364 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12365 group - 1, &residual);
12367 /* Check for overflow. */
12368 if (residual >= 0x1000)
12370 _bfd_error_handler
12371 /* xgettext:c-format */
12372 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12373 "splitting %#" PRIx64 " for group relocation %s"),
12374 input_bfd, input_section, (uint64_t) rel->r_offset,
12375 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12376 howto->name);
12377 return bfd_reloc_overflow;
12380 /* Mask out the value and U bit. */
12381 insn &= 0xff7ff000;
12383 /* Set the U bit if the value to go in the place is non-negative. */
12384 if (signed_value >= 0)
12385 insn |= 1 << 23;
12387 /* Encode the offset. */
12388 insn |= residual;
12390 bfd_put_32 (input_bfd, insn, hit_data);
12392 return bfd_reloc_ok;
12394 case R_ARM_LDRS_PC_G0:
12395 case R_ARM_LDRS_PC_G1:
12396 case R_ARM_LDRS_PC_G2:
12397 case R_ARM_LDRS_SB_G0:
12398 case R_ARM_LDRS_SB_G1:
12399 case R_ARM_LDRS_SB_G2:
12401 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12402 bfd_vma pc = input_section->output_section->vma
12403 + input_section->output_offset + rel->r_offset;
12404 /* sb is the origin of the *segment* containing the symbol. */
12405 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12406 bfd_vma residual;
12407 bfd_signed_vma signed_value;
12408 int group = 0;
12410 /* Determine which groups of bits to calculate. */
12411 switch (r_type)
12413 case R_ARM_LDRS_PC_G0:
12414 case R_ARM_LDRS_SB_G0:
12415 group = 0;
12416 break;
12418 case R_ARM_LDRS_PC_G1:
12419 case R_ARM_LDRS_SB_G1:
12420 group = 1;
12421 break;
12423 case R_ARM_LDRS_PC_G2:
12424 case R_ARM_LDRS_SB_G2:
12425 group = 2;
12426 break;
12428 default:
12429 abort ();
12432 /* If REL, extract the addend from the insn. If RELA, it will
12433 have already been fetched for us. */
12434 if (globals->use_rel)
12436 int negative = (insn & (1 << 23)) ? 1 : -1;
12437 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
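 /* The LDRH/LDRSB/LDRSH immediate is split into immedH (bits 11:8)
 and immedL (bits 3:0); bit 23 is again the U (add/subtract) bit. */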
12440 /* Compute the value (X) to go in the place. */
12441 if (r_type == R_ARM_LDRS_PC_G0
12442 || r_type == R_ARM_LDRS_PC_G1
12443 || r_type == R_ARM_LDRS_PC_G2)
12444 /* PC relative. */
12445 signed_value = value - pc + signed_addend;
12446 else
12447 /* Section base relative. */
12448 signed_value = value - sb + signed_addend;
12450 /* Calculate the value of the relevant G_{n-1} to obtain
12451 the residual at that stage. */
12452 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12453 group - 1, &residual);
12455 /* Check for overflow. */
12456 if (residual >= 0x100)
12458 _bfd_error_handler
12459 /* xgettext:c-format */
12460 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12461 "splitting %#" PRIx64 " for group relocation %s"),
12462 input_bfd, input_section, (uint64_t) rel->r_offset,
12463 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12464 howto->name);
12465 return bfd_reloc_overflow;
12468 /* Mask out the value and U bit. */
12469 insn &= 0xff7ff0f0;
12471 /* Set the U bit if the value to go in the place is non-negative. */
12472 if (signed_value >= 0)
12473 insn |= 1 << 23;
12475 /* Encode the offset. */
12476 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12478 bfd_put_32 (input_bfd, insn, hit_data);
12480 return bfd_reloc_ok;
12482 case R_ARM_LDC_PC_G0:
12483 case R_ARM_LDC_PC_G1:
12484 case R_ARM_LDC_PC_G2:
12485 case R_ARM_LDC_SB_G0:
12486 case R_ARM_LDC_SB_G1:
12487 case R_ARM_LDC_SB_G2:
12489 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12490 bfd_vma pc = input_section->output_section->vma
12491 + input_section->output_offset + rel->r_offset;
12492 /* sb is the origin of the *segment* containing the symbol. */
12493 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12494 bfd_vma residual;
12495 bfd_signed_vma signed_value;
12496 int group = 0;
12498 /* Determine which groups of bits to calculate. */
12499 switch (r_type)
12501 case R_ARM_LDC_PC_G0:
12502 case R_ARM_LDC_SB_G0:
12503 group = 0;
12504 break;
12506 case R_ARM_LDC_PC_G1:
12507 case R_ARM_LDC_SB_G1:
12508 group = 1;
12509 break;
12511 case R_ARM_LDC_PC_G2:
12512 case R_ARM_LDC_SB_G2:
12513 group = 2;
12514 break;
12516 default:
12517 abort ();
12520 /* If REL, extract the addend from the insn. If RELA, it will
12521 have already been fetched for us. */
12522 if (globals->use_rel)
12524 int negative = (insn & (1 << 23)) ? 1 : -1;
12525 signed_addend = negative * ((insn & 0xff) << 2);
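 /* The LDC/STC immediate is an 8-bit word offset, so it is scaled
 by 4 to get the byte addend; bit 23 is the U (add/subtract) bit. */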
12528 /* Compute the value (X) to go in the place. */
12529 if (r_type == R_ARM_LDC_PC_G0
12530 || r_type == R_ARM_LDC_PC_G1
12531 || r_type == R_ARM_LDC_PC_G2)
12532 /* PC relative. */
12533 signed_value = value - pc + signed_addend;
12534 else
12535 /* Section base relative. */
12536 signed_value = value - sb + signed_addend;
12538 /* Calculate the value of the relevant G_{n-1} to obtain
12539 the residual at that stage. */
12540 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12541 group - 1, &residual);
12543 /* Check for overflow. (The absolute value to go in the place must be
12544 divisible by four and, after having been divided by four, must
12545 fit in eight bits.) */
12546 if ((residual & 0x3) != 0 || residual >= 0x400)
12548 _bfd_error_handler
12549 /* xgettext:c-format */
12550 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12551 "splitting %#" PRIx64 " for group relocation %s"),
12552 input_bfd, input_section, (uint64_t) rel->r_offset,
12553 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12554 howto->name);
12555 return bfd_reloc_overflow;
12558 /* Mask out the value and U bit. */
12559 insn &= 0xff7fff00;
12561 /* Set the U bit if the value to go in the place is non-negative. */
12562 if (signed_value >= 0)
12563 insn |= 1 << 23;
12565 /* Encode the offset. */
12566 insn |= residual >> 2;
12568 bfd_put_32 (input_bfd, insn, hit_data);
12570 return bfd_reloc_ok;
12572 case R_ARM_THM_ALU_ABS_G0_NC:
12573 case R_ARM_THM_ALU_ABS_G1_NC:
12574 case R_ARM_THM_ALU_ABS_G2_NC:
12575 case R_ARM_THM_ALU_ABS_G3_NC:
12577 const int shift_array[4] = {0, 8, 16, 24};
12578 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12579 bfd_vma addr = value;
12580 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
12582 /* Compute address. */
12583 if (globals->use_rel)
12584 signed_addend = insn & 0xff;
12585 addr += signed_addend;
12586 if (branch_type == ST_BRANCH_TO_THUMB)
12587 addr |= 1;
12588 /* Clean imm8 insn. */
12589 insn &= 0xff00;
12590 /* And update with correct part of address. */
12591 insn |= (addr >> shift) & 0xff;
12592 /* Update insn. */
12593 bfd_put_16 (input_bfd, insn, hit_data);
12596 *unresolved_reloc_p = false;
12597 return bfd_reloc_ok;
12599 case R_ARM_GOTOFFFUNCDESC:
12601 if (h == NULL)
12603 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts (input_bfd);
12604 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12606 if (r_symndx >= elf32_arm_num_entries (input_bfd))
12608 * error_message = _("local symbol index too big");
12609 return bfd_reloc_dangerous;
12612 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12613 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12614 bfd_vma seg = -1;
12616 if (bfd_link_pic (info) && dynindx == 0)
12618 * error_message = _("no dynamic index information available");
12619 return bfd_reloc_dangerous;
12622 /* Resolve relocation. */
12623 bfd_put_32 (output_bfd, offset + sgot->output_offset,
12624 contents + rel->r_offset);
12625 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12626 not done yet. */
12627 arm_elf_fill_funcdesc (output_bfd, info,
12628 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12629 dynindx, offset, addr, dynreloc_value, seg);
12631 else
12633 int dynindx;
12634 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12635 bfd_vma addr;
12636 bfd_vma seg = -1;
12638 /* For static binaries, sym_sec can be null. */
12639 if (sym_sec)
12641 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12642 addr = dynreloc_value - sym_sec->output_section->vma;
12644 else
12646 dynindx = 0;
12647 addr = 0;
12650 if (bfd_link_pic (info) && dynindx == 0)
12652 * error_message = _("no dynamic index information available");
12653 return bfd_reloc_dangerous;
12656 /* This case cannot occur: the funcdesc would be allocated by
12657 the dynamic loader, so we could not resolve the relocation here. */
12658 if (h->dynindx != -1)
12660 * error_message = _("invalid dynamic index");
12661 return bfd_reloc_dangerous;
12664 /* Resolve relocation. */
12665 bfd_put_32 (output_bfd, (offset + sgot->output_offset),
12666 contents + rel->r_offset);
12667 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12668 arm_elf_fill_funcdesc (output_bfd, info,
12669 &eh->fdpic_cnts.funcdesc_offset,
12670 dynindx, offset, addr, dynreloc_value, seg);
12673 *unresolved_reloc_p = false;
12674 return bfd_reloc_ok;
12676 case R_ARM_GOTFUNCDESC:
12678 if (h != NULL)
12680 Elf_Internal_Rela outrel;
12682 /* Resolve relocation. */
12683 bfd_put_32 (output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12684 + sgot->output_offset),
12685 contents + rel->r_offset);
12686 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12687 if (h->dynindx == -1)
12689 int dynindx;
12690 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12691 bfd_vma addr;
12692 bfd_vma seg = -1;
12694 /* For static binaries sym_sec can be null. */
12695 if (sym_sec)
12697 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12698 addr = dynreloc_value - sym_sec->output_section->vma;
12700 else
12702 dynindx = 0;
12703 addr = 0;
12706 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12707 arm_elf_fill_funcdesc (output_bfd, info,
12708 &eh->fdpic_cnts.funcdesc_offset,
12709 dynindx, offset, addr, dynreloc_value, seg);
12712 /* Add a dynamic relocation on GOT entry if not already done. */
12713 if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12715 if (h->dynindx == -1)
12717 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12718 if (h->root.type == bfd_link_hash_undefweak)
12719 bfd_put_32 (output_bfd, 0, sgot->contents
12720 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12721 else
12722 bfd_put_32 (output_bfd, sgot->output_section->vma
12723 + sgot->output_offset
12724 + (eh->fdpic_cnts.funcdesc_offset & ~1),
12725 sgot->contents
12726 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12728 else
12730 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12732 outrel.r_offset = sgot->output_section->vma
12733 + sgot->output_offset
12734 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12735 outrel.r_addend = 0;
12736 if (h->dynindx == -1 && !bfd_link_pic (info))
12737 if (h->root.type == bfd_link_hash_undefweak)
12738 arm_elf_add_rofixup (output_bfd, globals->srofixup, -1);
12739 else
12740 arm_elf_add_rofixup (output_bfd, globals->srofixup,
12741 outrel.r_offset);
12742 else
12743 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12744 eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12747 else
12749 /* Such a relocation on a static function should not have
12750 been emitted by the compiler. */
12751 return bfd_reloc_notsupported;
12754 *unresolved_reloc_p = false;
12755 return bfd_reloc_ok;
12757 case R_ARM_FUNCDESC:
12759 if (h == NULL)
12761 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts (input_bfd);
12762 Elf_Internal_Rela outrel;
12763 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12765 if (r_symndx >= elf32_arm_num_entries (input_bfd))
12767 * error_message = _("local symbol index too big");
12768 return bfd_reloc_dangerous;
12771 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12772 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12773 bfd_vma seg = -1;
12775 if (bfd_link_pic (info) && dynindx == 0)
12777 * error_message = _("dynamic index information not available");
12778 return bfd_reloc_dangerous;
12781 /* Replace static FUNCDESC relocation with a
12782 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12783 executable. */
12784 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12785 outrel.r_offset = input_section->output_section->vma
12786 + input_section->output_offset + rel->r_offset;
12787 outrel.r_addend = 0;
12788 if (bfd_link_pic (info))
12789 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12790 else
12791 arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
12793 bfd_put_32 (input_bfd, sgot->output_section->vma
12794 + sgot->output_offset + offset, hit_data);
12796 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12797 arm_elf_fill_funcdesc (output_bfd, info,
12798 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12799 dynindx, offset, addr, dynreloc_value, seg);
12801 else
12803 if (h->dynindx == -1)
12805 int dynindx;
12806 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12807 bfd_vma addr;
12808 bfd_vma seg = -1;
12809 Elf_Internal_Rela outrel;
12811 /* For static binaries sym_sec can be null. */
12812 if (sym_sec)
12814 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12815 addr = dynreloc_value - sym_sec->output_section->vma;
12817 else
12819 dynindx = 0;
12820 addr = 0;
12823 if (bfd_link_pic (info) && dynindx == 0)
12824 abort ();
12826 /* Replace static FUNCDESC relocation with a
12827 R_ARM_RELATIVE dynamic relocation. */
12828 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12829 outrel.r_offset = input_section->output_section->vma
12830 + input_section->output_offset + rel->r_offset;
12831 outrel.r_addend = 0;
12832 if (bfd_link_pic (info))
12833 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12834 else
12835 arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
12837 bfd_put_32 (input_bfd, sgot->output_section->vma
12838 + sgot->output_offset + offset, hit_data);
12840 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12841 arm_elf_fill_funcdesc (output_bfd, info,
12842 &eh->fdpic_cnts.funcdesc_offset,
12843 dynindx, offset, addr, dynreloc_value, seg);
12845 else
12847 Elf_Internal_Rela outrel;
12849 /* Add a dynamic relocation. */
12850 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12851 outrel.r_offset = input_section->output_section->vma
12852 + input_section->output_offset + rel->r_offset;
12853 outrel.r_addend = 0;
12854 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12858 *unresolved_reloc_p = false;
12859 return bfd_reloc_ok;
12861 case R_ARM_THM_BF16:
12863 bfd_vma relocation;
12864 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12865 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12867 if (globals->use_rel)
12869 bfd_vma immA = (upper_insn & 0x001f);
12870 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12871 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12872 addend = (immA << 12);
12873 addend |= (immB << 2);
12874 addend |= (immC << 1);
12875 addend |= 1;
12876 /* Sign extend. */
12877 signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
12880 relocation = value + signed_addend;
12881 relocation -= (input_section->output_section->vma
12882 + input_section->output_offset
12883 + rel->r_offset);
12885 /* Put RELOCATION back into the insn. */
12887 bfd_vma immA = (relocation & 0x0001f000) >> 12;
12888 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12889 bfd_vma immC = (relocation & 0x00000002) >> 1;
12891 upper_insn = (upper_insn & 0xffe0) | immA;
12892 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12895 /* Put the relocated value back in the object file: */
12896 bfd_put_16 (input_bfd, upper_insn, hit_data);
12897 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12899 return bfd_reloc_ok;
12902 case R_ARM_THM_BF12:
12904 bfd_vma relocation;
12905 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12906 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12908 if (globals->use_rel)
12910 bfd_vma immA = (upper_insn & 0x0001);
12911 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12912 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12913 addend = (immA << 12);
12914 addend |= (immB << 2);
12915 addend |= (immC << 1);
12916 addend |= 1;
12917 /* Sign extend. */
12918 addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
12919 signed_addend = addend;
12922 relocation = value + signed_addend;
12923 relocation -= (input_section->output_section->vma
12924 + input_section->output_offset
12925 + rel->r_offset);
12927 /* Put RELOCATION back into the insn. */
12929 bfd_vma immA = (relocation & 0x00001000) >> 12;
12930 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12931 bfd_vma immC = (relocation & 0x00000002) >> 1;
12933 upper_insn = (upper_insn & 0xfffe) | immA;
12934 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12937 /* Put the relocated value back in the object file: */
12938 bfd_put_16 (input_bfd, upper_insn, hit_data);
12939 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12941 return bfd_reloc_ok;
12944 case R_ARM_THM_BF18:
12946 bfd_vma relocation;
12947 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12948 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12950 if (globals->use_rel)
12952 bfd_vma immA = (upper_insn & 0x007f);
12953 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12954 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12955 addend = (immA << 12);
12956 addend |= (immB << 2);
12957 addend |= (immC << 1);
12958 addend |= 1;
12959 /* Sign extend. */
12960 addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
12961 signed_addend = addend;
12964 relocation = value + signed_addend;
12965 relocation -= (input_section->output_section->vma
12966 + input_section->output_offset
12967 + rel->r_offset);
12969 /* Put RELOCATION back into the insn. */
12971 bfd_vma immA = (relocation & 0x0007f000) >> 12;
12972 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12973 bfd_vma immC = (relocation & 0x00000002) >> 1;
12975 upper_insn = (upper_insn & 0xff80) | immA;
12976 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12979 /* Put the relocated value back in the object file: */
12980 bfd_put_16 (input_bfd, upper_insn, hit_data);
12981 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12983 return bfd_reloc_ok;
12986 default:
12987 return bfd_reloc_notsupported;
12991 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
12992 static void
12993 arm_add_to_rel (bfd * abfd,
12994 bfd_byte * address,
12995 reloc_howto_type * howto,
12996 bfd_signed_vma increment)
12998 bfd_signed_vma addend;
13000 if (howto->type == R_ARM_THM_CALL
13001 || howto->type == R_ARM_THM_JUMP24)
13003 int upper_insn, lower_insn;
13004 int upper, lower;
13006 upper_insn = bfd_get_16 (abfd, address);
13007 lower_insn = bfd_get_16 (abfd, address + 2);
13008 upper = upper_insn & 0x7ff;
13009 lower = lower_insn & 0x7ff;
13011 addend = (upper << 12) | (lower << 1);
13012 addend += increment;
13013 addend >>= 1;
13015 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
13016 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
13018 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
13019 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
13021 else
13023 bfd_vma contents;
13025 contents = bfd_get_32 (abfd, address);
13027 /* Get the (signed) value from the instruction. */
13028 addend = contents & howto->src_mask;
13029 if (addend & ((howto->src_mask + 1) >> 1))
13031 bfd_signed_vma mask;
13033 mask = -1;
13034 mask &= ~ howto->src_mask;
13035 addend |= mask;
13038 /* Add in the increment (which is a byte value). */
13039 switch (howto->type)
13041 default:
13042 addend += increment;
13043 break;
13045 case R_ARM_PC24:
13046 case R_ARM_PLT32:
13047 case R_ARM_CALL:
13048 case R_ARM_JUMP24:
13049 addend <<= howto->size;
13050 addend += increment;
13052 /* Should we check for overflow here ? */
13054 /* Drop any undesired bits. */
13055 addend >>= howto->rightshift;
13056 break;
13059 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
13061 bfd_put_32 (abfd, contents, address);
13065 #define IS_ARM_TLS_RELOC(R_TYPE) \
13066 ((R_TYPE) == R_ARM_TLS_GD32 \
13067 || (R_TYPE) == R_ARM_TLS_GD32_FDPIC \
13068 || (R_TYPE) == R_ARM_TLS_LDO32 \
13069 || (R_TYPE) == R_ARM_TLS_LDM32 \
13070 || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC \
13071 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
13072 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
13073 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
13074 || (R_TYPE) == R_ARM_TLS_LE32 \
13075 || (R_TYPE) == R_ARM_TLS_IE32 \
13076 || (R_TYPE) == R_ARM_TLS_IE32_FDPIC \
13077 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
13079 /* Specific set of relocations for the gnu tls dialect. */
13080 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
13081 ((R_TYPE) == R_ARM_TLS_GOTDESC \
13082 || (R_TYPE) == R_ARM_TLS_CALL \
13083 || (R_TYPE) == R_ARM_THM_TLS_CALL \
13084 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
13085 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
13087 /* Relocate an ARM ELF section. */
13089 static int
13090 elf32_arm_relocate_section (bfd * output_bfd,
13091 struct bfd_link_info * info,
13092 bfd * input_bfd,
13093 asection * input_section,
13094 bfd_byte * contents,
13095 Elf_Internal_Rela * relocs,
13096 Elf_Internal_Sym * local_syms,
13097 asection ** local_sections)
13099 Elf_Internal_Shdr *symtab_hdr;
13100 struct elf_link_hash_entry **sym_hashes;
13101 Elf_Internal_Rela *rel;
13102 Elf_Internal_Rela *relend;
13103 const char *name;
13104 struct elf32_arm_link_hash_table * globals;
13106 globals = elf32_arm_hash_table (info);
13107 if (globals == NULL)
13108 return false;
13110 symtab_hdr = & elf_symtab_hdr (input_bfd);
13111 sym_hashes = elf_sym_hashes (input_bfd);
13113 rel = relocs;
13114 relend = relocs + input_section->reloc_count;
13115 for (; rel < relend; rel++)
13117 int r_type;
13118 reloc_howto_type * howto;
13119 unsigned long r_symndx;
13120 Elf_Internal_Sym * sym;
13121 asection * sec;
13122 struct elf_link_hash_entry * h;
13123 bfd_vma relocation;
13124 bfd_reloc_status_type r;
13125 arelent bfd_reloc;
13126 char sym_type;
13127 bool unresolved_reloc = false;
13128 char *error_message = NULL;
13130 r_symndx = ELF32_R_SYM (rel->r_info);
13131 r_type = ELF32_R_TYPE (rel->r_info);
13132 r_type = arm_real_reloc_type (globals, r_type);
13134 if ( r_type == R_ARM_GNU_VTENTRY
13135 || r_type == R_ARM_GNU_VTINHERIT)
13136 continue;
13138 howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
13140 if (howto == NULL)
13141 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
13143 h = NULL;
13144 sym = NULL;
13145 sec = NULL;
13147 if (r_symndx < symtab_hdr->sh_info)
13149 sym = local_syms + r_symndx;
13150 sym_type = ELF32_ST_TYPE (sym->st_info);
13151 sec = local_sections[r_symndx];
13153 /* An object file might have a reference to a local
13154 undefined symbol. This is a daft object file, but we
13155 should at least do something about it. V4BX & NONE
13156 relocations do not use the symbol and are explicitly
13157 allowed to use the undefined symbol, so allow those.
13158 Likewise for relocations against STN_UNDEF. */
13159 if (r_type != R_ARM_V4BX
13160 && r_type != R_ARM_NONE
13161 && r_symndx != STN_UNDEF
13162 && bfd_is_und_section (sec)
13163 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
13164 (*info->callbacks->undefined_symbol)
13165 (info, bfd_elf_string_from_elf_section
13166 (input_bfd, symtab_hdr->sh_link, sym->st_name),
13167 input_bfd, input_section,
13168 rel->r_offset, true);
13170 if (globals->use_rel)
13172 relocation = (sec->output_section->vma
13173 + sec->output_offset
13174 + sym->st_value);
13175 if (!bfd_link_relocatable (info)
13176 && (sec->flags & SEC_MERGE)
13177 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13179 asection *msec;
13180 bfd_vma addend, value;
13182 switch (r_type)
13184 case R_ARM_MOVW_ABS_NC:
13185 case R_ARM_MOVT_ABS:
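/* The ARM MOVW/MOVT immediate is split into imm4 (bits 19:16) and
   imm12 (bits 11:0) of the instruction; reassemble the 16-bit value
   and sign-extend it.  */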
13186 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13187 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
13188 addend = (addend ^ 0x8000) - 0x8000;
13189 break;
13191 case R_ARM_THM_MOVW_ABS_NC:
13192 case R_ARM_THM_MOVT_ABS:
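/* The Thumb MOVW/MOVT immediate is scattered as imm4:i:imm3:imm8
   across the two halfwords; reassemble the 16-bit value and
   sign-extend it.  */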
13193 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
13194 << 16;
13195 value |= bfd_get_16 (input_bfd,
13196 contents + rel->r_offset + 2);
13197 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
13198 | ((value & 0x04000000) >> 15);
13199 addend = (addend ^ 0x8000) - 0x8000;
13200 break;
13202 default:
13203 if (howto->rightshift
13204 || (howto->src_mask & (howto->src_mask + 1)))
13206 _bfd_error_handler
13207 /* xgettext:c-format */
13208 (_("%pB(%pA+%#" PRIx64 "): "
13209 "%s relocation against SEC_MERGE section"),
13210 input_bfd, input_section,
13211 (uint64_t) rel->r_offset, howto->name);
13212 return false;
13215 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13217 /* Get the (signed) value from the instruction. */
13218 addend = value & howto->src_mask;
13219 if (addend & ((howto->src_mask + 1) >> 1))
13221 bfd_signed_vma mask;
13223 mask = -1;
13224 mask &= ~ howto->src_mask;
13225 addend |= mask;
13227 break;
13230 msec = sec;
13231 addend =
13232 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
13233 - relocation;
13234 addend += msec->output_section->vma + msec->output_offset;
13236 /* Cases here must match those in the preceding
13237 switch statement. */
13238 switch (r_type)
13240 case R_ARM_MOVW_ABS_NC:
13241 case R_ARM_MOVT_ABS:
13242 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
13243 | (addend & 0xfff);
13244 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13245 break;
13247 case R_ARM_THM_MOVW_ABS_NC:
13248 case R_ARM_THM_MOVT_ABS:
13249 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
13250 | (addend & 0xff) | ((addend & 0x0800) << 15);
13251 bfd_put_16 (input_bfd, value >> 16,
13252 contents + rel->r_offset);
13253 bfd_put_16 (input_bfd, value,
13254 contents + rel->r_offset + 2);
13255 break;
13257 default:
13258 value = (value & ~ howto->dst_mask)
13259 | (addend & howto->dst_mask);
13260 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13261 break;
13265 else
13266 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
13268 else
13270 bool warned, ignored;
13272 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
13273 r_symndx, symtab_hdr, sym_hashes,
13274 h, sec, relocation,
13275 unresolved_reloc, warned, ignored);
13277 sym_type = h->type;
13280 if (sec != NULL && discarded_section (sec))
13281 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
13282 rel, 1, relend, howto, 0, contents);
13284 if (bfd_link_relocatable (info))
13286 /* This is a relocatable link. We don't have to change
13287 anything, unless the reloc is against a section symbol,
13288 in which case we have to adjust according to where the
13289 section symbol winds up in the output section. */
13290 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13292 if (globals->use_rel)
13293 arm_add_to_rel (input_bfd, contents + rel->r_offset,
13294 howto, (bfd_signed_vma) sec->output_offset);
13295 else
13296 rel->r_addend += sec->output_offset;
13298 continue;
13301 if (h != NULL)
13302 name = h->root.root.string;
13303 else
13305 name = (bfd_elf_string_from_elf_section
13306 (input_bfd, symtab_hdr->sh_link, sym->st_name));
13307 if (name == NULL || *name == '\0')
13308 name = bfd_section_name (sec);
13311 if (r_symndx != STN_UNDEF
13312 && r_type != R_ARM_NONE
13313 && (h == NULL
13314 || h->root.type == bfd_link_hash_defined
13315 || h->root.type == bfd_link_hash_defweak)
13316 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
13318 _bfd_error_handler
13319 ((sym_type == STT_TLS
13320 /* xgettext:c-format */
13321 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
13322 /* xgettext:c-format */
13323 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
13324 input_bfd,
13325 input_section,
13326 (uint64_t) rel->r_offset,
13327 howto->name,
13328 name);
13331 /* We call elf32_arm_final_link_relocate unless we're completely
13332 done, i.e., the relaxation produced the final output we want,
13333 and we won't let anybody mess with it. Also, we have to do
13334 addend adjustments in case of an R_ARM_TLS_GOTDESC relocation
13335 both in relaxed and non-relaxed cases. */
13336 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
13337 || (IS_ARM_TLS_GNU_RELOC (r_type)
13338 && !((h ? elf32_arm_hash_entry (h)->tls_type :
13339 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
13340 & GOT_TLS_GDESC)))
13342 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
13343 contents, rel, h == NULL);
13344 /* This may have been marked unresolved because it came from
13345 a shared library. But we've just dealt with that. */
13346 unresolved_reloc = 0;
13348 else
13349 r = bfd_reloc_continue;
13351 if (r == bfd_reloc_continue)
13353 unsigned char branch_type =
13354 h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
13355 : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
13357 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
13358 input_section, contents, rel,
13359 relocation, info, sec, name,
13360 sym_type, branch_type, h,
13361 &unresolved_reloc,
13362 &error_message);
13365 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13366 because such sections are not SEC_ALLOC and thus ld.so will
13367 not process them. */
13368 if (unresolved_reloc
13369 && !((input_section->flags & SEC_DEBUGGING) != 0
13370 && h->def_dynamic)
13371 && _bfd_elf_section_offset (output_bfd, info, input_section,
13372 rel->r_offset) != (bfd_vma) -1)
13374 _bfd_error_handler
13375 /* xgettext:c-format */
13376 (_("%pB(%pA+%#" PRIx64 "): "
13377 "unresolvable %s relocation against symbol `%s'"),
13378 input_bfd,
13379 input_section,
13380 (uint64_t) rel->r_offset,
13381 howto->name,
13382 h->root.root.string);
13383 return false;
13386 if (r != bfd_reloc_ok)
13388 switch (r)
13390 case bfd_reloc_overflow:
13391 /* If the overflowing reloc was to an undefined symbol,
13392 we have already printed one error message and there
13393 is no point complaining again. */
13394 if (!h || h->root.type != bfd_link_hash_undefined)
13395 (*info->callbacks->reloc_overflow)
13396 (info, (h ? &h->root : NULL), name, howto->name,
13397 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
13398 break;
13400 case bfd_reloc_undefined:
13401 (*info->callbacks->undefined_symbol)
13402 (info, name, input_bfd, input_section, rel->r_offset, true);
13403 break;
13405 case bfd_reloc_outofrange:
13406 error_message = _("out of range");
13407 goto common_error;
13409 case bfd_reloc_notsupported:
13410 error_message = _("unsupported relocation");
13411 goto common_error;
13413 case bfd_reloc_dangerous:
13414 /* error_message should already be set. */
13415 goto common_error;
13417 default:
13418 error_message = _("unknown error");
13419 /* Fall through. */
13421 common_error:
13422 BFD_ASSERT (error_message != NULL);
13423 (*info->callbacks->reloc_dangerous)
13424 (info, error_message, input_bfd, input_section, rel->r_offset);
13425 break;
13430 return true;
13433 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13434 adds the edit to the start of the list. (The list must be built in order of
13435 ascending TINDEX: the function's callers are primarily responsible for
13436 maintaining that condition). */
13438 static void
13439 add_unwind_table_edit (arm_unwind_table_edit **head,
13440 arm_unwind_table_edit **tail,
13441 arm_unwind_edit_type type,
13442 asection *linked_section,
13443 unsigned int tindex)
13445 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13446 xmalloc (sizeof (arm_unwind_table_edit));
13448 new_edit->type = type;
13449 new_edit->linked_section = linked_section;
13450 new_edit->index = tindex;
13452 if (tindex > 0)
13454 new_edit->next = NULL;
13456 if (*tail)
13457 (*tail)->next = new_edit;
13459 (*tail) = new_edit;
13461 if (!*head)
13462 (*head) = new_edit;
13464 else
13466 new_edit->next = *head;
13468 if (!*tail)
13469 *tail = new_edit;
13471 *head = new_edit;
13475 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13477 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
13479 static void
13480 adjust_exidx_size (asection *exidx_sec, int adjust)
13482 asection *out_sec;
13484 if (!exidx_sec->rawsize)
13485 exidx_sec->rawsize = exidx_sec->size;
13487 bfd_set_section_size (exidx_sec, exidx_sec->size + adjust);
13488 out_sec = exidx_sec->output_section;
13489 /* Adjust size of output section. */
13490 bfd_set_section_size (out_sec, out_sec->size + adjust);
13493 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13495 static void
13496 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
13498 struct _arm_elf_section_data *exidx_arm_data;
13500 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13501 add_unwind_table_edit
13502 (&exidx_arm_data->u.exidx.unwind_edit_list,
13503 &exidx_arm_data->u.exidx.unwind_edit_tail,
13504 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13506 exidx_arm_data->additional_reloc_count++;
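/* An EXIDX_CANTUNWIND entry, like every .ARM.exidx entry, occupies
   two 32-bit words.  */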
13508 adjust_exidx_size (exidx_sec, 8);
13511 /* Scan .ARM.exidx tables, and create a list describing edits which should be
13512 made to those tables, such that:
13514 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
13515 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
13516 codes which have been inlined into the index).
13518 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
13520 The edits are applied when the tables are written
13521 (in elf32_arm_write_section). */
13523 bool
13524 elf32_arm_fix_exidx_coverage (asection **text_section_order,
13525 unsigned int num_text_sections,
13526 struct bfd_link_info *info,
13527 bool merge_exidx_entries)
13529 bfd *inp;
13530 unsigned int last_second_word = 0, i;
13531 asection *last_exidx_sec = NULL;
13532 asection *last_text_sec = NULL;
13533 int last_unwind_type = -1;
13535 /* Walk over all EXIDX sections, and create backlinks from the corresponding
13536 text sections. */
13537 for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
13539 asection *sec;
13541 for (sec = inp->sections; sec != NULL; sec = sec->next)
13543 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
13544 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
13546 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
13547 continue;
13549 if (elf_sec->linked_to)
13551 Elf_Internal_Shdr *linked_hdr
13552 = &elf_section_data (elf_sec->linked_to)->this_hdr;
13553 struct _arm_elf_section_data *linked_sec_arm_data
13554 = get_arm_elf_section_data (linked_hdr->bfd_section);
13556 if (linked_sec_arm_data == NULL)
13557 continue;
13559 /* Link this .ARM.exidx section back from the text section it
13560 describes. */
13561 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
13566 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
13567 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
13568 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
13570 for (i = 0; i < num_text_sections; i++)
13572 asection *sec = text_section_order[i];
13573 asection *exidx_sec;
13574 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
13575 struct _arm_elf_section_data *exidx_arm_data;
13576 bfd_byte *contents = NULL;
13577 int deleted_exidx_bytes = 0;
13578 bfd_vma j;
13579 arm_unwind_table_edit *unwind_edit_head = NULL;
13580 arm_unwind_table_edit *unwind_edit_tail = NULL;
13581 Elf_Internal_Shdr *hdr;
13582 bfd *ibfd;
13584 if (arm_data == NULL)
13585 continue;
13587 exidx_sec = arm_data->u.text.arm_exidx_sec;
13588 if (exidx_sec == NULL)
13590 /* Section has no unwind data. */
13591 if (last_unwind_type == 0 || !last_exidx_sec)
13592 continue;
13594 /* Ignore zero sized sections. */
13595 if (sec->size == 0)
13596 continue;
13598 insert_cantunwind_after (last_text_sec, last_exidx_sec);
13599 last_unwind_type = 0;
13600 continue;
13603 /* Skip /DISCARD/ sections. */
13604 if (bfd_is_abs_section (exidx_sec->output_section))
13605 continue;
13607 hdr = &elf_section_data (exidx_sec)->this_hdr;
13608 if (hdr->sh_type != SHT_ARM_EXIDX)
13609 continue;
13611 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13612 if (exidx_arm_data == NULL)
13613 continue;
13615 ibfd = exidx_sec->owner;
13617 if (hdr->contents != NULL)
13618 contents = hdr->contents;
13619 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
13620 /* An error? */
13621 continue;
13623 if (last_unwind_type > 0)
13625 unsigned int first_word = bfd_get_32 (ibfd, contents);
13626 /* Add cantunwind if first unwind item does not match section
13627 start. */
13628 if (first_word != sec->vma)
13630 insert_cantunwind_after (last_text_sec, last_exidx_sec);
13631 last_unwind_type = 0;
13635 for (j = 0; j < hdr->sh_size; j += 8)
13637 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
13638 int unwind_type;
13639 int elide = 0;
13641 /* An EXIDX_CANTUNWIND entry. */
13642 if (second_word == 1)
13644 if (last_unwind_type == 0)
13645 elide = 1;
13646 unwind_type = 0;
13648 /* Inlined unwinding data. Merge if equal to previous. */
13649 else if ((second_word & 0x80000000) != 0)
13651 if (merge_exidx_entries
13652 && last_second_word == second_word && last_unwind_type == 1)
13653 elide = 1;
13654 unwind_type = 1;
13655 last_second_word = second_word;
13657 /* Normal table entry. In theory we could merge these too,
13658 but duplicate entries are likely to be much less common. */
13659 else
13660 unwind_type = 2;
13662 if (elide && !bfd_link_relocatable (info))
13664 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
13665 DELETE_EXIDX_ENTRY, NULL, j / 8);
13667 deleted_exidx_bytes += 8;
13670 last_unwind_type = unwind_type;
13673 /* Free contents if we allocated it ourselves. */
13674 if (contents != hdr->contents)
13675 free (contents);
13677 /* Record edits to be applied later (in elf32_arm_write_section). */
13678 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
13679 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
13681 if (deleted_exidx_bytes > 0)
13682 adjust_exidx_size (exidx_sec, - deleted_exidx_bytes);
13684 last_exidx_sec = exidx_sec;
13685 last_text_sec = sec;
13688 /* Add terminating CANTUNWIND entry. */
13689 if (!bfd_link_relocatable (info) && last_exidx_sec
13690 && last_unwind_type != 0)
13691 insert_cantunwind_after (last_text_sec, last_exidx_sec);
13693 return true;
13696 static bool
13697 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13698 bfd *ibfd, const char *name)
13700 asection *sec, *osec;
13702 sec = bfd_get_linker_section (ibfd, name);
13703 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13704 return true;
13706 osec = sec->output_section;
13707 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13708 return true;
13710 if (! bfd_set_section_contents (obfd, osec, sec->contents,
13711 sec->output_offset, sec->size))
13712 return false;
13714 return true;
13717 static bool
13718 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13720 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13721 asection *sec, *osec;
13723 if (globals == NULL)
13724 return false;
13726 /* Invoke the regular ELF backend linker to do all the work. */
13727 if (!bfd_elf_final_link (abfd, info))
13728 return false;
13730 /* Process stub sections (e.g. BE8 encoding, ...). */
13731 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13732 unsigned int i;
13733 for (i=0; i<htab->top_id; i++)
13735 sec = htab->stub_group[i].stub_sec;
13736 /* Only process it once, in its link_sec slot. */
13737 if (sec && i == htab->stub_group[i].link_sec->id)
13739 osec = sec->output_section;
13740 elf32_arm_write_section (abfd, info, sec, sec->contents);
13741 if (! bfd_set_section_contents (abfd, osec, sec->contents,
13742 sec->output_offset, sec->size))
13743 return false;
13747 /* Write out any glue sections now that we have created all the
13748 stubs. */
13749 if (globals->bfd_of_glue_owner != NULL)
13751 if (! elf32_arm_output_glue_section (info, abfd,
13752 globals->bfd_of_glue_owner,
13753 ARM2THUMB_GLUE_SECTION_NAME))
13754 return false;
13756 if (! elf32_arm_output_glue_section (info, abfd,
13757 globals->bfd_of_glue_owner,
13758 THUMB2ARM_GLUE_SECTION_NAME))
13759 return false;
13761 if (! elf32_arm_output_glue_section (info, abfd,
13762 globals->bfd_of_glue_owner,
13763 VFP11_ERRATUM_VENEER_SECTION_NAME))
13764 return false;
13766 if (! elf32_arm_output_glue_section (info, abfd,
13767 globals->bfd_of_glue_owner,
13768 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13769 return false;
13771 if (! elf32_arm_output_glue_section (info, abfd,
13772 globals->bfd_of_glue_owner,
13773 ARM_BX_GLUE_SECTION_NAME))
13774 return false;
13777 return true;
13780 /* Return a best guess for the machine number based on the attributes. */
13782 static unsigned int
13783 bfd_arm_get_mach_from_attributes (bfd * abfd)
13785 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
13787 switch (arch)
13789 case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
13790 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
13791 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
13792 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
13794 case TAG_CPU_ARCH_V5TE:
13796 char * name;
13798 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
13799 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
13801 if (name)
13803 if (strcmp (name, "IWMMXT2") == 0)
13804 return bfd_mach_arm_iWMMXt2;
13806 if (strcmp (name, "IWMMXT") == 0)
13807 return bfd_mach_arm_iWMMXt;
13809 if (strcmp (name, "XSCALE") == 0)
13811 int wmmx;
13813 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
13814 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
13815 switch (wmmx)
13817 case 1: return bfd_mach_arm_iWMMXt;
13818 case 2: return bfd_mach_arm_iWMMXt2;
13819 default: return bfd_mach_arm_XScale;
13824 return bfd_mach_arm_5TE;
13827 case TAG_CPU_ARCH_V5TEJ:
13828 return bfd_mach_arm_5TEJ;
13829 case TAG_CPU_ARCH_V6:
13830 return bfd_mach_arm_6;
13831 case TAG_CPU_ARCH_V6KZ:
13832 return bfd_mach_arm_6KZ;
13833 case TAG_CPU_ARCH_V6T2:
13834 return bfd_mach_arm_6T2;
13835 case TAG_CPU_ARCH_V6K:
13836 return bfd_mach_arm_6K;
13837 case TAG_CPU_ARCH_V7:
13838 return bfd_mach_arm_7;
13839 case TAG_CPU_ARCH_V6_M:
13840 return bfd_mach_arm_6M;
13841 case TAG_CPU_ARCH_V6S_M:
13842 return bfd_mach_arm_6SM;
13843 case TAG_CPU_ARCH_V7E_M:
13844 return bfd_mach_arm_7EM;
13845 case TAG_CPU_ARCH_V8:
13846 return bfd_mach_arm_8;
13847 case TAG_CPU_ARCH_V8R:
13848 return bfd_mach_arm_8R;
13849 case TAG_CPU_ARCH_V8M_BASE:
13850 return bfd_mach_arm_8M_BASE;
13851 case TAG_CPU_ARCH_V8M_MAIN:
13852 return bfd_mach_arm_8M_MAIN;
13853 case TAG_CPU_ARCH_V8_1M_MAIN:
13854 return bfd_mach_arm_8_1M_MAIN;
13856 default:
13857 /* Force entry to be added for any new known Tag_CPU_arch value. */
13858 BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);
13860 /* Unknown Tag_CPU_arch value. */
13861 return bfd_mach_arm_unknown;
13865 /* Set the right machine number. */
13867 static bool
13868 elf32_arm_object_p (bfd *abfd)
13870 unsigned int mach;
13872 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13874 if (mach == bfd_mach_arm_unknown)
13876 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13877 mach = bfd_mach_arm_ep9312;
13878 else
13879 mach = bfd_arm_get_mach_from_attributes (abfd);
13882 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13883 return true;
13886 /* Function to keep ARM specific flags in the ELF header. */
13888 static bool
13889 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13891 if (elf_flags_init (abfd)
13892 && elf_elfheader (abfd)->e_flags != flags)
13894 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13896 if (flags & EF_ARM_INTERWORK)
13897 _bfd_error_handler
13898 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13899 abfd);
13900 else
13901 _bfd_error_handler
13902 (_("warning: clearing the interworking flag of %pB due to outside request"),
13903 abfd);
13906 else
13908 elf_elfheader (abfd)->e_flags = flags;
13909 elf_flags_init (abfd) = true;
13912 return true;
13915 /* Copy backend specific data from one object module to another. */
13917 static bool
13918 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13920 flagword in_flags;
13921 flagword out_flags;
13923 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13924 return true;
13926 in_flags = elf_elfheader (ibfd)->e_flags;
13927 out_flags = elf_elfheader (obfd)->e_flags;
13929 if (elf_flags_init (obfd)
13930 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
13931 && in_flags != out_flags)
13933 /* Cannot mix APCS26 and APCS32 code. */
13934 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13935 return false;
13937 /* Cannot mix float APCS and non-float APCS code. */
13938 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13939 return false;
13941 /* If the src and dest have different interworking flags
13942 then turn off the interworking bit. */
13943 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
13945 if (out_flags & EF_ARM_INTERWORK)
13946 _bfd_error_handler
13947 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13948 obfd, ibfd);
13950 in_flags &= ~EF_ARM_INTERWORK;
13953 /* Likewise for PIC, though don't warn for this case. */
13954 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
13955 in_flags &= ~EF_ARM_PIC;
13958 elf_elfheader (obfd)->e_flags = in_flags;
13959 elf_flags_init (obfd) = true;
13961 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
13964 /* Values for Tag_ABI_PCS_R9_use. */
13965 enum
13967 AEABI_R9_V6,
13968 AEABI_R9_SB,
13969 AEABI_R9_TLS,
13970 AEABI_R9_unused
13973 /* Values for Tag_ABI_PCS_RW_data. */
13974 enum
13976 AEABI_PCS_RW_data_absolute,
13977 AEABI_PCS_RW_data_PCrel,
13978 AEABI_PCS_RW_data_SBrel,
13979 AEABI_PCS_RW_data_unused
13982 /* Values for Tag_ABI_enum_size. */
13983 enum
13985 AEABI_enum_unused,
13986 AEABI_enum_short,
13987 AEABI_enum_wide,
13988 AEABI_enum_forced_wide
13991 /* Determine whether an object attribute tag takes an integer, a
13992 string or both. */
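/* Tags below 32 take an integer unless listed otherwise; for tags of 32
   and above, odd-numbered tags take a string and even-numbered tags take
   an integer.  */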
13994 static int
13995 elf32_arm_obj_attrs_arg_type (int tag)
13997 if (tag == Tag_compatibility)
13998 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
13999 else if (tag == Tag_nodefaults)
14000 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
14001 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
14002 return ATTR_TYPE_FLAG_STR_VAL;
14003 else if (tag < 32)
14004 return ATTR_TYPE_FLAG_INT_VAL;
14005 else
14006 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
14009 /* The ABI defines that Tag_conformance should be emitted first, and that
14010 Tag_nodefaults should be second (if either is defined). This sets those
14011 two positions, and bumps up the position of all the remaining tags to
14012 compensate. */
14013 static int
14014 elf32_arm_obj_attrs_order (int num)
14016 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
14017 return Tag_conformance;
14018 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
14019 return Tag_nodefaults;
14020 if ((num - 2) < Tag_nodefaults)
14021 return num - 2;
14022 if ((num - 1) < Tag_conformance)
14023 return num - 1;
14024 return num;
14027 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
14028 static bool
14029 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
14031 if ((tag & 127) < 64)
14033 _bfd_error_handler
14034 (_("%pB: unknown mandatory EABI object attribute %d"),
14035 abfd, tag);
14036 bfd_set_error (bfd_error_bad_value);
14037 return false;
14039 else
14041 _bfd_error_handler
14042 (_("warning: %pB: unknown EABI object attribute %d"),
14043 abfd, tag);
14044 return true;
14048 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
14049 Returns -1 if no architecture could be read. */
14051 static int
14052 get_secondary_compatible_arch (bfd *abfd)
14054 obj_attribute *attr =
14055 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14057 /* Note: the tag and its argument below are uleb128 values, though
14058 currently-defined values fit in one byte for each. */
14059 if (attr->s
14060 && attr->s[0] == Tag_CPU_arch
14061 && (attr->s[1] & 128) != 128
14062 && attr->s[2] == 0)
14063 return attr->s[1];
14065 /* This tag is "safely ignorable", so don't complain if it looks funny. */
14066 return -1;
14069 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
14070 The tag is removed if ARCH is -1. */
14072 static void
14073 set_secondary_compatible_arch (bfd *abfd, int arch)
14075 obj_attribute *attr =
14076 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14078 if (arch == -1)
14080 attr->s = NULL;
14081 return;
14084 /* Note: the tag and its argument below are uleb128 values, though
14085 currently-defined values fit in one byte for each. */
14086 if (!attr->s)
14087 attr->s = (char *) bfd_alloc (abfd, 3);
14088 attr->s[0] = Tag_CPU_arch;
14089 attr->s[1] = arch;
14090 attr->s[2] = '\0';
14093 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
14094 into account. */
14096 static int
14097 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
14098 int newtag, int secondary_compat)
14100 #define T(X) TAG_CPU_ARCH_##X
14101 int tagl, tagh, result;
14102 const int v6t2[] =
14104 T(V6T2), /* PRE_V4. */
14105 T(V6T2), /* V4. */
14106 T(V6T2), /* V4T. */
14107 T(V6T2), /* V5T. */
14108 T(V6T2), /* V5TE. */
14109 T(V6T2), /* V5TEJ. */
14110 T(V6T2), /* V6. */
14111 T(V7), /* V6KZ. */
14112 T(V6T2) /* V6T2. */
14114 const int v6k[] =
14116 T(V6K), /* PRE_V4. */
14117 T(V6K), /* V4. */
14118 T(V6K), /* V4T. */
14119 T(V6K), /* V5T. */
14120 T(V6K), /* V5TE. */
14121 T(V6K), /* V5TEJ. */
14122 T(V6K), /* V6. */
14123 T(V6KZ), /* V6KZ. */
14124 T(V7), /* V6T2. */
14125 T(V6K) /* V6K. */
14127 const int v7[] =
14129 T(V7), /* PRE_V4. */
14130 T(V7), /* V4. */
14131 T(V7), /* V4T. */
14132 T(V7), /* V5T. */
14133 T(V7), /* V5TE. */
14134 T(V7), /* V5TEJ. */
14135 T(V7), /* V6. */
14136 T(V7), /* V6KZ. */
14137 T(V7), /* V6T2. */
14138 T(V7), /* V6K. */
14139 T(V7) /* V7. */
14141 const int v6_m[] =
14143 -1, /* PRE_V4. */
14144 -1, /* V4. */
14145 T(V6K), /* V4T. */
14146 T(V6K), /* V5T. */
14147 T(V6K), /* V5TE. */
14148 T(V6K), /* V5TEJ. */
14149 T(V6K), /* V6. */
14150 T(V6KZ), /* V6KZ. */
14151 T(V7), /* V6T2. */
14152 T(V6K), /* V6K. */
14153 T(V7), /* V7. */
14154 T(V6_M) /* V6_M. */
14156 const int v6s_m[] =
14158 -1, /* PRE_V4. */
14159 -1, /* V4. */
14160 T(V6K), /* V4T. */
14161 T(V6K), /* V5T. */
14162 T(V6K), /* V5TE. */
14163 T(V6K), /* V5TEJ. */
14164 T(V6K), /* V6. */
14165 T(V6KZ), /* V6KZ. */
14166 T(V7), /* V6T2. */
14167 T(V6K), /* V6K. */
14168 T(V7), /* V7. */
14169 T(V6S_M), /* V6_M. */
14170 T(V6S_M) /* V6S_M. */
14172 const int v7e_m[] =
14174 -1, /* PRE_V4. */
14175 -1, /* V4. */
14176 T(V7E_M), /* V4T. */
14177 T(V7E_M), /* V5T. */
14178 T(V7E_M), /* V5TE. */
14179 T(V7E_M), /* V5TEJ. */
14180 T(V7E_M), /* V6. */
14181 T(V7E_M), /* V6KZ. */
14182 T(V7E_M), /* V6T2. */
14183 T(V7E_M), /* V6K. */
14184 T(V7E_M), /* V7. */
14185 T(V7E_M), /* V6_M. */
14186 T(V7E_M), /* V6S_M. */
14187 T(V7E_M) /* V7E_M. */
14189 const int v8[] =
14191 T(V8), /* PRE_V4. */
14192 T(V8), /* V4. */
14193 T(V8), /* V4T. */
14194 T(V8), /* V5T. */
14195 T(V8), /* V5TE. */
14196 T(V8), /* V5TEJ. */
14197 T(V8), /* V6. */
14198 T(V8), /* V6KZ. */
14199 T(V8), /* V6T2. */
14200 T(V8), /* V6K. */
14201 T(V8), /* V7. */
14202 T(V8), /* V6_M. */
14203 T(V8), /* V6S_M. */
14204 T(V8), /* V7E_M. */
14205 T(V8) /* V8. */
14207 const int v8r[] =
14209 T(V8R), /* PRE_V4. */
14210 T(V8R), /* V4. */
14211 T(V8R), /* V4T. */
14212 T(V8R), /* V5T. */
14213 T(V8R), /* V5TE. */
14214 T(V8R), /* V5TEJ. */
14215 T(V8R), /* V6. */
14216 T(V8R), /* V6KZ. */
14217 T(V8R), /* V6T2. */
14218 T(V8R), /* V6K. */
14219 T(V8R), /* V7. */
14220 T(V8R), /* V6_M. */
14221 T(V8R), /* V6S_M. */
14222 T(V8R), /* V7E_M. */
14223 T(V8), /* V8. */
14224 T(V8R), /* V8R. */
14226 const int v8m_baseline[] =
14228 -1, /* PRE_V4. */
14229 -1, /* V4. */
14230 -1, /* V4T. */
14231 -1, /* V5T. */
14232 -1, /* V5TE. */
14233 -1, /* V5TEJ. */
14234 -1, /* V6. */
14235 -1, /* V6KZ. */
14236 -1, /* V6T2. */
14237 -1, /* V6K. */
14238 -1, /* V7. */
14239 T(V8M_BASE), /* V6_M. */
14240 T(V8M_BASE), /* V6S_M. */
14241 -1, /* V7E_M. */
14242 -1, /* V8. */
14243 -1, /* V8R. */
14244 T(V8M_BASE) /* V8-M BASELINE. */
14246 const int v8m_mainline[] =
14248 -1, /* PRE_V4. */
14249 -1, /* V4. */
14250 -1, /* V4T. */
14251 -1, /* V5T. */
14252 -1, /* V5TE. */
14253 -1, /* V5TEJ. */
14254 -1, /* V6. */
14255 -1, /* V6KZ. */
14256 -1, /* V6T2. */
14257 -1, /* V6K. */
14258 T(V8M_MAIN), /* V7. */
14259 T(V8M_MAIN), /* V6_M. */
14260 T(V8M_MAIN), /* V6S_M. */
14261 T(V8M_MAIN), /* V7E_M. */
14262 -1, /* V8. */
14263 -1, /* V8R. */
14264 T(V8M_MAIN), /* V8-M BASELINE. */
14265 T(V8M_MAIN) /* V8-M MAINLINE. */
14267 const int v8_1m_mainline[] =
14269 -1, /* PRE_V4. */
14270 -1, /* V4. */
14271 -1, /* V4T. */
14272 -1, /* V5T. */
14273 -1, /* V5TE. */
14274 -1, /* V5TEJ. */
14275 -1, /* V6. */
14276 -1, /* V6KZ. */
14277 -1, /* V6T2. */
14278 -1, /* V6K. */
14279 T(V8_1M_MAIN), /* V7. */
14280 T(V8_1M_MAIN), /* V6_M. */
14281 T(V8_1M_MAIN), /* V6S_M. */
14282 T(V8_1M_MAIN), /* V7E_M. */
14283 -1, /* V8. */
14284 -1, /* V8R. */
14285 T(V8_1M_MAIN), /* V8-M BASELINE. */
14286 T(V8_1M_MAIN), /* V8-M MAINLINE. */
14287 -1, /* Unused (18). */
14288 -1, /* Unused (19). */
14289 -1, /* Unused (20). */
14290 T(V8_1M_MAIN) /* V8.1-M MAINLINE. */
14292 const int v4t_plus_v6_m[] =
14294 -1, /* PRE_V4. */
14295 -1, /* V4. */
14296 T(V4T), /* V4T. */
14297 T(V5T), /* V5T. */
14298 T(V5TE), /* V5TE. */
14299 T(V5TEJ), /* V5TEJ. */
14300 T(V6), /* V6. */
14301 T(V6KZ), /* V6KZ. */
14302 T(V6T2), /* V6T2. */
14303 T(V6K), /* V6K. */
14304 T(V7), /* V7. */
14305 T(V6_M), /* V6_M. */
14306 T(V6S_M), /* V6S_M. */
14307 T(V7E_M), /* V7E_M. */
14308 T(V8), /* V8. */
14309 -1, /* V8R. */
14310 T(V8M_BASE), /* V8-M BASELINE. */
14311 T(V8M_MAIN), /* V8-M MAINLINE. */
14312 -1, /* Unused (18). */
14313 -1, /* Unused (19). */
14314 -1, /* Unused (20). */
14315 T(V8_1M_MAIN), /* V8.1-M MAINLINE. */
14316 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
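/* COMB is indexed by [TAGH - T(V6T2)]; each row is then indexed by TAGL
   and gives the merged architecture, with -1 (or a NULL row) meaning the
   two architectures cannot be combined.  */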
14318 const int *comb[] =
14320 v6t2,
14321 v6k,
14323 v6_m,
14324 v6s_m,
14325 v7e_m,
14327 v8r,
14328 v8m_baseline,
14329 v8m_mainline,
14330 NULL,
14331 NULL,
14332 NULL,
14333 v8_1m_mainline,
14334 /* Pseudo-architecture. */
14335 v4t_plus_v6_m
14338 /* Check we've not got a higher architecture than we know about. */
14340 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
14342 _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
14343 return -1;
14346 /* Override old tag if we have a Tag_also_compatible_with on the output. */
14348 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
14349 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
14350 oldtag = T(V4T_PLUS_V6_M);
14352 /* And override the new tag if we have a Tag_also_compatible_with on the
14353 input. */
14355 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
14356 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
14357 newtag = T(V4T_PLUS_V6_M);
14359 tagl = (oldtag < newtag) ? oldtag : newtag;
14360 result = tagh = (oldtag > newtag) ? oldtag : newtag;
14362 /* Architectures before V6KZ add features monotonically. */
14363 if (tagh <= TAG_CPU_ARCH_V6KZ)
14364 return result;
14366 result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
14368 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
14369 as the canonical version. */
14370 if (result == T(V4T_PLUS_V6_M))
14372 result = T(V4T);
14373 *secondary_compat_out = T(V6_M);
14375 else
14376 *secondary_compat_out = -1;
14378 if (result == -1)
14380 _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
14381 ibfd, oldtag, newtag);
14382 return -1;
14385 return result;
14386 #undef T
14389 /* Query attributes object to see if integer divide instructions may be
14390 present in an object. */
14391 static bool
14392 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14394 int arch = attr[Tag_CPU_arch].i;
14395 int profile = attr[Tag_CPU_arch_profile].i;
14397 switch (attr[Tag_DIV_use].i)
14399 case 0:
14400 /* Integer divide allowed if the instruction is contained in the architecture. */
14401 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14402 return true;
14403 else if (arch >= TAG_CPU_ARCH_V7E_M)
14404 return true;
14405 else
14406 return false;
14408 case 1:
14409 /* Integer divide explicitly prohibited. */
14410 return false;
14412 default:
14413 /* Unrecognised case - treat as allowing divide everywhere. */
14414 case 2:
14415 /* Integer divide allowed in ARM state. */
14416 return true;
14420 /* Query attributes object to see if integer divide instructions are
14421 forbidden to be in the object. This is not the inverse of
14422 elf32_arm_attributes_accept_div. */
14423 static bool
14424 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14426 return attr[Tag_DIV_use].i == 1;
14429 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14430 are conflicting attributes. */
14432 static bool
14433 elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14435 bfd *obfd = info->output_bfd;
14436 obj_attribute *in_attr;
14437 obj_attribute *out_attr;
14438 /* Some tags have 0 = don't care, 1 = strong requirement,
14439 2 = weak requirement. */
14440 static const int order_021[3] = {0, 2, 1};
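/* ORDER_021 maps such a value to its precedence, so that a strong
   requirement (1) outranks a weak one (2), which outranks don't-care (0).  */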
14441 int i;
14442 bool result = true;
14443 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14445 /* Skip the linker stubs file. This preserves previous behavior
14446 of accepting unknown attributes in the first input file - but
14447 is that a bug? */
14448 if (ibfd->flags & BFD_LINKER_CREATED)
14449 return true;
14451 /* Skip any input that doesn't have an attribute section.
14452 This enables object files without an attribute section to be
14453 linked with any others. */
14454 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14455 return true;
14457 if (!elf_known_obj_attributes_proc (obfd)[0].i)
14459 /* This is the first object. Copy the attributes. */
14460 _bfd_elf_copy_obj_attributes (ibfd, obfd);
14462 out_attr = elf_known_obj_attributes_proc (obfd);
14464 /* Use the Tag_null value to indicate the attributes have been
14465 initialized. */
14466 out_attr[0].i = 1;
14468 /* We do not output objects with Tag_MPextension_use_legacy - we move
14469 the attribute's value to Tag_MPextension_use. */
14470 if (out_attr[Tag_MPextension_use_legacy].i != 0)
14472 if (out_attr[Tag_MPextension_use].i != 0
14473 && out_attr[Tag_MPextension_use_legacy].i
14474 != out_attr[Tag_MPextension_use].i)
14476 _bfd_error_handler
14477 (_("Error: %pB has both the current and legacy "
14478 "Tag_MPextension_use attributes"), ibfd);
14479 result = false;
14482 out_attr[Tag_MPextension_use] =
14483 out_attr[Tag_MPextension_use_legacy];
14484 out_attr[Tag_MPextension_use_legacy].type = 0;
14485 out_attr[Tag_MPextension_use_legacy].i = 0;
14488 return result;
14491 in_attr = elf_known_obj_attributes_proc (ibfd);
14492 out_attr = elf_known_obj_attributes_proc (obfd);
14493 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14494 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14496 /* Ignore mismatches if the object doesn't use floating point or is
14497 floating point ABI independent. */
14498 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14499 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14500 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14501 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14502 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14503 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14505 _bfd_error_handler
14506 (_("error: %pB uses VFP register arguments, %pB does not"),
14507 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14508 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14509 result = false;
14513 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14515 /* Merge this attribute with existing attributes. */
14516 switch (i)
14518 case Tag_CPU_raw_name:
14519 case Tag_CPU_name:
14520 /* These are merged after Tag_CPU_arch. */
14521 break;
14523 case Tag_ABI_optimization_goals:
14524 case Tag_ABI_FP_optimization_goals:
14525 /* Use the first value seen. */
14526 break;
14528 case Tag_CPU_arch:
14530 int secondary_compat = -1, secondary_compat_out = -1;
14531 unsigned int saved_out_attr = out_attr[i].i;
14532 int arch_attr;
14533 static const char *name_table[] =
14535 /* These aren't real CPU names, but we can't guess
14536 that from the architecture version alone. */
14537 "Pre v4",
14538 "ARM v4",
14539 "ARM v4T",
14540 "ARM v5T",
14541 "ARM v5TE",
14542 "ARM v5TEJ",
14543 "ARM v6",
14544 "ARM v6KZ",
14545 "ARM v6T2",
14546 "ARM v6K",
14547 "ARM v7",
14548 "ARM v6-M",
14549 "ARM v6S-M",
14550 "ARM v8",
14552 "ARM v8-M.baseline",
14553 "ARM v8-M.mainline",
14556 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14557 secondary_compat = get_secondary_compatible_arch (ibfd);
14558 secondary_compat_out = get_secondary_compatible_arch (obfd);
14559 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14560 &secondary_compat_out,
14561 in_attr[i].i,
14562 secondary_compat);
14564 /* Return with error if failed to merge. */
14565 if (arch_attr == -1)
14566 return false;
14568 out_attr[i].i = arch_attr;
14570 set_secondary_compatible_arch (obfd, secondary_compat_out);
14572 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14573 if (out_attr[i].i == saved_out_attr)
14574 ; /* Leave the names alone. */
14575 else if (out_attr[i].i == in_attr[i].i)
14577 /* The output architecture has been changed to match the
14578 input architecture. Use the input names. */
14579 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14580 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14581 : NULL;
14582 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14583 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14584 : NULL;
14586 else
14588 out_attr[Tag_CPU_name].s = NULL;
14589 out_attr[Tag_CPU_raw_name].s = NULL;
14592 /* If we still don't have a value for Tag_CPU_name,
14593 make one up now. Tag_CPU_raw_name remains blank. */
14594 if (out_attr[Tag_CPU_name].s == NULL
14595 && out_attr[i].i < ARRAY_SIZE (name_table))
14596 out_attr[Tag_CPU_name].s =
14597 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14599 break;
14601 case Tag_ARM_ISA_use:
14602 case Tag_THUMB_ISA_use:
14603 case Tag_WMMX_arch:
14604 case Tag_Advanced_SIMD_arch:
14605 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14606 case Tag_ABI_FP_rounding:
14607 case Tag_ABI_FP_exceptions:
14608 case Tag_ABI_FP_user_exceptions:
14609 case Tag_ABI_FP_number_model:
14610 case Tag_FP_HP_extension:
14611 case Tag_CPU_unaligned_access:
14612 case Tag_T2EE_use:
14613 case Tag_MPextension_use:
14614 case Tag_MVE_arch:
14615 case Tag_PAC_extension:
14616 case Tag_BTI_extension:
14617 case Tag_BTI_use:
14618 case Tag_PACRET_use:
14619 /* Use the largest value specified. */
14620 if (in_attr[i].i > out_attr[i].i)
14621 out_attr[i].i = in_attr[i].i;
14622 break;
14624 case Tag_ABI_align_preserved:
14625 case Tag_ABI_PCS_RO_data:
14626 /* Use the smallest value specified. */
14627 if (in_attr[i].i < out_attr[i].i)
14628 out_attr[i].i = in_attr[i].i;
14629 break;
14631 case Tag_ABI_align_needed:
14632 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14633 && (in_attr[Tag_ABI_align_preserved].i == 0
14634 || out_attr[Tag_ABI_align_preserved].i == 0))
14636 /* This error message should be enabled once all non-conformant
14637 binaries in the toolchain have had the attributes set
14638 properly.
14639 _bfd_error_handler
14640 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14641 obfd, ibfd);
14642 result = false; */
14644 /* Fall through. */
14645 case Tag_ABI_FP_denormal:
14646 case Tag_ABI_PCS_GOT_use:
14647 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14648 value if greater than 2 (for future-proofing). */
14649 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14650 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14651 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14652 out_attr[i].i = in_attr[i].i;
14653 break;
14655 case Tag_Virtualization_use:
14656 /* The virtualization tag effectively stores two bits of
14657 information: the intended use of TrustZone (in bit 0), and the
14658 intended use of Virtualization (in bit 1). */
14659 if (out_attr[i].i == 0)
14660 out_attr[i].i = in_attr[i].i;
14661 else if (in_attr[i].i != 0
14662 && in_attr[i].i != out_attr[i].i)
14664 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14665 out_attr[i].i = 3;
14666 else
14668 _bfd_error_handler
14669 (_("error: %pB: unable to merge virtualization attributes "
14670 "with %pB"),
14671 obfd, ibfd);
14672 result = false;
14675 break;
14677 case Tag_CPU_arch_profile:
14678 if (out_attr[i].i != in_attr[i].i)
14680 /* 0 will merge with anything.
14681 'A' and 'S' merge to 'A'.
14682 'R' and 'S' merge to 'R'.
14683 'M' and 'A|R|S' is an error. */
14684 if (out_attr[i].i == 0
14685 || (out_attr[i].i == 'S'
14686 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14687 out_attr[i].i = in_attr[i].i;
14688 else if (in_attr[i].i == 0
14689 || (in_attr[i].i == 'S'
14690 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14691 ; /* Do nothing. */
14692 else
14694 _bfd_error_handler
14695 (_("error: %pB: conflicting architecture profiles %c/%c"),
14696 ibfd,
14697 in_attr[i].i ? in_attr[i].i : '0',
14698 out_attr[i].i ? out_attr[i].i : '0');
14699 result = false;
14702 break;
14704 case Tag_DSP_extension:
14705 /* No need to change the output value if any of:
14706 - the input architecture is ARMv5T or earlier (no DSP instructions)
14707 - the input has an M profile other than ARMv7E-M and no DSP. */
14708 if (in_attr[Tag_CPU_arch].i <= 3
14709 || (in_attr[Tag_CPU_arch_profile].i == 'M'
14710 && in_attr[Tag_CPU_arch].i != 13
14711 && in_attr[i].i == 0))
14712 ; /* Do nothing. */
14713 /* The output value should be 0 if DSP is part of the output architecture, i.e.
14714 - the output architecture is ARMv5TE or later, and
14715 - it has an A, R or S profile or is ARMv7E-M. */
14716 else if (out_attr[Tag_CPU_arch].i >= 4
14717 && (out_attr[Tag_CPU_arch_profile].i == 'A'
14718 || out_attr[Tag_CPU_arch_profile].i == 'R'
14719 || out_attr[Tag_CPU_arch_profile].i == 'S'
14720 || out_attr[Tag_CPU_arch].i == 13))
14721 out_attr[i].i = 0;
14722 /* Otherwise, DSP instructions are added and not part of output
14723 architecture. */
14724 else
14725 out_attr[i].i = 1;
14726 break;
14728 case Tag_FP_arch:
14730 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14731 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14732 when it's 0. It might mean absence of FP hardware if
14733 Tag_FP_arch is zero. */
14735 #define VFP_VERSION_COUNT 9
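/* Indexed by the Tag_FP_arch attribute value; each entry gives the FP
   ISA version and the number of D registers implied by that value.  */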
14736 static const struct
14738 int ver;
14739 int regs;
14740 } vfp_versions[VFP_VERSION_COUNT] =
14742 {0, 0},
14743 {1, 16},
14744 {2, 16},
14745 {3, 32},
14746 {3, 16},
14747 {4, 32},
14748 {4, 16},
14749 {8, 32},
14750 {8, 16}
14752 int ver;
14753 int regs;
14754 int newval;
14756 /* If the output has no requirement about FP hardware,
14757 follow the requirement of the input. */
14758 if (out_attr[i].i == 0)
14760 /* This assert is still reasonable, we shouldn't
14761 produce the suspicious build attribute
14762 combination (See below for in_attr). */
14763 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14764 out_attr[i].i = in_attr[i].i;
14765 out_attr[Tag_ABI_HardFP_use].i
14766 = in_attr[Tag_ABI_HardFP_use].i;
14767 break;
14769 /* If the input has no requirement about FP hardware, do
14770 nothing. */
14771 else if (in_attr[i].i == 0)
14773 /* We used to assert that Tag_ABI_HardFP_use was
14774 zero here, but we should never assert when
14775 consuming an object file that has suspicious
14776 build attributes. The single precision variant
14777 of 'no FP architecture' is still 'no FP
14778 architecture', so we just ignore the tag in this
14779 case. */
14780 break;
14783 /* Both the input and the output have nonzero Tag_FP_arch.
14784 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14786 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14787 do nothing. */
14788 if (in_attr[Tag_ABI_HardFP_use].i == 0
14789 && out_attr[Tag_ABI_HardFP_use].i == 0)
14791 /* If the input and the output have different Tag_ABI_HardFP_use,
14792 the combination of them is 0 (implied by Tag_FP_arch). */
14793 else if (in_attr[Tag_ABI_HardFP_use].i
14794 != out_attr[Tag_ABI_HardFP_use].i)
14795 out_attr[Tag_ABI_HardFP_use].i = 0;
14797 /* Now we can handle Tag_FP_arch. */
14799 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14800 pick the biggest. */
14801 if (in_attr[i].i >= VFP_VERSION_COUNT
14802 && in_attr[i].i > out_attr[i].i)
14804 out_attr[i] = in_attr[i];
14805 break;
14807 /* The output uses the superset of input features
14808 (ISA version) and registers. */
14809 ver = vfp_versions[in_attr[i].i].ver;
14810 if (ver < vfp_versions[out_attr[i].i].ver)
14811 ver = vfp_versions[out_attr[i].i].ver;
14812 regs = vfp_versions[in_attr[i].i].regs;
14813 if (regs < vfp_versions[out_attr[i].i].regs)
14814 regs = vfp_versions[out_attr[i].i].regs;
14815 /* This assumes that all possible supersets are also valid
14816 options. */
14817 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14819 if (regs == vfp_versions[newval].regs
14820 && ver == vfp_versions[newval].ver)
14821 break;
14823 out_attr[i].i = newval;
14825 break;
14826 case Tag_PCS_config:
14827 if (out_attr[i].i == 0)
14828 out_attr[i].i = in_attr[i].i;
14829 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14831 /* It's sometimes ok to mix different configs, so this is only
14832 a warning. */
14833 _bfd_error_handler
14834 (_("warning: %pB: conflicting platform configuration"), ibfd);
14836 break;
14837 case Tag_ABI_PCS_R9_use:
14838 if (in_attr[i].i != out_attr[i].i
14839 && out_attr[i].i != AEABI_R9_unused
14840 && in_attr[i].i != AEABI_R9_unused)
14842 _bfd_error_handler
14843 (_("error: %pB: conflicting use of R9"), ibfd);
14844 result = false;
14846 if (out_attr[i].i == AEABI_R9_unused)
14847 out_attr[i].i = in_attr[i].i;
14848 break;
14849 case Tag_ABI_PCS_RW_data:
14850 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14851 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14852 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14854 _bfd_error_handler
14855 (_("error: %pB: SB relative addressing conflicts with use of R9"),
14856 ibfd);
14857 result = false;
14859 /* Use the smallest value specified. */
14860 if (in_attr[i].i < out_attr[i].i)
14861 out_attr[i].i = in_attr[i].i;
14862 break;
14863 case Tag_ABI_PCS_wchar_t:
14864 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
14865 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
14867 _bfd_error_handler
14868 (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14869 ibfd, in_attr[i].i, out_attr[i].i);
14871 else if (in_attr[i].i && !out_attr[i].i)
14872 out_attr[i].i = in_attr[i].i;
14873 break;
14874 case Tag_ABI_enum_size:
14875 if (in_attr[i].i != AEABI_enum_unused)
14877 if (out_attr[i].i == AEABI_enum_unused
14878 || out_attr[i].i == AEABI_enum_forced_wide)
14880 /* The existing object is compatible with anything.
14881 Use whatever requirements the new object has. */
14882 out_attr[i].i = in_attr[i].i;
14884 else if (in_attr[i].i != AEABI_enum_forced_wide
14885 && out_attr[i].i != in_attr[i].i
14886 && !elf_arm_tdata (obfd)->no_enum_size_warning)
14888 static const char *aeabi_enum_names[] =
14889 { "", "variable-size", "32-bit", "" };
14890 const char *in_name =
14891 in_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
14892 ? aeabi_enum_names[in_attr[i].i]
14893 : "<unknown>";
14894 const char *out_name =
14895 out_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
14896 ? aeabi_enum_names[out_attr[i].i]
14897 : "<unknown>";
14898 _bfd_error_handler
14899 (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14900 ibfd, in_name, out_name);
14903 break;
14904 case Tag_ABI_VFP_args:
14905 /* Already done. */
14906 break;
14907 case Tag_ABI_WMMX_args:
14908 if (in_attr[i].i != out_attr[i].i)
14910 _bfd_error_handler
14911 (_("error: %pB uses iWMMXt register arguments, %pB does not"),
14912 ibfd, obfd);
14913 result = false;
14915 break;
14916 case Tag_compatibility:
14917 /* Merged in target-independent code. */
14918 break;
14919 case Tag_ABI_HardFP_use:
14920 /* This is handled along with Tag_FP_arch. */
14921 break;
14922 case Tag_ABI_FP_16bit_format:
14923 if (in_attr[i].i != 0 && out_attr[i].i != 0)
14925 if (in_attr[i].i != out_attr[i].i)
14927 _bfd_error_handler
14928 (_("error: fp16 format mismatch between %pB and %pB"),
14929 ibfd, obfd);
14930 result = false;
14933 if (in_attr[i].i != 0)
14934 out_attr[i].i = in_attr[i].i;
14935 break;
14937 case Tag_DIV_use:
14938 /* A value of zero on input means that the divide instruction may
14939 be used if available in the base architecture as specified via
14940 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
14941 the user did not want divide instructions. A value of 2
14942 explicitly means that divide instructions were allowed in ARM
14943 and Thumb state. */
14944 if (in_attr[i].i == out_attr[i].i)
14945 /* Do nothing. */ ;
14946 else if (elf32_arm_attributes_forbid_div (in_attr)
14947 && !elf32_arm_attributes_accept_div (out_attr))
14948 out_attr[i].i = 1;
14949 else if (elf32_arm_attributes_forbid_div (out_attr)
14950 && elf32_arm_attributes_accept_div (in_attr))
14951 out_attr[i].i = in_attr[i].i;
14952 else if (in_attr[i].i == 2)
14953 out_attr[i].i = in_attr[i].i;
14954 break;
14956 case Tag_MPextension_use_legacy:
14957 /* We don't output objects with Tag_MPextension_use_legacy - we
14958 move the value to Tag_MPextension_use. */
14959 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
14961 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
14963 _bfd_error_handler
14964 (_("%pB has both the current and legacy "
14965 "Tag_MPextension_use attributes"),
14966 ibfd);
14967 result = false;
14971 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
14972 out_attr[Tag_MPextension_use] = in_attr[i];
14974 break;
14976 case Tag_nodefaults:
14977 /* This tag is set if it exists, but the value is unused (and is
14978 typically zero). We don't actually need to do anything here -
14979 the merge happens automatically when the type flags are merged
14980 below. */
14981 break;
14982 case Tag_also_compatible_with:
14983 /* Already done in Tag_CPU_arch. */
14984 break;
14985 case Tag_conformance:
14986 /* Keep the attribute if it matches. Throw it away otherwise.
14987 No attribute means no claim to conform. */
14988 if (!in_attr[i].s || !out_attr[i].s
14989 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
14990 out_attr[i].s = NULL;
14991 break;
14993 default:
14994 result
14995 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
14998 /* If out_attr was copied from in_attr then it won't have a type yet. */
14999 if (in_attr[i].type && !out_attr[i].type)
15000 out_attr[i].type = in_attr[i].type;
15003 /* Merge Tag_compatibility attributes and any common GNU ones. */
15004 if (!_bfd_elf_merge_object_attributes (ibfd, info))
15005 return false;
15007 /* Check for any attributes not known on ARM. */
15008 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
15010 return result;
15014 /* Return TRUE if the two EABI versions are compatible. */
15016 static bool
15017 elf32_arm_versions_compatible (unsigned iver, unsigned over)
15019 /* v4 and v5 are the same spec before and after it was released,
15020 so allow mixing them. */
15021 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
15022 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
15023 return true;
15025 return (iver == over);
15028 /* Merge backend specific data from an object file to the output
15029 object file when linking. */
15031 static bool
15032 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
15034 /* Display the flags field. */
15036 static bool
15037 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
15039 FILE * file = (FILE *) ptr;
15040 unsigned long flags;
15042 BFD_ASSERT (abfd != NULL && ptr != NULL);
15044 /* Print normal ELF private data. */
15045 _bfd_elf_print_private_bfd_data (abfd, ptr);
15047 flags = elf_elfheader (abfd)->e_flags;
15048 /* Ignore init flag - it may not be set, despite the flags field
15049 containing valid data. */
15051 fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);
15053 switch (EF_ARM_EABI_VERSION (flags))
15055 case EF_ARM_EABI_UNKNOWN:
15056 /* The following flag bits are GNU extensions and not part of the
15057 official ARM ELF extended ABI. Hence they are only decoded if
15058 the EABI version is not set. */
15059 if (flags & EF_ARM_INTERWORK)
15060 fprintf (file, _(" [interworking enabled]"));
15062 if (flags & EF_ARM_APCS_26)
15063 fprintf (file, " [APCS-26]");
15064 else
15065 fprintf (file, " [APCS-32]");
15067 if (flags & EF_ARM_VFP_FLOAT)
15068 fprintf (file, _(" [VFP float format]"));
15069 else if (flags & EF_ARM_MAVERICK_FLOAT)
15070 fprintf (file, _(" [Maverick float format]"));
15071 else
15072 fprintf (file, _(" [FPA float format]"));
15074 if (flags & EF_ARM_APCS_FLOAT)
15075 fprintf (file, _(" [floats passed in float registers]"));
15077 if (flags & EF_ARM_PIC)
15078 fprintf (file, _(" [position independent]"));
15080 if (flags & EF_ARM_NEW_ABI)
15081 fprintf (file, _(" [new ABI]"));
15083 if (flags & EF_ARM_OLD_ABI)
15084 fprintf (file, _(" [old ABI]"));
15086 if (flags & EF_ARM_SOFT_FLOAT)
15087 fprintf (file, _(" [software FP]"));
15089 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
15090 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
15091 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
15092 | EF_ARM_MAVERICK_FLOAT);
15093 break;
15095 case EF_ARM_EABI_VER1:
15096 fprintf (file, _(" [Version1 EABI]"));
15098 if (flags & EF_ARM_SYMSARESORTED)
15099 fprintf (file, _(" [sorted symbol table]"));
15100 else
15101 fprintf (file, _(" [unsorted symbol table]"));
15103 flags &= ~ EF_ARM_SYMSARESORTED;
15104 break;
15106 case EF_ARM_EABI_VER2:
15107 fprintf (file, _(" [Version2 EABI]"));
15109 if (flags & EF_ARM_SYMSARESORTED)
15110 fprintf (file, _(" [sorted symbol table]"));
15111 else
15112 fprintf (file, _(" [unsorted symbol table]"));
15114 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
15115 fprintf (file, _(" [dynamic symbols use segment index]"));
15117 if (flags & EF_ARM_MAPSYMSFIRST)
15118 fprintf (file, _(" [mapping symbols precede others]"));
15120 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
15121 | EF_ARM_MAPSYMSFIRST);
15122 break;
15124 case EF_ARM_EABI_VER3:
15125 fprintf (file, _(" [Version3 EABI]"));
15126 break;
15128 case EF_ARM_EABI_VER4:
15129 fprintf (file, _(" [Version4 EABI]"));
15130 goto eabi;
15132 case EF_ARM_EABI_VER5:
15133 fprintf (file, _(" [Version5 EABI]"));
15135 if (flags & EF_ARM_ABI_FLOAT_SOFT)
15136 fprintf (file, _(" [soft-float ABI]"));
15138 if (flags & EF_ARM_ABI_FLOAT_HARD)
15139 fprintf (file, _(" [hard-float ABI]"));
15141 flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
15143 eabi:
15144 if (flags & EF_ARM_BE8)
15145 fprintf (file, _(" [BE8]"));
15147 if (flags & EF_ARM_LE8)
15148 fprintf (file, _(" [LE8]"));
15150 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
15151 break;
15153 default:
15154 fprintf (file, _(" <EABI version unrecognised>"));
15155 break;
15158 flags &= ~ EF_ARM_EABIMASK;
15160 if (flags & EF_ARM_RELEXEC)
15161 fprintf (file, _(" [relocatable executable]"));
15163 if (flags & EF_ARM_PIC)
15164 fprintf (file, _(" [position independent]"));
15166 if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
15167 fprintf (file, _(" [FDPIC ABI supplement]"));
15169 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);
15171 if (flags)
15172 fprintf (file, _(" <Unrecognised flag bits set>"));
15174 fputc ('\n', file);
15176 return true;
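/* Return the ELF symbol type to use for ELF_SYM: the ARM-specific types
STT_ARM_TFUNC and STT_ARM_16BIT are passed through (the latter only for
non-object, non-TLS symbols); otherwise TYPE is returned unchanged. */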
15179 static int
15180 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15182 switch (ELF_ST_TYPE (elf_sym->st_info))
15184 case STT_ARM_TFUNC:
15185 return ELF_ST_TYPE (elf_sym->st_info);
15187 case STT_ARM_16BIT:
15188 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15189 This allows us to distinguish between data used by Thumb instructions
15190 and non-data (which is probably code) inside Thumb regions of an
15191 executable. */
15192 if (type != STT_OBJECT && type != STT_TLS)
15193 return ELF_ST_TYPE (elf_sym->st_info);
15194 break;
15196 default:
15197 break;
15200 return type;
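/* Decide which section to mark against garbage collection for the
relocation REL.  C++ vtable relocations do not keep their target
section alive; everything else defers to the generic ELF hook. */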
15203 static asection *
15204 elf32_arm_gc_mark_hook (asection *sec,
15205 struct bfd_link_info *info,
15206 Elf_Internal_Rela *rel,
15207 struct elf_link_hash_entry *h,
15208 Elf_Internal_Sym *sym)
15210 if (h != NULL)
15211 switch (ELF32_R_TYPE (rel->r_info))
15213 case R_ARM_GNU_VTINHERIT:
15214 case R_ARM_GNU_VTENTRY:
15215 return NULL;
15218 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15221 /* Look through the relocs for a section during the first phase. */
15223 static bool
15224 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15225 asection *sec, const Elf_Internal_Rela *relocs)
15227 Elf_Internal_Shdr *symtab_hdr;
15228 struct elf_link_hash_entry **sym_hashes;
15229 const Elf_Internal_Rela *rel;
15230 const Elf_Internal_Rela *rel_end;
15231 bfd *dynobj;
15232 asection *sreloc;
15233 struct elf32_arm_link_hash_table *htab;
15234 bool call_reloc_p;
15235 bool may_become_dynamic_p;
15236 bool may_need_local_target_p;
15237 unsigned long nsyms;
15239 if (bfd_link_relocatable (info))
15240 return true;
15242 BFD_ASSERT (is_arm_elf (abfd));
15244 htab = elf32_arm_hash_table (info);
15245 if (htab == NULL)
15246 return false;
15248 sreloc = NULL;
15250 /* Create dynamic sections for relocatable executables so that we can
15251 copy relocations. */
15252 if (htab->root.is_relocatable_executable
15253 && ! htab->root.dynamic_sections_created)
15255 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
15256 return false;
15259 if (htab->root.dynobj == NULL)
15260 htab->root.dynobj = abfd;
15261 if (!create_ifunc_sections (info))
15262 return false;
15264 dynobj = htab->root.dynobj;
15266 symtab_hdr = & elf_symtab_hdr (abfd);
15267 sym_hashes = elf_sym_hashes (abfd);
15268 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15270 rel_end = relocs + sec->reloc_count;
15271 for (rel = relocs; rel < rel_end; rel++)
15273 Elf_Internal_Sym *isym;
15274 struct elf_link_hash_entry *h;
15275 struct elf32_arm_link_hash_entry *eh;
15276 unsigned int r_symndx;
15277 int r_type;
15279 r_symndx = ELF32_R_SYM (rel->r_info);
15280 r_type = ELF32_R_TYPE (rel->r_info);
15281 r_type = arm_real_reloc_type (htab, r_type);
15283 if (r_symndx >= nsyms
15284 /* PR 9934: It is possible to have relocations that do not
15285 refer to symbols, thus it is also possible to have an
15286 object file containing relocations but no symbol table. */
15287 && (r_symndx > STN_UNDEF || nsyms > 0))
15289 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15290 r_symndx);
15291 return false;
15294 h = NULL;
15295 isym = NULL;
15296 if (nsyms > 0)
15298 if (r_symndx < symtab_hdr->sh_info)
15300 /* A local symbol. */
15301 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
15302 abfd, r_symndx);
15303 if (isym == NULL)
15304 return false;
15306 else
15308 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15309 while (h->root.type == bfd_link_hash_indirect
15310 || h->root.type == bfd_link_hash_warning)
15311 h = (struct elf_link_hash_entry *) h->root.u.i.link;
15315 eh = (struct elf32_arm_link_hash_entry *) h;
15317 call_reloc_p = false;
15318 may_become_dynamic_p = false;
15319 may_need_local_target_p = false;
15321 /* Could be done earlier, if h were already available. */
15322 r_type = elf32_arm_tls_transition (info, r_type, h);
15323 switch (r_type)
15325 case R_ARM_GOTOFFFUNCDESC:
15327 if (h == NULL)
15329 if (!elf32_arm_allocate_local_sym_info (abfd))
15330 return false;
15331 if (r_symndx >= elf32_arm_num_entries (abfd))
15332 return false;
15333 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].gotofffuncdesc_cnt += 1;
15334 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
15336 else
15338 eh->fdpic_cnts.gotofffuncdesc_cnt++;
15341 break;
15343 case R_ARM_GOTFUNCDESC:
15345 if (h == NULL)
15347 /* Such a relocation is not supposed to be generated
15348 by gcc for a static function; it could however be
15349 handled here if that ever became necessary. */
15350 return false;
15352 else
15354 eh->fdpic_cnts.gotfuncdesc_cnt++;
15357 break;
15359 case R_ARM_FUNCDESC:
15361 if (h == NULL)
15363 if (!elf32_arm_allocate_local_sym_info (abfd))
15364 return false;
15365 if (r_symndx >= elf32_arm_num_entries (abfd))
15366 return false;
15367 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_cnt += 1;
15368 elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
15370 else
15372 eh->fdpic_cnts.funcdesc_cnt++;
15375 break;
15377 case R_ARM_GOT32:
15378 case R_ARM_GOT_PREL:
15379 case R_ARM_TLS_GD32:
15380 case R_ARM_TLS_GD32_FDPIC:
15381 case R_ARM_TLS_IE32:
15382 case R_ARM_TLS_IE32_FDPIC:
15383 case R_ARM_TLS_GOTDESC:
15384 case R_ARM_TLS_DESCSEQ:
15385 case R_ARM_THM_TLS_DESCSEQ:
15386 case R_ARM_TLS_CALL:
15387 case R_ARM_THM_TLS_CALL:
15388 /* This symbol requires a global offset table entry. */
15390 int tls_type, old_tls_type;
15392 switch (r_type)
15394 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15395 case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15397 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15398 case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15400 case R_ARM_TLS_GOTDESC:
15401 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15402 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15403 tls_type = GOT_TLS_GDESC; break;
15405 default: tls_type = GOT_NORMAL; break;
15408 if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15409 info->flags |= DF_STATIC_TLS;
15411 if (h != NULL)
15413 h->got.refcount++;
15414 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15416 else
15418 /* This is a global offset table entry for a local symbol. */
15419 if (!elf32_arm_allocate_local_sym_info (abfd))
15420 return false;
15421 if (r_symndx >= elf32_arm_num_entries (abfd))
15423 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15424 r_symndx);
15425 return false;
15428 elf_local_got_refcounts (abfd)[r_symndx] += 1;
15429 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15432 /* If a variable is accessed with both tls methods, two
15433 slots may be created. */
15434 if (GOT_TLS_GD_ANY_P (old_tls_type)
15435 && GOT_TLS_GD_ANY_P (tls_type))
15436 tls_type |= old_tls_type;
15438 /* We will already have issued an error message if there
15439 is a TLS/non-TLS mismatch, based on the symbol
15440 type. So just combine any TLS types needed. */
15441 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15442 && tls_type != GOT_NORMAL)
15443 tls_type |= old_tls_type;
15445 /* If the symbol is accessed with both the IE and GDESC
15446 methods, we're able to relax.  Turn off the GDESC flag
15447 without disturbing any other TLS types that may be
15448 involved. */
15449 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15450 tls_type &= ~GOT_TLS_GDESC;
15452 if (old_tls_type != tls_type)
15454 if (h != NULL)
15455 elf32_arm_hash_entry (h)->tls_type = tls_type;
15456 else
15457 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15460 /* Fall through. */
15462 case R_ARM_TLS_LDM32:
15463 case R_ARM_TLS_LDM32_FDPIC:
15464 if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15465 htab->tls_ldm_got.refcount++;
15466 /* Fall through. */
15468 case R_ARM_GOTOFF32:
15469 case R_ARM_GOTPC:
15470 if (htab->root.sgot == NULL
15471 && !create_got_section (htab->root.dynobj, info))
15472 return false;
15473 break;
15475 case R_ARM_PC24:
15476 case R_ARM_PLT32:
15477 case R_ARM_CALL:
15478 case R_ARM_JUMP24:
15479 case R_ARM_PREL31:
15480 case R_ARM_THM_CALL:
15481 case R_ARM_THM_JUMP24:
15482 case R_ARM_THM_JUMP19:
15483 call_reloc_p = true;
15484 may_need_local_target_p = true;
15485 break;
15487 case R_ARM_ABS12:
15488 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15489 ldr __GOTT_INDEX__ offsets. */
15490 if (htab->root.target_os != is_vxworks)
15492 may_need_local_target_p = true;
15493 break;
15495 else goto jump_over;
15497 /* Fall through. */
15499 case R_ARM_MOVW_ABS_NC:
15500 case R_ARM_MOVT_ABS:
15501 case R_ARM_THM_MOVW_ABS_NC:
15502 case R_ARM_THM_MOVT_ABS:
15503 if (bfd_link_pic (info))
15505 _bfd_error_handler
15506 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15507 abfd, elf32_arm_howto_table_1[r_type].name,
15508 (h) ? h->root.root.string : "a local symbol");
15509 bfd_set_error (bfd_error_bad_value);
15510 return false;
15513 /* Fall through. */
15514 case R_ARM_ABS32:
15515 case R_ARM_ABS32_NOI:
15516 jump_over:
15517 if (h != NULL && bfd_link_executable (info))
15519 h->pointer_equality_needed = 1;
15521 /* Fall through. */
15522 case R_ARM_REL32:
15523 case R_ARM_REL32_NOI:
15524 case R_ARM_MOVW_PREL_NC:
15525 case R_ARM_MOVT_PREL:
15526 case R_ARM_THM_MOVW_PREL_NC:
15527 case R_ARM_THM_MOVT_PREL:
15529 /* Should the interworking branches be listed here? */
15530 if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
15531 || htab->fdpic_p)
15532 && (sec->flags & SEC_ALLOC) != 0)
15534 if (h == NULL
15535 && elf32_arm_howto_from_type (r_type)->pc_relative)
15537 /* In shared libraries and relocatable executables,
15538 we treat local relative references as calls;
15539 see the related SYMBOL_CALLS_LOCAL code in
15540 allocate_dynrelocs. */
15541 call_reloc_p = true;
15542 may_need_local_target_p = true;
15544 else
15545 /* We are creating a shared library or relocatable
15546 executable, and this is a reloc against a global symbol,
15547 or a non-PC-relative reloc against a local symbol.
15548 We may need to copy the reloc into the output. */
15549 may_become_dynamic_p = true;
15551 else
15552 may_need_local_target_p = true;
15553 break;
15555 /* This relocation describes the C++ object vtable hierarchy.
15556 Reconstruct it for later use during GC. */
15557 case R_ARM_GNU_VTINHERIT:
15558 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15559 return false;
15560 break;
15562 /* This relocation describes which C++ vtable entries are actually
15563 used. Record for later use during GC. */
15564 case R_ARM_GNU_VTENTRY:
15565 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15566 return false;
15567 break;
15570 if (h != NULL)
15572 if (call_reloc_p)
15573 /* We may need a .plt entry if the function this reloc
15574 refers to is in a different object, regardless of the
15575 symbol's type. We can't tell for sure yet, because
15576 something later might force the symbol local. */
15577 h->needs_plt = 1;
15578 else if (may_need_local_target_p)
15579 /* If this reloc is in a read-only section, we might
15580 need a copy reloc. We can't check reliably at this
15581 stage whether the section is read-only, as input
15582 sections have not yet been mapped to output sections.
15583 Tentatively set the flag for now, and correct in
15584 adjust_dynamic_symbol. */
15585 h->non_got_ref = 1;
15588 if (may_need_local_target_p
15589 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15591 union gotplt_union *root_plt;
15592 struct arm_plt_info *arm_plt;
15593 struct arm_local_iplt_info *local_iplt;
15595 if (h != NULL)
15597 root_plt = &h->plt;
15598 arm_plt = &eh->plt;
15600 else
15602 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15603 if (local_iplt == NULL)
15604 return false;
15605 root_plt = &local_iplt->root;
15606 arm_plt = &local_iplt->arm;
15609 /* If the symbol is a function that doesn't bind locally,
15610 this relocation will need a PLT entry. */
15611 if (root_plt->refcount != -1)
15612 root_plt->refcount += 1;
15614 if (!call_reloc_p)
15615 arm_plt->noncall_refcount++;
15617 /* It's too early to use htab->use_blx here, so we have to
15618 record possible blx references separately from
15619 relocs that definitely need a thumb stub. */
15621 if (r_type == R_ARM_THM_CALL)
15622 arm_plt->maybe_thumb_refcount += 1;
15624 if (r_type == R_ARM_THM_JUMP24
15625 || r_type == R_ARM_THM_JUMP19)
15626 arm_plt->thumb_refcount += 1;
15629 if (may_become_dynamic_p)
15631 struct elf_dyn_relocs *p, **head;
15633 /* Create a reloc section in dynobj. */
15634 if (sreloc == NULL)
15636 sreloc = _bfd_elf_make_dynamic_reloc_section
15637 (sec, dynobj, 2, abfd, ! htab->use_rel);
15639 if (sreloc == NULL)
15640 return false;
15643 /* If this is a global symbol, count the number of
15644 relocations we need for this symbol. */
15645 if (h != NULL)
15646 head = &h->dyn_relocs;
15647 else
15649 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15650 if (head == NULL)
15651 return false;
15654 p = *head;
15655 if (p == NULL || p->sec != sec)
15657 size_t amt = sizeof *p;
15659 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15660 if (p == NULL)
15661 return false;
15662 p->next = *head;
15663 *head = p;
15664 p->sec = sec;
15665 p->count = 0;
15666 p->pc_count = 0;
15669 if (elf32_arm_howto_from_type (r_type)->pc_relative)
15670 p->pc_count += 1;
15671 p->count += 1;
15672 if (h == NULL && htab->fdpic_p && !bfd_link_pic (info)
15673 && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI)
15675 /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI,
15676 because we assume that every such relocation will
15677 become a rofixup. */
15678 _bfd_error_handler
15679 (_("FDPIC does not yet support %s relocation"
15680 " to become dynamic for executable"),
15681 elf32_arm_howto_table_1[r_type].name);
15682 abort ();
15687 return true;
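/* Rewrite the relocations of the SHT_ARM_EXIDX output section O so that
they reflect the unwind table edits made to its input sections: relocs
for deleted index entries are dropped, the offsets of surviving relocs
are adjusted, and a reloc is added for any CANTUNWIND entry appended at
the end. */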
15690 static void
15691 elf32_arm_update_relocs (asection *o,
15692 struct bfd_elf_section_reloc_data *reldata)
15694 void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
15695 void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
15696 const struct elf_backend_data *bed;
15697 _arm_elf_section_data *eado;
15698 struct bfd_link_order *p;
15699 bfd_byte *erela_head, *erela;
15700 Elf_Internal_Rela *irela_head, *irela;
15701 Elf_Internal_Shdr *rel_hdr;
15702 bfd *abfd;
15703 unsigned int count;
15705 eado = get_arm_elf_section_data (o);
15707 if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
15708 return;
15710 abfd = o->owner;
15711 bed = get_elf_backend_data (abfd);
15712 rel_hdr = reldata->hdr;
15714 if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
15716 swap_in = bed->s->swap_reloc_in;
15717 swap_out = bed->s->swap_reloc_out;
15719 else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
15721 swap_in = bed->s->swap_reloca_in;
15722 swap_out = bed->s->swap_reloca_out;
15724 else
15725 abort ();
15727 erela_head = rel_hdr->contents;
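/* Make an internal copy of the relocations, with one spare slot in case
a CANTUNWIND terminator entry has to be appended below. */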
15728 irela_head = (Elf_Internal_Rela *) bfd_zmalloc
15729 ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));
15731 erela = erela_head;
15732 irela = irela_head;
15733 count = 0;
15735 for (p = o->map_head.link_order; p; p = p->next)
15737 if (p->type == bfd_section_reloc_link_order
15738 || p->type == bfd_symbol_reloc_link_order)
15740 (*swap_in) (abfd, erela, irela);
15741 erela += rel_hdr->sh_entsize;
15742 irela++;
15743 count++;
15745 else if (p->type == bfd_indirect_link_order)
15747 struct bfd_elf_section_reloc_data *input_reldata;
15748 arm_unwind_table_edit *edit_list, *edit_tail;
15749 _arm_elf_section_data *eadi;
15750 bfd_size_type j;
15751 bfd_vma offset;
15752 asection *i;
15754 i = p->u.indirect.section;
15756 eadi = get_arm_elf_section_data (i);
15757 edit_list = eadi->u.exidx.unwind_edit_list;
15758 edit_tail = eadi->u.exidx.unwind_edit_tail;
15759 offset = i->output_offset;
15761 if (eadi->elf.rel.hdr
15762 && eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
15763 input_reldata = &eadi->elf.rel;
15764 else if (eadi->elf.rela.hdr
15765 && eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
15766 input_reldata = &eadi->elf.rela;
15767 else
15768 abort ();
15770 if (edit_list)
15772 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15774 arm_unwind_table_edit *edit_node, *edit_next;
15775 bfd_vma bias;
15776 bfd_vma reloc_index;
15778 (*swap_in) (abfd, erela, irela);
15779 reloc_index = (irela->r_offset - offset) / 8;
15781 bias = 0;
15782 edit_node = edit_list;
15783 for (edit_next = edit_list;
15784 edit_next && edit_next->index <= reloc_index;
15785 edit_next = edit_node->next)
15787 bias++;
15788 edit_node = edit_next;
15791 if (edit_node->type != DELETE_EXIDX_ENTRY
15792 || edit_node->index != reloc_index)
15794 irela->r_offset -= bias * 8;
15795 irela++;
15796 count++;
15799 erela += rel_hdr->sh_entsize;
15802 if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
15804 /* New relocation entity. */
15805 asection *text_sec = edit_tail->linked_section;
15806 asection *text_out = text_sec->output_section;
15807 bfd_vma exidx_offset = offset + i->size - 8;
15809 irela->r_addend = 0;
15810 irela->r_offset = exidx_offset;
15811 irela->r_info = ELF32_R_INFO
15812 (text_out->target_index, R_ARM_PREL31);
15813 irela++;
15814 count++;
15817 else
15819 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15821 (*swap_in) (abfd, erela, irela);
15822 erela += rel_hdr->sh_entsize;
15823 irela++;
15826 count += NUM_SHDR_ENTRIES (input_reldata->hdr);
15831 reldata->count = count;
15832 rel_hdr->sh_size = count * rel_hdr->sh_entsize;
15834 erela = erela_head;
15835 irela = irela_head;
15836 while (count > 0)
15838 (*swap_out) (abfd, irela, erela);
15839 erela += rel_hdr->sh_entsize;
15840 irela++;
15841 count--;
15844 free (irela_head);
15846 /* Hashes are no longer valid. */
15847 free (reldata->hashes);
15848 reldata->hashes = NULL;
15851 /* Unwinding tables are not referenced directly. This pass marks them as
15852 required if the corresponding code section is marked. Similarly, ARMv8-M
15853 secure entry functions can only be referenced by SG veneers which are
15854 created after the GC process. They need to be marked in case they reside in
15855 their own section (as would be the case if code was compiled with
15856 -ffunction-sections). */
15858 static bool
15859 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
15860 elf_gc_mark_hook_fn gc_mark_hook)
15862 bfd *sub;
15863 Elf_Internal_Shdr **elf_shdrp;
15864 asection *cmse_sec;
15865 obj_attribute *out_attr;
15866 Elf_Internal_Shdr *symtab_hdr;
15867 unsigned i, sym_count, ext_start;
15868 const struct elf_backend_data *bed;
15869 struct elf_link_hash_entry **sym_hashes;
15870 struct elf32_arm_link_hash_entry *cmse_hash;
15871 bool again, is_v8m, first_bfd_browse = true;
15872 bool debug_sec_need_to_be_marked = false;
15873 asection *isec;
15875 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
15877 out_attr = elf_known_obj_attributes_proc (info->output_bfd);
15878 is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
15879 && out_attr[Tag_CPU_arch_profile].i == 'M';
15881 /* Marking EH data may cause additional code sections to be marked,
15882 requiring multiple passes. */
15883 again = true;
15884 while (again)
15886 again = false;
15887 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
15889 asection *o;
15891 if (! is_arm_elf (sub))
15892 continue;
15894 elf_shdrp = elf_elfsections (sub);
15895 for (o = sub->sections; o != NULL; o = o->next)
15897 Elf_Internal_Shdr *hdr;
15899 hdr = &elf_section_data (o)->this_hdr;
15900 if (hdr->sh_type == SHT_ARM_EXIDX
15901 && hdr->sh_link
15902 && hdr->sh_link < elf_numsections (sub)
15903 && !o->gc_mark
15904 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
15906 again = true;
15907 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
15908 return false;
15912 /* Mark sections holding ARMv8-M secure entry functions.  We mark
15913 all of them, so there is no need for a second pass. */
15914 if (is_v8m && first_bfd_browse)
15916 sym_hashes = elf_sym_hashes (sub);
15917 bed = get_elf_backend_data (sub);
15918 symtab_hdr = &elf_tdata (sub)->symtab_hdr;
15919 sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
15920 ext_start = symtab_hdr->sh_info;
15922 /* Scan symbols. */
15923 for (i = ext_start; i < sym_count; i++)
15925 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
15927 /* Assume it is a special symbol.  If not, cmse_scan will
15928 warn about it and the user can do something about it. */
15929 if (startswith (cmse_hash->root.root.root.string,
15930 CMSE_PREFIX))
15932 cmse_sec = cmse_hash->root.root.u.def.section;
15933 if (!cmse_sec->gc_mark
15934 && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
15935 return false;
15936 /* Setting the flag below causes the debug sections related to
15937 these secure entry functions to be marked further down. */
15938 debug_sec_need_to_be_marked = true;
15942 if (debug_sec_need_to_be_marked)
15944 /* Loop over all the sections of the object file containing
15945 Armv8-M secure entry functions and mark all the debug
15946 sections. */
15947 for (isec = sub->sections; isec != NULL; isec = isec->next)
15949 /* If it is not a debug section, skip it. */
15950 if (!isec->gc_mark && (isec->flags & SEC_DEBUGGING))
15951 isec->gc_mark = 1;
15953 debug_sec_need_to_be_marked = false;
15957 first_bfd_browse = false;
15960 return true;
15963 /* Treat mapping symbols as special target symbols. */
15965 static bool
15966 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
15968 return bfd_is_arm_special_symbol_name (sym->name,
15969 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
15972 /* If the ELF symbol SYM might be a function in SEC, return the
15973 function size and set *CODE_OFF to the function's entry point,
15974 otherwise return zero. */
15976 static bfd_size_type
15977 elf32_arm_maybe_function_sym (const asymbol *sym, asection *sec,
15978 bfd_vma *code_off)
15980 bfd_size_type size;
15981 elf_symbol_type * elf_sym = (elf_symbol_type *) sym;
15983 if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
15984 | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
15985 || sym->section != sec)
15986 return 0;
15988 size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size;
15990 if (!(sym->flags & BSF_SYNTHETIC))
15991 switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info))
15993 case STT_NOTYPE:
15994 /* Ignore symbols created by the annobin plugin for gcc and clang.
15995 These symbols are hidden, local, notype and have a size of 0. */
15996 if (size == 0
15997 && sym->flags & BSF_LOCAL
15998 && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN)
15999 return 0;
16000 /* Fall through. */
16001 case STT_FUNC:
16002 case STT_ARM_TFUNC:
16003 /* FIXME: Allow STT_GNU_IFUNC as well ? */
16004 break;
16005 default:
16006 return 0;
16009 if ((sym->flags & BSF_LOCAL)
16010 && bfd_is_arm_special_symbol_name (sym->name,
16011 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
16012 return 0;
16014 *code_off = sym->value;
16016 /* Do not return 0 for the function's size. */
16017 return size ? size : 1;
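/* Find inlining information (file name, function name and line number)
using the DWARF2 debug info attached to ABFD. */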
16021 static bool
16022 elf32_arm_find_inliner_info (bfd * abfd,
16023 const char ** filename_ptr,
16024 const char ** functionname_ptr,
16025 unsigned int * line_ptr)
16027 bool found;
16028 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
16029 functionname_ptr, line_ptr,
16030 & elf_tdata (abfd)->dwarf2_find_line_info);
16031 return found;
16034 /* Adjust a symbol defined by a dynamic object and referenced by a
16035 regular object. The current definition is in some section of the
16036 dynamic object, but we're not including those sections. We have to
16037 change the definition to something the rest of the link can
16038 understand. */
16040 static bool
16041 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
16042 struct elf_link_hash_entry * h)
16044 bfd * dynobj;
16045 asection *s, *srel;
16046 struct elf32_arm_link_hash_entry * eh;
16047 struct elf32_arm_link_hash_table *globals;
16049 globals = elf32_arm_hash_table (info);
16050 if (globals == NULL)
16051 return false;
16053 dynobj = elf_hash_table (info)->dynobj;
16055 /* Make sure we know what is going on here. */
16056 BFD_ASSERT (dynobj != NULL
16057 && (h->needs_plt
16058 || h->type == STT_GNU_IFUNC
16059 || h->is_weakalias
16060 || (h->def_dynamic
16061 && h->ref_regular
16062 && !h->def_regular)));
16064 eh = (struct elf32_arm_link_hash_entry *) h;
16066 /* If this is a function, put it in the procedure linkage table. We
16067 will fill in the contents of the procedure linkage table later,
16068 when we know the address of the .got section. */
16069 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
16071 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
16072 symbol binds locally. */
16073 if (h->plt.refcount <= 0
16074 || (h->type != STT_GNU_IFUNC
16075 && (SYMBOL_CALLS_LOCAL (info, h)
16076 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16077 && h->root.type == bfd_link_hash_undefweak))))
16079 /* This case can occur if we saw a PLT32 reloc in an input
16080 file, but the symbol was never referred to by a dynamic
16081 object, or if all references were garbage collected. In
16082 such a case, we don't actually need to build a procedure
16083 linkage table, and we can just do a PC24 reloc instead. */
16084 h->plt.offset = (bfd_vma) -1;
16085 eh->plt.thumb_refcount = 0;
16086 eh->plt.maybe_thumb_refcount = 0;
16087 eh->plt.noncall_refcount = 0;
16088 h->needs_plt = 0;
16091 return true;
16093 else
16095 /* It's possible that we incorrectly decided a .plt reloc was
16096 needed for an R_ARM_PC24 or similar reloc to a non-function sym
16097 in check_relocs. We can't decide accurately between function
16098 and non-function syms in check_relocs; objects loaded later in
16099 the link may change h->type. So fix it now. */
16100 h->plt.offset = (bfd_vma) -1;
16101 eh->plt.thumb_refcount = 0;
16102 eh->plt.maybe_thumb_refcount = 0;
16103 eh->plt.noncall_refcount = 0;
16106 /* If this is a weak symbol, and there is a real definition, the
16107 processor independent code will have arranged for us to see the
16108 real definition first, and we can just use the same value. */
16109 if (h->is_weakalias)
16111 struct elf_link_hash_entry *def = weakdef (h);
16112 BFD_ASSERT (def->root.type == bfd_link_hash_defined);
16113 h->root.u.def.section = def->root.u.def.section;
16114 h->root.u.def.value = def->root.u.def.value;
16115 return true;
16118 /* If there are no non-GOT references, we do not need a copy
16119 relocation. */
16120 if (!h->non_got_ref)
16121 return true;
16123 /* This is a reference to a symbol defined by a dynamic object which
16124 is not a function. */
16126 /* If we are creating a shared library, we must presume that the
16127 only references to the symbol are via the global offset table.
16128 For such cases we need not do anything here; the relocations will
16129 be handled correctly by relocate_section. Relocatable executables
16130 can reference data in shared objects directly, so we don't need to
16131 do anything here. */
16132 if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
16133 return true;
16135 /* We must allocate the symbol in our .dynbss section, which will
16136 become part of the .bss section of the executable. There will be
16137 an entry for this symbol in the .dynsym section. The dynamic
16138 object will contain position independent code, so all references
16139 from the dynamic object to this symbol will go through the global
16140 offset table. The dynamic linker will use the .dynsym entry to
16141 determine the address it must put in the global offset table, so
16142 both the dynamic object and the regular object will refer to the
16143 same memory location for the variable. */
16144 /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
16145 linker to copy the initial value out of the dynamic object and into
16146 the runtime process image. We need to remember the offset into the
16147 .rel(a).bss section we are going to use. */
16148 if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
16150 s = globals->root.sdynrelro;
16151 srel = globals->root.sreldynrelro;
16153 else
16155 s = globals->root.sdynbss;
16156 srel = globals->root.srelbss;
16158 if (info->nocopyreloc == 0
16159 && (h->root.u.def.section->flags & SEC_ALLOC) != 0
16160 && h->size != 0)
16162 elf32_arm_allocate_dynrelocs (info, srel, 1);
16163 h->needs_copy = 1;
16166 return _bfd_elf_adjust_dynamic_copy (info, h, s);
16169 /* Allocate space in .plt, .got and associated reloc sections for
16170 dynamic relocs. */
16172 static bool
16173 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
16175 struct bfd_link_info *info;
16176 struct elf32_arm_link_hash_table *htab;
16177 struct elf32_arm_link_hash_entry *eh;
16178 struct elf_dyn_relocs *p;
16180 if (h->root.type == bfd_link_hash_indirect)
16181 return true;
16183 eh = (struct elf32_arm_link_hash_entry *) h;
16185 info = (struct bfd_link_info *) inf;
16186 htab = elf32_arm_hash_table (info);
16187 if (htab == NULL)
16188 return false;
16190 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
16191 && h->plt.refcount > 0)
16193 /* Make sure this symbol is output as a dynamic symbol.
16194 Undefined weak syms won't yet be marked as dynamic. */
16195 if (h->dynindx == -1 && !h->forced_local
16196 && h->root.type == bfd_link_hash_undefweak)
16198 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16199 return false;
16202 /* If the call in the PLT entry binds locally, the associated
16203 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16204 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
16205 than the .plt section. */
16206 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
16208 eh->is_iplt = 1;
16209 if (eh->plt.noncall_refcount == 0
16210 && SYMBOL_REFERENCES_LOCAL (info, h))
16211 /* All non-call references can be resolved directly.
16212 This means that they can (and in some cases, must)
16213 resolve directly to the run-time target, rather than
16214 to the PLT.  That in turn means that any .got entry
16215 would be equal to the .igot.plt entry, so there's
16216 no point having both. */
16217 h->got.refcount = 0;
16220 if (bfd_link_pic (info)
16221 || eh->is_iplt
16222 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
16224 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
16226 /* If this symbol is not defined in a regular file, and we are
16227 not generating a shared library, then set the symbol to this
16228 location in the .plt. This is required to make function
16229 pointers compare as equal between the normal executable and
16230 the shared library. */
16231 if (! bfd_link_pic (info)
16232 && !h->def_regular)
16234 h->root.u.def.section = htab->root.splt;
16235 h->root.u.def.value = h->plt.offset;
16237 /* Make sure the function is not marked as Thumb, in case
16238 it is the target of an ABS32 relocation, which will
16239 point to the PLT entry. */
16240 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16243 /* VxWorks executables have a second set of relocations for
16244 each PLT entry. They go in a separate relocation section,
16245 which is processed by the kernel loader. */
16246 if (htab->root.target_os == is_vxworks && !bfd_link_pic (info))
16248 /* There is a relocation for the initial PLT entry:
16249 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
16250 if (h->plt.offset == htab->plt_header_size)
16251 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
16253 /* There are two extra relocations for each subsequent
16254 PLT entry: an R_ARM_32 relocation for the GOT entry,
16255 and an R_ARM_32 relocation for the PLT entry. */
16256 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
16259 else
16261 h->plt.offset = (bfd_vma) -1;
16262 h->needs_plt = 0;
16265 else
16267 h->plt.offset = (bfd_vma) -1;
16268 h->needs_plt = 0;
16271 eh = (struct elf32_arm_link_hash_entry *) h;
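/* Assume no TLS descriptor GOT entry is needed; one is allocated below
if the symbol turns out to use the GDESC model. */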
16272 eh->tlsdesc_got = (bfd_vma) -1;
16274 if (h->got.refcount > 0)
16276 asection *s;
16277 bool dyn;
16278 int tls_type = elf32_arm_hash_entry (h)->tls_type;
16279 int indx;
16281 /* Make sure this symbol is output as a dynamic symbol.
16282 Undefined weak syms won't yet be marked as dynamic. */
16283 if (htab->root.dynamic_sections_created
16284 && h->dynindx == -1
16285 && !h->forced_local
16286 && h->root.type == bfd_link_hash_undefweak)
16288 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16289 return false;
16292 s = htab->root.sgot;
16293 h->got.offset = s->size;
16295 if (tls_type == GOT_UNKNOWN)
16296 abort ();
16298 if (tls_type == GOT_NORMAL)
16299 /* Non-TLS symbols need one GOT slot. */
16300 s->size += 4;
16301 else
16303 if (tls_type & GOT_TLS_GDESC)
16305 /* R_ARM_TLS_DESC needs 2 GOT slots. */
16306 eh->tlsdesc_got
16307 = (htab->root.sgotplt->size
16308 - elf32_arm_compute_jump_table_size (htab));
16309 htab->root.sgotplt->size += 8;
16310 h->got.offset = (bfd_vma) -2;
16311 /* plt.got_offset needs to know there's a TLS_DESC
16312 reloc in the middle of .got.plt. */
16313 htab->num_tls_desc++;
16316 if (tls_type & GOT_TLS_GD)
16318 /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16319 consecutive GOT slots. If the symbol is both GD
16320 and GDESC, got.offset may have been
16321 overwritten. */
16322 h->got.offset = s->size;
16323 s->size += 8;
16326 if (tls_type & GOT_TLS_IE)
16327 /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16328 slot. */
16329 s->size += 4;
16332 dyn = htab->root.dynamic_sections_created;
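/* Work out which dynamic symbol index the GOT relocations should use;
zero means they refer to the local definition rather than to a dynamic
symbol. */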
16334 indx = 0;
16335 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
16336 && (!bfd_link_pic (info)
16337 || !SYMBOL_REFERENCES_LOCAL (info, h)))
16338 indx = h->dynindx;
16340 if (tls_type != GOT_NORMAL
16341 && (bfd_link_dll (info) || indx != 0)
16342 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16343 || h->root.type != bfd_link_hash_undefweak))
16345 if (tls_type & GOT_TLS_IE)
16346 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16348 if (tls_type & GOT_TLS_GD)
16349 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16351 if (tls_type & GOT_TLS_GDESC)
16353 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
16354 /* GDESC needs a trampoline to jump to. */
16355 htab->tls_trampoline = -1;
16358 /* Only GD needs it. GDESC just emits one relocation per
16359 2 entries. */
16360 if ((tls_type & GOT_TLS_GD) && indx != 0)
16361 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16363 else if (((indx != -1) || htab->fdpic_p)
16364 && !SYMBOL_REFERENCES_LOCAL (info, h))
16366 if (htab->root.dynamic_sections_created)
16367 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
16368 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16370 else if (h->type == STT_GNU_IFUNC
16371 && eh->plt.noncall_refcount == 0)
16372 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16373 they all resolve dynamically instead. Reserve room for the
16374 GOT entry's R_ARM_IRELATIVE relocation. */
16375 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
16376 else if (bfd_link_pic (info)
16377 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16378 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
16379 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16380 else if (htab->fdpic_p && tls_type == GOT_NORMAL)
16381 /* Reserve room for rofixup for FDPIC executable. */
16382 /* TLS relocs do not need space since they are completely
16383 resolved. */
16384 htab->srofixup->size += 4;
16386 else
16387 h->got.offset = (bfd_vma) -1;
16389 /* FDPIC support. */
16390 if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
16392 /* Symbol mustn't be exported. */
16393 if (h->dynindx != -1)
16394 abort ();
16396 /* We only allocate one function descriptor with its associated
16397 relocation. */
16398 if (eh->fdpic_cnts.funcdesc_offset == -1)
16400 asection *s = htab->root.sgot;
16402 eh->fdpic_cnts.funcdesc_offset = s->size;
16403 s->size += 8;
16404 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16405 if (bfd_link_pic (info))
16406 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16407 else
16408 htab->srofixup->size += 8;
16412 if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
16414 asection *s = htab->root.sgot;
16416 if (htab->root.dynamic_sections_created && h->dynindx == -1
16417 && !h->forced_local)
16418 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16419 return false;
16421 if (h->dynindx == -1)
16423 /* We only allocate one function descriptor with its
16424 associated relocation. */
16425 if (eh->fdpic_cnts.funcdesc_offset == -1)
16428 eh->fdpic_cnts.funcdesc_offset = s->size;
16429 s->size += 8;
16430 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16431 rofixups. */
16432 if (bfd_link_pic (info))
16433 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16434 else
16435 htab->srofixup->size += 8;
16439 /* Add one entry into the GOT and a R_ARM_FUNCDESC or
16440 R_ARM_RELATIVE/rofixup relocation on it. */
16441 eh->fdpic_cnts.gotfuncdesc_offset = s->size;
16442 s->size += 4;
16443 if (h->dynindx == -1 && !bfd_link_pic (info))
16444 htab->srofixup->size += 4;
16445 else
16446 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16449 if (eh->fdpic_cnts.funcdesc_cnt > 0)
16451 if (htab->root.dynamic_sections_created && h->dynindx == -1
16452 && !h->forced_local)
16453 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16454 return false;
16456 if (h->dynindx == -1)
16458 /* We only allocate one function descriptor with its
16459 associated relocation. */
16460 if (eh->fdpic_cnts.funcdesc_offset == -1)
16462 asection *s = htab->root.sgot;
16464 eh->fdpic_cnts.funcdesc_offset = s->size;
16465 s->size += 8;
16466 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16467 rofixups. */
16468 if (bfd_link_pic (info))
16469 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16470 else
16471 htab->srofixup->size += 8;
16474 if (h->dynindx == -1 && !bfd_link_pic (info))
16476 /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
16477 htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
16479 else
16481 /* We will need one dynamic reloc per reference.  It will be either
16482 R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols. */
16483 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
16484 eh->fdpic_cnts.funcdesc_cnt);
16488 /* Allocate stubs for exported Thumb functions on v4t. */
16489 if (!htab->use_blx && h->dynindx != -1
16490 && h->def_regular
16491 && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
16492 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
16494 struct elf_link_hash_entry * th;
16495 struct bfd_link_hash_entry * bh;
16496 struct elf_link_hash_entry * myh;
16497 char name[1024];
16498 asection *s;
16499 bh = NULL;
16500 /* Create a new symbol to register the real location of the function. */
16501 s = h->root.u.def.section;
16502 sprintf (name, "__real_%s", h->root.root.string);
16503 _bfd_generic_link_add_one_symbol (info, s->owner,
16504 name, BSF_GLOBAL, s,
16505 h->root.u.def.value,
16506 NULL, true, false, &bh);
16508 myh = (struct elf_link_hash_entry *) bh;
16509 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16510 myh->forced_local = 1;
16511 ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
16512 eh->export_glue = myh;
16513 th = record_arm_to_thumb_glue (info, h);
16514 /* Point the symbol at the stub. */
16515 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
16516 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16517 h->root.u.def.section = th->root.u.def.section;
16518 h->root.u.def.value = th->root.u.def.value & ~1;
16521 if (h->dyn_relocs == NULL)
16522 return true;
16524 /* In the shared -Bsymbolic case, discard space allocated for
16525 dynamic pc-relative relocs against symbols which turn out to be
16526 defined in regular objects. For the normal shared case, discard
16527 space for pc-relative relocs that have become local due to symbol
16528 visibility changes. */
16530 if (bfd_link_pic (info)
16531 || htab->root.is_relocatable_executable
16532 || htab->fdpic_p)
16534 /* Relocs that use pc_count are PC-relative forms, which will appear
16535 on something like ".long foo - ." or "movw REG, foo - .". We want
16536 calls to protected symbols to resolve directly to the function
16537 rather than going via the plt. If people want function pointer
16538 comparisons to work as expected then they should avoid writing
16539 assembly like ".long foo - .". */
16540 if (SYMBOL_CALLS_LOCAL (info, h))
16542 struct elf_dyn_relocs **pp;
16544 for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16546 p->count -= p->pc_count;
16547 p->pc_count = 0;
16548 if (p->count == 0)
16549 *pp = p->next;
16550 else
16551 pp = &p->next;
16555 if (htab->root.target_os == is_vxworks)
16557 struct elf_dyn_relocs **pp;
16559 for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16561 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
16562 *pp = p->next;
16563 else
16564 pp = &p->next;
16568 /* Also discard relocs on undefined weak syms with non-default
16569 visibility. */
16570 if (h->dyn_relocs != NULL
16571 && h->root.type == bfd_link_hash_undefweak)
16573 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16574 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16575 h->dyn_relocs = NULL;
16577 /* Make sure undefined weak symbols are output as a dynamic
16578 symbol in PIEs. */
16579 else if (htab->root.dynamic_sections_created && h->dynindx == -1
16580 && !h->forced_local)
16582 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16583 return false;
16587 else if (htab->root.is_relocatable_executable && h->dynindx == -1
16588 && h->root.type == bfd_link_hash_new)
16590 /* Output absolute symbols so that we can create relocations
16591 against them. For normal symbols we output a relocation
16592 against the section that contains them. */
16593 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16594 return false;
16598 else
16600 /* For the non-shared case, discard space for relocs against
16601 symbols which turn out to need copy relocs or are not
16602 dynamic. */
16604 if (!h->non_got_ref
16605 && ((h->def_dynamic
16606 && !h->def_regular)
16607 || (htab->root.dynamic_sections_created
16608 && (h->root.type == bfd_link_hash_undefweak
16609 || h->root.type == bfd_link_hash_undefined))))
16611 /* Make sure this symbol is output as a dynamic symbol.
16612 Undefined weak syms won't yet be marked as dynamic. */
16613 if (h->dynindx == -1 && !h->forced_local
16614 && h->root.type == bfd_link_hash_undefweak)
16616 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16617 return false;
16620 /* If that succeeded, we know we'll be keeping all the
16621 relocs. */
16622 if (h->dynindx != -1)
16623 goto keep;
16626 h->dyn_relocs = NULL;
16628 keep: ;
16631 /* Finally, allocate space. */
16632 for (p = h->dyn_relocs; p != NULL; p = p->next)
16634 asection *sreloc = elf_section_data (p->sec)->sreloc;
16636 if (h->type == STT_GNU_IFUNC
16637 && eh->plt.noncall_refcount == 0
16638 && SYMBOL_REFERENCES_LOCAL (info, h))
16639 elf32_arm_allocate_irelocs (info, sreloc, p->count);
16640 else if (h->dynindx != -1
16641 && (!bfd_link_pic (info) || !info->symbolic || !h->def_regular))
16642 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16643 else if (htab->fdpic_p && !bfd_link_pic (info))
16644 htab->srofixup->size += 4 * p->count;
16645 else
16646 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16649 return true;
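/* Record in the link hash table whether code sections should be
byte-swapped when the output is written (BYTESWAP_CODE). */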
16652 void
16653 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16654 int byteswap_code)
16656 struct elf32_arm_link_hash_table *globals;
16658 globals = elf32_arm_hash_table (info);
16659 if (globals == NULL)
16660 return;
16662 globals->byteswap_code = byteswap_code;
16665 /* Set the sizes of the dynamic sections. */
16667 static bool
16668 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
16669 struct bfd_link_info * info)
16671 bfd * dynobj;
16672 asection * s;
16673 bool relocs;
16674 bfd *ibfd;
16675 struct elf32_arm_link_hash_table *htab;
16677 htab = elf32_arm_hash_table (info);
16678 if (htab == NULL)
16679 return false;
16681 dynobj = elf_hash_table (info)->dynobj;
16682 BFD_ASSERT (dynobj != NULL);
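/* Decide whether BLX instructions may be used for interworking, now
that the attributes of all input files have been merged. */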
16683 check_use_blx (htab);
16685 if (elf_hash_table (info)->dynamic_sections_created)
16687 /* Set the contents of the .interp section to the interpreter. */
16688 if (bfd_link_executable (info) && !info->nointerp)
16690 s = bfd_get_linker_section (dynobj, ".interp");
16691 BFD_ASSERT (s != NULL);
16692 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
16693 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
16697 /* Set up .got offsets for local syms, and space for local dynamic
16698 relocs. */
16699 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16701 bfd_signed_vma *local_got;
16702 bfd_signed_vma *end_local_got;
16703 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
16704 char *local_tls_type;
16705 bfd_vma *local_tlsdesc_gotent;
16706 bfd_size_type locsymcount;
16707 Elf_Internal_Shdr *symtab_hdr;
16708 asection *srel;
16709 unsigned int symndx;
16710 struct fdpic_local *local_fdpic_cnts;
16712 if (! is_arm_elf (ibfd))
16713 continue;
16715 for (s = ibfd->sections; s != NULL; s = s->next)
16717 struct elf_dyn_relocs *p;
16719 for (p = (struct elf_dyn_relocs *)
16720 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
16722 if (!bfd_is_abs_section (p->sec)
16723 && bfd_is_abs_section (p->sec->output_section))
16725 /* Input section has been discarded, either because
16726 it is a copy of a linkonce section or due to
16727 linker script /DISCARD/, so we'll be discarding
16728 the relocs too. */
16730 else if (htab->root.target_os == is_vxworks
16731 && strcmp (p->sec->output_section->name,
16732 ".tls_vars") == 0)
16734 /* Relocations in vxworks .tls_vars sections are
16735 handled specially by the loader. */
16737 else if (p->count != 0)
16739 srel = elf_section_data (p->sec)->sreloc;
16740 if (htab->fdpic_p && !bfd_link_pic (info))
16741 htab->srofixup->size += 4 * p->count;
16742 else
16743 elf32_arm_allocate_dynrelocs (info, srel, p->count);
16744 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
16745 info->flags |= DF_TEXTREL;
16750 local_got = elf_local_got_refcounts (ibfd);
16751 if (local_got == NULL)
16752 continue;
16754 symtab_hdr = & elf_symtab_hdr (ibfd);
16755 locsymcount = symtab_hdr->sh_info;
16756 end_local_got = local_got + locsymcount;
16757 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
16758 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
16759 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
16760 local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
16761 symndx = 0;
16762 s = htab->root.sgot;
16763 srel = htab->root.srelgot;
16764 for (; local_got < end_local_got;
16765 ++local_got, ++local_iplt_ptr, ++local_tls_type,
16766 ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
16768 if (symndx >= elf32_arm_num_entries (ibfd))
16769 return false;
16771 *local_tlsdesc_gotent = (bfd_vma) -1;
16772 local_iplt = *local_iplt_ptr;
16774 /* FDPIC support. */
16775 if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
16777 if (local_fdpic_cnts->funcdesc_offset == -1)
16779 local_fdpic_cnts->funcdesc_offset = s->size;
16780 s->size += 8;
16782 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16783 if (bfd_link_pic (info))
16784 elf32_arm_allocate_dynrelocs (info, srel, 1);
16785 else
16786 htab->srofixup->size += 8;
16790 if (local_fdpic_cnts->funcdesc_cnt > 0)
16792 if (local_fdpic_cnts->funcdesc_offset == -1)
16794 local_fdpic_cnts->funcdesc_offset = s->size;
16795 s->size += 8;
16797 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16798 if (bfd_link_pic (info))
16799 elf32_arm_allocate_dynrelocs (info, srel, 1);
16800 else
16801 htab->srofixup->size += 8;
16804 /* We will add n R_ARM_RELATIVE relocations or n rofixups. */
16805 if (bfd_link_pic (info))
16806 elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
16807 else
16808 htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
16811 if (local_iplt != NULL)
16813 struct elf_dyn_relocs *p;
16815 if (local_iplt->root.refcount > 0)
16817 elf32_arm_allocate_plt_entry (info, true,
16818 &local_iplt->root,
16819 &local_iplt->arm);
16820 if (local_iplt->arm.noncall_refcount == 0)
16821 /* All references to the PLT are calls, so all
16822 non-call references can resolve directly to the
16823 run-time target. This means that the .got entry
16824 would be the same as the .igot.plt entry, so there's
16825 no point creating both. */
16826 *local_got = 0;
16828 else
16830 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
16831 local_iplt->root.offset = (bfd_vma) -1;
16834 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
16836 asection *psrel;
16838 psrel = elf_section_data (p->sec)->sreloc;
16839 if (local_iplt->arm.noncall_refcount == 0)
16840 elf32_arm_allocate_irelocs (info, psrel, p->count);
16841 else
16842 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
16845 if (*local_got > 0)
16847 Elf_Internal_Sym *isym;
16849 *local_got = s->size;
16850 if (*local_tls_type & GOT_TLS_GD)
16851 /* TLS_GD relocs need an 8-byte structure in the GOT. */
16852 s->size += 8;
16853 if (*local_tls_type & GOT_TLS_GDESC)
16855 *local_tlsdesc_gotent = htab->root.sgotplt->size
16856 - elf32_arm_compute_jump_table_size (htab);
16857 htab->root.sgotplt->size += 8;
16858 *local_got = (bfd_vma) -2;
16859 /* plt.got_offset needs to know there's a TLS_DESC
16860 reloc in the middle of .got.plt. */
16861 htab->num_tls_desc++;
16863 if (*local_tls_type & GOT_TLS_IE)
16864 s->size += 4;
16866 if (*local_tls_type & GOT_NORMAL)
16868 /* If the symbol is both GD and GDESC, *local_got
16869 may have been overwritten. */
16870 *local_got = s->size;
16871 s->size += 4;
16874 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache, ibfd,
16875 symndx);
16876 if (isym == NULL)
16877 return false;
16879 /* If all references to an STT_GNU_IFUNC PLT are calls,
16880 then all non-call references, including this GOT entry,
16881 resolve directly to the run-time target. */
16882 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
16883 && (local_iplt == NULL
16884 || local_iplt->arm.noncall_refcount == 0))
16885 elf32_arm_allocate_irelocs (info, srel, 1);
16886 else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
16888 if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
16889 elf32_arm_allocate_dynrelocs (info, srel, 1);
16890 else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
16891 htab->srofixup->size += 4;
16893 if ((bfd_link_pic (info) || htab->fdpic_p)
16894 && *local_tls_type & GOT_TLS_GDESC)
16896 elf32_arm_allocate_dynrelocs (info,
16897 htab->root.srelplt, 1);
16898 htab->tls_trampoline = -1;
16902 else
16903 *local_got = (bfd_vma) -1;
16907 if (htab->tls_ldm_got.refcount > 0)
16909 /* Allocate two GOT entries and one dynamic relocation (if necessary)
16910 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
16911 htab->tls_ldm_got.offset = htab->root.sgot->size;
16912 htab->root.sgot->size += 8;
16913 if (bfd_link_pic (info))
16914 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16916 else
16917 htab->tls_ldm_got.offset = -1;
16919 /* At the very end of the .rofixup section is a pointer to the GOT,
16920 reserve space for it. */
16921 if (htab->fdpic_p && htab->srofixup != NULL)
16922 htab->srofixup->size += 4;
16924 /* Allocate global sym .plt and .got entries, and space for global
16925 sym dynamic relocs. */
16926 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
16928 /* Here we rummage through the found bfds to collect glue information. */
16929 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16931 if (! is_arm_elf (ibfd))
16932 continue;
16934 /* Initialise mapping tables for code/data. */
16935 bfd_elf32_arm_init_maps (ibfd);
16937 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
16938 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
16939 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
16940 _bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
16943 /* Allocate space for the glue sections now that we've sized them. */
16944 bfd_elf32_arm_allocate_interworking_sections (info);
16946 /* For every jump slot reserved in the sgotplt, reloc_count is
16947 incremented. However, when we reserve space for TLS descriptors,
16948 it's not incremented, so in order to compute the space reserved
16949 for them, it suffices to multiply the reloc count by the jump
16950 slot size. */
16951 if (htab->root.srelplt)
16952 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size (htab);
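/* If a TLS descriptor trampoline is required, reserve a PLT entry for
it, creating the PLT header first if the PLT is otherwise empty. */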
16954 if (htab->tls_trampoline)
16956 if (htab->root.splt->size == 0)
16957 htab->root.splt->size += htab->plt_header_size;
16959 htab->tls_trampoline = htab->root.splt->size;
16960 htab->root.splt->size += htab->plt_entry_size;
16962 /* If we're not using lazy TLS relocations, don't generate the
16963 PLT and GOT entries they require. */
16964 if ((info->flags & DF_BIND_NOW))
16965 htab->root.tlsdesc_plt = 0;
16966 else
16968 htab->root.tlsdesc_got = htab->root.sgot->size;
16969 htab->root.sgot->size += 4;
16971 htab->root.tlsdesc_plt = htab->root.splt->size;
16972 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
16976 /* The check_relocs and adjust_dynamic_symbol entry points have
16977 determined the sizes of the various dynamic sections. Allocate
16978 memory for them. */
16979 relocs = false;
16980 for (s = dynobj->sections; s != NULL; s = s->next)
16982 const char * name;
16984 if ((s->flags & SEC_LINKER_CREATED) == 0)
16985 continue;
16987 /* It's OK to base decisions on the section name, because none
16988 of the dynobj section names depend upon the input files. */
16989 name = bfd_section_name (s);
16991 if (s == htab->root.splt)
16993 /* Remember whether there is a PLT. */
16996 else if (startswith (name, ".rel"))
16998 if (s->size != 0)
17000 /* Remember whether there are any reloc sections other
17001 than .rel(a).plt and .rela.plt.unloaded. */
17002 if (s != htab->root.srelplt && s != htab->srelplt2)
17003 relocs = true;
17005 /* We use the reloc_count field as a counter if we need
17006 to copy relocs into the output file. */
17007 s->reloc_count = 0;
17010 else if (s != htab->root.sgot
17011 && s != htab->root.sgotplt
17012 && s != htab->root.iplt
17013 && s != htab->root.igotplt
17014 && s != htab->root.sdynbss
17015 && s != htab->root.sdynrelro
17016 && s != htab->srofixup)
17018 /* It's not one of our sections, so don't allocate space. */
17019 continue;
17022 if (s->size == 0)
17024 /* If we don't need this section, strip it from the
17025 output file. This is mostly to handle .rel(a).bss and
17026 .rel(a).plt. We must create both sections in
17027 create_dynamic_sections, because they must be created
17028 before the linker maps input sections to output
17029 sections. The linker does that before
17030 adjust_dynamic_symbol is called, and it is that
17031 function which decides whether anything needs to go
17032 into these sections. */
17033 s->flags |= SEC_EXCLUDE;
17034 continue;
17037 if ((s->flags & SEC_HAS_CONTENTS) == 0)
17038 continue;
17040 /* Allocate memory for the section contents. */
17041 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
17042 if (s->contents == NULL)
17043 return false;
17046 return _bfd_elf_maybe_vxworks_add_dynamic_tags (output_bfd, info,
17047 relocs);
17050 /* Size sections even though they're not dynamic. We use this to set up
17051 _TLS_MODULE_BASE_, if needed. */
17053 static bool
17054 elf32_arm_always_size_sections (bfd *output_bfd,
17055 struct bfd_link_info *info)
17057 asection *tls_sec;
17058 struct elf32_arm_link_hash_table *htab;
17060 htab = elf32_arm_hash_table (info);
17062 if (bfd_link_relocatable (info))
17063 return true;
17065 tls_sec = elf_hash_table (info)->tls_sec;
17067 if (tls_sec)
17069 struct elf_link_hash_entry *tlsbase;
17071 tlsbase = elf_link_hash_lookup
17072 (elf_hash_table (info), "_TLS_MODULE_BASE_", true, true, false);
17074 if (tlsbase)
17076 struct bfd_link_hash_entry *bh = NULL;
17077 const struct elf_backend_data *bed
17078 = get_elf_backend_data (output_bfd);
17080 if (!(_bfd_generic_link_add_one_symbol
17081 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
17082 tls_sec, 0, NULL, false,
17083 bed->collect, &bh)))
17084 return false;
17086 tlsbase->type = STT_TLS;
17087 tlsbase = (struct elf_link_hash_entry *)bh;
17088 tlsbase->def_regular = 1;
17089 tlsbase->other = STV_HIDDEN;
17090 (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
17094 if (htab->fdpic_p && !bfd_link_relocatable (info)
17095 && !bfd_elf_stack_segment_size (output_bfd, info,
17096 "__stacksize", DEFAULT_STACK_SIZE))
17097 return false;
17099 return true;
17102 /* Finish up dynamic symbol handling. We set the contents of various
17103 dynamic sections here. */
17105 static bool
17106 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
17107 struct bfd_link_info * info,
17108 struct elf_link_hash_entry * h,
17109 Elf_Internal_Sym * sym)
17111 struct elf32_arm_link_hash_table *htab;
17112 struct elf32_arm_link_hash_entry *eh;
17114 htab = elf32_arm_hash_table (info);
17115 if (htab == NULL)
17116 return false;
17118 eh = (struct elf32_arm_link_hash_entry *) h;
17120 if (h->plt.offset != (bfd_vma) -1)
17122 if (!eh->is_iplt)
17124 BFD_ASSERT (h->dynindx != -1);
17125 if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
17126 h->dynindx, 0))
17127 return false;
17130 if (!h->def_regular)
17132 /* Mark the symbol as undefined, rather than as defined in
17133 the .plt section. */
17134 sym->st_shndx = SHN_UNDEF;
17135 /* If the symbol is weak we need to clear the value.
17136 Otherwise, the PLT entry would provide a definition for
17137 the symbol even if the symbol wasn't defined anywhere,
17138 and so the symbol would never be NULL. Leave the value if
17139 there were any relocations where pointer equality matters
17140 (this is a clue for the dynamic linker, to make function
17141 pointer comparisons work between an application and shared
17142 library). */
17143 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
17144 sym->st_value = 0;
17146 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
17148 /* At least one non-call relocation references this .iplt entry,
17149 so the .iplt entry is the function's canonical address. */
17150 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
17151 ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
17152 sym->st_shndx = (_bfd_elf_section_from_bfd_section
17153 (output_bfd, htab->root.iplt->output_section));
17154 sym->st_value = (h->plt.offset
17155 + htab->root.iplt->output_section->vma
17156 + htab->root.iplt->output_offset);
17160 if (h->needs_copy)
17162 asection * s;
17163 Elf_Internal_Rela rel;
17165 /* This symbol needs a copy reloc. Set it up. */
17166 BFD_ASSERT (h->dynindx != -1
17167 && (h->root.type == bfd_link_hash_defined
17168 || h->root.type == bfd_link_hash_defweak));
17170 rel.r_addend = 0;
17171 rel.r_offset = (h->root.u.def.value
17172 + h->root.u.def.section->output_section->vma
17173 + h->root.u.def.section->output_offset);
17174 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
17175 if (h->root.u.def.section == htab->root.sdynrelro)
17176 s = htab->root.sreldynrelro;
17177 else
17178 s = htab->root.srelbss;
17179 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
17182 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
17183 and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17184 it is relative to the ".got" section. */
17185 if (h == htab->root.hdynamic
17186 || (!htab->fdpic_p
17187 && htab->root.target_os != is_vxworks
17188 && h == htab->root.hgot))
17189 sym->st_shndx = SHN_ABS;
17191 return true;
17194 static void
17195 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17196 void *contents,
17197 const unsigned long *template, unsigned count)
17199 unsigned ix;
17201 for (ix = 0; ix != count; ix++)
17203 unsigned long insn = template[ix];
17205 /* Emit mov pc,rx if bx is not permitted. */
17206 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17207 insn = (insn & 0xf000000f) | 0x01a0f000;
17208 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
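/* For instance (illustrative operand): with fix_v4bx enabled, a template
   word 0xe12fff13 (BX r3) matches the mask above and is rewritten to
   0xe1a0f003 (MOV pc, r3), a plain branch that ARMv4 cores lacking the
   BX instruction can execute.  */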
17212 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17213 other variants, NaCl needs this entry in a static executable's
17214 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17215 zero. For .iplt really only the last bundle is useful, and .iplt
17216 could have a shorter first entry, with each individual PLT entry's
17217 relative branch calculated differently so it targets the last
17218 bundle instead of the instruction before it (labelled .Lplt_tail
17219 above). But it's simpler to keep the size and layout of PLT0
17220 consistent with the dynamic case, at the cost of some dead code at
17221 the start of .iplt and the one dead store to the stack at the start
17222 of .Lplt_tail. */
17223 static void
17224 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17225 asection *plt, bfd_vma got_displacement)
17227 unsigned int i;
17229 put_arm_insn (htab, output_bfd,
17230 elf32_arm_nacl_plt0_entry[0]
17231 | arm_movw_immediate (got_displacement),
17232 plt->contents + 0);
17233 put_arm_insn (htab, output_bfd,
17234 elf32_arm_nacl_plt0_entry[1]
17235 | arm_movt_immediate (got_displacement),
17236 plt->contents + 4);
17238 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17239 put_arm_insn (htab, output_bfd,
17240 elf32_arm_nacl_plt0_entry[i],
17241 plt->contents + (i * 4));
17244 /* Finish up the dynamic sections. */
17246 static bool
17247 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
17249 bfd * dynobj;
17250 asection * sgot;
17251 asection * sdyn;
17252 struct elf32_arm_link_hash_table *htab;
17254 htab = elf32_arm_hash_table (info);
17255 if (htab == NULL)
17256 return false;
17258 dynobj = elf_hash_table (info)->dynobj;
17260 sgot = htab->root.sgotplt;
17261 /* A broken linker script might have discarded the dynamic sections.
17262 Catch this here so that we do not seg-fault later on. */
17263 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
17264 return false;
17265 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
17267 if (elf_hash_table (info)->dynamic_sections_created)
17269 asection *splt;
17270 Elf32_External_Dyn *dyncon, *dynconend;
17272 splt = htab->root.splt;
17273 BFD_ASSERT (splt != NULL && sdyn != NULL);
17274 BFD_ASSERT (sgot != NULL);
17276 dyncon = (Elf32_External_Dyn *) sdyn->contents;
17277 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
17279 for (; dyncon < dynconend; dyncon++)
17281 Elf_Internal_Dyn dyn;
17282 const char * name;
17283 asection * s;
17285 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
17287 switch (dyn.d_tag)
17289 default:
17290 if (htab->root.target_os == is_vxworks
17291 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
17292 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17293 break;
17295 case DT_HASH:
17296 case DT_STRTAB:
17297 case DT_SYMTAB:
17298 case DT_VERSYM:
17299 case DT_VERDEF:
17300 case DT_VERNEED:
17301 break;
17303 case DT_PLTGOT:
17304 name = ".got.plt";
17305 goto get_vma;
17306 case DT_JMPREL:
17307 name = RELOC_SECTION (htab, ".plt");
17308 get_vma:
17309 s = bfd_get_linker_section (dynobj, name);
17310 if (s == NULL)
17312 _bfd_error_handler
17313 (_("could not find section %s"), name);
17314 bfd_set_error (bfd_error_invalid_operation);
17315 return false;
17317 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
17318 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17319 break;
17321 case DT_PLTRELSZ:
17322 s = htab->root.srelplt;
17323 BFD_ASSERT (s != NULL);
17324 dyn.d_un.d_val = s->size;
17325 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17326 break;
17328 case DT_RELSZ:
17329 case DT_RELASZ:
17330 case DT_REL:
17331 case DT_RELA:
17332 break;
17334 case DT_TLSDESC_PLT:
17335 s = htab->root.splt;
17336 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17337 + htab->root.tlsdesc_plt);
17338 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17339 break;
17341 case DT_TLSDESC_GOT:
17342 s = htab->root.sgot;
17343 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17344 + htab->root.tlsdesc_got);
17345 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17346 break;
17348 /* Set the bottom bit of DT_INIT/FINI if the
17349 corresponding function is Thumb. */
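/* Illustrative example (hypothetical address): if the init function sits
   at 0x8000 and is Thumb code, DT_INIT is emitted as 0x8001; the set low
   bit tells the dynamic linker to enter it with an interworking branch so
   the core switches to Thumb state.  */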
17350 case DT_INIT:
17351 name = info->init_function;
17352 goto get_sym;
17353 case DT_FINI:
17354 name = info->fini_function;
17355 get_sym:
17356 /* If it wasn't set by elf_bfd_final_link
17357 then there is nothing to adjust. */
17358 if (dyn.d_un.d_val != 0)
17360 struct elf_link_hash_entry * eh;
17362 eh = elf_link_hash_lookup (elf_hash_table (info), name,
17363 false, false, true);
17364 if (eh != NULL
17365 && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
17366 == ST_BRANCH_TO_THUMB)
17368 dyn.d_un.d_val |= 1;
17369 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17372 break;
17376 /* Fill in the first entry in the procedure linkage table. */
17377 if (splt->size > 0 && htab->plt_header_size)
17379 const bfd_vma *plt0_entry;
17380 bfd_vma got_address, plt_address, got_displacement;
17382 /* Calculate the addresses of the GOT and PLT. */
17383 got_address = sgot->output_section->vma + sgot->output_offset;
17384 plt_address = splt->output_section->vma + splt->output_offset;
17386 if (htab->root.target_os == is_vxworks)
17388 /* The VxWorks GOT is relocated by the dynamic linker.
17389 Therefore, we must emit relocations rather than simply
17390 computing the values now. */
17391 Elf_Internal_Rela rel;
17393 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
17394 put_arm_insn (htab, output_bfd, plt0_entry[0],
17395 splt->contents + 0);
17396 put_arm_insn (htab, output_bfd, plt0_entry[1],
17397 splt->contents + 4);
17398 put_arm_insn (htab, output_bfd, plt0_entry[2],
17399 splt->contents + 8);
17400 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
17402 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
17403 rel.r_offset = plt_address + 12;
17404 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17405 rel.r_addend = 0;
17406 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
17407 htab->srelplt2->contents);
17409 else if (htab->root.target_os == is_nacl)
17410 arm_nacl_put_plt0 (htab, output_bfd, splt,
17411 got_address + 8 - (plt_address + 16));
17412 else if (using_thumb_only (htab))
17414 got_displacement = got_address - (plt_address + 12);
17416 plt0_entry = elf32_thumb2_plt0_entry;
17417 put_arm_insn (htab, output_bfd, plt0_entry[0],
17418 splt->contents + 0);
17419 put_arm_insn (htab, output_bfd, plt0_entry[1],
17420 splt->contents + 4);
17421 put_arm_insn (htab, output_bfd, plt0_entry[2],
17422 splt->contents + 8);
17424 bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
17426 else
17428 got_displacement = got_address - (plt_address + 16);
17430 plt0_entry = elf32_arm_plt0_entry;
17431 put_arm_insn (htab, output_bfd, plt0_entry[0],
17432 splt->contents + 0);
17433 put_arm_insn (htab, output_bfd, plt0_entry[1],
17434 splt->contents + 4);
17435 put_arm_insn (htab, output_bfd, plt0_entry[2],
17436 splt->contents + 8);
17437 put_arm_insn (htab, output_bfd, plt0_entry[3],
17438 splt->contents + 12);
17440 #ifdef FOUR_WORD_PLT
17441 /* The displacement value goes in the otherwise-unused
17442 last word of the second entry. */
17443 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
17444 #else
17445 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
17446 #endif
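/* Rough sketch of why the displacement is biased by 16 (assuming the
   usual four-instruction ARM PLT0 layout, str lr / ldr lr / add lr, pc,
   lr / ldr pc, with the displacement word at plt+16): when the add at
   plt+8 executes, the ARM-state pc reads as plt+16, so adding
   got_address - (plt_address + 16) reconstructs the GOT address exactly.  */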
17450 /* UnixWare sets the entsize of .plt to 4, although that doesn't
17451 really seem like the right value. */
17452 if (splt->output_section->owner == output_bfd)
17453 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
17455 if (htab->root.tlsdesc_plt)
17457 bfd_vma got_address
17458 = sgot->output_section->vma + sgot->output_offset;
17459 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
17460 + htab->root.sgot->output_offset);
17461 bfd_vma plt_address
17462 = splt->output_section->vma + splt->output_offset;
17464 arm_put_trampoline (htab, output_bfd,
17465 splt->contents + htab->root.tlsdesc_plt,
17466 dl_tlsdesc_lazy_trampoline, 6);
17468 bfd_put_32 (output_bfd,
17469 gotplt_address + htab->root.tlsdesc_got
17470 - (plt_address + htab->root.tlsdesc_plt)
17471 - dl_tlsdesc_lazy_trampoline[6],
17472 splt->contents + htab->root.tlsdesc_plt + 24);
17473 bfd_put_32 (output_bfd,
17474 got_address - (plt_address + htab->root.tlsdesc_plt)
17475 - dl_tlsdesc_lazy_trampoline[7],
17476 splt->contents + htab->root.tlsdesc_plt + 24 + 4);
17479 if (htab->tls_trampoline)
17481 arm_put_trampoline (htab, output_bfd,
17482 splt->contents + htab->tls_trampoline,
17483 tls_trampoline, 3);
17484 #ifdef FOUR_WORD_PLT
17485 bfd_put_32 (output_bfd, 0x00000000,
17486 splt->contents + htab->tls_trampoline + 12);
17487 #endif
17490 if (htab->root.target_os == is_vxworks
17491 && !bfd_link_pic (info)
17492 && htab->root.splt->size > 0)
17494 /* Correct the .rel(a).plt.unloaded relocations. They will have
17495 incorrect symbol indexes. */
17496 int num_plts;
17497 unsigned char *p;
17499 num_plts = ((htab->root.splt->size - htab->plt_header_size)
17500 / htab->plt_entry_size);
17501 p = htab->srelplt2->contents + RELOC_SIZE (htab);
17503 for (; num_plts; num_plts--)
17505 Elf_Internal_Rela rel;
17507 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17508 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17509 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17510 p += RELOC_SIZE (htab);
17512 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17513 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
17514 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17515 p += RELOC_SIZE (htab);
17520 if (htab->root.target_os == is_nacl
17521 && htab->root.iplt != NULL
17522 && htab->root.iplt->size > 0)
17523 /* NaCl uses a special first entry in .iplt too. */
17524 arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
17526 /* Fill in the first three entries in the global offset table. */
17527 if (sgot)
17529 if (sgot->size > 0)
17531 if (sdyn == NULL)
17532 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
17533 else
17534 bfd_put_32 (output_bfd,
17535 sdyn->output_section->vma + sdyn->output_offset,
17536 sgot->contents);
17537 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
17538 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
17541 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
17544 /* At the very end of the .rofixup section is a pointer to the GOT. */
17545 if (htab->fdpic_p && htab->srofixup != NULL)
17547 struct elf_link_hash_entry *hgot = htab->root.hgot;
17549 bfd_vma got_value = hgot->root.u.def.value
17550 + hgot->root.u.def.section->output_section->vma
17551 + hgot->root.u.def.section->output_offset;
17553 arm_elf_add_rofixup (output_bfd, htab->srofixup, got_value);
17555 /* Make sure we allocated and generated the same number of fixups. */
17556 BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
17559 return true;
17562 static bool
17563 elf32_arm_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
17565 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
17566 struct elf32_arm_link_hash_table *globals;
17567 struct elf_segment_map *m;
17569 if (!_bfd_elf_init_file_header (abfd, link_info))
17570 return false;
17572 i_ehdrp = elf_elfheader (abfd);
17574 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17575 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17576 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17578 if (link_info)
17580 globals = elf32_arm_hash_table (link_info);
17581 if (globals != NULL && globals->byteswap_code)
17582 i_ehdrp->e_flags |= EF_ARM_BE8;
17584 if (globals->fdpic_p)
17585 i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17588 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17589 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17591 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17592 if (abi == AEABI_VFP_args_vfp)
17593 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17594 else
17595 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17598 /* Scan each segment and set its p_flags attribute if it contains only
17599 sections with the SHF_ARM_PURECODE flag. */
17600 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17602 unsigned int j;
17604 if (m->count == 0)
17605 continue;
17606 for (j = 0; j < m->count; j++)
17608 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17609 break;
17611 if (j == m->count)
17613 m->p_flags = PF_X;
17614 m->p_flags_valid = 1;
17617 return true;
17620 static enum elf_reloc_type_class
17621 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17622 const asection *rel_sec ATTRIBUTE_UNUSED,
17623 const Elf_Internal_Rela *rela)
17625 switch ((int) ELF32_R_TYPE (rela->r_info))
17627 case R_ARM_RELATIVE:
17628 return reloc_class_relative;
17629 case R_ARM_JUMP_SLOT:
17630 return reloc_class_plt;
17631 case R_ARM_COPY:
17632 return reloc_class_copy;
17633 case R_ARM_IRELATIVE:
17634 return reloc_class_ifunc;
17635 default:
17636 return reloc_class_normal;
17640 static void
17641 arm_final_write_processing (bfd *abfd)
17643 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
17646 static bool
17647 elf32_arm_final_write_processing (bfd *abfd)
17649 arm_final_write_processing (abfd);
17650 return _bfd_elf_final_write_processing (abfd);
17653 /* Return TRUE if NAME names an unwind table section. */
17655 static bool
17656 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17658 return (startswith (name, ELF_STRING_ARM_unwind)
17659 || startswith (name, ELF_STRING_ARM_unwind_once));
17663 /* Set the type and flags for an ARM section. We do this by
17664 the section name, which is a hack, but ought to work. */
17666 static bool
17667 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17669 const char * name;
17671 name = bfd_section_name (sec);
17673 if (is_arm_elf_unwind_section_name (abfd, name))
17675 hdr->sh_type = SHT_ARM_EXIDX;
17676 hdr->sh_flags |= SHF_LINK_ORDER;
17679 if (sec->flags & SEC_ELF_PURECODE)
17680 hdr->sh_flags |= SHF_ARM_PURECODE;
17682 return true;
17685 /* Handle an ARM specific section when reading an object file. This is
17686 called when bfd_section_from_shdr finds a section with an unknown
17687 type. */
17689 static bool
17690 elf32_arm_section_from_shdr (bfd *abfd,
17691 Elf_Internal_Shdr * hdr,
17692 const char *name,
17693 int shindex)
17695 /* There ought to be a place to keep ELF backend specific flags, but
17696 at the moment there isn't one. We just keep track of the
17697 sections by their name, instead. Fortunately, the ABI gives
17698 names for all the ARM specific sections, so we will probably get
17699 away with this. */
17700 switch (hdr->sh_type)
17702 case SHT_ARM_EXIDX:
17703 case SHT_ARM_PREEMPTMAP:
17704 case SHT_ARM_ATTRIBUTES:
17705 break;
17707 default:
17708 return false;
17711 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17712 return false;
17714 return true;
17717 static _arm_elf_section_data *
17718 get_arm_elf_section_data (asection * sec)
17720 if (sec && sec->owner && is_arm_elf (sec->owner))
17721 return elf32_arm_section_data (sec);
17722 else
17723 return NULL;
17726 typedef struct
17728 void *flaginfo;
17729 struct bfd_link_info *info;
17730 asection *sec;
17731 int sec_shndx;
17732 int (*func) (void *, const char *, Elf_Internal_Sym *,
17733 asection *, struct elf_link_hash_entry *);
17734 } output_arch_syminfo;
17736 enum map_symbol_type
17738 ARM_MAP_ARM,
17739 ARM_MAP_THUMB,
17740 ARM_MAP_DATA
17744 /* Output a single mapping symbol. */
17746 static bool
17747 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17748 enum map_symbol_type type,
17749 bfd_vma offset)
17751 static const char *names[3] = {"$a", "$t", "$d"};
17752 Elf_Internal_Sym sym;
17754 sym.st_value = osi->sec->output_section->vma
17755 + osi->sec->output_offset
17756 + offset;
17757 sym.st_size = 0;
17758 sym.st_other = 0;
17759 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17760 sym.st_shndx = osi->sec_shndx;
17761 sym.st_target_internal = 0;
17762 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17763 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
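/* For example (illustrative layout): a section that starts with ARM code
   and ends with a literal pool would typically get a "$a" mapping symbol
   at offset 0 and a "$d" symbol at the start of the pool, letting
   disassemblers and passes such as BE8 byte-swapping tell code from
   data.  */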
17766 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17767 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
17769 static bool
17770 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
17771 bool is_iplt_entry_p,
17772 union gotplt_union *root_plt,
17773 struct arm_plt_info *arm_plt)
17775 struct elf32_arm_link_hash_table *htab;
17776 bfd_vma addr, plt_header_size;
17778 if (root_plt->offset == (bfd_vma) -1)
17779 return true;
17781 htab = elf32_arm_hash_table (osi->info);
17782 if (htab == NULL)
17783 return false;
17785 if (is_iplt_entry_p)
17787 osi->sec = htab->root.iplt;
17788 plt_header_size = 0;
17790 else
17792 osi->sec = htab->root.splt;
17793 plt_header_size = htab->plt_header_size;
17795 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
17796 (osi->info->output_bfd, osi->sec->output_section));
17798 addr = root_plt->offset & -2;
17799 if (htab->root.target_os == is_vxworks)
17801 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17802 return false;
17803 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
17804 return false;
17805 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
17806 return false;
17807 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
17808 return false;
17810 else if (htab->root.target_os == is_nacl)
17812 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17813 return false;
17815 else if (htab->fdpic_p)
17817 enum map_symbol_type type = using_thumb_only (htab)
17818 ? ARM_MAP_THUMB
17819 : ARM_MAP_ARM;
17821 if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
17822 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
17823 return false;
17824 if (!elf32_arm_output_map_sym (osi, type, addr))
17825 return false;
17826 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
17827 return false;
17828 if (htab->plt_entry_size == 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry))
17829 if (!elf32_arm_output_map_sym (osi, type, addr + 24))
17830 return false;
17832 else if (using_thumb_only (htab))
17834 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
17835 return false;
17837 else
17839 bool thumb_stub_p;
17841 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
17842 if (thumb_stub_p)
17844 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
17845 return false;
17847 #ifdef FOUR_WORD_PLT
17848 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17849 return false;
17850 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
17851 return false;
17852 #else
17853 /* A three-word PLT with no Thumb thunk contains only Arm code, so we
17854 only need to output a mapping symbol for the first PLT entry and for
17855 entries with Thumb thunks. */
17856 if (thumb_stub_p || addr == plt_header_size)
17858 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17859 return false;
17861 #endif
17864 return true;
17867 /* Output mapping symbols for PLT entries associated with H. */
17869 static bool
17870 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
17872 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
17873 struct elf32_arm_link_hash_entry *eh;
17875 if (h->root.type == bfd_link_hash_indirect)
17876 return true;
17878 if (h->root.type == bfd_link_hash_warning)
17879 /* When warning symbols are created, they **replace** the "real"
17880 entry in the hash table, thus we never get to see the real
17881 symbol in a hash traversal. So look at it now. */
17882 h = (struct elf_link_hash_entry *) h->root.u.i.link;
17884 eh = (struct elf32_arm_link_hash_entry *) h;
17885 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
17886 &h->plt, &eh->plt);
17889 /* Bind a veneered symbol to its veneer identified by its hash entry
17890 STUB_ENTRY. The veneered location thus loses its symbol. */
17892 static void
17893 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
17895 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
17897 BFD_ASSERT (hash);
17898 hash->root.root.u.def.section = stub_entry->stub_sec;
17899 hash->root.root.u.def.value = stub_entry->stub_offset;
17900 hash->root.size = stub_entry->stub_size;
17903 /* Output a single local symbol for a generated stub. */
17905 static bool
17906 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
17907 bfd_vma offset, bfd_vma size)
17909 Elf_Internal_Sym sym;
17911 sym.st_value = osi->sec->output_section->vma
17912 + osi->sec->output_offset
17913 + offset;
17914 sym.st_size = size;
17915 sym.st_other = 0;
17916 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
17917 sym.st_shndx = osi->sec_shndx;
17918 sym.st_target_internal = 0;
17919 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
17922 static bool
17923 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
17924 void * in_arg)
17926 struct elf32_arm_stub_hash_entry *stub_entry;
17927 asection *stub_sec;
17928 bfd_vma addr;
17929 char *stub_name;
17930 output_arch_syminfo *osi;
17931 const insn_sequence *template_sequence;
17932 enum stub_insn_type prev_type;
17933 int size;
17934 int i;
17935 enum map_symbol_type sym_type;
17937 /* Massage our args to the form they really have. */
17938 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
17939 osi = (output_arch_syminfo *) in_arg;
17941 stub_sec = stub_entry->stub_sec;
17943 /* Ensure this stub is attached to the current section being
17944 processed. */
17945 if (stub_sec != osi->sec)
17946 return true;
17948 addr = (bfd_vma) stub_entry->stub_offset;
17949 template_sequence = stub_entry->stub_template;
17951 if (arm_stub_sym_claimed (stub_entry->stub_type))
17952 arm_stub_claim_sym (stub_entry);
17953 else
17955 stub_name = stub_entry->output_name;
17956 switch (template_sequence[0].type)
17958 case ARM_TYPE:
17959 if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
17960 stub_entry->stub_size))
17961 return false;
17962 break;
17963 case THUMB16_TYPE:
17964 case THUMB32_TYPE:
17965 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
17966 stub_entry->stub_size))
17967 return false;
17968 break;
17969 default:
17970 BFD_FAIL ();
17971 return 0;
17975 prev_type = DATA_TYPE;
17976 size = 0;
17977 for (i = 0; i < stub_entry->stub_template_size; i++)
17979 switch (template_sequence[i].type)
17981 case ARM_TYPE:
17982 sym_type = ARM_MAP_ARM;
17983 break;
17985 case THUMB16_TYPE:
17986 case THUMB32_TYPE:
17987 sym_type = ARM_MAP_THUMB;
17988 break;
17990 case DATA_TYPE:
17991 sym_type = ARM_MAP_DATA;
17992 break;
17994 default:
17995 BFD_FAIL ();
17996 return false;
17999 if (template_sequence[i].type != prev_type)
18001 prev_type = template_sequence[i].type;
18002 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
18003 return false;
18006 switch (template_sequence[i].type)
18008 case ARM_TYPE:
18009 case THUMB32_TYPE:
18010 size += 4;
18011 break;
18013 case THUMB16_TYPE:
18014 size += 2;
18015 break;
18017 case DATA_TYPE:
18018 size += 4;
18019 break;
18021 default:
18022 BFD_FAIL ();
18023 return false;
18027 return true;
18030 /* Output mapping symbols for linker generated sections,
18031 and for those data-only sections that do not have a
18032 $d. */
18034 static bool
18035 elf32_arm_output_arch_local_syms (bfd *output_bfd,
18036 struct bfd_link_info *info,
18037 void *flaginfo,
18038 int (*func) (void *, const char *,
18039 Elf_Internal_Sym *,
18040 asection *,
18041 struct elf_link_hash_entry *))
18043 output_arch_syminfo osi;
18044 struct elf32_arm_link_hash_table *htab;
18045 bfd_vma offset;
18046 bfd_size_type size;
18047 bfd *input_bfd;
18049 htab = elf32_arm_hash_table (info);
18050 if (htab == NULL)
18051 return false;
18053 check_use_blx (htab);
18055 osi.flaginfo = flaginfo;
18056 osi.info = info;
18057 osi.func = func;
18059 /* Add a $d mapping symbol to data-only sections that
18060 don't have any mapping symbol. This may result in (harmless) redundant
18061 mapping symbols. */
18062 for (input_bfd = info->input_bfds;
18063 input_bfd != NULL;
18064 input_bfd = input_bfd->link.next)
18066 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
18067 for (osi.sec = input_bfd->sections;
18068 osi.sec != NULL;
18069 osi.sec = osi.sec->next)
18071 if (osi.sec->output_section != NULL
18072 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
18073 != 0)
18074 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
18075 == SEC_HAS_CONTENTS
18076 && get_arm_elf_section_data (osi.sec) != NULL
18077 && get_arm_elf_section_data (osi.sec)->mapcount == 0
18078 && osi.sec->size > 0
18079 && (osi.sec->flags & SEC_EXCLUDE) == 0)
18081 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18082 (output_bfd, osi.sec->output_section);
18083 if (osi.sec_shndx != (int)SHN_BAD)
18084 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
18089 /* ARM->Thumb glue. */
18090 if (htab->arm_glue_size > 0)
18092 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18093 ARM2THUMB_GLUE_SECTION_NAME);
18095 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18096 (output_bfd, osi.sec->output_section);
18097 if (bfd_link_pic (info) || htab->root.is_relocatable_executable
18098 || htab->pic_veneer)
18099 size = ARM2THUMB_PIC_GLUE_SIZE;
18100 else if (htab->use_blx)
18101 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
18102 else
18103 size = ARM2THUMB_STATIC_GLUE_SIZE;
18105 for (offset = 0; offset < htab->arm_glue_size; offset += size)
18107 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
18108 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
18112 /* Thumb->ARM glue. */
18113 if (htab->thumb_glue_size > 0)
18115 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18116 THUMB2ARM_GLUE_SECTION_NAME);
18118 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18119 (output_bfd, osi.sec->output_section);
18120 size = THUMB2ARM_GLUE_SIZE;
18122 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
18124 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
18125 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
18129 /* ARMv4 BX veneers. */
18130 if (htab->bx_glue_size > 0)
18132 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18133 ARM_BX_GLUE_SECTION_NAME);
18135 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18136 (output_bfd, osi.sec->output_section);
18138 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
18141 /* Long call stubs. */
18142 if (htab->stub_bfd && htab->stub_bfd->sections)
18144 asection* stub_sec;
18146 for (stub_sec = htab->stub_bfd->sections;
18147 stub_sec != NULL;
18148 stub_sec = stub_sec->next)
18150 /* Ignore non-stub sections. */
18151 if (!strstr (stub_sec->name, STUB_SUFFIX))
18152 continue;
18154 osi.sec = stub_sec;
18156 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18157 (output_bfd, osi.sec->output_section);
18159 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
18163 /* Finally, output mapping symbols for the PLT. */
18164 if (htab->root.splt && htab->root.splt->size > 0)
18166 osi.sec = htab->root.splt;
18167 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18168 (output_bfd, osi.sec->output_section));
18170 /* Output mapping symbols for the PLT header. */
18171 if (htab->root.target_os == is_vxworks)
18173 /* VxWorks shared libraries have no PLT header. */
18174 if (!bfd_link_pic (info))
18176 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18177 return false;
18178 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18179 return false;
18182 else if (htab->root.target_os == is_nacl)
18184 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18185 return false;
18187 else if (using_thumb_only (htab) && !htab->fdpic_p)
18189 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
18190 return false;
18191 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18192 return false;
18193 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
18194 return false;
18196 else if (!htab->fdpic_p)
18198 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18199 return false;
18200 #ifndef FOUR_WORD_PLT
18201 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
18202 return false;
18203 #endif
18206 if (htab->root.target_os == is_nacl
18207 && htab->root.iplt
18208 && htab->root.iplt->size > 0)
18210 /* NaCl uses a special first entry in .iplt too. */
18211 osi.sec = htab->root.iplt;
18212 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18213 (output_bfd, osi.sec->output_section));
18214 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18215 return false;
18217 if ((htab->root.splt && htab->root.splt->size > 0)
18218 || (htab->root.iplt && htab->root.iplt->size > 0))
18220 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
18221 for (input_bfd = info->input_bfds;
18222 input_bfd != NULL;
18223 input_bfd = input_bfd->link.next)
18225 struct arm_local_iplt_info **local_iplt;
18226 unsigned int i, num_syms;
18228 local_iplt = elf32_arm_local_iplt (input_bfd);
18229 if (local_iplt != NULL)
18231 num_syms = elf_symtab_hdr (input_bfd).sh_info;
18232 if (num_syms > elf32_arm_num_entries (input_bfd))
18234 _bfd_error_handler (_("\
18235 %pB: Number of symbols in input file has increased from %lu to %u\n"),
18236 input_bfd,
18237 (unsigned long) elf32_arm_num_entries (input_bfd),
18238 num_syms);
18239 return false;
18241 for (i = 0; i < num_syms; i++)
18242 if (local_iplt[i] != NULL
18243 && !elf32_arm_output_plt_map_1 (&osi, true,
18244 &local_iplt[i]->root,
18245 &local_iplt[i]->arm))
18246 return false;
18250 if (htab->root.tlsdesc_plt != 0)
18252 /* Mapping symbols for the lazy tls trampoline. */
18253 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM,
18254 htab->root.tlsdesc_plt))
18255 return false;
18257 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18258 htab->root.tlsdesc_plt + 24))
18259 return false;
18261 if (htab->tls_trampoline != 0)
18263 /* Mapping symbols for the tls trampoline. */
18264 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
18265 return false;
18266 #ifdef FOUR_WORD_PLT
18267 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18268 htab->tls_trampoline + 12))
18269 return false;
18270 #endif
18273 return true;
18276 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18277 the import library. All SYMCOUNT symbols of ABFD can be examined
18278 from their pointers in SYMS. Pointers of symbols to keep should be
18279 stored contiguously at the beginning of that array.
18281 Returns the number of symbols to keep. */
18283 static unsigned int
18284 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18285 struct bfd_link_info *info,
18286 asymbol **syms, long symcount)
18288 size_t maxnamelen;
18289 char *cmse_name;
18290 long src_count, dst_count = 0;
18291 struct elf32_arm_link_hash_table *htab;
18293 htab = elf32_arm_hash_table (info);
18294 if (!htab->stub_bfd || !htab->stub_bfd->sections)
18295 symcount = 0;
18297 maxnamelen = 128;
18298 cmse_name = (char *) bfd_malloc (maxnamelen);
18299 BFD_ASSERT (cmse_name);
18301 for (src_count = 0; src_count < symcount; src_count++)
18303 struct elf32_arm_link_hash_entry *cmse_hash;
18304 asymbol *sym;
18305 flagword flags;
18306 char *name;
18307 size_t namelen;
18309 sym = syms[src_count];
18310 flags = sym->flags;
18311 name = (char *) bfd_asymbol_name (sym);
18313 if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18314 continue;
18315 if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18316 continue;
18318 namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18319 if (namelen > maxnamelen)
18321 cmse_name = (char *)
18322 bfd_realloc (cmse_name, namelen);
18323 maxnamelen = namelen;
18325 snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18326 cmse_hash = (struct elf32_arm_link_hash_entry *)
18327 elf_link_hash_lookup (&(htab)->root, cmse_name, false, false, true);
18329 if (!cmse_hash
18330 || (cmse_hash->root.root.type != bfd_link_hash_defined
18331 && cmse_hash->root.root.type != bfd_link_hash_defweak)
18332 || cmse_hash->root.type != STT_FUNC)
18333 continue;
18335 syms[dst_count++] = sym;
18337 free (cmse_name);
18339 syms[dst_count] = NULL;
18341 return dst_count;
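/* Sketch of the lookup performed above (the "__acle_se_" spelling of
   CMSE_PREFIX is assumed here, following the CMSE ACLE convention): for a
   candidate global function symbol "foo", the code looks up
   "__acle_se_foo"; "foo" is kept for the import library only if that
   special symbol is a defined STT_FUNC in the link.  */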
18344 /* Filter symbols of ABFD to include in the import library. All
18345 SYMCOUNT symbols of ABFD can be examined from their pointers in
18346 SYMS. Pointers of symbols to keep should be stored contiguously at
18347 the beginning of that array.
18349 Returns the number of symbols to keep. */
18351 static unsigned int
18352 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18353 struct bfd_link_info *info,
18354 asymbol **syms, long symcount)
18356 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18358 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18359 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18360 library to be a relocatable object file. */
18361 BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18362 if (globals->cmse_implib)
18363 return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18364 else
18365 return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18368 /* Allocate target specific section data. */
18370 static bool
18371 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18373 if (!sec->used_by_bfd)
18375 _arm_elf_section_data *sdata;
18376 size_t amt = sizeof (*sdata);
18378 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18379 if (sdata == NULL)
18380 return false;
18381 sec->used_by_bfd = sdata;
18384 return _bfd_elf_new_section_hook (abfd, sec);
18388 /* Used to order a list of mapping symbols by address. */
18390 static int
18391 elf32_arm_compare_mapping (const void * a, const void * b)
18393 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18394 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18396 if (amap->vma > bmap->vma)
18397 return 1;
18398 else if (amap->vma < bmap->vma)
18399 return -1;
18400 else if (amap->type > bmap->type)
18401 /* Ensure results do not depend on the host qsort for objects with
18402 multiple mapping symbols at the same address by sorting on type
18403 after vma. */
18404 return 1;
18405 else if (amap->type < bmap->type)
18406 return -1;
18407 else
18408 return 0;
18411 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18413 static unsigned long
18414 offset_prel31 (unsigned long addr, bfd_vma offset)
18416 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
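/* Worked example (illustrative values): with addr == 0x7ffffff8 (a prel31
   value of -8) and offset == 0x10, the low 31 bits become
   (0x7ffffff8 + 0x10) & 0x7fffffff == 0x00000008, i.e. +8, while the
   (cleared) top bit of ADDR is left untouched.  */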
18419 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18420 relocations. */
18422 static void
18423 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18425 unsigned long first_word = bfd_get_32 (output_bfd, from);
18426 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18428 /* High bit of first word is supposed to be zero. */
18429 if ((first_word & 0x80000000ul) == 0)
18430 first_word = offset_prel31 (first_word, offset);
18432 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
18433 (EXIDX_CANTUNWIND), it is an offset to an .ARM.extab entry. */
18434 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18435 second_word = offset_prel31 (second_word, offset);
18437 bfd_put_32 (output_bfd, first_word, to);
18438 bfd_put_32 (output_bfd, second_word, to + 4);
18441 /* Data for make_branch_to_a8_stub(). */
18443 struct a8_branch_to_stub_data
18445 asection *writing_section;
18446 bfd_byte *contents;
18450 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18451 places for a particular section. */
18453 static bool
18454 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
18455 void *in_arg)
18457 struct elf32_arm_stub_hash_entry *stub_entry;
18458 struct a8_branch_to_stub_data *data;
18459 bfd_byte *contents;
18460 unsigned long branch_insn;
18461 bfd_vma veneered_insn_loc, veneer_entry_loc;
18462 bfd_signed_vma branch_offset;
18463 bfd *abfd;
18464 unsigned int loc;
18466 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
18467 data = (struct a8_branch_to_stub_data *) in_arg;
18469 if (stub_entry->target_section != data->writing_section
18470 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
18471 return true;
18473 contents = data->contents;
18475 /* We use target_section as Cortex-A8 erratum workaround stubs are only
18476 generated when both source and target are in the same section. */
18477 veneered_insn_loc = stub_entry->target_section->output_section->vma
18478 + stub_entry->target_section->output_offset
18479 + stub_entry->source_value;
18481 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
18482 + stub_entry->stub_sec->output_offset
18483 + stub_entry->stub_offset;
18485 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
18486 veneered_insn_loc &= ~3u;
18488 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
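/* The extra -4 accounts for the Thumb-state PC: when the branch at
   veneered_insn_loc executes, PC reads as the instruction address plus 4,
   and the encoded offset is relative to that value.  */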
18490 abfd = stub_entry->target_section->owner;
18491 loc = stub_entry->source_value;
18493 /* We attempt to avoid this condition by setting stubs_always_after_branch
18494 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
18495 This check is just to be on the safe side... */
18496 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
18498 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
18499 "allocated in unsafe location"), abfd);
18500 return false;
18503 switch (stub_entry->stub_type)
18505 case arm_stub_a8_veneer_b:
18506 case arm_stub_a8_veneer_b_cond:
18507 branch_insn = 0xf0009000;
18508 goto jump24;
18510 case arm_stub_a8_veneer_blx:
18511 branch_insn = 0xf000e800;
18512 goto jump24;
18514 case arm_stub_a8_veneer_bl:
18516 unsigned int i1, j1, i2, j2, s;
18518 branch_insn = 0xf000d000;
18520 jump24:
18521 if (branch_offset < -16777216 || branch_offset > 16777214)
18523 /* There's not much we can do apart from complain if this
18524 happens. */
18525 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
18526 "of range (input file too large)"), abfd);
18527 return false;
18530 /* i1 = not(j1 eor s), so:
18531 not i1 = j1 eor s
18532 j1 = (not i1) eor s. */
18534 branch_insn |= (branch_offset >> 1) & 0x7ff;
18535 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
18536 i2 = (branch_offset >> 22) & 1;
18537 i1 = (branch_offset >> 23) & 1;
18538 s = (branch_offset >> 24) & 1;
18539 j1 = (!i1) ^ s;
18540 j2 = (!i2) ^ s;
18541 branch_insn |= j2 << 11;
18542 branch_insn |= j1 << 13;
18543 branch_insn |= s << 26;
18545 break;
18547 default:
18548 BFD_FAIL ();
18549 return false;
18552 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
18553 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);
18555 return true;
18558 /* Beginning of stm32l4xx work-around. */
18560 /* Functions encoding the instructions needed to emit the
18561 fix-stm32l4xx-629360 work-around.
18562 The encodings are taken from the
18563 ARM (C) Architecture Reference Manual,
18564 ARMv7-A and ARMv7-R edition,
18565 ARM DDI 0406C.b (ID072512). */
18567 static inline bfd_vma
18568 create_instruction_branch_absolute (int branch_offset)
18570 /* A8.8.18 B (A8-334)
18571 B target_address (Encoding T4). */
18572 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18573 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18574 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18576 int s = ((branch_offset & 0x1000000) >> 24);
18577 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18578 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18580 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18581 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18583 bfd_vma patched_inst = 0xf0009000
18584 | s << 26 /* S. */
18585 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
18586 | j1 << 13 /* J1. */
18587 | j2 << 11 /* J2. */
18588 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
18590 return patched_inst;
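/* Worked example (illustrative offset): for branch_offset == 0x1000,
   S == 0, so J1 == J2 == 1, imm10 == 0x001 and imm11 == 0x000, giving
   0xf0009000 | (1 << 16) | (1 << 13) | (1 << 11) == 0xf001b800, i.e. a
   forward B.W of 0x1000 bytes relative to the Thumb PC.  */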
18593 static inline bfd_vma
18594 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18596 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18597 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18598 bfd_vma patched_inst = 0xe8900000
18599 | (/*W=*/wback << 21)
18600 | (base_reg << 16)
18601 | (reg_mask & 0x0000ffff);
18603 return patched_inst;
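/* For instance (illustrative operands): base_reg == 1, wback == 1 and
   reg_mask == 0x007f produce 0xe8b1007f, i.e. LDMIA.W r1!, {r0-r6}.  */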
18606 static inline bfd_vma
18607 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18609 /* A8.8.60 LDMDB/LDMEA (A8-402)
18610 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18611 bfd_vma patched_inst = 0xe9100000
18612 | (/*W=*/wback << 21)
18613 | (base_reg << 16)
18614 | (reg_mask & 0x0000ffff);
18616 return patched_inst;
18619 static inline bfd_vma
18620 create_instruction_mov (int target_reg, int source_reg)
18622 /* A8.8.103 MOV (register) (A8-486)
18623 MOV Rd, Rm (Encoding T1). */
18624 bfd_vma patched_inst = 0x4600
18625 | (target_reg & 0x7)
18626 | ((target_reg & 0x8) >> 3) << 7
18627 | (source_reg << 3);
18629 return patched_inst;
18632 static inline bfd_vma
18633 create_instruction_sub (int target_reg, int source_reg, int value)
18635 /* A8.8.221 SUB (immediate) (A8-708)
18636 SUB Rd, Rn, #value (Encoding T3). */
18637 bfd_vma patched_inst = 0xf1a00000
18638 | (target_reg << 8)
18639 | (source_reg << 16)
18640 | (/*S=*/0 << 20)
18641 | ((value & 0x800) >> 11) << 26
18642 | ((value & 0x700) >> 8) << 12
18643 | (value & 0x0ff);
18645 return patched_inst;
18648 static inline bfd_vma
18649 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18650 int first_reg)
18652 /* A8.8.332 VLDM (A8-922)
18653 VLDM{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18654 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18655 | (/*W=*/wback << 21)
18656 | (base_reg << 16)
18657 | (num_words & 0x000000ff)
18658 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18659 | (first_reg & 0x00000001) << 22;
18661 return patched_inst;
18664 static inline bfd_vma
18665 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18666 int first_reg)
18668 /* A8.8.332 VLDM (A8-922)
18669 VLDM{MODE} Rn!, {list} (Encoding T1 or T2). */
18670 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18671 | (base_reg << 16)
18672 | (num_words & 0x000000ff)
18673 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
18674 | (first_reg & 0x00000001) << 22;
18676 return patched_inst;
18679 static inline bfd_vma
18680 create_instruction_udf_w (int value)
18682 /* A8.8.247 UDF (A8-758)
18683 Undefined (Encoding T2). */
18684 bfd_vma patched_inst = 0xf7f0a000
18685 | (value & 0x00000fff)
18686 | (value & 0x000f0000) << 16;
18688 return patched_inst;
18691 static inline bfd_vma
18692 create_instruction_udf (int value)
18694 /* A8.8.247 UDF (A8-758)
18695 Undefined (Encoding T1). */
18696 bfd_vma patched_inst = 0xde00
18697 | (value & 0xff);
18699 return patched_inst;
18702 /* Functions writing an instruction in memory, returning the next
18703 memory position to write to. */
18705 static inline bfd_byte *
18706 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18707 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18709 put_thumb2_insn (htab, output_bfd, insn, pt);
18710 return pt + 4;
18713 static inline bfd_byte *
18714 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18715 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18717 put_thumb_insn (htab, output_bfd, insn, pt);
18718 return pt + 2;
18721 /* Fill a region of memory with T1 and T2 UDF instructions, taking
18722 care of alignment. */
18724 static bfd_byte *
18725 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18726 bfd * output_bfd,
18727 const bfd_byte * const base_stub_contents,
18728 bfd_byte * const from_stub_contents,
18729 const bfd_byte * const end_stub_contents)
18731 bfd_byte *current_stub_contents = from_stub_contents;
18733 /* Fill the remainder of the stub with deterministic contents: UDF
18734 instructions.
18735 If realignment to a 4-byte boundary is needed, emit a T1 UDF first
18736 so that T2 UDFs can be used afterwards. */
18737 if ((current_stub_contents < end_stub_contents)
18738 && !((current_stub_contents - base_stub_contents) % 2)
18739 && ((current_stub_contents - base_stub_contents) % 4))
18740 current_stub_contents =
18741 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18742 create_instruction_udf (0));
18744 for (; current_stub_contents < end_stub_contents;)
18745 current_stub_contents =
18746 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18747 create_instruction_udf_w (0));
18749 return current_stub_contents;
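/* Example (illustrative offsets): if the free area starts 6 bytes into
   the stub, it is 2-byte but not 4-byte aligned, so a single 16-bit T1
   UDF is emitted first and the rest of the area is padded with 32-bit
   T2 UDF.W instructions.  */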
18752 /* Functions writing the stream of instructions equivalent to the
18753 derived sequence for ldmia, ldmdb, vldm respectively. */
18755 static void
18756 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
18757 bfd * output_bfd,
18758 const insn32 initial_insn,
18759 const bfd_byte *const initial_insn_addr,
18760 bfd_byte *const base_stub_contents)
18762 int wback = (initial_insn & 0x00200000) >> 21;
18763 int ri, rn = (initial_insn & 0x000F0000) >> 16;
18764 int insn_all_registers = initial_insn & 0x0000ffff;
18765 int insn_low_registers, insn_high_registers;
18766 int usable_register_mask;
18767 int nb_registers = elf32_arm_popcount (insn_all_registers);
18768 int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
18769 int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
18770 bfd_byte *current_stub_contents = base_stub_contents;
18772 BFD_ASSERT (is_thumb2_ldmia (initial_insn));
18774 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
18775 sequences of eight registers or fewer, which do not trigger the
18776 hardware issue. */
18777 if (nb_registers <= 8)
18779 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
18780 current_stub_contents =
18781 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18782 initial_insn);
18784 /* B initial_insn_addr+4. */
18785 if (!restore_pc)
18786 current_stub_contents =
18787 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18788 create_instruction_branch_absolute
18789 (initial_insn_addr - current_stub_contents));
18791 /* Fill the remainder of the stub with deterministic contents. */
18792 current_stub_contents =
18793 stm32l4xx_fill_stub_udf (htab, output_bfd,
18794 base_stub_contents, current_stub_contents,
18795 base_stub_contents +
18796 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18798 return;
18801 /* - reg_list[13] == 0. */
18802 BFD_ASSERT ((insn_all_registers & (1 << 13))==0);
18804 /* - reg_list[14] & reg_list[15] != 1. */
18805 BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
18807 /* - if (wback==1) reg_list[rn] == 0. */
18808 BFD_ASSERT (!wback || !restore_rn);
18810 /* - nb_registers > 8. */
18811 BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
18813 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
18815 /* In the following algorithm, we split this wide LDM using 2 LDM insns:
18816 - One with the 7 lowest registers (register mask 0x007F)
18817 This LDM will finally contain between 2 and 7 registers
18818 - One with the 7 highest registers (register mask 0xDF80)
18819 This ldm will finally contain between 2 and 7 registers. */
18820 insn_low_registers = insn_all_registers & 0x007F;
18821 insn_high_registers = insn_all_registers & 0xDF80;
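/* Illustrative split (hypothetical register list): for
   LDMIA r11!, {r0-r10, r12} the mask is 0x17ff, giving
   insn_low_registers == 0x007f (r0-r6) and
   insn_high_registers == 0x1780 (r7-r10, r12).  */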
18823 /* A spare register may be needed during this veneer to temporarily
18824 handle the base register. This register will be restored with the
18825 last LDM operation.
18826 The usable register may be any general purpose register (that
18827 excludes PC, SP, LR : register mask is 0x1FFF). */
18828 usable_register_mask = 0x1FFF;
18830 /* Generate the stub function. */
18831 if (wback)
18833 /* LDMIA Rn!, {R-low-register-list} : (Encoding T2). */
18834 current_stub_contents =
18835 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18836 create_instruction_ldmia
18837 (rn, /*wback=*/1, insn_low_registers));
18839 /* LDMIA Rn!, {R-high-register-list} : (Encoding T2). */
18840 current_stub_contents =
18841 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18842 create_instruction_ldmia
18843 (rn, /*wback=*/1, insn_high_registers));
18844 if (!restore_pc)
18846 /* B initial_insn_addr+4. */
18847 current_stub_contents =
18848 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18849 create_instruction_branch_absolute
18850 (initial_insn_addr - current_stub_contents));
18853 else /* if (!wback). */
18855 ri = rn;
18857 /* If Rn is not in the high-register-list, copy its value into a register that is. */
18858 if (!(insn_high_registers & (1 << rn)))
18860 /* Choose a Ri in the high-register-list that will be restored. */
18861 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
18863 /* MOV Ri, Rn. */
18864 current_stub_contents =
18865 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18866 create_instruction_mov (ri, rn));
18869 /* LDMIA Ri!, {R-low-register-list} : (Encoding T2). */
18870 current_stub_contents =
18871 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18872 create_instruction_ldmia
18873 (ri, /*wback=*/1, insn_low_registers));
18875 /* LDMIA Ri, {R-high-register-list} : (Encoding T2). */
18876 current_stub_contents =
18877 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18878 create_instruction_ldmia
18879 (ri, /*wback=*/0, insn_high_registers));
18881 if (!restore_pc)
18883 /* B initial_insn_addr+4. */
18884 current_stub_contents =
18885 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18886 create_instruction_branch_absolute
18887 (initial_insn_addr - current_stub_contents));
18891 /* Fill the remainder of the stub with deterministic contents. */
18892 current_stub_contents =
18893 stm32l4xx_fill_stub_udf (htab, output_bfd,
18894 base_stub_contents, current_stub_contents,
18895 base_stub_contents +
18896 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18899 static void
18900 stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
18901 bfd * output_bfd,
18902 const insn32 initial_insn,
18903 const bfd_byte *const initial_insn_addr,
18904 bfd_byte *const base_stub_contents)
18906 int wback = (initial_insn & 0x00200000) >> 21;
18907 int ri, rn = (initial_insn & 0x000f0000) >> 16;
18908 int insn_all_registers = initial_insn & 0x0000ffff;
18909 int insn_low_registers, insn_high_registers;
18910 int usable_register_mask;
18911 int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
18912 int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
18913 int nb_registers = elf32_arm_popcount (insn_all_registers);
18914 bfd_byte *current_stub_contents = base_stub_contents;
18916 BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
18918 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
18919 sequences of fewer than 8 registers that do not cause the
18920 hardware issue. */
18921 if (nb_registers <= 8)
18923 /* UNTOUCHED : LDMDB Rn{!}, {R-all-register-list}. */
18924 current_stub_contents =
18925 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18926 initial_insn);
18928 /* B initial_insn_addr+4. */
18929 current_stub_contents =
18930 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18931 create_instruction_branch_absolute
18932 (initial_insn_addr - current_stub_contents));
18934 /* Fill the remainder of the stub with deterministic contents. */
18935 current_stub_contents =
18936 stm32l4xx_fill_stub_udf (htab, output_bfd,
18937 base_stub_contents, current_stub_contents,
18938 base_stub_contents +
18939 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18941 return;
18944 /* - reg_list[13] == 0. */
18945 BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
18947 /* - reg_list[14] & reg_list[15] != 1. */
18948 BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
18950 /* - if (wback==1) reg_list[rn] == 0. */
18951 BFD_ASSERT (!wback || !restore_rn);
18953 /* - nb_registers > 8. */
18954 BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
18956 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
18958 /* In the following algorithm, we split this wide LDM using 2 LDM insns:
18959 - One with the 7 lowest registers (register mask 0x007F)
18960 This LDM will finally contain between 2 and 7 registers
18961 - One with the 7 highest registers (register mask 0xDF80)
18962 This LDM will finally contain between 2 and 7 registers. */
18963 insn_low_registers = insn_all_registers & 0x007F;
18964 insn_high_registers = insn_all_registers & 0xDF80;
18966 /* A spare register may be needed during this veneer to temporarily
18967 handle the base register. This register will be restored with
18968 the last LDM operation.
18969 The usable register may be any general purpose register (excluding
18970 PC, SP and LR: register mask is 0x1FFF). */
18971 usable_register_mask = 0x1FFF;
18973 /* Generate the stub function. */
18974 if (!wback && !restore_pc && !restore_rn)
18976 /* Choose a Ri in the low-register-list that will be restored. */
18977 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
18979 /* MOV Ri, Rn. */
18980 current_stub_contents =
18981 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18982 create_instruction_mov (ri, rn));
18984 /* LDMDB Ri!, {R-high-register-list}. */
18985 current_stub_contents =
18986 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18987 create_instruction_ldmdb
18988 (ri, /*wback=*/1, insn_high_registers));
18990 /* LDMDB Ri, {R-low-register-list}. */
18991 current_stub_contents =
18992 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18993 create_instruction_ldmdb
18994 (ri, /*wback=*/0, insn_low_registers));
18996 /* B initial_insn_addr+4. */
18997 current_stub_contents =
18998 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18999 create_instruction_branch_absolute
19000 (initial_insn_addr - current_stub_contents));
19002 else if (wback && !restore_pc && !restore_rn)
19004 /* LDMDB Rn!, {R-high-register-list}. */
19005 current_stub_contents =
19006 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19007 create_instruction_ldmdb
19008 (rn, /*wback=*/1, insn_high_registers));
19010 /* LDMDB Rn!, {R-low-register-list}. */
19011 current_stub_contents =
19012 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19013 create_instruction_ldmdb
19014 (rn, /*wback=*/1, insn_low_registers));
19016 /* B initial_insn_addr+4. */
19017 current_stub_contents =
19018 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19019 create_instruction_branch_absolute
19020 (initial_insn_addr - current_stub_contents));
19022 else if (!wback && restore_pc && !restore_rn)
19024 /* Choose a Ri in the high-register-list that will be restored. */
19025 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19027 /* SUB Ri, Rn, #(4*nb_registers). */
19028 current_stub_contents =
19029 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19030 create_instruction_sub (ri, rn, (4 * nb_registers)));
19032 /* LDMIA Ri!, {R-low-register-list}. */
19033 current_stub_contents =
19034 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19035 create_instruction_ldmia
19036 (ri, /*wback=*/1, insn_low_registers));
19038 /* LDMIA Ri, {R-high-register-list}. */
19039 current_stub_contents =
19040 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19041 create_instruction_ldmia
19042 (ri, /*wback=*/0, insn_high_registers));
19044 else if (wback && restore_pc && !restore_rn)
19046 /* Choose a Ri in the high-register-list that will be restored. */
19047 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19049 /* SUB Rn, Rn, #(4*nb_registers) */
19050 current_stub_contents =
19051 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19052 create_instruction_sub (rn, rn, (4 * nb_registers)));
19054 /* MOV Ri, Rn. */
19055 current_stub_contents =
19056 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19057 create_instruction_mov (ri, rn));
19059 /* LDMIA Ri!, {R-low-register-list}. */
19060 current_stub_contents =
19061 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19062 create_instruction_ldmia
19063 (ri, /*wback=*/1, insn_low_registers));
19065 /* LDMIA Ri, {R-high-register-list}. */
19066 current_stub_contents =
19067 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19068 create_instruction_ldmia
19069 (ri, /*wback=*/0, insn_high_registers));
19071 else if (!wback && !restore_pc && restore_rn)
19073 ri = rn;
19074 if (!(insn_low_registers & (1 << rn)))
19076 /* Choose a Ri in the low-register-list that will be restored. */
19077 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19079 /* MOV Ri, Rn. */
19080 current_stub_contents =
19081 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19082 create_instruction_mov (ri, rn));
19085 /* LDMDB Ri!, {R-high-register-list}. */
19086 current_stub_contents =
19087 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19088 create_instruction_ldmdb
19089 (ri, /*wback=*/1, insn_high_registers));
19091 /* LDMDB Ri, {R-low-register-list}. */
19092 current_stub_contents =
19093 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19094 create_instruction_ldmdb
19095 (ri, /*wback=*/0, insn_low_registers));
19097 /* B initial_insn_addr+4. */
19098 current_stub_contents =
19099 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19100 create_instruction_branch_absolute
19101 (initial_insn_addr - current_stub_contents));
19103 else if (!wback && restore_pc && restore_rn)
19105 ri = rn;
19106 if (!(insn_high_registers & (1 << rn)))
19108 /* Choose a Ri in the high-register-list that will be restored. */
19109 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19112 /* SUB Ri, Rn, #(4*nb_registers). */
19113 current_stub_contents =
19114 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19115 create_instruction_sub (ri, rn, (4 * nb_registers)));
19117 /* LDMIA Ri!, {R-low-register-list}. */
19118 current_stub_contents =
19119 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19120 create_instruction_ldmia
19121 (ri, /*wback=*/1, insn_low_registers));
19123 /* LDMIA Ri, {R-high-register-list}. */
19124 current_stub_contents =
19125 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19126 create_instruction_ldmia
19127 (ri, /*wback=*/0, insn_high_registers));
19129 else if (wback && restore_rn)
19131 /* The assembler should not have accepted to encode this. */
19132 BFD_ASSERT (0 && "Cannot patch an instruction that has "
19133 "undefined behavior.\n");
19136 /* Fill the remainder of the stub with deterministic contents. */
19137 current_stub_contents =
19138 stm32l4xx_fill_stub_udf (htab, output_bfd,
19139 base_stub_contents, current_stub_contents,
19140 base_stub_contents +
19141 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19145 static void
19146 stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
19147 bfd * output_bfd,
19148 const insn32 initial_insn,
19149 const bfd_byte *const initial_insn_addr,
19150 bfd_byte *const base_stub_contents)
19152 int num_words = initial_insn & 0xff;
19153 bfd_byte *current_stub_contents = base_stub_contents;
19155 BFD_ASSERT (is_thumb2_vldm (initial_insn));
19157 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
19158 sequences of fewer than 8 words that do not cause the
19159 hardware issue. */
19160 if (num_words <= 8)
19162 /* Untouched instruction. */
19163 current_stub_contents =
19164 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19165 initial_insn);
19167 /* B initial_insn_addr+4. */
19168 current_stub_contents =
19169 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19170 create_instruction_branch_absolute
19171 (initial_insn_addr - current_stub_contents));
19173 else
19175 bool is_dp = /* DP encoding. */
19176 (initial_insn & 0xfe100f00) == 0xec100b00;
19177 bool is_ia_nobang = /* (IA without !). */
19178 (((initial_insn << 7) >> 28) & 0xd) == 0x4;
19179 bool is_ia_bang = /* (IA with !) - includes VPOP. */
19180 (((initial_insn << 7) >> 28) & 0xd) == 0x5;
19181 bool is_db_bang = /* (DB with !). */
19182 (((initial_insn << 7) >> 28) & 0xd) == 0x9;
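/* The ((initial_insn << 7) >> 28) expression above extracts bits 24..21 of
   the instruction, i.e. the P, U, D and W bits of the VLDM addressing mode;
   masking with 0xd discards D, so 0x4, 0x5 and 0x9 correspond to P:U:W =
   0:1:0 (IA without writeback), 0:1:1 (IA with writeback) and 1:0:1 (DB with
   writeback) respectively. */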
19183 int base_reg = ((unsigned int) initial_insn << 12) >> 28;
19184 /* d = UInt (Vd:D);. */
19185 int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
19186 | (((unsigned int)initial_insn << 9) >> 31);
19188 /* Compute the number of 8-words chunks needed to split. */
19189 int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
19190 int chunk;
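/* For example, a VLDM transferring 13 words is rewritten below as one
   8-word VLDM followed by one 5-word VLDM (chunks == 2). */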
19192 /* The test coverage has been done assuming that exactly one of the
19193 previous is_ predicates is true. */
19195 BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
19196 && !(is_ia_nobang & is_ia_bang & is_db_bang));
19198 /* We treat the cutting of the words in one pass for all
19199 cases, then we emit the adjustments:
19201 vldm rx, {...}
19202 -> vldm rx!, {8_words_or_less} for each needed 8_word
19203 -> sub rx, rx, #size (list)
19205 vldm rx!, {...}
19206 -> vldm rx!, {8_words_or_less} for each needed 8_word
19207 This also handles vpop instruction (when rx is sp)
19209 vldmdb rx!, {...}
19210 -> vldmdb rx!, {8_words_or_less} for each needed 8_word. */
19211 for (chunk = 0; chunk < chunks; ++chunk)
19213 bfd_vma new_insn = 0;
19215 if (is_ia_nobang || is_ia_bang)
19217 new_insn = create_instruction_vldmia
19218 (base_reg,
19219 is_dp,
19220 /*wback=*/1,
19221 chunks - (chunk + 1) ?
19222 8 : num_words - chunk * 8,
19223 first_reg + chunk * 8);
19225 else if (is_db_bang)
19227 new_insn = create_instruction_vldmdb
19228 (base_reg,
19229 is_dp,
19230 chunks - (chunk + 1) ?
19231 8 : num_words - chunk * 8,
19232 first_reg + chunk * 8);
19235 if (new_insn)
19236 current_stub_contents =
19237 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19238 new_insn);
19241 /* Only this case requires the base register compensation
19242 subtract. */
19243 if (is_ia_nobang)
19245 current_stub_contents =
19246 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19247 create_instruction_sub
19248 (base_reg, base_reg, 4*num_words));
19251 /* B initial_insn_addr+4. */
19252 current_stub_contents =
19253 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19254 create_instruction_branch_absolute
19255 (initial_insn_addr - current_stub_contents));
19258 /* Fill the remainder of the stub with deterministic contents. */
19259 current_stub_contents =
19260 stm32l4xx_fill_stub_udf (htab, output_bfd,
19261 base_stub_contents, current_stub_contents,
19262 base_stub_contents +
19263 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
19266 static void
19267 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19268 bfd * output_bfd,
19269 const insn32 wrong_insn,
19270 const bfd_byte *const wrong_insn_addr,
19271 bfd_byte *const stub_contents)
19273 if (is_thumb2_ldmia (wrong_insn))
19274 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19275 wrong_insn, wrong_insn_addr,
19276 stub_contents);
19277 else if (is_thumb2_ldmdb (wrong_insn))
19278 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19279 wrong_insn, wrong_insn_addr,
19280 stub_contents);
19281 else if (is_thumb2_vldm (wrong_insn))
19282 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19283 wrong_insn, wrong_insn_addr,
19284 stub_contents);
19287 /* End of stm32l4xx work-around. */
19290 /* Do code byteswapping, apply VFP11 and STM32L4XX erratum fixes and edit
19291 .ARM.exidx contents. Return FALSE afterwards so that the section is written out as normal. */
19293 static bool
19294 elf32_arm_write_section (bfd *output_bfd,
19295 struct bfd_link_info *link_info,
19296 asection *sec,
19297 bfd_byte *contents)
19299 unsigned int mapcount, errcount;
19300 _arm_elf_section_data *arm_data;
19301 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
19302 elf32_arm_section_map *map;
19303 elf32_vfp11_erratum_list *errnode;
19304 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
19305 bfd_vma ptr;
19306 bfd_vma end;
19307 bfd_vma offset = sec->output_section->vma + sec->output_offset;
19308 bfd_byte tmp;
19309 unsigned int i;
19311 if (globals == NULL)
19312 return false;
19314 /* If this section has not been allocated an _arm_elf_section_data
19315 structure then we cannot record anything. */
19316 arm_data = get_arm_elf_section_data (sec);
19317 if (arm_data == NULL)
19318 return false;
19320 mapcount = arm_data->mapcount;
19321 map = arm_data->map;
19322 errcount = arm_data->erratumcount;
19324 if (errcount != 0)
19326 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
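/* XOR-ing a byte index with endianflip mirrors the byte order within each
   32-bit word, so the patched instructions below end up stored big-endian
   when the output BFD is big-endian and little-endian otherwise. */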
19328 for (errnode = arm_data->erratumlist; errnode != 0;
19329 errnode = errnode->next)
19331 bfd_vma target = errnode->vma - offset;
19333 switch (errnode->type)
19335 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
19337 bfd_vma branch_to_veneer;
19338 /* Original condition code of instruction, plus bit mask for
19339 ARM B instruction. */
19340 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
19341 | 0x0a000000;
19343 /* The instruction is before the label. */
19344 target -= 4;
19346 /* Above offset included in -4 below. */
19347 branch_to_veneer = errnode->u.b.veneer->vma
19348 - errnode->vma - 4;
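/* An ARM B instruction encodes a signed 24-bit word offset, giving a
   branch range of +/- 32MB, hence the 1 << 25 byte checks below. */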
19350 if ((signed) branch_to_veneer < -(1 << 25)
19351 || (signed) branch_to_veneer >= (1 << 25))
19352 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19353 "range"), output_bfd);
19355 insn |= (branch_to_veneer >> 2) & 0xffffff;
19356 contents[endianflip ^ target] = insn & 0xff;
19357 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19358 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19359 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19361 break;
19363 case VFP11_ERRATUM_ARM_VENEER:
19365 bfd_vma branch_from_veneer;
19366 unsigned int insn;
19368 /* Take size of veneer into account. */
19369 branch_from_veneer = errnode->u.v.branch->vma
19370 - errnode->vma - 12;
19372 if ((signed) branch_from_veneer < -(1 << 25)
19373 || (signed) branch_from_veneer >= (1 << 25))
19374 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19375 "range"), output_bfd);
19377 /* Original instruction. */
19378 insn = errnode->u.v.branch->u.b.vfp_insn;
19379 contents[endianflip ^ target] = insn & 0xff;
19380 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19381 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19382 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19384 /* Branch back to insn after original insn. */
19385 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
19386 contents[endianflip ^ (target + 4)] = insn & 0xff;
19387 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
19388 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
19389 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
19391 break;
19393 default:
19394 abort ();
19399 if (arm_data->stm32l4xx_erratumcount != 0)
19401 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
19402 stm32l4xx_errnode != 0;
19403 stm32l4xx_errnode = stm32l4xx_errnode->next)
19405 bfd_vma target = stm32l4xx_errnode->vma - offset;
19407 switch (stm32l4xx_errnode->type)
19409 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
19411 unsigned int insn;
19412 bfd_vma branch_to_veneer =
19413 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
19415 if ((signed) branch_to_veneer < -(1 << 24)
19416 || (signed) branch_to_veneer >= (1 << 24))
19418 bfd_vma out_of_range =
19419 ((signed) branch_to_veneer < -(1 << 24)) ?
19420 - branch_to_veneer - (1 << 24) :
19421 ((signed) branch_to_veneer >= (1 << 24)) ?
19422 branch_to_veneer - (1 << 24) : 0;
19424 _bfd_error_handler
19425 (_("%pB(%#" PRIx64 "): error: "
19426 "cannot create STM32L4XX veneer; "
19427 "jump out of range by %" PRId64 " bytes; "
19428 "cannot encode branch instruction"),
19429 output_bfd,
19430 (uint64_t) (stm32l4xx_errnode->vma - 4),
19431 (int64_t) out_of_range);
19432 continue;
19435 insn = create_instruction_branch_absolute
19436 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
19438 /* The instruction is before the label. */
19439 target -= 4;
19441 put_thumb2_insn (globals, output_bfd,
19442 (bfd_vma) insn, contents + target);
19444 break;
19446 case STM32L4XX_ERRATUM_VENEER:
19448 bfd_byte * veneer;
19449 bfd_byte * veneer_r;
19450 unsigned int insn;
19452 veneer = contents + target;
19453 veneer_r = veneer
19454 + stm32l4xx_errnode->u.b.veneer->vma
19455 - stm32l4xx_errnode->vma - 4;
19457 if ((signed) (veneer_r - veneer -
19458 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
19459 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
19460 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
19461 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
19462 || (signed) (veneer_r - veneer) >= (1 << 24))
19464 _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
19465 "veneer"), output_bfd);
19466 continue;
19469 /* Original instruction. */
19470 insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
19472 stm32l4xx_create_replacing_stub
19473 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
19475 break;
19477 default:
19478 abort ();
19483 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
19485 arm_unwind_table_edit *edit_node
19486 = arm_data->u.exidx.unwind_edit_list;
19487 /* Now, sec->size is the size of the section we will write. The original
19488 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
19489 markers) was sec->rawsize. (This isn't the case if we perform no
19490 edits; then rawsize will be zero and we should use size). */
19491 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
19492 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
19493 unsigned int in_index, out_index;
19494 bfd_vma add_to_offsets = 0;
19496 if (edited_contents == NULL)
19497 return false;
19498 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
19500 if (edit_node)
19502 unsigned int edit_index = edit_node->index;
19504 if (in_index < edit_index && in_index * 8 < input_size)
19506 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19507 contents + in_index * 8, add_to_offsets);
19508 out_index++;
19509 in_index++;
19511 else if (in_index == edit_index
19512 || (in_index * 8 >= input_size
19513 && edit_index == UINT_MAX))
19515 switch (edit_node->type)
19517 case DELETE_EXIDX_ENTRY:
19518 in_index++;
19519 add_to_offsets += 8;
19520 break;
19522 case INSERT_EXIDX_CANTUNWIND_AT_END:
19524 asection *text_sec = edit_node->linked_section;
19525 bfd_vma text_offset = text_sec->output_section->vma
19526 + text_sec->output_offset
19527 + text_sec->size;
19528 bfd_vma exidx_offset = offset + out_index * 8;
19529 unsigned long prel31_offset;
19531 /* Note: this is meant to be equivalent to an
19532 R_ARM_PREL31 relocation. These synthetic
19533 EXIDX_CANTUNWIND markers are not relocated by the
19534 usual BFD method. */
19535 prel31_offset = (text_offset - exidx_offset)
19536 & 0x7ffffffful;
19537 if (bfd_link_relocatable (link_info))
19539 /* Here a relocation for the new EXIDX_CANTUNWIND entry is
19540 created, so there is no need to
19541 adjust the offset by hand. */
19542 prel31_offset = text_sec->output_offset
19543 + text_sec->size;
19546 /* First address we can't unwind. */
19547 bfd_put_32 (output_bfd, prel31_offset,
19548 &edited_contents[out_index * 8]);
19550 /* Code for EXIDX_CANTUNWIND. */
19551 bfd_put_32 (output_bfd, 0x1,
19552 &edited_contents[out_index * 8 + 4]);
19554 out_index++;
19555 add_to_offsets -= 8;
19557 break;
19560 edit_node = edit_node->next;
19563 else
19565 /* No more edits, copy remaining entries verbatim. */
19566 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19567 contents + in_index * 8, add_to_offsets);
19568 out_index++;
19569 in_index++;
19573 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
19574 bfd_set_section_contents (output_bfd, sec->output_section,
19575 edited_contents,
19576 (file_ptr) sec->output_offset, sec->size);
19578 return true;
19581 /* Fix code to point to Cortex-A8 erratum stubs. */
19582 if (globals->fix_cortex_a8)
19584 struct a8_branch_to_stub_data data;
19586 data.writing_section = sec;
19587 data.contents = contents;
19589 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
19590 & data);
19593 if (mapcount == 0)
19594 return false;
19596 if (globals->byteswap_code)
19598 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
19600 ptr = map[0].vma;
19601 for (i = 0; i < mapcount; i++)
19603 if (i == mapcount - 1)
19604 end = sec->size;
19605 else
19606 end = map[i + 1].vma;
19608 switch (map[i].type)
19610 case 'a':
19611 /* Byte swap code words. */
19612 while (ptr + 3 < end)
19614 tmp = contents[ptr];
19615 contents[ptr] = contents[ptr + 3];
19616 contents[ptr + 3] = tmp;
19617 tmp = contents[ptr + 1];
19618 contents[ptr + 1] = contents[ptr + 2];
19619 contents[ptr + 2] = tmp;
19620 ptr += 4;
19622 break;
19624 case 't':
19625 /* Byte swap code halfwords. */
19626 while (ptr + 1 < end)
19628 tmp = contents[ptr];
19629 contents[ptr] = contents[ptr + 1];
19630 contents[ptr + 1] = tmp;
19631 ptr += 2;
19633 break;
19635 case 'd':
19636 /* Leave data alone. */
19637 break;
19639 ptr = end;
19643 free (map);
19644 arm_data->mapcount = -1;
19645 arm_data->mapsize = 0;
19646 arm_data->map = NULL;
19648 return false;
19651 /* Mangle thumb function symbols as we read them in. */
19653 static bool
19654 elf32_arm_swap_symbol_in (bfd * abfd,
19655 const void *psrc,
19656 const void *pshn,
19657 Elf_Internal_Sym *dst)
19659 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19660 return false;
19661 dst->st_target_internal = 0;
19663 /* New EABI objects mark thumb function symbols by setting the low bit of
19664 the address. */
19665 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19666 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19668 if (dst->st_value & 1)
19670 dst->st_value &= ~(bfd_vma) 1;
19671 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19672 ST_BRANCH_TO_THUMB);
19674 else
19675 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19677 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19679 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19680 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19682 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19683 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19684 else
19685 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19687 return true;
19691 /* Mangle thumb function symbols as we write them out. */
19693 static void
19694 elf32_arm_swap_symbol_out (bfd *abfd,
19695 const Elf_Internal_Sym *src,
19696 void *cdst,
19697 void *shndx)
19699 Elf_Internal_Sym newsym;
19701 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19702 of the address set, as per the new EABI. We do this unconditionally
19703 because objcopy does not set the elf header flags until after
19704 it writes out the symbol table. */
19705 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19707 newsym = *src;
19708 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19709 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19710 if (newsym.st_shndx != SHN_UNDEF)
19712 /* Do this only for defined symbols. At link time, the static
19713 linker will simulate the work of the dynamic linker by resolving
19714 symbols and will carry over the thumbness of found symbols to
19715 the output symbol table. It's not clear how it happens, but
19716 the thumbness of undefined symbols can well be different at
19717 runtime, and writing '1' for them will be confusing for users
19718 and possibly for the dynamic linker itself.
19720 newsym.st_value |= 1;
19723 src = &newsym;
19725 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19728 /* Add the PT_ARM_EXIDX program header. */
19730 static bool
19731 elf32_arm_modify_segment_map (bfd *abfd,
19732 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19734 struct elf_segment_map *m;
19735 asection *sec;
19737 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19738 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19740 /* If there is already a PT_ARM_EXIDX header, then we do not
19741 want to add another one. This situation arises when running
19742 "strip"; the input binary already has the header. */
19743 m = elf_seg_map (abfd);
19744 while (m && m->p_type != PT_ARM_EXIDX)
19745 m = m->next;
19746 if (!m)
19748 m = (struct elf_segment_map *)
19749 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19750 if (m == NULL)
19751 return false;
19752 m->p_type = PT_ARM_EXIDX;
19753 m->count = 1;
19754 m->sections[0] = sec;
19756 m->next = elf_seg_map (abfd);
19757 elf_seg_map (abfd) = m;
19761 return true;
19764 /* We may add a PT_ARM_EXIDX program header. */
19766 static int
19767 elf32_arm_additional_program_headers (bfd *abfd,
19768 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19770 asection *sec;
19772 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19773 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19774 return 1;
19775 else
19776 return 0;
19779 /* Hook called by the linker routine which adds symbols from an object
19780 file. */
19782 static bool
19783 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19784 Elf_Internal_Sym *sym, const char **namep,
19785 flagword *flagsp, asection **secp, bfd_vma *valp)
19787 if (elf32_arm_hash_table (info) == NULL)
19788 return false;
19790 if (elf32_arm_hash_table (info)->root.target_os == is_vxworks
19791 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19792 flagsp, secp, valp))
19793 return false;
19795 return true;
19798 /* We use this to override swap_symbol_in and swap_symbol_out. */
19799 const struct elf_size_info elf32_arm_size_info =
19801 sizeof (Elf32_External_Ehdr),
19802 sizeof (Elf32_External_Phdr),
19803 sizeof (Elf32_External_Shdr),
19804 sizeof (Elf32_External_Rel),
19805 sizeof (Elf32_External_Rela),
19806 sizeof (Elf32_External_Sym),
19807 sizeof (Elf32_External_Dyn),
19808 sizeof (Elf_External_Note),
19811 32, 2,
19812 ELFCLASS32, EV_CURRENT,
19813 bfd_elf32_write_out_phdrs,
19814 bfd_elf32_write_shdrs_and_ehdr,
19815 bfd_elf32_checksum_contents,
19816 bfd_elf32_write_relocs,
19817 elf32_arm_swap_symbol_in,
19818 elf32_arm_swap_symbol_out,
19819 bfd_elf32_slurp_reloc_table,
19820 bfd_elf32_slurp_symbol_table,
19821 bfd_elf32_swap_dyn_in,
19822 bfd_elf32_swap_dyn_out,
19823 bfd_elf32_swap_reloc_in,
19824 bfd_elf32_swap_reloc_out,
19825 bfd_elf32_swap_reloca_in,
19826 bfd_elf32_swap_reloca_out
19829 static bfd_vma
19830 read_code32 (const bfd *abfd, const bfd_byte *addr)
19832 /* V7 BE8 code is always little endian. */
19833 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19834 return bfd_getl32 (addr);
19836 return bfd_get_32 (abfd, addr);
19839 static bfd_vma
19840 read_code16 (const bfd *abfd, const bfd_byte *addr)
19842 /* V7 BE8 code is always little endian. */
19843 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19844 return bfd_getl16 (addr);
19846 return bfd_get_16 (abfd, addr);
19849 /* Return size of plt0 entry starting at ADDR
19850 or (bfd_vma) -1 if size can not be determined. */
19852 static bfd_vma
19853 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
19855 bfd_vma first_word;
19856 bfd_vma plt0_size;
19858 first_word = read_code32 (abfd, addr);
19860 if (first_word == elf32_arm_plt0_entry[0])
19861 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
19862 else if (first_word == elf32_thumb2_plt0_entry[0])
19863 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
19864 else
19865 /* We don't yet handle this PLT format. */
19866 return (bfd_vma) -1;
19868 return plt0_size;
19871 /* Return size of plt entry starting at offset OFFSET
19872 of plt section located at address START
19873 or (bfd_vma) -1 if size can not be determined. */
19875 static bfd_vma
19876 elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
19878 bfd_vma first_insn;
19879 bfd_vma plt_size = 0;
19880 const bfd_byte *addr = start + offset;
19882 /* PLT entry size is fixed on Thumb-only platforms. */
19883 if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
19884 return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
19886 /* Respect Thumb stub if necessary. */
19887 if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
19889 plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);
19892 /* Strip immediate from first add. */
19893 first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;
19895 #ifdef FOUR_WORD_PLT
19896 if (first_insn == elf32_arm_plt_entry[0])
19897 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
19898 #else
19899 if (first_insn == elf32_arm_plt_entry_long[0])
19900 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
19901 else if (first_insn == elf32_arm_plt_entry_short[0])
19902 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
19903 #endif
19904 else
19905 /* We don't yet handle this PLT format. */
19906 return (bfd_vma) -1;
19908 return plt_size;
19911 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
19913 static long
19914 elf32_arm_get_synthetic_symtab (bfd *abfd,
19915 long symcount ATTRIBUTE_UNUSED,
19916 asymbol **syms ATTRIBUTE_UNUSED,
19917 long dynsymcount,
19918 asymbol **dynsyms,
19919 asymbol **ret)
19921 asection *relplt;
19922 asymbol *s;
19923 arelent *p;
19924 long count, i, n;
19925 size_t size;
19926 Elf_Internal_Shdr *hdr;
19927 char *names;
19928 asection *plt;
19929 bfd_vma offset;
19930 bfd_byte *data;
19932 *ret = NULL;
19934 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
19935 return 0;
19937 if (dynsymcount <= 0)
19938 return 0;
19940 relplt = bfd_get_section_by_name (abfd, ".rel.plt");
19941 if (relplt == NULL)
19942 return 0;
19944 hdr = &elf_section_data (relplt)->this_hdr;
19945 if (hdr->sh_link != elf_dynsymtab (abfd)
19946 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
19947 return 0;
19949 plt = bfd_get_section_by_name (abfd, ".plt");
19950 if (plt == NULL)
19951 return 0;
19953 if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, true))
19954 return -1;
19956 data = plt->contents;
19957 if (data == NULL)
19959 if (!bfd_get_full_section_contents (abfd, (asection *) plt, &data) || data == NULL)
19960 return -1;
19961 bfd_cache_section_contents ((asection *) plt, data);
19964 count = relplt->size / hdr->sh_entsize;
19965 size = count * sizeof (asymbol);
19966 p = relplt->relocation;
19967 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
19969 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
19970 if (p->addend != 0)
19971 size += sizeof ("+0x") - 1 + 8;
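/* The extra 8 bytes cover up to 8 hex digits of a 32-bit addend; leading
   zeros are stripped when the name is built below. */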
19974 s = *ret = (asymbol *) bfd_malloc (size);
19975 if (s == NULL)
19976 return -1;
19978 offset = elf32_arm_plt0_size (abfd, data);
19979 if (offset == (bfd_vma) -1)
19980 return -1;
19982 names = (char *) (s + count);
19983 p = relplt->relocation;
19984 n = 0;
19985 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
19987 size_t len;
19989 bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
19990 if (plt_size == (bfd_vma) -1)
19991 break;
19993 *s = **p->sym_ptr_ptr;
19994 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
19995 we are defining a symbol, ensure one of them is set. */
19996 if ((s->flags & BSF_LOCAL) == 0)
19997 s->flags |= BSF_GLOBAL;
19998 s->flags |= BSF_SYNTHETIC;
19999 s->section = plt;
20000 s->value = offset;
20001 s->name = names;
20002 s->udata.p = NULL;
20003 len = strlen ((*p->sym_ptr_ptr)->name);
20004 memcpy (names, (*p->sym_ptr_ptr)->name, len);
20005 names += len;
20006 if (p->addend != 0)
20008 char buf[30], *a;
20010 memcpy (names, "+0x", sizeof ("+0x") - 1);
20011 names += sizeof ("+0x") - 1;
20012 bfd_sprintf_vma (abfd, buf, p->addend);
20013 for (a = buf; *a == '0'; ++a)
20015 len = strlen (a);
20016 memcpy (names, a, len);
20017 names += len;
20019 memcpy (names, "@plt", sizeof ("@plt"));
20020 names += sizeof ("@plt");
20021 ++s, ++n;
20022 offset += plt_size;
20025 return n;
20028 static bool
20029 elf32_arm_section_flags (const Elf_Internal_Shdr *hdr)
20031 if (hdr->sh_flags & SHF_ARM_PURECODE)
20032 hdr->bfd_section->flags |= SEC_ELF_PURECODE;
20033 return true;
20036 static flagword
20037 elf32_arm_lookup_section_flags (char *flag_name)
20039 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
20040 return SHF_ARM_PURECODE;
20042 return SEC_NO_FLAGS;
20045 static unsigned int
20046 elf32_arm_count_additional_relocs (asection *sec)
20048 struct _arm_elf_section_data *arm_data;
20049 arm_data = get_arm_elf_section_data (sec);
20051 return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
20054 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
20055 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
20056 FALSE otherwise. ISECTION is the best guess matching section from the
20057 input bfd IBFD, but it might be NULL. */
20059 static bool
20060 elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
20061 bfd *obfd ATTRIBUTE_UNUSED,
20062 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
20063 Elf_Internal_Shdr *osection)
20065 switch (osection->sh_type)
20067 case SHT_ARM_EXIDX:
20069 Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
20070 Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
20071 unsigned i = 0;
20073 osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
20074 osection->sh_info = 0;
20076 /* The sh_link field must be set to the text section associated with
20077 this index section. Unfortunately the ARM EHABI does not specify
20078 exactly how to determine this association. Our caller does try
20079 to match up OSECTION with its corresponding input section however
20080 so that is a good first guess. */
20081 if (isection != NULL
20082 && osection->bfd_section != NULL
20083 && isection->bfd_section != NULL
20084 && isection->bfd_section->output_section != NULL
20085 && isection->bfd_section->output_section == osection->bfd_section
20086 && iheaders != NULL
20087 && isection->sh_link > 0
20088 && isection->sh_link < elf_numsections (ibfd)
20089 && iheaders[isection->sh_link]->bfd_section != NULL
20090 && iheaders[isection->sh_link]->bfd_section->output_section != NULL
20093 for (i = elf_numsections (obfd); i-- > 0;)
20094 if (oheaders[i]->bfd_section
20095 == iheaders[isection->sh_link]->bfd_section->output_section)
20096 break;
20099 if (i == 0)
20101 /* Failing that we have to find a matching section ourselves. If
20102 we had the output section name available we could compare that
20103 with input section names. Unfortunately we don't. So instead
20104 we use a simple heuristic and look for the nearest executable
20105 section before this one. */
20106 for (i = elf_numsections (obfd); i-- > 0;)
20107 if (oheaders[i] == osection)
20108 break;
20109 if (i == 0)
20110 break;
20112 while (i-- > 0)
20113 if (oheaders[i]->sh_type == SHT_PROGBITS
20114 && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
20115 == (SHF_ALLOC | SHF_EXECINSTR))
20116 break;
20119 if (i)
20121 osection->sh_link = i;
20122 /* If the text section was part of a group
20123 then the index section should be too. */
20124 if (oheaders[i]->sh_flags & SHF_GROUP)
20125 osection->sh_flags |= SHF_GROUP;
20126 return true;
20129 break;
20131 case SHT_ARM_PREEMPTMAP:
20132 osection->sh_flags = SHF_ALLOC;
20133 break;
20135 case SHT_ARM_ATTRIBUTES:
20136 case SHT_ARM_DEBUGOVERLAY:
20137 case SHT_ARM_OVERLAYSECTION:
20138 default:
20139 break;
20142 return false;
20145 /* Returns TRUE if NAME is an ARM mapping symbol.
20146 Traditionally the symbols $a, $d and $t have been used.
20147 The ARM ELF standard also defines $x (for A64 code). It also allows a
20148 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20149 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20150 not support them here. $t.x indicates the start of ThumbEE instructions. */
20152 static bool
20153 is_arm_mapping_symbol (const char * name)
20155 return name != NULL /* Paranoia. */
20156 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20157 the mapping symbols could have acquired a prefix.
20158 We do not support this here, since such symbols no
20159 longer conform to the ARM ELF ABI. */
20160 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20161 && (name[2] == 0 || name[2] == '.');
20162 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20163 any characters that follow the period are legal characters for the body
20164 of a symbol's name. For now we just assume that this is the case. */
20167 /* Make sure that mapping symbols in object files are not removed via the
20168 "strip --strip-unneeded" tool. These symbols are needed in order to
20169 correctly generate interworking veneers, and for byte swapping code
20170 regions. Once an object file has been linked, it is safe to remove the
20171 symbols as they will no longer be needed. */
20173 static void
20174 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20176 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20177 && sym->section != bfd_abs_section_ptr
20178 && is_arm_mapping_symbol (sym->name))
20179 sym->flags |= BSF_KEEP;
20182 #undef elf_backend_copy_special_section_fields
20183 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20185 #define ELF_ARCH bfd_arch_arm
20186 #define ELF_TARGET_ID ARM_ELF_DATA
20187 #define ELF_MACHINE_CODE EM_ARM
20188 #ifdef __QNXTARGET__
20189 #define ELF_MAXPAGESIZE 0x1000
20190 #else
20191 #define ELF_MAXPAGESIZE 0x10000
20192 #endif
20193 #define ELF_MINPAGESIZE 0x1000
20194 #define ELF_COMMONPAGESIZE 0x1000
20196 #define bfd_elf32_mkobject elf32_arm_mkobject
20198 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20199 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20200 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20201 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20202 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20203 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20204 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20205 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20206 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20207 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20208 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20209 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20211 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20212 #define elf_backend_maybe_function_sym elf32_arm_maybe_function_sym
20213 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
20214 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
20215 #define elf_backend_check_relocs elf32_arm_check_relocs
20216 #define elf_backend_update_relocs elf32_arm_update_relocs
20217 #define elf_backend_relocate_section elf32_arm_relocate_section
20218 #define elf_backend_write_section elf32_arm_write_section
20219 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
20220 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
20221 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
20222 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
20223 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
20224 #define elf_backend_always_size_sections elf32_arm_always_size_sections
20225 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
20226 #define elf_backend_init_file_header elf32_arm_init_file_header
20227 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
20228 #define elf_backend_object_p elf32_arm_object_p
20229 #define elf_backend_fake_sections elf32_arm_fake_sections
20230 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
20231 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20232 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
20233 #define elf_backend_size_info elf32_arm_size_info
20234 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20235 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
20236 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
20237 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
20238 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
20239 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
20240 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
20241 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
20243 #define elf_backend_can_refcount 1
20244 #define elf_backend_can_gc_sections 1
20245 #define elf_backend_plt_readonly 1
20246 #define elf_backend_want_got_plt 1
20247 #define elf_backend_want_plt_sym 0
20248 #define elf_backend_want_dynrelro 1
20249 #define elf_backend_may_use_rel_p 1
20250 #define elf_backend_may_use_rela_p 0
20251 #define elf_backend_default_use_rela_p 0
20252 #define elf_backend_dtrel_excludes_plt 1
20254 #define elf_backend_got_header_size 12
20255 #define elf_backend_extern_protected_data 1
20257 #undef elf_backend_obj_attrs_vendor
20258 #define elf_backend_obj_attrs_vendor "aeabi"
20259 #undef elf_backend_obj_attrs_section
20260 #define elf_backend_obj_attrs_section ".ARM.attributes"
20261 #undef elf_backend_obj_attrs_arg_type
20262 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
20263 #undef elf_backend_obj_attrs_section_type
20264 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
20265 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
20266 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
20268 #undef elf_backend_section_flags
20269 #define elf_backend_section_flags elf32_arm_section_flags
20270 #undef elf_backend_lookup_section_flags_hook
20271 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
20273 #define elf_backend_linux_prpsinfo32_ugid16 true
20275 #include "elf32-target.h"
20277 /* Native Client targets. */
20279 #undef TARGET_LITTLE_SYM
20280 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
20281 #undef TARGET_LITTLE_NAME
20282 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
20283 #undef TARGET_BIG_SYM
20284 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
20285 #undef TARGET_BIG_NAME
20286 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
20288 /* Like elf32_arm_link_hash_table_create -- but overrides
20289 appropriately for NaCl. */
20291 static struct bfd_link_hash_table *
20292 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20294 struct bfd_link_hash_table *ret;
20296 ret = elf32_arm_link_hash_table_create (abfd);
20297 if (ret)
20299 struct elf32_arm_link_hash_table *htab
20300 = (struct elf32_arm_link_hash_table *) ret;
20302 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20303 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20305 return ret;
20308 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20309 really need to use elf32_arm_modify_segment_map. But we do it
20310 anyway just to reduce gratuitous differences with the stock ARM backend. */
20312 static bool
20313 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20315 return (elf32_arm_modify_segment_map (abfd, info)
20316 && nacl_modify_segment_map (abfd, info));
20319 static bool
20320 elf32_arm_nacl_final_write_processing (bfd *abfd)
20322 arm_final_write_processing (abfd);
20323 return nacl_final_write_processing (abfd);
20326 static bfd_vma
20327 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20328 const arelent *rel ATTRIBUTE_UNUSED)
20330 return plt->vma
20331 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20332 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20335 #undef elf32_bed
20336 #define elf32_bed elf32_arm_nacl_bed
20337 #undef bfd_elf32_bfd_link_hash_table_create
20338 #define bfd_elf32_bfd_link_hash_table_create \
20339 elf32_arm_nacl_link_hash_table_create
20340 #undef elf_backend_plt_alignment
20341 #define elf_backend_plt_alignment 4
20342 #undef elf_backend_modify_segment_map
20343 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
20344 #undef elf_backend_modify_headers
20345 #define elf_backend_modify_headers nacl_modify_headers
20346 #undef elf_backend_final_write_processing
20347 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
20348 #undef bfd_elf32_get_synthetic_symtab
20349 #undef elf_backend_plt_sym_val
20350 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
20351 #undef elf_backend_copy_special_section_fields
20353 #undef ELF_MINPAGESIZE
20354 #undef ELF_COMMONPAGESIZE
20356 #undef ELF_TARGET_OS
20357 #define ELF_TARGET_OS is_nacl
20359 #include "elf32-target.h"
20361 /* Reset to defaults. */
20362 #undef elf_backend_plt_alignment
20363 #undef elf_backend_modify_segment_map
20364 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20365 #undef elf_backend_modify_headers
20366 #undef elf_backend_final_write_processing
20367 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20368 #undef ELF_MINPAGESIZE
20369 #define ELF_MINPAGESIZE 0x1000
20370 #undef ELF_COMMONPAGESIZE
20371 #define ELF_COMMONPAGESIZE 0x1000
20374 /* FDPIC Targets. */
20376 #undef TARGET_LITTLE_SYM
20377 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
20378 #undef TARGET_LITTLE_NAME
20379 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
20380 #undef TARGET_BIG_SYM
20381 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
20382 #undef TARGET_BIG_NAME
20383 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
20384 #undef elf_match_priority
20385 #define elf_match_priority 128
20386 #undef ELF_OSABI
20387 #define ELF_OSABI ELFOSABI_ARM_FDPIC
20389 /* Like elf32_arm_link_hash_table_create -- but overrides
20390 appropriately for FDPIC. */
20392 static struct bfd_link_hash_table *
20393 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20395 struct bfd_link_hash_table *ret;
20397 ret = elf32_arm_link_hash_table_create (abfd);
20398 if (ret)
20400 struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20402 htab->fdpic_p = 1;
20404 return ret;
20407 /* We need dynamic symbols for every section, since segments can
20408 relocate independently. */
20409 static bool
20410 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20411 struct bfd_link_info *info
20412 ATTRIBUTE_UNUSED,
20413 asection *p ATTRIBUTE_UNUSED)
20415 switch (elf_section_data (p)->this_hdr.sh_type)
20417 case SHT_PROGBITS:
20418 case SHT_NOBITS:
20419 /* If sh_type is yet undecided, assume it could be
20420 SHT_PROGBITS/SHT_NOBITS. */
20421 case SHT_NULL:
20422 return false;
20424 /* There shouldn't be section relative relocations
20425 against any other section. */
20426 default:
20427 return true;
20431 #undef elf32_bed
20432 #define elf32_bed elf32_arm_fdpic_bed
20434 #undef bfd_elf32_bfd_link_hash_table_create
20435 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
20437 #undef elf_backend_omit_section_dynsym
20438 #define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
20440 #undef ELF_TARGET_OS
20442 #include "elf32-target.h"
20444 #undef elf_match_priority
20445 #undef ELF_OSABI
20446 #undef elf_backend_omit_section_dynsym
20448 /* VxWorks Targets. */
20450 #undef TARGET_LITTLE_SYM
20451 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
20452 #undef TARGET_LITTLE_NAME
20453 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
20454 #undef TARGET_BIG_SYM
20455 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
20456 #undef TARGET_BIG_NAME
20457 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
20459 /* Like elf32_arm_link_hash_table_create -- but overrides
20460 appropriately for VxWorks. */
20462 static struct bfd_link_hash_table *
20463 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20465 struct bfd_link_hash_table *ret;
20467 ret = elf32_arm_link_hash_table_create (abfd);
20468 if (ret)
20470 struct elf32_arm_link_hash_table *htab
20471 = (struct elf32_arm_link_hash_table *) ret;
20472 htab->use_rel = 0;
20474 return ret;
20477 static bool
20478 elf32_arm_vxworks_final_write_processing (bfd *abfd)
20480 arm_final_write_processing (abfd);
20481 return elf_vxworks_final_write_processing (abfd);
20484 #undef elf32_bed
20485 #define elf32_bed elf32_arm_vxworks_bed
20487 #undef bfd_elf32_bfd_link_hash_table_create
20488 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
20489 #undef elf_backend_final_write_processing
20490 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
20491 #undef elf_backend_emit_relocs
20492 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
20494 #undef elf_backend_may_use_rel_p
20495 #define elf_backend_may_use_rel_p 0
20496 #undef elf_backend_may_use_rela_p
20497 #define elf_backend_may_use_rela_p 1
20498 #undef elf_backend_default_use_rela_p
20499 #define elf_backend_default_use_rela_p 1
20500 #undef elf_backend_want_plt_sym
20501 #define elf_backend_want_plt_sym 1
20502 #undef ELF_MAXPAGESIZE
20503 #define ELF_MAXPAGESIZE 0x1000
20504 #undef ELF_TARGET_OS
20505 #define ELF_TARGET_OS is_vxworks
20507 #include "elf32-target.h"
20510 /* Merge backend specific data from an object file to the output
20511 object file when linking. */
20513 static bool
20514 elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
20516 bfd *obfd = info->output_bfd;
20517 flagword out_flags;
20518 flagword in_flags;
20519 bool flags_compatible = true;
20520 asection *sec;
20522 /* Check if we have the same endianness. */
20523 if (! _bfd_generic_verify_endian_match (ibfd, info))
20524 return false;
20526 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
20527 return true;
20529 if (!elf32_arm_merge_eabi_attributes (ibfd, info))
20530 return false;
20532 /* The input BFD must have had its flags initialised. */
20533 /* The following seems bogus to me -- The flags are initialized in
20534 the assembler but I don't think an elf_flags_init field is
20535 written into the object. */
20536 /* BFD_ASSERT (elf_flags_init (ibfd)); */
20538 in_flags = elf_elfheader (ibfd)->e_flags;
20539 out_flags = elf_elfheader (obfd)->e_flags;
20541 /* In theory there is no reason why we couldn't handle this. However
20542 in practice it isn't even close to working and there is no real
20543 reason to want it. */
20544 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
20545 && !(ibfd->flags & DYNAMIC)
20546 && (in_flags & EF_ARM_BE8))
20548 _bfd_error_handler (_("error: %pB is already in final BE8 format"),
20549 ibfd);
20550 return false;
20553 if (!elf_flags_init (obfd))
20555 /* If the input is the default architecture and had the default
20556 flags then do not bother setting the flags for the output
20557 architecture; instead allow future merges to do this. If no
20558 future merges ever set these flags then they will retain their
20559 uninitialised values which, surprise surprise, correspond
20560 to the default values. */
20561 if (bfd_get_arch_info (ibfd)->the_default
20562 && elf_elfheader (ibfd)->e_flags == 0)
20563 return true;
20565 elf_flags_init (obfd) = true;
20566 elf_elfheader (obfd)->e_flags = in_flags;
20568 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
20569 && bfd_get_arch_info (obfd)->the_default)
20570 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
20572 return true;
20575 /* Determine what should happen if the input ARM architecture
20576 does not match the output ARM architecture. */
20577 if (! bfd_arm_merge_machines (ibfd, obfd))
20578 return false;
20580 /* Identical flags must be compatible. */
20581 if (in_flags == out_flags)
20582 return true;
20584 /* Check to see if the input BFD actually contains any sections. If
20585 not, its flags may not have been initialised either, but it
20586 cannot actually cause any incompatibility. Do not short-circuit
20587 dynamic objects; their section list may be emptied by
20588 elf_link_add_object_symbols.
20590 Also check to see if there are no code sections in the input.
20591 In this case there is no need to check for code specific flags.
20592 XXX - do we need to worry about floating-point format compatibility
20593 in data sections? */
20594 if (!(ibfd->flags & DYNAMIC))
20596 bool null_input_bfd = true;
20597 bool only_data_sections = true;
20599 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
20601 /* Ignore synthetic glue sections. */
20602 if (strcmp (sec->name, ".glue_7")
20603 && strcmp (sec->name, ".glue_7t"))
20605 if ((bfd_section_flags (sec)
20606 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20607 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20608 only_data_sections = false;
20610 null_input_bfd = false;
20611 break;
20615 if (null_input_bfd || only_data_sections)
20616 return true;
20619 /* Complain about various flag mismatches. */
20620 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
20621 EF_ARM_EABI_VERSION (out_flags)))
20623 _bfd_error_handler
20624 (_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
20625 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
20626 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
20627 return false;
20630 /* Not sure what needs to be checked for EABI versions >= 1. */
20631 /* VxWorks libraries do not use these flags. */
20632 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
20633 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
20634 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
20636 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
20638 _bfd_error_handler
20639 (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
20640 ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
20641 obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
20642 flags_compatible = false;
20645 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
20647 if (in_flags & EF_ARM_APCS_FLOAT)
20648 _bfd_error_handler
20649 (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
20650 ibfd, obfd);
20651 else
20652 _bfd_error_handler
20653 (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
20654 ibfd, obfd);
20656 flags_compatible = false;
20659 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
20661 if (in_flags & EF_ARM_VFP_FLOAT)
20662 _bfd_error_handler
20663 (_("error: %pB uses %s instructions, whereas %pB does not"),
20664 ibfd, "VFP", obfd);
20665 else
20666 _bfd_error_handler
20667 (_("error: %pB uses %s instructions, whereas %pB does not"),
20668 ibfd, "FPA", obfd);
20670 flags_compatible = false;
20673 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
20675 if (in_flags & EF_ARM_MAVERICK_FLOAT)
20676 _bfd_error_handler
20677 (_("error: %pB uses %s instructions, whereas %pB does not"),
20678 ibfd, "Maverick", obfd);
20679 else
20680 _bfd_error_handler
20681 (_("error: %pB does not use %s instructions, whereas %pB does"),
20682 ibfd, "Maverick", obfd);
20684 flags_compatible = false;
20687 #ifdef EF_ARM_SOFT_FLOAT
20688 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
20690 /* We can allow interworking between code that uses the VFP format
20691 layout and either soft float or integer regs for
20692 passing floating point arguments and results. We already
20693 know that the APCS_FLOAT flags match; similarly for VFP
20694 flags. */
20695 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
20696 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
20698 if (in_flags & EF_ARM_SOFT_FLOAT)
20699 _bfd_error_handler
20700 (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
20701 ibfd, obfd);
20702 else
20703 _bfd_error_handler
20704 (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
20705 ibfd, obfd);
20707 flags_compatible = false;
20710 #endif
20712 /* Interworking mismatch is only a warning. */
20713 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
20715 if (in_flags & EF_ARM_INTERWORK)
20717 _bfd_error_handler
20718 (_("warning: %pB supports interworking, whereas %pB does not"),
20719 ibfd, obfd);
20721 else
20723 _bfd_error_handler
20724 (_("warning: %pB does not support interworking, whereas %pB does"),
20725 ibfd, obfd);
20730 return flags_compatible;