[binutils][Arm] Fix Branch Future relocation handling and testisms
[binutils-gdb.git] / bfd / elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2019 Free Software Foundation, Inc.
4 This file is part of BFD, the Binary File Descriptor library.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
21 #include "sysdep.h"
22 #include <limits.h>
24 #include "bfd.h"
25 #include "libiberty.h"
26 #include "libbfd.h"
27 #include "elf-bfd.h"
28 #include "elf-nacl.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
32 /* Return the relocation section associated with NAME. HTAB is the
33 bfd's elf32_arm_link_hash_table. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37 /* Return size of a relocation entry. HTAB is the bfd's
38 elf32_arm_link_hash_table. */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
44 /* Return function to swap relocations in. HTAB is the bfd's
45 elf32_arm_link_hash_table. */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
51 /* Return function to swap relocations out. HTAB is the bfd's
52 elf32_arm_link_hash_table. */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
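
/* A minimal usage sketch, assuming a hash table pointer HTAB whose use_rel
   field is nonzero: RELOC_SECTION (HTAB, ".dyn") expands to ".rel.dyn" and
   RELOC_SIZE (HTAB) to sizeof (Elf32_External_Rel); with use_rel clear the
   same expressions yield ".rela.dyn" and sizeof (Elf32_External_Rela).  A
   caller could therefore size a dynamic relocation section with something
   like

     srel->size = srel->reloc_count * RELOC_SIZE (htab);

   where srel and htab are hypothetical names, not taken from this file.  */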
58 #define elf_info_to_howto NULL
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
64 /* The Adjusted Place, as defined by AAELF. */
65 #define Pa(X) ((X) & 0xfffffffc)
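
/* For example, Pa (0x00008006) is 0x00008004: the low two bits of the place
   are cleared, i.e. the place is rounded down to a 32-bit boundary.  */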
67 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
68 struct bfd_link_info *link_info,
69 asection *sec,
70 bfd_byte *contents);
72 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
73 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
74 in that slot. */
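
/* A minimal lookup sketch under that assumption, handling only this first
   table (the real lookup also has to consider the other howto tables
   declared later in this file):

     static reloc_howto_type *
     howto_from_type_sketch (unsigned int r_type)
     {
       if (r_type < sizeof (elf32_arm_howto_table_1)
                    / sizeof (elf32_arm_howto_table_1[0]))
         return &elf32_arm_howto_table_1[r_type];
       return NULL;
     }

   so that, for instance, howto_from_type_sketch (R_ARM_PC24) returns the
   entry below whose name field is "R_ARM_PC24".  howto_from_type_sketch is
   a hypothetical name used only for illustration.  */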
76 static reloc_howto_type elf32_arm_howto_table_1[] =
77 {
78 /* No relocation. */
79 HOWTO (R_ARM_NONE, /* type */
80 0, /* rightshift */
81 3, /* size (0 = byte, 1 = short, 2 = long) */
82 0, /* bitsize */
83 FALSE, /* pc_relative */
84 0, /* bitpos */
85 complain_overflow_dont,/* complain_on_overflow */
86 bfd_elf_generic_reloc, /* special_function */
87 "R_ARM_NONE", /* name */
88 FALSE, /* partial_inplace */
89 0, /* src_mask */
90 0, /* dst_mask */
91 FALSE), /* pcrel_offset */
93 HOWTO (R_ARM_PC24, /* type */
94 2, /* rightshift */
95 2, /* size (0 = byte, 1 = short, 2 = long) */
96 24, /* bitsize */
97 TRUE, /* pc_relative */
98 0, /* bitpos */
99 complain_overflow_signed,/* complain_on_overflow */
100 bfd_elf_generic_reloc, /* special_function */
101 "R_ARM_PC24", /* name */
102 FALSE, /* partial_inplace */
103 0x00ffffff, /* src_mask */
104 0x00ffffff, /* dst_mask */
105 TRUE), /* pcrel_offset */
107 /* 32 bit absolute */
108 HOWTO (R_ARM_ABS32, /* type */
109 0, /* rightshift */
110 2, /* size (0 = byte, 1 = short, 2 = long) */
111 32, /* bitsize */
112 FALSE, /* pc_relative */
113 0, /* bitpos */
114 complain_overflow_bitfield,/* complain_on_overflow */
115 bfd_elf_generic_reloc, /* special_function */
116 "R_ARM_ABS32", /* name */
117 FALSE, /* partial_inplace */
118 0xffffffff, /* src_mask */
119 0xffffffff, /* dst_mask */
120 FALSE), /* pcrel_offset */
122 /* standard 32bit pc-relative reloc */
123 HOWTO (R_ARM_REL32, /* type */
124 0, /* rightshift */
125 2, /* size (0 = byte, 1 = short, 2 = long) */
126 32, /* bitsize */
127 TRUE, /* pc_relative */
128 0, /* bitpos */
129 complain_overflow_bitfield,/* complain_on_overflow */
130 bfd_elf_generic_reloc, /* special_function */
131 "R_ARM_REL32", /* name */
132 FALSE, /* partial_inplace */
133 0xffffffff, /* src_mask */
134 0xffffffff, /* dst_mask */
135 TRUE), /* pcrel_offset */
137 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
138 HOWTO (R_ARM_LDR_PC_G0, /* type */
139 0, /* rightshift */
140 0, /* size (0 = byte, 1 = short, 2 = long) */
141 32, /* bitsize */
142 TRUE, /* pc_relative */
143 0, /* bitpos */
144 complain_overflow_dont,/* complain_on_overflow */
145 bfd_elf_generic_reloc, /* special_function */
146 "R_ARM_LDR_PC_G0", /* name */
147 FALSE, /* partial_inplace */
148 0xffffffff, /* src_mask */
149 0xffffffff, /* dst_mask */
150 TRUE), /* pcrel_offset */
152 /* 16 bit absolute */
153 HOWTO (R_ARM_ABS16, /* type */
154 0, /* rightshift */
155 1, /* size (0 = byte, 1 = short, 2 = long) */
156 16, /* bitsize */
157 FALSE, /* pc_relative */
158 0, /* bitpos */
159 complain_overflow_bitfield,/* complain_on_overflow */
160 bfd_elf_generic_reloc, /* special_function */
161 "R_ARM_ABS16", /* name */
162 FALSE, /* partial_inplace */
163 0x0000ffff, /* src_mask */
164 0x0000ffff, /* dst_mask */
165 FALSE), /* pcrel_offset */
167 /* 12 bit absolute */
168 HOWTO (R_ARM_ABS12, /* type */
169 0, /* rightshift */
170 2, /* size (0 = byte, 1 = short, 2 = long) */
171 12, /* bitsize */
172 FALSE, /* pc_relative */
173 0, /* bitpos */
174 complain_overflow_bitfield,/* complain_on_overflow */
175 bfd_elf_generic_reloc, /* special_function */
176 "R_ARM_ABS12", /* name */
177 FALSE, /* partial_inplace */
178 0x00000fff, /* src_mask */
179 0x00000fff, /* dst_mask */
180 FALSE), /* pcrel_offset */
182 HOWTO (R_ARM_THM_ABS5, /* type */
183 6, /* rightshift */
184 1, /* size (0 = byte, 1 = short, 2 = long) */
185 5, /* bitsize */
186 FALSE, /* pc_relative */
187 0, /* bitpos */
188 complain_overflow_bitfield,/* complain_on_overflow */
189 bfd_elf_generic_reloc, /* special_function */
190 "R_ARM_THM_ABS5", /* name */
191 FALSE, /* partial_inplace */
192 0x000007e0, /* src_mask */
193 0x000007e0, /* dst_mask */
194 FALSE), /* pcrel_offset */
196 /* 8 bit absolute */
197 HOWTO (R_ARM_ABS8, /* type */
198 0, /* rightshift */
199 0, /* size (0 = byte, 1 = short, 2 = long) */
200 8, /* bitsize */
201 FALSE, /* pc_relative */
202 0, /* bitpos */
203 complain_overflow_bitfield,/* complain_on_overflow */
204 bfd_elf_generic_reloc, /* special_function */
205 "R_ARM_ABS8", /* name */
206 FALSE, /* partial_inplace */
207 0x000000ff, /* src_mask */
208 0x000000ff, /* dst_mask */
209 FALSE), /* pcrel_offset */
211 HOWTO (R_ARM_SBREL32, /* type */
212 0, /* rightshift */
213 2, /* size (0 = byte, 1 = short, 2 = long) */
214 32, /* bitsize */
215 FALSE, /* pc_relative */
216 0, /* bitpos */
217 complain_overflow_dont,/* complain_on_overflow */
218 bfd_elf_generic_reloc, /* special_function */
219 "R_ARM_SBREL32", /* name */
220 FALSE, /* partial_inplace */
221 0xffffffff, /* src_mask */
222 0xffffffff, /* dst_mask */
223 FALSE), /* pcrel_offset */
225 HOWTO (R_ARM_THM_CALL, /* type */
226 1, /* rightshift */
227 2, /* size (0 = byte, 1 = short, 2 = long) */
228 24, /* bitsize */
229 TRUE, /* pc_relative */
230 0, /* bitpos */
231 complain_overflow_signed,/* complain_on_overflow */
232 bfd_elf_generic_reloc, /* special_function */
233 "R_ARM_THM_CALL", /* name */
234 FALSE, /* partial_inplace */
235 0x07ff2fff, /* src_mask */
236 0x07ff2fff, /* dst_mask */
237 TRUE), /* pcrel_offset */
239 HOWTO (R_ARM_THM_PC8, /* type */
240 1, /* rightshift */
241 1, /* size (0 = byte, 1 = short, 2 = long) */
242 8, /* bitsize */
243 TRUE, /* pc_relative */
244 0, /* bitpos */
245 complain_overflow_signed,/* complain_on_overflow */
246 bfd_elf_generic_reloc, /* special_function */
247 "R_ARM_THM_PC8", /* name */
248 FALSE, /* partial_inplace */
249 0x000000ff, /* src_mask */
250 0x000000ff, /* dst_mask */
251 TRUE), /* pcrel_offset */
253 HOWTO (R_ARM_BREL_ADJ, /* type */
254 1, /* rightshift */
255 1, /* size (0 = byte, 1 = short, 2 = long) */
256 32, /* bitsize */
257 FALSE, /* pc_relative */
258 0, /* bitpos */
259 complain_overflow_signed,/* complain_on_overflow */
260 bfd_elf_generic_reloc, /* special_function */
261 "R_ARM_BREL_ADJ", /* name */
262 FALSE, /* partial_inplace */
263 0xffffffff, /* src_mask */
264 0xffffffff, /* dst_mask */
265 FALSE), /* pcrel_offset */
267 HOWTO (R_ARM_TLS_DESC, /* type */
268 0, /* rightshift */
269 2, /* size (0 = byte, 1 = short, 2 = long) */
270 32, /* bitsize */
271 FALSE, /* pc_relative */
272 0, /* bitpos */
273 complain_overflow_bitfield,/* complain_on_overflow */
274 bfd_elf_generic_reloc, /* special_function */
275 "R_ARM_TLS_DESC", /* name */
276 FALSE, /* partial_inplace */
277 0xffffffff, /* src_mask */
278 0xffffffff, /* dst_mask */
279 FALSE), /* pcrel_offset */
281 HOWTO (R_ARM_THM_SWI8, /* type */
282 0, /* rightshift */
283 0, /* size (0 = byte, 1 = short, 2 = long) */
284 0, /* bitsize */
285 FALSE, /* pc_relative */
286 0, /* bitpos */
287 complain_overflow_signed,/* complain_on_overflow */
288 bfd_elf_generic_reloc, /* special_function */
289 "R_ARM_SWI8", /* name */
290 FALSE, /* partial_inplace */
291 0x00000000, /* src_mask */
292 0x00000000, /* dst_mask */
293 FALSE), /* pcrel_offset */
295 /* BLX instruction for the ARM. */
296 HOWTO (R_ARM_XPC25, /* type */
297 2, /* rightshift */
298 2, /* size (0 = byte, 1 = short, 2 = long) */
299 24, /* bitsize */
300 TRUE, /* pc_relative */
301 0, /* bitpos */
302 complain_overflow_signed,/* complain_on_overflow */
303 bfd_elf_generic_reloc, /* special_function */
304 "R_ARM_XPC25", /* name */
305 FALSE, /* partial_inplace */
306 0x00ffffff, /* src_mask */
307 0x00ffffff, /* dst_mask */
308 TRUE), /* pcrel_offset */
310 /* BLX instruction for the Thumb. */
311 HOWTO (R_ARM_THM_XPC22, /* type */
312 2, /* rightshift */
313 2, /* size (0 = byte, 1 = short, 2 = long) */
314 24, /* bitsize */
315 TRUE, /* pc_relative */
316 0, /* bitpos */
317 complain_overflow_signed,/* complain_on_overflow */
318 bfd_elf_generic_reloc, /* special_function */
319 "R_ARM_THM_XPC22", /* name */
320 FALSE, /* partial_inplace */
321 0x07ff2fff, /* src_mask */
322 0x07ff2fff, /* dst_mask */
323 TRUE), /* pcrel_offset */
325 /* Dynamic TLS relocations. */
327 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
328 0, /* rightshift */
329 2, /* size (0 = byte, 1 = short, 2 = long) */
330 32, /* bitsize */
331 FALSE, /* pc_relative */
332 0, /* bitpos */
333 complain_overflow_bitfield,/* complain_on_overflow */
334 bfd_elf_generic_reloc, /* special_function */
335 "R_ARM_TLS_DTPMOD32", /* name */
336 TRUE, /* partial_inplace */
337 0xffffffff, /* src_mask */
338 0xffffffff, /* dst_mask */
339 FALSE), /* pcrel_offset */
341 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
342 0, /* rightshift */
343 2, /* size (0 = byte, 1 = short, 2 = long) */
344 32, /* bitsize */
345 FALSE, /* pc_relative */
346 0, /* bitpos */
347 complain_overflow_bitfield,/* complain_on_overflow */
348 bfd_elf_generic_reloc, /* special_function */
349 "R_ARM_TLS_DTPOFF32", /* name */
350 TRUE, /* partial_inplace */
351 0xffffffff, /* src_mask */
352 0xffffffff, /* dst_mask */
353 FALSE), /* pcrel_offset */
355 HOWTO (R_ARM_TLS_TPOFF32, /* type */
356 0, /* rightshift */
357 2, /* size (0 = byte, 1 = short, 2 = long) */
358 32, /* bitsize */
359 FALSE, /* pc_relative */
360 0, /* bitpos */
361 complain_overflow_bitfield,/* complain_on_overflow */
362 bfd_elf_generic_reloc, /* special_function */
363 "R_ARM_TLS_TPOFF32", /* name */
364 TRUE, /* partial_inplace */
365 0xffffffff, /* src_mask */
366 0xffffffff, /* dst_mask */
367 FALSE), /* pcrel_offset */
369 /* Relocs used in ARM Linux */
371 HOWTO (R_ARM_COPY, /* type */
372 0, /* rightshift */
373 2, /* size (0 = byte, 1 = short, 2 = long) */
374 32, /* bitsize */
375 FALSE, /* pc_relative */
376 0, /* bitpos */
377 complain_overflow_bitfield,/* complain_on_overflow */
378 bfd_elf_generic_reloc, /* special_function */
379 "R_ARM_COPY", /* name */
380 TRUE, /* partial_inplace */
381 0xffffffff, /* src_mask */
382 0xffffffff, /* dst_mask */
383 FALSE), /* pcrel_offset */
385 HOWTO (R_ARM_GLOB_DAT, /* type */
386 0, /* rightshift */
387 2, /* size (0 = byte, 1 = short, 2 = long) */
388 32, /* bitsize */
389 FALSE, /* pc_relative */
390 0, /* bitpos */
391 complain_overflow_bitfield,/* complain_on_overflow */
392 bfd_elf_generic_reloc, /* special_function */
393 "R_ARM_GLOB_DAT", /* name */
394 TRUE, /* partial_inplace */
395 0xffffffff, /* src_mask */
396 0xffffffff, /* dst_mask */
397 FALSE), /* pcrel_offset */
399 HOWTO (R_ARM_JUMP_SLOT, /* type */
400 0, /* rightshift */
401 2, /* size (0 = byte, 1 = short, 2 = long) */
402 32, /* bitsize */
403 FALSE, /* pc_relative */
404 0, /* bitpos */
405 complain_overflow_bitfield,/* complain_on_overflow */
406 bfd_elf_generic_reloc, /* special_function */
407 "R_ARM_JUMP_SLOT", /* name */
408 TRUE, /* partial_inplace */
409 0xffffffff, /* src_mask */
410 0xffffffff, /* dst_mask */
411 FALSE), /* pcrel_offset */
413 HOWTO (R_ARM_RELATIVE, /* type */
414 0, /* rightshift */
415 2, /* size (0 = byte, 1 = short, 2 = long) */
416 32, /* bitsize */
417 FALSE, /* pc_relative */
418 0, /* bitpos */
419 complain_overflow_bitfield,/* complain_on_overflow */
420 bfd_elf_generic_reloc, /* special_function */
421 "R_ARM_RELATIVE", /* name */
422 TRUE, /* partial_inplace */
423 0xffffffff, /* src_mask */
424 0xffffffff, /* dst_mask */
425 FALSE), /* pcrel_offset */
427 HOWTO (R_ARM_GOTOFF32, /* type */
428 0, /* rightshift */
429 2, /* size (0 = byte, 1 = short, 2 = long) */
430 32, /* bitsize */
431 FALSE, /* pc_relative */
432 0, /* bitpos */
433 complain_overflow_bitfield,/* complain_on_overflow */
434 bfd_elf_generic_reloc, /* special_function */
435 "R_ARM_GOTOFF32", /* name */
436 TRUE, /* partial_inplace */
437 0xffffffff, /* src_mask */
438 0xffffffff, /* dst_mask */
439 FALSE), /* pcrel_offset */
441 HOWTO (R_ARM_GOTPC, /* type */
442 0, /* rightshift */
443 2, /* size (0 = byte, 1 = short, 2 = long) */
444 32, /* bitsize */
445 TRUE, /* pc_relative */
446 0, /* bitpos */
447 complain_overflow_bitfield,/* complain_on_overflow */
448 bfd_elf_generic_reloc, /* special_function */
449 "R_ARM_GOTPC", /* name */
450 TRUE, /* partial_inplace */
451 0xffffffff, /* src_mask */
452 0xffffffff, /* dst_mask */
453 TRUE), /* pcrel_offset */
455 HOWTO (R_ARM_GOT32, /* type */
456 0, /* rightshift */
457 2, /* size (0 = byte, 1 = short, 2 = long) */
458 32, /* bitsize */
459 FALSE, /* pc_relative */
460 0, /* bitpos */
461 complain_overflow_bitfield,/* complain_on_overflow */
462 bfd_elf_generic_reloc, /* special_function */
463 "R_ARM_GOT32", /* name */
464 TRUE, /* partial_inplace */
465 0xffffffff, /* src_mask */
466 0xffffffff, /* dst_mask */
467 FALSE), /* pcrel_offset */
469 HOWTO (R_ARM_PLT32, /* type */
470 2, /* rightshift */
471 2, /* size (0 = byte, 1 = short, 2 = long) */
472 24, /* bitsize */
473 TRUE, /* pc_relative */
474 0, /* bitpos */
475 complain_overflow_bitfield,/* complain_on_overflow */
476 bfd_elf_generic_reloc, /* special_function */
477 "R_ARM_PLT32", /* name */
478 FALSE, /* partial_inplace */
479 0x00ffffff, /* src_mask */
480 0x00ffffff, /* dst_mask */
481 TRUE), /* pcrel_offset */
483 HOWTO (R_ARM_CALL, /* type */
484 2, /* rightshift */
485 2, /* size (0 = byte, 1 = short, 2 = long) */
486 24, /* bitsize */
487 TRUE, /* pc_relative */
488 0, /* bitpos */
489 complain_overflow_signed,/* complain_on_overflow */
490 bfd_elf_generic_reloc, /* special_function */
491 "R_ARM_CALL", /* name */
492 FALSE, /* partial_inplace */
493 0x00ffffff, /* src_mask */
494 0x00ffffff, /* dst_mask */
495 TRUE), /* pcrel_offset */
497 HOWTO (R_ARM_JUMP24, /* type */
498 2, /* rightshift */
499 2, /* size (0 = byte, 1 = short, 2 = long) */
500 24, /* bitsize */
501 TRUE, /* pc_relative */
502 0, /* bitpos */
503 complain_overflow_signed,/* complain_on_overflow */
504 bfd_elf_generic_reloc, /* special_function */
505 "R_ARM_JUMP24", /* name */
506 FALSE, /* partial_inplace */
507 0x00ffffff, /* src_mask */
508 0x00ffffff, /* dst_mask */
509 TRUE), /* pcrel_offset */
511 HOWTO (R_ARM_THM_JUMP24, /* type */
512 1, /* rightshift */
513 2, /* size (0 = byte, 1 = short, 2 = long) */
514 24, /* bitsize */
515 TRUE, /* pc_relative */
516 0, /* bitpos */
517 complain_overflow_signed,/* complain_on_overflow */
518 bfd_elf_generic_reloc, /* special_function */
519 "R_ARM_THM_JUMP24", /* name */
520 FALSE, /* partial_inplace */
521 0x07ff2fff, /* src_mask */
522 0x07ff2fff, /* dst_mask */
523 TRUE), /* pcrel_offset */
525 HOWTO (R_ARM_BASE_ABS, /* type */
526 0, /* rightshift */
527 2, /* size (0 = byte, 1 = short, 2 = long) */
528 32, /* bitsize */
529 FALSE, /* pc_relative */
530 0, /* bitpos */
531 complain_overflow_dont,/* complain_on_overflow */
532 bfd_elf_generic_reloc, /* special_function */
533 "R_ARM_BASE_ABS", /* name */
534 FALSE, /* partial_inplace */
535 0xffffffff, /* src_mask */
536 0xffffffff, /* dst_mask */
537 FALSE), /* pcrel_offset */
539 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
540 0, /* rightshift */
541 2, /* size (0 = byte, 1 = short, 2 = long) */
542 12, /* bitsize */
543 TRUE, /* pc_relative */
544 0, /* bitpos */
545 complain_overflow_dont,/* complain_on_overflow */
546 bfd_elf_generic_reloc, /* special_function */
547 "R_ARM_ALU_PCREL_7_0", /* name */
548 FALSE, /* partial_inplace */
549 0x00000fff, /* src_mask */
550 0x00000fff, /* dst_mask */
551 TRUE), /* pcrel_offset */
553 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
554 0, /* rightshift */
555 2, /* size (0 = byte, 1 = short, 2 = long) */
556 12, /* bitsize */
557 TRUE, /* pc_relative */
558 8, /* bitpos */
559 complain_overflow_dont,/* complain_on_overflow */
560 bfd_elf_generic_reloc, /* special_function */
561 "R_ARM_ALU_PCREL_15_8",/* name */
562 FALSE, /* partial_inplace */
563 0x00000fff, /* src_mask */
564 0x00000fff, /* dst_mask */
565 TRUE), /* pcrel_offset */
567 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
568 0, /* rightshift */
569 2, /* size (0 = byte, 1 = short, 2 = long) */
570 12, /* bitsize */
571 TRUE, /* pc_relative */
572 16, /* bitpos */
573 complain_overflow_dont,/* complain_on_overflow */
574 bfd_elf_generic_reloc, /* special_function */
575 "R_ARM_ALU_PCREL_23_15",/* name */
576 FALSE, /* partial_inplace */
577 0x00000fff, /* src_mask */
578 0x00000fff, /* dst_mask */
579 TRUE), /* pcrel_offset */
581 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
582 0, /* rightshift */
583 2, /* size (0 = byte, 1 = short, 2 = long) */
584 12, /* bitsize */
585 FALSE, /* pc_relative */
586 0, /* bitpos */
587 complain_overflow_dont,/* complain_on_overflow */
588 bfd_elf_generic_reloc, /* special_function */
589 "R_ARM_LDR_SBREL_11_0",/* name */
590 FALSE, /* partial_inplace */
591 0x00000fff, /* src_mask */
592 0x00000fff, /* dst_mask */
593 FALSE), /* pcrel_offset */
595 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
596 0, /* rightshift */
597 2, /* size (0 = byte, 1 = short, 2 = long) */
598 8, /* bitsize */
599 FALSE, /* pc_relative */
600 12, /* bitpos */
601 complain_overflow_dont,/* complain_on_overflow */
602 bfd_elf_generic_reloc, /* special_function */
603 "R_ARM_ALU_SBREL_19_12",/* name */
604 FALSE, /* partial_inplace */
605 0x000ff000, /* src_mask */
606 0x000ff000, /* dst_mask */
607 FALSE), /* pcrel_offset */
609 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
610 0, /* rightshift */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
612 8, /* bitsize */
613 FALSE, /* pc_relative */
614 20, /* bitpos */
615 complain_overflow_dont,/* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 "R_ARM_ALU_SBREL_27_20",/* name */
618 FALSE, /* partial_inplace */
619 0x0ff00000, /* src_mask */
620 0x0ff00000, /* dst_mask */
621 FALSE), /* pcrel_offset */
623 HOWTO (R_ARM_TARGET1, /* type */
624 0, /* rightshift */
625 2, /* size (0 = byte, 1 = short, 2 = long) */
626 32, /* bitsize */
627 FALSE, /* pc_relative */
628 0, /* bitpos */
629 complain_overflow_dont,/* complain_on_overflow */
630 bfd_elf_generic_reloc, /* special_function */
631 "R_ARM_TARGET1", /* name */
632 FALSE, /* partial_inplace */
633 0xffffffff, /* src_mask */
634 0xffffffff, /* dst_mask */
635 FALSE), /* pcrel_offset */
637 HOWTO (R_ARM_ROSEGREL32, /* type */
638 0, /* rightshift */
639 2, /* size (0 = byte, 1 = short, 2 = long) */
640 32, /* bitsize */
641 FALSE, /* pc_relative */
642 0, /* bitpos */
643 complain_overflow_dont,/* complain_on_overflow */
644 bfd_elf_generic_reloc, /* special_function */
645 "R_ARM_ROSEGREL32", /* name */
646 FALSE, /* partial_inplace */
647 0xffffffff, /* src_mask */
648 0xffffffff, /* dst_mask */
649 FALSE), /* pcrel_offset */
651 HOWTO (R_ARM_V4BX, /* type */
652 0, /* rightshift */
653 2, /* size (0 = byte, 1 = short, 2 = long) */
654 32, /* bitsize */
655 FALSE, /* pc_relative */
656 0, /* bitpos */
657 complain_overflow_dont,/* complain_on_overflow */
658 bfd_elf_generic_reloc, /* special_function */
659 "R_ARM_V4BX", /* name */
660 FALSE, /* partial_inplace */
661 0xffffffff, /* src_mask */
662 0xffffffff, /* dst_mask */
663 FALSE), /* pcrel_offset */
665 HOWTO (R_ARM_TARGET2, /* type */
666 0, /* rightshift */
667 2, /* size (0 = byte, 1 = short, 2 = long) */
668 32, /* bitsize */
669 FALSE, /* pc_relative */
670 0, /* bitpos */
671 complain_overflow_signed,/* complain_on_overflow */
672 bfd_elf_generic_reloc, /* special_function */
673 "R_ARM_TARGET2", /* name */
674 FALSE, /* partial_inplace */
675 0xffffffff, /* src_mask */
676 0xffffffff, /* dst_mask */
677 TRUE), /* pcrel_offset */
679 HOWTO (R_ARM_PREL31, /* type */
680 0, /* rightshift */
681 2, /* size (0 = byte, 1 = short, 2 = long) */
682 31, /* bitsize */
683 TRUE, /* pc_relative */
684 0, /* bitpos */
685 complain_overflow_signed,/* complain_on_overflow */
686 bfd_elf_generic_reloc, /* special_function */
687 "R_ARM_PREL31", /* name */
688 FALSE, /* partial_inplace */
689 0x7fffffff, /* src_mask */
690 0x7fffffff, /* dst_mask */
691 TRUE), /* pcrel_offset */
693 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
694 0, /* rightshift */
695 2, /* size (0 = byte, 1 = short, 2 = long) */
696 16, /* bitsize */
697 FALSE, /* pc_relative */
698 0, /* bitpos */
699 complain_overflow_dont,/* complain_on_overflow */
700 bfd_elf_generic_reloc, /* special_function */
701 "R_ARM_MOVW_ABS_NC", /* name */
702 FALSE, /* partial_inplace */
703 0x000f0fff, /* src_mask */
704 0x000f0fff, /* dst_mask */
705 FALSE), /* pcrel_offset */
707 HOWTO (R_ARM_MOVT_ABS, /* type */
708 0, /* rightshift */
709 2, /* size (0 = byte, 1 = short, 2 = long) */
710 16, /* bitsize */
711 FALSE, /* pc_relative */
712 0, /* bitpos */
713 complain_overflow_bitfield,/* complain_on_overflow */
714 bfd_elf_generic_reloc, /* special_function */
715 "R_ARM_MOVT_ABS", /* name */
716 FALSE, /* partial_inplace */
717 0x000f0fff, /* src_mask */
718 0x000f0fff, /* dst_mask */
719 FALSE), /* pcrel_offset */
721 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
722 0, /* rightshift */
723 2, /* size (0 = byte, 1 = short, 2 = long) */
724 16, /* bitsize */
725 TRUE, /* pc_relative */
726 0, /* bitpos */
727 complain_overflow_dont,/* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 "R_ARM_MOVW_PREL_NC", /* name */
730 FALSE, /* partial_inplace */
731 0x000f0fff, /* src_mask */
732 0x000f0fff, /* dst_mask */
733 TRUE), /* pcrel_offset */
735 HOWTO (R_ARM_MOVT_PREL, /* type */
736 0, /* rightshift */
737 2, /* size (0 = byte, 1 = short, 2 = long) */
738 16, /* bitsize */
739 TRUE, /* pc_relative */
740 0, /* bitpos */
741 complain_overflow_bitfield,/* complain_on_overflow */
742 bfd_elf_generic_reloc, /* special_function */
743 "R_ARM_MOVT_PREL", /* name */
744 FALSE, /* partial_inplace */
745 0x000f0fff, /* src_mask */
746 0x000f0fff, /* dst_mask */
747 TRUE), /* pcrel_offset */
749 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
750 0, /* rightshift */
751 2, /* size (0 = byte, 1 = short, 2 = long) */
752 16, /* bitsize */
753 FALSE, /* pc_relative */
754 0, /* bitpos */
755 complain_overflow_dont,/* complain_on_overflow */
756 bfd_elf_generic_reloc, /* special_function */
757 "R_ARM_THM_MOVW_ABS_NC",/* name */
758 FALSE, /* partial_inplace */
759 0x040f70ff, /* src_mask */
760 0x040f70ff, /* dst_mask */
761 FALSE), /* pcrel_offset */
763 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
764 0, /* rightshift */
765 2, /* size (0 = byte, 1 = short, 2 = long) */
766 16, /* bitsize */
767 FALSE, /* pc_relative */
768 0, /* bitpos */
769 complain_overflow_bitfield,/* complain_on_overflow */
770 bfd_elf_generic_reloc, /* special_function */
771 "R_ARM_THM_MOVT_ABS", /* name */
772 FALSE, /* partial_inplace */
773 0x040f70ff, /* src_mask */
774 0x040f70ff, /* dst_mask */
775 FALSE), /* pcrel_offset */
777 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
778 0, /* rightshift */
779 2, /* size (0 = byte, 1 = short, 2 = long) */
780 16, /* bitsize */
781 TRUE, /* pc_relative */
782 0, /* bitpos */
783 complain_overflow_dont,/* complain_on_overflow */
784 bfd_elf_generic_reloc, /* special_function */
785 "R_ARM_THM_MOVW_PREL_NC",/* name */
786 FALSE, /* partial_inplace */
787 0x040f70ff, /* src_mask */
788 0x040f70ff, /* dst_mask */
789 TRUE), /* pcrel_offset */
791 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
792 0, /* rightshift */
793 2, /* size (0 = byte, 1 = short, 2 = long) */
794 16, /* bitsize */
795 TRUE, /* pc_relative */
796 0, /* bitpos */
797 complain_overflow_bitfield,/* complain_on_overflow */
798 bfd_elf_generic_reloc, /* special_function */
799 "R_ARM_THM_MOVT_PREL", /* name */
800 FALSE, /* partial_inplace */
801 0x040f70ff, /* src_mask */
802 0x040f70ff, /* dst_mask */
803 TRUE), /* pcrel_offset */
805 HOWTO (R_ARM_THM_JUMP19, /* type */
806 1, /* rightshift */
807 2, /* size (0 = byte, 1 = short, 2 = long) */
808 19, /* bitsize */
809 TRUE, /* pc_relative */
810 0, /* bitpos */
811 complain_overflow_signed,/* complain_on_overflow */
812 bfd_elf_generic_reloc, /* special_function */
813 "R_ARM_THM_JUMP19", /* name */
814 FALSE, /* partial_inplace */
815 0x043f2fff, /* src_mask */
816 0x043f2fff, /* dst_mask */
817 TRUE), /* pcrel_offset */
819 HOWTO (R_ARM_THM_JUMP6, /* type */
820 1, /* rightshift */
821 1, /* size (0 = byte, 1 = short, 2 = long) */
822 6, /* bitsize */
823 TRUE, /* pc_relative */
824 0, /* bitpos */
825 complain_overflow_unsigned,/* complain_on_overflow */
826 bfd_elf_generic_reloc, /* special_function */
827 "R_ARM_THM_JUMP6", /* name */
828 FALSE, /* partial_inplace */
829 0x02f8, /* src_mask */
830 0x02f8, /* dst_mask */
831 TRUE), /* pcrel_offset */
833 /* These are declared as 13-bit signed relocations because we can
834 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
835 versa. */
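
/* For example, with this convention a byte offset of -100 needs no negative
   immediate at all: the relocation code can rewrite an ADDW into the
   corresponding SUBW and encode the magnitude 100, which is why the full
   -4095 .. +4095 range is reachable even though the instruction field itself
   only holds a 12-bit magnitude.  */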
836 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
837 0, /* rightshift */
838 2, /* size (0 = byte, 1 = short, 2 = long) */
839 13, /* bitsize */
840 TRUE, /* pc_relative */
841 0, /* bitpos */
842 complain_overflow_dont,/* complain_on_overflow */
843 bfd_elf_generic_reloc, /* special_function */
844 "R_ARM_THM_ALU_PREL_11_0",/* name */
845 FALSE, /* partial_inplace */
846 0xffffffff, /* src_mask */
847 0xffffffff, /* dst_mask */
848 TRUE), /* pcrel_offset */
850 HOWTO (R_ARM_THM_PC12, /* type */
851 0, /* rightshift */
852 2, /* size (0 = byte, 1 = short, 2 = long) */
853 13, /* bitsize */
854 TRUE, /* pc_relative */
855 0, /* bitpos */
856 complain_overflow_dont,/* complain_on_overflow */
857 bfd_elf_generic_reloc, /* special_function */
858 "R_ARM_THM_PC12", /* name */
859 FALSE, /* partial_inplace */
860 0xffffffff, /* src_mask */
861 0xffffffff, /* dst_mask */
862 TRUE), /* pcrel_offset */
864 HOWTO (R_ARM_ABS32_NOI, /* type */
865 0, /* rightshift */
866 2, /* size (0 = byte, 1 = short, 2 = long) */
867 32, /* bitsize */
868 FALSE, /* pc_relative */
869 0, /* bitpos */
870 complain_overflow_dont,/* complain_on_overflow */
871 bfd_elf_generic_reloc, /* special_function */
872 "R_ARM_ABS32_NOI", /* name */
873 FALSE, /* partial_inplace */
874 0xffffffff, /* src_mask */
875 0xffffffff, /* dst_mask */
876 FALSE), /* pcrel_offset */
878 HOWTO (R_ARM_REL32_NOI, /* type */
879 0, /* rightshift */
880 2, /* size (0 = byte, 1 = short, 2 = long) */
881 32, /* bitsize */
882 TRUE, /* pc_relative */
883 0, /* bitpos */
884 complain_overflow_dont,/* complain_on_overflow */
885 bfd_elf_generic_reloc, /* special_function */
886 "R_ARM_REL32_NOI", /* name */
887 FALSE, /* partial_inplace */
888 0xffffffff, /* src_mask */
889 0xffffffff, /* dst_mask */
890 FALSE), /* pcrel_offset */
892 /* Group relocations. */
894 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
895 0, /* rightshift */
896 2, /* size (0 = byte, 1 = short, 2 = long) */
897 32, /* bitsize */
898 TRUE, /* pc_relative */
899 0, /* bitpos */
900 complain_overflow_dont,/* complain_on_overflow */
901 bfd_elf_generic_reloc, /* special_function */
902 "R_ARM_ALU_PC_G0_NC", /* name */
903 FALSE, /* partial_inplace */
904 0xffffffff, /* src_mask */
905 0xffffffff, /* dst_mask */
906 TRUE), /* pcrel_offset */
908 HOWTO (R_ARM_ALU_PC_G0, /* type */
909 0, /* rightshift */
910 2, /* size (0 = byte, 1 = short, 2 = long) */
911 32, /* bitsize */
912 TRUE, /* pc_relative */
913 0, /* bitpos */
914 complain_overflow_dont,/* complain_on_overflow */
915 bfd_elf_generic_reloc, /* special_function */
916 "R_ARM_ALU_PC_G0", /* name */
917 FALSE, /* partial_inplace */
918 0xffffffff, /* src_mask */
919 0xffffffff, /* dst_mask */
920 TRUE), /* pcrel_offset */
922 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
923 0, /* rightshift */
924 2, /* size (0 = byte, 1 = short, 2 = long) */
925 32, /* bitsize */
926 TRUE, /* pc_relative */
927 0, /* bitpos */
928 complain_overflow_dont,/* complain_on_overflow */
929 bfd_elf_generic_reloc, /* special_function */
930 "R_ARM_ALU_PC_G1_NC", /* name */
931 FALSE, /* partial_inplace */
932 0xffffffff, /* src_mask */
933 0xffffffff, /* dst_mask */
934 TRUE), /* pcrel_offset */
936 HOWTO (R_ARM_ALU_PC_G1, /* type */
937 0, /* rightshift */
938 2, /* size (0 = byte, 1 = short, 2 = long) */
939 32, /* bitsize */
940 TRUE, /* pc_relative */
941 0, /* bitpos */
942 complain_overflow_dont,/* complain_on_overflow */
943 bfd_elf_generic_reloc, /* special_function */
944 "R_ARM_ALU_PC_G1", /* name */
945 FALSE, /* partial_inplace */
946 0xffffffff, /* src_mask */
947 0xffffffff, /* dst_mask */
948 TRUE), /* pcrel_offset */
950 HOWTO (R_ARM_ALU_PC_G2, /* type */
951 0, /* rightshift */
952 2, /* size (0 = byte, 1 = short, 2 = long) */
953 32, /* bitsize */
954 TRUE, /* pc_relative */
955 0, /* bitpos */
956 complain_overflow_dont,/* complain_on_overflow */
957 bfd_elf_generic_reloc, /* special_function */
958 "R_ARM_ALU_PC_G2", /* name */
959 FALSE, /* partial_inplace */
960 0xffffffff, /* src_mask */
961 0xffffffff, /* dst_mask */
962 TRUE), /* pcrel_offset */
964 HOWTO (R_ARM_LDR_PC_G1, /* type */
965 0, /* rightshift */
966 2, /* size (0 = byte, 1 = short, 2 = long) */
967 32, /* bitsize */
968 TRUE, /* pc_relative */
969 0, /* bitpos */
970 complain_overflow_dont,/* complain_on_overflow */
971 bfd_elf_generic_reloc, /* special_function */
972 "R_ARM_LDR_PC_G1", /* name */
973 FALSE, /* partial_inplace */
974 0xffffffff, /* src_mask */
975 0xffffffff, /* dst_mask */
976 TRUE), /* pcrel_offset */
978 HOWTO (R_ARM_LDR_PC_G2, /* type */
979 0, /* rightshift */
980 2, /* size (0 = byte, 1 = short, 2 = long) */
981 32, /* bitsize */
982 TRUE, /* pc_relative */
983 0, /* bitpos */
984 complain_overflow_dont,/* complain_on_overflow */
985 bfd_elf_generic_reloc, /* special_function */
986 "R_ARM_LDR_PC_G2", /* name */
987 FALSE, /* partial_inplace */
988 0xffffffff, /* src_mask */
989 0xffffffff, /* dst_mask */
990 TRUE), /* pcrel_offset */
992 HOWTO (R_ARM_LDRS_PC_G0, /* type */
993 0, /* rightshift */
994 2, /* size (0 = byte, 1 = short, 2 = long) */
995 32, /* bitsize */
996 TRUE, /* pc_relative */
997 0, /* bitpos */
998 complain_overflow_dont,/* complain_on_overflow */
999 bfd_elf_generic_reloc, /* special_function */
1000 "R_ARM_LDRS_PC_G0", /* name */
1001 FALSE, /* partial_inplace */
1002 0xffffffff, /* src_mask */
1003 0xffffffff, /* dst_mask */
1004 TRUE), /* pcrel_offset */
1006 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1007 0, /* rightshift */
1008 2, /* size (0 = byte, 1 = short, 2 = long) */
1009 32, /* bitsize */
1010 TRUE, /* pc_relative */
1011 0, /* bitpos */
1012 complain_overflow_dont,/* complain_on_overflow */
1013 bfd_elf_generic_reloc, /* special_function */
1014 "R_ARM_LDRS_PC_G1", /* name */
1015 FALSE, /* partial_inplace */
1016 0xffffffff, /* src_mask */
1017 0xffffffff, /* dst_mask */
1018 TRUE), /* pcrel_offset */
1020 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1021 0, /* rightshift */
1022 2, /* size (0 = byte, 1 = short, 2 = long) */
1023 32, /* bitsize */
1024 TRUE, /* pc_relative */
1025 0, /* bitpos */
1026 complain_overflow_dont,/* complain_on_overflow */
1027 bfd_elf_generic_reloc, /* special_function */
1028 "R_ARM_LDRS_PC_G2", /* name */
1029 FALSE, /* partial_inplace */
1030 0xffffffff, /* src_mask */
1031 0xffffffff, /* dst_mask */
1032 TRUE), /* pcrel_offset */
1034 HOWTO (R_ARM_LDC_PC_G0, /* type */
1035 0, /* rightshift */
1036 2, /* size (0 = byte, 1 = short, 2 = long) */
1037 32, /* bitsize */
1038 TRUE, /* pc_relative */
1039 0, /* bitpos */
1040 complain_overflow_dont,/* complain_on_overflow */
1041 bfd_elf_generic_reloc, /* special_function */
1042 "R_ARM_LDC_PC_G0", /* name */
1043 FALSE, /* partial_inplace */
1044 0xffffffff, /* src_mask */
1045 0xffffffff, /* dst_mask */
1046 TRUE), /* pcrel_offset */
1048 HOWTO (R_ARM_LDC_PC_G1, /* type */
1049 0, /* rightshift */
1050 2, /* size (0 = byte, 1 = short, 2 = long) */
1051 32, /* bitsize */
1052 TRUE, /* pc_relative */
1053 0, /* bitpos */
1054 complain_overflow_dont,/* complain_on_overflow */
1055 bfd_elf_generic_reloc, /* special_function */
1056 "R_ARM_LDC_PC_G1", /* name */
1057 FALSE, /* partial_inplace */
1058 0xffffffff, /* src_mask */
1059 0xffffffff, /* dst_mask */
1060 TRUE), /* pcrel_offset */
1062 HOWTO (R_ARM_LDC_PC_G2, /* type */
1063 0, /* rightshift */
1064 2, /* size (0 = byte, 1 = short, 2 = long) */
1065 32, /* bitsize */
1066 TRUE, /* pc_relative */
1067 0, /* bitpos */
1068 complain_overflow_dont,/* complain_on_overflow */
1069 bfd_elf_generic_reloc, /* special_function */
1070 "R_ARM_LDC_PC_G2", /* name */
1071 FALSE, /* partial_inplace */
1072 0xffffffff, /* src_mask */
1073 0xffffffff, /* dst_mask */
1074 TRUE), /* pcrel_offset */
1076 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1077 0, /* rightshift */
1078 2, /* size (0 = byte, 1 = short, 2 = long) */
1079 32, /* bitsize */
1080 TRUE, /* pc_relative */
1081 0, /* bitpos */
1082 complain_overflow_dont,/* complain_on_overflow */
1083 bfd_elf_generic_reloc, /* special_function */
1084 "R_ARM_ALU_SB_G0_NC", /* name */
1085 FALSE, /* partial_inplace */
1086 0xffffffff, /* src_mask */
1087 0xffffffff, /* dst_mask */
1088 TRUE), /* pcrel_offset */
1090 HOWTO (R_ARM_ALU_SB_G0, /* type */
1091 0, /* rightshift */
1092 2, /* size (0 = byte, 1 = short, 2 = long) */
1093 32, /* bitsize */
1094 TRUE, /* pc_relative */
1095 0, /* bitpos */
1096 complain_overflow_dont,/* complain_on_overflow */
1097 bfd_elf_generic_reloc, /* special_function */
1098 "R_ARM_ALU_SB_G0", /* name */
1099 FALSE, /* partial_inplace */
1100 0xffffffff, /* src_mask */
1101 0xffffffff, /* dst_mask */
1102 TRUE), /* pcrel_offset */
1104 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1105 0, /* rightshift */
1106 2, /* size (0 = byte, 1 = short, 2 = long) */
1107 32, /* bitsize */
1108 TRUE, /* pc_relative */
1109 0, /* bitpos */
1110 complain_overflow_dont,/* complain_on_overflow */
1111 bfd_elf_generic_reloc, /* special_function */
1112 "R_ARM_ALU_SB_G1_NC", /* name */
1113 FALSE, /* partial_inplace */
1114 0xffffffff, /* src_mask */
1115 0xffffffff, /* dst_mask */
1116 TRUE), /* pcrel_offset */
1118 HOWTO (R_ARM_ALU_SB_G1, /* type */
1119 0, /* rightshift */
1120 2, /* size (0 = byte, 1 = short, 2 = long) */
1121 32, /* bitsize */
1122 TRUE, /* pc_relative */
1123 0, /* bitpos */
1124 complain_overflow_dont,/* complain_on_overflow */
1125 bfd_elf_generic_reloc, /* special_function */
1126 "R_ARM_ALU_SB_G1", /* name */
1127 FALSE, /* partial_inplace */
1128 0xffffffff, /* src_mask */
1129 0xffffffff, /* dst_mask */
1130 TRUE), /* pcrel_offset */
1132 HOWTO (R_ARM_ALU_SB_G2, /* type */
1133 0, /* rightshift */
1134 2, /* size (0 = byte, 1 = short, 2 = long) */
1135 32, /* bitsize */
1136 TRUE, /* pc_relative */
1137 0, /* bitpos */
1138 complain_overflow_dont,/* complain_on_overflow */
1139 bfd_elf_generic_reloc, /* special_function */
1140 "R_ARM_ALU_SB_G2", /* name */
1141 FALSE, /* partial_inplace */
1142 0xffffffff, /* src_mask */
1143 0xffffffff, /* dst_mask */
1144 TRUE), /* pcrel_offset */
1146 HOWTO (R_ARM_LDR_SB_G0, /* type */
1147 0, /* rightshift */
1148 2, /* size (0 = byte, 1 = short, 2 = long) */
1149 32, /* bitsize */
1150 TRUE, /* pc_relative */
1151 0, /* bitpos */
1152 complain_overflow_dont,/* complain_on_overflow */
1153 bfd_elf_generic_reloc, /* special_function */
1154 "R_ARM_LDR_SB_G0", /* name */
1155 FALSE, /* partial_inplace */
1156 0xffffffff, /* src_mask */
1157 0xffffffff, /* dst_mask */
1158 TRUE), /* pcrel_offset */
1160 HOWTO (R_ARM_LDR_SB_G1, /* type */
1161 0, /* rightshift */
1162 2, /* size (0 = byte, 1 = short, 2 = long) */
1163 32, /* bitsize */
1164 TRUE, /* pc_relative */
1165 0, /* bitpos */
1166 complain_overflow_dont,/* complain_on_overflow */
1167 bfd_elf_generic_reloc, /* special_function */
1168 "R_ARM_LDR_SB_G1", /* name */
1169 FALSE, /* partial_inplace */
1170 0xffffffff, /* src_mask */
1171 0xffffffff, /* dst_mask */
1172 TRUE), /* pcrel_offset */
1174 HOWTO (R_ARM_LDR_SB_G2, /* type */
1175 0, /* rightshift */
1176 2, /* size (0 = byte, 1 = short, 2 = long) */
1177 32, /* bitsize */
1178 TRUE, /* pc_relative */
1179 0, /* bitpos */
1180 complain_overflow_dont,/* complain_on_overflow */
1181 bfd_elf_generic_reloc, /* special_function */
1182 "R_ARM_LDR_SB_G2", /* name */
1183 FALSE, /* partial_inplace */
1184 0xffffffff, /* src_mask */
1185 0xffffffff, /* dst_mask */
1186 TRUE), /* pcrel_offset */
1188 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1189 0, /* rightshift */
1190 2, /* size (0 = byte, 1 = short, 2 = long) */
1191 32, /* bitsize */
1192 TRUE, /* pc_relative */
1193 0, /* bitpos */
1194 complain_overflow_dont,/* complain_on_overflow */
1195 bfd_elf_generic_reloc, /* special_function */
1196 "R_ARM_LDRS_SB_G0", /* name */
1197 FALSE, /* partial_inplace */
1198 0xffffffff, /* src_mask */
1199 0xffffffff, /* dst_mask */
1200 TRUE), /* pcrel_offset */
1202 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1203 0, /* rightshift */
1204 2, /* size (0 = byte, 1 = short, 2 = long) */
1205 32, /* bitsize */
1206 TRUE, /* pc_relative */
1207 0, /* bitpos */
1208 complain_overflow_dont,/* complain_on_overflow */
1209 bfd_elf_generic_reloc, /* special_function */
1210 "R_ARM_LDRS_SB_G1", /* name */
1211 FALSE, /* partial_inplace */
1212 0xffffffff, /* src_mask */
1213 0xffffffff, /* dst_mask */
1214 TRUE), /* pcrel_offset */
1216 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1217 0, /* rightshift */
1218 2, /* size (0 = byte, 1 = short, 2 = long) */
1219 32, /* bitsize */
1220 TRUE, /* pc_relative */
1221 0, /* bitpos */
1222 complain_overflow_dont,/* complain_on_overflow */
1223 bfd_elf_generic_reloc, /* special_function */
1224 "R_ARM_LDRS_SB_G2", /* name */
1225 FALSE, /* partial_inplace */
1226 0xffffffff, /* src_mask */
1227 0xffffffff, /* dst_mask */
1228 TRUE), /* pcrel_offset */
1230 HOWTO (R_ARM_LDC_SB_G0, /* type */
1231 0, /* rightshift */
1232 2, /* size (0 = byte, 1 = short, 2 = long) */
1233 32, /* bitsize */
1234 TRUE, /* pc_relative */
1235 0, /* bitpos */
1236 complain_overflow_dont,/* complain_on_overflow */
1237 bfd_elf_generic_reloc, /* special_function */
1238 "R_ARM_LDC_SB_G0", /* name */
1239 FALSE, /* partial_inplace */
1240 0xffffffff, /* src_mask */
1241 0xffffffff, /* dst_mask */
1242 TRUE), /* pcrel_offset */
1244 HOWTO (R_ARM_LDC_SB_G1, /* type */
1245 0, /* rightshift */
1246 2, /* size (0 = byte, 1 = short, 2 = long) */
1247 32, /* bitsize */
1248 TRUE, /* pc_relative */
1249 0, /* bitpos */
1250 complain_overflow_dont,/* complain_on_overflow */
1251 bfd_elf_generic_reloc, /* special_function */
1252 "R_ARM_LDC_SB_G1", /* name */
1253 FALSE, /* partial_inplace */
1254 0xffffffff, /* src_mask */
1255 0xffffffff, /* dst_mask */
1256 TRUE), /* pcrel_offset */
1258 HOWTO (R_ARM_LDC_SB_G2, /* type */
1259 0, /* rightshift */
1260 2, /* size (0 = byte, 1 = short, 2 = long) */
1261 32, /* bitsize */
1262 TRUE, /* pc_relative */
1263 0, /* bitpos */
1264 complain_overflow_dont,/* complain_on_overflow */
1265 bfd_elf_generic_reloc, /* special_function */
1266 "R_ARM_LDC_SB_G2", /* name */
1267 FALSE, /* partial_inplace */
1268 0xffffffff, /* src_mask */
1269 0xffffffff, /* dst_mask */
1270 TRUE), /* pcrel_offset */
1272 /* End of group relocations. */
1274 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1275 0, /* rightshift */
1276 2, /* size (0 = byte, 1 = short, 2 = long) */
1277 16, /* bitsize */
1278 FALSE, /* pc_relative */
1279 0, /* bitpos */
1280 complain_overflow_dont,/* complain_on_overflow */
1281 bfd_elf_generic_reloc, /* special_function */
1282 "R_ARM_MOVW_BREL_NC", /* name */
1283 FALSE, /* partial_inplace */
1284 0x0000ffff, /* src_mask */
1285 0x0000ffff, /* dst_mask */
1286 FALSE), /* pcrel_offset */
1288 HOWTO (R_ARM_MOVT_BREL, /* type */
1289 0, /* rightshift */
1290 2, /* size (0 = byte, 1 = short, 2 = long) */
1291 16, /* bitsize */
1292 FALSE, /* pc_relative */
1293 0, /* bitpos */
1294 complain_overflow_bitfield,/* complain_on_overflow */
1295 bfd_elf_generic_reloc, /* special_function */
1296 "R_ARM_MOVT_BREL", /* name */
1297 FALSE, /* partial_inplace */
1298 0x0000ffff, /* src_mask */
1299 0x0000ffff, /* dst_mask */
1300 FALSE), /* pcrel_offset */
1302 HOWTO (R_ARM_MOVW_BREL, /* type */
1303 0, /* rightshift */
1304 2, /* size (0 = byte, 1 = short, 2 = long) */
1305 16, /* bitsize */
1306 FALSE, /* pc_relative */
1307 0, /* bitpos */
1308 complain_overflow_dont,/* complain_on_overflow */
1309 bfd_elf_generic_reloc, /* special_function */
1310 "R_ARM_MOVW_BREL", /* name */
1311 FALSE, /* partial_inplace */
1312 0x0000ffff, /* src_mask */
1313 0x0000ffff, /* dst_mask */
1314 FALSE), /* pcrel_offset */
1316 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1317 0, /* rightshift */
1318 2, /* size (0 = byte, 1 = short, 2 = long) */
1319 16, /* bitsize */
1320 FALSE, /* pc_relative */
1321 0, /* bitpos */
1322 complain_overflow_dont,/* complain_on_overflow */
1323 bfd_elf_generic_reloc, /* special_function */
1324 "R_ARM_THM_MOVW_BREL_NC",/* name */
1325 FALSE, /* partial_inplace */
1326 0x040f70ff, /* src_mask */
1327 0x040f70ff, /* dst_mask */
1328 FALSE), /* pcrel_offset */
1330 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1331 0, /* rightshift */
1332 2, /* size (0 = byte, 1 = short, 2 = long) */
1333 16, /* bitsize */
1334 FALSE, /* pc_relative */
1335 0, /* bitpos */
1336 complain_overflow_bitfield,/* complain_on_overflow */
1337 bfd_elf_generic_reloc, /* special_function */
1338 "R_ARM_THM_MOVT_BREL", /* name */
1339 FALSE, /* partial_inplace */
1340 0x040f70ff, /* src_mask */
1341 0x040f70ff, /* dst_mask */
1342 FALSE), /* pcrel_offset */
1344 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1345 0, /* rightshift */
1346 2, /* size (0 = byte, 1 = short, 2 = long) */
1347 16, /* bitsize */
1348 FALSE, /* pc_relative */
1349 0, /* bitpos */
1350 complain_overflow_dont,/* complain_on_overflow */
1351 bfd_elf_generic_reloc, /* special_function */
1352 "R_ARM_THM_MOVW_BREL", /* name */
1353 FALSE, /* partial_inplace */
1354 0x040f70ff, /* src_mask */
1355 0x040f70ff, /* dst_mask */
1356 FALSE), /* pcrel_offset */
1358 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1359 0, /* rightshift */
1360 2, /* size (0 = byte, 1 = short, 2 = long) */
1361 32, /* bitsize */
1362 FALSE, /* pc_relative */
1363 0, /* bitpos */
1364 complain_overflow_bitfield,/* complain_on_overflow */
1365 NULL, /* special_function */
1366 "R_ARM_TLS_GOTDESC", /* name */
1367 TRUE, /* partial_inplace */
1368 0xffffffff, /* src_mask */
1369 0xffffffff, /* dst_mask */
1370 FALSE), /* pcrel_offset */
1372 HOWTO (R_ARM_TLS_CALL, /* type */
1373 0, /* rightshift */
1374 2, /* size (0 = byte, 1 = short, 2 = long) */
1375 24, /* bitsize */
1376 FALSE, /* pc_relative */
1377 0, /* bitpos */
1378 complain_overflow_dont,/* complain_on_overflow */
1379 bfd_elf_generic_reloc, /* special_function */
1380 "R_ARM_TLS_CALL", /* name */
1381 FALSE, /* partial_inplace */
1382 0x00ffffff, /* src_mask */
1383 0x00ffffff, /* dst_mask */
1384 FALSE), /* pcrel_offset */
1386 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1387 0, /* rightshift */
1388 2, /* size (0 = byte, 1 = short, 2 = long) */
1389 0, /* bitsize */
1390 FALSE, /* pc_relative */
1391 0, /* bitpos */
1392 complain_overflow_bitfield,/* complain_on_overflow */
1393 bfd_elf_generic_reloc, /* special_function */
1394 "R_ARM_TLS_DESCSEQ", /* name */
1395 FALSE, /* partial_inplace */
1396 0x00000000, /* src_mask */
1397 0x00000000, /* dst_mask */
1398 FALSE), /* pcrel_offset */
1400 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1401 0, /* rightshift */
1402 2, /* size (0 = byte, 1 = short, 2 = long) */
1403 24, /* bitsize */
1404 FALSE, /* pc_relative */
1405 0, /* bitpos */
1406 complain_overflow_dont,/* complain_on_overflow */
1407 bfd_elf_generic_reloc, /* special_function */
1408 "R_ARM_THM_TLS_CALL", /* name */
1409 FALSE, /* partial_inplace */
1410 0x07ff07ff, /* src_mask */
1411 0x07ff07ff, /* dst_mask */
1412 FALSE), /* pcrel_offset */
1414 HOWTO (R_ARM_PLT32_ABS, /* type */
1415 0, /* rightshift */
1416 2, /* size (0 = byte, 1 = short, 2 = long) */
1417 32, /* bitsize */
1418 FALSE, /* pc_relative */
1419 0, /* bitpos */
1420 complain_overflow_dont,/* complain_on_overflow */
1421 bfd_elf_generic_reloc, /* special_function */
1422 "R_ARM_PLT32_ABS", /* name */
1423 FALSE, /* partial_inplace */
1424 0xffffffff, /* src_mask */
1425 0xffffffff, /* dst_mask */
1426 FALSE), /* pcrel_offset */
1428 HOWTO (R_ARM_GOT_ABS, /* type */
1429 0, /* rightshift */
1430 2, /* size (0 = byte, 1 = short, 2 = long) */
1431 32, /* bitsize */
1432 FALSE, /* pc_relative */
1433 0, /* bitpos */
1434 complain_overflow_dont,/* complain_on_overflow */
1435 bfd_elf_generic_reloc, /* special_function */
1436 "R_ARM_GOT_ABS", /* name */
1437 FALSE, /* partial_inplace */
1438 0xffffffff, /* src_mask */
1439 0xffffffff, /* dst_mask */
1440 FALSE), /* pcrel_offset */
1442 HOWTO (R_ARM_GOT_PREL, /* type */
1443 0, /* rightshift */
1444 2, /* size (0 = byte, 1 = short, 2 = long) */
1445 32, /* bitsize */
1446 TRUE, /* pc_relative */
1447 0, /* bitpos */
1448 complain_overflow_dont, /* complain_on_overflow */
1449 bfd_elf_generic_reloc, /* special_function */
1450 "R_ARM_GOT_PREL", /* name */
1451 FALSE, /* partial_inplace */
1452 0xffffffff, /* src_mask */
1453 0xffffffff, /* dst_mask */
1454 TRUE), /* pcrel_offset */
1456 HOWTO (R_ARM_GOT_BREL12, /* type */
1457 0, /* rightshift */
1458 2, /* size (0 = byte, 1 = short, 2 = long) */
1459 12, /* bitsize */
1460 FALSE, /* pc_relative */
1461 0, /* bitpos */
1462 complain_overflow_bitfield,/* complain_on_overflow */
1463 bfd_elf_generic_reloc, /* special_function */
1464 "R_ARM_GOT_BREL12", /* name */
1465 FALSE, /* partial_inplace */
1466 0x00000fff, /* src_mask */
1467 0x00000fff, /* dst_mask */
1468 FALSE), /* pcrel_offset */
1470 HOWTO (R_ARM_GOTOFF12, /* type */
1471 0, /* rightshift */
1472 2, /* size (0 = byte, 1 = short, 2 = long) */
1473 12, /* bitsize */
1474 FALSE, /* pc_relative */
1475 0, /* bitpos */
1476 complain_overflow_bitfield,/* complain_on_overflow */
1477 bfd_elf_generic_reloc, /* special_function */
1478 "R_ARM_GOTOFF12", /* name */
1479 FALSE, /* partial_inplace */
1480 0x00000fff, /* src_mask */
1481 0x00000fff, /* dst_mask */
1482 FALSE), /* pcrel_offset */
1484 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1486 /* GNU extension to record C++ vtable member usage */
1487 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1488 0, /* rightshift */
1489 2, /* size (0 = byte, 1 = short, 2 = long) */
1490 0, /* bitsize */
1491 FALSE, /* pc_relative */
1492 0, /* bitpos */
1493 complain_overflow_dont, /* complain_on_overflow */
1494 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1495 "R_ARM_GNU_VTENTRY", /* name */
1496 FALSE, /* partial_inplace */
1497 0, /* src_mask */
1498 0, /* dst_mask */
1499 FALSE), /* pcrel_offset */
1501 /* GNU extension to record C++ vtable hierarchy */
1502 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1503 0, /* rightshift */
1504 2, /* size (0 = byte, 1 = short, 2 = long) */
1505 0, /* bitsize */
1506 FALSE, /* pc_relative */
1507 0, /* bitpos */
1508 complain_overflow_dont, /* complain_on_overflow */
1509 NULL, /* special_function */
1510 "R_ARM_GNU_VTINHERIT", /* name */
1511 FALSE, /* partial_inplace */
1512 0, /* src_mask */
1513 0, /* dst_mask */
1514 FALSE), /* pcrel_offset */
1516 HOWTO (R_ARM_THM_JUMP11, /* type */
1517 1, /* rightshift */
1518 1, /* size (0 = byte, 1 = short, 2 = long) */
1519 11, /* bitsize */
1520 TRUE, /* pc_relative */
1521 0, /* bitpos */
1522 complain_overflow_signed, /* complain_on_overflow */
1523 bfd_elf_generic_reloc, /* special_function */
1524 "R_ARM_THM_JUMP11", /* name */
1525 FALSE, /* partial_inplace */
1526 0x000007ff, /* src_mask */
1527 0x000007ff, /* dst_mask */
1528 TRUE), /* pcrel_offset */
1530 HOWTO (R_ARM_THM_JUMP8, /* type */
1531 1, /* rightshift */
1532 1, /* size (0 = byte, 1 = short, 2 = long) */
1533 8, /* bitsize */
1534 TRUE, /* pc_relative */
1535 0, /* bitpos */
1536 complain_overflow_signed, /* complain_on_overflow */
1537 bfd_elf_generic_reloc, /* special_function */
1538 "R_ARM_THM_JUMP8", /* name */
1539 FALSE, /* partial_inplace */
1540 0x000000ff, /* src_mask */
1541 0x000000ff, /* dst_mask */
1542 TRUE), /* pcrel_offset */
1544 /* TLS relocations */
1545 HOWTO (R_ARM_TLS_GD32, /* type */
1546 0, /* rightshift */
1547 2, /* size (0 = byte, 1 = short, 2 = long) */
1548 32, /* bitsize */
1549 FALSE, /* pc_relative */
1550 0, /* bitpos */
1551 complain_overflow_bitfield,/* complain_on_overflow */
1552 NULL, /* special_function */
1553 "R_ARM_TLS_GD32", /* name */
1554 TRUE, /* partial_inplace */
1555 0xffffffff, /* src_mask */
1556 0xffffffff, /* dst_mask */
1557 FALSE), /* pcrel_offset */
1559 HOWTO (R_ARM_TLS_LDM32, /* type */
1560 0, /* rightshift */
1561 2, /* size (0 = byte, 1 = short, 2 = long) */
1562 32, /* bitsize */
1563 FALSE, /* pc_relative */
1564 0, /* bitpos */
1565 complain_overflow_bitfield,/* complain_on_overflow */
1566 bfd_elf_generic_reloc, /* special_function */
1567 "R_ARM_TLS_LDM32", /* name */
1568 TRUE, /* partial_inplace */
1569 0xffffffff, /* src_mask */
1570 0xffffffff, /* dst_mask */
1571 FALSE), /* pcrel_offset */
1573 HOWTO (R_ARM_TLS_LDO32, /* type */
1574 0, /* rightshift */
1575 2, /* size (0 = byte, 1 = short, 2 = long) */
1576 32, /* bitsize */
1577 FALSE, /* pc_relative */
1578 0, /* bitpos */
1579 complain_overflow_bitfield,/* complain_on_overflow */
1580 bfd_elf_generic_reloc, /* special_function */
1581 "R_ARM_TLS_LDO32", /* name */
1582 TRUE, /* partial_inplace */
1583 0xffffffff, /* src_mask */
1584 0xffffffff, /* dst_mask */
1585 FALSE), /* pcrel_offset */
1587 HOWTO (R_ARM_TLS_IE32, /* type */
1588 0, /* rightshift */
1589 2, /* size (0 = byte, 1 = short, 2 = long) */
1590 32, /* bitsize */
1591 FALSE, /* pc_relative */
1592 0, /* bitpos */
1593 complain_overflow_bitfield,/* complain_on_overflow */
1594 NULL, /* special_function */
1595 "R_ARM_TLS_IE32", /* name */
1596 TRUE, /* partial_inplace */
1597 0xffffffff, /* src_mask */
1598 0xffffffff, /* dst_mask */
1599 FALSE), /* pcrel_offset */
1601 HOWTO (R_ARM_TLS_LE32, /* type */
1602 0, /* rightshift */
1603 2, /* size (0 = byte, 1 = short, 2 = long) */
1604 32, /* bitsize */
1605 FALSE, /* pc_relative */
1606 0, /* bitpos */
1607 complain_overflow_bitfield,/* complain_on_overflow */
1608 NULL, /* special_function */
1609 "R_ARM_TLS_LE32", /* name */
1610 TRUE, /* partial_inplace */
1611 0xffffffff, /* src_mask */
1612 0xffffffff, /* dst_mask */
1613 FALSE), /* pcrel_offset */
1615 HOWTO (R_ARM_TLS_LDO12, /* type */
1616 0, /* rightshift */
1617 2, /* size (0 = byte, 1 = short, 2 = long) */
1618 12, /* bitsize */
1619 FALSE, /* pc_relative */
1620 0, /* bitpos */
1621 complain_overflow_bitfield,/* complain_on_overflow */
1622 bfd_elf_generic_reloc, /* special_function */
1623 "R_ARM_TLS_LDO12", /* name */
1624 FALSE, /* partial_inplace */
1625 0x00000fff, /* src_mask */
1626 0x00000fff, /* dst_mask */
1627 FALSE), /* pcrel_offset */
1629 HOWTO (R_ARM_TLS_LE12, /* type */
1630 0, /* rightshift */
1631 2, /* size (0 = byte, 1 = short, 2 = long) */
1632 12, /* bitsize */
1633 FALSE, /* pc_relative */
1634 0, /* bitpos */
1635 complain_overflow_bitfield,/* complain_on_overflow */
1636 bfd_elf_generic_reloc, /* special_function */
1637 "R_ARM_TLS_LE12", /* name */
1638 FALSE, /* partial_inplace */
1639 0x00000fff, /* src_mask */
1640 0x00000fff, /* dst_mask */
1641 FALSE), /* pcrel_offset */
1643 HOWTO (R_ARM_TLS_IE12GP, /* type */
1644 0, /* rightshift */
1645 2, /* size (0 = byte, 1 = short, 2 = long) */
1646 12, /* bitsize */
1647 FALSE, /* pc_relative */
1648 0, /* bitpos */
1649 complain_overflow_bitfield,/* complain_on_overflow */
1650 bfd_elf_generic_reloc, /* special_function */
1651 "R_ARM_TLS_IE12GP", /* name */
1652 FALSE, /* partial_inplace */
1653 0x00000fff, /* src_mask */
1654 0x00000fff, /* dst_mask */
1655 FALSE), /* pcrel_offset */
1657 /* 112-127 private relocations. */
1658 EMPTY_HOWTO (112),
1659 EMPTY_HOWTO (113),
1660 EMPTY_HOWTO (114),
1661 EMPTY_HOWTO (115),
1662 EMPTY_HOWTO (116),
1663 EMPTY_HOWTO (117),
1664 EMPTY_HOWTO (118),
1665 EMPTY_HOWTO (119),
1666 EMPTY_HOWTO (120),
1667 EMPTY_HOWTO (121),
1668 EMPTY_HOWTO (122),
1669 EMPTY_HOWTO (123),
1670 EMPTY_HOWTO (124),
1671 EMPTY_HOWTO (125),
1672 EMPTY_HOWTO (126),
1673 EMPTY_HOWTO (127),
1675 /* R_ARM_ME_TOO, obsolete. */
1676 EMPTY_HOWTO (128),
1678 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1679 0, /* rightshift */
1680 1, /* size (0 = byte, 1 = short, 2 = long) */
1681 0, /* bitsize */
1682 FALSE, /* pc_relative */
1683 0, /* bitpos */
1684 complain_overflow_bitfield,/* complain_on_overflow */
1685 bfd_elf_generic_reloc, /* special_function */
1686 "R_ARM_THM_TLS_DESCSEQ",/* name */
1687 FALSE, /* partial_inplace */
1688 0x00000000, /* src_mask */
1689 0x00000000, /* dst_mask */
1690 FALSE), /* pcrel_offset */
1691 EMPTY_HOWTO (130),
1692 EMPTY_HOWTO (131),
1693 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1694 0, /* rightshift. */
1695 1, /* size (0 = byte, 1 = short, 2 = long). */
1696 16, /* bitsize. */
1697 FALSE, /* pc_relative. */
1698 0, /* bitpos. */
1699 complain_overflow_bitfield,/* complain_on_overflow. */
1700 bfd_elf_generic_reloc, /* special_function. */
1701 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1702 FALSE, /* partial_inplace. */
1703 0x00000000, /* src_mask. */
1704 0x00000000, /* dst_mask. */
1705 FALSE), /* pcrel_offset. */
1706 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1707 0, /* rightshift. */
1708 1, /* size (0 = byte, 1 = short, 2 = long). */
1709 16, /* bitsize. */
1710 FALSE, /* pc_relative. */
1711 0, /* bitpos. */
1712 complain_overflow_bitfield,/* complain_on_overflow. */
1713 bfd_elf_generic_reloc, /* special_function. */
1714 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1715 FALSE, /* partial_inplace. */
1716 0x00000000, /* src_mask. */
1717 0x00000000, /* dst_mask. */
1718 FALSE), /* pcrel_offset. */
1719 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1720 0, /* rightshift. */
1721 1, /* size (0 = byte, 1 = short, 2 = long). */
1722 16, /* bitsize. */
1723 FALSE, /* pc_relative. */
1724 0, /* bitpos. */
1725 complain_overflow_bitfield,/* complain_on_overflow. */
1726 bfd_elf_generic_reloc, /* special_function. */
1727 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1728 FALSE, /* partial_inplace. */
1729 0x00000000, /* src_mask. */
1730 0x00000000, /* dst_mask. */
1731 FALSE), /* pcrel_offset. */
1732 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1733 0, /* rightshift. */
1734 1, /* size (0 = byte, 1 = short, 2 = long). */
1735 16, /* bitsize. */
1736 FALSE, /* pc_relative. */
1737 0, /* bitpos. */
1738 complain_overflow_bitfield,/* complain_on_overflow. */
1739 bfd_elf_generic_reloc, /* special_function. */
1740 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1741 FALSE, /* partial_inplace. */
1742 0x00000000, /* src_mask. */
1743 0x00000000, /* dst_mask. */
1744 FALSE), /* pcrel_offset. */
1745 /* Relocations for Armv8.1-M Mainline. */
1746 HOWTO (R_ARM_THM_BF16, /* type. */
1747 0, /* rightshift. */
1748 1, /* size (0 = byte, 1 = short, 2 = long). */
1749 16, /* bitsize. */
1750 TRUE, /* pc_relative. */
1751 0, /* bitpos. */
1752 complain_overflow_dont,/* do not complain_on_overflow. */
1753 bfd_elf_generic_reloc, /* special_function. */
1754 "R_ARM_THM_BF16", /* name. */
1755 FALSE, /* partial_inplace. */
1756 0x001f0ffe, /* src_mask. */
1757 0x001f0ffe, /* dst_mask. */
1758 TRUE), /* pcrel_offset. */
1759 HOWTO (R_ARM_THM_BF12, /* type. */
1760 0, /* rightshift. */
1761 1, /* size (0 = byte, 1 = short, 2 = long). */
1762 12, /* bitsize. */
1763 TRUE, /* pc_relative. */
1764 0, /* bitpos. */
1765 complain_overflow_dont,/* do not complain_on_overflow. */
1766 bfd_elf_generic_reloc, /* special_function. */
1767 "R_ARM_THM_BF12", /* name. */
1768 FALSE, /* partial_inplace. */
1769 0x00010ffe, /* src_mask. */
1770 0x00010ffe, /* dst_mask. */
1771 TRUE), /* pcrel_offset. */
1772 HOWTO (R_ARM_THM_BF18, /* type. */
1773 0, /* rightshift. */
1774 1, /* size (0 = byte, 1 = short, 2 = long). */
1775 18, /* bitsize. */
1776 TRUE, /* pc_relative. */
1777 0, /* bitpos. */
1778 complain_overflow_dont,/* do not complain_on_overflow. */
1779 bfd_elf_generic_reloc, /* special_function. */
1780 "R_ARM_THM_BF18", /* name. */
1781 FALSE, /* partial_inplace. */
1782 0x007f0ffe, /* src_mask. */
1783 0x007f0ffe, /* dst_mask. */
1784 TRUE), /* pcrel_offset. */
1787 /* 160 onwards: */
1788 static reloc_howto_type elf32_arm_howto_table_2[8] =
1790 HOWTO (R_ARM_IRELATIVE, /* type */
1791 0, /* rightshift */
1792 2, /* size (0 = byte, 1 = short, 2 = long) */
1793 32, /* bitsize */
1794 FALSE, /* pc_relative */
1795 0, /* bitpos */
1796 complain_overflow_bitfield,/* complain_on_overflow */
1797 bfd_elf_generic_reloc, /* special_function */
1798 "R_ARM_IRELATIVE", /* name */
1799 TRUE, /* partial_inplace */
1800 0xffffffff, /* src_mask */
1801 0xffffffff, /* dst_mask */
1802 FALSE), /* pcrel_offset */
1803 HOWTO (R_ARM_GOTFUNCDESC, /* type */
1804 0, /* rightshift */
1805 2, /* size (0 = byte, 1 = short, 2 = long) */
1806 32, /* bitsize */
1807 FALSE, /* pc_relative */
1808 0, /* bitpos */
1809 complain_overflow_bitfield,/* complain_on_overflow */
1810 bfd_elf_generic_reloc, /* special_function */
1811 "R_ARM_GOTFUNCDESC", /* name */
1812 FALSE, /* partial_inplace */
1813 0, /* src_mask */
1814 0xffffffff, /* dst_mask */
1815 FALSE), /* pcrel_offset */
1816 HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
1817 0, /* rightshift */
1818 2, /* size (0 = byte, 1 = short, 2 = long) */
1819 32, /* bitsize */
1820 FALSE, /* pc_relative */
1821 0, /* bitpos */
1822 complain_overflow_bitfield,/* complain_on_overflow */
1823 bfd_elf_generic_reloc, /* special_function */
1824 "R_ARM_GOTOFFFUNCDESC",/* name */
1825 FALSE, /* partial_inplace */
1826 0, /* src_mask */
1827 0xffffffff, /* dst_mask */
1828 FALSE), /* pcrel_offset */
1829 HOWTO (R_ARM_FUNCDESC, /* type */
1830 0, /* rightshift */
1831 2, /* size (0 = byte, 1 = short, 2 = long) */
1832 32, /* bitsize */
1833 FALSE, /* pc_relative */
1834 0, /* bitpos */
1835 complain_overflow_bitfield,/* complain_on_overflow */
1836 bfd_elf_generic_reloc, /* special_function */
1837 "R_ARM_FUNCDESC", /* name */
1838 FALSE, /* partial_inplace */
1839 0, /* src_mask */
1840 0xffffffff, /* dst_mask */
1841 FALSE), /* pcrel_offset */
1842 HOWTO (R_ARM_FUNCDESC_VALUE, /* type */
1843 0, /* rightshift */
1844 2, /* size (0 = byte, 1 = short, 2 = long) */
1845 64, /* bitsize */
1846 FALSE, /* pc_relative */
1847 0, /* bitpos */
1848 complain_overflow_bitfield,/* complain_on_overflow */
1849 bfd_elf_generic_reloc, /* special_function */
1850 "R_ARM_FUNCDESC_VALUE",/* name */
1851 FALSE, /* partial_inplace */
1852 0, /* src_mask */
1853 0xffffffff, /* dst_mask */
1854 FALSE), /* pcrel_offset */
1855 HOWTO (R_ARM_TLS_GD32_FDPIC, /* type */
1856 0, /* rightshift */
1857 2, /* size (0 = byte, 1 = short, 2 = long) */
1858 32, /* bitsize */
1859 FALSE, /* pc_relative */
1860 0, /* bitpos */
1861 complain_overflow_bitfield,/* complain_on_overflow */
1862 bfd_elf_generic_reloc, /* special_function */
1863 "R_ARM_TLS_GD32_FDPIC",/* name */
1864 FALSE, /* partial_inplace */
1865 0, /* src_mask */
1866 0xffffffff, /* dst_mask */
1867 FALSE), /* pcrel_offset */
1868 HOWTO (R_ARM_TLS_LDM32_FDPIC, /* type */
1869 0, /* rightshift */
1870 2, /* size (0 = byte, 1 = short, 2 = long) */
1871 32, /* bitsize */
1872 FALSE, /* pc_relative */
1873 0, /* bitpos */
1874 complain_overflow_bitfield,/* complain_on_overflow */
1875 bfd_elf_generic_reloc, /* special_function */
1876 "R_ARM_TLS_LDM32_FDPIC",/* name */
1877 FALSE, /* partial_inplace */
1878 0, /* src_mask */
1879 0xffffffff, /* dst_mask */
1880 FALSE), /* pcrel_offset */
1881 HOWTO (R_ARM_TLS_IE32_FDPIC, /* type */
1882 0, /* rightshift */
1883 2, /* size (0 = byte, 1 = short, 2 = long) */
1884 32, /* bitsize */
1885 FALSE, /* pc_relative */
1886 0, /* bitpos */
1887 complain_overflow_bitfield,/* complain_on_overflow */
1888 bfd_elf_generic_reloc, /* special_function */
1889 "R_ARM_TLS_IE32_FDPIC",/* name */
1890 FALSE, /* partial_inplace */
1891 0, /* src_mask */
1892 0xffffffff, /* dst_mask */
1893 FALSE), /* pcrel_offset */
1896 /* 249-255 extended, currently unused, relocations: */
1897 static reloc_howto_type elf32_arm_howto_table_3[4] =
1899 HOWTO (R_ARM_RREL32, /* type */
1900 0, /* rightshift */
1901 0, /* size (0 = byte, 1 = short, 2 = long) */
1902 0, /* bitsize */
1903 FALSE, /* pc_relative */
1904 0, /* bitpos */
1905 complain_overflow_dont,/* complain_on_overflow */
1906 bfd_elf_generic_reloc, /* special_function */
1907 "R_ARM_RREL32", /* name */
1908 FALSE, /* partial_inplace */
1909 0, /* src_mask */
1910 0, /* dst_mask */
1911 FALSE), /* pcrel_offset */
1913 HOWTO (R_ARM_RABS32, /* type */
1914 0, /* rightshift */
1915 0, /* size (0 = byte, 1 = short, 2 = long) */
1916 0, /* bitsize */
1917 FALSE, /* pc_relative */
1918 0, /* bitpos */
1919 complain_overflow_dont,/* complain_on_overflow */
1920 bfd_elf_generic_reloc, /* special_function */
1921 "R_ARM_RABS32", /* name */
1922 FALSE, /* partial_inplace */
1923 0, /* src_mask */
1924 0, /* dst_mask */
1925 FALSE), /* pcrel_offset */
1927 HOWTO (R_ARM_RPC24, /* type */
1928 0, /* rightshift */
1929 0, /* size (0 = byte, 1 = short, 2 = long) */
1930 0, /* bitsize */
1931 FALSE, /* pc_relative */
1932 0, /* bitpos */
1933 complain_overflow_dont,/* complain_on_overflow */
1934 bfd_elf_generic_reloc, /* special_function */
1935 "R_ARM_RPC24", /* name */
1936 FALSE, /* partial_inplace */
1937 0, /* src_mask */
1938 0, /* dst_mask */
1939 FALSE), /* pcrel_offset */
1941 HOWTO (R_ARM_RBASE, /* type */
1942 0, /* rightshift */
1943 0, /* size (0 = byte, 1 = short, 2 = long) */
1944 0, /* bitsize */
1945 FALSE, /* pc_relative */
1946 0, /* bitpos */
1947 complain_overflow_dont,/* complain_on_overflow */
1948 bfd_elf_generic_reloc, /* special_function */
1949 "R_ARM_RBASE", /* name */
1950 FALSE, /* partial_inplace */
1951 0, /* src_mask */
1952 0, /* dst_mask */
1953 FALSE) /* pcrel_offset */
1956 static reloc_howto_type *
1957 elf32_arm_howto_from_type (unsigned int r_type)
1959 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1960 return &elf32_arm_howto_table_1[r_type];
1962 if (r_type >= R_ARM_IRELATIVE
1963 && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1964 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1966 if (r_type >= R_ARM_RREL32
1967 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1968 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1970 return NULL;
1973 static bfd_boolean
1974 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1975 Elf_Internal_Rela * elf_reloc)
1977 unsigned int r_type;
1979 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1980 if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1982 /* xgettext:c-format */
1983 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1984 abfd, r_type);
1985 bfd_set_error (bfd_error_bad_value);
1986 return FALSE;
1988 return TRUE;
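/* Illustrative sketch, not part of the original file: how the two
   helpers above cooperate when decoding a relocation.  The function
   name below is hypothetical; only ELF32_R_TYPE, Elf_Internal_Rela,
   reloc_howto_type and elf32_arm_howto_from_type come from the
   surrounding code.  */
#if 0
static const char *
example_arm_reloc_name (const Elf_Internal_Rela *rel)
{
  /* Extract the ARM relocation number from r_info and dispatch to one
     of the three howto tables defined above.  */
  unsigned int r_type = ELF32_R_TYPE (rel->r_info);
  reloc_howto_type *howto = elf32_arm_howto_from_type (r_type);

  /* A NULL result means the number falls outside all three table
     ranges; elf32_arm_info_to_howto reports that as an error.  */
  return howto != NULL ? howto->name : NULL;
}
#endif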
1991 struct elf32_arm_reloc_map
1993 bfd_reloc_code_real_type bfd_reloc_val;
1994 unsigned char elf_reloc_val;
1997 /* All entries in this list must also be present in elf32_arm_howto_table. */
1998 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
2000 {BFD_RELOC_NONE, R_ARM_NONE},
2001 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
2002 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
2003 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
2004 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
2005 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
2006 {BFD_RELOC_32, R_ARM_ABS32},
2007 {BFD_RELOC_32_PCREL, R_ARM_REL32},
2008 {BFD_RELOC_8, R_ARM_ABS8},
2009 {BFD_RELOC_16, R_ARM_ABS16},
2010 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
2011 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
2012 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
2013 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
2014 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
2015 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
2016 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
2017 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
2018 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
2019 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
2020 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
2021 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
2022 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
2023 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
2024 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
2025 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2026 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
2027 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
2028 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
2029 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
2030 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
2031 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2032 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
2033 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
2034 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
2035 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
2036 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
2037 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
2038 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
2039 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
2040 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
2041 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
2042 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
2043 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
2044 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
2045 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
2046 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
2047 {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
2048 {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
2049 {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
2050 {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
2051 {BFD_RELOC_ARM_TLS_GD32_FDPIC, R_ARM_TLS_GD32_FDPIC},
2052 {BFD_RELOC_ARM_TLS_LDM32_FDPIC, R_ARM_TLS_LDM32_FDPIC},
2053 {BFD_RELOC_ARM_TLS_IE32_FDPIC, R_ARM_TLS_IE32_FDPIC},
2054 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
2055 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
2056 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
2057 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
2058 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
2059 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
2060 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
2061 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
2062 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2063 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2064 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2065 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2066 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2067 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2068 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2069 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2070 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2071 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2072 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2073 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2074 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2075 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2076 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2077 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2078 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2079 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2080 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2081 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2082 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2083 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2084 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2085 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2086 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2087 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2088 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2089 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2090 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2091 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2092 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
2093 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2094 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2095 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2096 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC},
2097 {BFD_RELOC_ARM_THUMB_BF17, R_ARM_THM_BF16},
2098 {BFD_RELOC_ARM_THUMB_BF13, R_ARM_THM_BF12},
2099 {BFD_RELOC_ARM_THUMB_BF19, R_ARM_THM_BF18}
2102 static reloc_howto_type *
2103 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2104 bfd_reloc_code_real_type code)
2106 unsigned int i;
2108 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2109 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2110 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2112 return NULL;
2115 static reloc_howto_type *
2116 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2117 const char *r_name)
2119 unsigned int i;
2121 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2122 if (elf32_arm_howto_table_1[i].name != NULL
2123 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2124 return &elf32_arm_howto_table_1[i];
2126 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2127 if (elf32_arm_howto_table_2[i].name != NULL
2128 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2129 return &elf32_arm_howto_table_2[i];
2131 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2132 if (elf32_arm_howto_table_3[i].name != NULL
2133 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2134 return &elf32_arm_howto_table_3[i];
2136 return NULL;
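/* Illustrative sketch, not part of the original file: the same howto can
   be reached either from a BFD reloc code or from its name.  The BFD
   code used here is one of the Branch Future entries listed in
   elf32_arm_reloc_map above; the function name is hypothetical.  */
#if 0
static void
example_lookup_branch_future_howto (bfd *abfd)
{
  /* BFD_RELOC_ARM_THUMB_BF17 is mapped to R_ARM_THM_BF16, so both
     lookups should return the same table entry.  */
  reloc_howto_type *by_code
    = elf32_arm_reloc_type_lookup (abfd, BFD_RELOC_ARM_THUMB_BF17);
  reloc_howto_type *by_name
    = elf32_arm_reloc_name_lookup (abfd, "R_ARM_THM_BF16");

  BFD_ASSERT (by_code != NULL && by_code == by_name);
}
#endif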
2139 /* Support for core dump NOTE sections. */
2141 static bfd_boolean
2142 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2144 int offset;
2145 size_t size;
2147 switch (note->descsz)
2149 default:
2150 return FALSE;
2152 case 148: /* Linux/ARM 32-bit. */
2153 /* pr_cursig */
2154 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2156 /* pr_pid */
2157 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2159 /* pr_reg */
2160 offset = 72;
2161 size = 72;
2163 break;
2166 /* Make a ".reg/999" section. */
2167 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2168 size, note->descpos + offset);
2171 static bfd_boolean
2172 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2174 switch (note->descsz)
2176 default:
2177 return FALSE;
2179 case 124: /* Linux/ARM elf_prpsinfo. */
2180 elf_tdata (abfd)->core->pid
2181 = bfd_get_32 (abfd, note->descdata + 12);
2182 elf_tdata (abfd)->core->program
2183 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2184 elf_tdata (abfd)->core->command
2185 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2188 /* Note that for some reason, a spurious space is tacked
2189 onto the end of the args in some implementations (at least
2190 one, anyway), so strip it off if it exists. */
2192 char *command = elf_tdata (abfd)->core->command;
2193 int n = strlen (command);
2195 if (0 < n && command[n - 1] == ' ')
2196 command[n - 1] = '\0';
2199 return TRUE;
2202 static char *
2203 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2204 int note_type, ...)
2206 switch (note_type)
2208 default:
2209 return NULL;
2211 case NT_PRPSINFO:
2213 char data[124] ATTRIBUTE_NONSTRING;
2214 va_list ap;
2216 va_start (ap, note_type);
2217 memset (data, 0, sizeof (data));
2218 strncpy (data + 28, va_arg (ap, const char *), 16);
2219 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2220 DIAGNOSTIC_PUSH;
2221 /* GCC 8.0 and 8.1 warn about 80 equals destination size with
2222 -Wstringop-truncation:
2223 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
2225 DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
2226 #endif
2227 strncpy (data + 44, va_arg (ap, const char *), 80);
2228 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2229 DIAGNOSTIC_POP;
2230 #endif
2231 va_end (ap);
2233 return elfcore_write_note (abfd, buf, bufsiz,
2234 "CORE", note_type, data, sizeof (data));
2237 case NT_PRSTATUS:
2239 char data[148];
2240 va_list ap;
2241 long pid;
2242 int cursig;
2243 const void *greg;
2245 va_start (ap, note_type);
2246 memset (data, 0, sizeof (data));
2247 pid = va_arg (ap, long);
2248 bfd_put_32 (abfd, pid, data + 24);
2249 cursig = va_arg (ap, int);
2250 bfd_put_16 (abfd, cursig, data + 12);
2251 greg = va_arg (ap, const void *);
2252 memcpy (data + 72, greg, 72);
2253 va_end (ap);
2255 return elfcore_write_note (abfd, buf, bufsiz,
2256 "CORE", note_type, data, sizeof (data));
2261 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2262 #define TARGET_LITTLE_NAME "elf32-littlearm"
2263 #define TARGET_BIG_SYM arm_elf32_be_vec
2264 #define TARGET_BIG_NAME "elf32-bigarm"
2266 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2267 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2268 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2270 typedef unsigned long int insn32;
2271 typedef unsigned short int insn16;
2273 /* In lieu of proper flags, assume all EABIv4 or later objects are
2274 interworkable. */
2275 #define INTERWORK_FLAG(abfd) \
2276 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2277 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2278 || ((abfd)->flags & BFD_LINKER_CREATED))
2280 /* The linker script knows the section names for placement.
2281 The entry_names are used to do simple name mangling on the stubs.
2282 Given a function name, and its type, the stub can be found. The
2283 name can be changed. The only requirement is that the %s be present. */
2284 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2285 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2287 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2288 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2290 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2291 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2293 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2294 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2296 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2297 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2299 #define STUB_ENTRY_NAME "__%s_veneer"
2301 #define CMSE_PREFIX "__acle_se_"
2303 /* The name of the dynamic interpreter. This is put in the .interp
2304 section. */
2305 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2307 /* FDPIC default stack size. */
2308 #define DEFAULT_STACK_SIZE 0x8000
2310 static const unsigned long tls_trampoline [] =
2312 0xe08e0000, /* add r0, lr, r0 */
2313 0xe5901004, /* ldr r1, [r0,#4] */
2314 0xe12fff11, /* bx r1 */
2317 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2319 0xe52d2004, /* push {r2} */
2320 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2321 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2322 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2323 0xe081100f, /* 2: add r1, pc */
2324 0xe12fff12, /* bx r2 */
2325 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2326 + dl_tlsdesc_lazy_resolver(GOT) */
2327 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
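/* Illustrative arithmetic, not part of the original file: in ARM state
   "ldr rX, [pc, #imm]" reads from the instruction's address + 8 + imm.
   In the trampoline above, "ldr r2, [pc, #3f - . - 8]" sits at byte
   offset 4 and encodes imm = 0x18 - 0x4 - 0x8 = 0xc, so it reads
   0x4 + 0x8 + 0xc = 0x18, the word at label 3; the following load
   reaches the word at label 4 in the same way.  */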
2330 /* ARM FDPIC PLT entry. */
2331 /* The last 5 words contain PLT lazy fragment code and data. */
2332 static const bfd_vma elf32_arm_fdpic_plt_entry [] =
2334 0xe59fc008, /* ldr r12, .L1 */
2335 0xe08cc009, /* add r12, r12, r9 */
2336 0xe59c9004, /* ldr r9, [r12, #4] */
2337 0xe59cf000, /* ldr pc, [r12] */
2338 0x00000000, /* L1. .word foo(GOTOFFFUNCDESC) */
2339 0x00000000, /* L2. .word foo(funcdesc_value_reloc_offset) */
2340 0xe51fc00c, /* ldr r12, [pc, #-12] */
2341 0xe92d1000, /* push {r12} */
2342 0xe599c004, /* ldr r12, [r9, #4] */
2343 0xe599f000, /* ldr pc, [r9] */
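/* Illustrative arithmetic, not part of the original file: the lazy
   fragment starts at word 6 of the entry above.  Its first instruction,
   "ldr r12, [pc, #-12]" at byte offset 24, reads from 24 + 8 - 12 = 20,
   i.e. word 5, the foo(funcdesc_value_reloc_offset) literal, which is
   then pushed for the lazy resolver.  Likewise the initial
   "ldr r12, .L1" at offset 0 reads 0 + 8 + 8 = 16, i.e. word 4, the
   foo(GOTOFFFUNCDESC) literal.  */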
2346 /* Thumb FDPIC PLT entry. */
2347 /* The last 5 words contain PLT lazy fragment code and data. */
2348 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
2350 0xc00cf8df, /* ldr.w r12, .L1 */
2351 0x0c09eb0c, /* add.w r12, r12, r9 */
2352 0x9004f8dc, /* ldr.w r9, [r12, #4] */
2353 0xf000f8dc, /* ldr.w pc, [r12] */
2354 0x00000000, /* .L1 .word foo(GOTOFFFUNCDESC) */
2355 0x00000000, /* .L2 .word foo(funcdesc_value_reloc_offset) */
2356 0xc008f85f, /* ldr.w r12, .L2 */
2357 0xcd04f84d, /* push {r12} */
2358 0xc004f8d9, /* ldr.w r12, [r9, #4] */
2359 0xf000f8d9, /* ldr.w pc, [r9] */
2362 #ifdef FOUR_WORD_PLT
2364 /* The first entry in a procedure linkage table looks like
2365 this. It is set up so that any shared library function that is
2366 called before the relocation has been set up calls the dynamic
2367 linker first. */
2368 static const bfd_vma elf32_arm_plt0_entry [] =
2370 0xe52de004, /* str lr, [sp, #-4]! */
2371 0xe59fe010, /* ldr lr, [pc, #16] */
2372 0xe08fe00e, /* add lr, pc, lr */
2373 0xe5bef008, /* ldr pc, [lr, #8]! */
2376 /* Subsequent entries in a procedure linkage table look like
2377 this. */
2378 static const bfd_vma elf32_arm_plt_entry [] =
2380 0xe28fc600, /* add ip, pc, #NN */
2381 0xe28cca00, /* add ip, ip, #NN */
2382 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2383 0x00000000, /* unused */
2386 #else /* not FOUR_WORD_PLT */
2388 /* The first entry in a procedure linkage table looks like
2389 this. It is set up so that any shared library function that is
2390 called before the relocation has been set up calls the dynamic
2391 linker first. */
2392 static const bfd_vma elf32_arm_plt0_entry [] =
2394 0xe52de004, /* str lr, [sp, #-4]! */
2395 0xe59fe004, /* ldr lr, [pc, #4] */
2396 0xe08fe00e, /* add lr, pc, lr */
2397 0xe5bef008, /* ldr pc, [lr, #8]! */
2398 0x00000000, /* &GOT[0] - . */
2401 /* By default subsequent entries in a procedure linkage table look like
2402 this. Offsets that don't fit into 28 bits will cause a link error. */
2403 static const bfd_vma elf32_arm_plt_entry_short [] =
2405 0xe28fc600, /* add ip, pc, #0xNN00000 */
2406 0xe28cca00, /* add ip, ip, #0xNN000 */
2407 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
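/* Illustrative arithmetic, not part of the original file: the three
   instructions above carry 8 + 8 + 12 = 28 bits of displacement (the
   first two are ADD immediates rotated so their 8-bit constants land in
   bits 20-27 and 12-19, and the LDR supplies bits 0-11).  For example,
   a displacement of 0x1234568 from the entry's pc + 8 would be encoded
   as

     add ip, pc, #0x1200000
     add ip, ip, #0x34000
     ldr pc, [ip, #0x568]!

   which is why larger displacements need the "long" entry below.  */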
2410 /* When explicitly asked, we'll use this "long" entry format
2411 which can cope with arbitrary displacements. */
2412 static const bfd_vma elf32_arm_plt_entry_long [] =
2414 0xe28fc200, /* add ip, pc, #0xN0000000 */
2415 0xe28cc600, /* add ip, ip, #0xNN00000 */
2416 0xe28cca00, /* add ip, ip, #0xNN000 */
2417 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2420 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
2422 #endif /* not FOUR_WORD_PLT */
2424 /* The first entry in a procedure linkage table looks like this.
2425 It is set up so that any shared library function that is called before the
2426 relocation has been set up calls the dynamic linker first. */
2427 static const bfd_vma elf32_thumb2_plt0_entry [] =
2429 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2430 an instruction may be encoded as one or two array elements. */
2431 0xf8dfb500, /* push {lr} */
2432 0x44fee008, /* ldr.w lr, [pc, #8] */
2433 /* add lr, pc */
2434 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2435 0x00000000, /* &GOT[0] - . */
2438 /* Subsequent entries in a procedure linkage table for thumb only target
2439 look like this. */
2440 static const bfd_vma elf32_thumb2_plt_entry [] =
2442 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2443 an instruction may be encoded as one or two array elements. */
2444 0x0c00f240, /* movw ip, #0xNNNN */
2445 0x0c00f2c0, /* movt ip, #0xNNNN */
2446 0xf8dc44fc, /* add ip, pc */
2447 0xbf00f000 /* ldr.w pc, [ip] */
2448 /* nop */
2451 /* The format of the first entry in the procedure linkage table
2452 for a VxWorks executable. */
2453 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2455 0xe52dc008, /* str ip,[sp,#-8]! */
2456 0xe59fc000, /* ldr ip,[pc] */
2457 0xe59cf008, /* ldr pc,[ip,#8] */
2458 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2461 /* The format of subsequent entries in a VxWorks executable. */
2462 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2464 0xe59fc000, /* ldr ip,[pc] */
2465 0xe59cf000, /* ldr pc,[ip] */
2466 0x00000000, /* .long @got */
2467 0xe59fc000, /* ldr ip,[pc] */
2468 0xea000000, /* b _PLT */
2469 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2472 /* The format of entries in a VxWorks shared library. */
2473 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2475 0xe59fc000, /* ldr ip,[pc] */
2476 0xe79cf009, /* ldr pc,[ip,r9] */
2477 0x00000000, /* .long @got */
2478 0xe59fc000, /* ldr ip,[pc] */
2479 0xe599f008, /* ldr pc,[r9,#8] */
2480 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2483 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2484 #define PLT_THUMB_STUB_SIZE 4
2485 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2487 0x4778, /* bx pc */
2488 0x46c0 /* nop */
2491 /* The entries in a PLT when using a DLL-based target with multiple
2492 address spaces. */
2493 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2495 0xe51ff004, /* ldr pc, [pc, #-4] */
2496 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2499 /* The first entry in a procedure linkage table looks like
2500 this. It is set up so that any shared library function that is
2501 called before the relocation has been set up calls the dynamic
2502 linker first. */
2503 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2505 /* First bundle: */
2506 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2507 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2508 0xe08cc00f, /* add ip, ip, pc */
2509 0xe52dc008, /* str ip, [sp, #-8]! */
2510 /* Second bundle: */
2511 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2512 0xe59cc000, /* ldr ip, [ip] */
2513 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2514 0xe12fff1c, /* bx ip */
2515 /* Third bundle: */
2516 0xe320f000, /* nop */
2517 0xe320f000, /* nop */
2518 0xe320f000, /* nop */
2519 /* .Lplt_tail: */
2520 0xe50dc004, /* str ip, [sp, #-4] */
2521 /* Fourth bundle: */
2522 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2523 0xe59cc000, /* ldr ip, [ip] */
2524 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2525 0xe12fff1c, /* bx ip */
2527 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2529 /* Subsequent entries in a procedure linkage table look like this. */
2530 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2532 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2533 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2534 0xe08cc00f, /* add ip, ip, pc */
2535 0xea000000, /* b .Lplt_tail */
2538 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2539 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2540 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2541 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2542 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2543 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2544 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2545 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
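/* Illustrative arithmetic, not part of the original file: reading these
   limits as reach measured from the place of the relocation (hence the
   +8 for the ARM pc and +4 for the Thumb pc), they evaluate to:

     ARM_MAX_FWD_BRANCH_OFFSET        =  0x2000004   (about +32 MiB)
     ARM_MAX_BWD_BRANCH_OFFSET        = -0x1fffff8   (about -32 MiB)
     THM_MAX_FWD_BRANCH_OFFSET        =  0x400002    (about  +4 MiB)
     THM_MAX_BWD_BRANCH_OFFSET        = -0x3ffffc    (about  -4 MiB)
     THM2_MAX_FWD_BRANCH_OFFSET       =  0x1000002   (about +16 MiB)
     THM2_MAX_BWD_BRANCH_OFFSET       = -0xfffffc    (about -16 MiB)
     THM2_MAX_FWD_COND_BRANCH_OFFSET  =  0x100002    (about  +1 MiB)
     THM2_MAX_BWD_COND_BRANCH_OFFSET  = -0xffffc     (about  -1 MiB)  */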
2547 enum stub_insn_type
2549 THUMB16_TYPE = 1,
2550 THUMB32_TYPE,
2551 ARM_TYPE,
2552 DATA_TYPE
2555 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2556 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2557 is inserted in arm_build_one_stub(). */
2558 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2559 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2560 #define THUMB32_MOVT(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2561 #define THUMB32_MOVW(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2562 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2563 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2564 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2565 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2567 typedef struct
2569 bfd_vma data;
2570 enum stub_insn_type type;
2571 unsigned int r_type;
2572 int reloc_addend;
2573 } insn_sequence;
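/* Illustrative sketch, not part of the original file: a stub's byte size
   follows from its template, counting 2 bytes for each THUMB16_TYPE
   entry and 4 bytes for THUMB32_TYPE, ARM_TYPE and DATA_TYPE entries.
   The helper name is hypothetical; the real computation lives elsewhere
   in this file.  */
#if 0
static unsigned int
example_template_size_in_bytes (const insn_sequence *tmpl, int count)
{
  unsigned int size = 0;
  int i;

  for (i = 0; i < count; i++)
    size += (tmpl[i].type == THUMB16_TYPE) ? 2 : 4;

  return size;
}
#endif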
2575 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2576 to reach the stub if necessary. */
2577 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2579 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2580 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2583 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2584 available. */
2585 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2587 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2588 ARM_INSN (0xe12fff1c), /* bx ip */
2589 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2592 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2593 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2595 THUMB16_INSN (0xb401), /* push {r0} */
2596 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2597 THUMB16_INSN (0x4684), /* mov ip, r0 */
2598 THUMB16_INSN (0xbc01), /* pop {r0} */
2599 THUMB16_INSN (0x4760), /* bx ip */
2600 THUMB16_INSN (0xbf00), /* nop */
2601 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2604 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */
2605 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2607 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */
2608 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(x) */
2611 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2612 M-profile architectures. */
2613 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
2615 THUMB32_MOVW (0xf2400c00), /* mov.w ip, R_ARM_MOVW_ABS_NC */
2616 THUMB32_MOVT (0xf2c00c00), /* movt ip, R_ARM_MOVT_ABS << 16 */
2617 THUMB16_INSN (0x4760), /* bx ip */
2620 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2621 allowed. */
2622 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2624 THUMB16_INSN (0x4778), /* bx pc */
2625 THUMB16_INSN (0x46c0), /* nop */
2626 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2627 ARM_INSN (0xe12fff1c), /* bx ip */
2628 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2631 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2632 available. */
2633 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2635 THUMB16_INSN (0x4778), /* bx pc */
2636 THUMB16_INSN (0x46c0), /* nop */
2637 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2638 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2641 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2642 one, when the destination is close enough. */
2643 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2645 THUMB16_INSN (0x4778), /* bx pc */
2646 THUMB16_INSN (0x46c0), /* nop */
2647 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2650 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2651 blx to reach the stub if necessary. */
2652 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2654 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2655 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2656 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2659 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2660 blx to reach the stub if necessary. We can not add into pc;
2661 it is not guaranteed to mode switch (different in ARMv6 and
2662 ARMv7). */
2663 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2665 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2666 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2667 ARM_INSN (0xe12fff1c), /* bx ip */
2668 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2671 /* V4T ARM -> ARM long branch stub, PIC. */
2672 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2674 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2675 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2676 ARM_INSN (0xe12fff1c), /* bx ip */
2677 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2680 /* V4T Thumb -> ARM long branch stub, PIC. */
2681 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2683 THUMB16_INSN (0x4778), /* bx pc */
2684 THUMB16_INSN (0x46c0), /* nop */
2685 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2686 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2687 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2690 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2691 architectures. */
2692 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2694 THUMB16_INSN (0xb401), /* push {r0} */
2695 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2696 THUMB16_INSN (0x46fc), /* mov ip, pc */
2697 THUMB16_INSN (0x4484), /* add ip, r0 */
2698 THUMB16_INSN (0xbc01), /* pop {r0} */
2699 THUMB16_INSN (0x4760), /* bx ip */
2700 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2703 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2704 allowed. */
2705 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2707 THUMB16_INSN (0x4778), /* bx pc */
2708 THUMB16_INSN (0x46c0), /* nop */
2709 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2710 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2711 ARM_INSN (0xe12fff1c), /* bx ip */
2712 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2715 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2716 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2717 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2719 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2720 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2721 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2724 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a
2725 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2726 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2728 THUMB16_INSN (0x4778), /* bx pc */
2729 THUMB16_INSN (0x46c0), /* nop */
2730 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2731 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2732 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2735 /* NaCl ARM -> ARM long branch stub. */
2736 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2738 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2739 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2740 ARM_INSN (0xe12fff1c), /* bx ip */
2741 ARM_INSN (0xe320f000), /* nop */
2742 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2743 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2744 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2745 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2748 /* NaCl ARM -> ARM long branch stub, PIC. */
2749 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2751 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2752 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2753 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2754 ARM_INSN (0xe12fff1c), /* bx ip */
2755 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2756 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
2757 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2758 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2761 /* Stub used for transition to secure state (aka SG veneer). */
2762 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
2764 THUMB32_INSN (0xe97fe97f), /* sg. */
2765 THUMB32_B_INSN (0xf000b800, -4), /* b.w original_branch_dest. */
2769 /* Cortex-A8 erratum-workaround stubs. */
2771 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2772 can't use a conditional branch to reach this stub). */
2774 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2776 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2777 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2778 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2781 /* Stub used for b.w and bl.w instructions. */
2783 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2785 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2788 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2790 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2793 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2794 instruction (which switches to ARM mode) to point to this stub. Jump to the
2795 real destination using an ARM-mode branch. */
2797 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2799 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2802 /* For each section group there can be a specially created linker section
2803 to hold the stubs for that group. The name of the stub section is based
2804 upon the name of another section within that group with the suffix below
2805 applied.
2807 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2808 create what appeared to be a linker stub section when it actually
2809 contained user code/data. For example, consider this fragment:
2811 const char * stubborn_problems[] = { "np" };
2813 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2814 section called:
2816 .data.rel.local.stubborn_problems
2818 This then causes problems in elf32_arm_build_stubs() as it triggers:
2820 // Ignore non-stub sections.
2821 if (!strstr (stub_sec->name, STUB_SUFFIX))
2822 continue;
2824 And so the section would be ignored instead of being processed. Hence
2825 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2826 C identifier. */
2827 #define STUB_SUFFIX ".__stub"
2829 /* One entry per long/short branch stub defined above. */
2830 #define DEF_STUBS \
2831 DEF_STUB(long_branch_any_any) \
2832 DEF_STUB(long_branch_v4t_arm_thumb) \
2833 DEF_STUB(long_branch_thumb_only) \
2834 DEF_STUB(long_branch_v4t_thumb_thumb) \
2835 DEF_STUB(long_branch_v4t_thumb_arm) \
2836 DEF_STUB(short_branch_v4t_thumb_arm) \
2837 DEF_STUB(long_branch_any_arm_pic) \
2838 DEF_STUB(long_branch_any_thumb_pic) \
2839 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2840 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2841 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2842 DEF_STUB(long_branch_thumb_only_pic) \
2843 DEF_STUB(long_branch_any_tls_pic) \
2844 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2845 DEF_STUB(long_branch_arm_nacl) \
2846 DEF_STUB(long_branch_arm_nacl_pic) \
2847 DEF_STUB(cmse_branch_thumb_only) \
2848 DEF_STUB(a8_veneer_b_cond) \
2849 DEF_STUB(a8_veneer_b) \
2850 DEF_STUB(a8_veneer_bl) \
2851 DEF_STUB(a8_veneer_blx) \
2852 DEF_STUB(long_branch_thumb2_only) \
2853 DEF_STUB(long_branch_thumb2_only_pure)
2855 #define DEF_STUB(x) arm_stub_##x,
2856 enum elf32_arm_stub_type
2858 arm_stub_none,
2859 DEF_STUBS
2860 max_stub_type
2862 #undef DEF_STUB
2864 /* Note the first a8_veneer type. */
2865 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2867 typedef struct
2869 const insn_sequence* template_sequence;
2870 int template_size;
2871 } stub_def;
2873 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2874 static const stub_def stub_definitions[] =
2876 {NULL, 0},
2877 DEF_STUBS
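/* Illustrative expansion, not part of the original file: with the two
   DEF_STUB definitions above, a DEF_STUBS line such as
   DEF_STUB(long_branch_any_any) contributes

     arm_stub_long_branch_any_any,                        -- to the enum
     {elf32_arm_stub_long_branch_any_any,
      ARRAY_SIZE (elf32_arm_stub_long_branch_any_any)},   -- to this table

   so an enum value indexes its own template here, with slot 0 being the
   {NULL, 0} placeholder for arm_stub_none.  */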
2880 struct elf32_arm_stub_hash_entry
2882 /* Base hash table entry structure. */
2883 struct bfd_hash_entry root;
2885 /* The stub section. */
2886 asection *stub_sec;
2888 /* Offset within stub_sec of the beginning of this stub. */
2889 bfd_vma stub_offset;
2891 /* Given the symbol's value and its section we can determine its final
2892 value when building the stubs (so the stub knows where to jump). */
2893 bfd_vma target_value;
2894 asection *target_section;
2896 /* Same as above but for the source of the branch to the stub. Used for
2897 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2898 such, source section does not need to be recorded since Cortex-A8 erratum
2899 workaround stubs are only generated when both source and target are in the
2900 same section. */
2901 bfd_vma source_value;
2903 /* The instruction which caused this stub to be generated (only valid for
2904 Cortex-A8 erratum workaround stubs at present). */
2905 unsigned long orig_insn;
2907 /* The stub type. */
2908 enum elf32_arm_stub_type stub_type;
2909 /* Its encoding size in bytes. */
2910 int stub_size;
2911 /* Its template. */
2912 const insn_sequence *stub_template;
2913 /* The size of the template (number of entries). */
2914 int stub_template_size;
2916 /* The symbol table entry, if any, that this was derived from. */
2917 struct elf32_arm_link_hash_entry *h;
2919 /* Type of branch. */
2920 enum arm_st_branch_type branch_type;
2922 /* Where this stub is being called from, or, in the case of combined
2923 stub sections, the first input section in the group. */
2924 asection *id_sec;
2926 /* The name for the local symbol at the start of this stub. The
2927 stub name in the hash table has to be unique; this does not, so
2928 it can be friendlier. */
2929 char *output_name;
2932 /* Used to build a map of a section. This is required for mixed-endian
2933 code/data. */
2935 typedef struct elf32_elf_section_map
2937 bfd_vma vma;
2938 char type;
2940 elf32_arm_section_map;
2942 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2944 typedef enum
2946 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2947 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2948 VFP11_ERRATUM_ARM_VENEER,
2949 VFP11_ERRATUM_THUMB_VENEER
2951 elf32_vfp11_erratum_type;
2953 typedef struct elf32_vfp11_erratum_list
2955 struct elf32_vfp11_erratum_list *next;
2956 bfd_vma vma;
2957 union
2959 struct
2961 struct elf32_vfp11_erratum_list *veneer;
2962 unsigned int vfp_insn;
2963 } b;
2964 struct
2966 struct elf32_vfp11_erratum_list *branch;
2967 unsigned int id;
2968 } v;
2969 } u;
2970 elf32_vfp11_erratum_type type;
2972 elf32_vfp11_erratum_list;
2974 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2975 veneer. */
2976 typedef enum
2978 STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2979 STM32L4XX_ERRATUM_VENEER
2981 elf32_stm32l4xx_erratum_type;
2983 typedef struct elf32_stm32l4xx_erratum_list
2985 struct elf32_stm32l4xx_erratum_list *next;
2986 bfd_vma vma;
2987 union
2989 struct
2991 struct elf32_stm32l4xx_erratum_list *veneer;
2992 unsigned int insn;
2993 } b;
2994 struct
2996 struct elf32_stm32l4xx_erratum_list *branch;
2997 unsigned int id;
2998 } v;
2999 } u;
3000 elf32_stm32l4xx_erratum_type type;
3002 elf32_stm32l4xx_erratum_list;
3004 typedef enum
3006 DELETE_EXIDX_ENTRY,
3007 INSERT_EXIDX_CANTUNWIND_AT_END
3009 arm_unwind_edit_type;
3011 /* A (sorted) list of edits to apply to an unwind table. */
3012 typedef struct arm_unwind_table_edit
3014 arm_unwind_edit_type type;
3015 /* Note: we sometimes want to insert an unwind entry corresponding to a
3016 section different from the one we're currently writing out, so record the
3017 (text) section this edit relates to here. */
3018 asection *linked_section;
3019 unsigned int index;
3020 struct arm_unwind_table_edit *next;
3022 arm_unwind_table_edit;
3024 typedef struct _arm_elf_section_data
3026 /* Information about mapping symbols. */
3027 struct bfd_elf_section_data elf;
3028 unsigned int mapcount;
3029 unsigned int mapsize;
3030 elf32_arm_section_map *map;
3031 /* Information about CPU errata. */
3032 unsigned int erratumcount;
3033 elf32_vfp11_erratum_list *erratumlist;
3034 unsigned int stm32l4xx_erratumcount;
3035 elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
3036 unsigned int additional_reloc_count;
3037 /* Information about unwind tables. */
3038 union
3040 /* Unwind info attached to a text section. */
3041 struct
3043 asection *arm_exidx_sec;
3044 } text;
3046 /* Unwind info attached to an .ARM.exidx section. */
3047 struct
3049 arm_unwind_table_edit *unwind_edit_list;
3050 arm_unwind_table_edit *unwind_edit_tail;
3051 } exidx;
3052 } u;
3054 _arm_elf_section_data;
3056 #define elf32_arm_section_data(sec) \
3057 ((_arm_elf_section_data *) elf_section_data (sec))
3059 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3060 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3061 so may be created multiple times: we use an array of these entries whilst
3062 relaxing which we can refresh easily, then create stubs for each potentially
3063 erratum-triggering instruction once we've settled on a solution. */
3065 struct a8_erratum_fix
3067 bfd *input_bfd;
3068 asection *section;
3069 bfd_vma offset;
3070 bfd_vma target_offset;
3071 unsigned long orig_insn;
3072 char *stub_name;
3073 enum elf32_arm_stub_type stub_type;
3074 enum arm_st_branch_type branch_type;
3077 /* A table of relocs applied to branches which might trigger Cortex-A8
3078 erratum. */
3080 struct a8_erratum_reloc
3082 bfd_vma from;
3083 bfd_vma destination;
3084 struct elf32_arm_link_hash_entry *hash;
3085 const char *sym_name;
3086 unsigned int r_type;
3087 enum arm_st_branch_type branch_type;
3088 bfd_boolean non_a8_stub;
3091 /* The size of the thread control block. */
3092 #define TCB_SIZE 8
3094 /* ARM-specific information about a PLT entry, over and above the usual
3095 gotplt_union. */
3096 struct arm_plt_info
3098 /* We reference count Thumb references to a PLT entry separately,
3099 so that we can emit the Thumb trampoline only if needed. */
3100 bfd_signed_vma thumb_refcount;
3102 /* Some references from Thumb code may be eliminated by BL->BLX
3103 conversion, so record them separately. */
3104 bfd_signed_vma maybe_thumb_refcount;
3106 /* How many of the recorded PLT accesses were from non-call relocations.
3107 This information is useful when deciding whether anything takes the
3108 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
3109 non-call references to the function should resolve directly to the
3110 real runtime target. */
3111 unsigned int noncall_refcount;
3113 /* Since PLT entries have variable size if the Thumb prologue is
3114 used, we need to record the index into .got.plt instead of
3115 recomputing it from the PLT offset. */
3116 bfd_signed_vma got_offset;
3119 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
3120 struct arm_local_iplt_info
3122 /* The information that is usually found in the generic ELF part of
3123 the hash table entry. */
3124 union gotplt_union root;
3126 /* The information that is usually found in the ARM-specific part of
3127 the hash table entry. */
3128 struct arm_plt_info arm;
3130 /* A list of all potential dynamic relocations against this symbol. */
3131 struct elf_dyn_relocs *dyn_relocs;
3134 /* Structure to handle FDPIC support for local functions. */
3135 struct fdpic_local {
3136 unsigned int funcdesc_cnt;
3137 unsigned int gotofffuncdesc_cnt;
3138 int funcdesc_offset;
3141 struct elf_arm_obj_tdata
3143 struct elf_obj_tdata root;
3145 /* tls_type for each local got entry. */
3146 char *local_got_tls_type;
3148 /* GOTPLT entries for TLS descriptors. */
3149 bfd_vma *local_tlsdesc_gotent;
3151 /* Information for local symbols that need entries in .iplt. */
3152 struct arm_local_iplt_info **local_iplt;
3154 /* Zero to warn when linking objects with incompatible enum sizes. */
3155 int no_enum_size_warning;
3157 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
3158 int no_wchar_size_warning;
3160 /* Maintains FDPIC counters and funcdesc info. */
3161 struct fdpic_local *local_fdpic_cnts;
3164 #define elf_arm_tdata(bfd) \
3165 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3167 #define elf32_arm_local_got_tls_type(bfd) \
3168 (elf_arm_tdata (bfd)->local_got_tls_type)
3170 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3171 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3173 #define elf32_arm_local_iplt(bfd) \
3174 (elf_arm_tdata (bfd)->local_iplt)
3176 #define elf32_arm_local_fdpic_cnts(bfd) \
3177 (elf_arm_tdata (bfd)->local_fdpic_cnts)
3179 #define is_arm_elf(bfd) \
3180 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3181 && elf_tdata (bfd) != NULL \
3182 && elf_object_id (bfd) == ARM_ELF_DATA)
3184 static bfd_boolean
3185 elf32_arm_mkobject (bfd *abfd)
3187 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
3188 ARM_ELF_DATA);
3191 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3193 /* Structure to handle FDPIC support for extern functions. */
3194 struct fdpic_global {
3195 unsigned int gotofffuncdesc_cnt;
3196 unsigned int gotfuncdesc_cnt;
3197 unsigned int funcdesc_cnt;
3198 int funcdesc_offset;
3199 int gotfuncdesc_offset;
3202 /* Arm ELF linker hash entry. */
3203 struct elf32_arm_link_hash_entry
3205 struct elf_link_hash_entry root;
3207 /* Track dynamic relocs copied for this symbol. */
3208 struct elf_dyn_relocs *dyn_relocs;
3210 /* ARM-specific PLT information. */
3211 struct arm_plt_info plt;
3213 #define GOT_UNKNOWN 0
3214 #define GOT_NORMAL 1
3215 #define GOT_TLS_GD 2
3216 #define GOT_TLS_IE 4
3217 #define GOT_TLS_GDESC 8
3218 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
3219 unsigned int tls_type : 8;
3221 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
3222 unsigned int is_iplt : 1;
3224 unsigned int unused : 23;
3226 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3227 starting at the end of the jump table. */
3228 bfd_vma tlsdesc_got;
3230 /* The symbol marking the real symbol location for exported thumb
3231 symbols with Arm stubs. */
3232 struct elf_link_hash_entry *export_glue;
3234 /* A pointer to the most recently used stub hash entry against this
3235 symbol. */
3236 struct elf32_arm_stub_hash_entry *stub_cache;
3238 /* Counter for FDPIC relocations against this symbol. */
3239 struct fdpic_global fdpic_cnts;
3242 /* Traverse an arm ELF linker hash table. */
3243 #define elf32_arm_link_hash_traverse(table, func, info) \
3244 (elf_link_hash_traverse \
3245 (&(table)->root, \
3246 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
3247 (info)))
3249 /* Get the ARM elf linker hash table from a link_info structure. */
3250 #define elf32_arm_hash_table(info) \
3251 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
3252 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
3254 #define arm_stub_hash_lookup(table, string, create, copy) \
3255 ((struct elf32_arm_stub_hash_entry *) \
3256 bfd_hash_lookup ((table), (string), (create), (copy)))
3258 /* Array to keep track of which stub sections have been created, and
3259 information on stub grouping. */
3260 struct map_stub
3262 /* This is the section to which stubs in the group will be
3263 attached. */
3264 asection *link_sec;
3265 /* The stub section. */
3266 asection *stub_sec;
3269 #define elf32_arm_compute_jump_table_size(htab) \
3270 ((htab)->next_tls_desc_index * 4)
3272 /* ARM ELF linker hash table. */
3273 struct elf32_arm_link_hash_table
3275 /* The main hash table. */
3276 struct elf_link_hash_table root;
3278 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3279 bfd_size_type thumb_glue_size;
3281 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3282 bfd_size_type arm_glue_size;
3284 /* The size in bytes of section containing the ARMv4 BX veneers. */
3285 bfd_size_type bx_glue_size;
3287 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3288 veneer has been populated. */
3289 bfd_vma bx_glue_offset[15];
3291 /* The size in bytes of the section containing glue for VFP11 erratum
3292 veneers. */
3293 bfd_size_type vfp11_erratum_glue_size;
3295 /* The size in bytes of the section containing glue for STM32L4XX erratum
3296 veneers. */
3297 bfd_size_type stm32l4xx_erratum_glue_size;
3299 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3300 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3301 elf32_arm_write_section(). */
3302 struct a8_erratum_fix *a8_erratum_fixes;
3303 unsigned int num_a8_erratum_fixes;
3305 /* An arbitrary input BFD chosen to hold the glue sections. */
3306 bfd * bfd_of_glue_owner;
3308 /* Nonzero to output a BE8 image. */
3309 int byteswap_code;
3311 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3312 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3313 int target1_is_rel;
3315 /* The relocation to use for R_ARM_TARGET2 relocations. */
3316 int target2_reloc;
3318 /* 0 = Ignore R_ARM_V4BX.
3319 1 = Convert BX to MOV PC.
3320 2 = Generate v4 interworking stubs. */
3321 int fix_v4bx;
3323 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3324 int fix_cortex_a8;
3326 /* Whether we should fix the ARM1176 BLX immediate issue. */
3327 int fix_arm1176;
3329 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3330 int use_blx;
3332 /* What sort of code sequences we should look for which may trigger the
3333 VFP11 denorm erratum. */
3334 bfd_arm_vfp11_fix vfp11_fix;
3336 /* Global counter for the number of fixes we have emitted. */
3337 int num_vfp11_fixes;
3339 /* What sort of code sequences we should look for which may trigger the
3340 STM32L4XX erratum. */
3341 bfd_arm_stm32l4xx_fix stm32l4xx_fix;
3343 /* Global counter for the number of fixes we have emitted. */
3344 int num_stm32l4xx_fixes;
3346 /* Nonzero to force PIC branch veneers. */
3347 int pic_veneer;
3349 /* The number of bytes in the initial entry in the PLT. */
3350 bfd_size_type plt_header_size;
3352 /* The number of bytes in the subsequent PLT entries. */
3353 bfd_size_type plt_entry_size;
3355 /* True if the target system is VxWorks. */
3356 int vxworks_p;
3358 /* True if the target system is Symbian OS. */
3359 int symbian_p;
3361 /* True if the target system is Native Client. */
3362 int nacl_p;
3364 /* True if the target uses REL relocations. */
3365 bfd_boolean use_rel;
3367 /* Nonzero if import library must be a secure gateway import library
3368 as per ARMv8-M Security Extensions. */
3369 int cmse_implib;
3371 /* The import library whose symbols' addresses must remain stable in
3372 the generated import library. */
3373 bfd *in_implib_bfd;
3375 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3376 bfd_vma next_tls_desc_index;
3378 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3379 bfd_vma num_tls_desc;
3381 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3382 asection *srelplt2;
3384 /* The offset into splt of the PLT entry for the TLS descriptor
3385 resolver. Special values are 0, if not necessary (or not found
3386 to be necessary yet), and -1 if needed but not determined
3387 yet. */
3388 bfd_vma dt_tlsdesc_plt;
3390 /* The offset into sgot of the GOT entry used by the PLT entry
3391 above. */
3392 bfd_vma dt_tlsdesc_got;
3394 /* Offset in .plt section of tls_arm_trampoline. */
3395 bfd_vma tls_trampoline;
3397 /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
3398 union
3400 bfd_signed_vma refcount;
3401 bfd_vma offset;
3402 } tls_ldm_got;
3404 /* Small local sym cache. */
3405 struct sym_cache sym_cache;
3407 /* For convenience in allocate_dynrelocs. */
3408 bfd * obfd;
3410 /* The amount of space used by the reserved portion of the sgotplt
3411 section, plus whatever space is used by the jump slots. */
3412 bfd_vma sgotplt_jump_table_size;
3414 /* The stub hash table. */
3415 struct bfd_hash_table stub_hash_table;
3417 /* Linker stub bfd. */
3418 bfd *stub_bfd;
3420 /* Linker call-backs. */
3421 asection * (*add_stub_section) (const char *, asection *, asection *,
3422 unsigned int);
3423 void (*layout_sections_again) (void);
3425 /* Array to keep track of which stub sections have been created, and
3426 information on stub grouping. */
3427 struct map_stub *stub_group;
3429 /* Input stub section holding secure gateway veneers. */
3430 asection *cmse_stub_sec;
3432 /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3433 start to be allocated. */
3434 bfd_vma new_cmse_stub_offset;
3436 /* Number of elements in stub_group. */
3437 unsigned int top_id;
3439 /* Assorted information used by elf32_arm_size_stubs. */
3440 unsigned int bfd_count;
3441 unsigned int top_index;
3442 asection **input_list;
3444 /* True if the target system uses FDPIC. */
3445 int fdpic_p;
3447 /* Fixup section. Used for FDPIC. */
3448 asection *srofixup;
3451 /* Add an FDPIC read-only fixup. */
3452 static void
3453 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3455 bfd_vma fixup_offset;
3457 fixup_offset = srofixup->reloc_count++ * 4;
3458 BFD_ASSERT (fixup_offset < srofixup->size);
3459 bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
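
/* Illustrative sketch only; the helper name below is hypothetical and not
   part of this file's interface.  It shows how the rofixup routine above is
   typically used: an FDPIC function descriptor occupies two consecutive GOT
   words, so both word addresses are recorded as separate 4-byte fixups.  */

static void ATTRIBUTE_UNUSED
arm_elf_add_funcdesc_rofixups_example (bfd *output_bfd, asection *srofixup,
                                       bfd_vma funcdesc_addr)
{
  /* One fixup per word the dynamic loader must relocate.  */
  arm_elf_add_rofixup (output_bfd, srofixup, funcdesc_addr);
  arm_elf_add_rofixup (output_bfd, srofixup, funcdesc_addr + 4);
}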
3462 static inline int
3463 ctz (unsigned int mask)
3465 #if GCC_VERSION >= 3004
3466 return __builtin_ctz (mask);
3467 #else
3468 unsigned int i;
3470 for (i = 0; i < 8 * sizeof (mask); i++)
3472 if (mask & 0x1)
3473 break;
3474 mask = (mask >> 1);
3476 return i;
3477 #endif
3480 static inline int
3481 elf32_arm_popcount (unsigned int mask)
3483 #if GCC_VERSION >= 3004
3484 return __builtin_popcount (mask);
3485 #else
3486 unsigned int i;
3487 int sum = 0;
3489 for (i = 0; i < 8 * sizeof (mask); i++)
3491 if (mask & 0x1)
3492 sum++;
3493 mask = (mask >> 1);
3495 return sum;
3496 #endif
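
/* Illustrative self-check only, not used anywhere.  For mask 0x28
   (binary 101000) the helpers above return ctz == 3 and popcount == 2,
   whichever of the builtin or fallback implementations is compiled in.  */

static void ATTRIBUTE_UNUSED
elf32_arm_bitcount_example (void)
{
  BFD_ASSERT (ctz (0x28) == 3);
  BFD_ASSERT (elf32_arm_popcount (0x28) == 2);
}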
3499 static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3500 asection *sreloc, Elf_Internal_Rela *rel);
3502 static void
3503 arm_elf_fill_funcdesc(bfd *output_bfd,
3504 struct bfd_link_info *info,
3505 int *funcdesc_offset,
3506 int dynindx,
3507 int offset,
3508 bfd_vma addr,
3509 bfd_vma dynreloc_value,
3510 bfd_vma seg)
3512 if ((*funcdesc_offset & 1) == 0)
3514 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
3515 asection *sgot = globals->root.sgot;
3517 if (bfd_link_pic(info))
3519 asection *srelgot = globals->root.srelgot;
3520 Elf_Internal_Rela outrel;
3522 outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
3523 outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
3524 outrel.r_addend = 0;
3526 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
3527 bfd_put_32 (output_bfd, addr, sgot->contents + offset);
3528 bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
3530 else
3532 struct elf_link_hash_entry *hgot = globals->root.hgot;
3533 bfd_vma got_value = hgot->root.u.def.value
3534 + hgot->root.u.def.section->output_section->vma
3535 + hgot->root.u.def.section->output_offset;
3537 arm_elf_add_rofixup(output_bfd, globals->srofixup,
3538 sgot->output_section->vma + sgot->output_offset
3539 + offset);
3540 arm_elf_add_rofixup(output_bfd, globals->srofixup,
3541 sgot->output_section->vma + sgot->output_offset
3542 + offset + 4);
3543 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
3544 bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
3546 *funcdesc_offset |= 1;
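
/* Illustrative sketch with hypothetical helper names.  Function descriptor
   offsets are at least 4-byte aligned, so the routine above borrows bit 0 of
   *funcdesc_offset as an "already filled in" flag; recovering the real GOT
   offset is just a matter of masking that bit off again.  */

static int ATTRIBUTE_UNUSED
arm_elf_funcdesc_filled_p_example (int funcdesc_offset)
{
  return (funcdesc_offset & 1) != 0;
}

static int ATTRIBUTE_UNUSED
arm_elf_funcdesc_offset_example (int funcdesc_offset)
{
  return funcdesc_offset & ~1;
}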
3550 /* Create an entry in an ARM ELF linker hash table. */
3552 static struct bfd_hash_entry *
3553 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3554 struct bfd_hash_table * table,
3555 const char * string)
3557 struct elf32_arm_link_hash_entry * ret =
3558 (struct elf32_arm_link_hash_entry *) entry;
3560 /* Allocate the structure if it has not already been allocated by a
3561 subclass. */
3562 if (ret == NULL)
3563 ret = (struct elf32_arm_link_hash_entry *)
3564 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3565 if (ret == NULL)
3566 return (struct bfd_hash_entry *) ret;
3568 /* Call the allocation method of the superclass. */
3569 ret = ((struct elf32_arm_link_hash_entry *)
3570 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3571 table, string));
3572 if (ret != NULL)
3574 ret->dyn_relocs = NULL;
3575 ret->tls_type = GOT_UNKNOWN;
3576 ret->tlsdesc_got = (bfd_vma) -1;
3577 ret->plt.thumb_refcount = 0;
3578 ret->plt.maybe_thumb_refcount = 0;
3579 ret->plt.noncall_refcount = 0;
3580 ret->plt.got_offset = -1;
3581 ret->is_iplt = FALSE;
3582 ret->export_glue = NULL;
3584 ret->stub_cache = NULL;
3586 ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
3587 ret->fdpic_cnts.gotfuncdesc_cnt = 0;
3588 ret->fdpic_cnts.funcdesc_cnt = 0;
3589 ret->fdpic_cnts.funcdesc_offset = -1;
3590 ret->fdpic_cnts.gotfuncdesc_offset = -1;
3593 return (struct bfd_hash_entry *) ret;
3596 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3597 symbols. */
3599 static bfd_boolean
3600 elf32_arm_allocate_local_sym_info (bfd *abfd)
3602 if (elf_local_got_refcounts (abfd) == NULL)
3604 bfd_size_type num_syms;
3605 bfd_size_type size;
3606 char *data;
3608 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3609 size = num_syms * (sizeof (bfd_signed_vma)
3610 + sizeof (struct arm_local_iplt_info *)
3611 + sizeof (bfd_vma)
3612 + sizeof (char)
3613 + sizeof (struct fdpic_local));
3614 data = bfd_zalloc (abfd, size);
3615 if (data == NULL)
3616 return FALSE;
3618 elf32_arm_local_fdpic_cnts (abfd) = (struct fdpic_local *) data;
3619 data += num_syms * sizeof (struct fdpic_local);
3621 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3622 data += num_syms * sizeof (bfd_signed_vma);
3624 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3625 data += num_syms * sizeof (struct arm_local_iplt_info *);
3627 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3628 data += num_syms * sizeof (bfd_vma);
3630 elf32_arm_local_got_tls_type (abfd) = data;
3632 return TRUE;
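
/* Illustrative sketch only.  The allocation above is a single bfd_zalloc
   carved into five parallel per-symbol arrays (FDPIC counters, GOT
   refcounts, iplt pointers, TLS descriptor GOT entries and TLS type bytes),
   so its total size is simply the number of local symbols times the sum of
   the per-slice element sizes.  */

static bfd_size_type ATTRIBUTE_UNUSED
elf32_arm_local_sym_info_size_example (bfd_size_type num_syms)
{
  return num_syms * (sizeof (struct fdpic_local)
                     + sizeof (bfd_signed_vma)
                     + sizeof (struct arm_local_iplt_info *)
                     + sizeof (bfd_vma)
                     + sizeof (char));
}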
3635 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3636 to input bfd ABFD. Create the information if it doesn't already exist.
3637 Return null if an allocation fails. */
3639 static struct arm_local_iplt_info *
3640 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3642 struct arm_local_iplt_info **ptr;
3644 if (!elf32_arm_allocate_local_sym_info (abfd))
3645 return NULL;
3647 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3648 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3649 if (*ptr == NULL)
3650 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3651 return *ptr;
3654 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3655 in ABFD's symbol table. If the symbol is global, H points to its
3656 hash table entry, otherwise H is null.
3658 Return true if the symbol does have PLT information. When returning
3659 true, point *ROOT_PLT at the target-independent reference count/offset
3660 union and *ARM_PLT at the ARM-specific information. */
3662 static bfd_boolean
3663 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3664 struct elf32_arm_link_hash_entry *h,
3665 unsigned long r_symndx, union gotplt_union **root_plt,
3666 struct arm_plt_info **arm_plt)
3668 struct arm_local_iplt_info *local_iplt;
3670 if (globals->root.splt == NULL && globals->root.iplt == NULL)
3671 return FALSE;
3673 if (h != NULL)
3675 *root_plt = &h->root.plt;
3676 *arm_plt = &h->plt;
3677 return TRUE;
3680 if (elf32_arm_local_iplt (abfd) == NULL)
3681 return FALSE;
3683 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3684 if (local_iplt == NULL)
3685 return FALSE;
3687 *root_plt = &local_iplt->root;
3688 *arm_plt = &local_iplt->arm;
3689 return TRUE;
3692 static bfd_boolean using_thumb_only (struct elf32_arm_link_hash_table *globals);
3694 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3695 before it. */
3697 static bfd_boolean
3698 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3699 struct arm_plt_info *arm_plt)
3701 struct elf32_arm_link_hash_table *htab;
3703 htab = elf32_arm_hash_table (info);
3705 return (!using_thumb_only(htab) && (arm_plt->thumb_refcount != 0
3706 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3709 /* Return a pointer to the head of the dynamic reloc list that should
3710 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3711 ABFD's symbol table. Return null if an error occurs. */
3713 static struct elf_dyn_relocs **
3714 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3715 Elf_Internal_Sym *isym)
3717 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3719 struct arm_local_iplt_info *local_iplt;
3721 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3722 if (local_iplt == NULL)
3723 return NULL;
3724 return &local_iplt->dyn_relocs;
3726 else
3728 /* Track dynamic relocs needed for local syms too.
3729 We really need local syms available to do this
3730 easily. Oh well. */
3731 asection *s;
3732 void *vpp;
3734 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3735 if (s == NULL)
3736 abort ();
3738 vpp = &elf_section_data (s)->local_dynrel;
3739 return (struct elf_dyn_relocs **) vpp;
3743 /* Initialize an entry in the stub hash table. */
3745 static struct bfd_hash_entry *
3746 stub_hash_newfunc (struct bfd_hash_entry *entry,
3747 struct bfd_hash_table *table,
3748 const char *string)
3750 /* Allocate the structure if it has not already been allocated by a
3751 subclass. */
3752 if (entry == NULL)
3754 entry = (struct bfd_hash_entry *)
3755 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3756 if (entry == NULL)
3757 return entry;
3760 /* Call the allocation method of the superclass. */
3761 entry = bfd_hash_newfunc (entry, table, string);
3762 if (entry != NULL)
3764 struct elf32_arm_stub_hash_entry *eh;
3766 /* Initialize the local fields. */
3767 eh = (struct elf32_arm_stub_hash_entry *) entry;
3768 eh->stub_sec = NULL;
3769 eh->stub_offset = (bfd_vma) -1;
3770 eh->source_value = 0;
3771 eh->target_value = 0;
3772 eh->target_section = NULL;
3773 eh->orig_insn = 0;
3774 eh->stub_type = arm_stub_none;
3775 eh->stub_size = 0;
3776 eh->stub_template = NULL;
3777 eh->stub_template_size = -1;
3778 eh->h = NULL;
3779 eh->id_sec = NULL;
3780 eh->output_name = NULL;
3783 return entry;
3786 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3787 shortcuts to them in our hash table. */
3789 static bfd_boolean
3790 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3792 struct elf32_arm_link_hash_table *htab;
3794 htab = elf32_arm_hash_table (info);
3795 if (htab == NULL)
3796 return FALSE;
3798 /* BPABI objects never have a GOT, or associated sections. */
3799 if (htab->symbian_p)
3800 return TRUE;
3802 if (! _bfd_elf_create_got_section (dynobj, info))
3803 return FALSE;
3805 /* Also create .rofixup. */
3806 if (htab->fdpic_p)
3808 htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
3809 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
3810 | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
3811 if (htab->srofixup == NULL || ! bfd_set_section_alignment (dynobj, htab->srofixup, 2))
3812 return FALSE;
3815 return TRUE;
3818 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3820 static bfd_boolean
3821 create_ifunc_sections (struct bfd_link_info *info)
3823 struct elf32_arm_link_hash_table *htab;
3824 const struct elf_backend_data *bed;
3825 bfd *dynobj;
3826 asection *s;
3827 flagword flags;
3829 htab = elf32_arm_hash_table (info);
3830 dynobj = htab->root.dynobj;
3831 bed = get_elf_backend_data (dynobj);
3832 flags = bed->dynamic_sec_flags;
3834 if (htab->root.iplt == NULL)
3836 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3837 flags | SEC_READONLY | SEC_CODE);
3838 if (s == NULL
3839 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3840 return FALSE;
3841 htab->root.iplt = s;
3844 if (htab->root.irelplt == NULL)
3846 s = bfd_make_section_anyway_with_flags (dynobj,
3847 RELOC_SECTION (htab, ".iplt"),
3848 flags | SEC_READONLY);
3849 if (s == NULL
3850 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3851 return FALSE;
3852 htab->root.irelplt = s;
3855 if (htab->root.igotplt == NULL)
3857 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3858 if (s == NULL
3859 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3860 return FALSE;
3861 htab->root.igotplt = s;
3863 return TRUE;
3866 /* Determine if we're dealing with a Thumb only architecture. */
3868 static bfd_boolean
3869 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3871 int arch;
3872 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3873 Tag_CPU_arch_profile);
3875 if (profile)
3876 return profile == 'M';
3878 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3880 /* Force return logic to be reviewed for each new architecture. */
3881 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3883 if (arch == TAG_CPU_ARCH_V6_M
3884 || arch == TAG_CPU_ARCH_V6S_M
3885 || arch == TAG_CPU_ARCH_V7E_M
3886 || arch == TAG_CPU_ARCH_V8M_BASE
3887 || arch == TAG_CPU_ARCH_V8M_MAIN
3888 || arch == TAG_CPU_ARCH_V8_1M_MAIN)
3889 return TRUE;
3891 return FALSE;
3894 /* Determine if we're dealing with a Thumb-2 object. */
3896 static bfd_boolean
3897 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3899 int arch;
3900 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3901 Tag_THUMB_ISA_use);
3903 if (thumb_isa)
3904 return thumb_isa == 2;
3906 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3908 /* Force return logic to be reviewed for each new architecture. */
3909 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3911 return (arch == TAG_CPU_ARCH_V6T2
3912 || arch == TAG_CPU_ARCH_V7
3913 || arch == TAG_CPU_ARCH_V7E_M
3914 || arch == TAG_CPU_ARCH_V8
3915 || arch == TAG_CPU_ARCH_V8R
3916 || arch == TAG_CPU_ARCH_V8M_MAIN
3917 || arch == TAG_CPU_ARCH_V8_1M_MAIN);
3920 /* Determine whether Thumb-2 BL instruction is available. */
3922 static bfd_boolean
3923 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3925 int arch =
3926 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3928 /* Force return logic to be reviewed for each new architecture. */
3929 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3931 /* Thumb-2 BL is available from ARMv6T2 onwards, including the M-profile architectures (e.g. ARMv6-M). */
3932 return (arch == TAG_CPU_ARCH_V6T2
3933 || arch >= TAG_CPU_ARCH_V7);
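
/* Illustrative sketch with a hypothetical helper name.  The attribute-driven
   predicate above is what later lets arm_type_of_stub choose between the
   Thumb-1 and Thumb-2 BL reach; the same choice can be written directly in
   terms of the THM_MAX_FWD_BRANCH_OFFSET and THM2_MAX_FWD_BRANCH_OFFSET
   limits defined earlier in this file.  */

static bfd_signed_vma ATTRIBUTE_UNUSED
thumb_bl_fwd_reach_example (struct elf32_arm_link_hash_table *globals)
{
  return (using_thumb2_bl (globals)
          ? THM2_MAX_FWD_BRANCH_OFFSET
          : THM_MAX_FWD_BRANCH_OFFSET);
}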
3936 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3937 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3938 hash table. */
3940 static bfd_boolean
3941 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3943 struct elf32_arm_link_hash_table *htab;
3945 htab = elf32_arm_hash_table (info);
3946 if (htab == NULL)
3947 return FALSE;
3949 if (!htab->root.sgot && !create_got_section (dynobj, info))
3950 return FALSE;
3952 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3953 return FALSE;
3955 if (htab->vxworks_p)
3957 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3958 return FALSE;
3960 if (bfd_link_pic (info))
3962 htab->plt_header_size = 0;
3963 htab->plt_entry_size
3964 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3966 else
3968 htab->plt_header_size
3969 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3970 htab->plt_entry_size
3971 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3974 if (elf_elfheader (dynobj))
3975 elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
3977 else
3979 /* PR ld/16017
3980 Test for thumb only architectures. Note - we cannot just call
3981 using_thumb_only() as the attributes in the output bfd have not been
3982 initialised at this point, so instead we use the input bfd. */
3983 bfd * saved_obfd = htab->obfd;
3985 htab->obfd = dynobj;
3986 if (using_thumb_only (htab))
3988 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
3989 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
3991 htab->obfd = saved_obfd;
3994 if (htab->fdpic_p) {
3995 htab->plt_header_size = 0;
3996 if (info->flags & DF_BIND_NOW)
3997 htab->plt_entry_size = 4 * (ARRAY_SIZE(elf32_arm_fdpic_plt_entry) - 5);
3998 else
3999 htab->plt_entry_size = 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry);
4002 if (!htab->root.splt
4003 || !htab->root.srelplt
4004 || !htab->root.sdynbss
4005 || (!bfd_link_pic (info) && !htab->root.srelbss))
4006 abort ();
4008 return TRUE;
4011 /* Copy the extra info we tack onto an elf_link_hash_entry. */
4013 static void
4014 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
4015 struct elf_link_hash_entry *dir,
4016 struct elf_link_hash_entry *ind)
4018 struct elf32_arm_link_hash_entry *edir, *eind;
4020 edir = (struct elf32_arm_link_hash_entry *) dir;
4021 eind = (struct elf32_arm_link_hash_entry *) ind;
4023 if (eind->dyn_relocs != NULL)
4025 if (edir->dyn_relocs != NULL)
4027 struct elf_dyn_relocs **pp;
4028 struct elf_dyn_relocs *p;
4030 /* Add reloc counts against the indirect sym to the direct sym
4031 list. Merge any entries against the same section. */
4032 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
4034 struct elf_dyn_relocs *q;
4036 for (q = edir->dyn_relocs; q != NULL; q = q->next)
4037 if (q->sec == p->sec)
4039 q->pc_count += p->pc_count;
4040 q->count += p->count;
4041 *pp = p->next;
4042 break;
4044 if (q == NULL)
4045 pp = &p->next;
4047 *pp = edir->dyn_relocs;
4050 edir->dyn_relocs = eind->dyn_relocs;
4051 eind->dyn_relocs = NULL;
4054 if (ind->root.type == bfd_link_hash_indirect)
4056 /* Copy over PLT info. */
4057 edir->plt.thumb_refcount += eind->plt.thumb_refcount;
4058 eind->plt.thumb_refcount = 0;
4059 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
4060 eind->plt.maybe_thumb_refcount = 0;
4061 edir->plt.noncall_refcount += eind->plt.noncall_refcount;
4062 eind->plt.noncall_refcount = 0;
4064 /* Copy FDPIC counters. */
4065 edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
4066 edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
4067 edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;
4069 /* We should only allocate a function to .iplt once the final
4070 symbol information is known. */
4071 BFD_ASSERT (!eind->is_iplt);
4073 if (dir->got.refcount <= 0)
4075 edir->tls_type = eind->tls_type;
4076 eind->tls_type = GOT_UNKNOWN;
4080 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
4083 /* Destroy an ARM elf linker hash table. */
4085 static void
4086 elf32_arm_link_hash_table_free (bfd *obfd)
4088 struct elf32_arm_link_hash_table *ret
4089 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4091 bfd_hash_table_free (&ret->stub_hash_table);
4092 _bfd_elf_link_hash_table_free (obfd);
4095 /* Create an ARM elf linker hash table. */
4097 static struct bfd_link_hash_table *
4098 elf32_arm_link_hash_table_create (bfd *abfd)
4100 struct elf32_arm_link_hash_table *ret;
4101 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
4103 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
4104 if (ret == NULL)
4105 return NULL;
4107 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
4108 elf32_arm_link_hash_newfunc,
4109 sizeof (struct elf32_arm_link_hash_entry),
4110 ARM_ELF_DATA))
4112 free (ret);
4113 return NULL;
4116 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
4117 ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
4118 #ifdef FOUR_WORD_PLT
4119 ret->plt_header_size = 16;
4120 ret->plt_entry_size = 16;
4121 #else
4122 ret->plt_header_size = 20;
4123 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
4124 #endif
4125 ret->use_rel = TRUE;
4126 ret->obfd = abfd;
4127 ret->fdpic_p = 0;
4129 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4130 sizeof (struct elf32_arm_stub_hash_entry)))
4132 _bfd_elf_link_hash_table_free (abfd);
4133 return NULL;
4135 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4137 return &ret->root.root;
4140 /* Determine what kind of NOPs are available. */
4142 static bfd_boolean
4143 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4145 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4146 Tag_CPU_arch);
4148 /* Force return logic to be reviewed for each new architecture. */
4149 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
4151 return (arch == TAG_CPU_ARCH_V6T2
4152 || arch == TAG_CPU_ARCH_V6K
4153 || arch == TAG_CPU_ARCH_V7
4154 || arch == TAG_CPU_ARCH_V8
4155 || arch == TAG_CPU_ARCH_V8R);
4158 static bfd_boolean
4159 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4161 switch (stub_type)
4163 case arm_stub_long_branch_thumb_only:
4164 case arm_stub_long_branch_thumb2_only:
4165 case arm_stub_long_branch_thumb2_only_pure:
4166 case arm_stub_long_branch_v4t_thumb_arm:
4167 case arm_stub_short_branch_v4t_thumb_arm:
4168 case arm_stub_long_branch_v4t_thumb_arm_pic:
4169 case arm_stub_long_branch_v4t_thumb_tls_pic:
4170 case arm_stub_long_branch_thumb_only_pic:
4171 case arm_stub_cmse_branch_thumb_only:
4172 return TRUE;
4173 case arm_stub_none:
4174 BFD_FAIL ();
4175 return FALSE;
4176 break;
4177 default:
4178 return FALSE;
4182 /* Determine the type of stub needed, if any, for a call. */
4184 static enum elf32_arm_stub_type
4185 arm_type_of_stub (struct bfd_link_info *info,
4186 asection *input_sec,
4187 const Elf_Internal_Rela *rel,
4188 unsigned char st_type,
4189 enum arm_st_branch_type *actual_branch_type,
4190 struct elf32_arm_link_hash_entry *hash,
4191 bfd_vma destination,
4192 asection *sym_sec,
4193 bfd *input_bfd,
4194 const char *name)
4196 bfd_vma location;
4197 bfd_signed_vma branch_offset;
4198 unsigned int r_type;
4199 struct elf32_arm_link_hash_table * globals;
4200 bfd_boolean thumb2, thumb2_bl, thumb_only;
4201 enum elf32_arm_stub_type stub_type = arm_stub_none;
4202 int use_plt = 0;
4203 enum arm_st_branch_type branch_type = *actual_branch_type;
4204 union gotplt_union *root_plt;
4205 struct arm_plt_info *arm_plt;
4206 int arch;
4207 int thumb2_movw;
4209 if (branch_type == ST_BRANCH_LONG)
4210 return stub_type;
4212 globals = elf32_arm_hash_table (info);
4213 if (globals == NULL)
4214 return stub_type;
4216 thumb_only = using_thumb_only (globals);
4217 thumb2 = using_thumb2 (globals);
4218 thumb2_bl = using_thumb2_bl (globals);
4220 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
4222 /* True for architectures that implement the thumb2 movw instruction. */
4223 thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);
4225 /* Determine where the call point is. */
4226 location = (input_sec->output_offset
4227 + input_sec->output_section->vma
4228 + rel->r_offset);
4230 r_type = ELF32_R_TYPE (rel->r_info);
4232 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
4233 are considering a function call relocation. */
4234 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4235 || r_type == R_ARM_THM_JUMP19)
4236 && branch_type == ST_BRANCH_TO_ARM)
4237 branch_type = ST_BRANCH_TO_THUMB;
4239 /* For TLS call relocs, it is the caller's responsibility to provide
4240 the address of the appropriate trampoline. */
4241 if (r_type != R_ARM_TLS_CALL
4242 && r_type != R_ARM_THM_TLS_CALL
4243 && elf32_arm_get_plt_info (input_bfd, globals, hash,
4244 ELF32_R_SYM (rel->r_info), &root_plt,
4245 &arm_plt)
4246 && root_plt->offset != (bfd_vma) -1)
4248 asection *splt;
4250 if (hash == NULL || hash->is_iplt)
4251 splt = globals->root.iplt;
4252 else
4253 splt = globals->root.splt;
4254 if (splt != NULL)
4256 use_plt = 1;
4258 /* Note when dealing with PLT entries: the main PLT stub is in
4259 ARM mode, so if the branch is in Thumb mode, another
4260 Thumb->ARM stub will be inserted later just before the ARM
4261 PLT stub. If a long branch stub is needed, we'll add a
4262 Thumb->Arm one and branch directly to the ARM PLT entry.
4263 Here, we have to check if a pre-PLT Thumb->ARM stub
4264 is needed and if it will be close enough. */
4266 destination = (splt->output_section->vma
4267 + splt->output_offset
4268 + root_plt->offset);
4269 st_type = STT_FUNC;
4271 /* Thumb branch/call to PLT: it can become a branch to ARM
4272 or to Thumb. We must perform the same checks and
4273 corrections as in elf32_arm_final_link_relocate. */
4274 if ((r_type == R_ARM_THM_CALL)
4275 || (r_type == R_ARM_THM_JUMP24))
4277 if (globals->use_blx
4278 && r_type == R_ARM_THM_CALL
4279 && !thumb_only)
4281 /* If the Thumb BLX instruction is available, convert
4282 the BL to a BLX instruction to call the ARM-mode
4283 PLT entry. */
4284 branch_type = ST_BRANCH_TO_ARM;
4286 else
4288 if (!thumb_only)
4289 /* Target the Thumb stub before the ARM PLT entry. */
4290 destination -= PLT_THUMB_STUB_SIZE;
4291 branch_type = ST_BRANCH_TO_THUMB;
4294 else
4296 branch_type = ST_BRANCH_TO_ARM;
4300 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
4301 BFD_ASSERT (st_type != STT_GNU_IFUNC);
4303 branch_offset = (bfd_signed_vma)(destination - location);
4305 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4306 || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
4308 /* Handle cases where:
4309 - this call goes too far (different Thumb/Thumb2 max
4310 distance)
4311 - it's a Thumb->Arm call and blx is not available, or it's a
4312 Thumb->Arm branch (not bl). A stub is needed in this case,
4313 but only if this call is not through a PLT entry. Indeed,
4314 PLT stubs handle mode switching already. */
4315 if ((!thumb2_bl
4316 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
4317 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
4318 || (thumb2_bl
4319 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
4320 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
4321 || (thumb2
4322 && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
4323 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
4324 && (r_type == R_ARM_THM_JUMP19))
4325 || (branch_type == ST_BRANCH_TO_ARM
4326 && (((r_type == R_ARM_THM_CALL
4327 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
4328 || (r_type == R_ARM_THM_JUMP24)
4329 || (r_type == R_ARM_THM_JUMP19))
4330 && !use_plt))
4332 /* If we need to insert a Thumb-Thumb long branch stub to a
4333 PLT, use one that branches directly to the ARM PLT
4334 stub. If we pretended we'd use the pre-PLT Thumb->ARM
4335 stub, undo this now. */
4336 if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
4338 branch_type = ST_BRANCH_TO_ARM;
4339 branch_offset += PLT_THUMB_STUB_SIZE;
4342 if (branch_type == ST_BRANCH_TO_THUMB)
4344 /* Thumb to thumb. */
4345 if (!thumb_only)
4347 if (input_sec->flags & SEC_ELF_PURECODE)
4348 _bfd_error_handler
4349 (_("%pB(%pA): warning: long branch veneers used in"
4350 " section with SHF_ARM_PURECODE section"
4351 " attribute is only supported for M-profile"
4352 " targets that implement the movw instruction"),
4353 input_bfd, input_sec);
4355 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4356 /* PIC stubs. */
4357 ? ((globals->use_blx
4358 && (r_type == R_ARM_THM_CALL))
4359 /* V5T and above. Stub starts with ARM code, so
4360 we must be able to switch mode before
4361 reaching it, which is only possible for 'bl'
4362 (ie R_ARM_THM_CALL relocation). */
4363 ? arm_stub_long_branch_any_thumb_pic
4364 /* On V4T, use Thumb code only. */
4365 : arm_stub_long_branch_v4t_thumb_thumb_pic)
4367 /* non-PIC stubs. */
4368 : ((globals->use_blx
4369 && (r_type == R_ARM_THM_CALL))
4370 /* V5T and above. */
4371 ? arm_stub_long_branch_any_any
4372 /* V4T. */
4373 : arm_stub_long_branch_v4t_thumb_thumb);
4375 else
4377 if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
4378 stub_type = arm_stub_long_branch_thumb2_only_pure;
4379 else
4381 if (input_sec->flags & SEC_ELF_PURECODE)
4382 _bfd_error_handler
4383 (_("%pB(%pA): warning: long branch veneers used in"
4384 " section with SHF_ARM_PURECODE section"
4385 " attribute is only supported for M-profile"
4386 " targets that implement the movw instruction"),
4387 input_bfd, input_sec);
4389 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4390 /* PIC stub. */
4391 ? arm_stub_long_branch_thumb_only_pic
4392 /* non-PIC stub. */
4393 : (thumb2 ? arm_stub_long_branch_thumb2_only
4394 : arm_stub_long_branch_thumb_only);
4398 else
4400 if (input_sec->flags & SEC_ELF_PURECODE)
4401 _bfd_error_handler
4402 (_("%pB(%pA): warning: long branch veneers used in"
4403 " section with SHF_ARM_PURECODE section"
4404 " attribute is only supported" " for M-profile"
4405 " targets that implement the movw instruction"),
4406 input_bfd, input_sec);
4408 /* Thumb to arm. */
4409 if (sym_sec != NULL
4410 && sym_sec->owner != NULL
4411 && !INTERWORK_FLAG (sym_sec->owner))
4413 _bfd_error_handler
4414 (_("%pB(%s): warning: interworking not enabled;"
4415 " first occurrence: %pB: %s call to %s"),
4416 sym_sec->owner, name, input_bfd, "Thumb", "ARM");
4419 stub_type =
4420 (bfd_link_pic (info) | globals->pic_veneer)
4421 /* PIC stubs. */
4422 ? (r_type == R_ARM_THM_TLS_CALL
4423 /* TLS PIC stubs. */
4424 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
4425 : arm_stub_long_branch_v4t_thumb_tls_pic)
4426 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4427 /* V5T PIC and above. */
4428 ? arm_stub_long_branch_any_arm_pic
4429 /* V4T PIC stub. */
4430 : arm_stub_long_branch_v4t_thumb_arm_pic))
4432 /* non-PIC stubs. */
4433 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4434 /* V5T and above. */
4435 ? arm_stub_long_branch_any_any
4436 /* V4T. */
4437 : arm_stub_long_branch_v4t_thumb_arm);
4439 /* Handle v4t short branches. */
4440 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
4441 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
4442 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
4443 stub_type = arm_stub_short_branch_v4t_thumb_arm;
4447 else if (r_type == R_ARM_CALL
4448 || r_type == R_ARM_JUMP24
4449 || r_type == R_ARM_PLT32
4450 || r_type == R_ARM_TLS_CALL)
4452 if (input_sec->flags & SEC_ELF_PURECODE)
4453 _bfd_error_handler
4454 (_("%pB(%pA): warning: long branch veneers used in"
4455 " section with SHF_ARM_PURECODE section"
4456 " attribute is only supported for M-profile"
4457 " targets that implement the movw instruction"),
4458 input_bfd, input_sec);
4459 if (branch_type == ST_BRANCH_TO_THUMB)
4461 /* Arm to thumb. */
4463 if (sym_sec != NULL
4464 && sym_sec->owner != NULL
4465 && !INTERWORK_FLAG (sym_sec->owner))
4467 _bfd_error_handler
4468 (_("%pB(%s): warning: interworking not enabled;"
4469 " first occurrence: %pB: %s call to %s"),
4470 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
4473 /* We have an extra 2 bytes of reach because of
4474 the mode change (bit 24 (H) of BLX encoding). */
4475 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4476 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4477 || (r_type == R_ARM_CALL && !globals->use_blx)
4478 || (r_type == R_ARM_JUMP24)
4479 || (r_type == R_ARM_PLT32))
4481 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4482 /* PIC stubs. */
4483 ? ((globals->use_blx)
4484 /* V5T and above. */
4485 ? arm_stub_long_branch_any_thumb_pic
4486 /* V4T stub. */
4487 : arm_stub_long_branch_v4t_arm_thumb_pic)
4489 /* non-PIC stubs. */
4490 : ((globals->use_blx)
4491 /* V5T and above. */
4492 ? arm_stub_long_branch_any_any
4493 /* V4T. */
4494 : arm_stub_long_branch_v4t_arm_thumb);
4497 else
4499 /* Arm to arm. */
4500 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4501 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4503 stub_type =
4504 (bfd_link_pic (info) | globals->pic_veneer)
4505 /* PIC stubs. */
4506 ? (r_type == R_ARM_TLS_CALL
4507 /* TLS PIC Stub. */
4508 ? arm_stub_long_branch_any_tls_pic
4509 : (globals->nacl_p
4510 ? arm_stub_long_branch_arm_nacl_pic
4511 : arm_stub_long_branch_any_arm_pic))
4512 /* non-PIC stubs. */
4513 : (globals->nacl_p
4514 ? arm_stub_long_branch_arm_nacl
4515 : arm_stub_long_branch_any_any);
4520 /* If a stub is needed, record the actual destination type. */
4521 if (stub_type != arm_stub_none)
4522 *actual_branch_type = branch_type;
4524 return stub_type;
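
/* Illustrative sketch only.  Stripped of the interworking and PLT special
   cases, the heart of arm_type_of_stub above is a range check of the branch
   offset against the reach of the encoding, shown here for a Thumb-2 BL
   using the THM2_MAX_* limits defined earlier in this file.  */

static bfd_boolean ATTRIBUTE_UNUSED
thumb2_bl_out_of_range_example (bfd_vma location, bfd_vma destination)
{
  bfd_signed_vma branch_offset = (bfd_signed_vma) (destination - location);

  return (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
          || branch_offset < THM2_MAX_BWD_BRANCH_OFFSET);
}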
4527 /* Build a name for an entry in the stub hash table. */
4529 static char *
4530 elf32_arm_stub_name (const asection *input_section,
4531 const asection *sym_sec,
4532 const struct elf32_arm_link_hash_entry *hash,
4533 const Elf_Internal_Rela *rel,
4534 enum elf32_arm_stub_type stub_type)
4536 char *stub_name;
4537 bfd_size_type len;
4539 if (hash)
4541 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4542 stub_name = (char *) bfd_malloc (len);
4543 if (stub_name != NULL)
4544 sprintf (stub_name, "%08x_%s+%x_%d",
4545 input_section->id & 0xffffffff,
4546 hash->root.root.root.string,
4547 (int) rel->r_addend & 0xffffffff,
4548 (int) stub_type);
4550 else
4552 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4553 stub_name = (char *) bfd_malloc (len);
4554 if (stub_name != NULL)
4555 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4556 input_section->id & 0xffffffff,
4557 sym_sec->id & 0xffffffff,
4558 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4559 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4560 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4561 (int) rel->r_addend & 0xffffffff,
4562 (int) stub_type);
4565 return stub_name;
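
/* Illustrative sketch only; the exact section ids are link-dependent.  For a
   global symbol the name built above has the shape
   "<section-id>_<symbol>+<addend>_<stub-type>", for instance
   "00000024_printf+0_5".  The returned buffer is bfd_malloc'd and must be
   freed by the caller once it is no longer needed.  */

static void ATTRIBUTE_UNUSED
elf32_arm_stub_name_usage_example (const asection *input_section,
                                   const asection *sym_sec,
                                   const struct elf32_arm_link_hash_entry *hash,
                                   const Elf_Internal_Rela *rel,
                                   enum elf32_arm_stub_type stub_type)
{
  char *stub_name = elf32_arm_stub_name (input_section, sym_sec, hash, rel,
                                         stub_type);

  if (stub_name != NULL)
    free (stub_name);
}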
4568 /* Look up an entry in the stub hash. Stub entries are cached because
4569 creating the stub name takes a bit of time. */
4571 static struct elf32_arm_stub_hash_entry *
4572 elf32_arm_get_stub_entry (const asection *input_section,
4573 const asection *sym_sec,
4574 struct elf_link_hash_entry *hash,
4575 const Elf_Internal_Rela *rel,
4576 struct elf32_arm_link_hash_table *htab,
4577 enum elf32_arm_stub_type stub_type)
4579 struct elf32_arm_stub_hash_entry *stub_entry;
4580 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4581 const asection *id_sec;
4583 if ((input_section->flags & SEC_CODE) == 0)
4584 return NULL;
4586 /* If this input section is part of a group of sections sharing one
4587 stub section, then use the id of the first section in the group.
4588 Stub names need to include a section id, as there may well be
4589 more than one stub used to reach, say, printf, and we need to
4590 distinguish between them. */
4591 BFD_ASSERT (input_section->id <= htab->top_id);
4592 id_sec = htab->stub_group[input_section->id].link_sec;
4594 if (h != NULL && h->stub_cache != NULL
4595 && h->stub_cache->h == h
4596 && h->stub_cache->id_sec == id_sec
4597 && h->stub_cache->stub_type == stub_type)
4599 stub_entry = h->stub_cache;
4601 else
4603 char *stub_name;
4605 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4606 if (stub_name == NULL)
4607 return NULL;
4609 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4610 stub_name, FALSE, FALSE);
4611 if (h != NULL)
4612 h->stub_cache = stub_entry;
4614 free (stub_name);
4617 return stub_entry;
4620 /* Whether veneers of type STUB_TYPE need to be in a dedicated output
4621 section. */
4623 static bfd_boolean
4624 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4626 if (stub_type >= max_stub_type)
4627 abort (); /* Should be unreachable. */
4629 switch (stub_type)
4631 case arm_stub_cmse_branch_thumb_only:
4632 return TRUE;
4634 default:
4635 return FALSE;
4638 abort (); /* Should be unreachable. */
4641 /* Required alignment (as a power of 2) for the dedicated section holding
4642 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4643 with input sections. */
4645 static int
4646 arm_dedicated_stub_output_section_required_alignment
4647 (enum elf32_arm_stub_type stub_type)
4649 if (stub_type >= max_stub_type)
4650 abort (); /* Should be unreachable. */
4652 switch (stub_type)
4654 /* Vectors of Secure Gateway veneers must be aligned on a 32-byte
4655 boundary. */
4656 case arm_stub_cmse_branch_thumb_only:
4657 return 5;
4659 default:
4660 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4661 return 0;
4664 abort (); /* Should be unreachable. */
4667 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4668 NULL if veneers of this type are interspersed with input sections. */
4670 static const char *
4671 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4673 if (stub_type >= max_stub_type)
4674 abort (); /* Should be unreachable. */
4676 switch (stub_type)
4678 case arm_stub_cmse_branch_thumb_only:
4679 return ".gnu.sgstubs";
4681 default:
4682 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4683 return NULL;
4686 abort (); /* Should be unreachable. */
4689 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4690 returns the address of the hash table field in HTAB holding a pointer to the
4691 corresponding input section. Otherwise, returns NULL. */
4693 static asection **
4694 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4695 enum elf32_arm_stub_type stub_type)
4697 if (stub_type >= max_stub_type)
4698 abort (); /* Should be unreachable. */
4700 switch (stub_type)
4702 case arm_stub_cmse_branch_thumb_only:
4703 return &htab->cmse_stub_sec;
4705 default:
4706 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4707 return NULL;
4710 abort (); /* Should be unreachable. */
4713 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4714 is the section containing the branch into the veneer; it can be NULL if the
4715 stub should go in a dedicated output section. Returns a pointer to the stub section, and the
4716 section to which the stub section will be attached (in *LINK_SEC_P).
4717 LINK_SEC_P may be NULL. */
4719 static asection *
4720 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4721 struct elf32_arm_link_hash_table *htab,
4722 enum elf32_arm_stub_type stub_type)
4724 asection *link_sec, *out_sec, **stub_sec_p;
4725 const char *stub_sec_prefix;
4726 bfd_boolean dedicated_output_section =
4727 arm_dedicated_stub_output_section_required (stub_type);
4728 int align;
4730 if (dedicated_output_section)
4732 bfd *output_bfd = htab->obfd;
4733 const char *out_sec_name =
4734 arm_dedicated_stub_output_section_name (stub_type);
4735 link_sec = NULL;
4736 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
4737 stub_sec_prefix = out_sec_name;
4738 align = arm_dedicated_stub_output_section_required_alignment (stub_type);
4739 out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
4740 if (out_sec == NULL)
4742 _bfd_error_handler (_("no address assigned to the veneers output "
4743 "section %s"), out_sec_name);
4744 return NULL;
4747 else
4749 BFD_ASSERT (section->id <= htab->top_id);
4750 link_sec = htab->stub_group[section->id].link_sec;
4751 BFD_ASSERT (link_sec != NULL);
4752 stub_sec_p = &htab->stub_group[section->id].stub_sec;
4753 if (*stub_sec_p == NULL)
4754 stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
4755 stub_sec_prefix = link_sec->name;
4756 out_sec = link_sec->output_section;
4757 align = htab->nacl_p ? 4 : 3;
4760 if (*stub_sec_p == NULL)
4762 size_t namelen;
4763 bfd_size_type len;
4764 char *s_name;
4766 namelen = strlen (stub_sec_prefix);
4767 len = namelen + sizeof (STUB_SUFFIX);
4768 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4769 if (s_name == NULL)
4770 return NULL;
4772 memcpy (s_name, stub_sec_prefix, namelen);
4773 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4774 *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
4775 align);
4776 if (*stub_sec_p == NULL)
4777 return NULL;
4779 out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
4780 | SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
4781 | SEC_KEEP;
4784 if (!dedicated_output_section)
4785 htab->stub_group[section->id].stub_sec = *stub_sec_p;
4787 if (link_sec_p)
4788 *link_sec_p = link_sec;
4790 return *stub_sec_p;
4793 /* Add a new stub entry to the stub hash. Not all fields of the new
4794 stub entry are initialised. */
4796 static struct elf32_arm_stub_hash_entry *
4797 elf32_arm_add_stub (const char *stub_name, asection *section,
4798 struct elf32_arm_link_hash_table *htab,
4799 enum elf32_arm_stub_type stub_type)
4801 asection *link_sec;
4802 asection *stub_sec;
4803 struct elf32_arm_stub_hash_entry *stub_entry;
4805 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4806 stub_type);
4807 if (stub_sec == NULL)
4808 return NULL;
4810 /* Enter this entry into the linker stub hash table. */
4811 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4812 TRUE, FALSE);
4813 if (stub_entry == NULL)
4815 if (section == NULL)
4816 section = stub_sec;
4817 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4818 section->owner, stub_name);
4819 return NULL;
4822 stub_entry->stub_sec = stub_sec;
4823 stub_entry->stub_offset = (bfd_vma) -1;
4824 stub_entry->id_sec = link_sec;
4826 return stub_entry;
4829 /* Store an Arm insn into an output section not processed by
4830 elf32_arm_write_section. */
4832 static void
4833 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4834 bfd * output_bfd, bfd_vma val, void * ptr)
4836 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4837 bfd_putl32 (val, ptr);
4838 else
4839 bfd_putb32 (val, ptr);
4842 /* Store a 16-bit Thumb insn into an output section not processed by
4843 elf32_arm_write_section. */
4845 static void
4846 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4847 bfd * output_bfd, bfd_vma val, void * ptr)
4849 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4850 bfd_putl16 (val, ptr);
4851 else
4852 bfd_putb16 (val, ptr);
4855 /* Store a Thumb2 insn into an output section not processed by
4856 elf32_arm_write_section. */
4858 static void
4859 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4860 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4862 /* Thumb-2 instructions are streamed as two 16-bit halfwords. */
4863 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4865 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4866 bfd_putl16 ((val & 0xffff), ptr + 2);
4868 else
4870 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4871 bfd_putb16 ((val & 0xffff), ptr + 2);
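
/* Illustrative sketch only.  A 32-bit Thumb-2 instruction such as NOP.W
   (0xf3af8000) is emitted by the routine above as the halfword 0xf3af
   followed by the halfword 0x8000, each byte swapped according to the code
   endianness.  */

static void ATTRIBUTE_UNUSED
put_thumb2_nop_example (struct elf32_arm_link_hash_table *htab,
                        bfd *output_bfd, bfd_byte *ptr)
{
  put_thumb2_insn (htab, output_bfd, 0xf3af8000, ptr);
}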
4875 /* If it's possible to change R_TYPE to a more efficient access
4876 model, return the new reloc type. */
4878 static unsigned
4879 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4880 struct elf_link_hash_entry *h)
4882 int is_local = (h == NULL);
4884 if (bfd_link_pic (info)
4885 || (h && h->root.type == bfd_link_hash_undefweak))
4886 return r_type;
4888 /* We do not support relaxations for Old TLS models. */
4889 switch (r_type)
4891 case R_ARM_TLS_GOTDESC:
4892 case R_ARM_TLS_CALL:
4893 case R_ARM_THM_TLS_CALL:
4894 case R_ARM_TLS_DESCSEQ:
4895 case R_ARM_THM_TLS_DESCSEQ:
4896 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4899 return r_type;
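
/* Illustrative sketch only.  In an executable (non-PIC) link the TLS
   descriptor style relocations above relax to initial-exec for global
   symbols and to local-exec when no hash entry is supplied; e.g. passing
   H == NULL yields R_ARM_TLS_LE32.  */

static unsigned ATTRIBUTE_UNUSED
elf32_arm_tls_transition_local_example (struct bfd_link_info *info)
{
  return elf32_arm_tls_transition (info, R_ARM_TLS_GOTDESC, NULL);
}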
4902 static bfd_reloc_status_type elf32_arm_final_link_relocate
4903 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4904 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4905 const char *, unsigned char, enum arm_st_branch_type,
4906 struct elf_link_hash_entry *, bfd_boolean *, char **);
4908 static unsigned int
4909 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4911 switch (stub_type)
4913 case arm_stub_a8_veneer_b_cond:
4914 case arm_stub_a8_veneer_b:
4915 case arm_stub_a8_veneer_bl:
4916 return 2;
4918 case arm_stub_long_branch_any_any:
4919 case arm_stub_long_branch_v4t_arm_thumb:
4920 case arm_stub_long_branch_thumb_only:
4921 case arm_stub_long_branch_thumb2_only:
4922 case arm_stub_long_branch_thumb2_only_pure:
4923 case arm_stub_long_branch_v4t_thumb_thumb:
4924 case arm_stub_long_branch_v4t_thumb_arm:
4925 case arm_stub_short_branch_v4t_thumb_arm:
4926 case arm_stub_long_branch_any_arm_pic:
4927 case arm_stub_long_branch_any_thumb_pic:
4928 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4929 case arm_stub_long_branch_v4t_arm_thumb_pic:
4930 case arm_stub_long_branch_v4t_thumb_arm_pic:
4931 case arm_stub_long_branch_thumb_only_pic:
4932 case arm_stub_long_branch_any_tls_pic:
4933 case arm_stub_long_branch_v4t_thumb_tls_pic:
4934 case arm_stub_cmse_branch_thumb_only:
4935 case arm_stub_a8_veneer_blx:
4936 return 4;
4938 case arm_stub_long_branch_arm_nacl:
4939 case arm_stub_long_branch_arm_nacl_pic:
4940 return 16;
4942 default:
4943 abort (); /* Should be unreachable. */
4947 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4948 veneering (TRUE) or have their own symbol (FALSE). */
4950 static bfd_boolean
4951 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4953 if (stub_type >= max_stub_type)
4954 abort (); /* Should be unreachable. */
4956 switch (stub_type)
4958 case arm_stub_cmse_branch_thumb_only:
4959 return TRUE;
4961 default:
4962 return FALSE;
4965 abort (); /* Should be unreachable. */
4968 /* Returns the padding needed for the dedicated section used by stubs of type
4969 STUB_TYPE. */
4971 static int
4972 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4974 if (stub_type >= max_stub_type)
4975 abort (); /* Should be unreachable. */
4977 switch (stub_type)
4979 case arm_stub_cmse_branch_thumb_only:
4980 return 32;
4982 default:
4983 return 0;
4986 abort (); /* Should be unreachable. */
4989 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4990 returns the address of the hash table field in HTAB holding the offset at
4991 which new veneers should be laid out in the stub section. */
4993 static bfd_vma*
4994 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
4995 enum elf32_arm_stub_type stub_type)
4997 switch (stub_type)
4999 case arm_stub_cmse_branch_thumb_only:
5000 return &htab->new_cmse_stub_offset;
5002 default:
5003 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
5004 return NULL;
5008 static bfd_boolean
5009 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
5010 void * in_arg)
5012 #define MAXRELOCS 3
5013 bfd_boolean removed_sg_veneer;
5014 struct elf32_arm_stub_hash_entry *stub_entry;
5015 struct elf32_arm_link_hash_table *globals;
5016 struct bfd_link_info *info;
5017 asection *stub_sec;
5018 bfd *stub_bfd;
5019 bfd_byte *loc;
5020 bfd_vma sym_value;
5021 int template_size;
5022 int size;
5023 const insn_sequence *template_sequence;
5024 int i;
5025 int stub_reloc_idx[MAXRELOCS] = {-1, -1, -1};
5026 int stub_reloc_offset[MAXRELOCS] = {0, 0, 0};
5027 int nrelocs = 0;
5028 int just_allocated = 0;
5030 /* Massage our args to the form they really have. */
5031 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5032 info = (struct bfd_link_info *) in_arg;
5034 globals = elf32_arm_hash_table (info);
5035 if (globals == NULL)
5036 return FALSE;
5038 stub_sec = stub_entry->stub_sec;
5040 if ((globals->fix_cortex_a8 < 0)
5041 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
5042 /* We have to do less-strictly-aligned fixes last. */
5043 return TRUE;
5045 /* Assign a slot at the end of section if none assigned yet. */
5046 if (stub_entry->stub_offset == (bfd_vma) -1)
5048 stub_entry->stub_offset = stub_sec->size;
5049 just_allocated = 1;
5051 loc = stub_sec->contents + stub_entry->stub_offset;
5053 stub_bfd = stub_sec->owner;
5055 /* This is the address of the stub destination. */
5056 sym_value = (stub_entry->target_value
5057 + stub_entry->target_section->output_offset
5058 + stub_entry->target_section->output_section->vma);
5060 template_sequence = stub_entry->stub_template;
5061 template_size = stub_entry->stub_template_size;
5063 size = 0;
5064 for (i = 0; i < template_size; i++)
5066 switch (template_sequence[i].type)
5068 case THUMB16_TYPE:
5070 bfd_vma data = (bfd_vma) template_sequence[i].data;
5071 if (template_sequence[i].reloc_addend != 0)
5073 /* We've borrowed the reloc_addend field to mean we should
5074 insert a condition code into this (Thumb-1 branch)
5075 instruction. See THUMB16_BCOND_INSN. */
5076 BFD_ASSERT ((data & 0xff00) == 0xd000);
5077 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
5079 bfd_put_16 (stub_bfd, data, loc + size);
5080 size += 2;
5082 break;
5084 case THUMB32_TYPE:
5085 bfd_put_16 (stub_bfd,
5086 (template_sequence[i].data >> 16) & 0xffff,
5087 loc + size);
5088 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
5089 loc + size + 2);
5090 if (template_sequence[i].r_type != R_ARM_NONE)
5092 stub_reloc_idx[nrelocs] = i;
5093 stub_reloc_offset[nrelocs++] = size;
5095 size += 4;
5096 break;
5098 case ARM_TYPE:
5099 bfd_put_32 (stub_bfd, template_sequence[i].data,
5100 loc + size);
5101 /* Handle cases where the target is encoded within the
5102 instruction. */
5103 if (template_sequence[i].r_type == R_ARM_JUMP24)
5105 stub_reloc_idx[nrelocs] = i;
5106 stub_reloc_offset[nrelocs++] = size;
5108 size += 4;
5109 break;
5111 case DATA_TYPE:
5112 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
5113 stub_reloc_idx[nrelocs] = i;
5114 stub_reloc_offset[nrelocs++] = size;
5115 size += 4;
5116 break;
5118 default:
5119 BFD_FAIL ();
5120 return FALSE;
5124 if (just_allocated)
5125 stub_sec->size += size;
5127 /* Stub size has already been computed in arm_size_one_stub. Check
5128 consistency. */
5129 BFD_ASSERT (size == stub_entry->stub_size);
5131 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
5132 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
5133 sym_value |= 1;
5135 /* Assume non-empty slots have at least one and at most MAXRELOCS entries
5136 to relocate in each stub. */
5137 removed_sg_veneer =
5138 (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
5139 BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
5141 for (i = 0; i < nrelocs; i++)
5143 Elf_Internal_Rela rel;
5144 bfd_boolean unresolved_reloc;
5145 char *error_message;
5146 bfd_vma points_to =
5147 sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
5149 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
5150 rel.r_info = ELF32_R_INFO (0,
5151 template_sequence[stub_reloc_idx[i]].r_type);
5152 rel.r_addend = 0;
5154 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
5155 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5156 template should refer back to the instruction after the original
5157 branch. We use target_section as Cortex-A8 erratum workaround stubs
5158 are only generated when both source and target are in the same
5159 section. */
5160 points_to = stub_entry->target_section->output_section->vma
5161 + stub_entry->target_section->output_offset
5162 + stub_entry->source_value;
5164 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5165 (template_sequence[stub_reloc_idx[i]].r_type),
5166 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
5167 points_to, info, stub_entry->target_section, "", STT_FUNC,
5168 stub_entry->branch_type,
5169 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
5170 &error_message);
5173 return TRUE;
5174 #undef MAXRELOCS
5177 /* Calculate the template, template size and instruction size for a stub.
5178 Return value is the instruction size. */
5180 static unsigned int
5181 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5182 const insn_sequence **stub_template,
5183 int *stub_template_size)
5185 const insn_sequence *template_sequence = NULL;
5186 int template_size = 0, i;
5187 unsigned int size;
5189 template_sequence = stub_definitions[stub_type].template_sequence;
5190 if (stub_template)
5191 *stub_template = template_sequence;
5193 template_size = stub_definitions[stub_type].template_size;
5194 if (stub_template_size)
5195 *stub_template_size = template_size;
5197 size = 0;
5198 for (i = 0; i < template_size; i++)
5200 switch (template_sequence[i].type)
5202 case THUMB16_TYPE:
5203 size += 2;
5204 break;
5206 case ARM_TYPE:
5207 case THUMB32_TYPE:
5208 case DATA_TYPE:
5209 size += 4;
5210 break;
5212 default:
5213 BFD_FAIL ();
5214 return 0;
5218 return size;
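
/* Illustrative sketch only.  The size computed above is just 2 bytes per
   THUMB16 entry and 4 bytes per ARM, THUMB32 or DATA entry; for example a
   template of one THUMB32 instruction plus one DATA literal occupies 8
   bytes, before the 8-byte rounding applied in arm_size_one_stub below.  */

static unsigned int ATTRIBUTE_UNUSED
stub_template_entry_size_example (int type)
{
  return type == THUMB16_TYPE ? 2 : 4;
}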
5221 /* As above, but don't actually build the stub. Just bump offset so
5222 we know stub section sizes. */
5224 static bfd_boolean
5225 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5226 void *in_arg ATTRIBUTE_UNUSED)
5228 struct elf32_arm_stub_hash_entry *stub_entry;
5229 const insn_sequence *template_sequence;
5230 int template_size, size;
5232 /* Massage our args to the form they really have. */
5233 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5235 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
5236 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
5238 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5239 &template_size);
5241 /* stub_template_size is initialized to -1; a size of zero indicates an empty slot full of zeros. */
5242 if (stub_entry->stub_template_size)
5244 stub_entry->stub_size = size;
5245 stub_entry->stub_template = template_sequence;
5246 stub_entry->stub_template_size = template_size;
5249 /* Already accounted for. */
5250 if (stub_entry->stub_offset != (bfd_vma) -1)
5251 return TRUE;
5253 size = (size + 7) & ~7;
5254 stub_entry->stub_sec->size += size;
5256 return TRUE;
5259 /* External entry points for sizing and building linker stubs. */
5261 /* Set up various things so that we can make a list of input sections
5262 for each output section included in the link. Returns -1 on error,
5263 0 when no stubs will be needed, and 1 on success. */
5266 elf32_arm_setup_section_lists (bfd *output_bfd,
5267 struct bfd_link_info *info)
5269 bfd *input_bfd;
5270 unsigned int bfd_count;
5271 unsigned int top_id, top_index;
5272 asection *section;
5273 asection **input_list, **list;
5274 bfd_size_type amt;
5275 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5277 if (htab == NULL)
5278 return 0;
5279 if (! is_elf_hash_table (htab))
5280 return 0;
5282 /* Count the number of input BFDs and find the top input section id. */
5283 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
5284 input_bfd != NULL;
5285 input_bfd = input_bfd->link.next)
5287 bfd_count += 1;
5288 for (section = input_bfd->sections;
5289 section != NULL;
5290 section = section->next)
5292 if (top_id < section->id)
5293 top_id = section->id;
5296 htab->bfd_count = bfd_count;
5298 amt = sizeof (struct map_stub) * (top_id + 1);
5299 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
5300 if (htab->stub_group == NULL)
5301 return -1;
5302 htab->top_id = top_id;
5304 /* We can't use output_bfd->section_count here to find the top output
5305 section index as some sections may have been removed, and
5306 _bfd_strip_section_from_output doesn't renumber the indices. */
5307 for (section = output_bfd->sections, top_index = 0;
5308 section != NULL;
5309 section = section->next)
5311 if (top_index < section->index)
5312 top_index = section->index;
5315 htab->top_index = top_index;
5316 amt = sizeof (asection *) * (top_index + 1);
5317 input_list = (asection **) bfd_malloc (amt);
5318 htab->input_list = input_list;
5319 if (input_list == NULL)
5320 return -1;
5322 /* For sections we aren't interested in, mark their entries with a
5323 value we can check later. */
5324 list = input_list + top_index;
5326 *list = bfd_abs_section_ptr;
5327 while (list-- != input_list);
5329 for (section = output_bfd->sections;
5330 section != NULL;
5331 section = section->next)
5333 if ((section->flags & SEC_CODE) != 0)
5334 input_list[section->index] = NULL;
5337 return 1;
5340 /* The linker repeatedly calls this function for each input section,
5341 in the order that input sections are linked into output sections.
5342 Build lists of input sections to determine groupings between which
5343 we may insert linker stubs. */
5345 void
5346 elf32_arm_next_input_section (struct bfd_link_info *info,
5347 asection *isec)
5349 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5351 if (htab == NULL)
5352 return;
5354 if (isec->output_section->index <= htab->top_index)
5356 asection **list = htab->input_list + isec->output_section->index;
5358 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
5360 /* Steal the link_sec pointer for our list. */
5361 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
5362 /* This happens to make the list in reverse order,
5363 which we reverse later. */
5364 PREV_SEC (isec) = *list;
5365 *list = isec;
5370 /* See whether we can group stub sections together. Grouping stub
5371 sections may result in fewer stubs. More importantly, we need to
5372 put all .init* and .fini* stubs at the end of the .init or
5373 .fini output sections respectively, because glibc splits the
5374 _init and _fini functions into multiple parts. Putting a stub in
5375 the middle of a function is not a good idea. */
5377 static void
5378 group_sections (struct elf32_arm_link_hash_table *htab,
5379 bfd_size_type stub_group_size,
5380 bfd_boolean stubs_always_after_branch)
5382 asection **list = htab->input_list;
5386 asection *tail = *list;
5387 asection *head;
5389 if (tail == bfd_abs_section_ptr)
5390 continue;
5392 /* Reverse the list: we must avoid placing stubs at the
5393 beginning of the section because the beginning of the text
5394 section may be required for an interrupt vector in bare metal
5395 code. */
5396 #define NEXT_SEC PREV_SEC
5397 head = NULL;
5398 while (tail != NULL)
5400 /* Pop from tail. */
5401 asection *item = tail;
5402 tail = PREV_SEC (item);
5404 /* Push on head. */
5405 NEXT_SEC (item) = head;
5406 head = item;
5409 while (head != NULL)
5411 asection *curr;
5412 asection *next;
5413 bfd_vma stub_group_start = head->output_offset;
5414 bfd_vma end_of_next;
5416 curr = head;
5417 while (NEXT_SEC (curr) != NULL)
5419 next = NEXT_SEC (curr);
5420 end_of_next = next->output_offset + next->size;
5421 if (end_of_next - stub_group_start >= stub_group_size)
5422 /* End of NEXT is too far from start, so stop. */
5423 break;
5424 /* Add NEXT to the group. */
5425 curr = next;
5428 /* OK, the size from the start to the start of CURR is less
5429 than stub_group_size and thus can be handled by one stub
5430 section. (Or the head section is itself larger than
5431 stub_group_size, in which case we may be toast.)
5432 We should really be keeping track of the total size of
5433 stubs added here, as stubs contribute to the final output
5434 section size. */
5437 next = NEXT_SEC (head);
5438 /* Set up this stub group. */
5439 htab->stub_group[head->id].link_sec = curr;
5441 while (head != curr && (head = next) != NULL);
5443 /* But wait, there's more! Input sections up to stub_group_size
5444 bytes after the stub section can be handled by it too. */
5445 if (!stubs_always_after_branch)
5447 stub_group_start = curr->output_offset + curr->size;
5449 while (next != NULL)
5451 end_of_next = next->output_offset + next->size;
5452 if (end_of_next - stub_group_start >= stub_group_size)
5453 /* End of NEXT is too far from stubs, so stop. */
5454 break;
5455 /* Add NEXT to the stub group. */
5456 head = next;
5457 next = NEXT_SEC (head);
5458 htab->stub_group[head->id].link_sec = curr;
5461 head = next;
5464 while (list++ != htab->input_list + htab->top_index);
5466 free (htab->input_list);
5467 #undef PREV_SEC
5468 #undef NEXT_SEC
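/* For illustration only (sizes chosen arbitrarily, not taken from any real
   link): with a stub_group_size of 0x2000 and consecutive code sections A
   (size 0x1800), B (0x1000) and C (0x400) in one output section, A ends the
   first group because B would end 0x2800 bytes from the group start.  With
   stubs allowed after the branch, B and C end within 0x2000 bytes of A's
   end, so they are attached to the same group and share its stub section:
   stub_group[A].link_sec == stub_group[B].link_sec
   == stub_group[C].link_sec == A.  */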
5471 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5472 erratum fix. */
5474 static int
5475 a8_reloc_compare (const void *a, const void *b)
5477 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5478 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5480 if (ra->from < rb->from)
5481 return -1;
5482 else if (ra->from > rb->from)
5483 return 1;
5484 else
5485 return 0;
5488 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5489 const char *, char **);
5491 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5492 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5493 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5494 otherwise. */
5496 static bfd_boolean
5497 cortex_a8_erratum_scan (bfd *input_bfd,
5498 struct bfd_link_info *info,
5499 struct a8_erratum_fix **a8_fixes_p,
5500 unsigned int *num_a8_fixes_p,
5501 unsigned int *a8_fix_table_size_p,
5502 struct a8_erratum_reloc *a8_relocs,
5503 unsigned int num_a8_relocs,
5504 unsigned prev_num_a8_fixes,
5505 bfd_boolean *stub_changed_p)
5507 asection *section;
5508 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5509 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
5510 unsigned int num_a8_fixes = *num_a8_fixes_p;
5511 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
5513 if (htab == NULL)
5514 return FALSE;
5516 for (section = input_bfd->sections;
5517 section != NULL;
5518 section = section->next)
5520 bfd_byte *contents = NULL;
5521 struct _arm_elf_section_data *sec_data;
5522 unsigned int span;
5523 bfd_vma base_vma;
5525 if (elf_section_type (section) != SHT_PROGBITS
5526 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
5527 || (section->flags & SEC_EXCLUDE) != 0
5528 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
5529 || (section->output_section == bfd_abs_section_ptr))
5530 continue;
5532 base_vma = section->output_section->vma + section->output_offset;
5534 if (elf_section_data (section)->this_hdr.contents != NULL)
5535 contents = elf_section_data (section)->this_hdr.contents;
5536 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
5537 return TRUE;
5539 sec_data = elf32_arm_section_data (section);
5541 for (span = 0; span < sec_data->mapcount; span++)
5543 unsigned int span_start = sec_data->map[span].vma;
5544 unsigned int span_end = (span == sec_data->mapcount - 1)
5545 ? section->size : sec_data->map[span + 1].vma;
5546 unsigned int i;
5547 char span_type = sec_data->map[span].type;
5548 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
5550 if (span_type != 't')
5551 continue;
5553 /* Span is entirely within a single 4KB region: skip scanning. */
5554 if (((base_vma + span_start) & ~0xfff)
5555 == ((base_vma + span_end) & ~0xfff))
5556 continue;
5558 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5560 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5561 * The branch target is in the same 4KB region as the
5562 first half of the branch.
5563 * The instruction before the branch is a 32-bit
5564 length non-branch instruction. */
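/* As a purely illustrative example of the sequence being scanned for: a
   32-bit Thumb-2 BL whose first halfword sits at address 0x8ffe (the last
   two bytes of a 4KB region) with its second halfword at 0x9000, preceded
   by a 32-bit non-branch instruction at 0x8ffa, and whose target lies back
   in the 0x8000-0x8fff region, matches all of the criteria above and needs
   a veneer.  */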
5565 for (i = span_start; i < span_end;)
5567 unsigned int insn = bfd_getl16 (&contents[i]);
5568 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
5569 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
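/* A Thumb instruction is 32 bits long when its first halfword has bits
   [15:13] == 0b111 and bits [12:11] != 0b00, i.e. it starts with 0b11101,
   0b11110 or 0b11111; 0b11100 is the 16-bit unconditional branch.  That is
   what the mask test below checks.  */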
5571 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5572 insn_32bit = TRUE;
5574 if (insn_32bit)
5576 /* Load the rest of the insn (in manual-friendly order). */
5577 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5579 /* Encoding T4: B<c>.W. */
5580 is_b = (insn & 0xf800d000) == 0xf0009000;
5581 /* Encoding T1: BL<c>.W. */
5582 is_bl = (insn & 0xf800d000) == 0xf000d000;
5583 /* Encoding T2: BLX<c>.W. */
5584 is_blx = (insn & 0xf800d000) == 0xf000c000;
5585 /* Encoding T3: B<c>.W (not permitted in IT block). */
5586 is_bcc = (insn & 0xf800d000) == 0xf0008000
5587 && (insn & 0x07f00000) != 0x03800000;
5590 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5592 if (((base_vma + i) & 0xfff) == 0xffe
5593 && insn_32bit
5594 && is_32bit_branch
5595 && last_was_32bit
5596 && ! last_was_branch)
5598 bfd_signed_vma offset = 0;
5599 bfd_boolean force_target_arm = FALSE;
5600 bfd_boolean force_target_thumb = FALSE;
5601 bfd_vma target;
5602 enum elf32_arm_stub_type stub_type = arm_stub_none;
5603 struct a8_erratum_reloc key, *found;
5604 bfd_boolean use_plt = FALSE;
5606 key.from = base_vma + i;
5607 found = (struct a8_erratum_reloc *)
5608 bsearch (&key, a8_relocs, num_a8_relocs,
5609 sizeof (struct a8_erratum_reloc),
5610 &a8_reloc_compare);
5612 if (found)
5614 char *error_message = NULL;
5615 struct elf_link_hash_entry *entry;
5617 /* We don't care about the error returned from this
5618 function, only if there is glue or not. */
5619 entry = find_thumb_glue (info, found->sym_name,
5620 &error_message);
5622 if (entry)
5623 found->non_a8_stub = TRUE;
5625 /* Keep a simpler condition, for the sake of clarity. */
5626 if (htab->root.splt != NULL && found->hash != NULL
5627 && found->hash->root.plt.offset != (bfd_vma) -1)
5628 use_plt = TRUE;
5630 if (found->r_type == R_ARM_THM_CALL)
5632 if (found->branch_type == ST_BRANCH_TO_ARM
5633 || use_plt)
5634 force_target_arm = TRUE;
5635 else
5636 force_target_thumb = TRUE;
5640 /* Check if we have an offending branch instruction. */
5642 if (found && found->non_a8_stub)
5643 /* We've already made a stub for this instruction, e.g.
5644 it's a long branch or a Thumb->ARM stub. Assume that
5645 stub will suffice to work around the A8 erratum (see
5646 setting of always_after_branch above). */
5648 else if (is_bcc)
5650 offset = (insn & 0x7ff) << 1;
5651 offset |= (insn & 0x3f0000) >> 4;
5652 offset |= (insn & 0x2000) ? 0x40000 : 0;
5653 offset |= (insn & 0x800) ? 0x80000 : 0;
5654 offset |= (insn & 0x4000000) ? 0x100000 : 0;
5655 if (offset & 0x100000)
5656 offset |= ~ ((bfd_signed_vma) 0xfffff);
5657 stub_type = arm_stub_a8_veneer_b_cond;
5659 else if (is_b || is_bl || is_blx)
5661 int s = (insn & 0x4000000) != 0;
5662 int j1 = (insn & 0x2000) != 0;
5663 int j2 = (insn & 0x800) != 0;
5664 int i1 = !(j1 ^ s);
5665 int i2 = !(j2 ^ s);
5667 offset = (insn & 0x7ff) << 1;
5668 offset |= (insn & 0x3ff0000) >> 4;
5669 offset |= i2 << 22;
5670 offset |= i1 << 23;
5671 offset |= s << 24;
5672 if (offset & 0x1000000)
5673 offset |= ~ ((bfd_signed_vma) 0xffffff);
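/* Worked example with illustrative values: a BL.W encoded as the halfwords
   0xf001 0xf800 gives insn == 0xf001f800 here, so s == 0, j1 == 1, j2 == 1
   and hence i1 == i2 == 0; (insn & 0x7ff) << 1 contributes 0 and
   (insn & 0x3ff0000) >> 4 contributes 0x1000, so the offset computed above
   is +0x1000 and the branch targets the instruction's PC value (its address
   plus 4) plus 0x1000.  */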
5675 if (is_blx)
5676 offset &= ~ ((bfd_signed_vma) 3);
5678 stub_type = is_blx ? arm_stub_a8_veneer_blx :
5679 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5682 if (stub_type != arm_stub_none)
5684 bfd_vma pc_for_insn = base_vma + i + 4;
5686 /* The original instruction is a BL, but the target is
5687 an ARM instruction. If we were not making a stub,
5688 the BL would have been converted to a BLX. Use the
5689 BLX stub instead in that case. */
5690 if (htab->use_blx && force_target_arm
5691 && stub_type == arm_stub_a8_veneer_bl)
5693 stub_type = arm_stub_a8_veneer_blx;
5694 is_blx = TRUE;
5695 is_bl = FALSE;
5697 /* Conversely, if the original instruction was
5698 BLX but the target is Thumb mode, use the BL
5699 stub. */
5700 else if (force_target_thumb
5701 && stub_type == arm_stub_a8_veneer_blx)
5703 stub_type = arm_stub_a8_veneer_bl;
5704 is_blx = FALSE;
5705 is_bl = TRUE;
5708 if (is_blx)
5709 pc_for_insn &= ~ ((bfd_vma) 3);
5711 /* If we found a relocation, use the proper destination,
5712 not the offset in the (unrelocated) instruction.
5713 Note this is always done if we switched the stub type
5714 above. */
5715 if (found)
5716 offset =
5717 (bfd_signed_vma) (found->destination - pc_for_insn);
5719 /* If the stub will use a Thumb-mode branch to a
5720 PLT target, redirect it to the preceding Thumb
5721 entry point. */
5722 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5723 offset -= PLT_THUMB_STUB_SIZE;
5725 target = pc_for_insn + offset;
5727 /* The BLX stub is ARM-mode code. Adjust the offset to
5728 take the different PC value (+8 instead of +4) into
5729 account. */
5730 if (stub_type == arm_stub_a8_veneer_blx)
5731 offset += 4;
5733 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5735 char *stub_name = NULL;
5737 if (num_a8_fixes == a8_fix_table_size)
5739 a8_fix_table_size *= 2;
5740 a8_fixes = (struct a8_erratum_fix *)
5741 bfd_realloc (a8_fixes,
5742 sizeof (struct a8_erratum_fix)
5743 * a8_fix_table_size);
5746 if (num_a8_fixes < prev_num_a8_fixes)
5748 /* If we're doing a subsequent scan,
5749 check if we've found the same fix as
5750 before, and try to reuse the stub
5751 name. */
5752 stub_name = a8_fixes[num_a8_fixes].stub_name;
5753 if ((a8_fixes[num_a8_fixes].section != section)
5754 || (a8_fixes[num_a8_fixes].offset != i))
5756 free (stub_name);
5757 stub_name = NULL;
5758 *stub_changed_p = TRUE;
5762 if (!stub_name)
5764 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5765 if (stub_name != NULL)
5766 sprintf (stub_name, "%x:%x", section->id, i);
5769 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5770 a8_fixes[num_a8_fixes].section = section;
5771 a8_fixes[num_a8_fixes].offset = i;
5772 a8_fixes[num_a8_fixes].target_offset =
5773 target - base_vma;
5774 a8_fixes[num_a8_fixes].orig_insn = insn;
5775 a8_fixes[num_a8_fixes].stub_name = stub_name;
5776 a8_fixes[num_a8_fixes].stub_type = stub_type;
5777 a8_fixes[num_a8_fixes].branch_type =
5778 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5780 num_a8_fixes++;
5785 i += insn_32bit ? 4 : 2;
5786 last_was_32bit = insn_32bit;
5787 last_was_branch = is_32bit_branch;
5791 if (elf_section_data (section)->this_hdr.contents == NULL)
5792 free (contents);
5795 *a8_fixes_p = a8_fixes;
5796 *num_a8_fixes_p = num_a8_fixes;
5797 *a8_fix_table_size_p = a8_fix_table_size;
5799 return FALSE;
5802 /* Create or update a stub entry depending on whether the stub can already be
5803 found in HTAB. The stub is identified by:
5804 - its type STUB_TYPE
5805 - its source branch (note that several can share the same stub) whose
5806 section and relocation (if any) are given by SECTION and IRELA
5807 respectively
5808 - its target symbol whose input section, hash, name, value and branch type
5809 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5810 respectively
5812 If found, the value of the stub's target symbol is updated from SYM_VALUE
5813 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5814 TRUE and the stub entry is initialized.
5816 Returns the stub that was created or updated, or NULL if an error
5817 occurred. */
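/* A sketch of typical use, mirroring the callers later in this file
   (e.g. elf32_arm_size_stubs and cmse_scan):

     stub_entry = elf32_arm_create_stub (htab, stub_type, section, irela,
					 sym_sec, hash, sym_name, sym_value,
					 branch_type, &new_stub);

   A NULL result means the stub could not be created; if *NEW_STUB comes
   back TRUE, the caller must note that the stub sections changed so that
   they are re-sized.  */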
5819 static struct elf32_arm_stub_hash_entry *
5820 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5821 enum elf32_arm_stub_type stub_type, asection *section,
5822 Elf_Internal_Rela *irela, asection *sym_sec,
5823 struct elf32_arm_link_hash_entry *hash, char *sym_name,
5824 bfd_vma sym_value, enum arm_st_branch_type branch_type,
5825 bfd_boolean *new_stub)
5827 const asection *id_sec;
5828 char *stub_name;
5829 struct elf32_arm_stub_hash_entry *stub_entry;
5830 unsigned int r_type;
5831 bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);
5833 BFD_ASSERT (stub_type != arm_stub_none);
5834 *new_stub = FALSE;
5836 if (sym_claimed)
5837 stub_name = sym_name;
5838 else
5840 BFD_ASSERT (irela);
5841 BFD_ASSERT (section);
5842 BFD_ASSERT (section->id <= htab->top_id);
5844 /* Support for grouping stub sections. */
5845 id_sec = htab->stub_group[section->id].link_sec;
5847 /* Get the name of this stub. */
5848 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5849 stub_type);
5850 if (!stub_name)
5851 return NULL;
5854 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
5855 FALSE);
5856 /* The proper stub has already been created, just update its value. */
5857 if (stub_entry != NULL)
5859 if (!sym_claimed)
5860 free (stub_name);
5861 stub_entry->target_value = sym_value;
5862 return stub_entry;
5865 stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5866 if (stub_entry == NULL)
5868 if (!sym_claimed)
5869 free (stub_name);
5870 return NULL;
5873 stub_entry->target_value = sym_value;
5874 stub_entry->target_section = sym_sec;
5875 stub_entry->stub_type = stub_type;
5876 stub_entry->h = hash;
5877 stub_entry->branch_type = branch_type;
5879 if (sym_claimed)
5880 stub_entry->output_name = sym_name;
5881 else
5883 if (sym_name == NULL)
5884 sym_name = "unnamed";
5885 stub_entry->output_name = (char *)
5886 bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5887 + strlen (sym_name));
5888 if (stub_entry->output_name == NULL)
5890 free (stub_name);
5891 return NULL;
5894 /* For historical reasons, use the existing names for ARM-to-Thumb and
5895 Thumb-to-ARM stubs. */
5896 r_type = ELF32_R_TYPE (irela->r_info);
5897 if ((r_type == (unsigned int) R_ARM_THM_CALL
5898 || r_type == (unsigned int) R_ARM_THM_JUMP24
5899 || r_type == (unsigned int) R_ARM_THM_JUMP19)
5900 && branch_type == ST_BRANCH_TO_ARM)
5901 sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5902 else if ((r_type == (unsigned int) R_ARM_CALL
5903 || r_type == (unsigned int) R_ARM_JUMP24)
5904 && branch_type == ST_BRANCH_TO_THUMB)
5905 sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5906 else
5907 sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5910 *new_stub = TRUE;
5911 return stub_entry;
5914 /* Scan symbols in INPUT_BFD to identify secure entry functions that need a
5915 gateway veneer to transition from non-secure to secure state, and create
5916 those veneers accordingly.
5918 "ARMv8-M Security Extensions: Requirements on Development Tools" document
5919 defines the conditions that govern Secure Gateway veneer creation for a
5920 given symbol <SYM> as follows:
5921 - it has function type
5922 - it has non local binding
5923 - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5924 same type, binding and value as <SYM> (called normal symbol).
5925 An entry function can handle secure state transition itself in which case
5926 its special symbol would have a different value from the normal symbol.
5928 OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5929 entry mapping, while HTAB gives the name to hash entry mapping.
5930 *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
5931 created.
5933 The return value gives whether a stub failed to be allocated. */
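/* For illustration, an entry function "foo" (name chosen arbitrarily) that
   lets the linker-generated SG veneer handle the state transition would
   typically be published with both symbols at the same address:

       .global foo
       .global __acle_se_foo
       .type   foo, %function
       .type   __acle_se_foo, %function
       .thumb_func
   __acle_se_foo:
   foo:
       ...                @ function body

   cmse_scan below matches the __acle_se_ special symbol against the normal
   symbol and only creates a veneer when their values are equal.  */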
5935 static bfd_boolean
5936 cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
5937 obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
5938 int *cmse_stub_created)
5940 const struct elf_backend_data *bed;
5941 Elf_Internal_Shdr *symtab_hdr;
5942 unsigned i, j, sym_count, ext_start;
5943 Elf_Internal_Sym *cmse_sym, *local_syms;
5944 struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
5945 enum arm_st_branch_type branch_type;
5946 char *sym_name, *lsym_name;
5947 bfd_vma sym_value;
5948 asection *section;
5949 struct elf32_arm_stub_hash_entry *stub_entry;
5950 bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;
5952 bed = get_elf_backend_data (input_bfd);
5953 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5954 sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
5955 ext_start = symtab_hdr->sh_info;
5956 is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
5957 && out_attr[Tag_CPU_arch_profile].i == 'M');
5959 local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
5960 if (local_syms == NULL)
5961 local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5962 symtab_hdr->sh_info, 0, NULL, NULL,
5963 NULL);
5964 if (symtab_hdr->sh_info && local_syms == NULL)
5965 return FALSE;
5967 /* Scan symbols. */
5968 for (i = 0; i < sym_count; i++)
5970 cmse_invalid = FALSE;
5972 if (i < ext_start)
5974 cmse_sym = &local_syms[i];
5975 /* Not a special symbol. */
5976 if (!ARM_GET_SYM_CMSE_SPCL (cmse_sym->st_target_internal))
5977 continue;
5978 sym_name = bfd_elf_string_from_elf_section (input_bfd,
5979 symtab_hdr->sh_link,
5980 cmse_sym->st_name);
5981 /* Special symbol with local binding. */
5982 cmse_invalid = TRUE;
5984 else
5986 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
5987 sym_name = (char *) cmse_hash->root.root.root.string;
5989 /* Not a special symbol. */
5990 if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
5991 continue;
5993 /* Special symbol has incorrect binding or type. */
5994 if ((cmse_hash->root.root.type != bfd_link_hash_defined
5995 && cmse_hash->root.root.type != bfd_link_hash_defweak)
5996 || cmse_hash->root.type != STT_FUNC)
5997 cmse_invalid = TRUE;
6000 if (!is_v8m)
6002 _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
6003 "ARMv8-M architecture or later"),
6004 input_bfd, sym_name);
6005 is_v8m = TRUE; /* Avoid multiple warnings. */
6006 ret = FALSE;
6009 if (cmse_invalid)
6011 _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
6012 " a global or weak function symbol"),
6013 input_bfd, sym_name);
6014 ret = FALSE;
6015 if (i < ext_start)
6016 continue;
6019 sym_name += strlen (CMSE_PREFIX);
6020 hash = (struct elf32_arm_link_hash_entry *)
6021 elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
6023 /* No associated normal symbol or it is neither global nor weak. */
6024 if (!hash
6025 || (hash->root.root.type != bfd_link_hash_defined
6026 && hash->root.root.type != bfd_link_hash_defweak)
6027 || hash->root.type != STT_FUNC)
6029 /* Initialize here to avoid warning about use of possibly
6030 uninitialized variable. */
6031 j = 0;
6033 if (!hash)
6035 /* Searching for a normal symbol with local binding. */
6036 for (; j < ext_start; j++)
6038 lsym_name =
6039 bfd_elf_string_from_elf_section (input_bfd,
6040 symtab_hdr->sh_link,
6041 local_syms[j].st_name);
6042 if (!strcmp (sym_name, lsym_name))
6043 break;
6047 if (hash || j < ext_start)
6049 _bfd_error_handler
6050 (_("%pB: invalid standard symbol `%s'; it must be "
6051 "a global or weak function symbol"),
6052 input_bfd, sym_name);
6054 else
6055 _bfd_error_handler
6056 (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
6057 ret = FALSE;
6058 if (!hash)
6059 continue;
6062 sym_value = hash->root.root.u.def.value;
6063 section = hash->root.root.u.def.section;
6065 if (cmse_hash->root.root.u.def.section != section)
6067 _bfd_error_handler
6068 (_("%pB: `%s' and its special symbol are in different sections"),
6069 input_bfd, sym_name);
6070 ret = FALSE;
6072 if (cmse_hash->root.root.u.def.value != sym_value)
6073 continue; /* Ignore: could be an entry function starting with SG. */
6075 /* If this section is a link-once section that will be discarded, then
6076 don't create any stubs. */
6077 if (section->output_section == NULL)
6079 _bfd_error_handler
6080 (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
6081 continue;
6084 if (hash->root.size == 0)
6086 _bfd_error_handler
6087 (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
6088 ret = FALSE;
6091 if (!ret)
6092 continue;
6093 branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6094 stub_entry
6095 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6096 NULL, NULL, section, hash, sym_name,
6097 sym_value, branch_type, &new_stub);
6099 if (stub_entry == NULL)
6100 ret = FALSE;
6101 else
6103 BFD_ASSERT (new_stub);
6104 (*cmse_stub_created)++;
6108 if (!symtab_hdr->contents)
6109 free (local_syms);
6110 return ret;
6113 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6114 code entry function, i.e. one that can be called from non-secure code
6115 without using a veneer. */
6117 static bfd_boolean
6118 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6120 bfd_byte contents[4];
6121 uint32_t first_insn;
6122 asection *section;
6123 file_ptr offset;
6124 bfd *abfd;
6126 /* Defined symbol of function type. */
6127 if (hash->root.root.type != bfd_link_hash_defined
6128 && hash->root.root.type != bfd_link_hash_defweak)
6129 return FALSE;
6130 if (hash->root.type != STT_FUNC)
6131 return FALSE;
6133 /* Read first instruction. */
6134 section = hash->root.root.u.def.section;
6135 abfd = section->owner;
6136 offset = hash->root.root.u.def.value - section->vma;
6137 if (!bfd_get_section_contents (abfd, section, contents, offset,
6138 sizeof (contents)))
6139 return FALSE;
6141 first_insn = bfd_get_32 (abfd, contents);
6143 /* Starts with an SG instruction (two 0xe97f halfwords). */
6144 return first_insn == 0xe97fe97f;
6147 /* Output the name (in the symbol table) of the veneer GEN_ENTRY if it is a new
6148 secure gateway veneer (i.e. the veneer was not in the input import library)
6149 and there is no output import library (GEN_INFO->out_implib_bfd is NULL). */
6151 static bfd_boolean
6152 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6154 struct elf32_arm_stub_hash_entry *stub_entry;
6155 struct bfd_link_info *info;
6157 /* Massage our args to the form they really have. */
6158 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6159 info = (struct bfd_link_info *) gen_info;
6161 if (info->out_implib_bfd)
6162 return TRUE;
6164 if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6165 return TRUE;
6167 if (stub_entry->stub_offset == (bfd_vma) -1)
6168 _bfd_error_handler (" %s", stub_entry->output_name);
6170 return TRUE;
6173 /* Set the offset of each secure gateway veneer so that its address remains
6174 identical to the one in the input import library referred to by
6175 HTAB->in_implib_bfd. A warning is issued for veneers that disappeared
6176 (present in the input import library but absent from the executable being
6177 linked) or if new veneers appeared and there is no output import library
6178 (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6179 number of secure gateway veneers found in the input import library).
6181 The function returns whether an error occurred. If no error occurred,
6182 *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
6183 and this function, and HTAB->new_cmse_stub_offset is set just past the
6184 biggest veneer offset observed, so that new veneers can be laid out after it. */
6186 static bfd_boolean
6187 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6188 struct elf32_arm_link_hash_table *htab,
6189 int *cmse_stub_created)
6191 long symsize;
6192 char *sym_name;
6193 flagword flags;
6194 long i, symcount;
6195 bfd *in_implib_bfd;
6196 asection *stub_out_sec;
6197 bfd_boolean ret = TRUE;
6198 Elf_Internal_Sym *intsym;
6199 const char *out_sec_name;
6200 bfd_size_type cmse_stub_size;
6201 asymbol **sympp = NULL, *sym;
6202 struct elf32_arm_link_hash_entry *hash;
6203 const insn_sequence *cmse_stub_template;
6204 struct elf32_arm_stub_hash_entry *stub_entry;
6205 int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6206 bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6207 bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6209 /* No input secure gateway import library. */
6210 if (!htab->in_implib_bfd)
6211 return TRUE;
6213 in_implib_bfd = htab->in_implib_bfd;
6214 if (!htab->cmse_implib)
6216 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6217 "Gateway import libraries"), in_implib_bfd);
6218 return FALSE;
6221 /* Get symbol table size. */
6222 symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6223 if (symsize < 0)
6224 return FALSE;
6226 /* Read in the input secure gateway import library's symbol table. */
6227 sympp = (asymbol **) xmalloc (symsize);
6228 symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6229 if (symcount < 0)
6231 ret = FALSE;
6232 goto free_sym_buf;
6235 htab->new_cmse_stub_offset = 0;
6236 cmse_stub_size =
6237 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6238 &cmse_stub_template,
6239 &cmse_stub_template_size);
6240 out_sec_name =
6241 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6242 stub_out_sec =
6243 bfd_get_section_by_name (htab->obfd, out_sec_name);
6244 if (stub_out_sec != NULL)
6245 cmse_stub_sec_vma = stub_out_sec->vma;
6247 /* Set the addresses of veneers mentioned in the input secure gateway import
6248 library's symbol table. */
6249 for (i = 0; i < symcount; i++)
6251 sym = sympp[i];
6252 flags = sym->flags;
6253 sym_name = (char *) bfd_asymbol_name (sym);
6254 intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6256 if (sym->section != bfd_abs_section_ptr
6257 || !(flags & (BSF_GLOBAL | BSF_WEAK))
6258 || (flags & BSF_FUNCTION) != BSF_FUNCTION
6259 || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6260 != ST_BRANCH_TO_THUMB))
6262 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6263 "symbol should be absolute, global and "
6264 "refer to Thumb functions"),
6265 in_implib_bfd, sym_name);
6266 ret = FALSE;
6267 continue;
6270 veneer_value = bfd_asymbol_value (sym);
6271 stub_offset = veneer_value - cmse_stub_sec_vma;
6272 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6273 FALSE, FALSE);
6274 hash = (struct elf32_arm_link_hash_entry *)
6275 elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
6277 /* The stub entry should have been created by cmse_scan, or the symbol should
6278 refer to a secure function callable from non-secure code. */
6279 if (!stub_entry && !hash)
6281 bfd_boolean new_stub;
6283 _bfd_error_handler
6284 (_("entry function `%s' disappeared from secure code"), sym_name);
6285 hash = (struct elf32_arm_link_hash_entry *)
6286 elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
6287 stub_entry
6288 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6289 NULL, NULL, bfd_abs_section_ptr, hash,
6290 sym_name, veneer_value,
6291 ST_BRANCH_TO_THUMB, &new_stub);
6292 if (stub_entry == NULL)
6293 ret = FALSE;
6294 else
6296 BFD_ASSERT (new_stub);
6297 new_cmse_stubs_created++;
6298 (*cmse_stub_created)++;
6300 stub_entry->stub_template_size = stub_entry->stub_size = 0;
6301 stub_entry->stub_offset = stub_offset;
6303 /* The symbol found is not callable from non-secure code. */
6304 else if (!stub_entry)
6306 if (!cmse_entry_fct_p (hash))
6308 _bfd_error_handler (_("`%s' refers to a non entry function"),
6309 sym_name);
6310 ret = FALSE;
6312 continue;
6314 else
6316 /* Only stubs for SG veneers should have been created. */
6317 BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6319 /* Check visibility hasn't changed. */
6320 if (!!(flags & BSF_GLOBAL)
6321 != (hash->root.root.type == bfd_link_hash_defined))
6322 _bfd_error_handler
6323 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6324 sym_name);
6326 stub_entry->stub_offset = stub_offset;
6329 /* Size should match that of an SG veneer. */
6330 if (intsym->st_size != cmse_stub_size)
6332 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6333 in_implib_bfd, sym_name);
6334 ret = FALSE;
6337 /* Previous veneer address is before current SG veneer section. */
6338 if (veneer_value < cmse_stub_sec_vma)
6340 /* Avoid offset underflow. */
6341 if (stub_entry)
6342 stub_entry->stub_offset = 0;
6343 stub_offset = 0;
6344 ret = FALSE;
6347 /* Complain if stub offset not a multiple of stub size. */
6348 if (stub_offset % cmse_stub_size)
6350 _bfd_error_handler
6351 (_("offset of veneer for entry function `%s' not a multiple of "
6352 "its size"), sym_name);
6353 ret = FALSE;
6356 if (!ret)
6357 continue;
6359 new_cmse_stubs_created--;
6360 if (veneer_value < cmse_stub_array_start)
6361 cmse_stub_array_start = veneer_value;
6362 next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6363 if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6364 htab->new_cmse_stub_offset = next_cmse_stub_offset;
6367 if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6369 BFD_ASSERT (new_cmse_stubs_created > 0);
6370 _bfd_error_handler
6371 (_("new entry function(s) introduced but no output import library "
6372 "specified:"));
6373 bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6376 if (cmse_stub_array_start != cmse_stub_sec_vma)
6378 _bfd_error_handler
6379 (_("start address of `%s' is different from previous link"),
6380 out_sec_name);
6381 ret = FALSE;
6384 free_sym_buf:
6385 free (sympp);
6386 return ret;
6389 /* Determine and set the size of the stub section for a final link.
6391 The basic idea here is to examine all the relocations looking for
6392 PC-relative calls to a target that is unreachable with a "bl"
6393 instruction. */
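/* For reference (approximate ranges taken from the ARM architecture rather
   than from this file): an ARM-state BL/B reaches +/-32MB, a Thumb-2
   BL/B.W +/-16MB, a Thumb-1 BL only +/-4MB, and a Thumb-2 conditional
   B<c>.W +/-1MB.  Any call whose destination lies outside the relevant
   range needs one of the long-branch stubs sized by this function.  */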
6395 bfd_boolean
6396 elf32_arm_size_stubs (bfd *output_bfd,
6397 bfd *stub_bfd,
6398 struct bfd_link_info *info,
6399 bfd_signed_vma group_size,
6400 asection * (*add_stub_section) (const char *, asection *,
6401 asection *,
6402 unsigned int),
6403 void (*layout_sections_again) (void))
6405 bfd_boolean ret = TRUE;
6406 obj_attribute *out_attr;
6407 int cmse_stub_created = 0;
6408 bfd_size_type stub_group_size;
6409 bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
6410 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
6411 struct a8_erratum_fix *a8_fixes = NULL;
6412 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
6413 struct a8_erratum_reloc *a8_relocs = NULL;
6414 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
6416 if (htab == NULL)
6417 return FALSE;
6419 if (htab->fix_cortex_a8)
6421 a8_fixes = (struct a8_erratum_fix *)
6422 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
6423 a8_relocs = (struct a8_erratum_reloc *)
6424 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
6427 /* Propagate mach to stub bfd, because it may not have been
6428 finalized when we created stub_bfd. */
6429 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
6430 bfd_get_mach (output_bfd));
6432 /* Stash our params away. */
6433 htab->stub_bfd = stub_bfd;
6434 htab->add_stub_section = add_stub_section;
6435 htab->layout_sections_again = layout_sections_again;
6436 stubs_always_after_branch = group_size < 0;
6438 out_attr = elf_known_obj_attributes_proc (output_bfd);
6439 m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
6441 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6442 as the first half of a 32-bit branch straddling two 4K pages. This is a
6443 crude way of enforcing that. */
6444 if (htab->fix_cortex_a8)
6445 stubs_always_after_branch = 1;
6447 if (group_size < 0)
6448 stub_group_size = -group_size;
6449 else
6450 stub_group_size = group_size;
6452 if (stub_group_size == 1)
6454 /* Default values. */
6455 /* The Thumb branch range of +-4MB has to be used as the default
6456 maximum size (a given section can contain both ARM and Thumb
6457 code, so the worst case has to be taken into account).
6459 This value is 24K less than that, which allows for 2025
6460 12-byte stubs. If we exceed that, then we will fail to link.
6461 The user will have to relink with an explicit group size
6462 option. */
6463 stub_group_size = 4170000;
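/* Sanity check of the figure above (illustrative arithmetic): 4MB is
   4194304 bytes, 4194304 - 4170000 = 24304, i.e. roughly 24K, and
   2025 twelve-byte stubs occupy 2025 * 12 = 24300 bytes.  */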
6466 group_sections (htab, stub_group_size, stubs_always_after_branch);
6468 /* If we're applying the cortex A8 fix, we need to determine the
6469 program header size now, because we cannot change it later --
6470 that could alter section placements. Notice the A8 erratum fix
6471 ends up requiring the section addresses to remain unchanged
6472 modulo the page size. That's something we cannot represent
6473 inside BFD, and we don't want to force the section alignment to
6474 be the page size. */
6475 if (htab->fix_cortex_a8)
6476 (*htab->layout_sections_again) ();
6478 while (1)
6480 bfd *input_bfd;
6481 unsigned int bfd_indx;
6482 asection *stub_sec;
6483 enum elf32_arm_stub_type stub_type;
6484 bfd_boolean stub_changed = FALSE;
6485 unsigned prev_num_a8_fixes = num_a8_fixes;
6487 num_a8_fixes = 0;
6488 for (input_bfd = info->input_bfds, bfd_indx = 0;
6489 input_bfd != NULL;
6490 input_bfd = input_bfd->link.next, bfd_indx++)
6492 Elf_Internal_Shdr *symtab_hdr;
6493 asection *section;
6494 Elf_Internal_Sym *local_syms = NULL;
6496 if (!is_arm_elf (input_bfd)
6497 || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0)
6498 continue;
6500 num_a8_relocs = 0;
6502 /* We'll need the symbol table in a second. */
6503 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
6504 if (symtab_hdr->sh_info == 0)
6505 continue;
6507 /* Limit the scan of symbols to object files whose profile is
6508 Microcontroller, so as not to hinder performance in the general case. */
6509 if (m_profile && first_veneer_scan)
6511 struct elf_link_hash_entry **sym_hashes;
6513 sym_hashes = elf_sym_hashes (input_bfd);
6514 if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
6515 &cmse_stub_created))
6516 goto error_ret_free_local;
6518 if (cmse_stub_created != 0)
6519 stub_changed = TRUE;
6522 /* Walk over each section attached to the input bfd. */
6523 for (section = input_bfd->sections;
6524 section != NULL;
6525 section = section->next)
6527 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
6529 /* If there aren't any relocs, then there's nothing more
6530 to do. */
6531 if ((section->flags & SEC_RELOC) == 0
6532 || section->reloc_count == 0
6533 || (section->flags & SEC_CODE) == 0)
6534 continue;
6536 /* If this section is a link-once section that will be
6537 discarded, then don't create any stubs. */
6538 if (section->output_section == NULL
6539 || section->output_section->owner != output_bfd)
6540 continue;
6542 /* Get the relocs. */
6543 internal_relocs
6544 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
6545 NULL, info->keep_memory);
6546 if (internal_relocs == NULL)
6547 goto error_ret_free_local;
6549 /* Now examine each relocation. */
6550 irela = internal_relocs;
6551 irelaend = irela + section->reloc_count;
6552 for (; irela < irelaend; irela++)
6554 unsigned int r_type, r_indx;
6555 asection *sym_sec;
6556 bfd_vma sym_value;
6557 bfd_vma destination;
6558 struct elf32_arm_link_hash_entry *hash;
6559 const char *sym_name;
6560 unsigned char st_type;
6561 enum arm_st_branch_type branch_type;
6562 bfd_boolean created_stub = FALSE;
6564 r_type = ELF32_R_TYPE (irela->r_info);
6565 r_indx = ELF32_R_SYM (irela->r_info);
6567 if (r_type >= (unsigned int) R_ARM_max)
6569 bfd_set_error (bfd_error_bad_value);
6570 error_ret_free_internal:
6571 if (elf_section_data (section)->relocs == NULL)
6572 free (internal_relocs);
6573 /* Fall through. */
6574 error_ret_free_local:
6575 if (local_syms != NULL
6576 && (symtab_hdr->contents
6577 != (unsigned char *) local_syms))
6578 free (local_syms);
6579 return FALSE;
6582 hash = NULL;
6583 if (r_indx >= symtab_hdr->sh_info)
6584 hash = elf32_arm_hash_entry
6585 (elf_sym_hashes (input_bfd)
6586 [r_indx - symtab_hdr->sh_info]);
6588 /* Only look for stubs on branch instructions, or
6589 non-relaxed TLSCALL */
6590 if ((r_type != (unsigned int) R_ARM_CALL)
6591 && (r_type != (unsigned int) R_ARM_THM_CALL)
6592 && (r_type != (unsigned int) R_ARM_JUMP24)
6593 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
6594 && (r_type != (unsigned int) R_ARM_THM_XPC22)
6595 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
6596 && (r_type != (unsigned int) R_ARM_PLT32)
6597 && !((r_type == (unsigned int) R_ARM_TLS_CALL
6598 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6599 && r_type == elf32_arm_tls_transition
6600 (info, r_type, &hash->root)
6601 && ((hash ? hash->tls_type
6602 : (elf32_arm_local_got_tls_type
6603 (input_bfd)[r_indx]))
6604 & GOT_TLS_GDESC) != 0))
6605 continue;
6607 /* Now determine the call target, its name, value,
6608 section. */
6609 sym_sec = NULL;
6610 sym_value = 0;
6611 destination = 0;
6612 sym_name = NULL;
6614 if (r_type == (unsigned int) R_ARM_TLS_CALL
6615 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6617 /* A non-relaxed TLS call. The target is the
6618 plt-resident trampoline and has nothing to do
6619 with the symbol. */
6620 BFD_ASSERT (htab->tls_trampoline > 0);
6621 sym_sec = htab->root.splt;
6622 sym_value = htab->tls_trampoline;
6623 hash = 0;
6624 st_type = STT_FUNC;
6625 branch_type = ST_BRANCH_TO_ARM;
6627 else if (!hash)
6629 /* It's a local symbol. */
6630 Elf_Internal_Sym *sym;
6632 if (local_syms == NULL)
6634 local_syms
6635 = (Elf_Internal_Sym *) symtab_hdr->contents;
6636 if (local_syms == NULL)
6637 local_syms
6638 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6639 symtab_hdr->sh_info, 0,
6640 NULL, NULL, NULL);
6641 if (local_syms == NULL)
6642 goto error_ret_free_internal;
6645 sym = local_syms + r_indx;
6646 if (sym->st_shndx == SHN_UNDEF)
6647 sym_sec = bfd_und_section_ptr;
6648 else if (sym->st_shndx == SHN_ABS)
6649 sym_sec = bfd_abs_section_ptr;
6650 else if (sym->st_shndx == SHN_COMMON)
6651 sym_sec = bfd_com_section_ptr;
6652 else
6653 sym_sec =
6654 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
6656 if (!sym_sec)
6657 /* This is an undefined symbol. It can never
6658 be resolved. */
6659 continue;
6661 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
6662 sym_value = sym->st_value;
6663 destination = (sym_value + irela->r_addend
6664 + sym_sec->output_offset
6665 + sym_sec->output_section->vma);
6666 st_type = ELF_ST_TYPE (sym->st_info);
6667 branch_type =
6668 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
6669 sym_name
6670 = bfd_elf_string_from_elf_section (input_bfd,
6671 symtab_hdr->sh_link,
6672 sym->st_name);
6674 else
6676 /* It's an external symbol. */
6677 while (hash->root.root.type == bfd_link_hash_indirect
6678 || hash->root.root.type == bfd_link_hash_warning)
6679 hash = ((struct elf32_arm_link_hash_entry *)
6680 hash->root.root.u.i.link);
6682 if (hash->root.root.type == bfd_link_hash_defined
6683 || hash->root.root.type == bfd_link_hash_defweak)
6685 sym_sec = hash->root.root.u.def.section;
6686 sym_value = hash->root.root.u.def.value;
6688 struct elf32_arm_link_hash_table *globals =
6689 elf32_arm_hash_table (info);
6691 /* For a destination in a shared library,
6692 use the PLT stub as target address to
6693 decide whether a branch stub is
6694 needed. */
6695 if (globals != NULL
6696 && globals->root.splt != NULL
6697 && hash != NULL
6698 && hash->root.plt.offset != (bfd_vma) -1)
6700 sym_sec = globals->root.splt;
6701 sym_value = hash->root.plt.offset;
6702 if (sym_sec->output_section != NULL)
6703 destination = (sym_value
6704 + sym_sec->output_offset
6705 + sym_sec->output_section->vma);
6707 else if (sym_sec->output_section != NULL)
6708 destination = (sym_value + irela->r_addend
6709 + sym_sec->output_offset
6710 + sym_sec->output_section->vma);
6712 else if ((hash->root.root.type == bfd_link_hash_undefined)
6713 || (hash->root.root.type == bfd_link_hash_undefweak))
6715 /* For a shared library, use the PLT stub as
6716 the target address to decide whether a long
6717 branch stub is needed.
6718 For absolute code, such undefined references cannot be handled. */
6719 struct elf32_arm_link_hash_table *globals =
6720 elf32_arm_hash_table (info);
6722 if (globals != NULL
6723 && globals->root.splt != NULL
6724 && hash != NULL
6725 && hash->root.plt.offset != (bfd_vma) -1)
6727 sym_sec = globals->root.splt;
6728 sym_value = hash->root.plt.offset;
6729 if (sym_sec->output_section != NULL)
6730 destination = (sym_value
6731 + sym_sec->output_offset
6732 + sym_sec->output_section->vma);
6734 else
6735 continue;
6737 else
6739 bfd_set_error (bfd_error_bad_value);
6740 goto error_ret_free_internal;
6742 st_type = hash->root.type;
6743 branch_type =
6744 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6745 sym_name = hash->root.root.root.string;
6750 bfd_boolean new_stub;
6751 struct elf32_arm_stub_hash_entry *stub_entry;
6753 /* Determine what (if any) linker stub is needed. */
6754 stub_type = arm_type_of_stub (info, section, irela,
6755 st_type, &branch_type,
6756 hash, destination, sym_sec,
6757 input_bfd, sym_name);
6758 if (stub_type == arm_stub_none)
6759 break;
6761 /* We've either created a stub for this reloc already,
6762 or we are about to. */
6763 stub_entry =
6764 elf32_arm_create_stub (htab, stub_type, section, irela,
6765 sym_sec, hash,
6766 (char *) sym_name, sym_value,
6767 branch_type, &new_stub);
6769 created_stub = stub_entry != NULL;
6770 if (!created_stub)
6771 goto error_ret_free_internal;
6772 else if (!new_stub)
6773 break;
6774 else
6775 stub_changed = TRUE;
6777 while (0);
6779 /* Look for relocations which might trigger Cortex-A8
6780 erratum. */
6781 if (htab->fix_cortex_a8
6782 && (r_type == (unsigned int) R_ARM_THM_JUMP24
6783 || r_type == (unsigned int) R_ARM_THM_JUMP19
6784 || r_type == (unsigned int) R_ARM_THM_CALL
6785 || r_type == (unsigned int) R_ARM_THM_XPC22))
6787 bfd_vma from = section->output_section->vma
6788 + section->output_offset
6789 + irela->r_offset;
6791 if ((from & 0xfff) == 0xffe)
6793 /* Found a candidate. Note we haven't checked the
6794 destination is within 4K here: if we do so (and
6795 don't create an entry in a8_relocs) we can't tell
6796 that a branch should have been relocated when
6797 scanning later. */
6798 if (num_a8_relocs == a8_reloc_table_size)
6800 a8_reloc_table_size *= 2;
6801 a8_relocs = (struct a8_erratum_reloc *)
6802 bfd_realloc (a8_relocs,
6803 sizeof (struct a8_erratum_reloc)
6804 * a8_reloc_table_size);
6807 a8_relocs[num_a8_relocs].from = from;
6808 a8_relocs[num_a8_relocs].destination = destination;
6809 a8_relocs[num_a8_relocs].r_type = r_type;
6810 a8_relocs[num_a8_relocs].branch_type = branch_type;
6811 a8_relocs[num_a8_relocs].sym_name = sym_name;
6812 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
6813 a8_relocs[num_a8_relocs].hash = hash;
6815 num_a8_relocs++;
6820 /* We're done with the internal relocs, free them. */
6821 if (elf_section_data (section)->relocs == NULL)
6822 free (internal_relocs);
6825 if (htab->fix_cortex_a8)
6827 /* Sort relocs which might apply to Cortex-A8 erratum. */
6828 qsort (a8_relocs, num_a8_relocs,
6829 sizeof (struct a8_erratum_reloc),
6830 &a8_reloc_compare);
6832 /* Scan for branches which might trigger Cortex-A8 erratum. */
6833 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
6834 &num_a8_fixes, &a8_fix_table_size,
6835 a8_relocs, num_a8_relocs,
6836 prev_num_a8_fixes, &stub_changed)
6837 != 0)
6838 goto error_ret_free_local;
6841 if (local_syms != NULL
6842 && symtab_hdr->contents != (unsigned char *) local_syms)
6844 if (!info->keep_memory)
6845 free (local_syms);
6846 else
6847 symtab_hdr->contents = (unsigned char *) local_syms;
6851 if (first_veneer_scan
6852 && !set_cmse_veneer_addr_from_implib (info, htab,
6853 &cmse_stub_created))
6854 ret = FALSE;
6856 if (prev_num_a8_fixes != num_a8_fixes)
6857 stub_changed = TRUE;
6859 if (!stub_changed)
6860 break;
6862 /* OK, we've added some stubs. Find out the new size of the
6863 stub sections. */
6864 for (stub_sec = htab->stub_bfd->sections;
6865 stub_sec != NULL;
6866 stub_sec = stub_sec->next)
6868 /* Ignore non-stub sections. */
6869 if (!strstr (stub_sec->name, STUB_SUFFIX))
6870 continue;
6872 stub_sec->size = 0;
6875 /* Add new SG veneers after those already in the input import
6876 library. */
6877 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6878 stub_type++)
6880 bfd_vma *start_offset_p;
6881 asection **stub_sec_p;
6883 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6884 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6885 if (start_offset_p == NULL)
6886 continue;
6888 BFD_ASSERT (stub_sec_p != NULL);
6889 if (*stub_sec_p != NULL)
6890 (*stub_sec_p)->size = *start_offset_p;
6893 /* Compute stub section size, considering padding. */
6894 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
6895 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6896 stub_type++)
6898 int size, padding;
6899 asection **stub_sec_p;
6901 padding = arm_dedicated_stub_section_padding (stub_type);
6902 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6903 /* Skip if no stub input section or no stub section padding
6904 required. */
6905 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
6906 continue;
6907 /* Stub section padding required but no dedicated section. */
6908 BFD_ASSERT (stub_sec_p);
6910 size = (*stub_sec_p)->size;
6911 size = (size + padding - 1) & ~(padding - 1);
6912 (*stub_sec_p)->size = size;
6915 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6916 if (htab->fix_cortex_a8)
6917 for (i = 0; i < num_a8_fixes; i++)
6919 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
6920 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
6922 if (stub_sec == NULL)
6923 return FALSE;
6925 stub_sec->size
6926 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
6927 NULL);
6931 /* Ask the linker to lay the sections out again. */
6932 (*htab->layout_sections_again) ();
6933 first_veneer_scan = FALSE;
6936 /* Add stubs for Cortex-A8 erratum fixes now. */
6937 if (htab->fix_cortex_a8)
6939 for (i = 0; i < num_a8_fixes; i++)
6941 struct elf32_arm_stub_hash_entry *stub_entry;
6942 char *stub_name = a8_fixes[i].stub_name;
6943 asection *section = a8_fixes[i].section;
6944 unsigned int section_id = a8_fixes[i].section->id;
6945 asection *link_sec = htab->stub_group[section_id].link_sec;
6946 asection *stub_sec = htab->stub_group[section_id].stub_sec;
6947 const insn_sequence *template_sequence;
6948 int template_size, size = 0;
6950 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
6951 TRUE, FALSE);
6952 if (stub_entry == NULL)
6954 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6955 section->owner, stub_name);
6956 return FALSE;
6959 stub_entry->stub_sec = stub_sec;
6960 stub_entry->stub_offset = (bfd_vma) -1;
6961 stub_entry->id_sec = link_sec;
6962 stub_entry->stub_type = a8_fixes[i].stub_type;
6963 stub_entry->source_value = a8_fixes[i].offset;
6964 stub_entry->target_section = a8_fixes[i].section;
6965 stub_entry->target_value = a8_fixes[i].target_offset;
6966 stub_entry->orig_insn = a8_fixes[i].orig_insn;
6967 stub_entry->branch_type = a8_fixes[i].branch_type;
6969 size = find_stub_size_and_template (a8_fixes[i].stub_type,
6970 &template_sequence,
6971 &template_size);
6973 stub_entry->stub_size = size;
6974 stub_entry->stub_template = template_sequence;
6975 stub_entry->stub_template_size = template_size;
6978 /* Stash the Cortex-A8 erratum fix array for use later in
6979 elf32_arm_write_section(). */
6980 htab->a8_erratum_fixes = a8_fixes;
6981 htab->num_a8_erratum_fixes = num_a8_fixes;
6983 else
6985 htab->a8_erratum_fixes = NULL;
6986 htab->num_a8_erratum_fixes = 0;
6988 return ret;
6991 /* Build all the stubs associated with the current output file. The
6992 stubs are kept in a hash table attached to the main linker hash
6993 table. We also set up the .plt entries for statically linked PIC
6994 functions here. This function is called via arm_elf_finish in the
6995 linker. */
6997 bfd_boolean
6998 elf32_arm_build_stubs (struct bfd_link_info *info)
7000 asection *stub_sec;
7001 struct bfd_hash_table *table;
7002 enum elf32_arm_stub_type stub_type;
7003 struct elf32_arm_link_hash_table *htab;
7005 htab = elf32_arm_hash_table (info);
7006 if (htab == NULL)
7007 return FALSE;
7009 for (stub_sec = htab->stub_bfd->sections;
7010 stub_sec != NULL;
7011 stub_sec = stub_sec->next)
7013 bfd_size_type size;
7015 /* Ignore non-stub sections. */
7016 if (!strstr (stub_sec->name, STUB_SUFFIX))
7017 continue;
7019 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
7020 must at least be done for stub sections requiring padding and for SG
7021 veneers, to ensure that non-secure code branching to a removed SG
7022 veneer causes an error. */
7023 size = stub_sec->size;
7024 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
7025 if (stub_sec->contents == NULL && size != 0)
7026 return FALSE;
7028 stub_sec->size = 0;
7031 /* Add new SG veneers after those already in the input import library. */
7032 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7034 bfd_vma *start_offset_p;
7035 asection **stub_sec_p;
7037 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
7038 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
7039 if (start_offset_p == NULL)
7040 continue;
7042 BFD_ASSERT (stub_sec_p != NULL);
7043 if (*stub_sec_p != NULL)
7044 (*stub_sec_p)->size = *start_offset_p;
7047 /* Build the stubs as directed by the stub hash table. */
7048 table = &htab->stub_hash_table;
7049 bfd_hash_traverse (table, arm_build_one_stub, info);
7050 if (htab->fix_cortex_a8)
7052 /* Place the Cortex-A8 stubs last. */
7053 htab->fix_cortex_a8 = -1;
7054 bfd_hash_traverse (table, arm_build_one_stub, info);
7057 return TRUE;
7060 /* Locate the Thumb encoded calling stub for NAME. */
7062 static struct elf_link_hash_entry *
7063 find_thumb_glue (struct bfd_link_info *link_info,
7064 const char *name,
7065 char **error_message)
7067 char *tmp_name;
7068 struct elf_link_hash_entry *hash;
7069 struct elf32_arm_link_hash_table *hash_table;
7071 /* We need a pointer to the armelf specific hash table. */
7072 hash_table = elf32_arm_hash_table (link_info);
7073 if (hash_table == NULL)
7074 return NULL;
7076 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7077 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7079 BFD_ASSERT (tmp_name);
7081 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7083 hash = elf_link_hash_lookup
7084 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7086 if (hash == NULL
7087 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7088 "Thumb", tmp_name, name) == -1)
7089 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7091 free (tmp_name);
7093 return hash;
7096 /* Locate the ARM encoded calling stub for NAME. */
7098 static struct elf_link_hash_entry *
7099 find_arm_glue (struct bfd_link_info *link_info,
7100 const char *name,
7101 char **error_message)
7103 char *tmp_name;
7104 struct elf_link_hash_entry *myh;
7105 struct elf32_arm_link_hash_table *hash_table;
7107 /* We need a pointer to the elfarm specific hash table. */
7108 hash_table = elf32_arm_hash_table (link_info);
7109 if (hash_table == NULL)
7110 return NULL;
7112 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7113 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7115 BFD_ASSERT (tmp_name);
7117 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7119 myh = elf_link_hash_lookup
7120 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7122 if (myh == NULL
7123 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7124 "ARM", tmp_name, name) == -1)
7125 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7127 free (tmp_name);
7129 return myh;
7132 /* ARM->Thumb glue (static images):
7134 .arm
7135 __func_from_arm:
7136 ldr r12, __func_addr
7137 bx r12
7138 __func_addr:
7139 .word func @ behave as if you saw an ARM_32 reloc.
7141 (v5t static images)
7142 .arm
7143 __func_from_arm:
7144 ldr pc, __func_addr
7145 __func_addr:
7146 .word func @ behave as if you saw an ARM_32 reloc.
7148 (relocatable images)
7149 .arm
7150 __func_from_arm:
7151 ldr r12, __func_offset
7152 add r12, r12, pc
7153 bx r12
7154 __func_offset:
7155 .word func - . */
7157 #define ARM2THUMB_STATIC_GLUE_SIZE 12
7158 static const insn32 a2t1_ldr_insn = 0xe59fc000;
7159 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
7160 static const insn32 a2t3_func_addr_insn = 0x00000001;
7162 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
7163 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
7164 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
7166 #define ARM2THUMB_PIC_GLUE_SIZE 16
7167 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
7168 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
7169 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
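/* Decoding the constants above, by hand and for reference only: 0xe59fc000
   is "ldr r12, [pc]" and 0xe12fff1c is "bx r12" (static glue); 0xe51ff004
   is "ldr pc, [pc, #-4]" (v5t static glue); 0xe59fc004 is
   "ldr r12, [pc, #4]" and 0xe08cc00f is "add r12, r12, pc" (PIC glue).
   The *_func_addr_insn words appear to be placeholders that receive the
   target address or offset when the glue is emitted.  */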
7171 /* Thumb->ARM:                 Thumb->(non-interworking aware) ARM
7173      .thumb                         .thumb
7174      .align 2                       .align 2
7175  __func_from_thumb:             __func_from_thumb:
7176      bx pc                          push {r6, lr}
7177      nop                            ldr  r6, __func_addr
7178      .arm                           mov  lr, pc
7179      b func                         bx   r6
7180                                     .arm
7181                                    ;; back_to_thumb
7182                                     ldmia r13! {r6, lr}
7183                                     bx    lr
7184                                 __func_addr:
7185                                     .word func  */
7187 #define THUMB2ARM_GLUE_SIZE 8
7188 static const insn16 t2a1_bx_pc_insn = 0x4778;
7189 static const insn16 t2a2_noop_insn = 0x46c0;
7190 static const insn32 t2a3_b_insn = 0xea000000;
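/* Likewise for the Thumb->ARM glue, decoded by hand for reference: 0x4778
   is "bx pc", 0x46c0 is the Thumb NOP "mov r8, r8", and 0xea000000 is an
   ARM-state B whose zero offset field is fixed up to reach the target when
   the glue is emitted.  */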
7192 #define VFP11_ERRATUM_VENEER_SIZE 8
7193 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
7194 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
7196 #define ARM_BX_VENEER_SIZE 12
7197 static const insn32 armbx1_tst_insn = 0xe3100001;
7198 static const insn32 armbx2_moveq_insn = 0x01a0f000;
7199 static const insn32 armbx3_bx_insn = 0xe12fff10;
7201 #ifndef ELFARM_NABI_C_INCLUDED
7202 static void
7203 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7205 asection * s;
7206 bfd_byte * contents;
7208 if (size == 0)
7210 /* Do not include empty glue sections in the output. */
7211 if (abfd != NULL)
7213 s = bfd_get_linker_section (abfd, name);
7214 if (s != NULL)
7215 s->flags |= SEC_EXCLUDE;
7217 return;
7220 BFD_ASSERT (abfd != NULL);
7222 s = bfd_get_linker_section (abfd, name);
7223 BFD_ASSERT (s != NULL);
7225 contents = (bfd_byte *) bfd_alloc (abfd, size);
7227 BFD_ASSERT (s->size == size);
7228 s->contents = contents;
7231 bfd_boolean
7232 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7234 struct elf32_arm_link_hash_table * globals;
7236 globals = elf32_arm_hash_table (info);
7237 BFD_ASSERT (globals != NULL);
7239 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7240 globals->arm_glue_size,
7241 ARM2THUMB_GLUE_SECTION_NAME);
7243 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7244 globals->thumb_glue_size,
7245 THUMB2ARM_GLUE_SECTION_NAME);
7247 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7248 globals->vfp11_erratum_glue_size,
7249 VFP11_ERRATUM_VENEER_SECTION_NAME);
7251 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7252 globals->stm32l4xx_erratum_glue_size,
7253 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7255 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7256 globals->bx_glue_size,
7257 ARM_BX_GLUE_SECTION_NAME);
7259 return TRUE;
7262 /* Allocate space and symbols for calling a Thumb function from ARM mode.
7263 Returns the symbol identifying the stub. */
7265 static struct elf_link_hash_entry *
7266 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
7267 struct elf_link_hash_entry * h)
7269 const char * name = h->root.root.string;
7270 asection * s;
7271 char * tmp_name;
7272 struct elf_link_hash_entry * myh;
7273 struct bfd_link_hash_entry * bh;
7274 struct elf32_arm_link_hash_table * globals;
7275 bfd_vma val;
7276 bfd_size_type size;
7278 globals = elf32_arm_hash_table (link_info);
7279 BFD_ASSERT (globals != NULL);
7280 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7282 s = bfd_get_linker_section
7283 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
7285 BFD_ASSERT (s != NULL);
7287 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7288 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7290 BFD_ASSERT (tmp_name);
7292 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7294 myh = elf_link_hash_lookup
7295 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7297 if (myh != NULL)
7299 /* We've already seen this guy. */
7300 free (tmp_name);
7301 return myh;
7304 /* The only trick here is using hash_table->arm_glue_size as the value.
7305 Even though the section isn't allocated yet, this is where we will be
7306 putting it. The +1 on the value marks that the stub has not been
7307 output yet - not that it is a Thumb function. */
7308 bh = NULL;
7309 val = globals->arm_glue_size + 1;
7310 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7311 tmp_name, BSF_GLOBAL, s, val,
7312 NULL, TRUE, FALSE, &bh);
7314 myh = (struct elf_link_hash_entry *) bh;
7315 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7316 myh->forced_local = 1;
7318 free (tmp_name);
7320 if (bfd_link_pic (link_info)
7321 || globals->root.is_relocatable_executable
7322 || globals->pic_veneer)
7323 size = ARM2THUMB_PIC_GLUE_SIZE;
7324 else if (globals->use_blx)
7325 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
7326 else
7327 size = ARM2THUMB_STATIC_GLUE_SIZE;
7329 s->size += size;
7330 globals->arm_glue_size += size;
7332 return myh;
7335 /* Allocate space for ARMv4 BX veneers. */
7337 static void
7338 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
7340 asection * s;
7341 struct elf32_arm_link_hash_table *globals;
7342 char *tmp_name;
7343 struct elf_link_hash_entry *myh;
7344 struct bfd_link_hash_entry *bh;
7345 bfd_vma val;
7347 /* BX PC does not need a veneer. */
7348 if (reg == 15)
7349 return;
7351 globals = elf32_arm_hash_table (link_info);
7352 BFD_ASSERT (globals != NULL);
7353 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7355 /* Check if this veneer has already been allocated. */
7356 if (globals->bx_glue_offset[reg])
7357 return;
7359 s = bfd_get_linker_section
7360 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
7362 BFD_ASSERT (s != NULL);
7364 /* Add symbol for veneer. */
7365 tmp_name = (char *)
7366 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
7368 BFD_ASSERT (tmp_name);
7370 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
7372 myh = elf_link_hash_lookup
7373 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
7375 BFD_ASSERT (myh == NULL);
7377 bh = NULL;
7378 val = globals->bx_glue_size;
7379 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7380 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7381 NULL, TRUE, FALSE, &bh);
7383 myh = (struct elf_link_hash_entry *) bh;
7384 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7385 myh->forced_local = 1;
7387 s->size += ARM_BX_VENEER_SIZE;
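  /* ORing in bit 1 below keeps the recorded value non-zero even for the
     veneer allocated at offset 0, so the "already allocated" check at the
     top of this function still works; the low bits are presumably masked
     off again when the offset is actually used to emit the veneer.  */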
7388 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
7389 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
7393 /* Add an entry to the code/data map for section SEC. */
7395 static void
7396 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7398 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7399 unsigned int newidx;
7401 if (sec_data->map == NULL)
7403 sec_data->map = (elf32_arm_section_map *)
7404 bfd_malloc (sizeof (elf32_arm_section_map));
7405 sec_data->mapcount = 0;
7406 sec_data->mapsize = 1;
7409 newidx = sec_data->mapcount++;
7411 if (sec_data->mapcount > sec_data->mapsize)
7413 sec_data->mapsize *= 2;
7414 sec_data->map = (elf32_arm_section_map *)
7415 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7416 * sizeof (elf32_arm_section_map));
7419 if (sec_data->map)
7421 sec_data->map[newidx].vma = vma;
7422 sec_data->map[newidx].type = type;
7427 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7428 veneers are handled for now. */
7430 static bfd_vma
7431 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7432 elf32_vfp11_erratum_list *branch,
7433 bfd *branch_bfd,
7434 asection *branch_sec,
7435 unsigned int offset)
7437 asection *s;
7438 struct elf32_arm_link_hash_table *hash_table;
7439 char *tmp_name;
7440 struct elf_link_hash_entry *myh;
7441 struct bfd_link_hash_entry *bh;
7442 bfd_vma val;
7443 struct _arm_elf_section_data *sec_data;
7444 elf32_vfp11_erratum_list *newerr;
7446 hash_table = elf32_arm_hash_table (link_info);
7447 BFD_ASSERT (hash_table != NULL);
7448 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7450 s = bfd_get_linker_section
7451 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7453 BFD_ASSERT (s != NULL);
7455 sec_data = elf32_arm_section_data (s);
7457 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7458 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7460 BFD_ASSERT (tmp_name);
7462 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7463 hash_table->num_vfp11_fixes);
7465 myh = elf_link_hash_lookup
7466 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7468 BFD_ASSERT (myh == NULL);
7470 bh = NULL;
7471 val = hash_table->vfp11_erratum_glue_size;
7472 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7473 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7474 NULL, TRUE, FALSE, &bh);
7476 myh = (struct elf_link_hash_entry *) bh;
7477 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7478 myh->forced_local = 1;
7480 /* Link veneer back to calling location. */
7481 sec_data->erratumcount += 1;
7482 newerr = (elf32_vfp11_erratum_list *)
7483 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7485 newerr->type = VFP11_ERRATUM_ARM_VENEER;
7486 newerr->vma = -1;
7487 newerr->u.v.branch = branch;
7488 newerr->u.v.id = hash_table->num_vfp11_fixes;
7489 branch->u.b.veneer = newerr;
7491 newerr->next = sec_data->erratumlist;
7492 sec_data->erratumlist = newerr;
7494 /* A symbol for the return from the veneer. */
7495 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7496 hash_table->num_vfp11_fixes);
7498 myh = elf_link_hash_lookup
7499 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7501 if (myh != NULL)
7502 abort ();
7504 bh = NULL;
7505 val = offset + 4;
7506 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7507 branch_sec, val, NULL, TRUE, FALSE, &bh);
7509 myh = (struct elf_link_hash_entry *) bh;
7510 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7511 myh->forced_local = 1;
7513 free (tmp_name);
7515 /* Generate a mapping symbol for the veneer section, and explicitly add an
7516 entry for that symbol to the code/data map for the section. */
7517 if (hash_table->vfp11_erratum_glue_size == 0)
7519 bh = NULL;
7520 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7521 ever requires this erratum fix. */
7522 _bfd_generic_link_add_one_symbol (link_info,
7523 hash_table->bfd_of_glue_owner, "$a",
7524 BSF_LOCAL, s, 0, NULL,
7525 TRUE, FALSE, &bh);
7527 myh = (struct elf_link_hash_entry *) bh;
7528 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7529 myh->forced_local = 1;
7531 /* The elf32_arm_init_maps function only cares about symbols from input
7532 BFDs. We must make a note of this generated mapping symbol
7533 ourselves so that code byteswapping works properly in
7534 elf32_arm_write_section. */
7535 elf32_arm_section_map_add (s, 'a', 0);
7538 s->size += VFP11_ERRATUM_VENEER_SIZE;
7539 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7540 hash_table->num_vfp11_fixes++;
7542 /* The offset of the veneer. */
7543 return val;
7546 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
7547 veneers need to be handled, because the erratum affects only Cortex-M (Thumb-only) parts. */
7549 static bfd_vma
7550 record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
7551 elf32_stm32l4xx_erratum_list *branch,
7552 bfd *branch_bfd,
7553 asection *branch_sec,
7554 unsigned int offset,
7555 bfd_size_type veneer_size)
7557 asection *s;
7558 struct elf32_arm_link_hash_table *hash_table;
7559 char *tmp_name;
7560 struct elf_link_hash_entry *myh;
7561 struct bfd_link_hash_entry *bh;
7562 bfd_vma val;
7563 struct _arm_elf_section_data *sec_data;
7564 elf32_stm32l4xx_erratum_list *newerr;
7566 hash_table = elf32_arm_hash_table (link_info);
7567 BFD_ASSERT (hash_table != NULL);
7568 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7570 s = bfd_get_linker_section
7571 (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7573 BFD_ASSERT (s != NULL);
7575 sec_data = elf32_arm_section_data (s);
7577 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7578 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7580 BFD_ASSERT (tmp_name);
7582 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7583 hash_table->num_stm32l4xx_fixes);
7585 myh = elf_link_hash_lookup
7586 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7588 BFD_ASSERT (myh == NULL);
7590 bh = NULL;
7591 val = hash_table->stm32l4xx_erratum_glue_size;
7592 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7593 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7594 NULL, TRUE, FALSE, &bh);
7596 myh = (struct elf_link_hash_entry *) bh;
7597 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7598 myh->forced_local = 1;
7600 /* Link veneer back to calling location. */
7601 sec_data->stm32l4xx_erratumcount += 1;
7602 newerr = (elf32_stm32l4xx_erratum_list *)
7603 bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));
7605 newerr->type = STM32L4XX_ERRATUM_VENEER;
7606 newerr->vma = -1;
7607 newerr->u.v.branch = branch;
7608 newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
7609 branch->u.b.veneer = newerr;
7611 newerr->next = sec_data->stm32l4xx_erratumlist;
7612 sec_data->stm32l4xx_erratumlist = newerr;
7614 /* A symbol for the return from the veneer. */
7615 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7616 hash_table->num_stm32l4xx_fixes);
7618 myh = elf_link_hash_lookup
7619 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7621 if (myh != NULL)
7622 abort ();
7624 bh = NULL;
7625 val = offset + 4;
7626 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7627 branch_sec, val, NULL, TRUE, FALSE, &bh);
7629 myh = (struct elf_link_hash_entry *) bh;
7630 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7631 myh->forced_local = 1;
7633 free (tmp_name);
7635 /* Generate a mapping symbol for the veneer section, and explicitly add an
7636 entry for that symbol to the code/data map for the section. */
7637 if (hash_table->stm32l4xx_erratum_glue_size == 0)
7639 bh = NULL;
7640 /* Creates a THUMB symbol since there is no other choice. */
7641 _bfd_generic_link_add_one_symbol (link_info,
7642 hash_table->bfd_of_glue_owner, "$t",
7643 BSF_LOCAL, s, 0, NULL,
7644 TRUE, FALSE, &bh);
7646 myh = (struct elf_link_hash_entry *) bh;
7647 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7648 myh->forced_local = 1;
7650 /* The elf32_arm_init_maps function only cares about symbols from input
7651 BFDs. We must make a note of this generated mapping symbol
7652 ourselves so that code byteswapping works properly in
7653 elf32_arm_write_section. */
7654 elf32_arm_section_map_add (s, 't', 0);
7657 s->size += veneer_size;
7658 hash_table->stm32l4xx_erratum_glue_size += veneer_size;
7659 hash_table->num_stm32l4xx_fixes++;
7661 /* The offset of the veneer. */
7662 return val;
7665 #define ARM_GLUE_SECTION_FLAGS \
7666 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7667 | SEC_READONLY | SEC_LINKER_CREATED)
7669 /* Create a fake section for use by the ARM backend of the linker. */
7671 static bfd_boolean
7672 arm_make_glue_section (bfd * abfd, const char * name)
7674 asection * sec;
7676 sec = bfd_get_linker_section (abfd, name);
7677 if (sec != NULL)
7678 /* Already made. */
7679 return TRUE;
7681 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7683 if (sec == NULL
7684 || !bfd_set_section_alignment (abfd, sec, 2))
7685 return FALSE;
7687 /* Set the gc mark to prevent the section from being removed by garbage
7688 collection, despite the fact that no relocs refer to this section. */
7689 sec->gc_mark = 1;
7691 return TRUE;
7694 /* Set size of .plt entries. This function is called from the
7695 linker scripts in ld/emultempl/{armelf}.em. */
7697 void
7698 bfd_elf32_arm_use_long_plt (void)
7700 elf32_arm_use_long_plt_entry = TRUE;
7703 /* Add the glue sections to ABFD. This function is called from the
7704 linker scripts in ld/emultempl/{armelf}.em. */
7706 bfd_boolean
7707 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7708 struct bfd_link_info *info)
7710 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7711 bfd_boolean dostm32l4xx = globals
7712 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7713 bfd_boolean addglue;
7715 /* If we are only performing a partial
7716 link do not bother adding the glue. */
7717 if (bfd_link_relocatable (info))
7718 return TRUE;
7720 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7721 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7722 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7723 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7725 if (!dostm32l4xx)
7726 return addglue;
7728 return addglue
7729 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7732 /* Mark output sections of veneers needing a dedicated output section with
7733 SEC_KEEP. This ensures they are not marked for deletion by
7734 strip_excluded_output_sections () when the veneers will only be created
7735 later. Not doing so would trigger an assertion failure on an empty section
7736 size in lang_size_sections_1 (). */
7738 void
7739 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7741 enum elf32_arm_stub_type stub_type;
7743 /* If we are only performing a partial
7744 link do not bother keeping the stub output sections. */
7745 if (bfd_link_relocatable (info))
7746 return;
7748 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7750 asection *out_sec;
7751 const char *out_sec_name;
7753 if (!arm_dedicated_stub_output_section_required (stub_type))
7754 continue;
7756 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7757 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7758 if (out_sec != NULL)
7759 out_sec->flags |= SEC_KEEP;
7763 /* Select a BFD to be used to hold the sections used by the glue code.
7764 This function is called from the linker scripts in ld/emultempl/
7765 {armelf/pe}.em. */
7767 bfd_boolean
7768 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7770 struct elf32_arm_link_hash_table *globals;
7772 /* If we are only performing a partial link
7773 do not bother getting a bfd to hold the glue. */
7774 if (bfd_link_relocatable (info))
7775 return TRUE;
7777 /* Make sure we don't attach the glue sections to a dynamic object. */
7778 BFD_ASSERT (!(abfd->flags & DYNAMIC));
7780 globals = elf32_arm_hash_table (info);
7781 BFD_ASSERT (globals != NULL);
7783 if (globals->bfd_of_glue_owner != NULL)
7784 return TRUE;
7786 /* Save the bfd for later use. */
7787 globals->bfd_of_glue_owner = abfd;
7789 return TRUE;
7792 static void
7793 check_use_blx (struct elf32_arm_link_hash_table *globals)
7795 int cpu_arch;
7797 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7798 Tag_CPU_arch);
7800 if (globals->fix_arm1176)
7802 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7803 globals->use_blx = 1;
7805 else
7807 if (cpu_arch > TAG_CPU_ARCH_V4T)
7808 globals->use_blx = 1;
7812 bfd_boolean
7813 bfd_elf32_arm_process_before_allocation (bfd *abfd,
7814 struct bfd_link_info *link_info)
7816 Elf_Internal_Shdr *symtab_hdr;
7817 Elf_Internal_Rela *internal_relocs = NULL;
7818 Elf_Internal_Rela *irel, *irelend;
7819 bfd_byte *contents = NULL;
7821 asection *sec;
7822 struct elf32_arm_link_hash_table *globals;
7824 /* If we are only performing a partial link do not bother
7825 to construct any glue. */
7826 if (bfd_link_relocatable (link_info))
7827 return TRUE;
7829 /* Here we have a bfd that is to be included in the link. We have a
7830 hook to do reloc rummaging, before section sizes are nailed down. */
7831 globals = elf32_arm_hash_table (link_info);
7832 BFD_ASSERT (globals != NULL);
7834 check_use_blx (globals);
7836 if (globals->byteswap_code && !bfd_big_endian (abfd))
7838 _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
7839 abfd);
7840 return FALSE;
7843 /* PR 5398: If we have not decided to include any loadable sections in
7844 the output then we will not have a glue owner bfd. This is OK, it
7845 just means that there is nothing else for us to do here. */
7846 if (globals->bfd_of_glue_owner == NULL)
7847 return TRUE;
7849 /* Rummage around all the relocs and map the glue vectors. */
7850 sec = abfd->sections;
7852 if (sec == NULL)
7853 return TRUE;
7855 for (; sec != NULL; sec = sec->next)
7857 if (sec->reloc_count == 0)
7858 continue;
7860 if ((sec->flags & SEC_EXCLUDE) != 0)
7861 continue;
7863 symtab_hdr = & elf_symtab_hdr (abfd);
7865 /* Load the relocs. */
7866 internal_relocs
7867 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
7869 if (internal_relocs == NULL)
7870 goto error_return;
7872 irelend = internal_relocs + sec->reloc_count;
7873 for (irel = internal_relocs; irel < irelend; irel++)
7875 long r_type;
7876 unsigned long r_index;
7878 struct elf_link_hash_entry *h;
7880 r_type = ELF32_R_TYPE (irel->r_info);
7881 r_index = ELF32_R_SYM (irel->r_info);
7883 /* These are the only relocation types we care about. */
7884 if ( r_type != R_ARM_PC24
7885 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
7886 continue;
7888 /* Get the section contents if we haven't done so already. */
7889 if (contents == NULL)
7891 /* Get cached copy if it exists. */
7892 if (elf_section_data (sec)->this_hdr.contents != NULL)
7893 contents = elf_section_data (sec)->this_hdr.contents;
7894 else
7896 /* Go get them off disk. */
7897 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7898 goto error_return;
7902 if (r_type == R_ARM_V4BX)
7904 int reg;
7906 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
7907 record_arm_bx_glue (link_info, reg);
7908 continue;
7911 /* If the relocation is not against a symbol it cannot concern us. */
7912 h = NULL;
7914 /* We don't care about local symbols. */
7915 if (r_index < symtab_hdr->sh_info)
7916 continue;
7918 /* This is an external symbol. */
7919 r_index -= symtab_hdr->sh_info;
7920 h = (struct elf_link_hash_entry *)
7921 elf_sym_hashes (abfd)[r_index];
7923 /* If the relocation is against a static symbol it must be within
7924 the current section and so cannot be a cross ARM/Thumb relocation. */
7925 if (h == NULL)
7926 continue;
7928 /* If the call will go through a PLT entry then we do not need
7929 glue. */
7930 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
7931 continue;
7933 switch (r_type)
7935 case R_ARM_PC24:
7936 /* This one is a call from arm code. We need to look up
7937 the target of the call. If it is a thumb target, we
7938 insert glue. */
7939 if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
7940 == ST_BRANCH_TO_THUMB)
7941 record_arm_to_thumb_glue (link_info, h);
7942 break;
7944 default:
7945 abort ();
7949 if (contents != NULL
7950 && elf_section_data (sec)->this_hdr.contents != contents)
7951 free (contents);
7952 contents = NULL;
7954 if (internal_relocs != NULL
7955 && elf_section_data (sec)->relocs != internal_relocs)
7956 free (internal_relocs);
7957 internal_relocs = NULL;
7960 return TRUE;
7962 error_return:
7963 if (contents != NULL
7964 && elf_section_data (sec)->this_hdr.contents != contents)
7965 free (contents);
7966 if (internal_relocs != NULL
7967 && elf_section_data (sec)->relocs != internal_relocs)
7968 free (internal_relocs);
7970 return FALSE;
7972 #endif
7975 /* Initialise maps of ARM/Thumb/data for input BFDs. */
7977 void
7978 bfd_elf32_arm_init_maps (bfd *abfd)
7980 Elf_Internal_Sym *isymbuf;
7981 Elf_Internal_Shdr *hdr;
7982 unsigned int i, localsyms;
7984 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
7985 if (! is_arm_elf (abfd))
7986 return;
7988 if ((abfd->flags & DYNAMIC) != 0)
7989 return;
7991 hdr = & elf_symtab_hdr (abfd);
7992 localsyms = hdr->sh_info;
7994 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
7995 should contain the number of local symbols, which should come before any
7996 global symbols. Mapping symbols are always local. */
7997 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
7998 NULL);
8000 /* No internal symbols read? Skip this BFD. */
8001 if (isymbuf == NULL)
8002 return;
8004 for (i = 0; i < localsyms; i++)
8006 Elf_Internal_Sym *isym = &isymbuf[i];
8007 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
8008 const char *name;
8010 if (sec != NULL
8011 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
8013 name = bfd_elf_string_from_elf_section (abfd,
8014 hdr->sh_link, isym->st_name);
8016 if (bfd_is_arm_special_symbol_name (name,
8017 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
8018 elf32_arm_section_map_add (sec, name[1], isym->st_value);
8024 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
8025 say what they wanted. */
8027 void
8028 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
8030 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8031 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8033 if (globals == NULL)
8034 return;
8036 if (globals->fix_cortex_a8 == -1)
8038 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
8039 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
8040 && (out_attr[Tag_CPU_arch_profile].i == 'A'
8041 || out_attr[Tag_CPU_arch_profile].i == 0))
8042 globals->fix_cortex_a8 = 1;
8043 else
8044 globals->fix_cortex_a8 = 0;
8049 void
8050 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8052 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8053 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8055 if (globals == NULL)
8056 return;
8057 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8058 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8060 switch (globals->vfp11_fix)
8062 case BFD_ARM_VFP11_FIX_DEFAULT:
8063 case BFD_ARM_VFP11_FIX_NONE:
8064 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8065 break;
8067 default:
8068 /* Give a warning, but do as the user requests anyway. */
8069 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8070 "workaround is not necessary for target architecture"), obfd);
8073 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8074 /* For earlier architectures, we might need the workaround, but do not
8075 enable it by default. If users are running with broken hardware, they
8076 must enable the erratum fix explicitly. */
8077 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8080 void
8081 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8083 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8084 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8086 if (globals == NULL)
8087 return;
8089 /* We assume only Cortex-M4 may require the fix. */
8090 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8091 || out_attr[Tag_CPU_arch_profile].i != 'M')
8093 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8094 /* Give a warning, but do as the user requests anyway. */
8095 _bfd_error_handler
8096 (_("%pB: warning: selected STM32L4XX erratum "
8097 "workaround is not necessary for target architecture"), obfd);
8101 enum bfd_arm_vfp11_pipe
8103 VFP11_FMAC,
8104 VFP11_LS,
8105 VFP11_DS,
8106 VFP11_BAD
8109 /* Return a VFP register number. This is encoded as RX:X for single-precision
8110 registers, or X:RX for double-precision registers, where RX is the group of
8111 four bits in the instruction encoding and X is the single extension bit.
8112 RX and X fields are specified using their lowest (starting) bit. The return
8113 value is:
8115 0...31: single-precision registers s0...s31
8116 32...63: double-precision registers d0...d31.
8118 Although X should be zero for VFP11 (encoding d0...d15 only), we might
8119 encounter VFP3 instructions, so we allow the full range for DP registers. */
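/* For example, for the Fd operand of a data-processing instruction
   (RX == 12, X == 22): in single precision the result is Fd * 2 + D,
   giving s0...s31, while in double precision it is 32 + (D:Fd),
   giving d0...d31.  */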
8121 static unsigned int
8122 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
8123 unsigned int x)
8125 if (is_double)
8126 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8127 else
8128 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
8131 /* Set bits in *WMASK according to a register number REG as encoded by
8132 bfd_arm_vfp11_regno(). Ignore d16-d31. */
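/* For example, s5 sets bit 5, while d3 (register number 35 in the
   numbering above) sets bits 6 and 7, i.e. the two overlapping
   single-precision registers s6 and s7.  */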
8134 static void
8135 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
8137 if (reg < 32)
8138 *wmask |= 1 << reg;
8139 else if (reg < 48)
8140 *wmask |= 3 << ((reg - 32) * 2);
8143 /* Return TRUE if WMASK overwrites anything in REGS. */
8145 static bfd_boolean
8146 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8148 int i;
8150 for (i = 0; i < numregs; i++)
8152 unsigned int reg = regs[i];
8154 if (reg < 32 && (wmask & (1 << reg)) != 0)
8155 return TRUE;
8157 reg -= 32;
8159 if (reg >= 16)
8160 continue;
8162 if ((wmask & (3 << (reg * 2))) != 0)
8163 return TRUE;
8166 return FALSE;
8169 /* In this function, we're interested in two things: finding input registers
8170 for VFP data-processing instructions, and finding the set of registers which
8171 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
8172 hold the written set, so FLDM etc. are easy to deal with (we're only
8173 interested in 32 SP registers or 16 DP registers, due to the VFP version
8174 implemented by the chip in question). DP registers are marked by setting
8175 both SP registers in the write mask. */
8177 static enum bfd_arm_vfp11_pipe
8178 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
8179 int *numregs)
8181 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
8182 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
8184 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
8186 unsigned int pqrs;
8187 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8188 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
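      /* Gather the p, q, r and s opcode bits (insn bits 23, 21:20 and 6)
         into a single 4-bit value that selects the operation below.  */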
8190 pqrs = ((insn & 0x00800000) >> 20)
8191 | ((insn & 0x00300000) >> 19)
8192 | ((insn & 0x00000040) >> 6);
8194 switch (pqrs)
8196 case 0: /* fmac[sd]. */
8197 case 1: /* fnmac[sd]. */
8198 case 2: /* fmsc[sd]. */
8199 case 3: /* fnmsc[sd]. */
8200 vpipe = VFP11_FMAC;
8201 bfd_arm_vfp11_write_mask (destmask, fd);
8202 regs[0] = fd;
8203 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
8204 regs[2] = fm;
8205 *numregs = 3;
8206 break;
8208 case 4: /* fmul[sd]. */
8209 case 5: /* fnmul[sd]. */
8210 case 6: /* fadd[sd]. */
8211 case 7: /* fsub[sd]. */
8212 vpipe = VFP11_FMAC;
8213 goto vfp_binop;
8215 case 8: /* fdiv[sd]. */
8216 vpipe = VFP11_DS;
8217 vfp_binop:
8218 bfd_arm_vfp11_write_mask (destmask, fd);
8219 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
8220 regs[1] = fm;
8221 *numregs = 2;
8222 break;
8224 case 15: /* extended opcode. */
8226 unsigned int extn = ((insn >> 15) & 0x1e)
8227 | ((insn >> 7) & 1);
8229 switch (extn)
8231 case 0: /* fcpy[sd]. */
8232 case 1: /* fabs[sd]. */
8233 case 2: /* fneg[sd]. */
8234 case 8: /* fcmp[sd]. */
8235 case 9: /* fcmpe[sd]. */
8236 case 10: /* fcmpz[sd]. */
8237 case 11: /* fcmpez[sd]. */
8238 case 16: /* fuito[sd]. */
8239 case 17: /* fsito[sd]. */
8240 case 24: /* ftoui[sd]. */
8241 case 25: /* ftouiz[sd]. */
8242 case 26: /* ftosi[sd]. */
8243 case 27: /* ftosiz[sd]. */
8244 /* These instructions will not bounce due to underflow. */
8245 *numregs = 0;
8246 vpipe = VFP11_FMAC;
8247 break;
8249 case 3: /* fsqrt[sd]. */
8250 /* fsqrt cannot underflow, but it can (perhaps) overwrite
8251 registers to cause the erratum in previous instructions. */
8252 bfd_arm_vfp11_write_mask (destmask, fd);
8253 vpipe = VFP11_DS;
8254 break;
8256 case 15: /* fcvt{ds,sd}. */
8258 int rnum = 0;
8260 bfd_arm_vfp11_write_mask (destmask, fd);
8262 /* Only FCVTSD can underflow. */
8263 if ((insn & 0x100) != 0)
8264 regs[rnum++] = fm;
8266 *numregs = rnum;
8268 vpipe = VFP11_FMAC;
8270 break;
8272 default:
8273 return VFP11_BAD;
8276 break;
8278 default:
8279 return VFP11_BAD;
8282 /* Two-register transfer. */
8283 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
8285 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8287 if ((insn & 0x100000) == 0)
8289 if (is_double)
8290 bfd_arm_vfp11_write_mask (destmask, fm);
8291 else
8293 bfd_arm_vfp11_write_mask (destmask, fm);
8294 bfd_arm_vfp11_write_mask (destmask, fm + 1);
8298 vpipe = VFP11_LS;
8300 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
8302 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
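      /* puw packs the addressing-mode bits: P and U (insn bits 24:23)
         above W (insn bit 21), giving the P:U:W value used to separate
         the two-register transfer, FLDM and FLD[SD] forms below.  */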
8303 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
8305 switch (puw)
8307 case 0: /* Two-reg transfer. We should catch these above. */
8308 abort ();
8310 case 2: /* fldm[sdx]. */
8311 case 3:
8312 case 5:
8314 unsigned int i, offset = insn & 0xff;
8316 if (is_double)
8317 offset >>= 1;
8319 for (i = fd; i < fd + offset; i++)
8320 bfd_arm_vfp11_write_mask (destmask, i);
8322 break;
8324 case 4: /* fld[sd]. */
8325 case 6:
8326 bfd_arm_vfp11_write_mask (destmask, fd);
8327 break;
8329 default:
8330 return VFP11_BAD;
8333 vpipe = VFP11_LS;
8335 /* Single-register transfer. Note L==0. */
8336 else if ((insn & 0x0f100e10) == 0x0e000a10)
8338 unsigned int opcode = (insn >> 21) & 7;
8339 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
8341 switch (opcode)
8343 case 0: /* fmsr/fmdlr. */
8344 case 1: /* fmdhr. */
8345 /* Mark fmdhr and fmdlr as writing to the whole of the DP
8346 destination register. I don't know if this is exactly right,
8347 but it is the conservative choice. */
8348 bfd_arm_vfp11_write_mask (destmask, fn);
8349 break;
8351 case 7: /* fmxr. */
8352 break;
8355 vpipe = VFP11_LS;
8358 return vpipe;
8362 static int elf32_arm_compare_mapping (const void * a, const void * b);
8365 /* Look for potentially-troublesome code sequences which might trigger the
8366 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8367 (available from ARM) for details of the erratum. A short version is
8368 described in ld.texinfo. */
8370 bfd_boolean
8371 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8373 asection *sec;
8374 bfd_byte *contents = NULL;
8375 int state = 0;
8376 int regs[3], numregs = 0;
8377 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8378 int use_vector = (globals != NULL && globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8380 if (globals == NULL)
8381 return FALSE;
8383 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8384 The states transition as follows:
8386 0 -> 1 (vector) or 0 -> 2 (scalar)
8387 A VFP FMAC-pipeline instruction has been seen. Fill
8388 regs[0]..regs[numregs-1] with its input operands. Remember this
8389 instruction in 'first_fmac'.
8391 1 -> 2
8392 Any instruction, except for a VFP instruction which overwrites
8393 regs[*].
8395 1 -> 3 [ -> 0 ] or
8396 2 -> 3 [ -> 0 ]
8397 A VFP instruction has been seen which overwrites any of regs[*].
8398 We must make a veneer! Reset state to 0 before examining next
8399 instruction.
8401 2 -> 0
8402 If we fail to match anything in state 2, reset to state 0 and reset
8403 the instruction pointer to the instruction after 'first_fmac'.
8405 If the VFP11 vector mode is in use, there must be at least two unrelated
8406 instructions between anti-dependent VFP11 instructions to properly avoid
8407 triggering the erratum, hence the use of the extra state 1. */
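/* As a concrete (scalar-mode) example: an "fmacs s0, s1, s2" moves the
   FSM from state 0 to state 2 with regs[] = {s0, s1, s2}; a following
   "flds s1, [r0]" writes s1, one of those inputs, so the FSM reaches
   state 3 and a veneer is recorded for the fmacs.  */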
8409 /* If we are only performing a partial link do not bother
8410 to construct any glue. */
8411 if (bfd_link_relocatable (link_info))
8412 return TRUE;
8414 /* Skip if this bfd does not correspond to an ELF image. */
8415 if (! is_arm_elf (abfd))
8416 return TRUE;
8418 /* We should have chosen a fix type by the time we get here. */
8419 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8421 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8422 return TRUE;
8424 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8425 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8426 return TRUE;
8428 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8430 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8431 struct _arm_elf_section_data *sec_data;
8433 /* If we don't have executable progbits, we're not interested in this
8434 section. Also skip if section is to be excluded. */
8435 if (elf_section_type (sec) != SHT_PROGBITS
8436 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8437 || (sec->flags & SEC_EXCLUDE) != 0
8438 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8439 || sec->output_section == bfd_abs_section_ptr
8440 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8441 continue;
8443 sec_data = elf32_arm_section_data (sec);
8445 if (sec_data->mapcount == 0)
8446 continue;
8448 if (elf_section_data (sec)->this_hdr.contents != NULL)
8449 contents = elf_section_data (sec)->this_hdr.contents;
8450 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8451 goto error_return;
8453 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8454 elf32_arm_compare_mapping);
8456 for (span = 0; span < sec_data->mapcount; span++)
8458 unsigned int span_start = sec_data->map[span].vma;
8459 unsigned int span_end = (span == sec_data->mapcount - 1)
8460 ? sec->size : sec_data->map[span + 1].vma;
8461 char span_type = sec_data->map[span].type;
8463 /* FIXME: Only ARM mode is supported at present. We may need to
8464 support Thumb-2 mode also at some point. */
8465 if (span_type != 'a')
8466 continue;
8468 for (i = span_start; i < span_end;)
8470 unsigned int next_i = i + 4;
8471 unsigned int insn = bfd_big_endian (abfd)
8472 ? (contents[i] << 24)
8473 | (contents[i + 1] << 16)
8474 | (contents[i + 2] << 8)
8475 | contents[i + 3]
8476 : (contents[i + 3] << 24)
8477 | (contents[i + 2] << 16)
8478 | (contents[i + 1] << 8)
8479 | contents[i];
8480 unsigned int writemask = 0;
8481 enum bfd_arm_vfp11_pipe vpipe;
8483 switch (state)
8485 case 0:
8486 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8487 &numregs);
8488 /* I'm assuming the VFP11 erratum can trigger with denorm
8489 operands on either the FMAC or the DS pipeline. This might
8490 lead to slightly overenthusiastic veneer insertion. */
8491 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8493 state = use_vector ? 1 : 2;
8494 first_fmac = i;
8495 veneer_of_insn = insn;
8497 break;
8499 case 1:
8501 int other_regs[3], other_numregs;
8502 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8503 other_regs,
8504 &other_numregs);
8505 if (vpipe != VFP11_BAD
8506 && bfd_arm_vfp11_antidependency (writemask, regs,
8507 numregs))
8508 state = 3;
8509 else
8510 state = 2;
8512 break;
8514 case 2:
8516 int other_regs[3], other_numregs;
8517 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8518 other_regs,
8519 &other_numregs);
8520 if (vpipe != VFP11_BAD
8521 && bfd_arm_vfp11_antidependency (writemask, regs,
8522 numregs))
8523 state = 3;
8524 else
8526 state = 0;
8527 next_i = first_fmac + 4;
8530 break;
8532 case 3:
8533 abort (); /* Should be unreachable. */
8536 if (state == 3)
8538 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8539 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8541 elf32_arm_section_data (sec)->erratumcount += 1;
8543 newerr->u.b.vfp_insn = veneer_of_insn;
8545 switch (span_type)
8547 case 'a':
8548 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8549 break;
8551 default:
8552 abort ();
8555 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8556 first_fmac);
8558 newerr->vma = -1;
8560 newerr->next = sec_data->erratumlist;
8561 sec_data->erratumlist = newerr;
8563 state = 0;
8566 i = next_i;
8570 if (contents != NULL
8571 && elf_section_data (sec)->this_hdr.contents != contents)
8572 free (contents);
8573 contents = NULL;
8576 return TRUE;
8578 error_return:
8579 if (contents != NULL
8580 && elf_section_data (sec)->this_hdr.contents != contents)
8581 free (contents);
8583 return FALSE;
8586 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8587 after sections have been laid out, using specially-named symbols. */
8589 void
8590 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8591 struct bfd_link_info *link_info)
8593 asection *sec;
8594 struct elf32_arm_link_hash_table *globals;
8595 char *tmp_name;
8597 if (bfd_link_relocatable (link_info))
8598 return;
8600 /* Skip if this bfd does not correspond to an ELF image. */
8601 if (! is_arm_elf (abfd))
8602 return;
8604 globals = elf32_arm_hash_table (link_info);
8605 if (globals == NULL)
8606 return;
8608 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8609 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8611 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8613 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8614 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8616 for (; errnode != NULL; errnode = errnode->next)
8618 struct elf_link_hash_entry *myh;
8619 bfd_vma vma;
8621 switch (errnode->type)
8623 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8624 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8625 /* Find veneer symbol. */
8626 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8627 errnode->u.b.veneer->u.v.id);
8629 myh = elf_link_hash_lookup
8630 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8632 if (myh == NULL)
8633 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8634 abfd, "VFP11", tmp_name);
8636 vma = myh->root.u.def.section->output_section->vma
8637 + myh->root.u.def.section->output_offset
8638 + myh->root.u.def.value;
8640 errnode->u.b.veneer->vma = vma;
8641 break;
8643 case VFP11_ERRATUM_ARM_VENEER:
8644 case VFP11_ERRATUM_THUMB_VENEER:
8645 /* Find return location. */
8646 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8647 errnode->u.v.id);
8649 myh = elf_link_hash_lookup
8650 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8652 if (myh == NULL)
8653 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8654 abfd, "VFP11", tmp_name);
8656 vma = myh->root.u.def.section->output_section->vma
8657 + myh->root.u.def.section->output_offset
8658 + myh->root.u.def.value;
8660 errnode->u.v.branch->vma = vma;
8661 break;
8663 default:
8664 abort ();
8669 free (tmp_name);
8672 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8673 return locations after sections have been laid out, using
8674 specially-named symbols. */
8676 void
8677 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8678 struct bfd_link_info *link_info)
8680 asection *sec;
8681 struct elf32_arm_link_hash_table *globals;
8682 char *tmp_name;
8684 if (bfd_link_relocatable (link_info))
8685 return;
8687 /* Skip if this bfd does not correspond to an ELF image. */
8688 if (! is_arm_elf (abfd))
8689 return;
8691 globals = elf32_arm_hash_table (link_info);
8692 if (globals == NULL)
8693 return;
8695 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8696 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8698 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8700 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8701 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8703 for (; errnode != NULL; errnode = errnode->next)
8705 struct elf_link_hash_entry *myh;
8706 bfd_vma vma;
8708 switch (errnode->type)
8710 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8711 /* Find veneer symbol. */
8712 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8713 errnode->u.b.veneer->u.v.id);
8715 myh = elf_link_hash_lookup
8716 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8718 if (myh == NULL)
8719 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8720 abfd, "STM32L4XX", tmp_name);
8722 vma = myh->root.u.def.section->output_section->vma
8723 + myh->root.u.def.section->output_offset
8724 + myh->root.u.def.value;
8726 errnode->u.b.veneer->vma = vma;
8727 break;
8729 case STM32L4XX_ERRATUM_VENEER:
8730 /* Find return location. */
8731 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8732 errnode->u.v.id);
8734 myh = elf_link_hash_lookup
8735 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8737 if (myh == NULL)
8738 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8739 abfd, "STM32L4XX", tmp_name);
8741 vma = myh->root.u.def.section->output_section->vma
8742 + myh->root.u.def.section->output_offset
8743 + myh->root.u.def.value;
8745 errnode->u.v.branch->vma = vma;
8746 break;
8748 default:
8749 abort ();
8754 free (tmp_name);
8757 static inline bfd_boolean
8758 is_thumb2_ldmia (const insn32 insn)
8760 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8761 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
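  /* For example, 0xe8bd4ff0 (POP.W {r4-r11, lr}, i.e. LDMIA sp!, ...)
     satisfies (insn & 0xffd02000) == 0xe8900000.  */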
8762 return (insn & 0xffd02000) == 0xe8900000;
8765 static inline bfd_boolean
8766 is_thumb2_ldmdb (const insn32 insn)
8768 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8769 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8770 return (insn & 0xffd02000) == 0xe9100000;
8773 static inline bfd_boolean
8774 is_thumb2_vldm (const insn32 insn)
8776 /* A6.5 Extension register load or store instruction
8777 A7.7.229
8778 We look for SP 32-bit and DP 64-bit registers.
8779 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8780 <list> is consecutive 64-bit registers
8781 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8782 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8783 <list> is consecutive 32-bit registers
8784 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8785 if P==0 && U==1 && W==1 && Rn=1101 VPOP
8786 if PUW=010 || PUW=011 || PUW=101 VLDM. */
8787 return
8788 (((insn & 0xfe100f00) == 0xec100b00) ||
8789 ((insn & 0xfe100f00) == 0xec100a00))
8790 && /* (IA without !). */
8791 (((((insn << 7) >> 28) & 0xd) == 0x4)
8792 /* (IA with !), includes VPOP (when reg number is SP). */
8793 || ((((insn << 7) >> 28) & 0xd) == 0x5)
8794 /* (DB with !). */
8795 || ((((insn << 7) >> 28) & 0xd) == 0x9));
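/* For example, 0xecbd8b10 (VPOP {d8-d15}, i.e. VLDMIA sp!, {d8-d15})
   matches the 64-bit pattern above, and its P:U:W bits are 0:1:1, so
   (((insn << 7) >> 28) & 0xd) equals 0x5, the "IA with !" case.  */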
8798 /* STM STM32L4XX erratum: This function assumes that it receives an LDM or
8799 VLDM opcode and:
8800 - computes the number and the mode of memory accesses
8801 - decides if the replacement should be done:
8802 . replaces only if > 8-word accesses
8803 . or (testing purposes only) replaces all accesses. */
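/* For example, in DEFAULT mode an LDMIA loading nine registers
   (nb_words == 9) gets a stub, while a VLDM of four double-precision
   registers (imm8 == 8, i.e. eight words) does not.  */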
8805 static bfd_boolean
8806 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8807 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8809 int nb_words = 0;
8811 /* The field encoding the register list is the same for both LDMIA
8812 and LDMDB encodings. */
8813 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8814 nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8815 else if (is_thumb2_vldm (insn))
8816 nb_words = (insn & 0xff);
8818 /* DEFAULT mode covers the real erratum condition, while
8819 ALL mode inserts stubs for every LDM/VLDM instruction (testing only). */
8820 return
8821 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
8822 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
8825 /* Look for potentially-troublesome code sequences which might trigger
8826 the STM STM32L4XX erratum. */
8828 bfd_boolean
8829 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
8830 struct bfd_link_info *link_info)
8832 asection *sec;
8833 bfd_byte *contents = NULL;
8834 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8836 if (globals == NULL)
8837 return FALSE;
8839 /* If we are only performing a partial link do not bother
8840 to construct any glue. */
8841 if (bfd_link_relocatable (link_info))
8842 return TRUE;
8844 /* Skip if this bfd does not correspond to an ELF image. */
8845 if (! is_arm_elf (abfd))
8846 return TRUE;
8848 if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
8849 return TRUE;
8851 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8852 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8853 return TRUE;
8855 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8857 unsigned int i, span;
8858 struct _arm_elf_section_data *sec_data;
8860 /* If we don't have executable progbits, we're not interested in this
8861 section. Also skip if section is to be excluded. */
8862 if (elf_section_type (sec) != SHT_PROGBITS
8863 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8864 || (sec->flags & SEC_EXCLUDE) != 0
8865 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8866 || sec->output_section == bfd_abs_section_ptr
8867 || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
8868 continue;
8870 sec_data = elf32_arm_section_data (sec);
8872 if (sec_data->mapcount == 0)
8873 continue;
8875 if (elf_section_data (sec)->this_hdr.contents != NULL)
8876 contents = elf_section_data (sec)->this_hdr.contents;
8877 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8878 goto error_return;
8880 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8881 elf32_arm_compare_mapping);
8883 for (span = 0; span < sec_data->mapcount; span++)
8885 unsigned int span_start = sec_data->map[span].vma;
8886 unsigned int span_end = (span == sec_data->mapcount - 1)
8887 ? sec->size : sec_data->map[span + 1].vma;
8888 char span_type = sec_data->map[span].type;
8889 int itblock_current_pos = 0;
8891 /* Only Thumb-2 mode need be supported with this CM4-specific
8892 code; we should not encounter any ARM-mode spans, i.e. spans with
8893 span_type == 'a'. */
8894 if (span_type != 't')
8895 continue;
8897 for (i = span_start; i < span_end;)
8899 unsigned int insn = bfd_get_16 (abfd, &contents[i]);
8900 bfd_boolean insn_32bit = FALSE;
8901 bfd_boolean is_ldm = FALSE;
8902 bfd_boolean is_vldm = FALSE;
8903 bfd_boolean is_not_last_in_it_block = FALSE;
8905 /* The first 16 bits of all 32-bit Thumb-2 instructions start
8906 with opcode[15..13] = 0b111, and the op1 field in opcode[12..11]
8907 can be anything except 0b00 (which is the 16-bit unconditional branch).
8908 See 32-bit Thumb instruction encoding. */
8909 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
8910 insn_32bit = TRUE;
8912 /* Compute the predicate that tells whether the instruction
8913 is inside an IT block:
8914 - an LDM that is not the last instruction of an IT block
8915 cannot be replaced, so an error is emitted for it;
8916 - otherwise we can create a branch at the end of the
8917 IT block; it will naturally be controlled by IT
8918 with the proper pseudo-predicate;
8919 - so the only interesting predicate is the one that
8920 tells whether we are NOT on the last instruction of an IT
8921 block. */
8922 if (itblock_current_pos != 0)
8923 is_not_last_in_it_block = !!--itblock_current_pos;
8925 if (insn_32bit)
8927 /* Load the rest of the insn (in manual-friendly order). */
8928 insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
8929 is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
8930 is_vldm = is_thumb2_vldm (insn);
8932 /* Veneers are created for (v)ldm depending on
8933 option flags and memory access conditions; but
8934 if the instruction is not the last instruction of
8935 an IT block, we cannot create a jump there, so we
8936 bail out. */
8937 if ((is_ldm || is_vldm)
8938 && stm32l4xx_need_create_replacing_stub
8939 (insn, globals->stm32l4xx_fix))
8941 if (is_not_last_in_it_block)
8943 _bfd_error_handler
8944 /* xgettext:c-format */
8945 (_("%pB(%pA+%#x): error: multiple load detected"
8946 " in non-last IT block instruction:"
8947 " STM32L4XX veneer cannot be generated; "
8948 "use gcc option -mrestrict-it to generate"
8949 " only one instruction per IT block"),
8950 abfd, sec, i);
8952 else
8954 elf32_stm32l4xx_erratum_list *newerr =
8955 (elf32_stm32l4xx_erratum_list *)
8956 bfd_zmalloc
8957 (sizeof (elf32_stm32l4xx_erratum_list));
8959 elf32_arm_section_data (sec)
8960 ->stm32l4xx_erratumcount += 1;
8961 newerr->u.b.insn = insn;
8962 /* We create only thumb branches. */
8963 newerr->type =
8964 STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
8965 record_stm32l4xx_erratum_veneer
8966 (link_info, newerr, abfd, sec,
8967 i,
8968 is_ldm ?
8969 STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
8970 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
8971 newerr->vma = -1;
8972 newerr->next = sec_data->stm32l4xx_erratumlist;
8973 sec_data->stm32l4xx_erratumlist = newerr;
8977 else
8979 /* A7.7.37 IT p208
8980 IT blocks are only encoded in T1
8981 Encoding T1: IT{x{y{z}}} <firstcond>
8982 1 0 1 1 - 1 1 1 1 - firstcond - mask
8983 if mask = '0000' then see 'related encodings'
8984 We don't deal with UNPREDICTABLE, just ignore these.
8985 There can be no nested IT blocks so an IT block
8986 is naturally a new one for which it is worth
8987 computing its size. */
8988 bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
8989 && ((insn & 0x000f) != 0x0000);
8990 /* If we have a new IT block we compute its size. */
8991 if (is_newitblock)
8993 /* Compute the number of instructions controlled
8994 by the IT block, it will be used to decide
8995 whether we are inside an IT block or not. */
8996 unsigned int mask = insn & 0x000f;
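              /* 4 - ctz (mask) is the number of instructions the IT
                 instruction controls: a plain IT has mask 0b1000 (one
                 instruction), and the terminating 1 moves right by one
                 position for each additional T/E suffix, down to 0bxxx1
                 for a four-instruction block.  */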
8997 itblock_current_pos = 4 - ctz (mask);
9001 i += insn_32bit ? 4 : 2;
9005 if (contents != NULL
9006 && elf_section_data (sec)->this_hdr.contents != contents)
9007 free (contents);
9008 contents = NULL;
9011 return TRUE;
9013 error_return:
9014 if (contents != NULL
9015 && elf_section_data (sec)->this_hdr.contents != contents)
9016 free (contents);
9018 return FALSE;
9021 /* Set target relocation values needed during linking. */
9023 void
9024 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
9025 struct bfd_link_info *link_info,
9026 struct elf32_arm_params *params)
9028 struct elf32_arm_link_hash_table *globals;
9030 globals = elf32_arm_hash_table (link_info);
9031 if (globals == NULL)
9032 return;
9034 globals->target1_is_rel = params->target1_is_rel;
9035 if (globals->fdpic_p)
9036 globals->target2_reloc = R_ARM_GOT32;
9037 else if (strcmp (params->target2_type, "rel") == 0)
9038 globals->target2_reloc = R_ARM_REL32;
9039 else if (strcmp (params->target2_type, "abs") == 0)
9040 globals->target2_reloc = R_ARM_ABS32;
9041 else if (strcmp (params->target2_type, "got-rel") == 0)
9042 globals->target2_reloc = R_ARM_GOT_PREL;
9043 else
9045 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9046 params->target2_type);
9048 globals->fix_v4bx = params->fix_v4bx;
9049 globals->use_blx |= params->use_blx;
9050 globals->vfp11_fix = params->vfp11_denorm_fix;
9051 globals->stm32l4xx_fix = params->stm32l4xx_fix;
9052 if (globals->fdpic_p)
9053 globals->pic_veneer = 1;
9054 else
9055 globals->pic_veneer = params->pic_veneer;
9056 globals->fix_cortex_a8 = params->fix_cortex_a8;
9057 globals->fix_arm1176 = params->fix_arm1176;
9058 globals->cmse_implib = params->cmse_implib;
9059 globals->in_implib_bfd = params->in_implib_bfd;
9061 BFD_ASSERT (is_arm_elf (output_bfd));
9062 elf_arm_tdata (output_bfd)->no_enum_size_warning
9063 = params->no_enum_size_warning;
9064 elf_arm_tdata (output_bfd)->no_wchar_size_warning
9065 = params->no_wchar_size_warning;
9068 /* Replace the target offset of a Thumb bl or b.w instruction. */
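/* The branch offset is split into S:I1:I2:imm10:imm11 (in half-words),
   where the instruction actually stores J1 = NOT(I1) EOR S and
   J2 = NOT(I2) EOR S; the expressions below recover J1 and J2 from
   offset bits 23 and 22 and the sign bit.  */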
9070 static void
9071 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
9073 bfd_vma upper;
9074 bfd_vma lower;
9075 int reloc_sign;
9077 BFD_ASSERT ((offset & 1) == 0);
9079 upper = bfd_get_16 (abfd, insn);
9080 lower = bfd_get_16 (abfd, insn + 2);
9081 reloc_sign = (offset < 0) ? 1 : 0;
9082 upper = (upper & ~(bfd_vma) 0x7ff)
9083 | ((offset >> 12) & 0x3ff)
9084 | (reloc_sign << 10);
9085 lower = (lower & ~(bfd_vma) 0x2fff)
9086 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
9087 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
9088 | ((offset >> 1) & 0x7ff);
9089 bfd_put_16 (abfd, upper, insn);
9090 bfd_put_16 (abfd, lower, insn + 2);
9093 /* Thumb code calling an ARM function. */
9095 static int
9096 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
9097 const char * name,
9098 bfd * input_bfd,
9099 bfd * output_bfd,
9100 asection * input_section,
9101 bfd_byte * hit_data,
9102 asection * sym_sec,
9103 bfd_vma offset,
9104 bfd_signed_vma addend,
9105 bfd_vma val,
9106 char **error_message)
9108 asection * s = 0;
9109 bfd_vma my_offset;
9110 long int ret_offset;
9111 struct elf_link_hash_entry * myh;
9112 struct elf32_arm_link_hash_table * globals;
9114 myh = find_thumb_glue (info, name, error_message);
9115 if (myh == NULL)
9116 return FALSE;
9118 globals = elf32_arm_hash_table (info);
9119 BFD_ASSERT (globals != NULL);
9120 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9122 my_offset = myh->root.u.def.value;
9124 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9125 THUMB2ARM_GLUE_SECTION_NAME);
9127 BFD_ASSERT (s != NULL);
9128 BFD_ASSERT (s->contents != NULL);
9129 BFD_ASSERT (s->output_section != NULL);
9131 if ((my_offset & 0x01) == 0x01)
9133 if (sym_sec != NULL
9134 && sym_sec->owner != NULL
9135 && !INTERWORK_FLAG (sym_sec->owner))
9137 _bfd_error_handler
9138 (_("%pB(%s): warning: interworking not enabled;"
9139 " first occurrence: %pB: %s call to %s"),
9140 sym_sec->owner, name, input_bfd, "Thumb", "ARM");
9142 return FALSE;
9145 --my_offset;
9146 myh->root.u.def.value = my_offset;
9148 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
9149 s->contents + my_offset);
9151 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
9152 s->contents + my_offset + 2);
9154 ret_offset =
9155 /* Address of destination of the stub. */
9156 ((bfd_signed_vma) val)
9157 - ((bfd_signed_vma)
9158 /* Offset from the start of the current section
9159 to the start of the stubs. */
9160 (s->output_offset
9161 /* Offset of the start of this stub from the start of the stubs. */
9162 + my_offset
9163 /* Address of the start of the current section. */
9164 + s->output_section->vma)
9165 /* The branch instruction is 4 bytes into the stub. */
9166 + 4
9167 /* ARM branches work from the pc of the instruction + 8. */
9168 + 8);
9170 put_arm_insn (globals, output_bfd,
9171 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
9172 s->contents + my_offset + 4);
9175 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
9177 /* Now go back and fix up the original BL insn to point to here. */
9178 ret_offset =
9179 /* Address of where the stub is located. */
9180 (s->output_section->vma + s->output_offset + my_offset)
9181 /* Address of where the BL is located. */
9182 - (input_section->output_section->vma + input_section->output_offset
9183 + offset)
9184 /* Addend in the relocation. */
9185 - addend
9186 /* Biassing for PC-relative addressing. */
9187 - 8;
9189 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
9191 return TRUE;
9194 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
9196 static struct elf_link_hash_entry *
9197 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
9198 const char * name,
9199 bfd * input_bfd,
9200 bfd * output_bfd,
9201 asection * sym_sec,
9202 bfd_vma val,
9203 asection * s,
9204 char ** error_message)
9206 bfd_vma my_offset;
9207 long int ret_offset;
9208 struct elf_link_hash_entry * myh;
9209 struct elf32_arm_link_hash_table * globals;
9211 myh = find_arm_glue (info, name, error_message);
9212 if (myh == NULL)
9213 return NULL;
9215 globals = elf32_arm_hash_table (info);
9216 BFD_ASSERT (globals != NULL);
9217 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9219 my_offset = myh->root.u.def.value;
9221 if ((my_offset & 0x01) == 0x01)
9223 if (sym_sec != NULL
9224 && sym_sec->owner != NULL
9225 && !INTERWORK_FLAG (sym_sec->owner))
9227 _bfd_error_handler
9228 (_("%pB(%s): warning: interworking not enabled;"
9229 " first occurrence: %pB: %s call to %s"),
9230 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
9233 --my_offset;
9234 myh->root.u.def.value = my_offset;
9236 if (bfd_link_pic (info)
9237 || globals->root.is_relocatable_executable
9238 || globals->pic_veneer)
9240 /* For relocatable objects we can't use absolute addresses,
9241 so construct the address from a relative offset. */
9242 /* TODO: If the offset is small it's probably worth
9243 constructing the address with adds. */
9244 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
9245 s->contents + my_offset);
9246 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
9247 s->contents + my_offset + 4);
9248 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
9249 s->contents + my_offset + 8);
9250 /* Adjust the offset by 4 for the position of the add,
9251 and 8 for the pipeline offset. */
9252 ret_offset = (val - (s->output_offset
9253 + s->output_section->vma
9254 + my_offset + 12))
9255 | 1;
9256 bfd_put_32 (output_bfd, ret_offset,
9257 s->contents + my_offset + 12);
9259 else if (globals->use_blx)
9261 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
9262 s->contents + my_offset);
9264 /* It's a thumb address. Add the low order bit. */
9265 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
9266 s->contents + my_offset + 4);
9268 else
9270 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
9271 s->contents + my_offset);
9273 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
9274 s->contents + my_offset + 4);
9276 /* It's a thumb address. Add the low order bit. */
9277 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
9278 s->contents + my_offset + 8);
9280 my_offset += 12;
9284 BFD_ASSERT (my_offset <= globals->arm_glue_size);
9286 return myh;
9289 /* Arm code calling a Thumb function. */
9291 static int
9292 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
9293 const char * name,
9294 bfd * input_bfd,
9295 bfd * output_bfd,
9296 asection * input_section,
9297 bfd_byte * hit_data,
9298 asection * sym_sec,
9299 bfd_vma offset,
9300 bfd_signed_vma addend,
9301 bfd_vma val,
9302 char **error_message)
9304 unsigned long int tmp;
9305 bfd_vma my_offset;
9306 asection * s;
9307 long int ret_offset;
9308 struct elf_link_hash_entry * myh;
9309 struct elf32_arm_link_hash_table * globals;
9311 globals = elf32_arm_hash_table (info);
9312 BFD_ASSERT (globals != NULL);
9313 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9315 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9316 ARM2THUMB_GLUE_SECTION_NAME);
9317 BFD_ASSERT (s != NULL);
9318 BFD_ASSERT (s->contents != NULL);
9319 BFD_ASSERT (s->output_section != NULL);
9321 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
9322 sym_sec, val, s, error_message);
9323 if (!myh)
9324 return FALSE;
9326 my_offset = myh->root.u.def.value;
9327 tmp = bfd_get_32 (input_bfd, hit_data);
9328 tmp = tmp & 0xFF000000;
9330 /* Somehow these are both 4 too far, so subtract 8. */
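/* (The B/BL offset field is relative to the address of the branch plus 8,
   since the ARM PC reads two instructions ahead; the >> 2 below converts
   the byte offset into the 24-bit word offset held in the instruction.) */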
9331 ret_offset = (s->output_offset
9332 + my_offset
9333 + s->output_section->vma
9334 - (input_section->output_offset
9335 + input_section->output_section->vma
9336 + offset + addend)
9337 - 8);
9339 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
9341 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
9343 return TRUE;
9346 /* Populate Arm stub for an exported Thumb function. */
9348 static bfd_boolean
9349 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
9351 struct bfd_link_info * info = (struct bfd_link_info *) inf;
9352 asection * s;
9353 struct elf_link_hash_entry * myh;
9354 struct elf32_arm_link_hash_entry *eh;
9355 struct elf32_arm_link_hash_table * globals;
9356 asection *sec;
9357 bfd_vma val;
9358 char *error_message;
9360 eh = elf32_arm_hash_entry (h);
9361 /* Allocate stubs for exported Thumb functions on v4t. */
9362 if (eh->export_glue == NULL)
9363 return TRUE;
9365 globals = elf32_arm_hash_table (info);
9366 BFD_ASSERT (globals != NULL);
9367 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9369 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9370 ARM2THUMB_GLUE_SECTION_NAME);
9371 BFD_ASSERT (s != NULL);
9372 BFD_ASSERT (s->contents != NULL);
9373 BFD_ASSERT (s->output_section != NULL);
9375 sec = eh->export_glue->root.u.def.section;
9377 BFD_ASSERT (sec->output_section != NULL);
9379 val = eh->export_glue->root.u.def.value + sec->output_offset
9380 + sec->output_section->vma;
9382 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
9383 h->root.u.def.section->owner,
9384 globals->obfd, sec, val, s,
9385 &error_message);
9386 BFD_ASSERT (myh);
9387 return TRUE;
9390 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
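/* Each veneer occupies a 12-byte (three instruction) slot in the BX glue
   section.  The low two bits of bx_glue_offset[reg] are used as flags:
   bit 1 marks that a slot has been reserved for this register and bit 0
   that its contents have already been written, hence the masking below. */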
9392 static bfd_vma
9393 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9395 bfd_byte *p;
9396 bfd_vma glue_addr;
9397 asection *s;
9398 struct elf32_arm_link_hash_table *globals;
9400 globals = elf32_arm_hash_table (info);
9401 BFD_ASSERT (globals != NULL);
9402 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9404 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9405 ARM_BX_GLUE_SECTION_NAME);
9406 BFD_ASSERT (s != NULL);
9407 BFD_ASSERT (s->contents != NULL);
9408 BFD_ASSERT (s->output_section != NULL);
9410 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9412 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9414 if ((globals->bx_glue_offset[reg] & 1) == 0)
9416 p = s->contents + glue_addr;
9417 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9418 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9419 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9420 globals->bx_glue_offset[reg] |= 1;
9423 return glue_addr + s->output_section->vma + s->output_offset;
9426 /* Generate Arm stubs for exported Thumb symbols. */
9427 static void
9428 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9429 struct bfd_link_info *link_info)
9431 struct elf32_arm_link_hash_table * globals;
9433 if (link_info == NULL)
9434 /* Ignore this if we are not called by the ELF backend linker. */
9435 return;
9437 globals = elf32_arm_hash_table (link_info);
9438 if (globals == NULL)
9439 return;
9441 /* If blx is available then exported Thumb symbols are OK and there is
9442 nothing to do. */
9443 if (globals->use_blx)
9444 return;
9446 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9447 link_info);
9450 /* Reserve space for COUNT dynamic relocations in relocation section
9451 SRELOC. */
9453 static void
9454 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9455 bfd_size_type count)
9457 struct elf32_arm_link_hash_table *htab;
9459 htab = elf32_arm_hash_table (info);
9460 BFD_ASSERT (htab->root.dynamic_sections_created);
9461 if (sreloc == NULL)
9462 abort ();
9463 sreloc->size += RELOC_SIZE (htab) * count;
9466 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9467 dynamic, the relocations should go in SRELOC, otherwise they should
9468 go in the special .rel.iplt section. */
9470 static void
9471 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9472 bfd_size_type count)
9474 struct elf32_arm_link_hash_table *htab;
9476 htab = elf32_arm_hash_table (info);
9477 if (!htab->root.dynamic_sections_created)
9478 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9479 else
9481 BFD_ASSERT (sreloc != NULL);
9482 sreloc->size += RELOC_SIZE (htab) * count;
9486 /* Add relocation REL to the end of relocation section SRELOC. */
9488 static void
9489 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9490 asection *sreloc, Elf_Internal_Rela *rel)
9492 bfd_byte *loc;
9493 struct elf32_arm_link_hash_table *htab;
9495 htab = elf32_arm_hash_table (info);
9496 if (!htab->root.dynamic_sections_created
9497 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9498 sreloc = htab->root.irelplt;
9499 if (sreloc == NULL)
9500 abort ();
9501 loc = sreloc->contents;
9502 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9503 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9504 abort ();
9505 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9508 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9509 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
9510 to .plt. */
9512 static void
9513 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
9514 bfd_boolean is_iplt_entry,
9515 union gotplt_union *root_plt,
9516 struct arm_plt_info *arm_plt)
9518 struct elf32_arm_link_hash_table *htab;
9519 asection *splt;
9520 asection *sgotplt;
9522 htab = elf32_arm_hash_table (info);
9524 if (is_iplt_entry)
9526 splt = htab->root.iplt;
9527 sgotplt = htab->root.igotplt;
9529 /* NaCl uses a special first entry in .iplt too. */
9530 if (htab->nacl_p && splt->size == 0)
9531 splt->size += htab->plt_header_size;
9533 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
9534 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
9536 else
9538 splt = htab->root.splt;
9539 sgotplt = htab->root.sgotplt;
9541 if (htab->fdpic_p)
9543 /* Allocate room for R_ARM_FUNCDESC_VALUE. */
9544 /* For lazy binding, relocations will be put into .rel.plt, in
9545 .rel.got otherwise. */
9546 /* FIXME: today we don't support lazy binding, so put it in .rel.got. */
9547 if (info->flags & DF_BIND_NOW)
9548 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
9549 else
9550 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9552 else
9554 /* Allocate room for an R_ARM_JUMP_SLOT relocation in .rel.plt. */
9555 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9558 /* If this is the first .plt entry, make room for the special
9559 first entry. */
9560 if (splt->size == 0)
9561 splt->size += htab->plt_header_size;
9563 htab->next_tls_desc_index++;
9566 /* Allocate the PLT entry itself, including any leading Thumb stub. */
9567 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9568 splt->size += PLT_THUMB_STUB_SIZE;
9569 root_plt->offset = splt->size;
9570 splt->size += htab->plt_entry_size;
9572 if (!htab->symbian_p)
9574 /* We also need to make an entry in the .got.plt section, which
9575 will be placed in the .got section by the linker script. */
9576 if (is_iplt_entry)
9577 arm_plt->got_offset = sgotplt->size;
9578 else
9579 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
9580 if (htab->fdpic_p)
9581 /* Function descriptor takes 64 bits in GOT. */
9582 sgotplt->size += 8;
9583 else
9584 sgotplt->size += 4;
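/* The two helpers below split a value into the immediate fields of the
   ARM MOVW/MOVT encodings, where imm12 occupies bits 11:0 and imm4 bits
   19:16 of the instruction.  For example (purely illustrative):

     arm_movw_immediate (0x1234)     == 0x00010234   (imm4 = 1, imm12 = 0x234)
     arm_movt_immediate (0x56780000) == 0x00050678   (imm4 = 5, imm12 = 0x678)  */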
9588 static bfd_vma
9589 arm_movw_immediate (bfd_vma value)
9591 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9594 static bfd_vma
9595 arm_movt_immediate (bfd_vma value)
9597 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9600 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
9601 the entry lives in .iplt and resolves to (*SYM_VALUE)().
9602 Otherwise, DYNINDX is the index of the symbol in the dynamic
9603 symbol table and SYM_VALUE is undefined.
9605 ROOT_PLT points to the offset of the PLT entry from the start of its
9606 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
9607 bookkeeping information.
9609 Returns FALSE if there was a problem. */
9611 static bfd_boolean
9612 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
9613 union gotplt_union *root_plt,
9614 struct arm_plt_info *arm_plt,
9615 int dynindx, bfd_vma sym_value)
9617 struct elf32_arm_link_hash_table *htab;
9618 asection *sgot;
9619 asection *splt;
9620 asection *srel;
9621 bfd_byte *loc;
9622 bfd_vma plt_index;
9623 Elf_Internal_Rela rel;
9624 bfd_vma plt_header_size;
9625 bfd_vma got_header_size;
9627 htab = elf32_arm_hash_table (info);
9629 /* Pick the appropriate sections and sizes. */
9630 if (dynindx == -1)
9632 splt = htab->root.iplt;
9633 sgot = htab->root.igotplt;
9634 srel = htab->root.irelplt;
9636 /* There are no reserved entries in .igot.plt, and no special
9637 first entry in .iplt. */
9638 got_header_size = 0;
9639 plt_header_size = 0;
9641 else
9643 splt = htab->root.splt;
9644 sgot = htab->root.sgotplt;
9645 srel = htab->root.srelplt;
9647 got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
9648 plt_header_size = htab->plt_header_size;
9650 BFD_ASSERT (splt != NULL && srel != NULL);
9652 /* Fill in the entry in the procedure linkage table. */
9653 if (htab->symbian_p)
9655 BFD_ASSERT (dynindx >= 0);
9656 put_arm_insn (htab, output_bfd,
9657 elf32_arm_symbian_plt_entry[0],
9658 splt->contents + root_plt->offset);
9659 bfd_put_32 (output_bfd,
9660 elf32_arm_symbian_plt_entry[1],
9661 splt->contents + root_plt->offset + 4);
9663 /* Fill in the entry in the .rel.plt section. */
9664 rel.r_offset = (splt->output_section->vma
9665 + splt->output_offset
9666 + root_plt->offset + 4);
9667 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
9669 /* Get the index in the procedure linkage table which
9670 corresponds to this symbol. This is the index of this symbol
9671 in all the symbols for which we are making plt entries. The
9672 first entry in the procedure linkage table is reserved. */
9673 plt_index = ((root_plt->offset - plt_header_size)
9674 / htab->plt_entry_size);
9676 else
9678 bfd_vma got_offset, got_address, plt_address;
9679 bfd_vma got_displacement, initial_got_entry;
9680 bfd_byte * ptr;
9682 BFD_ASSERT (sgot != NULL);
9684 /* Get the offset into the .(i)got.plt table of the entry that
9685 corresponds to this function. */
9686 got_offset = (arm_plt->got_offset & -2);
9688 /* Get the index in the procedure linkage table which
9689 corresponds to this symbol. This is the index of this symbol
9690 in all the symbols for which we are making plt entries.
9691 After the reserved .got.plt entries, all symbols appear in
9692 the same order as in .plt. */
9693 if (htab->fdpic_p)
9694 /* Function descriptor takes 8 bytes. */
9695 plt_index = (got_offset - got_header_size) / 8;
9696 else
9697 plt_index = (got_offset - got_header_size) / 4;
9699 /* Calculate the address of the GOT entry. */
9700 got_address = (sgot->output_section->vma
9701 + sgot->output_offset
9702 + got_offset);
9704 /* ...and the address of the PLT entry. */
9705 plt_address = (splt->output_section->vma
9706 + splt->output_offset
9707 + root_plt->offset);
9709 ptr = splt->contents + root_plt->offset;
9710 if (htab->vxworks_p && bfd_link_pic (info))
9712 unsigned int i;
9713 bfd_vma val;
9715 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9717 val = elf32_arm_vxworks_shared_plt_entry[i];
9718 if (i == 2)
9719 val |= got_address - sgot->output_section->vma;
9720 if (i == 5)
9721 val |= plt_index * RELOC_SIZE (htab);
9722 if (i == 2 || i == 5)
9723 bfd_put_32 (output_bfd, val, ptr);
9724 else
9725 put_arm_insn (htab, output_bfd, val, ptr);
9728 else if (htab->vxworks_p)
9730 unsigned int i;
9731 bfd_vma val;
9733 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9735 val = elf32_arm_vxworks_exec_plt_entry[i];
9736 if (i == 2)
9737 val |= got_address;
9738 if (i == 4)
9739 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
9740 if (i == 5)
9741 val |= plt_index * RELOC_SIZE (htab);
9742 if (i == 2 || i == 5)
9743 bfd_put_32 (output_bfd, val, ptr);
9744 else
9745 put_arm_insn (htab, output_bfd, val, ptr);
9748 loc = (htab->srelplt2->contents
9749 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
9751 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9752 referencing the GOT for this PLT entry. */
9753 rel.r_offset = plt_address + 8;
9754 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
9755 rel.r_addend = got_offset;
9756 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9757 loc += RELOC_SIZE (htab);
9759 /* Create the R_ARM_ABS32 relocation referencing the
9760 beginning of the PLT for this GOT entry. */
9761 rel.r_offset = got_address;
9762 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
9763 rel.r_addend = 0;
9764 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9766 else if (htab->nacl_p)
9768 /* Calculate the displacement between the PLT slot and the
9769 common tail that's part of the special initial PLT slot. */
9770 int32_t tail_displacement
9771 = ((splt->output_section->vma + splt->output_offset
9772 + ARM_NACL_PLT_TAIL_OFFSET)
9773 - (plt_address + htab->plt_entry_size + 4));
9774 BFD_ASSERT ((tail_displacement & 3) == 0);
9775 tail_displacement >>= 2;
9777 BFD_ASSERT ((tail_displacement & 0xff000000) == 0
9778 || (-tail_displacement & 0xff000000) == 0);
9780 /* Calculate the displacement between the PLT slot and the entry
9781 in the GOT. The offset accounts for the value produced by
9782 adding to pc in the penultimate instruction of the PLT stub. */
9783 got_displacement = (got_address
9784 - (plt_address + htab->plt_entry_size));
9786 /* NaCl does not support interworking at all. */
9787 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
9789 put_arm_insn (htab, output_bfd,
9790 elf32_arm_nacl_plt_entry[0]
9791 | arm_movw_immediate (got_displacement),
9792 ptr + 0);
9793 put_arm_insn (htab, output_bfd,
9794 elf32_arm_nacl_plt_entry[1]
9795 | arm_movt_immediate (got_displacement),
9796 ptr + 4);
9797 put_arm_insn (htab, output_bfd,
9798 elf32_arm_nacl_plt_entry[2],
9799 ptr + 8);
9800 put_arm_insn (htab, output_bfd,
9801 elf32_arm_nacl_plt_entry[3]
9802 | (tail_displacement & 0x00ffffff),
9803 ptr + 12);
9805 else if (htab->fdpic_p)
9807 const bfd_vma *plt_entry = using_thumb_only (htab)
9808 ? elf32_arm_fdpic_thumb_plt_entry
9809 : elf32_arm_fdpic_plt_entry;
9811 /* Fill in the Thumb stub if needed. */
9812 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9814 put_thumb_insn (htab, output_bfd,
9815 elf32_arm_plt_thumb_stub[0], ptr - 4);
9816 put_thumb_insn (htab, output_bfd,
9817 elf32_arm_plt_thumb_stub[1], ptr - 2);
9819 /* As we are using 32 bit instructions even for the Thumb
9820 version, we have to use 'put_arm_insn' instead of
9821 'put_thumb_insn'. */
9822 put_arm_insn (htab, output_bfd, plt_entry[0], ptr + 0);
9823 put_arm_insn (htab, output_bfd, plt_entry[1], ptr + 4);
9824 put_arm_insn (htab, output_bfd, plt_entry[2], ptr + 8);
9825 put_arm_insn (htab, output_bfd, plt_entry[3], ptr + 12);
9826 bfd_put_32 (output_bfd, got_offset, ptr + 16);
9828 if (!(info->flags & DF_BIND_NOW))
9830 /* funcdesc_value_reloc_offset. */
9831 bfd_put_32 (output_bfd,
9832 htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
9833 ptr + 20);
9834 put_arm_insn (htab, output_bfd, plt_entry[6], ptr + 24);
9835 put_arm_insn (htab, output_bfd, plt_entry[7], ptr + 28);
9836 put_arm_insn (htab, output_bfd, plt_entry[8], ptr + 32);
9837 put_arm_insn (htab, output_bfd, plt_entry[9], ptr + 36);
9840 else if (using_thumb_only (htab))
9842 /* PR ld/16017: Generate thumb only PLT entries. */
9843 if (!using_thumb2 (htab))
9845 /* FIXME: We ought to be able to generate thumb-1 PLT
9846 instructions... */
9847 _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9848 output_bfd);
9849 return FALSE;
9852 /* Calculate the displacement between the PLT slot and the entry in
9853 the GOT. The 12-byte offset accounts for the value produced by
9854 adding to pc in the 3rd instruction of the PLT stub. */
9855 got_displacement = got_address - (plt_address + 12);
9857 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9858 instead of 'put_thumb_insn'. */
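/* The masks below scatter the low half of GOT_DISPLACEMENT into the
   imm4:i:imm3:imm8 immediate fields of the Thumb-2 MOVW encoding (first
   instruction) and the high half into the matching MOVT fields (second
   instruction).  Note that the 32-bit constants hold the first Thumb-2
   halfword in their low 16 bits, which is why the shifts look inverted. */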
9859 put_arm_insn (htab, output_bfd,
9860 elf32_thumb2_plt_entry[0]
9861 | ((got_displacement & 0x000000ff) << 16)
9862 | ((got_displacement & 0x00000700) << 20)
9863 | ((got_displacement & 0x00000800) >> 1)
9864 | ((got_displacement & 0x0000f000) >> 12),
9865 ptr + 0);
9866 put_arm_insn (htab, output_bfd,
9867 elf32_thumb2_plt_entry[1]
9868 | ((got_displacement & 0x00ff0000) )
9869 | ((got_displacement & 0x07000000) << 4)
9870 | ((got_displacement & 0x08000000) >> 17)
9871 | ((got_displacement & 0xf0000000) >> 28),
9872 ptr + 4);
9873 put_arm_insn (htab, output_bfd,
9874 elf32_thumb2_plt_entry[2],
9875 ptr + 8);
9876 put_arm_insn (htab, output_bfd,
9877 elf32_thumb2_plt_entry[3],
9878 ptr + 12);
9880 else
9882 /* Calculate the displacement between the PLT slot and the
9883 entry in the GOT. The eight-byte offset accounts for the
9884 value produced by adding to pc in the first instruction
9885 of the PLT stub. */
9886 got_displacement = got_address - (plt_address + 8);
9888 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9890 put_thumb_insn (htab, output_bfd,
9891 elf32_arm_plt_thumb_stub[0], ptr - 4);
9892 put_thumb_insn (htab, output_bfd,
9893 elf32_arm_plt_thumb_stub[1], ptr - 2);
9896 if (!elf32_arm_use_long_plt_entry)
9898 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
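/* The short (12-byte) PLT entry emitted here is, roughly:

       add  ip, pc, #G2      @ top 8 bits of the displacement
       add  ip, ip, #G1      @ middle 8 bits
       ldr  pc, [ip, #G0]!   @ low 12 bits

   so the displacement from the PLT entry to the GOT slot must fit in
   28 bits, which the assertion above enforces. */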
9900 put_arm_insn (htab, output_bfd,
9901 elf32_arm_plt_entry_short[0]
9902 | ((got_displacement & 0x0ff00000) >> 20),
9903 ptr + 0);
9904 put_arm_insn (htab, output_bfd,
9905 elf32_arm_plt_entry_short[1]
9906 | ((got_displacement & 0x000ff000) >> 12),
9907 ptr + 4);
9908 put_arm_insn (htab, output_bfd,
9909 elf32_arm_plt_entry_short[2]
9910 | (got_displacement & 0x00000fff),
9911 ptr + 8);
9912 #ifdef FOUR_WORD_PLT
9913 bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
9914 #endif
9916 else
9918 put_arm_insn (htab, output_bfd,
9919 elf32_arm_plt_entry_long[0]
9920 | ((got_displacement & 0xf0000000) >> 28),
9921 ptr + 0);
9922 put_arm_insn (htab, output_bfd,
9923 elf32_arm_plt_entry_long[1]
9924 | ((got_displacement & 0x0ff00000) >> 20),
9925 ptr + 4);
9926 put_arm_insn (htab, output_bfd,
9927 elf32_arm_plt_entry_long[2]
9928 | ((got_displacement & 0x000ff000) >> 12),
9929 ptr + 8);
9930 put_arm_insn (htab, output_bfd,
9931 elf32_arm_plt_entry_long[3]
9932 | (got_displacement & 0x00000fff),
9933 ptr + 12);
9937 /* Fill in the entry in the .rel(a).(i)plt section. */
9938 rel.r_offset = got_address;
9939 rel.r_addend = 0;
9940 if (dynindx == -1)
9942 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9943 The dynamic linker or static executable then calls SYM_VALUE
9944 to determine the correct run-time value of the .igot.plt entry. */
9945 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9946 initial_got_entry = sym_value;
9948 else
9950 /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
9951 used by PLT entry. */
9952 if (htab->fdpic_p)
9954 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
9955 initial_got_entry = 0;
9957 else
9959 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
9960 initial_got_entry = (splt->output_section->vma
9961 + splt->output_offset);
9965 /* Fill in the entry in the global offset table. */
9966 bfd_put_32 (output_bfd, initial_got_entry,
9967 sgot->contents + got_offset);
9969 if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
9971 /* Setup initial funcdesc value. */
9972 /* FIXME: we don't support lazy binding because there is a
9973 race condition between both words getting written and
9974 some other thread attempting to read them. The ARM
9975 architecture does not have an atomic 64 bit load/store
9976 instruction that could be used to prevent it; it is
9977 recommended that threaded FDPIC applications run with the
9978 LD_BIND_NOW environment variable set. */
9979 bfd_put_32 (output_bfd, plt_address + 0x18,
9980 sgot->contents + got_offset);
9981 bfd_put_32 (output_bfd, -1 /* TODO */,
9982 sgot->contents + got_offset + 4);
9986 if (dynindx == -1)
9987 elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
9988 else
9990 if (htab->fdpic_p)
9992 /* For FDPIC we put PLT relocations into .rel.got when not
9993 lazy binding, otherwise we put them in .rel.plt. For now,
9994 we don't support lazy binding so put it in .rel.got. */
9995 if (info->flags & DF_BIND_NOW)
9996 elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelgot, &rel);
9997 else
9998 elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelplt, &rel);
10000 else
10002 loc = srel->contents + plt_index * RELOC_SIZE (htab);
10003 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
10007 return TRUE;
10010 /* Some relocations map to different relocations depending on the
10011 target. Return the real relocation. */
10013 static int
10014 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
10015 int r_type)
10017 switch (r_type)
10019 case R_ARM_TARGET1:
10020 if (globals->target1_is_rel)
10021 return R_ARM_REL32;
10022 else
10023 return R_ARM_ABS32;
10025 case R_ARM_TARGET2:
10026 return globals->target2_reloc;
10028 default:
10029 return r_type;
10033 /* Return the base VMA address which should be subtracted from real addresses
10034 when resolving @dtpoff relocation.
10035 This is PT_TLS segment p_vaddr. */
10037 static bfd_vma
10038 dtpoff_base (struct bfd_link_info *info)
10040 /* If tls_sec is NULL, we should have signalled an error already. */
10041 if (elf_hash_table (info)->tls_sec == NULL)
10042 return 0;
10043 return elf_hash_table (info)->tls_sec->vma;
10046 /* Return the relocation value for @tpoff relocation
10047 if STT_TLS virtual address is ADDRESS. */
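/* ARM uses TLS variant 1: the thread pointer addresses the 8-byte TCB and
   the static TLS block follows it, aligned to the block's own alignment,
   hence the align_power adjustment below. */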
10049 static bfd_vma
10050 tpoff (struct bfd_link_info *info, bfd_vma address)
10052 struct elf_link_hash_table *htab = elf_hash_table (info);
10053 bfd_vma base;
10055 /* If tls_sec is NULL, we should have signalled an error already. */
10056 if (htab->tls_sec == NULL)
10057 return 0;
10058 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10059 return address - htab->tls_sec->vma + base;
10062 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10063 VALUE is the relocation value. */
10065 static bfd_reloc_status_type
10066 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10068 if (value > 0xfff)
10069 return bfd_reloc_overflow;
10071 value |= bfd_get_32 (abfd, data) & 0xfffff000;
10072 bfd_put_32 (abfd, value, data);
10073 return bfd_reloc_ok;
10076 /* Handle TLS relaxations. Relaxing is possible for symbols that use
10077 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10078 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10080 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10081 is to then call final_link_relocate. Return other values in the
10082 case of error.
10084 FIXME:When --emit-relocs is in effect, we'll emit relocs describing
10085 the pre-relaxed code. It would be nice if the relocs were updated
10086 to match the optimization. */
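/* In outline: when the symbol turns out to be local to the static image,
   the descriptor call sequence is rewritten into nops and the offset is
   used directly; otherwise the call is replaced by a load of the offset
   from the GOT entry prepared by the linker. */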
10088 static bfd_reloc_status_type
10089 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
10090 bfd *input_bfd, asection *input_sec, bfd_byte *contents,
10091 Elf_Internal_Rela *rel, unsigned long is_local)
10093 unsigned long insn;
10095 switch (ELF32_R_TYPE (rel->r_info))
10097 default:
10098 return bfd_reloc_notsupported;
10100 case R_ARM_TLS_GOTDESC:
10101 if (is_local)
10102 insn = 0;
10103 else
10105 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10106 if (insn & 1)
10107 insn -= 5; /* THUMB */
10108 else
10109 insn -= 8; /* ARM */
10111 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10112 return bfd_reloc_continue;
10114 case R_ARM_THM_TLS_DESCSEQ:
10115 /* Thumb insn. */
10116 insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
10117 if ((insn & 0xff78) == 0x4478) /* add rx, pc */
10119 if (is_local)
10120 /* nop */
10121 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10123 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
10125 if (is_local)
10126 /* nop */
10127 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10128 else
10129 /* ldr rx,[ry] */
10130 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
10132 else if ((insn & 0xff87) == 0x4780) /* blx rx */
10134 if (is_local)
10135 /* nop */
10136 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10137 else
10138 /* mov r0, rx */
10139 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
10140 contents + rel->r_offset);
10142 else
10144 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10145 /* It's a 32 bit instruction, fetch the rest of it for
10146 error generation. */
10147 insn = (insn << 16)
10148 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
10149 _bfd_error_handler
10150 /* xgettext:c-format */
10151 (_("%pB(%pA+%#" PRIx64 "): "
10152 "unexpected %s instruction '%#lx' in TLS trampoline"),
10153 input_bfd, input_sec, (uint64_t) rel->r_offset,
10154 "Thumb", insn);
10155 return bfd_reloc_notsupported;
10157 break;
10159 case R_ARM_TLS_DESCSEQ:
10160 /* arm insn. */
10161 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10162 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
10164 if (is_local)
10165 /* mov rx, ry */
10166 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
10167 contents + rel->r_offset);
10169 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
10171 if (is_local)
10172 /* nop */
10173 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10174 else
10175 /* ldr rx,[ry] */
10176 bfd_put_32 (input_bfd, insn & 0xfffff000,
10177 contents + rel->r_offset);
10179 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
10181 if (is_local)
10182 /* nop */
10183 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10184 else
10185 /* mov r0, rx */
10186 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
10187 contents + rel->r_offset);
10189 else
10191 _bfd_error_handler
10192 /* xgettext:c-format */
10193 (_("%pB(%pA+%#" PRIx64 "): "
10194 "unexpected %s instruction '%#lx' in TLS trampoline"),
10195 input_bfd, input_sec, (uint64_t) rel->r_offset,
10196 "ARM", insn);
10197 return bfd_reloc_notsupported;
10199 break;
10201 case R_ARM_TLS_CALL:
10202 /* GD->IE relaxation, turn the instruction into 'nop' or
10203 'ldr r0, [pc,r0]' */
10204 insn = is_local ? 0xe1a00000 : 0xe79f0000;
10205 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10206 break;
10208 case R_ARM_THM_TLS_CALL:
10209 /* GD->IE relaxation. */
10210 if (!is_local)
10211 /* add r0,pc; ldr r0, [r0] */
10212 insn = 0x44786800;
10213 else if (using_thumb2 (globals))
10214 /* nop.w */
10215 insn = 0xf3af8000;
10216 else
10217 /* nop; nop */
10218 insn = 0xbf00bf00;
10220 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
10221 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
10222 break;
10224 return bfd_reloc_ok;
10227 /* For a given value of n, calculate the value of G_n as required to
10228 deal with group relocations. We return it in the form of an
10229 encoded constant-and-rotation, together with the final residual. If n is
10230 specified as less than zero, then final_residual is filled with the
10231 input value and no further action is performed. */
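/* For example, with VALUE == 0x12345 and N == 0 this returns 0xb48
   (imm8 0x48 rotated right by 22, i.e. G_0 == 0x12000) and leaves a
   final residual of 0x345. */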
10233 static bfd_vma
10234 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10236 int current_n;
10237 bfd_vma g_n;
10238 bfd_vma encoded_g_n = 0;
10239 bfd_vma residual = value; /* Also known as Y_n. */
10241 for (current_n = 0; current_n <= n; current_n++)
10243 int shift;
10245 /* Calculate which part of the value to mask. */
10246 if (residual == 0)
10247 shift = 0;
10248 else
10250 int msb;
10252 /* Determine the most significant bit in the residual and
10253 align the resulting value to a 2-bit boundary. */
10254 for (msb = 30; msb >= 0; msb -= 2)
10255 if (residual & (3 << msb))
10256 break;
10258 /* The desired shift is now (msb - 6), or zero, whichever
10259 is the greater. */
10260 shift = msb - 6;
10261 if (shift < 0)
10262 shift = 0;
10265 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10266 g_n = residual & (0xff << shift);
10267 encoded_g_n = (g_n >> shift)
10268 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10270 /* Calculate the residual for the next time around. */
10271 residual &= ~g_n;
10274 *final_residual = residual;
10276 return encoded_g_n;
10279 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10280 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
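/* Bits 24:21 of an ARM data-processing instruction hold the opcode:
   ADD is 0b0100 (bit 23 within the 0x1e00000 mask below) and SUB is
   0b0010 (bit 22). */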
10282 static int
10283 identify_add_or_sub (bfd_vma insn)
10285 int opcode = insn & 0x1e00000;
10287 if (opcode == 1 << 23) /* ADD */
10288 return 1;
10290 if (opcode == 1 << 22) /* SUB */
10291 return -1;
10293 return 0;
10296 /* Perform a relocation as part of a final link. */
10298 static bfd_reloc_status_type
10299 elf32_arm_final_link_relocate (reloc_howto_type * howto,
10300 bfd * input_bfd,
10301 bfd * output_bfd,
10302 asection * input_section,
10303 bfd_byte * contents,
10304 Elf_Internal_Rela * rel,
10305 bfd_vma value,
10306 struct bfd_link_info * info,
10307 asection * sym_sec,
10308 const char * sym_name,
10309 unsigned char st_type,
10310 enum arm_st_branch_type branch_type,
10311 struct elf_link_hash_entry * h,
10312 bfd_boolean * unresolved_reloc_p,
10313 char ** error_message)
10315 unsigned long r_type = howto->type;
10316 unsigned long r_symndx;
10317 bfd_byte * hit_data = contents + rel->r_offset;
10318 bfd_vma * local_got_offsets;
10319 bfd_vma * local_tlsdesc_gotents;
10320 asection * sgot;
10321 asection * splt;
10322 asection * sreloc = NULL;
10323 asection * srelgot;
10324 bfd_vma addend;
10325 bfd_signed_vma signed_addend;
10326 unsigned char dynreloc_st_type;
10327 bfd_vma dynreloc_value;
10328 struct elf32_arm_link_hash_table * globals;
10329 struct elf32_arm_link_hash_entry *eh;
10330 union gotplt_union *root_plt;
10331 struct arm_plt_info *arm_plt;
10332 bfd_vma plt_offset;
10333 bfd_vma gotplt_offset;
10334 bfd_boolean has_iplt_entry;
10335 bfd_boolean resolved_to_zero;
10337 globals = elf32_arm_hash_table (info);
10338 if (globals == NULL)
10339 return bfd_reloc_notsupported;
10341 BFD_ASSERT (is_arm_elf (input_bfd));
10342 BFD_ASSERT (howto != NULL);
10344 /* Some relocation types map to different relocations depending on the
10345 target. We pick the right one here. */
10346 r_type = arm_real_reloc_type (globals, r_type);
10348 /* It is possible to have linker relaxations on some TLS access
10349 models. Update our information here. */
10350 r_type = elf32_arm_tls_transition (info, r_type, h);
10352 if (r_type != howto->type)
10353 howto = elf32_arm_howto_from_type (r_type);
10355 eh = (struct elf32_arm_link_hash_entry *) h;
10356 sgot = globals->root.sgot;
10357 local_got_offsets = elf_local_got_offsets (input_bfd);
10358 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10360 if (globals->root.dynamic_sections_created)
10361 srelgot = globals->root.srelgot;
10362 else
10363 srelgot = NULL;
10365 r_symndx = ELF32_R_SYM (rel->r_info);
10367 if (globals->use_rel)
10369 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
10371 if (addend & ((howto->src_mask + 1) >> 1))
10373 signed_addend = -1;
10374 signed_addend &= ~ howto->src_mask;
10375 signed_addend |= addend;
10377 else
10378 signed_addend = addend;
10380 else
10381 addend = signed_addend = rel->r_addend;
10383 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
10384 are resolving a function call relocation. */
10385 if (using_thumb_only (globals)
10386 && (r_type == R_ARM_THM_CALL
10387 || r_type == R_ARM_THM_JUMP24)
10388 && branch_type == ST_BRANCH_TO_ARM)
10389 branch_type = ST_BRANCH_TO_THUMB;
10391 /* Record the symbol information that should be used in dynamic
10392 relocations. */
10393 dynreloc_st_type = st_type;
10394 dynreloc_value = value;
10395 if (branch_type == ST_BRANCH_TO_THUMB)
10396 dynreloc_value |= 1;
10398 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10399 VALUE appropriately for relocations that we resolve at link time. */
10400 has_iplt_entry = FALSE;
10401 if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10402 &arm_plt)
10403 && root_plt->offset != (bfd_vma) -1)
10405 plt_offset = root_plt->offset;
10406 gotplt_offset = arm_plt->got_offset;
10408 if (h == NULL || eh->is_iplt)
10410 has_iplt_entry = TRUE;
10411 splt = globals->root.iplt;
10413 /* Populate .iplt entries here, because not all of them will
10414 be seen by finish_dynamic_symbol. The lower bit is set if
10415 we have already populated the entry. */
10416 if (plt_offset & 1)
10417 plt_offset--;
10418 else
10420 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10421 -1, dynreloc_value))
10422 root_plt->offset |= 1;
10423 else
10424 return bfd_reloc_notsupported;
10427 /* Static relocations always resolve to the .iplt entry. */
10428 st_type = STT_FUNC;
10429 value = (splt->output_section->vma
10430 + splt->output_offset
10431 + plt_offset);
10432 branch_type = ST_BRANCH_TO_ARM;
10434 /* If there are non-call relocations that resolve to the .iplt
10435 entry, then all dynamic ones must too. */
10436 if (arm_plt->noncall_refcount != 0)
10438 dynreloc_st_type = st_type;
10439 dynreloc_value = value;
10442 else
10443 /* We populate the .plt entry in finish_dynamic_symbol. */
10444 splt = globals->root.splt;
10446 else
10448 splt = NULL;
10449 plt_offset = (bfd_vma) -1;
10450 gotplt_offset = (bfd_vma) -1;
10453 resolved_to_zero = (h != NULL
10454 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10456 switch (r_type)
10458 case R_ARM_NONE:
10459 /* We don't need to find a value for this symbol. It's just a
10460 marker. */
10461 *unresolved_reloc_p = FALSE;
10462 return bfd_reloc_ok;
10464 case R_ARM_ABS12:
10465 if (!globals->vxworks_p)
10466 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10467 /* Fall through. */
10469 case R_ARM_PC24:
10470 case R_ARM_ABS32:
10471 case R_ARM_ABS32_NOI:
10472 case R_ARM_REL32:
10473 case R_ARM_REL32_NOI:
10474 case R_ARM_CALL:
10475 case R_ARM_JUMP24:
10476 case R_ARM_XPC25:
10477 case R_ARM_PREL31:
10478 case R_ARM_PLT32:
10479 /* Handle relocations which should use the PLT entry. ABS32/REL32
10480 will use the symbol's value, which may point to a PLT entry, but we
10481 don't need to handle that here. If we created a PLT entry, all
10482 branches in this object should go to it, except if the PLT is too
10483 far away, in which case a long branch stub should be inserted. */
10484 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10485 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10486 && r_type != R_ARM_CALL
10487 && r_type != R_ARM_JUMP24
10488 && r_type != R_ARM_PLT32)
10489 && plt_offset != (bfd_vma) -1)
10491 /* If we've created a .plt section, and assigned a PLT entry
10492 to this function, it must either be a STT_GNU_IFUNC reference
10493 or not be known to bind locally. In other cases, we should
10494 have cleared the PLT entry by now. */
10495 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10497 value = (splt->output_section->vma
10498 + splt->output_offset
10499 + plt_offset);
10500 *unresolved_reloc_p = FALSE;
10501 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10502 contents, rel->r_offset, value,
10503 rel->r_addend);
10506 /* When generating a shared object or relocatable executable, these
10507 relocations are copied into the output file to be resolved at
10508 run time. */
10509 if ((bfd_link_pic (info)
10510 || globals->root.is_relocatable_executable
10511 || globals->fdpic_p)
10512 && (input_section->flags & SEC_ALLOC)
10513 && !(globals->vxworks_p
10514 && strcmp (input_section->output_section->name,
10515 ".tls_vars") == 0)
10516 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10517 || !SYMBOL_CALLS_LOCAL (info, h))
10518 && !(input_bfd == globals->stub_bfd
10519 && strstr (input_section->name, STUB_SUFFIX))
10520 && (h == NULL
10521 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10522 && !resolved_to_zero)
10523 || h->root.type != bfd_link_hash_undefweak)
10524 && r_type != R_ARM_PC24
10525 && r_type != R_ARM_CALL
10526 && r_type != R_ARM_JUMP24
10527 && r_type != R_ARM_PREL31
10528 && r_type != R_ARM_PLT32)
10530 Elf_Internal_Rela outrel;
10531 bfd_boolean skip, relocate;
10532 int isrofixup = 0;
10534 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10535 && !h->def_regular)
10537 char *v = _("shared object");
10539 if (bfd_link_executable (info))
10540 v = _("PIE executable");
10542 _bfd_error_handler
10543 (_("%pB: relocation %s against external or undefined symbol `%s'"
10544 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10545 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10546 return bfd_reloc_notsupported;
10549 *unresolved_reloc_p = FALSE;
10551 if (sreloc == NULL && globals->root.dynamic_sections_created)
10553 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10554 ! globals->use_rel);
10556 if (sreloc == NULL)
10557 return bfd_reloc_notsupported;
10560 skip = FALSE;
10561 relocate = FALSE;
10563 outrel.r_addend = addend;
10564 outrel.r_offset =
10565 _bfd_elf_section_offset (output_bfd, info, input_section,
10566 rel->r_offset);
10567 if (outrel.r_offset == (bfd_vma) -1)
10568 skip = TRUE;
10569 else if (outrel.r_offset == (bfd_vma) -2)
10570 skip = TRUE, relocate = TRUE;
10571 outrel.r_offset += (input_section->output_section->vma
10572 + input_section->output_offset);
10574 if (skip)
10575 memset (&outrel, 0, sizeof outrel);
10576 else if (h != NULL
10577 && h->dynindx != -1
10578 && (!bfd_link_pic (info)
10579 || !(bfd_link_pie (info)
10580 || SYMBOLIC_BIND (info, h))
10581 || !h->def_regular))
10582 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10583 else
10585 int symbol;
10587 /* This symbol is local, or marked to become local. */
10588 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10589 || (globals->fdpic_p && !bfd_link_pic(info)));
10590 if (globals->symbian_p)
10592 asection *osec;
10594 /* On Symbian OS, the data segment and text segment
10595 can be relocated independently. Therefore, we
10596 must indicate the segment to which this
10597 relocation is relative. The BPABI allows us to
10598 use any symbol in the right segment; we just use
10599 the section symbol as it is convenient. (We
10600 cannot use the symbol given by "h" directly as it
10601 will not appear in the dynamic symbol table.)
10603 Note that the dynamic linker ignores the section
10604 symbol value, so we don't subtract osec->vma
10605 from the emitted reloc addend. */
10606 if (sym_sec)
10607 osec = sym_sec->output_section;
10608 else
10609 osec = input_section->output_section;
10610 symbol = elf_section_data (osec)->dynindx;
10611 if (symbol == 0)
10613 struct elf_link_hash_table *htab = elf_hash_table (info);
10615 if ((osec->flags & SEC_READONLY) == 0
10616 && htab->data_index_section != NULL)
10617 osec = htab->data_index_section;
10618 else
10619 osec = htab->text_index_section;
10620 symbol = elf_section_data (osec)->dynindx;
10622 BFD_ASSERT (symbol != 0);
10624 else
10625 /* On SVR4-ish systems, the dynamic loader cannot
10626 relocate the text and data segments independently,
10627 so the symbol does not matter. */
10628 symbol = 0;
10629 if (dynreloc_st_type == STT_GNU_IFUNC)
10630 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10631 to the .iplt entry. Instead, every non-call reference
10632 must use an R_ARM_IRELATIVE relocation to obtain the
10633 correct run-time address. */
10634 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10635 else if (globals->fdpic_p && !bfd_link_pic(info))
10636 isrofixup = 1;
10637 else
10638 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10639 if (globals->use_rel)
10640 relocate = TRUE;
10641 else
10642 outrel.r_addend += dynreloc_value;
10645 if (isrofixup)
10646 arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
10647 else
10648 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10650 /* If this reloc is against an external symbol, we do not want to
10651 fiddle with the addend. Otherwise, we need to include the symbol
10652 value so that it becomes an addend for the dynamic reloc. */
10653 if (! relocate)
10654 return bfd_reloc_ok;
10656 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10657 contents, rel->r_offset,
10658 dynreloc_value, (bfd_vma) 0);
10660 else switch (r_type)
10662 case R_ARM_ABS12:
10663 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10665 case R_ARM_XPC25: /* Arm BLX instruction. */
10666 case R_ARM_CALL:
10667 case R_ARM_JUMP24:
10668 case R_ARM_PC24: /* Arm B/BL instruction. */
10669 case R_ARM_PLT32:
10671 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10673 if (r_type == R_ARM_XPC25)
10675 /* Check for Arm calling Arm function. */
10676 /* FIXME: Should we translate the instruction into a BL
10677 instruction instead ? */
10678 if (branch_type != ST_BRANCH_TO_THUMB)
10679 _bfd_error_handler
10680 (_("\%pB: warning: %s BLX instruction targets"
10681 " %s function '%s'"),
10682 input_bfd, "ARM",
10683 "ARM", h ? h->root.root.string : "(local)");
10685 else if (r_type == R_ARM_PC24)
10687 /* Check for Arm calling Thumb function. */
10688 if (branch_type == ST_BRANCH_TO_THUMB)
10690 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10691 output_bfd, input_section,
10692 hit_data, sym_sec, rel->r_offset,
10693 signed_addend, value,
10694 error_message))
10695 return bfd_reloc_ok;
10696 else
10697 return bfd_reloc_dangerous;
10701 /* Check if a stub has to be inserted because the
10702 destination is too far or we are changing mode. */
10703 if ( r_type == R_ARM_CALL
10704 || r_type == R_ARM_JUMP24
10705 || r_type == R_ARM_PLT32)
10707 enum elf32_arm_stub_type stub_type = arm_stub_none;
10708 struct elf32_arm_link_hash_entry *hash;
10710 hash = (struct elf32_arm_link_hash_entry *) h;
10711 stub_type = arm_type_of_stub (info, input_section, rel,
10712 st_type, &branch_type,
10713 hash, value, sym_sec,
10714 input_bfd, sym_name);
10716 if (stub_type != arm_stub_none)
10718 /* The target is out of reach, so redirect the
10719 branch to the local stub for this function. */
10720 stub_entry = elf32_arm_get_stub_entry (input_section,
10721 sym_sec, h,
10722 rel, globals,
10723 stub_type);
10725 if (stub_entry != NULL)
10726 value = (stub_entry->stub_offset
10727 + stub_entry->stub_sec->output_offset
10728 + stub_entry->stub_sec->output_section->vma);
10730 if (plt_offset != (bfd_vma) -1)
10731 *unresolved_reloc_p = FALSE;
10734 else
10736 /* If the call goes through a PLT entry, make sure to
10737 check distance to the right destination address. */
10738 if (plt_offset != (bfd_vma) -1)
10740 value = (splt->output_section->vma
10741 + splt->output_offset
10742 + plt_offset);
10743 *unresolved_reloc_p = FALSE;
10744 /* The PLT entry is in ARM mode, regardless of the
10745 target function. */
10746 branch_type = ST_BRANCH_TO_ARM;
10751 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10752 where:
10753 S is the address of the symbol in the relocation.
10754 P is address of the instruction being relocated.
10755 A is the addend (extracted from the instruction) in bytes.
10757 S is held in 'value'.
10758 P is the base address of the section containing the
10759 instruction plus the offset of the reloc into that
10760 section, ie:
10761 (input_section->output_section->vma +
10762 input_section->output_offset +
10763 rel->r_offset).
10764 A is the addend, converted into bytes, ie:
10765 (signed_addend * 4)
10767 Note: None of these operations have knowledge of the pipeline
10768 size of the processor, thus it is up to the assembler to
10769 encode this information into the addend. */
10770 value -= (input_section->output_section->vma
10771 + input_section->output_offset);
10772 value -= rel->r_offset;
10773 if (globals->use_rel)
10774 value += (signed_addend << howto->size);
10775 else
10776 /* RELA addends do not have to be adjusted by howto->size. */
10777 value += signed_addend;
10779 signed_addend = value;
10780 signed_addend >>= howto->rightshift;
10782 /* A branch to an undefined weak symbol is turned into a jump to
10783 the next instruction unless a PLT entry will be created.
10784 Do the same for local undefined symbols (but not for STN_UNDEF).
10785 The jump to the next instruction is optimized as a NOP depending
10786 on the architecture. */
10787 if (h ? (h->root.type == bfd_link_hash_undefweak
10788 && plt_offset == (bfd_vma) -1)
10789 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10791 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10793 if (arch_has_arm_nop (globals))
10794 value |= 0x0320f000;
10795 else
10796 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10798 else
10800 /* Perform a signed range check. */
10801 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
10802 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10803 return bfd_reloc_overflow;
10805 addend = (value & 2);
10807 value = (signed_addend & howto->dst_mask)
10808 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10810 if (r_type == R_ARM_CALL)
10812 /* Set the H bit in the BLX instruction. */
10813 if (branch_type == ST_BRANCH_TO_THUMB)
10815 if (addend)
10816 value |= (1 << 24);
10817 else
10818 value &= ~(bfd_vma)(1 << 24);
10821 /* Select the correct instruction (BL or BLX). */
10822 /* Only if we are not handling a BL to a stub. In this
10823 case, mode switching is performed by the stub. */
10824 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10825 value |= (1 << 28);
10826 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10828 value &= ~(bfd_vma)(1 << 28);
10829 value |= (1 << 24);
10834 break;
10836 case R_ARM_ABS32:
10837 value += addend;
10838 if (branch_type == ST_BRANCH_TO_THUMB)
10839 value |= 1;
10840 break;
10842 case R_ARM_ABS32_NOI:
10843 value += addend;
10844 break;
10846 case R_ARM_REL32:
10847 value += addend;
10848 if (branch_type == ST_BRANCH_TO_THUMB)
10849 value |= 1;
10850 value -= (input_section->output_section->vma
10851 + input_section->output_offset + rel->r_offset);
10852 break;
10854 case R_ARM_REL32_NOI:
10855 value += addend;
10856 value -= (input_section->output_section->vma
10857 + input_section->output_offset + rel->r_offset);
10858 break;
10860 case R_ARM_PREL31:
10861 value -= (input_section->output_section->vma
10862 + input_section->output_offset + rel->r_offset);
10863 value += signed_addend;
10864 if (! h || h->root.type != bfd_link_hash_undefweak)
10866 /* Check for overflow. */
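/* (value ^ (value >> 1)) has bit 30 set exactly when bits 30 and 31 of
   VALUE differ, i.e. when the result does not fit in a signed 31-bit
   field. */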
10867 if ((value ^ (value >> 1)) & (1 << 30))
10868 return bfd_reloc_overflow;
10870 value &= 0x7fffffff;
10871 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10872 if (branch_type == ST_BRANCH_TO_THUMB)
10873 value |= 1;
10874 break;
10877 bfd_put_32 (input_bfd, value, hit_data);
10878 return bfd_reloc_ok;
10880 case R_ARM_ABS8:
10881 /* PR 16202: Refetch the addend using the correct size. */
10882 if (globals->use_rel)
10883 addend = bfd_get_8 (input_bfd, hit_data);
10884 value += addend;
10886 /* There is no way to tell whether the user intended to use a signed or
10887 unsigned addend. When checking for overflow we accept either,
10888 as specified by the AAELF. */
10889 if ((long) value > 0xff || (long) value < -0x80)
10890 return bfd_reloc_overflow;
10892 bfd_put_8 (input_bfd, value, hit_data);
10893 return bfd_reloc_ok;
10895 case R_ARM_ABS16:
10896 /* PR 16202: Refetch the addend using the correct size. */
10897 if (globals->use_rel)
10898 addend = bfd_get_16 (input_bfd, hit_data);
10899 value += addend;
10901 /* See comment for R_ARM_ABS8. */
10902 if ((long) value > 0xffff || (long) value < -0x8000)
10903 return bfd_reloc_overflow;
10905 bfd_put_16 (input_bfd, value, hit_data);
10906 return bfd_reloc_ok;
10908 case R_ARM_THM_ABS5:
10909 /* Support ldr and str instructions for the thumb. */
10910 if (globals->use_rel)
10912 /* Need to refetch addend. */
10913 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10914 /* ??? Need to determine shift amount from operand size. */
10915 addend >>= howto->rightshift;
10917 value += addend;
10919 /* ??? Isn't value unsigned? */
10920 if ((long) value > 0x1f || (long) value < -0x10)
10921 return bfd_reloc_overflow;
10923 /* ??? Value needs to be properly shifted into place first. */
10924 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10925 bfd_put_16 (input_bfd, value, hit_data);
10926 return bfd_reloc_ok;
10928 case R_ARM_THM_ALU_PREL_11_0:
10929 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
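/* The 12-bit ADDW/SUBW immediate is split across the encoding as
   i:imm3:imm8 (bit 26, bits 14:12 and bits 7:0 of the combined 32-bit
   instruction); the extraction below reassembles it, and the 0xf00000
   test distinguishes SUBW (negative offset) from ADDW. */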
10931 bfd_vma insn;
10932 bfd_signed_vma relocation;
10934 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10935 | bfd_get_16 (input_bfd, hit_data + 2);
10937 if (globals->use_rel)
10939 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10940 | ((insn & (1 << 26)) >> 15);
10941 if (insn & 0xf00000)
10942 signed_addend = -signed_addend;
10945 relocation = value + signed_addend;
10946 relocation -= Pa (input_section->output_section->vma
10947 + input_section->output_offset
10948 + rel->r_offset);
10950 /* PR 21523: Use an absolute value. The user of this reloc will
10951 have already selected an ADD or SUB insn appropriately. */
10952 value = llabs (relocation);
10954 if (value >= 0x1000)
10955 return bfd_reloc_overflow;
10957 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10958 if (branch_type == ST_BRANCH_TO_THUMB)
10959 value |= 1;
10961 insn = (insn & 0xfb0f8f00) | (value & 0xff)
10962 | ((value & 0x700) << 4)
10963 | ((value & 0x800) << 15);
10964 if (relocation < 0)
10965 insn |= 0xa00000;
10967 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10968 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10970 return bfd_reloc_ok;
10973 case R_ARM_THM_PC8:
10974 /* PR 10073: This reloc is not generated by the GNU toolchain,
10975 but it is supported for compatibility with third party libraries
10976 generated by other compilers, specifically those from ARM and IAR. */
10978 bfd_vma insn;
10979 bfd_signed_vma relocation;
10981 insn = bfd_get_16 (input_bfd, hit_data);
10983 if (globals->use_rel)
10984 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) - 4;
10986 relocation = value + addend;
10987 relocation -= Pa (input_section->output_section->vma
10988 + input_section->output_offset
10989 + rel->r_offset);
10991 value = relocation;
10993 /* We do not check for overflow of this reloc. Although strictly
10994 speaking this is incorrect, it appears to be necessary in order
10995 to work with IAR generated relocs. Since GCC and GAS do not
10996 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
10997 a problem for them. */
10998 value &= 0x3fc;
11000 insn = (insn & 0xff00) | (value >> 2);
11002 bfd_put_16 (input_bfd, insn, hit_data);
11004 return bfd_reloc_ok;
11007 case R_ARM_THM_PC12:
11008 /* Corresponds to: ldr.w reg, [pc, #offset]. */
11010 bfd_vma insn;
11011 bfd_signed_vma relocation;
11013 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
11014 | bfd_get_16 (input_bfd, hit_data + 2);
11016 if (globals->use_rel)
11018 signed_addend = insn & 0xfff;
11019 if (!(insn & (1 << 23)))
11020 signed_addend = -signed_addend;
11023 relocation = value + signed_addend;
11024 relocation -= Pa (input_section->output_section->vma
11025 + input_section->output_offset
11026 + rel->r_offset);
11028 value = relocation;
11030 if (value >= 0x1000)
11031 return bfd_reloc_overflow;
11033 insn = (insn & 0xff7ff000) | value;
11034 if (relocation >= 0)
11035 insn |= (1 << 23);
11037 bfd_put_16 (input_bfd, insn >> 16, hit_data);
11038 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
11040 return bfd_reloc_ok;
11043 case R_ARM_THM_XPC22:
11044 case R_ARM_THM_CALL:
11045 case R_ARM_THM_JUMP24:
11046 /* Thumb BL (branch long instruction). */
11048 bfd_vma relocation;
11049 bfd_vma reloc_sign;
11050 bfd_boolean overflow = FALSE;
11051 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11052 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11053 bfd_signed_vma reloc_signed_max;
11054 bfd_signed_vma reloc_signed_min;
11055 bfd_vma check;
11056 bfd_signed_vma signed_check;
11057 int bitsize;
11058 const int thumb2 = using_thumb2 (globals);
11059 const int thumb2_bl = using_thumb2_bl (globals);
11061 /* A branch to an undefined weak symbol is turned into a jump to
11062 the next instruction unless a PLT entry will be created.
11063 The jump to the next instruction is optimized as a NOP.W for
11064 Thumb-2 enabled architectures. */
11065 if (h && h->root.type == bfd_link_hash_undefweak
11066 && plt_offset == (bfd_vma) -1)
11068 if (thumb2)
11070 bfd_put_16 (input_bfd, 0xf3af, hit_data);
11071 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
11073 else
11075 bfd_put_16 (input_bfd, 0xe000, hit_data);
11076 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
11078 return bfd_reloc_ok;
11081 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
11082 with Thumb-1) involving the J1 and J2 bits. */
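/* The BL/BLX immediate encodes a 25-bit offset as S:I1:I2:imm10:imm11:'0'
   with I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S); the code below
   reconstructs that offset and sign-extends it from bit 24. */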
11083 if (globals->use_rel)
11085 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
11086 bfd_vma upper = upper_insn & 0x3ff;
11087 bfd_vma lower = lower_insn & 0x7ff;
11088 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
11089 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
11090 bfd_vma i1 = j1 ^ s ? 0 : 1;
11091 bfd_vma i2 = j2 ^ s ? 0 : 1;
11093 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
11094 /* Sign extend. */
11095 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
11097 signed_addend = addend;
11100 if (r_type == R_ARM_THM_XPC22)
11102 /* Check for Thumb to Thumb call. */
11103 /* FIXME: Should we translate the instruction into a BL
11104 instruction instead ? */
11105 if (branch_type == ST_BRANCH_TO_THUMB)
11106 _bfd_error_handler
11107 (_("%pB: warning: %s BLX instruction targets"
11108 " %s function '%s'"),
11109 input_bfd, "Thumb",
11110 "Thumb", h ? h->root.root.string : "(local)");
11112 else
11114 /* If it is not a call to Thumb, assume call to Arm.
11115 If it is a call relative to a section name, then it is not a
11116 function call at all, but rather a long jump. Calls through
11117 the PLT do not require stubs. */
11118 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
11120 if (globals->use_blx && r_type == R_ARM_THM_CALL)
11122 /* Convert BL to BLX. */
11123 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11125 else if (( r_type != R_ARM_THM_CALL)
11126 && (r_type != R_ARM_THM_JUMP24))
11128 if (elf32_thumb_to_arm_stub
11129 (info, sym_name, input_bfd, output_bfd, input_section,
11130 hit_data, sym_sec, rel->r_offset, signed_addend, value,
11131 error_message))
11132 return bfd_reloc_ok;
11133 else
11134 return bfd_reloc_dangerous;
11137 else if (branch_type == ST_BRANCH_TO_THUMB
11138 && globals->use_blx
11139 && r_type == R_ARM_THM_CALL)
11141 /* Make sure this is a BL. */
11142 lower_insn |= 0x1800;
11146 enum elf32_arm_stub_type stub_type = arm_stub_none;
11147 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
11149 /* Check if a stub has to be inserted because the destination
11150 is too far. */
11151 struct elf32_arm_stub_hash_entry *stub_entry;
11152 struct elf32_arm_link_hash_entry *hash;
11154 hash = (struct elf32_arm_link_hash_entry *) h;
11156 stub_type = arm_type_of_stub (info, input_section, rel,
11157 st_type, &branch_type,
11158 hash, value, sym_sec,
11159 input_bfd, sym_name);
11161 if (stub_type != arm_stub_none)
11163 /* The target is out of reach or we are changing modes, so
11164 redirect the branch to the local stub for this
11165 function. */
11166 stub_entry = elf32_arm_get_stub_entry (input_section,
11167 sym_sec, h,
11168 rel, globals,
11169 stub_type);
11170 if (stub_entry != NULL)
11172 value = (stub_entry->stub_offset
11173 + stub_entry->stub_sec->output_offset
11174 + stub_entry->stub_sec->output_section->vma);
11176 if (plt_offset != (bfd_vma) -1)
11177 *unresolved_reloc_p = FALSE;
11180 /* If this call becomes a call to Arm, force BLX. */
11181 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
11183 if ((stub_entry
11184 && !arm_stub_is_thumb (stub_entry->stub_type))
11185 || branch_type != ST_BRANCH_TO_THUMB)
11186 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11191 /* Handle calls via the PLT. */
11192 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
11194 value = (splt->output_section->vma
11195 + splt->output_offset
11196 + plt_offset);
11198 if (globals->use_blx
11199 && r_type == R_ARM_THM_CALL
11200 && ! using_thumb_only (globals))
11202 /* If the Thumb BLX instruction is available, convert
11203 the BL to a BLX instruction to call the ARM-mode
11204 PLT entry. */
11205 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11206 branch_type = ST_BRANCH_TO_ARM;
11208 else
11210 if (! using_thumb_only (globals))
11211 /* Target the Thumb stub before the ARM PLT entry. */
11212 value -= PLT_THUMB_STUB_SIZE;
11213 branch_type = ST_BRANCH_TO_THUMB;
11215 *unresolved_reloc_p = FALSE;
11218 relocation = value + signed_addend;
11220 relocation -= (input_section->output_section->vma
11221 + input_section->output_offset
11222 + rel->r_offset);
11224 check = relocation >> howto->rightshift;
11226 /* If this is a signed value, the rightshift just dropped
11227 leading 1 bits (assuming twos complement). */
11228 if ((bfd_signed_vma) relocation >= 0)
11229 signed_check = check;
11230 else
11231 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
11233 /* Calculate the permissible maximum and minimum values for
11234 this relocation according to whether we're relocating for
11235 Thumb-2 or not. */
11236 bitsize = howto->bitsize;
11237 if (!thumb2_bl)
11238 bitsize -= 2;
11239 reloc_signed_max = (1 << (bitsize - 1)) - 1;
11240 reloc_signed_min = ~reloc_signed_max;
11242 /* Assumes two's complement. */
11243 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11244 overflow = TRUE;
11246 if ((lower_insn & 0x5000) == 0x4000)
11247 /* For a BLX instruction, make sure that the relocation is rounded up
11248 to a word boundary. This follows the semantics of the instruction
11249 which specifies that bit 1 of the target address will come from bit
11250 1 of the base address. */
11251 relocation = (relocation + 2) & ~ 3;
11253 /* Put RELOCATION back into the insn. Assumes two's complement.
11254 We use the Thumb-2 encoding, which is safe even if dealing with
11255 a Thumb-1 instruction by virtue of our overflow check above. */
11256 reloc_sign = (signed_check < 0) ? 1 : 0;
11257 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
11258 | ((relocation >> 12) & 0x3ff)
11259 | (reloc_sign << 10);
11260 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
11261 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
11262 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
11263 | ((relocation >> 1) & 0x7ff);
11265 /* Put the relocated value back in the object file: */
11266 bfd_put_16 (input_bfd, upper_insn, hit_data);
11267 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11269 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11271 break;
11273 case R_ARM_THM_JUMP19:
11274 /* Thumb32 conditional branch instruction. */
11276 bfd_vma relocation;
11277 bfd_boolean overflow = FALSE;
11278 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11279 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11280 bfd_signed_vma reloc_signed_max = 0xffffe;
11281 bfd_signed_vma reloc_signed_min = -0x100000;
11282 bfd_signed_vma signed_check;
11283 enum elf32_arm_stub_type stub_type = arm_stub_none;
11284 struct elf32_arm_stub_hash_entry *stub_entry;
11285 struct elf32_arm_link_hash_entry *hash;
11287 /* Need to refetch the addend, reconstruct the top three bits,
11288 and squish the 6-bit and 11-bit immediate pieces together. */
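/* The Thumb-2 conditional branch (B<cond>.W) offset is laid out as
   S:J2:J1:imm6:imm11:0 -- a 21-bit signed, halfword-aligned value
   (roughly +/-1 MiB), which is what the overflow limits below check
   against.  Unlike BL/BLX, J1 and J2 are used directly rather than
   being XORed with S. */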
11289 if (globals->use_rel)
11291 bfd_vma S = (upper_insn & 0x0400) >> 10;
11292 bfd_vma upper = (upper_insn & 0x003f);
11293 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
11294 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
11295 bfd_vma lower = (lower_insn & 0x07ff);
11297 upper |= J1 << 6;
11298 upper |= J2 << 7;
11299 upper |= (!S) << 8;
11300 upper -= 0x0100; /* Sign extend. */
11302 addend = (upper << 12) | (lower << 1);
11303 signed_addend = addend;
11306 /* Handle calls via the PLT. */
11307 if (plt_offset != (bfd_vma) -1)
11309 value = (splt->output_section->vma
11310 + splt->output_offset
11311 + plt_offset);
11312 /* Target the Thumb stub before the ARM PLT entry. */
11313 value -= PLT_THUMB_STUB_SIZE;
11314 *unresolved_reloc_p = FALSE;
11317 hash = (struct elf32_arm_link_hash_entry *)h;
11319 stub_type = arm_type_of_stub (info, input_section, rel,
11320 st_type, &branch_type,
11321 hash, value, sym_sec,
11322 input_bfd, sym_name);
11323 if (stub_type != arm_stub_none)
11325 stub_entry = elf32_arm_get_stub_entry (input_section,
11326 sym_sec, h,
11327 rel, globals,
11328 stub_type);
11329 if (stub_entry != NULL)
11331 value = (stub_entry->stub_offset
11332 + stub_entry->stub_sec->output_offset
11333 + stub_entry->stub_sec->output_section->vma);
11337 relocation = value + signed_addend;
11338 relocation -= (input_section->output_section->vma
11339 + input_section->output_offset
11340 + rel->r_offset);
11341 signed_check = (bfd_signed_vma) relocation;
11343 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11344 overflow = TRUE;
11346 /* Put RELOCATION back into the insn. */
11348 bfd_vma S = (relocation & 0x00100000) >> 20;
11349 bfd_vma J2 = (relocation & 0x00080000) >> 19;
11350 bfd_vma J1 = (relocation & 0x00040000) >> 18;
11351 bfd_vma hi = (relocation & 0x0003f000) >> 12;
11352 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
11354 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
11355 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
11358 /* Put the relocated value back in the object file: */
11359 bfd_put_16 (input_bfd, upper_insn, hit_data);
11360 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11362 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11365 case R_ARM_THM_JUMP11:
11366 case R_ARM_THM_JUMP8:
11367 case R_ARM_THM_JUMP6:
11368 /* Thumb B (branch) instruction. */
11370 bfd_signed_vma relocation;
11371 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
11372 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
11373 bfd_signed_vma signed_check;
11375 /* CBZ cannot jump backward. */
11376 if (r_type == R_ARM_THM_JUMP6)
11377 reloc_signed_min = 0;
11379 if (globals->use_rel)
11381 /* Need to refetch addend. */
11382 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
11383 if (addend & ((howto->src_mask + 1) >> 1))
11385 signed_addend = -1;
11386 signed_addend &= ~ howto->src_mask;
11387 signed_addend |= addend;
11389 else
11390 signed_addend = addend;
11391 /* The value in the insn has been right shifted. We need to
11392 undo this, so that we can perform the address calculation
11393 in terms of bytes. */
11394 signed_addend <<= howto->rightshift;
11396 relocation = value + signed_addend;
11398 relocation -= (input_section->output_section->vma
11399 + input_section->output_offset
11400 + rel->r_offset);
11402 relocation >>= howto->rightshift;
11403 signed_check = relocation;
11405 if (r_type == R_ARM_THM_JUMP6)
11406 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
11407 else
11408 relocation &= howto->dst_mask;
11409 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
11411 bfd_put_16 (input_bfd, relocation, hit_data);
11413 /* Assumes two's complement. */
11414 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11415 return bfd_reloc_overflow;
11417 return bfd_reloc_ok;
11420 case R_ARM_ALU_PCREL7_0:
11421 case R_ARM_ALU_PCREL15_8:
11422 case R_ARM_ALU_PCREL23_15:
11424 bfd_vma insn;
11425 bfd_vma relocation;
11427 insn = bfd_get_32 (input_bfd, hit_data);
11428 if (globals->use_rel)
11430 /* Extract the addend. */
11431 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11432 signed_addend = addend;
11434 relocation = value + signed_addend;
11436 relocation -= (input_section->output_section->vma
11437 + input_section->output_offset
11438 + rel->r_offset);
11439 insn = (insn & ~0xfff)
11440 | ((howto->bitpos << 7) & 0xf00)
11441 | ((relocation >> howto->bitpos) & 0xff);
11442 bfd_put_32 (input_bfd, insn, hit_data);
11444 return bfd_reloc_ok;
11446 case R_ARM_GNU_VTINHERIT:
11447 case R_ARM_GNU_VTENTRY:
11448 return bfd_reloc_ok;
11450 case R_ARM_GOTOFF32:
11451 /* Relocation is relative to the start of the
11452 global offset table. */
11454 BFD_ASSERT (sgot != NULL);
11455 if (sgot == NULL)
11456 return bfd_reloc_notsupported;
11458 /* If we are addressing a Thumb function, we need to adjust the
11459 address by one, so that attempts to call the function pointer will
11460 correctly interpret it as Thumb code. */
11461 if (branch_type == ST_BRANCH_TO_THUMB)
11462 value += 1;
11464 /* Note that sgot->output_offset is not involved in this
11465 calculation. We always want the start of .got. If we
11466 define _GLOBAL_OFFSET_TABLE_ in a different way, as is
11467 permitted by the ABI, we might have to change this
11468 calculation. */
11469 value -= sgot->output_section->vma;
11470 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11471 contents, rel->r_offset, value,
11472 rel->r_addend);
11474 case R_ARM_GOTPC:
11475 /* Use global offset table as symbol value. */
11476 BFD_ASSERT (sgot != NULL);
11478 if (sgot == NULL)
11479 return bfd_reloc_notsupported;
11481 *unresolved_reloc_p = FALSE;
11482 value = sgot->output_section->vma;
11483 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11484 contents, rel->r_offset, value,
11485 rel->r_addend);
11487 case R_ARM_GOT32:
11488 case R_ARM_GOT_PREL:
11489 /* Relocation is to the entry for this symbol in the
11490 global offset table. */
11491 if (sgot == NULL)
11492 return bfd_reloc_notsupported;
11494 if (dynreloc_st_type == STT_GNU_IFUNC
11495 && plt_offset != (bfd_vma) -1
11496 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
11498 /* We have a relocation against a locally-binding STT_GNU_IFUNC
11499 symbol, and the relocation resolves directly to the runtime
11500 target rather than to the .iplt entry. This means that any
11501 .got entry would be the same value as the .igot.plt entry,
11502 so there's no point creating both. */
11503 sgot = globals->root.igotplt;
11504 value = sgot->output_offset + gotplt_offset;
11506 else if (h != NULL)
11508 bfd_vma off;
11510 off = h->got.offset;
11511 BFD_ASSERT (off != (bfd_vma) -1);
11512 if ((off & 1) != 0)
11514 /* We have already processed one GOT relocation against
11515 this symbol. */
11516 off &= ~1;
11517 if (globals->root.dynamic_sections_created
11518 && !SYMBOL_REFERENCES_LOCAL (info, h))
11519 *unresolved_reloc_p = FALSE;
11521 else
11523 Elf_Internal_Rela outrel;
11524 int isrofixup = 0;
11526 if (((h->dynindx != -1) || globals->fdpic_p)
11527 && !SYMBOL_REFERENCES_LOCAL (info, h))
11529 /* If the symbol doesn't resolve locally in a static
11530 object, we have an undefined reference. If the
11531 symbol doesn't resolve locally in a dynamic object,
11532 it should be resolved by the dynamic linker. */
11533 if (globals->root.dynamic_sections_created)
11535 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11536 *unresolved_reloc_p = FALSE;
11538 else
11539 outrel.r_info = 0;
11540 outrel.r_addend = 0;
11542 else
11544 if (dynreloc_st_type == STT_GNU_IFUNC)
11545 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11546 else if (bfd_link_pic (info)
11547 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11548 || h->root.type != bfd_link_hash_undefweak))
11549 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11550 else
11552 outrel.r_info = 0;
11553 if (globals->fdpic_p)
11554 isrofixup = 1;
11556 outrel.r_addend = dynreloc_value;
11559 /* The GOT entry is initialized to zero by default.
11560 See if we should install a different value. */
11561 if (outrel.r_addend != 0
11562 && (globals->use_rel || outrel.r_info == 0))
11564 bfd_put_32 (output_bfd, outrel.r_addend,
11565 sgot->contents + off);
11566 outrel.r_addend = 0;
11569 if (isrofixup)
11570 arm_elf_add_rofixup (output_bfd,
11571 elf32_arm_hash_table(info)->srofixup,
11572 sgot->output_section->vma
11573 + sgot->output_offset + off);
11575 else if (outrel.r_info != 0)
11577 outrel.r_offset = (sgot->output_section->vma
11578 + sgot->output_offset
11579 + off);
11580 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11583 h->got.offset |= 1;
11585 value = sgot->output_offset + off;
11587 else
11589 bfd_vma off;
11591 BFD_ASSERT (local_got_offsets != NULL
11592 && local_got_offsets[r_symndx] != (bfd_vma) -1);
11594 off = local_got_offsets[r_symndx];
11596 /* The offset must always be a multiple of 4. We use the
11597 least significant bit to record whether we have already
11598 generated the necessary reloc. */
11599 if ((off & 1) != 0)
11600 off &= ~1;
11601 else
11603 Elf_Internal_Rela outrel;
11604 int isrofixup = 0;
11606 if (dynreloc_st_type == STT_GNU_IFUNC)
11607 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11608 else if (bfd_link_pic (info))
11609 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11610 else
11612 outrel.r_info = 0;
11613 if (globals->fdpic_p)
11614 isrofixup = 1;
11617 /* The GOT entry is initialized to zero by default.
11618 See if we should install a different value. */
11619 if (globals->use_rel || outrel.r_info == 0)
11620 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
11622 if (isrofixup)
11623 arm_elf_add_rofixup (output_bfd,
11624 globals->srofixup,
11625 sgot->output_section->vma
11626 + sgot->output_offset + off);
11628 else if (outrel.r_info != 0)
11630 outrel.r_addend = addend + dynreloc_value;
11631 outrel.r_offset = (sgot->output_section->vma
11632 + sgot->output_offset
11633 + off);
11634 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11637 local_got_offsets[r_symndx] |= 1;
11640 value = sgot->output_offset + off;
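/* At this point VALUE is the offset of the GOT entry from the GOT
   origin, which is what R_ARM_GOT32 (GOT(S) + A - GOT_ORG) wants.
   R_ARM_GOT_PREL wants GOT(S) + A - P, so add the section VMA here
   and let the PC-relative howto subtract P. */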
11642 if (r_type != R_ARM_GOT32)
11643 value += sgot->output_section->vma;
11645 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11646 contents, rel->r_offset, value,
11647 rel->r_addend);
11649 case R_ARM_TLS_LDO32:
11650 value = value - dtpoff_base (info);
11652 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11653 contents, rel->r_offset, value,
11654 rel->r_addend);
11656 case R_ARM_TLS_LDM32:
11657 case R_ARM_TLS_LDM32_FDPIC:
11659 bfd_vma off;
11661 if (sgot == NULL)
11662 abort ();
11664 off = globals->tls_ldm_got.offset;
11666 if ((off & 1) != 0)
11667 off &= ~1;
11668 else
11670 /* If we don't know the module number, create a relocation
11671 for it. */
11672 if (bfd_link_pic (info))
11674 Elf_Internal_Rela outrel;
11676 if (srelgot == NULL)
11677 abort ();
11679 outrel.r_addend = 0;
11680 outrel.r_offset = (sgot->output_section->vma
11681 + sgot->output_offset + off);
11682 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
11684 if (globals->use_rel)
11685 bfd_put_32 (output_bfd, outrel.r_addend,
11686 sgot->contents + off);
11688 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11690 else
11691 bfd_put_32 (output_bfd, 1, sgot->contents + off);
11693 globals->tls_ldm_got.offset |= 1;
11696 if (r_type == R_ARM_TLS_LDM32_FDPIC)
11698 bfd_put_32(output_bfd,
11699 globals->root.sgot->output_offset + off,
11700 contents + rel->r_offset);
11702 return bfd_reloc_ok;
11704 else
11706 value = sgot->output_section->vma + sgot->output_offset + off
11707 - (input_section->output_section->vma
11708 + input_section->output_offset + rel->r_offset);
11710 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11711 contents, rel->r_offset, value,
11712 rel->r_addend);
11716 case R_ARM_TLS_CALL:
11717 case R_ARM_THM_TLS_CALL:
11718 case R_ARM_TLS_GD32:
11719 case R_ARM_TLS_GD32_FDPIC:
11720 case R_ARM_TLS_IE32:
11721 case R_ARM_TLS_IE32_FDPIC:
11722 case R_ARM_TLS_GOTDESC:
11723 case R_ARM_TLS_DESCSEQ:
11724 case R_ARM_THM_TLS_DESCSEQ:
11726 bfd_vma off, offplt;
11727 int indx = 0;
11728 char tls_type;
11730 BFD_ASSERT (sgot != NULL);
11732 if (h != NULL)
11734 bfd_boolean dyn;
11735 dyn = globals->root.dynamic_sections_created;
11736 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
11737 bfd_link_pic (info),
11739 && (!bfd_link_pic (info)
11740 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11742 *unresolved_reloc_p = FALSE;
11743 indx = h->dynindx;
11745 off = h->got.offset;
11746 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
11747 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
11749 else
11751 BFD_ASSERT (local_got_offsets != NULL);
11752 off = local_got_offsets[r_symndx];
11753 offplt = local_tlsdesc_gotents[r_symndx];
11754 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
11757 /* Linker relaxation happens from one of the
11758 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
11759 if (ELF32_R_TYPE(rel->r_info) != r_type)
11760 tls_type = GOT_TLS_IE;
11762 BFD_ASSERT (tls_type != GOT_UNKNOWN);
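/* As with ordinary GOT entries, the low bit of the recorded offset
   tells us whether this symbol's TLS GOT entries have already been
   initialized (and any dynamic relocations emitted) by an earlier
   relocation against it. */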
11764 if ((off & 1) != 0)
11765 off &= ~1;
11766 else
11768 bfd_boolean need_relocs = FALSE;
11769 Elf_Internal_Rela outrel;
11770 int cur_off = off;
11772 /* The GOT entries have not been initialized yet. Do it
11773 now, and emit any relocations. If both an IE GOT and a
11774 GD GOT are necessary, we emit the GD first. */
11776 if ((bfd_link_pic (info) || indx != 0)
11777 && (h == NULL
11778 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11779 && !resolved_to_zero)
11780 || h->root.type != bfd_link_hash_undefweak))
11782 need_relocs = TRUE;
11783 BFD_ASSERT (srelgot != NULL);
11786 if (tls_type & GOT_TLS_GDESC)
11788 bfd_byte *loc;
11790 /* We should have relaxed, unless this is an undefined
11791 weak symbol. */
11792 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
11793 || bfd_link_pic (info));
11794 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
11795 <= globals->root.sgotplt->size);
11797 outrel.r_addend = 0;
11798 outrel.r_offset = (globals->root.sgotplt->output_section->vma
11799 + globals->root.sgotplt->output_offset
11800 + offplt
11801 + globals->sgotplt_jump_table_size);
11803 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
11804 sreloc = globals->root.srelplt;
11805 loc = sreloc->contents;
11806 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
11807 BFD_ASSERT (loc + RELOC_SIZE (globals)
11808 <= sreloc->contents + sreloc->size);
11810 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
11812 /* For globals, the first word in the relocation gets
11813 the relocation index and the top bit set, or zero,
11814 if we're binding now. For locals, it gets the
11815 symbol's offset in the tls section. */
11816 bfd_put_32 (output_bfd,
11817 !h ? value - elf_hash_table (info)->tls_sec->vma
11818 : info->flags & DF_BIND_NOW ? 0
11819 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
11820 globals->root.sgotplt->contents + offplt
11821 + globals->sgotplt_jump_table_size);
11823 /* Second word in the relocation is always zero. */
11824 bfd_put_32 (output_bfd, 0,
11825 globals->root.sgotplt->contents + offplt
11826 + globals->sgotplt_jump_table_size + 4);
11828 if (tls_type & GOT_TLS_GD)
11830 if (need_relocs)
11832 outrel.r_addend = 0;
11833 outrel.r_offset = (sgot->output_section->vma
11834 + sgot->output_offset
11835 + cur_off);
11836 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
11838 if (globals->use_rel)
11839 bfd_put_32 (output_bfd, outrel.r_addend,
11840 sgot->contents + cur_off);
11842 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11844 if (indx == 0)
11845 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11846 sgot->contents + cur_off + 4);
11847 else
11849 outrel.r_addend = 0;
11850 outrel.r_info = ELF32_R_INFO (indx,
11851 R_ARM_TLS_DTPOFF32);
11852 outrel.r_offset += 4;
11854 if (globals->use_rel)
11855 bfd_put_32 (output_bfd, outrel.r_addend,
11856 sgot->contents + cur_off + 4);
11858 elf32_arm_add_dynreloc (output_bfd, info,
11859 srelgot, &outrel);
11862 else
11864 /* If we are not emitting relocations for a
11865 general dynamic reference, then we must be in a
11866 static link or an executable link with the
11867 symbol binding locally. Mark it as belonging
11868 to module 1, the executable. */
11869 bfd_put_32 (output_bfd, 1,
11870 sgot->contents + cur_off);
11871 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11872 sgot->contents + cur_off + 4);
11875 cur_off += 8;
11878 if (tls_type & GOT_TLS_IE)
11880 if (need_relocs)
11882 if (indx == 0)
11883 outrel.r_addend = value - dtpoff_base (info);
11884 else
11885 outrel.r_addend = 0;
11886 outrel.r_offset = (sgot->output_section->vma
11887 + sgot->output_offset
11888 + cur_off);
11889 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
11891 if (globals->use_rel)
11892 bfd_put_32 (output_bfd, outrel.r_addend,
11893 sgot->contents + cur_off);
11895 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11897 else
11898 bfd_put_32 (output_bfd, tpoff (info, value),
11899 sgot->contents + cur_off);
11900 cur_off += 4;
11903 if (h != NULL)
11904 h->got.offset |= 1;
11905 else
11906 local_got_offsets[r_symndx] |= 1;
11909 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
11910 off += 8;
11911 else if (tls_type & GOT_TLS_GDESC)
11912 off = offplt;
11914 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
11915 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
11917 bfd_signed_vma offset;
11918 /* TLS stubs are ARM mode. The original symbol is a
11919 data object, so branch_type is bogus. */
11920 branch_type = ST_BRANCH_TO_ARM;
11921 enum elf32_arm_stub_type stub_type
11922 = arm_type_of_stub (info, input_section, rel,
11923 st_type, &branch_type,
11924 (struct elf32_arm_link_hash_entry *)h,
11925 globals->tls_trampoline, globals->root.splt,
11926 input_bfd, sym_name);
11928 if (stub_type != arm_stub_none)
11930 struct elf32_arm_stub_hash_entry *stub_entry
11931 = elf32_arm_get_stub_entry
11932 (input_section, globals->root.splt, 0, rel,
11933 globals, stub_type);
11934 offset = (stub_entry->stub_offset
11935 + stub_entry->stub_sec->output_offset
11936 + stub_entry->stub_sec->output_section->vma);
11938 else
11939 offset = (globals->root.splt->output_section->vma
11940 + globals->root.splt->output_offset
11941 + globals->tls_trampoline);
11943 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
11945 unsigned long inst;
11947 offset -= (input_section->output_section->vma
11948 + input_section->output_offset
11949 + rel->r_offset + 8);
11951 inst = offset >> 2;
11952 inst &= 0x00ffffff;
11953 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
11955 else
11957 /* Thumb blx encodes the offset in a complicated
11958 fashion. */
11959 unsigned upper_insn, lower_insn;
11960 unsigned neg;
11962 offset -= (input_section->output_section->vma
11963 + input_section->output_offset
11964 + rel->r_offset + 4);
11966 if (stub_type != arm_stub_none
11967 && arm_stub_is_thumb (stub_type))
11969 lower_insn = 0xd000;
11971 else
11973 lower_insn = 0xc000;
11974 /* Round up the offset to a word boundary. */
11975 offset = (offset + 2) & ~2;
11978 neg = offset < 0;
11979 upper_insn = (0xf000
11980 | ((offset >> 12) & 0x3ff)
11981 | (neg << 10));
11982 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
11983 | (((!((offset >> 22) & 1)) ^ neg) << 11)
11984 | ((offset >> 1) & 0x7ff);
11985 bfd_put_16 (input_bfd, upper_insn, hit_data);
11986 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11987 return bfd_reloc_ok;
11990 /* These relocations need special care: besides pointing
11991 somewhere in .gotplt, their addend must be adjusted
11992 according to the type of instruction the relocation is
11993 applied to. */
11994 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
11996 unsigned long data, insn;
11997 unsigned thumb;
11999 data = bfd_get_32 (input_bfd, hit_data);
12000 thumb = data & 1;
12001 data &= ~1u;
12003 if (thumb)
12005 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
12006 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
12007 insn = (insn << 16)
12008 | bfd_get_16 (input_bfd,
12009 contents + rel->r_offset - data + 2);
12010 if ((insn & 0xf800c000) == 0xf000c000)
12011 /* bl/blx */
12012 value = -6;
12013 else if ((insn & 0xffffff00) == 0x4400)
12014 /* add */
12015 value = -5;
12016 else
12018 _bfd_error_handler
12019 /* xgettext:c-format */
12020 (_("%pB(%pA+%#" PRIx64 "): "
12021 "unexpected %s instruction '%#lx' "
12022 "referenced by TLS_GOTDESC"),
12023 input_bfd, input_section, (uint64_t) rel->r_offset,
12024 "Thumb", insn);
12025 return bfd_reloc_notsupported;
12028 else
12030 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
12032 switch (insn >> 24)
12034 case 0xeb: /* bl */
12035 case 0xfa: /* blx */
12036 value = -4;
12037 break;
12039 case 0xe0: /* add */
12040 value = -8;
12041 break;
12043 default:
12044 _bfd_error_handler
12045 /* xgettext:c-format */
12046 (_("%pB(%pA+%#" PRIx64 "): "
12047 "unexpected %s instruction '%#lx' "
12048 "referenced by TLS_GOTDESC"),
12049 input_bfd, input_section, (uint64_t) rel->r_offset,
12050 "ARM", insn);
12051 return bfd_reloc_notsupported;
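/* The instruction was recognized: turn VALUE (the bias chosen above)
   into a PC-relative reference to the descriptor slot in .got.plt,
   i.e. OFF bytes past the reserved sgotplt_jump_table_size area -- the
   same slot the descriptor words were stored in earlier. */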
12055 value += ((globals->root.sgotplt->output_section->vma
12056 + globals->root.sgotplt->output_offset + off)
12057 - (input_section->output_section->vma
12058 + input_section->output_offset
12059 + rel->r_offset)
12060 + globals->sgotplt_jump_table_size);
12062 else
12063 value = ((globals->root.sgot->output_section->vma
12064 + globals->root.sgot->output_offset + off)
12065 - (input_section->output_section->vma
12066 + input_section->output_offset + rel->r_offset));
12068 if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
12069 r_type == R_ARM_TLS_IE32_FDPIC))
12071 /* For FDPIC relocations, resolve to the offset of the GOT
12072 entry from the start of GOT. */
12073 bfd_put_32(output_bfd,
12074 globals->root.sgot->output_offset + off,
12075 contents + rel->r_offset);
12077 return bfd_reloc_ok;
12079 else
12081 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12082 contents, rel->r_offset, value,
12083 rel->r_addend);
12087 case R_ARM_TLS_LE32:
12088 if (bfd_link_dll (info))
12090 _bfd_error_handler
12091 /* xgettext:c-format */
12092 (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
12093 "in shared object"),
12094 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
12095 return bfd_reloc_notsupported;
12097 else
12098 value = tpoff (info, value);
12100 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12101 contents, rel->r_offset, value,
12102 rel->r_addend);
12104 case R_ARM_V4BX:
12105 if (globals->fix_v4bx)
12107 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12109 /* Ensure that we have a BX instruction. */
12110 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12112 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12114 /* Branch to veneer. */
12115 bfd_vma glue_addr;
12116 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12117 glue_addr -= input_section->output_section->vma
12118 + input_section->output_offset
12119 + rel->r_offset + 8;
12120 insn = (insn & 0xf0000000) | 0x0a000000
12121 | ((glue_addr >> 2) & 0x00ffffff);
12123 else
12125 /* Preserve Rm (lowest four bits) and the condition code
12126 (highest four bits). Other bits encode MOV PC,Rm. */
12127 insn = (insn & 0xf000000f) | 0x01a0f000;
12130 bfd_put_32 (input_bfd, insn, hit_data);
12132 return bfd_reloc_ok;
12134 case R_ARM_MOVW_ABS_NC:
12135 case R_ARM_MOVT_ABS:
12136 case R_ARM_MOVW_PREL_NC:
12137 case R_ARM_MOVT_PREL:
12138 /* Until we properly support segment-base-relative addressing then
12139 we assume the segment base to be zero, as for the group relocations.
12140 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12141 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
12142 case R_ARM_MOVW_BREL_NC:
12143 case R_ARM_MOVW_BREL:
12144 case R_ARM_MOVT_BREL:
12146 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
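/* MOVW/MOVT encode their 16-bit immediate as imm4:imm12 (instruction
   bits [19:16] and [11:0]).  Under REL the two fields are glued back
   together below and the (x ^ 0x8000) - 0x8000 idiom sign-extends the
   result from 16 bits. */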
12148 if (globals->use_rel)
12150 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
12151 signed_addend = (addend ^ 0x8000) - 0x8000;
12154 value += signed_addend;
12156 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12157 value -= (input_section->output_section->vma
12158 + input_section->output_offset + rel->r_offset);
12160 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12161 return bfd_reloc_overflow;
12163 if (branch_type == ST_BRANCH_TO_THUMB)
12164 value |= 1;
12166 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12167 || r_type == R_ARM_MOVT_BREL)
12168 value >>= 16;
12170 insn &= 0xfff0f000;
12171 insn |= value & 0xfff;
12172 insn |= (value & 0xf000) << 4;
12173 bfd_put_32 (input_bfd, insn, hit_data);
12175 return bfd_reloc_ok;
12177 case R_ARM_THM_MOVW_ABS_NC:
12178 case R_ARM_THM_MOVT_ABS:
12179 case R_ARM_THM_MOVW_PREL_NC:
12180 case R_ARM_THM_MOVT_PREL:
12181 /* Until we properly support segment-base-relative addressing then
12182 we assume the segment base to be zero, as for the above relocations.
12183 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12184 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12185 as R_ARM_THM_MOVT_ABS. */
12186 case R_ARM_THM_MOVW_BREL_NC:
12187 case R_ARM_THM_MOVW_BREL:
12188 case R_ARM_THM_MOVT_BREL:
12190 bfd_vma insn;
12192 insn = bfd_get_16 (input_bfd, hit_data) << 16;
12193 insn |= bfd_get_16 (input_bfd, hit_data + 2);
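/* The Thumb-2 MOVW/MOVT immediate is split as imm4:i:imm3:imm8 across
   the two halfwords; under REL it is reassembled below and
   sign-extended from 16 bits with the same (x ^ 0x8000) - 0x8000 idiom
   as the ARM variant. */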
12195 if (globals->use_rel)
12197 addend = ((insn >> 4) & 0xf000)
12198 | ((insn >> 15) & 0x0800)
12199 | ((insn >> 4) & 0x0700)
12200 | (insn & 0x00ff);
12201 signed_addend = (addend ^ 0x8000) - 0x8000;
12204 value += signed_addend;
12206 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12207 value -= (input_section->output_section->vma
12208 + input_section->output_offset + rel->r_offset);
12210 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12211 return bfd_reloc_overflow;
12213 if (branch_type == ST_BRANCH_TO_THUMB)
12214 value |= 1;
12216 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12217 || r_type == R_ARM_THM_MOVT_BREL)
12218 value >>= 16;
12220 insn &= 0xfbf08f00;
12221 insn |= (value & 0xf000) << 4;
12222 insn |= (value & 0x0800) << 15;
12223 insn |= (value & 0x0700) << 4;
12224 insn |= (value & 0x00ff);
12226 bfd_put_16 (input_bfd, insn >> 16, hit_data);
12227 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12229 return bfd_reloc_ok;
12231 case R_ARM_ALU_PC_G0_NC:
12232 case R_ARM_ALU_PC_G1_NC:
12233 case R_ARM_ALU_PC_G0:
12234 case R_ARM_ALU_PC_G1:
12235 case R_ARM_ALU_PC_G2:
12236 case R_ARM_ALU_SB_G0_NC:
12237 case R_ARM_ALU_SB_G1_NC:
12238 case R_ARM_ALU_SB_G0:
12239 case R_ARM_ALU_SB_G1:
12240 case R_ARM_ALU_SB_G2:
12242 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12243 bfd_vma pc = input_section->output_section->vma
12244 + input_section->output_offset + rel->r_offset;
12245 /* sb is the origin of the *segment* containing the symbol. */
12246 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12247 bfd_vma residual;
12248 bfd_vma g_n;
12249 bfd_signed_vma signed_value;
12250 int group = 0;
12252 /* Determine which group of bits to select. */
12253 switch (r_type)
12255 case R_ARM_ALU_PC_G0_NC:
12256 case R_ARM_ALU_PC_G0:
12257 case R_ARM_ALU_SB_G0_NC:
12258 case R_ARM_ALU_SB_G0:
12259 group = 0;
12260 break;
12262 case R_ARM_ALU_PC_G1_NC:
12263 case R_ARM_ALU_PC_G1:
12264 case R_ARM_ALU_SB_G1_NC:
12265 case R_ARM_ALU_SB_G1:
12266 group = 1;
12267 break;
12269 case R_ARM_ALU_PC_G2:
12270 case R_ARM_ALU_SB_G2:
12271 group = 2;
12272 break;
12274 default:
12275 abort ();
12278 /* If REL, extract the addend from the insn. If RELA, it will
12279 have already been fetched for us. */
12280 if (globals->use_rel)
12282 int negative;
12283 bfd_vma constant = insn & 0xff;
12284 bfd_vma rotation = (insn & 0xf00) >> 8;
12286 if (rotation == 0)
12287 signed_addend = constant;
12288 else
12290 /* Compensate for the fact that in the instruction, the
12291 rotation is stored in multiples of 2 bits. */
12292 rotation *= 2;
12294 /* Rotate "constant" right by "rotation" bits. */
12295 signed_addend = (constant >> rotation) |
12296 (constant << (8 * sizeof (bfd_vma) - rotation));
12299 /* Determine if the instruction is an ADD or a SUB.
12300 (For REL, this determines the sign of the addend.) */
12301 negative = identify_add_or_sub (insn);
12302 if (negative == 0)
12304 _bfd_error_handler
12305 /* xgettext:c-format */
12306 (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12307 "are allowed for ALU group relocations"),
12308 input_bfd, input_section, (uint64_t) rel->r_offset);
12309 return bfd_reloc_overflow;
12312 signed_addend *= negative;
12315 /* Compute the value (X) to go in the place. */
12316 if (r_type == R_ARM_ALU_PC_G0_NC
12317 || r_type == R_ARM_ALU_PC_G1_NC
12318 || r_type == R_ARM_ALU_PC_G0
12319 || r_type == R_ARM_ALU_PC_G1
12320 || r_type == R_ARM_ALU_PC_G2)
12321 /* PC relative. */
12322 signed_value = value - pc + signed_addend;
12323 else
12324 /* Section base relative. */
12325 signed_value = value - sb + signed_addend;
12327 /* If the target symbol is a Thumb function, then set the
12328 Thumb bit in the address. */
12329 if (branch_type == ST_BRANCH_TO_THUMB)
12330 signed_value |= 1;
12332 /* Calculate the value of the relevant G_n, in encoded
12333 constant-with-rotation format. */
12334 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12335 group, &residual);
12337 /* Check for overflow if required. */
12338 if ((r_type == R_ARM_ALU_PC_G0
12339 || r_type == R_ARM_ALU_PC_G1
12340 || r_type == R_ARM_ALU_PC_G2
12341 || r_type == R_ARM_ALU_SB_G0
12342 || r_type == R_ARM_ALU_SB_G1
12343 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12345 _bfd_error_handler
12346 /* xgettext:c-format */
12347 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12348 "splitting %#" PRIx64 " for group relocation %s"),
12349 input_bfd, input_section, (uint64_t) rel->r_offset,
12350 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12351 howto->name);
12352 return bfd_reloc_overflow;
12355 /* Mask out the value and the ADD/SUB part of the opcode; take care
12356 not to destroy the S bit. */
12357 insn &= 0xff1ff000;
12359 /* Set the opcode according to whether the value to go in the
12360 place is negative. */
12361 if (signed_value < 0)
12362 insn |= 1 << 22;
12363 else
12364 insn |= 1 << 23;
12366 /* Encode the offset. */
12367 insn |= g_n;
12369 bfd_put_32 (input_bfd, insn, hit_data);
12371 return bfd_reloc_ok;
12373 case R_ARM_LDR_PC_G0:
12374 case R_ARM_LDR_PC_G1:
12375 case R_ARM_LDR_PC_G2:
12376 case R_ARM_LDR_SB_G0:
12377 case R_ARM_LDR_SB_G1:
12378 case R_ARM_LDR_SB_G2:
12380 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12381 bfd_vma pc = input_section->output_section->vma
12382 + input_section->output_offset + rel->r_offset;
12383 /* sb is the origin of the *segment* containing the symbol. */
12384 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12385 bfd_vma residual;
12386 bfd_signed_vma signed_value;
12387 int group = 0;
12389 /* Determine which groups of bits to calculate. */
12390 switch (r_type)
12392 case R_ARM_LDR_PC_G0:
12393 case R_ARM_LDR_SB_G0:
12394 group = 0;
12395 break;
12397 case R_ARM_LDR_PC_G1:
12398 case R_ARM_LDR_SB_G1:
12399 group = 1;
12400 break;
12402 case R_ARM_LDR_PC_G2:
12403 case R_ARM_LDR_SB_G2:
12404 group = 2;
12405 break;
12407 default:
12408 abort ();
12411 /* If REL, extract the addend from the insn. If RELA, it will
12412 have already been fetched for us. */
12413 if (globals->use_rel)
12415 int negative = (insn & (1 << 23)) ? 1 : -1;
12416 signed_addend = negative * (insn & 0xfff);
12419 /* Compute the value (X) to go in the place. */
12420 if (r_type == R_ARM_LDR_PC_G0
12421 || r_type == R_ARM_LDR_PC_G1
12422 || r_type == R_ARM_LDR_PC_G2)
12423 /* PC relative. */
12424 signed_value = value - pc + signed_addend;
12425 else
12426 /* Section base relative. */
12427 signed_value = value - sb + signed_addend;
12429 /* Calculate the value of the relevant G_{n-1} to obtain
12430 the residual at that stage. */
12431 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12432 group - 1, &residual);
12434 /* Check for overflow. */
12435 if (residual >= 0x1000)
12437 _bfd_error_handler
12438 /* xgettext:c-format */
12439 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12440 "splitting %#" PRIx64 " for group relocation %s"),
12441 input_bfd, input_section, (uint64_t) rel->r_offset,
12442 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12443 howto->name);
12444 return bfd_reloc_overflow;
12447 /* Mask out the value and U bit. */
12448 insn &= 0xff7ff000;
12450 /* Set the U bit if the value to go in the place is non-negative. */
12451 if (signed_value >= 0)
12452 insn |= 1 << 23;
12454 /* Encode the offset. */
12455 insn |= residual;
12457 bfd_put_32 (input_bfd, insn, hit_data);
12459 return bfd_reloc_ok;
12461 case R_ARM_LDRS_PC_G0:
12462 case R_ARM_LDRS_PC_G1:
12463 case R_ARM_LDRS_PC_G2:
12464 case R_ARM_LDRS_SB_G0:
12465 case R_ARM_LDRS_SB_G1:
12466 case R_ARM_LDRS_SB_G2:
12468 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12469 bfd_vma pc = input_section->output_section->vma
12470 + input_section->output_offset + rel->r_offset;
12471 /* sb is the origin of the *segment* containing the symbol. */
12472 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12473 bfd_vma residual;
12474 bfd_signed_vma signed_value;
12475 int group = 0;
12477 /* Determine which groups of bits to calculate. */
12478 switch (r_type)
12480 case R_ARM_LDRS_PC_G0:
12481 case R_ARM_LDRS_SB_G0:
12482 group = 0;
12483 break;
12485 case R_ARM_LDRS_PC_G1:
12486 case R_ARM_LDRS_SB_G1:
12487 group = 1;
12488 break;
12490 case R_ARM_LDRS_PC_G2:
12491 case R_ARM_LDRS_SB_G2:
12492 group = 2;
12493 break;
12495 default:
12496 abort ();
12499 /* If REL, extract the addend from the insn. If RELA, it will
12500 have already been fetched for us. */
12501 if (globals->use_rel)
12503 int negative = (insn & (1 << 23)) ? 1 : -1;
12504 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
12507 /* Compute the value (X) to go in the place. */
12508 if (r_type == R_ARM_LDRS_PC_G0
12509 || r_type == R_ARM_LDRS_PC_G1
12510 || r_type == R_ARM_LDRS_PC_G2)
12511 /* PC relative. */
12512 signed_value = value - pc + signed_addend;
12513 else
12514 /* Section base relative. */
12515 signed_value = value - sb + signed_addend;
12517 /* Calculate the value of the relevant G_{n-1} to obtain
12518 the residual at that stage. */
12519 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12520 group - 1, &residual);
12522 /* Check for overflow. */
12523 if (residual >= 0x100)
12525 _bfd_error_handler
12526 /* xgettext:c-format */
12527 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12528 "splitting %#" PRIx64 " for group relocation %s"),
12529 input_bfd, input_section, (uint64_t) rel->r_offset,
12530 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12531 howto->name);
12532 return bfd_reloc_overflow;
12535 /* Mask out the value and U bit. */
12536 insn &= 0xff7ff0f0;
12538 /* Set the U bit if the value to go in the place is non-negative. */
12539 if (signed_value >= 0)
12540 insn |= 1 << 23;
12542 /* Encode the offset. */
12543 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12545 bfd_put_32 (input_bfd, insn, hit_data);
12547 return bfd_reloc_ok;
12549 case R_ARM_LDC_PC_G0:
12550 case R_ARM_LDC_PC_G1:
12551 case R_ARM_LDC_PC_G2:
12552 case R_ARM_LDC_SB_G0:
12553 case R_ARM_LDC_SB_G1:
12554 case R_ARM_LDC_SB_G2:
12556 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12557 bfd_vma pc = input_section->output_section->vma
12558 + input_section->output_offset + rel->r_offset;
12559 /* sb is the origin of the *segment* containing the symbol. */
12560 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12561 bfd_vma residual;
12562 bfd_signed_vma signed_value;
12563 int group = 0;
12565 /* Determine which groups of bits to calculate. */
12566 switch (r_type)
12568 case R_ARM_LDC_PC_G0:
12569 case R_ARM_LDC_SB_G0:
12570 group = 0;
12571 break;
12573 case R_ARM_LDC_PC_G1:
12574 case R_ARM_LDC_SB_G1:
12575 group = 1;
12576 break;
12578 case R_ARM_LDC_PC_G2:
12579 case R_ARM_LDC_SB_G2:
12580 group = 2;
12581 break;
12583 default:
12584 abort ();
12587 /* If REL, extract the addend from the insn. If RELA, it will
12588 have already been fetched for us. */
12589 if (globals->use_rel)
12591 int negative = (insn & (1 << 23)) ? 1 : -1;
12592 signed_addend = negative * ((insn & 0xff) << 2);
12595 /* Compute the value (X) to go in the place. */
12596 if (r_type == R_ARM_LDC_PC_G0
12597 || r_type == R_ARM_LDC_PC_G1
12598 || r_type == R_ARM_LDC_PC_G2)
12599 /* PC relative. */
12600 signed_value = value - pc + signed_addend;
12601 else
12602 /* Section base relative. */
12603 signed_value = value - sb + signed_addend;
12605 /* Calculate the value of the relevant G_{n-1} to obtain
12606 the residual at that stage. */
12607 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12608 group - 1, &residual);
12610 /* Check for overflow. (The absolute value to go in the place must be
12611 divisible by four and, after having been divided by four, must
12612 fit in eight bits.) */
12613 if ((residual & 0x3) != 0 || residual >= 0x400)
12615 _bfd_error_handler
12616 /* xgettext:c-format */
12617 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12618 "splitting %#" PRIx64 " for group relocation %s"),
12619 input_bfd, input_section, (uint64_t) rel->r_offset,
12620 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12621 howto->name);
12622 return bfd_reloc_overflow;
12625 /* Mask out the value and U bit. */
12626 insn &= 0xff7fff00;
12628 /* Set the U bit if the value to go in the place is non-negative. */
12629 if (signed_value >= 0)
12630 insn |= 1 << 23;
12632 /* Encode the offset. */
12633 insn |= residual >> 2;
12635 bfd_put_32 (input_bfd, insn, hit_data);
12637 return bfd_reloc_ok;
12639 case R_ARM_THM_ALU_ABS_G0_NC:
12640 case R_ARM_THM_ALU_ABS_G1_NC:
12641 case R_ARM_THM_ALU_ABS_G2_NC:
12642 case R_ARM_THM_ALU_ABS_G3_NC:
12644 const int shift_array[4] = {0, 8, 16, 24};
12645 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12646 bfd_vma addr = value;
12647 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
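/* Each of these relocations deposits one byte of the relocated address
   (Thumb bit included for Thumb targets) into the 8-bit immediate field
   of a 16-bit Thumb instruction, hence the 0/8/16/24 shifts. */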
12649 /* Compute address. */
12650 if (globals->use_rel)
12651 signed_addend = insn & 0xff;
12652 addr += signed_addend;
12653 if (branch_type == ST_BRANCH_TO_THUMB)
12654 addr |= 1;
12655 /* Clean imm8 insn. */
12656 insn &= 0xff00;
12657 /* And update with correct part of address. */
12658 insn |= (addr >> shift) & 0xff;
12659 /* Update insn. */
12660 bfd_put_16 (input_bfd, insn, hit_data);
12663 *unresolved_reloc_p = FALSE;
12664 return bfd_reloc_ok;
12666 case R_ARM_GOTOFFFUNCDESC:
12668 if (h == NULL)
12670 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12671 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12672 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12673 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12674 bfd_vma seg = -1;
12676 if (bfd_link_pic(info) && dynindx == 0)
12677 abort();
12679 /* Resolve relocation. */
12680 bfd_put_32(output_bfd, (offset + sgot->output_offset)
12681 , contents + rel->r_offset);
12682 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12683 not done yet. */
12684 arm_elf_fill_funcdesc(output_bfd, info,
12685 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12686 dynindx, offset, addr, dynreloc_value, seg);
12688 else
12690 int dynindx;
12691 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12692 bfd_vma addr;
12693 bfd_vma seg = -1;
12695 /* For static binaries, sym_sec can be null. */
12696 if (sym_sec)
12698 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12699 addr = dynreloc_value - sym_sec->output_section->vma;
12701 else
12703 dynindx = 0;
12704 addr = 0;
12707 if (bfd_link_pic(info) && dynindx == 0)
12708 abort();
12710 /* This case cannot occur: the funcdesc would be allocated by the
12711 dynamic loader, so we could not resolve the relocation here. */
12712 if (h->dynindx != -1)
12713 abort();
12715 /* Resolve relocation. */
12716 bfd_put_32(output_bfd, (offset + sgot->output_offset),
12717 contents + rel->r_offset);
12718 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12719 arm_elf_fill_funcdesc(output_bfd, info,
12720 &eh->fdpic_cnts.funcdesc_offset,
12721 dynindx, offset, addr, dynreloc_value, seg);
12724 *unresolved_reloc_p = FALSE;
12725 return bfd_reloc_ok;
12727 case R_ARM_GOTFUNCDESC:
12729 if (h != NULL)
12731 Elf_Internal_Rela outrel;
12733 /* Resolve relocation. */
12734 bfd_put_32(output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12735 + sgot->output_offset),
12736 contents + rel->r_offset);
12737 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12738 if(h->dynindx == -1)
12740 int dynindx;
12741 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12742 bfd_vma addr;
12743 bfd_vma seg = -1;
12745 /* For static binaries sym_sec can be null. */
12746 if (sym_sec)
12748 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12749 addr = dynreloc_value - sym_sec->output_section->vma;
12751 else
12753 dynindx = 0;
12754 addr = 0;
12757 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12758 arm_elf_fill_funcdesc(output_bfd, info,
12759 &eh->fdpic_cnts.funcdesc_offset,
12760 dynindx, offset, addr, dynreloc_value, seg);
12763 /* Add a dynamic relocation on GOT entry if not already done. */
12764 if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12766 if (h->dynindx == -1)
12768 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12769 if (h->root.type == bfd_link_hash_undefweak)
12770 bfd_put_32(output_bfd, 0, sgot->contents
12771 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12772 else
12773 bfd_put_32(output_bfd, sgot->output_section->vma
12774 + sgot->output_offset
12775 + (eh->fdpic_cnts.funcdesc_offset & ~1),
12776 sgot->contents
12777 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12779 else
12781 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12783 outrel.r_offset = sgot->output_section->vma
12784 + sgot->output_offset
12785 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12786 outrel.r_addend = 0;
12787 if (h->dynindx == -1 && !bfd_link_pic(info))
12788 if (h->root.type == bfd_link_hash_undefweak)
12789 arm_elf_add_rofixup(output_bfd, globals->srofixup, -1);
12790 else
12791 arm_elf_add_rofixup(output_bfd, globals->srofixup,
12792 outrel.r_offset);
12793 else
12794 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12795 eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12798 else
12800 /* Such a relocation against a static function should not have
12801 been emitted by the compiler. */
12802 abort();
12805 *unresolved_reloc_p = FALSE;
12806 return bfd_reloc_ok;
12808 case R_ARM_FUNCDESC:
12810 if (h == NULL)
12812 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12813 Elf_Internal_Rela outrel;
12814 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12815 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12816 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12817 bfd_vma seg = -1;
12819 if (bfd_link_pic(info) && dynindx == 0)
12820 abort();
12822 /* Replace static FUNCDESC relocation with a
12823 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12824 an executable. */
12825 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12826 outrel.r_offset = input_section->output_section->vma
12827 + input_section->output_offset + rel->r_offset;
12828 outrel.r_addend = 0;
12829 if (bfd_link_pic(info))
12830 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12831 else
12832 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12834 bfd_put_32 (input_bfd, sgot->output_section->vma
12835 + sgot->output_offset + offset, hit_data);
12837 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12838 arm_elf_fill_funcdesc(output_bfd, info,
12839 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12840 dynindx, offset, addr, dynreloc_value, seg);
12842 else
12844 if (h->dynindx == -1)
12846 int dynindx;
12847 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12848 bfd_vma addr;
12849 bfd_vma seg = -1;
12850 Elf_Internal_Rela outrel;
12852 /* For static binaries sym_sec can be null. */
12853 if (sym_sec)
12855 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12856 addr = dynreloc_value - sym_sec->output_section->vma;
12858 else
12860 dynindx = 0;
12861 addr = 0;
12864 if (bfd_link_pic(info) && dynindx == 0)
12865 abort();
12867 /* Replace static FUNCDESC relocation with a
12868 R_ARM_RELATIVE dynamic relocation. */
12869 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12870 outrel.r_offset = input_section->output_section->vma
12871 + input_section->output_offset + rel->r_offset;
12872 outrel.r_addend = 0;
12873 if (bfd_link_pic(info))
12874 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12875 else
12876 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12878 bfd_put_32 (input_bfd, sgot->output_section->vma
12879 + sgot->output_offset + offset, hit_data);
12881 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12882 arm_elf_fill_funcdesc(output_bfd, info,
12883 &eh->fdpic_cnts.funcdesc_offset,
12884 dynindx, offset, addr, dynreloc_value, seg);
12886 else
12888 Elf_Internal_Rela outrel;
12890 /* Add a dynamic relocation. */
12891 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12892 outrel.r_offset = input_section->output_section->vma
12893 + input_section->output_offset + rel->r_offset;
12894 outrel.r_addend = 0;
12895 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12899 *unresolved_reloc_p = FALSE;
12900 return bfd_reloc_ok;
12902 case R_ARM_THM_BF16:
12904 bfd_vma relocation;
12905 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12906 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12908 if (globals->use_rel)
12910 bfd_vma immA = (upper_insn & 0x001f);
12911 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12912 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12913 addend = (immA << 12);
12914 addend |= (immB << 2);
12915 addend |= (immC << 1);
12916 addend |= 1;
12917 /* Sign extend. */
12918 signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
12921 relocation = value + signed_addend;
12922 relocation -= (input_section->output_section->vma
12923 + input_section->output_offset
12924 + rel->r_offset);
12926 /* Put RELOCATION back into the insn. */
12928 bfd_vma immA = (relocation & 0x0001f000) >> 12;
12929 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12930 bfd_vma immC = (relocation & 0x00000002) >> 1;
12932 upper_insn = (upper_insn & 0xffe0) | immA;
12933 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12936 /* Put the relocated value back in the object file: */
12937 bfd_put_16 (input_bfd, upper_insn, hit_data);
12938 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12940 return bfd_reloc_ok;
12943 case R_ARM_THM_BF12:
12945 bfd_vma relocation;
12946 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12947 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12949 if (globals->use_rel)
12951 bfd_vma immA = (upper_insn & 0x0001);
12952 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12953 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12954 addend = (immA << 12);
12955 addend |= (immB << 2);
12956 addend |= (immC << 1);
12957 addend |= 1;
12958 /* Sign extend. */
12959 addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
12960 signed_addend = addend;
12963 relocation = value + signed_addend;
12964 relocation -= (input_section->output_section->vma
12965 + input_section->output_offset
12966 + rel->r_offset);
12968 /* Put RELOCATION back into the insn. */
12970 bfd_vma immA = (relocation & 0x00001000) >> 12;
12971 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12972 bfd_vma immC = (relocation & 0x00000002) >> 1;
12974 upper_insn = (upper_insn & 0xfffe) | immA;
12975 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12978 /* Put the relocated value back in the object file: */
12979 bfd_put_16 (input_bfd, upper_insn, hit_data);
12980 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12982 return bfd_reloc_ok;
12985 case R_ARM_THM_BF18:
12987 bfd_vma relocation;
12988 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12989 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12991 if (globals->use_rel)
12993 bfd_vma immA = (upper_insn & 0x007f);
12994 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12995 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12996 addend = (immA << 12);
12997 addend |= (immB << 2);
12998 addend |= (immC << 1);
12999 addend |= 1;
13000 /* Sign extend. */
13001 addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
13002 signed_addend = addend;
13005 relocation = value + signed_addend;
13006 relocation -= (input_section->output_section->vma
13007 + input_section->output_offset
13008 + rel->r_offset);
13010 /* Put RELOCATION back into the insn. */
13012 bfd_vma immA = (relocation & 0x0007f000) >> 12;
13013 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
13014 bfd_vma immC = (relocation & 0x00000002) >> 1;
13016 upper_insn = (upper_insn & 0xff80) | immA;
13017 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
13020 /* Put the relocated value back in the object file: */
13021 bfd_put_16 (input_bfd, upper_insn, hit_data);
13022 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
13024 return bfd_reloc_ok;
13027 default:
13028 return bfd_reloc_notsupported;
13032 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
13033 static void
13034 arm_add_to_rel (bfd * abfd,
13035 bfd_byte * address,
13036 reloc_howto_type * howto,
13037 bfd_signed_vma increment)
13039 bfd_signed_vma addend;
13041 if (howto->type == R_ARM_THM_CALL
13042 || howto->type == R_ARM_THM_JUMP24)
13044 int upper_insn, lower_insn;
13045 int upper, lower;
13047 upper_insn = bfd_get_16 (abfd, address);
13048 lower_insn = bfd_get_16 (abfd, address + 2);
13049 upper = upper_insn & 0x7ff;
13050 lower = lower_insn & 0x7ff;
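/* The addend is reassembled Thumb-1 style from just the two 11-bit
   immediate halves; the Thumb-2 S/J1/J2 bits are left untouched by
   this adjustment. */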
13052 addend = (upper << 12) | (lower << 1);
13053 addend += increment;
13054 addend >>= 1;
13056 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
13057 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
13059 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
13060 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
13062 else
13064 bfd_vma contents;
13066 contents = bfd_get_32 (abfd, address);
13068 /* Get the (signed) value from the instruction. */
13069 addend = contents & howto->src_mask;
13070 if (addend & ((howto->src_mask + 1) >> 1))
13072 bfd_signed_vma mask;
13074 mask = -1;
13075 mask &= ~ howto->src_mask;
13076 addend |= mask;
13079 /* Add in the increment (which is a byte value). */
13080 switch (howto->type)
13082 default:
13083 addend += increment;
13084 break;
13086 case R_ARM_PC24:
13087 case R_ARM_PLT32:
13088 case R_ARM_CALL:
13089 case R_ARM_JUMP24:
13090 addend <<= howto->size;
13091 addend += increment;
13093 /* Should we check for overflow here?  */
13095 /* Drop any undesired bits. */
13096 addend >>= howto->rightshift;
13097 break;
13100 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
13102 bfd_put_32 (abfd, contents, address);
13106 #define IS_ARM_TLS_RELOC(R_TYPE) \
13107 ((R_TYPE) == R_ARM_TLS_GD32 \
13108 || (R_TYPE) == R_ARM_TLS_GD32_FDPIC \
13109 || (R_TYPE) == R_ARM_TLS_LDO32 \
13110 || (R_TYPE) == R_ARM_TLS_LDM32 \
13111 || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC \
13112 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
13113 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
13114 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
13115 || (R_TYPE) == R_ARM_TLS_LE32 \
13116 || (R_TYPE) == R_ARM_TLS_IE32 \
13117 || (R_TYPE) == R_ARM_TLS_IE32_FDPIC \
13118 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
13120 /* Specific set of relocations for the gnu tls dialect. */
13121 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
13122 ((R_TYPE) == R_ARM_TLS_GOTDESC \
13123 || (R_TYPE) == R_ARM_TLS_CALL \
13124 || (R_TYPE) == R_ARM_THM_TLS_CALL \
13125 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
13126 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
13128 /* Relocate an ARM ELF section. */
13130 static bfd_boolean
13131 elf32_arm_relocate_section (bfd * output_bfd,
13132 struct bfd_link_info * info,
13133 bfd * input_bfd,
13134 asection * input_section,
13135 bfd_byte * contents,
13136 Elf_Internal_Rela * relocs,
13137 Elf_Internal_Sym * local_syms,
13138 asection ** local_sections)
13140 Elf_Internal_Shdr *symtab_hdr;
13141 struct elf_link_hash_entry **sym_hashes;
13142 Elf_Internal_Rela *rel;
13143 Elf_Internal_Rela *relend;
13144 const char *name;
13145 struct elf32_arm_link_hash_table * globals;
13147 globals = elf32_arm_hash_table (info);
13148 if (globals == NULL)
13149 return FALSE;
13151 symtab_hdr = & elf_symtab_hdr (input_bfd);
13152 sym_hashes = elf_sym_hashes (input_bfd);
13154 rel = relocs;
13155 relend = relocs + input_section->reloc_count;
13156 for (; rel < relend; rel++)
13158 int r_type;
13159 reloc_howto_type * howto;
13160 unsigned long r_symndx;
13161 Elf_Internal_Sym * sym;
13162 asection * sec;
13163 struct elf_link_hash_entry * h;
13164 bfd_vma relocation;
13165 bfd_reloc_status_type r;
13166 arelent bfd_reloc;
13167 char sym_type;
13168 bfd_boolean unresolved_reloc = FALSE;
13169 char *error_message = NULL;
13171 r_symndx = ELF32_R_SYM (rel->r_info);
13172 r_type = ELF32_R_TYPE (rel->r_info);
13173 r_type = arm_real_reloc_type (globals, r_type);
13175 if ( r_type == R_ARM_GNU_VTENTRY
13176 || r_type == R_ARM_GNU_VTINHERIT)
13177 continue;
13179 howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
13181 if (howto == NULL)
13182 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
13184 h = NULL;
13185 sym = NULL;
13186 sec = NULL;
13188 if (r_symndx < symtab_hdr->sh_info)
13190 sym = local_syms + r_symndx;
13191 sym_type = ELF32_ST_TYPE (sym->st_info);
13192 sec = local_sections[r_symndx];
13194 /* An object file might have a reference to a local
13195 undefined symbol. This is a daft object file, but we
13196 should at least do something about it. V4BX & NONE
13197 relocations do not use the symbol and are explicitly
13198 allowed to use the undefined symbol, so allow those.
13199 Likewise for relocations against STN_UNDEF. */
13200 if (r_type != R_ARM_V4BX
13201 && r_type != R_ARM_NONE
13202 && r_symndx != STN_UNDEF
13203 && bfd_is_und_section (sec)
13204 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
13205 (*info->callbacks->undefined_symbol)
13206 (info, bfd_elf_string_from_elf_section
13207 (input_bfd, symtab_hdr->sh_link, sym->st_name),
13208 input_bfd, input_section,
13209 rel->r_offset, TRUE);
13211 if (globals->use_rel)
13213 relocation = (sec->output_section->vma
13214 + sec->output_offset
13215 + sym->st_value);
13216 if (!bfd_link_relocatable (info)
13217 && (sec->flags & SEC_MERGE)
13218 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13220 asection *msec;
13221 bfd_vma addend, value;
13223 switch (r_type)
13225 case R_ARM_MOVW_ABS_NC:
13226 case R_ARM_MOVT_ABS:
13227 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13228 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
13229 addend = (addend ^ 0x8000) - 0x8000;
13230 break;
13232 case R_ARM_THM_MOVW_ABS_NC:
13233 case R_ARM_THM_MOVT_ABS:
13234 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
13235 << 16;
13236 value |= bfd_get_16 (input_bfd,
13237 contents + rel->r_offset + 2);
13238 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
13239 | ((value & 0x04000000) >> 15);
13240 addend = (addend ^ 0x8000) - 0x8000;
13241 break;
13243 default:
13244 if (howto->rightshift
13245 || (howto->src_mask & (howto->src_mask + 1)))
13247 _bfd_error_handler
13248 /* xgettext:c-format */
13249 (_("%pB(%pA+%#" PRIx64 "): "
13250 "%s relocation against SEC_MERGE section"),
13251 input_bfd, input_section,
13252 (uint64_t) rel->r_offset, howto->name);
13253 return FALSE;
13256 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13258 /* Get the (signed) value from the instruction. */
13259 addend = value & howto->src_mask;
13260 if (addend & ((howto->src_mask + 1) >> 1))
13262 bfd_signed_vma mask;
13264 mask = -1;
13265 mask &= ~ howto->src_mask;
13266 addend |= mask;
13268 break;
13271 msec = sec;
13272 addend =
13273 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
13274 - relocation;
13275 addend += msec->output_section->vma + msec->output_offset;
13277 /* Cases here must match those in the preceding
13278 switch statement. */
13279 switch (r_type)
13281 case R_ARM_MOVW_ABS_NC:
13282 case R_ARM_MOVT_ABS:
13283 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
13284 | (addend & 0xfff);
13285 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13286 break;
13288 case R_ARM_THM_MOVW_ABS_NC:
13289 case R_ARM_THM_MOVT_ABS:
13290 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
13291 | (addend & 0xff) | ((addend & 0x0800) << 15);
13292 bfd_put_16 (input_bfd, value >> 16,
13293 contents + rel->r_offset);
13294 bfd_put_16 (input_bfd, value,
13295 contents + rel->r_offset + 2);
13296 break;
13298 default:
13299 value = (value & ~ howto->dst_mask)
13300 | (addend & howto->dst_mask);
13301 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13302 break;
13306 else
13307 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
13309 else
13311 bfd_boolean warned, ignored;
13313 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
13314 r_symndx, symtab_hdr, sym_hashes,
13315 h, sec, relocation,
13316 unresolved_reloc, warned, ignored);
13318 sym_type = h->type;
13321 if (sec != NULL && discarded_section (sec))
13322 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
13323 rel, 1, relend, howto, 0, contents);
13325 if (bfd_link_relocatable (info))
13327 /* This is a relocatable link. We don't have to change
13328 anything, unless the reloc is against a section symbol,
13329 in which case we have to adjust according to where the
13330 section symbol winds up in the output section. */
13331 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13333 if (globals->use_rel)
13334 arm_add_to_rel (input_bfd, contents + rel->r_offset,
13335 howto, (bfd_signed_vma) sec->output_offset);
13336 else
13337 rel->r_addend += sec->output_offset;
13339 continue;
13342 if (h != NULL)
13343 name = h->root.root.string;
13344 else
13346 name = (bfd_elf_string_from_elf_section
13347 (input_bfd, symtab_hdr->sh_link, sym->st_name));
13348 if (name == NULL || *name == '\0')
13349 name = bfd_section_name (input_bfd, sec);
13352 if (r_symndx != STN_UNDEF
13353 && r_type != R_ARM_NONE
13354 && (h == NULL
13355 || h->root.type == bfd_link_hash_defined
13356 || h->root.type == bfd_link_hash_defweak)
13357 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
13359 _bfd_error_handler
13360 ((sym_type == STT_TLS
13361 /* xgettext:c-format */
13362 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
13363 /* xgettext:c-format */
13364 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
13365 input_bfd,
13366 input_section,
13367 (uint64_t) rel->r_offset,
13368 howto->name,
13369 name);
13372 /* We call elf32_arm_final_link_relocate unless we're completely
13373 done, i.e., the relaxation produced the final output we want,
13374 and we won't let anybody mess with it. Also, we have to do
13375 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
13376 both in relaxed and non-relaxed cases. */
13377 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
13378 || (IS_ARM_TLS_GNU_RELOC (r_type)
13379 && !((h ? elf32_arm_hash_entry (h)->tls_type :
13380 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
13381 & GOT_TLS_GDESC)))
13383 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
13384 contents, rel, h == NULL);
13385 /* This may have been marked unresolved because it came from
13386 a shared library. But we've just dealt with that. */
13387 unresolved_reloc = 0;
13389 else
13390 r = bfd_reloc_continue;
13392 if (r == bfd_reloc_continue)
13394 unsigned char branch_type =
13395 h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
13396 : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
13398 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
13399 input_section, contents, rel,
13400 relocation, info, sec, name,
13401 sym_type, branch_type, h,
13402 &unresolved_reloc,
13403 &error_message);
13406 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13407 because such sections are not SEC_ALLOC and thus ld.so will
13408 not process them. */
13409 if (unresolved_reloc
13410 && !((input_section->flags & SEC_DEBUGGING) != 0
13411 && h->def_dynamic)
13412 && _bfd_elf_section_offset (output_bfd, info, input_section,
13413 rel->r_offset) != (bfd_vma) -1)
13415 _bfd_error_handler
13416 /* xgettext:c-format */
13417 (_("%pB(%pA+%#" PRIx64 "): "
13418 "unresolvable %s relocation against symbol `%s'"),
13419 input_bfd,
13420 input_section,
13421 (uint64_t) rel->r_offset,
13422 howto->name,
13423 h->root.root.string);
13424 return FALSE;
13427 if (r != bfd_reloc_ok)
13429 switch (r)
13431 case bfd_reloc_overflow:
13432 /* If the overflowing reloc was to an undefined symbol,
13433 we have already printed one error message and there
13434 is no point complaining again. */
13435 if (!h || h->root.type != bfd_link_hash_undefined)
13436 (*info->callbacks->reloc_overflow)
13437 (info, (h ? &h->root : NULL), name, howto->name,
13438 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
13439 break;
13441 case bfd_reloc_undefined:
13442 (*info->callbacks->undefined_symbol)
13443 (info, name, input_bfd, input_section, rel->r_offset, TRUE);
13444 break;
13446 case bfd_reloc_outofrange:
13447 error_message = _("out of range");
13448 goto common_error;
13450 case bfd_reloc_notsupported:
13451 error_message = _("unsupported relocation");
13452 goto common_error;
13454 case bfd_reloc_dangerous:
13455 /* error_message should already be set. */
13456 goto common_error;
13458 default:
13459 error_message = _("unknown error");
13460 /* Fall through. */
13462 common_error:
13463 BFD_ASSERT (error_message != NULL);
13464 (*info->callbacks->reloc_dangerous)
13465 (info, error_message, input_bfd, input_section, rel->r_offset);
13466 break;
13471 return TRUE;
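/* Illustrative sketch, not part of the original source: the ARM
   MOVW/MOVT immediate split used by the SEC_MERGE addend handling in
   elf32_arm_relocate_section above.  The 16-bit value lives in the
   instruction as imm4:imm12 (bits 19:16 and 11:0).  The helper names
   are hypothetical.  */

static bfd_vma
example_movw_extract_imm16 (bfd_vma insn)
{
  return ((insn & 0xf0000) >> 4) | (insn & 0xfff);
}

static bfd_vma
example_movw_insert_imm16 (bfd_vma insn, bfd_vma imm16)
{
  return (insn & 0xfff0f000) | ((imm16 & 0xf000) << 4) | (imm16 & 0xfff);
}

/* E.g. an immediate of 0x1234 is stored as imm4 = 0x1, imm12 = 0x234;
   extracting it back yields 0x1234 again.  The code above additionally
   sign-adjusts the extracted addend via (x ^ 0x8000) - 0x8000.  */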
13474 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13475 adds the edit to the start of the list. (The list must be built in order of
13476 ascending TINDEX: the function's callers are primarily responsible for
13477 maintaining that condition). */
13479 static void
13480 add_unwind_table_edit (arm_unwind_table_edit **head,
13481 arm_unwind_table_edit **tail,
13482 arm_unwind_edit_type type,
13483 asection *linked_section,
13484 unsigned int tindex)
13486 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13487 xmalloc (sizeof (arm_unwind_table_edit));
13489 new_edit->type = type;
13490 new_edit->linked_section = linked_section;
13491 new_edit->index = tindex;
13493 if (tindex > 0)
13495 new_edit->next = NULL;
13497 if (*tail)
13498 (*tail)->next = new_edit;
13500 (*tail) = new_edit;
13502 if (!*head)
13503 (*head) = new_edit;
13505 else
13507 new_edit->next = *head;
13509 if (!*tail)
13510 *tail = new_edit;
13512 *head = new_edit;
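/* Illustrative usage sketch, not part of the original source: because
   the list must be built in ascending TINDEX order, callers normally
   append, e.g. (with hypothetical head/tail variables)

     add_unwind_table_edit (&head, &tail, DELETE_EXIDX_ENTRY, NULL, 3);
     add_unwind_table_edit (&head, &tail, DELETE_EXIDX_ENTRY, NULL, 7);

   while insert_cantunwind_after below passes UINT_MAX as the index so
   its INSERT_EXIDX_CANTUNWIND_AT_END edit sorts after every real table
   index.  */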
13516 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13518 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
13519 static void
13520 adjust_exidx_size(asection *exidx_sec, int adjust)
13522 asection *out_sec;
13524 if (!exidx_sec->rawsize)
13525 exidx_sec->rawsize = exidx_sec->size;
13527 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
13528 out_sec = exidx_sec->output_section;
13529 /* Adjust size of output section. */
13530 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
13533 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13534 static void
13535 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
13537 struct _arm_elf_section_data *exidx_arm_data;
13539 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13540 add_unwind_table_edit (
13541 &exidx_arm_data->u.exidx.unwind_edit_list,
13542 &exidx_arm_data->u.exidx.unwind_edit_tail,
13543 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13545 exidx_arm_data->additional_reloc_count++;
13547 adjust_exidx_size(exidx_sec, 8);
13550 /* Scan .ARM.exidx tables, and create a list describing edits which should be
13551 made to those tables, such that:
13553 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
13554 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
13555 codes which have been inlined into the index).
13557 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
13559 The edits are applied when the tables are written
13560 (in elf32_arm_write_section). */
13562 bfd_boolean
13563 elf32_arm_fix_exidx_coverage (asection **text_section_order,
13564 unsigned int num_text_sections,
13565 struct bfd_link_info *info,
13566 bfd_boolean merge_exidx_entries)
13568 bfd *inp;
13569 unsigned int last_second_word = 0, i;
13570 asection *last_exidx_sec = NULL;
13571 asection *last_text_sec = NULL;
13572 int last_unwind_type = -1;
13574 /* Walk over all EXIDX sections, and create backlinks from the corresponding
13575 text sections. */
13576 for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
13578 asection *sec;
13580 for (sec = inp->sections; sec != NULL; sec = sec->next)
13582 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
13583 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
13585 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
13586 continue;
13588 if (elf_sec->linked_to)
13590 Elf_Internal_Shdr *linked_hdr
13591 = &elf_section_data (elf_sec->linked_to)->this_hdr;
13592 struct _arm_elf_section_data *linked_sec_arm_data
13593 = get_arm_elf_section_data (linked_hdr->bfd_section);
13595 if (linked_sec_arm_data == NULL)
13596 continue;
13598 /* Link this .ARM.exidx section back from the text section it
13599 describes. */
13600 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
13605 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
13606 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
13607 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
13609 for (i = 0; i < num_text_sections; i++)
13611 asection *sec = text_section_order[i];
13612 asection *exidx_sec;
13613 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
13614 struct _arm_elf_section_data *exidx_arm_data;
13615 bfd_byte *contents = NULL;
13616 int deleted_exidx_bytes = 0;
13617 bfd_vma j;
13618 arm_unwind_table_edit *unwind_edit_head = NULL;
13619 arm_unwind_table_edit *unwind_edit_tail = NULL;
13620 Elf_Internal_Shdr *hdr;
13621 bfd *ibfd;
13623 if (arm_data == NULL)
13624 continue;
13626 exidx_sec = arm_data->u.text.arm_exidx_sec;
13627 if (exidx_sec == NULL)
13629 /* Section has no unwind data. */
13630 if (last_unwind_type == 0 || !last_exidx_sec)
13631 continue;
13633 /* Ignore zero sized sections. */
13634 if (sec->size == 0)
13635 continue;
13637 insert_cantunwind_after(last_text_sec, last_exidx_sec);
13638 last_unwind_type = 0;
13639 continue;
13642 /* Skip /DISCARD/ sections. */
13643 if (bfd_is_abs_section (exidx_sec->output_section))
13644 continue;
13646 hdr = &elf_section_data (exidx_sec)->this_hdr;
13647 if (hdr->sh_type != SHT_ARM_EXIDX)
13648 continue;
13650 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13651 if (exidx_arm_data == NULL)
13652 continue;
13654 ibfd = exidx_sec->owner;
13656 if (hdr->contents != NULL)
13657 contents = hdr->contents;
13658 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
13659 /* An error? */
13660 continue;
13662 if (last_unwind_type > 0)
13664 unsigned int first_word = bfd_get_32 (ibfd, contents);
13665 /* Add cantunwind if first unwind item does not match section
13666 start. */
13667 if (first_word != sec->vma)
13669 insert_cantunwind_after (last_text_sec, last_exidx_sec);
13670 last_unwind_type = 0;
13674 for (j = 0; j < hdr->sh_size; j += 8)
13676 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
13677 int unwind_type;
13678 int elide = 0;
13680 /* An EXIDX_CANTUNWIND entry. */
13681 if (second_word == 1)
13683 if (last_unwind_type == 0)
13684 elide = 1;
13685 unwind_type = 0;
13687 /* Inlined unwinding data. Merge if equal to previous. */
13688 else if ((second_word & 0x80000000) != 0)
13690 if (merge_exidx_entries
13691 && last_second_word == second_word && last_unwind_type == 1)
13692 elide = 1;
13693 unwind_type = 1;
13694 last_second_word = second_word;
13696 /* Normal table entry. In theory we could merge these too,
13697 but duplicate entries are likely to be much less common. */
13698 else
13699 unwind_type = 2;
13701 if (elide && !bfd_link_relocatable (info))
13703 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
13704 DELETE_EXIDX_ENTRY, NULL, j / 8);
13706 deleted_exidx_bytes += 8;
13709 last_unwind_type = unwind_type;
13712 /* Free contents if we allocated it ourselves. */
13713 if (contents != hdr->contents)
13714 free (contents);
13716 /* Record edits to be applied later (in elf32_arm_write_section). */
13717 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
13718 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
13720 if (deleted_exidx_bytes > 0)
13721 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
13723 last_exidx_sec = exidx_sec;
13724 last_text_sec = sec;
13727 /* Add terminating CANTUNWIND entry. */
13728 if (!bfd_link_relocatable (info) && last_exidx_sec
13729 && last_unwind_type != 0)
13730 insert_cantunwind_after(last_text_sec, last_exidx_sec);
13732 return TRUE;
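/* Illustrative sketch, not part of the original source: how the loop in
   elf32_arm_fix_exidx_coverage above classifies the second word of each
   8-byte .ARM.exidx entry.  The helper name is hypothetical and is not
   referenced by the linker.  */

static int
example_classify_exidx_entry (unsigned int second_word)
{
  if (second_word == 1)
    return 0;		/* EXIDX_CANTUNWIND.  */
  else if ((second_word & 0x80000000) != 0)
    return 1;		/* Unwind opcodes inlined in the index.  */
  else
    return 2;		/* Reference to a table entry.  */
}

/* Consecutive EXIDX_CANTUNWIND entries, and consecutive identical
   inlined entries when merging is enabled, are the ones marked
   DELETE_EXIDX_ENTRY above.  */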
13735 static bfd_boolean
13736 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13737 bfd *ibfd, const char *name)
13739 asection *sec, *osec;
13741 sec = bfd_get_linker_section (ibfd, name);
13742 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13743 return TRUE;
13745 osec = sec->output_section;
13746 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13747 return TRUE;
13749 if (! bfd_set_section_contents (obfd, osec, sec->contents,
13750 sec->output_offset, sec->size))
13751 return FALSE;
13753 return TRUE;
13756 static bfd_boolean
13757 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13759 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13760 asection *sec, *osec;
13762 if (globals == NULL)
13763 return FALSE;
13765 /* Invoke the regular ELF backend linker to do all the work. */
13766 if (!bfd_elf_final_link (abfd, info))
13767 return FALSE;
13769 /* Process stub sections (e.g. BE8 encoding, ...). */
13770 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13771 unsigned int i;
13772 for (i=0; i<htab->top_id; i++)
13774 sec = htab->stub_group[i].stub_sec;
13775 /* Only process it once, in its link_sec slot. */
13776 if (sec && i == htab->stub_group[i].link_sec->id)
13778 osec = sec->output_section;
13779 elf32_arm_write_section (abfd, info, sec, sec->contents);
13780 if (! bfd_set_section_contents (abfd, osec, sec->contents,
13781 sec->output_offset, sec->size))
13782 return FALSE;
13786 /* Write out any glue sections now that we have created all the
13787 stubs. */
13788 if (globals->bfd_of_glue_owner != NULL)
13790 if (! elf32_arm_output_glue_section (info, abfd,
13791 globals->bfd_of_glue_owner,
13792 ARM2THUMB_GLUE_SECTION_NAME))
13793 return FALSE;
13795 if (! elf32_arm_output_glue_section (info, abfd,
13796 globals->bfd_of_glue_owner,
13797 THUMB2ARM_GLUE_SECTION_NAME))
13798 return FALSE;
13800 if (! elf32_arm_output_glue_section (info, abfd,
13801 globals->bfd_of_glue_owner,
13802 VFP11_ERRATUM_VENEER_SECTION_NAME))
13803 return FALSE;
13805 if (! elf32_arm_output_glue_section (info, abfd,
13806 globals->bfd_of_glue_owner,
13807 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13808 return FALSE;
13810 if (! elf32_arm_output_glue_section (info, abfd,
13811 globals->bfd_of_glue_owner,
13812 ARM_BX_GLUE_SECTION_NAME))
13813 return FALSE;
13816 return TRUE;
13819 /* Return a best guess for the machine number based on the attributes. */
13821 static unsigned int
13822 bfd_arm_get_mach_from_attributes (bfd * abfd)
13824 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
13826 switch (arch)
13828 case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
13829 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
13830 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
13831 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
13833 case TAG_CPU_ARCH_V5TE:
13835 char * name;
13837 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
13838 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
13840 if (name)
13842 if (strcmp (name, "IWMMXT2") == 0)
13843 return bfd_mach_arm_iWMMXt2;
13845 if (strcmp (name, "IWMMXT") == 0)
13846 return bfd_mach_arm_iWMMXt;
13848 if (strcmp (name, "XSCALE") == 0)
13850 int wmmx;
13852 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
13853 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
13854 switch (wmmx)
13856 case 1: return bfd_mach_arm_iWMMXt;
13857 case 2: return bfd_mach_arm_iWMMXt2;
13858 default: return bfd_mach_arm_XScale;
13863 return bfd_mach_arm_5TE;
13866 case TAG_CPU_ARCH_V5TEJ:
13867 return bfd_mach_arm_5TEJ;
13868 case TAG_CPU_ARCH_V6:
13869 return bfd_mach_arm_6;
13870 case TAG_CPU_ARCH_V6KZ:
13871 return bfd_mach_arm_6KZ;
13872 case TAG_CPU_ARCH_V6T2:
13873 return bfd_mach_arm_6T2;
13874 case TAG_CPU_ARCH_V6K:
13875 return bfd_mach_arm_6K;
13876 case TAG_CPU_ARCH_V7:
13877 return bfd_mach_arm_7;
13878 case TAG_CPU_ARCH_V6_M:
13879 return bfd_mach_arm_6M;
13880 case TAG_CPU_ARCH_V6S_M:
13881 return bfd_mach_arm_6SM;
13882 case TAG_CPU_ARCH_V7E_M:
13883 return bfd_mach_arm_7EM;
13884 case TAG_CPU_ARCH_V8:
13885 return bfd_mach_arm_8;
13886 case TAG_CPU_ARCH_V8R:
13887 return bfd_mach_arm_8R;
13888 case TAG_CPU_ARCH_V8M_BASE:
13889 return bfd_mach_arm_8M_BASE;
13890 case TAG_CPU_ARCH_V8M_MAIN:
13891 return bfd_mach_arm_8M_MAIN;
13892 case TAG_CPU_ARCH_V8_1M_MAIN:
13893 return bfd_mach_arm_8_1M_MAIN;
13895 default:
13896 /* Force entry to be added for any new known Tag_CPU_arch value. */
13897 BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);
13899 /* Unknown Tag_CPU_arch value. */
13900 return bfd_mach_arm_unknown;
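/* Illustrative example, not part of the original source: an object with
   Tag_CPU_arch = TAG_CPU_ARCH_V5TE, Tag_CPU_name = "XSCALE" and
   Tag_WMMX_arch = 2 is mapped by the function above to
   bfd_mach_arm_iWMMXt2, while a plain v5TE object falls back to
   bfd_mach_arm_5TE.  */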
13904 /* Set the right machine number. */
13906 static bfd_boolean
13907 elf32_arm_object_p (bfd *abfd)
13909 unsigned int mach;
13911 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13913 if (mach == bfd_mach_arm_unknown)
13915 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13916 mach = bfd_mach_arm_ep9312;
13917 else
13918 mach = bfd_arm_get_mach_from_attributes (abfd);
13921 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13922 return TRUE;
13925 /* Function to keep ARM specific flags in the ELF header. */
13927 static bfd_boolean
13928 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13930 if (elf_flags_init (abfd)
13931 && elf_elfheader (abfd)->e_flags != flags)
13933 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13935 if (flags & EF_ARM_INTERWORK)
13936 _bfd_error_handler
13937 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13938 abfd);
13939 else
13940 _bfd_error_handler
13941 (_("warning: clearing the interworking flag of %pB due to outside request"),
13942 abfd);
13945 else
13947 elf_elfheader (abfd)->e_flags = flags;
13948 elf_flags_init (abfd) = TRUE;
13951 return TRUE;
13954 /* Copy backend specific data from one object module to another. */
13956 static bfd_boolean
13957 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13959 flagword in_flags;
13960 flagword out_flags;
13962 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13963 return TRUE;
13965 in_flags = elf_elfheader (ibfd)->e_flags;
13966 out_flags = elf_elfheader (obfd)->e_flags;
13968 if (elf_flags_init (obfd)
13969 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
13970 && in_flags != out_flags)
13972 /* Cannot mix APCS26 and APCS32 code. */
13973 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13974 return FALSE;
13976 /* Cannot mix float APCS and non-float APCS code. */
13977 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13978 return FALSE;
13980 /* If the src and dest have different interworking flags
13981 then turn off the interworking bit. */
13982 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
13984 if (out_flags & EF_ARM_INTERWORK)
13985 _bfd_error_handler
13986 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13987 obfd, ibfd);
13989 in_flags &= ~EF_ARM_INTERWORK;
13992 /* Likewise for PIC, though don't warn for this case. */
13993 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
13994 in_flags &= ~EF_ARM_PIC;
13997 elf_elfheader (obfd)->e_flags = in_flags;
13998 elf_flags_init (obfd) = TRUE;
14000 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
14003 /* Values for Tag_ABI_PCS_R9_use. */
14004 enum
14006 AEABI_R9_V6,
14007 AEABI_R9_SB,
14008 AEABI_R9_TLS,
14009 AEABI_R9_unused
14012 /* Values for Tag_ABI_PCS_RW_data. */
14013 enum
14015 AEABI_PCS_RW_data_absolute,
14016 AEABI_PCS_RW_data_PCrel,
14017 AEABI_PCS_RW_data_SBrel,
14018 AEABI_PCS_RW_data_unused
14021 /* Values for Tag_ABI_enum_size. */
14022 enum
14024 AEABI_enum_unused,
14025 AEABI_enum_short,
14026 AEABI_enum_wide,
14027 AEABI_enum_forced_wide
14030 /* Determine whether an object attribute tag takes an integer, a
14031 string or both. */
14033 static int
14034 elf32_arm_obj_attrs_arg_type (int tag)
14036 if (tag == Tag_compatibility)
14037 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
14038 else if (tag == Tag_nodefaults)
14039 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
14040 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
14041 return ATTR_TYPE_FLAG_STR_VAL;
14042 else if (tag < 32)
14043 return ATTR_TYPE_FLAG_INT_VAL;
14044 else
14045 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
14048 /* The ABI defines that Tag_conformance should be emitted first, and that
14049 Tag_nodefaults should be second (if either is defined). This sets those
14050 two positions, and bumps up the position of all the remaining tags to
14051 compensate. */
14052 static int
14053 elf32_arm_obj_attrs_order (int num)
14055 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
14056 return Tag_conformance;
14057 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
14058 return Tag_nodefaults;
14059 if ((num - 2) < Tag_nodefaults)
14060 return num - 2;
14061 if ((num - 1) < Tag_conformance)
14062 return num - 1;
14063 return num;
14066 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
14067 static bfd_boolean
14068 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
14070 if ((tag & 127) < 64)
14072 _bfd_error_handler
14073 (_("%pB: unknown mandatory EABI object attribute %d"),
14074 abfd, tag);
14075 bfd_set_error (bfd_error_bad_value);
14076 return FALSE;
14078 else
14080 _bfd_error_handler
14081 (_("warning: %pB: unknown EABI object attribute %d"),
14082 abfd, tag);
14083 return TRUE;
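/* Illustrative example, not part of the original source: an unknown tag
   of 64 or above (modulo 128), such as 65, only produces the warning
   above, whereas an unknown tag below 64, such as 63, is treated as a
   mandatory attribute we do not understand and is an error.  */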
14087 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
14088 Returns -1 if no architecture could be read. */
14090 static int
14091 get_secondary_compatible_arch (bfd *abfd)
14093 obj_attribute *attr =
14094 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14096 /* Note: the tag and its argument below are uleb128 values, though
14097 currently-defined values fit in one byte for each. */
14098 if (attr->s
14099 && attr->s[0] == Tag_CPU_arch
14100 && (attr->s[1] & 128) != 128
14101 && attr->s[2] == 0)
14102 return attr->s[1];
14104 /* This tag is "safely ignorable", so don't complain if it looks funny. */
14105 return -1;
14108 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
14109 The tag is removed if ARCH is -1. */
14111 static void
14112 set_secondary_compatible_arch (bfd *abfd, int arch)
14114 obj_attribute *attr =
14115 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14117 if (arch == -1)
14119 attr->s = NULL;
14120 return;
14123 /* Note: the tag and its argument below are uleb128 values, though
14124 currently-defined values fit in one byte for each. */
14125 if (!attr->s)
14126 attr->s = (char *) bfd_alloc (abfd, 3);
14127 attr->s[0] = Tag_CPU_arch;
14128 attr->s[1] = arch;
14129 attr->s[2] = '\0';
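/* Illustrative example, not part of the original source: calling
   set_secondary_compatible_arch (abfd, TAG_CPU_ARCH_V6_M) stores the
   byte sequence { Tag_CPU_arch, TAG_CPU_ARCH_V6_M, 0 } in the
   attribute, which get_secondary_compatible_arch above reads back as
   TAG_CPU_ARCH_V6_M.  */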
14132 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
14133 into account. */
14135 static int
14136 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
14137 int newtag, int secondary_compat)
14139 #define T(X) TAG_CPU_ARCH_##X
14140 int tagl, tagh, result;
14141 const int v6t2[] =
14143 T(V6T2), /* PRE_V4. */
14144 T(V6T2), /* V4. */
14145 T(V6T2), /* V4T. */
14146 T(V6T2), /* V5T. */
14147 T(V6T2), /* V5TE. */
14148 T(V6T2), /* V5TEJ. */
14149 T(V6T2), /* V6. */
14150 T(V7), /* V6KZ. */
14151 T(V6T2) /* V6T2. */
14153 const int v6k[] =
14155 T(V6K), /* PRE_V4. */
14156 T(V6K), /* V4. */
14157 T(V6K), /* V4T. */
14158 T(V6K), /* V5T. */
14159 T(V6K), /* V5TE. */
14160 T(V6K), /* V5TEJ. */
14161 T(V6K), /* V6. */
14162 T(V6KZ), /* V6KZ. */
14163 T(V7), /* V6T2. */
14164 T(V6K) /* V6K. */
14166 const int v7[] =
14168 T(V7), /* PRE_V4. */
14169 T(V7), /* V4. */
14170 T(V7), /* V4T. */
14171 T(V7), /* V5T. */
14172 T(V7), /* V5TE. */
14173 T(V7), /* V5TEJ. */
14174 T(V7), /* V6. */
14175 T(V7), /* V6KZ. */
14176 T(V7), /* V6T2. */
14177 T(V7), /* V6K. */
14178 T(V7) /* V7. */
14180 const int v6_m[] =
14182 -1, /* PRE_V4. */
14183 -1, /* V4. */
14184 T(V6K), /* V4T. */
14185 T(V6K), /* V5T. */
14186 T(V6K), /* V5TE. */
14187 T(V6K), /* V5TEJ. */
14188 T(V6K), /* V6. */
14189 T(V6KZ), /* V6KZ. */
14190 T(V7), /* V6T2. */
14191 T(V6K), /* V6K. */
14192 T(V7), /* V7. */
14193 T(V6_M) /* V6_M. */
14195 const int v6s_m[] =
14197 -1, /* PRE_V4. */
14198 -1, /* V4. */
14199 T(V6K), /* V4T. */
14200 T(V6K), /* V5T. */
14201 T(V6K), /* V5TE. */
14202 T(V6K), /* V5TEJ. */
14203 T(V6K), /* V6. */
14204 T(V6KZ), /* V6KZ. */
14205 T(V7), /* V6T2. */
14206 T(V6K), /* V6K. */
14207 T(V7), /* V7. */
14208 T(V6S_M), /* V6_M. */
14209 T(V6S_M) /* V6S_M. */
14211 const int v7e_m[] =
14213 -1, /* PRE_V4. */
14214 -1, /* V4. */
14215 T(V7E_M), /* V4T. */
14216 T(V7E_M), /* V5T. */
14217 T(V7E_M), /* V5TE. */
14218 T(V7E_M), /* V5TEJ. */
14219 T(V7E_M), /* V6. */
14220 T(V7E_M), /* V6KZ. */
14221 T(V7E_M), /* V6T2. */
14222 T(V7E_M), /* V6K. */
14223 T(V7E_M), /* V7. */
14224 T(V7E_M), /* V6_M. */
14225 T(V7E_M), /* V6S_M. */
14226 T(V7E_M) /* V7E_M. */
14228 const int v8[] =
14230 T(V8), /* PRE_V4. */
14231 T(V8), /* V4. */
14232 T(V8), /* V4T. */
14233 T(V8), /* V5T. */
14234 T(V8), /* V5TE. */
14235 T(V8), /* V5TEJ. */
14236 T(V8), /* V6. */
14237 T(V8), /* V6KZ. */
14238 T(V8), /* V6T2. */
14239 T(V8), /* V6K. */
14240 T(V8), /* V7. */
14241 T(V8), /* V6_M. */
14242 T(V8), /* V6S_M. */
14243 T(V8), /* V7E_M. */
14244 T(V8) /* V8. */
14246 const int v8r[] =
14248 T(V8R), /* PRE_V4. */
14249 T(V8R), /* V4. */
14250 T(V8R), /* V4T. */
14251 T(V8R), /* V5T. */
14252 T(V8R), /* V5TE. */
14253 T(V8R), /* V5TEJ. */
14254 T(V8R), /* V6. */
14255 T(V8R), /* V6KZ. */
14256 T(V8R), /* V6T2. */
14257 T(V8R), /* V6K. */
14258 T(V8R), /* V7. */
14259 T(V8R), /* V6_M. */
14260 T(V8R), /* V6S_M. */
14261 T(V8R), /* V7E_M. */
14262 T(V8), /* V8. */
14263 T(V8R), /* V8R. */
14265 const int v8m_baseline[] =
14267 -1, /* PRE_V4. */
14268 -1, /* V4. */
14269 -1, /* V4T. */
14270 -1, /* V5T. */
14271 -1, /* V5TE. */
14272 -1, /* V5TEJ. */
14273 -1, /* V6. */
14274 -1, /* V6KZ. */
14275 -1, /* V6T2. */
14276 -1, /* V6K. */
14277 -1, /* V7. */
14278 T(V8M_BASE), /* V6_M. */
14279 T(V8M_BASE), /* V6S_M. */
14280 -1, /* V7E_M. */
14281 -1, /* V8. */
14282 -1, /* V8R. */
14283 T(V8M_BASE) /* V8-M BASELINE. */
14285 const int v8m_mainline[] =
14287 -1, /* PRE_V4. */
14288 -1, /* V4. */
14289 -1, /* V4T. */
14290 -1, /* V5T. */
14291 -1, /* V5TE. */
14292 -1, /* V5TEJ. */
14293 -1, /* V6. */
14294 -1, /* V6KZ. */
14295 -1, /* V6T2. */
14296 -1, /* V6K. */
14297 T(V8M_MAIN), /* V7. */
14298 T(V8M_MAIN), /* V6_M. */
14299 T(V8M_MAIN), /* V6S_M. */
14300 T(V8M_MAIN), /* V7E_M. */
14301 -1, /* V8. */
14302 -1, /* V8R. */
14303 T(V8M_MAIN), /* V8-M BASELINE. */
14304 T(V8M_MAIN) /* V8-M MAINLINE. */
14306 const int v8_1m_mainline[] =
14308 -1, /* PRE_V4. */
14309 -1, /* V4. */
14310 -1, /* V4T. */
14311 -1, /* V5T. */
14312 -1, /* V5TE. */
14313 -1, /* V5TEJ. */
14314 -1, /* V6. */
14315 -1, /* V6KZ. */
14316 -1, /* V6T2. */
14317 -1, /* V6K. */
14318 T(V8_1M_MAIN), /* V7. */
14319 T(V8_1M_MAIN), /* V6_M. */
14320 T(V8_1M_MAIN), /* V6S_M. */
14321 T(V8_1M_MAIN), /* V7E_M. */
14322 -1, /* V8. */
14323 -1, /* V8R. */
14324 T(V8_1M_MAIN), /* V8-M BASELINE. */
14325 T(V8_1M_MAIN), /* V8-M MAINLINE. */
14326 -1, /* Unused (18). */
14327 -1, /* Unused (19). */
14328 -1, /* Unused (20). */
14329 T(V8_1M_MAIN) /* V8.1-M MAINLINE. */
14331 const int v4t_plus_v6_m[] =
14333 -1, /* PRE_V4. */
14334 -1, /* V4. */
14335 T(V4T), /* V4T. */
14336 T(V5T), /* V5T. */
14337 T(V5TE), /* V5TE. */
14338 T(V5TEJ), /* V5TEJ. */
14339 T(V6), /* V6. */
14340 T(V6KZ), /* V6KZ. */
14341 T(V6T2), /* V6T2. */
14342 T(V6K), /* V6K. */
14343 T(V7), /* V7. */
14344 T(V6_M), /* V6_M. */
14345 T(V6S_M), /* V6S_M. */
14346 T(V7E_M), /* V7E_M. */
14347 T(V8), /* V8. */
14348 -1, /* V8R. */
14349 T(V8M_BASE), /* V8-M BASELINE. */
14350 T(V8M_MAIN), /* V8-M MAINLINE. */
14351 -1, /* Unused (18). */
14352 -1, /* Unused (19). */
14353 -1, /* Unused (20). */
14354 T(V8_1M_MAIN), /* V8.1-M MAINLINE. */
14355 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
14357 const int *comb[] =
14359 v6t2,
14360 v6k,
14362 v6_m,
14363 v6s_m,
14364 v7e_m,
14366 v8r,
14367 v8m_baseline,
14368 v8m_mainline,
14369 NULL,
14370 NULL,
14371 NULL,
14372 v8_1m_mainline,
14373 /* Pseudo-architecture. */
14374 v4t_plus_v6_m
14377 /* Check we've not got a higher architecture than we know about. */
14379 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
14381 _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
14382 return -1;
14385 /* Override old tag if we have a Tag_also_compatible_with on the output. */
14387 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
14388 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
14389 oldtag = T(V4T_PLUS_V6_M);
14391 /* And override the new tag if we have a Tag_also_compatible_with on the
14392 input. */
14394 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
14395 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
14396 newtag = T(V4T_PLUS_V6_M);
14398 tagl = (oldtag < newtag) ? oldtag : newtag;
14399 result = tagh = (oldtag > newtag) ? oldtag : newtag;
14401 /* Architectures before V6KZ add features monotonically. */
14402 if (tagh <= TAG_CPU_ARCH_V6KZ)
14403 return result;
14405 result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
14407 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
14408 as the canonical version. */
14409 if (result == T(V4T_PLUS_V6_M))
14411 result = T(V4T);
14412 *secondary_compat_out = T(V6_M);
14414 else
14415 *secondary_compat_out = -1;
14417 if (result == -1)
14419 _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
14420 ibfd, oldtag, newtag);
14421 return -1;
14424 return result;
14425 #undef T
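/* Illustrative example, not part of the original source: merging an
   object built for ARMv6T2 with one built for ARMv6K takes the higher
   tag (V6K), indexes its row with the lower tag (V6T2), and the v6k[]
   table above yields TAG_CPU_ARCH_V7, since neither architecture is a
   subset of the other.  */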
14428 /* Query attributes object to see if integer divide instructions may be
14429 present in an object. */
14430 static bfd_boolean
14431 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14433 int arch = attr[Tag_CPU_arch].i;
14434 int profile = attr[Tag_CPU_arch_profile].i;
14436 switch (attr[Tag_DIV_use].i)
14438 case 0:
14439 /* Integer divide allowed if the instruction is included in the architecture. */
14440 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14441 return TRUE;
14442 else if (arch >= TAG_CPU_ARCH_V7E_M)
14443 return TRUE;
14444 else
14445 return FALSE;
14447 case 1:
14448 /* Integer divide explicitly prohibited. */
14449 return FALSE;
14451 default:
14452 /* Unrecognised case - treat as allowing divide everywhere. */
14453 case 2:
14454 /* Integer divide allowed in ARM state. */
14455 return TRUE;
14459 /* Query attributes object to see if integer divide instructions are
14460 forbidden to be in the object. This is not the inverse of
14461 elf32_arm_attributes_accept_div. */
14462 static bfd_boolean
14463 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14465 return attr[Tag_DIV_use].i == 1;
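/* Illustrative example, not part of the original source: with
   Tag_DIV_use = 0, an ARMv7-R or ARMv7-M object "accepts" divide
   (SDIV/UDIV are in the base architecture) while an ARMv7-A object does
   not; only Tag_DIV_use = 1 makes elf32_arm_attributes_forbid_div
   return TRUE.  */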
14468 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14469 are conflicting attributes. */
14471 static bfd_boolean
14472 elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14474 bfd *obfd = info->output_bfd;
14475 obj_attribute *in_attr;
14476 obj_attribute *out_attr;
14477 /* Some tags have 0 = don't care, 1 = strong requirement,
14478 2 = weak requirement. */
14479 static const int order_021[3] = {0, 2, 1};
14480 int i;
14481 bfd_boolean result = TRUE;
14482 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14484 /* Skip the linker stubs file. This preserves previous behavior
14485 of accepting unknown attributes in the first input file - but
14486 is that a bug? */
14487 if (ibfd->flags & BFD_LINKER_CREATED)
14488 return TRUE;
14490 /* Skip any input that has no attribute section.
14491 This makes it possible to link object files without an attribute
14492 section with any others. */
14493 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14494 return TRUE;
14496 if (!elf_known_obj_attributes_proc (obfd)[0].i)
14498 /* This is the first object. Copy the attributes. */
14499 _bfd_elf_copy_obj_attributes (ibfd, obfd);
14501 out_attr = elf_known_obj_attributes_proc (obfd);
14503 /* Use the Tag_null value to indicate the attributes have been
14504 initialized. */
14505 out_attr[0].i = 1;
14507 /* We do not output objects with Tag_MPextension_use_legacy - we move
14508 the attribute's value to Tag_MPextension_use. */
14509 if (out_attr[Tag_MPextension_use_legacy].i != 0)
14511 if (out_attr[Tag_MPextension_use].i != 0
14512 && out_attr[Tag_MPextension_use_legacy].i
14513 != out_attr[Tag_MPextension_use].i)
14515 _bfd_error_handler
14516 (_("Error: %pB has both the current and legacy "
14517 "Tag_MPextension_use attributes"), ibfd);
14518 result = FALSE;
14521 out_attr[Tag_MPextension_use] =
14522 out_attr[Tag_MPextension_use_legacy];
14523 out_attr[Tag_MPextension_use_legacy].type = 0;
14524 out_attr[Tag_MPextension_use_legacy].i = 0;
14527 return result;
14530 in_attr = elf_known_obj_attributes_proc (ibfd);
14531 out_attr = elf_known_obj_attributes_proc (obfd);
14532 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14533 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14535 /* Ignore mismatches if the object doesn't use floating point or is
14536 floating point ABI independent. */
14537 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14538 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14539 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14540 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14541 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14542 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14544 _bfd_error_handler
14545 (_("error: %pB uses VFP register arguments, %pB does not"),
14546 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14547 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14548 result = FALSE;
14552 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14554 /* Merge this attribute with existing attributes. */
14555 switch (i)
14557 case Tag_CPU_raw_name:
14558 case Tag_CPU_name:
14559 /* These are merged after Tag_CPU_arch. */
14560 break;
14562 case Tag_ABI_optimization_goals:
14563 case Tag_ABI_FP_optimization_goals:
14564 /* Use the first value seen. */
14565 break;
14567 case Tag_CPU_arch:
14569 int secondary_compat = -1, secondary_compat_out = -1;
14570 unsigned int saved_out_attr = out_attr[i].i;
14571 int arch_attr;
14572 static const char *name_table[] =
14574 /* These aren't real CPU names, but we can't guess
14575 that from the architecture version alone. */
14576 "Pre v4",
14577 "ARM v4",
14578 "ARM v4T",
14579 "ARM v5T",
14580 "ARM v5TE",
14581 "ARM v5TEJ",
14582 "ARM v6",
14583 "ARM v6KZ",
14584 "ARM v6T2",
14585 "ARM v6K",
14586 "ARM v7",
14587 "ARM v6-M",
14588 "ARM v6S-M",
14589 "ARM v8",
14591 "ARM v8-M.baseline",
14592 "ARM v8-M.mainline",
14595 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14596 secondary_compat = get_secondary_compatible_arch (ibfd);
14597 secondary_compat_out = get_secondary_compatible_arch (obfd);
14598 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14599 &secondary_compat_out,
14600 in_attr[i].i,
14601 secondary_compat);
14603 /* Return with error if failed to merge. */
14604 if (arch_attr == -1)
14605 return FALSE;
14607 out_attr[i].i = arch_attr;
14609 set_secondary_compatible_arch (obfd, secondary_compat_out);
14611 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14612 if (out_attr[i].i == saved_out_attr)
14613 ; /* Leave the names alone. */
14614 else if (out_attr[i].i == in_attr[i].i)
14616 /* The output architecture has been changed to match the
14617 input architecture. Use the input names. */
14618 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14619 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14620 : NULL;
14621 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14622 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14623 : NULL;
14625 else
14627 out_attr[Tag_CPU_name].s = NULL;
14628 out_attr[Tag_CPU_raw_name].s = NULL;
14631 /* If we still don't have a value for Tag_CPU_name,
14632 make one up now. Tag_CPU_raw_name remains blank. */
14633 if (out_attr[Tag_CPU_name].s == NULL
14634 && out_attr[i].i < ARRAY_SIZE (name_table))
14635 out_attr[Tag_CPU_name].s =
14636 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14638 break;
14640 case Tag_ARM_ISA_use:
14641 case Tag_THUMB_ISA_use:
14642 case Tag_WMMX_arch:
14643 case Tag_Advanced_SIMD_arch:
14644 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14645 case Tag_ABI_FP_rounding:
14646 case Tag_ABI_FP_exceptions:
14647 case Tag_ABI_FP_user_exceptions:
14648 case Tag_ABI_FP_number_model:
14649 case Tag_FP_HP_extension:
14650 case Tag_CPU_unaligned_access:
14651 case Tag_T2EE_use:
14652 case Tag_MPextension_use:
14653 case Tag_MVE_arch:
14654 /* Use the largest value specified. */
14655 if (in_attr[i].i > out_attr[i].i)
14656 out_attr[i].i = in_attr[i].i;
14657 break;
14659 case Tag_ABI_align_preserved:
14660 case Tag_ABI_PCS_RO_data:
14661 /* Use the smallest value specified. */
14662 if (in_attr[i].i < out_attr[i].i)
14663 out_attr[i].i = in_attr[i].i;
14664 break;
14666 case Tag_ABI_align_needed:
14667 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14668 && (in_attr[Tag_ABI_align_preserved].i == 0
14669 || out_attr[Tag_ABI_align_preserved].i == 0))
14671 /* This error message should be enabled once all non-conformant
14672 binaries in the toolchain have had the attributes set
14673 properly.
14674 _bfd_error_handler
14675 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14676 obfd, ibfd);
14677 result = FALSE; */
14679 /* Fall through. */
14680 case Tag_ABI_FP_denormal:
14681 case Tag_ABI_PCS_GOT_use:
14682 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14683 value if greater than 2 (for future-proofing). */
14684 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14685 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14686 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14687 out_attr[i].i = in_attr[i].i;
14688 break;
14690 case Tag_Virtualization_use:
14691 /* The virtualization tag effectively stores two bits of
14692 information: the intended use of TrustZone (in bit 0), and the
14693 intended use of Virtualization (in bit 1). */
14694 if (out_attr[i].i == 0)
14695 out_attr[i].i = in_attr[i].i;
14696 else if (in_attr[i].i != 0
14697 && in_attr[i].i != out_attr[i].i)
14699 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14700 out_attr[i].i = 3;
14701 else
14703 _bfd_error_handler
14704 (_("error: %pB: unable to merge virtualization attributes "
14705 "with %pB"),
14706 obfd, ibfd);
14707 result = FALSE;
14710 break;
14712 case Tag_CPU_arch_profile:
14713 if (out_attr[i].i != in_attr[i].i)
14715 /* 0 will merge with anything.
14716 'A' and 'S' merge to 'A'.
14717 'R' and 'S' merge to 'R'.
14718 'M' and 'A|R|S' is an error. */
14719 if (out_attr[i].i == 0
14720 || (out_attr[i].i == 'S'
14721 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14722 out_attr[i].i = in_attr[i].i;
14723 else if (in_attr[i].i == 0
14724 || (in_attr[i].i == 'S'
14725 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14726 ; /* Do nothing. */
14727 else
14729 _bfd_error_handler
14730 (_("error: %pB: conflicting architecture profiles %c/%c"),
14731 ibfd,
14732 in_attr[i].i ? in_attr[i].i : '0',
14733 out_attr[i].i ? out_attr[i].i : '0');
14734 result = FALSE;
14737 break;
14739 case Tag_DSP_extension:
14740 /* No need to change the output value if any of:
14741 - the input architecture is ARMv5T or earlier (it has no DSP),
14742 - the input is M profile, is not ARMv7E-M, and does not have DSP. */
14743 if (in_attr[Tag_CPU_arch].i <= 3
14744 || (in_attr[Tag_CPU_arch_profile].i == 'M'
14745 && in_attr[Tag_CPU_arch].i != 13
14746 && in_attr[i].i == 0))
14747 ; /* Do nothing. */
14748 /* The output value should be 0 if DSP is part of the output architecture, i.e.
14749 - the output architecture is ARMv5TE or later, and
14750 - the output is A, R or S profile, or the output architecture is ARMv7E-M. */
14751 else if (out_attr[Tag_CPU_arch].i >= 4
14752 && (out_attr[Tag_CPU_arch_profile].i == 'A'
14753 || out_attr[Tag_CPU_arch_profile].i == 'R'
14754 || out_attr[Tag_CPU_arch_profile].i == 'S'
14755 || out_attr[Tag_CPU_arch].i == 13))
14756 out_attr[i].i = 0;
14757 /* Otherwise, DSP instructions are added and not part of output
14758 architecture. */
14759 else
14760 out_attr[i].i = 1;
14761 break;
14763 case Tag_FP_arch:
14765 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14766 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14767 when it's 0. It might mean absence of FP hardware if
14768 Tag_FP_arch is zero. */
14770 #define VFP_VERSION_COUNT 9
14771 static const struct
14773 int ver;
14774 int regs;
14775 } vfp_versions[VFP_VERSION_COUNT] =
14777 {0, 0},
14778 {1, 16},
14779 {2, 16},
14780 {3, 32},
14781 {3, 16},
14782 {4, 32},
14783 {4, 16},
14784 {8, 32},
14785 {8, 16}
14787 int ver;
14788 int regs;
14789 int newval;
14791 /* If the output has no requirement about FP hardware,
14792 follow the requirement of the input. */
14793 if (out_attr[i].i == 0)
14795 /* This assert is still reasonable: we shouldn't
14796 produce the suspicious build attribute
14797 combination (see below for in_attr). */
14798 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14799 out_attr[i].i = in_attr[i].i;
14800 out_attr[Tag_ABI_HardFP_use].i
14801 = in_attr[Tag_ABI_HardFP_use].i;
14802 break;
14804 /* If the input has no requirement about FP hardware, do
14805 nothing. */
14806 else if (in_attr[i].i == 0)
14808 /* We used to assert that Tag_ABI_HardFP_use was
14809 zero here, but we should never assert when
14810 consuming an object file that has suspicious
14811 build attributes. The single precision variant
14812 of 'no FP architecture' is still 'no FP
14813 architecture', so we just ignore the tag in this
14814 case. */
14815 break;
14818 /* Both the input and the output have nonzero Tag_FP_arch.
14819 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14821 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14822 do nothing. */
14823 if (in_attr[Tag_ABI_HardFP_use].i == 0
14824 && out_attr[Tag_ABI_HardFP_use].i == 0)
14826 /* If the input and the output have different Tag_ABI_HardFP_use,
14827 the combination of them is 0 (implied by Tag_FP_arch). */
14828 else if (in_attr[Tag_ABI_HardFP_use].i
14829 != out_attr[Tag_ABI_HardFP_use].i)
14830 out_attr[Tag_ABI_HardFP_use].i = 0;
14832 /* Now we can handle Tag_FP_arch. */
14834 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14835 pick the biggest. */
14836 if (in_attr[i].i >= VFP_VERSION_COUNT
14837 && in_attr[i].i > out_attr[i].i)
14839 out_attr[i] = in_attr[i];
14840 break;
14842 /* The output uses the superset of input features
14843 (ISA version) and registers. */
14844 ver = vfp_versions[in_attr[i].i].ver;
14845 if (ver < vfp_versions[out_attr[i].i].ver)
14846 ver = vfp_versions[out_attr[i].i].ver;
14847 regs = vfp_versions[in_attr[i].i].regs;
14848 if (regs < vfp_versions[out_attr[i].i].regs)
14849 regs = vfp_versions[out_attr[i].i].regs;
14850 /* This assumes all possible supersets are also valid
14851 options. */
14852 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14854 if (regs == vfp_versions[newval].regs
14855 && ver == vfp_versions[newval].ver)
14856 break;
14858 out_attr[i].i = newval;
14860 break;
14861 case Tag_PCS_config:
14862 if (out_attr[i].i == 0)
14863 out_attr[i].i = in_attr[i].i;
14864 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14866 /* It's sometimes ok to mix different configs, so this is only
14867 a warning. */
14868 _bfd_error_handler
14869 (_("warning: %pB: conflicting platform configuration"), ibfd);
14871 break;
14872 case Tag_ABI_PCS_R9_use:
14873 if (in_attr[i].i != out_attr[i].i
14874 && out_attr[i].i != AEABI_R9_unused
14875 && in_attr[i].i != AEABI_R9_unused)
14877 _bfd_error_handler
14878 (_("error: %pB: conflicting use of R9"), ibfd);
14879 result = FALSE;
14881 if (out_attr[i].i == AEABI_R9_unused)
14882 out_attr[i].i = in_attr[i].i;
14883 break;
14884 case Tag_ABI_PCS_RW_data:
14885 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14886 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14887 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14889 _bfd_error_handler
14890 (_("error: %pB: SB relative addressing conflicts with use of R9"),
14891 ibfd);
14892 result = FALSE;
14894 /* Use the smallest value specified. */
14895 if (in_attr[i].i < out_attr[i].i)
14896 out_attr[i].i = in_attr[i].i;
14897 break;
14898 case Tag_ABI_PCS_wchar_t:
14899 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
14900 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
14902 _bfd_error_handler
14903 (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14904 ibfd, in_attr[i].i, out_attr[i].i);
14906 else if (in_attr[i].i && !out_attr[i].i)
14907 out_attr[i].i = in_attr[i].i;
14908 break;
14909 case Tag_ABI_enum_size:
14910 if (in_attr[i].i != AEABI_enum_unused)
14912 if (out_attr[i].i == AEABI_enum_unused
14913 || out_attr[i].i == AEABI_enum_forced_wide)
14915 /* The existing object is compatible with anything.
14916 Use whatever requirements the new object has. */
14917 out_attr[i].i = in_attr[i].i;
14919 else if (in_attr[i].i != AEABI_enum_forced_wide
14920 && out_attr[i].i != in_attr[i].i
14921 && !elf_arm_tdata (obfd)->no_enum_size_warning)
14923 static const char *aeabi_enum_names[] =
14924 { "", "variable-size", "32-bit", "" };
14925 const char *in_name =
14926 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14927 ? aeabi_enum_names[in_attr[i].i]
14928 : "<unknown>";
14929 const char *out_name =
14930 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14931 ? aeabi_enum_names[out_attr[i].i]
14932 : "<unknown>";
14933 _bfd_error_handler
14934 (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14935 ibfd, in_name, out_name);
14938 break;
14939 case Tag_ABI_VFP_args:
14940 /* Already done. */
14941 break;
14942 case Tag_ABI_WMMX_args:
14943 if (in_attr[i].i != out_attr[i].i)
14945 _bfd_error_handler
14946 (_("error: %pB uses iWMMXt register arguments, %pB does not"),
14947 ibfd, obfd);
14948 result = FALSE;
14950 break;
14951 case Tag_compatibility:
14952 /* Merged in target-independent code. */
14953 break;
14954 case Tag_ABI_HardFP_use:
14955 /* This is handled along with Tag_FP_arch. */
14956 break;
14957 case Tag_ABI_FP_16bit_format:
14958 if (in_attr[i].i != 0 && out_attr[i].i != 0)
14960 if (in_attr[i].i != out_attr[i].i)
14962 _bfd_error_handler
14963 (_("error: fp16 format mismatch between %pB and %pB"),
14964 ibfd, obfd);
14965 result = FALSE;
14968 if (in_attr[i].i != 0)
14969 out_attr[i].i = in_attr[i].i;
14970 break;
14972 case Tag_DIV_use:
14973 /* A value of zero on input means that the divide instruction may
14974 be used if available in the base architecture as specified via
14975 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
14976 the user did not want divide instructions. A value of 2
14977 explicitly means that divide instructions were allowed in ARM
14978 and Thumb state. */
14979 if (in_attr[i].i == out_attr[i].i)
14980 /* Do nothing. */ ;
14981 else if (elf32_arm_attributes_forbid_div (in_attr)
14982 && !elf32_arm_attributes_accept_div (out_attr))
14983 out_attr[i].i = 1;
14984 else if (elf32_arm_attributes_forbid_div (out_attr)
14985 && elf32_arm_attributes_accept_div (in_attr))
14986 out_attr[i].i = in_attr[i].i;
14987 else if (in_attr[i].i == 2)
14988 out_attr[i].i = in_attr[i].i;
14989 break;
14991 case Tag_MPextension_use_legacy:
14992 /* We don't output objects with Tag_MPextension_use_legacy - we
14993 move the value to Tag_MPextension_use. */
14994 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
14996 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
14998 _bfd_error_handler
14999 (_("%pB has both the current and legacy "
15000 "Tag_MPextension_use attributes"),
15001 ibfd);
15002 result = FALSE;
15006 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
15007 out_attr[Tag_MPextension_use] = in_attr[i];
15009 break;
15011 case Tag_nodefaults:
15012 /* This tag is set if it exists, but the value is unused (and is
15013 typically zero). We don't actually need to do anything here -
15014 the merge happens automatically when the type flags are merged
15015 below. */
15016 break;
15017 case Tag_also_compatible_with:
15018 /* Already done in Tag_CPU_arch. */
15019 break;
15020 case Tag_conformance:
15021 /* Keep the attribute if it matches. Throw it away otherwise.
15022 No attribute means no claim to conform. */
15023 if (!in_attr[i].s || !out_attr[i].s
15024 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
15025 out_attr[i].s = NULL;
15026 break;
15028 default:
15029 result
15030 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
15033 /* If out_attr was copied from in_attr then it won't have a type yet. */
15034 if (in_attr[i].type && !out_attr[i].type)
15035 out_attr[i].type = in_attr[i].type;
15038 /* Merge Tag_compatibility attributes and any common GNU ones. */
15039 if (!_bfd_elf_merge_object_attributes (ibfd, info))
15040 return FALSE;
15042 /* Check for any attributes not known on ARM. */
15043 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
15045 return result;
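/* Illustrative example, not part of the original source: for the
   Tag_FP_arch case above, merging value 2 (VFPv2, 16 D registers) with
   value 3 (VFPv3, 32 D registers) takes the larger version (3) and the
   larger register count (32), then searches vfp_versions[] for the
   entry matching both, giving a merged Tag_FP_arch of 3.  */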
15049 /* Return TRUE if the two EABI versions are incompatible. */
15051 static bfd_boolean
15052 elf32_arm_versions_compatible (unsigned iver, unsigned over)
15054 /* v4 and v5 are the same spec before and after it was released,
15055 so allow mixing them. */
15056 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
15057 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
15058 return TRUE;
15060 return (iver == over);
15063 /* Merge backend specific data from an object file to the output
15064 object file when linking. */
15066 static bfd_boolean
15067 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
15069 /* Display the flags field. */
15071 static bfd_boolean
15072 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
15074 FILE * file = (FILE *) ptr;
15075 unsigned long flags;
15077 BFD_ASSERT (abfd != NULL && ptr != NULL);
15079 /* Print normal ELF private data. */
15080 _bfd_elf_print_private_bfd_data (abfd, ptr);
15082 flags = elf_elfheader (abfd)->e_flags;
15083 /* Ignore init flag - it may not be set, despite the flags field
15084 containing valid data. */
15086 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
15088 switch (EF_ARM_EABI_VERSION (flags))
15090 case EF_ARM_EABI_UNKNOWN:
15091 /* The following flag bits are GNU extensions and not part of the
15092 official ARM ELF extended ABI. Hence they are only decoded if
15093 the EABI version is not set. */
15094 if (flags & EF_ARM_INTERWORK)
15095 fprintf (file, _(" [interworking enabled]"));
15097 if (flags & EF_ARM_APCS_26)
15098 fprintf (file, " [APCS-26]");
15099 else
15100 fprintf (file, " [APCS-32]");
15102 if (flags & EF_ARM_VFP_FLOAT)
15103 fprintf (file, _(" [VFP float format]"));
15104 else if (flags & EF_ARM_MAVERICK_FLOAT)
15105 fprintf (file, _(" [Maverick float format]"));
15106 else
15107 fprintf (file, _(" [FPA float format]"));
15109 if (flags & EF_ARM_APCS_FLOAT)
15110 fprintf (file, _(" [floats passed in float registers]"));
15112 if (flags & EF_ARM_PIC)
15113 fprintf (file, _(" [position independent]"));
15115 if (flags & EF_ARM_NEW_ABI)
15116 fprintf (file, _(" [new ABI]"));
15118 if (flags & EF_ARM_OLD_ABI)
15119 fprintf (file, _(" [old ABI]"));
15121 if (flags & EF_ARM_SOFT_FLOAT)
15122 fprintf (file, _(" [software FP]"));
15124 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
15125 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
15126 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
15127 | EF_ARM_MAVERICK_FLOAT);
15128 break;
15130 case EF_ARM_EABI_VER1:
15131 fprintf (file, _(" [Version1 EABI]"));
15133 if (flags & EF_ARM_SYMSARESORTED)
15134 fprintf (file, _(" [sorted symbol table]"));
15135 else
15136 fprintf (file, _(" [unsorted symbol table]"));
15138 flags &= ~ EF_ARM_SYMSARESORTED;
15139 break;
15141 case EF_ARM_EABI_VER2:
15142 fprintf (file, _(" [Version2 EABI]"));
15144 if (flags & EF_ARM_SYMSARESORTED)
15145 fprintf (file, _(" [sorted symbol table]"));
15146 else
15147 fprintf (file, _(" [unsorted symbol table]"));
15149 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
15150 fprintf (file, _(" [dynamic symbols use segment index]"));
15152 if (flags & EF_ARM_MAPSYMSFIRST)
15153 fprintf (file, _(" [mapping symbols precede others]"));
15155 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
15156 | EF_ARM_MAPSYMSFIRST);
15157 break;
15159 case EF_ARM_EABI_VER3:
15160 fprintf (file, _(" [Version3 EABI]"));
15161 break;
15163 case EF_ARM_EABI_VER4:
15164 fprintf (file, _(" [Version4 EABI]"));
15165 goto eabi;
15167 case EF_ARM_EABI_VER5:
15168 fprintf (file, _(" [Version5 EABI]"));
15170 if (flags & EF_ARM_ABI_FLOAT_SOFT)
15171 fprintf (file, _(" [soft-float ABI]"));
15173 if (flags & EF_ARM_ABI_FLOAT_HARD)
15174 fprintf (file, _(" [hard-float ABI]"));
15176 flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
15178 eabi:
15179 if (flags & EF_ARM_BE8)
15180 fprintf (file, _(" [BE8]"));
15182 if (flags & EF_ARM_LE8)
15183 fprintf (file, _(" [LE8]"));
15185 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
15186 break;
15188 default:
15189 fprintf (file, _(" <EABI version unrecognised>"));
15190 break;
15193 flags &= ~ EF_ARM_EABIMASK;
15195 if (flags & EF_ARM_RELEXEC)
15196 fprintf (file, _(" [relocatable executable]"));
15198 if (flags & EF_ARM_PIC)
15199 fprintf (file, _(" [position independent]"));
15201 if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
15202 fprintf (file, _(" [FDPIC ABI supplement]"));
15204 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);
15206 if (flags)
15207 fprintf (file, _("<Unrecognised flag bits set>"));
15209 fputc ('\n', file);
15211 return TRUE;
15214 static int
15215 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15217 switch (ELF_ST_TYPE (elf_sym->st_info))
15219 case STT_ARM_TFUNC:
15220 return ELF_ST_TYPE (elf_sym->st_info);
15222 case STT_ARM_16BIT:
15223 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15224 This allows us to distinguish between data used by Thumb instructions
15225 and non-data (which is probably code) inside Thumb regions of an
15226 executable. */
15227 if (type != STT_OBJECT && type != STT_TLS)
15228 return ELF_ST_TYPE (elf_sym->st_info);
15229 break;
15231 default:
15232 break;
15235 return type;
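   /* For example, an STT_ARM_TFUNC symbol always reports its ARM-specific
      type, whereas an STT_ARM_16BIT annotation on an STT_OBJECT or STT_TLS
      symbol is dropped in favour of the generic type passed in.  */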
15238 static asection *
15239 elf32_arm_gc_mark_hook (asection *sec,
15240 struct bfd_link_info *info,
15241 Elf_Internal_Rela *rel,
15242 struct elf_link_hash_entry *h,
15243 Elf_Internal_Sym *sym)
15245 if (h != NULL)
15246 switch (ELF32_R_TYPE (rel->r_info))
15248 case R_ARM_GNU_VTINHERIT:
15249 case R_ARM_GNU_VTENTRY:
15250 return NULL;
15253 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15256 /* Look through the relocs for a section during the first phase. */
15258 static bfd_boolean
15259 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15260 asection *sec, const Elf_Internal_Rela *relocs)
15262 Elf_Internal_Shdr *symtab_hdr;
15263 struct elf_link_hash_entry **sym_hashes;
15264 const Elf_Internal_Rela *rel;
15265 const Elf_Internal_Rela *rel_end;
15266 bfd *dynobj;
15267 asection *sreloc;
15268 struct elf32_arm_link_hash_table *htab;
15269 bfd_boolean call_reloc_p;
15270 bfd_boolean may_become_dynamic_p;
15271 bfd_boolean may_need_local_target_p;
15272 unsigned long nsyms;
15274 if (bfd_link_relocatable (info))
15275 return TRUE;
15277 BFD_ASSERT (is_arm_elf (abfd));
15279 htab = elf32_arm_hash_table (info);
15280 if (htab == NULL)
15281 return FALSE;
15283 sreloc = NULL;
15285 /* Create dynamic sections for relocatable executables so that we can
15286 copy relocations. */
15287 if (htab->root.is_relocatable_executable
15288 && ! htab->root.dynamic_sections_created)
15290 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
15291 return FALSE;
15294 if (htab->root.dynobj == NULL)
15295 htab->root.dynobj = abfd;
15296 if (!create_ifunc_sections (info))
15297 return FALSE;
15299 dynobj = htab->root.dynobj;
15301 symtab_hdr = & elf_symtab_hdr (abfd);
15302 sym_hashes = elf_sym_hashes (abfd);
15303 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15305 rel_end = relocs + sec->reloc_count;
15306 for (rel = relocs; rel < rel_end; rel++)
15308 Elf_Internal_Sym *isym;
15309 struct elf_link_hash_entry *h;
15310 struct elf32_arm_link_hash_entry *eh;
15311 unsigned int r_symndx;
15312 int r_type;
15314 r_symndx = ELF32_R_SYM (rel->r_info);
15315 r_type = ELF32_R_TYPE (rel->r_info);
15316 r_type = arm_real_reloc_type (htab, r_type);
15318 if (r_symndx >= nsyms
15319 /* PR 9934: It is possible to have relocations that do not
15320 refer to symbols, thus it is also possible to have an
15321 object file containing relocations but no symbol table. */
15322 && (r_symndx > STN_UNDEF || nsyms > 0))
15324 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15325 r_symndx);
15326 return FALSE;
15329 h = NULL;
15330 isym = NULL;
15331 if (nsyms > 0)
15333 if (r_symndx < symtab_hdr->sh_info)
15335 /* A local symbol. */
15336 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
15337 abfd, r_symndx);
15338 if (isym == NULL)
15339 return FALSE;
15341 else
15343 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15344 while (h->root.type == bfd_link_hash_indirect
15345 || h->root.type == bfd_link_hash_warning)
15346 h = (struct elf_link_hash_entry *) h->root.u.i.link;
15350 eh = (struct elf32_arm_link_hash_entry *) h;
15352 call_reloc_p = FALSE;
15353 may_become_dynamic_p = FALSE;
15354 may_need_local_target_p = FALSE;
15356 /* Could be done earlier, if h were already available. */
15357 r_type = elf32_arm_tls_transition (info, r_type, h);
15358 switch (r_type)
15360 case R_ARM_GOTOFFFUNCDESC:
15362 if (h == NULL)
15364 if (!elf32_arm_allocate_local_sym_info (abfd))
15365 return FALSE;
15366 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].gotofffuncdesc_cnt += 1;
15367 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15369 else
15371 eh->fdpic_cnts.gotofffuncdesc_cnt++;
15374 break;
15376 case R_ARM_GOTFUNCDESC:
15378 if (h == NULL)
15380 /* Such a relocation is not supposed to be generated
15381 by GCC for a static function; it could be handled
15382 here if it were ever needed. */
15383 abort();
15385 else
15387 eh->fdpic_cnts.gotfuncdesc_cnt++;
15390 break;
15392 case R_ARM_FUNCDESC:
15394 if (h == NULL)
15396 if (!elf32_arm_allocate_local_sym_info (abfd))
15397 return FALSE;
15398 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_cnt += 1;
15399 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15401 else
15403 eh->fdpic_cnts.funcdesc_cnt++;
15406 break;
15408 case R_ARM_GOT32:
15409 case R_ARM_GOT_PREL:
15410 case R_ARM_TLS_GD32:
15411 case R_ARM_TLS_GD32_FDPIC:
15412 case R_ARM_TLS_IE32:
15413 case R_ARM_TLS_IE32_FDPIC:
15414 case R_ARM_TLS_GOTDESC:
15415 case R_ARM_TLS_DESCSEQ:
15416 case R_ARM_THM_TLS_DESCSEQ:
15417 case R_ARM_TLS_CALL:
15418 case R_ARM_THM_TLS_CALL:
15419 /* This symbol requires a global offset table entry. */
15421 int tls_type, old_tls_type;
15423 switch (r_type)
15425 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15426 case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15428 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15429 case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15431 case R_ARM_TLS_GOTDESC:
15432 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15433 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15434 tls_type = GOT_TLS_GDESC; break;
15436 default: tls_type = GOT_NORMAL; break;
15439 if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15440 info->flags |= DF_STATIC_TLS;
15442 if (h != NULL)
15444 h->got.refcount++;
15445 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15447 else
15449 /* This is a global offset table entry for a local symbol. */
15450 if (!elf32_arm_allocate_local_sym_info (abfd))
15451 return FALSE;
15452 elf_local_got_refcounts (abfd)[r_symndx] += 1;
15453 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15456 /* If a variable is accessed with both tls methods, two
15457 slots may be created. */
15458 if (GOT_TLS_GD_ANY_P (old_tls_type)
15459 && GOT_TLS_GD_ANY_P (tls_type))
15460 tls_type |= old_tls_type;
15462 /* We will already have issued an error message if there
15463 is a TLS/non-TLS mismatch, based on the symbol
15464 type. So just combine any TLS types needed. */
15465 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15466 && tls_type != GOT_NORMAL)
15467 tls_type |= old_tls_type;
15469 /* If the symbol is accessed with both the IE and GDESC
15470 methods, we are able to relax. Turn off the GDESC flag
15471 without disturbing any other TLS types that may be
15472 involved. */
15473 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15474 tls_type &= ~GOT_TLS_GDESC;
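	    /* For example, a symbol referenced through both R_ARM_TLS_IE32
	       and R_ARM_TLS_GOTDESC ends up with GOT_TLS_IE only, so the
	       descriptor sequence can later be relaxed to the IE form.  */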
15476 if (old_tls_type != tls_type)
15478 if (h != NULL)
15479 elf32_arm_hash_entry (h)->tls_type = tls_type;
15480 else
15481 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15484 /* Fall through. */
15486 case R_ARM_TLS_LDM32:
15487 case R_ARM_TLS_LDM32_FDPIC:
15488 if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15489 htab->tls_ldm_got.refcount++;
15490 /* Fall through. */
15492 case R_ARM_GOTOFF32:
15493 case R_ARM_GOTPC:
15494 if (htab->root.sgot == NULL
15495 && !create_got_section (htab->root.dynobj, info))
15496 return FALSE;
15497 break;
15499 case R_ARM_PC24:
15500 case R_ARM_PLT32:
15501 case R_ARM_CALL:
15502 case R_ARM_JUMP24:
15503 case R_ARM_PREL31:
15504 case R_ARM_THM_CALL:
15505 case R_ARM_THM_JUMP24:
15506 case R_ARM_THM_JUMP19:
15507 call_reloc_p = TRUE;
15508 may_need_local_target_p = TRUE;
15509 break;
15511 case R_ARM_ABS12:
15512 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15513 ldr __GOTT_INDEX__ offsets. */
15514 if (!htab->vxworks_p)
15516 may_need_local_target_p = TRUE;
15517 break;
15519 else goto jump_over;
15521 /* Fall through. */
15523 case R_ARM_MOVW_ABS_NC:
15524 case R_ARM_MOVT_ABS:
15525 case R_ARM_THM_MOVW_ABS_NC:
15526 case R_ARM_THM_MOVT_ABS:
15527 if (bfd_link_pic (info))
15529 _bfd_error_handler
15530 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15531 abfd, elf32_arm_howto_table_1[r_type].name,
15532 (h) ? h->root.root.string : "a local symbol");
15533 bfd_set_error (bfd_error_bad_value);
15534 return FALSE;
15537 /* Fall through. */
15538 case R_ARM_ABS32:
15539 case R_ARM_ABS32_NOI:
15540 jump_over:
15541 if (h != NULL && bfd_link_executable (info))
15543 h->pointer_equality_needed = 1;
15545 /* Fall through. */
15546 case R_ARM_REL32:
15547 case R_ARM_REL32_NOI:
15548 case R_ARM_MOVW_PREL_NC:
15549 case R_ARM_MOVT_PREL:
15550 case R_ARM_THM_MOVW_PREL_NC:
15551 case R_ARM_THM_MOVT_PREL:
15553 /* Should the interworking branches be listed here? */
15554 if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
15555 || htab->fdpic_p)
15556 && (sec->flags & SEC_ALLOC) != 0)
15558 if (h == NULL
15559 && elf32_arm_howto_from_type (r_type)->pc_relative)
15561 /* In shared libraries and relocatable executables,
15562 we treat local relative references as calls;
15563 see the related SYMBOL_CALLS_LOCAL code in
15564 allocate_dynrelocs. */
15565 call_reloc_p = TRUE;
15566 may_need_local_target_p = TRUE;
15568 else
15569 /* We are creating a shared library or relocatable
15570 executable, and this is a reloc against a global symbol,
15571 or a non-PC-relative reloc against a local symbol.
15572 We may need to copy the reloc into the output. */
15573 may_become_dynamic_p = TRUE;
15575 else
15576 may_need_local_target_p = TRUE;
15577 break;
15579 /* This relocation describes the C++ object vtable hierarchy.
15580 Reconstruct it for later use during GC. */
15581 case R_ARM_GNU_VTINHERIT:
15582 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15583 return FALSE;
15584 break;
15586 /* This relocation describes which C++ vtable entries are actually
15587 used. Record for later use during GC. */
15588 case R_ARM_GNU_VTENTRY:
15589 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15590 return FALSE;
15591 break;
15594 if (h != NULL)
15596 if (call_reloc_p)
15597 /* We may need a .plt entry if the function this reloc
15598 refers to is in a different object, regardless of the
15599 symbol's type. We can't tell for sure yet, because
15600 something later might force the symbol local. */
15601 h->needs_plt = 1;
15602 else if (may_need_local_target_p)
15603 /* If this reloc is in a read-only section, we might
15604 need a copy reloc. We can't check reliably at this
15605 stage whether the section is read-only, as input
15606 sections have not yet been mapped to output sections.
15607 Tentatively set the flag for now, and correct in
15608 adjust_dynamic_symbol. */
15609 h->non_got_ref = 1;
15612 if (may_need_local_target_p
15613 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15615 union gotplt_union *root_plt;
15616 struct arm_plt_info *arm_plt;
15617 struct arm_local_iplt_info *local_iplt;
15619 if (h != NULL)
15621 root_plt = &h->plt;
15622 arm_plt = &eh->plt;
15624 else
15626 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15627 if (local_iplt == NULL)
15628 return FALSE;
15629 root_plt = &local_iplt->root;
15630 arm_plt = &local_iplt->arm;
15633 /* If the symbol is a function that doesn't bind locally,
15634 this relocation will need a PLT entry. */
15635 if (root_plt->refcount != -1)
15636 root_plt->refcount += 1;
15638 if (!call_reloc_p)
15639 arm_plt->noncall_refcount++;
15641 /* It's too early to use htab->use_blx here, so we have to
15642 record possible blx references separately from
15643 relocs that definitely need a thumb stub. */
15645 if (r_type == R_ARM_THM_CALL)
15646 arm_plt->maybe_thumb_refcount += 1;
15648 if (r_type == R_ARM_THM_JUMP24
15649 || r_type == R_ARM_THM_JUMP19)
15650 arm_plt->thumb_refcount += 1;
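	    /* In other words, a Thumb BL (R_ARM_THM_CALL) may be able to use
	       BLX and so only *might* need an interworking stub, whereas a
	       Thumb B/Bcc to ARM code always does.  */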
15653 if (may_become_dynamic_p)
15655 struct elf_dyn_relocs *p, **head;
15657 /* Create a reloc section in dynobj. */
15658 if (sreloc == NULL)
15660 sreloc = _bfd_elf_make_dynamic_reloc_section
15661 (sec, dynobj, 2, abfd, ! htab->use_rel);
15663 if (sreloc == NULL)
15664 return FALSE;
15666 /* BPABI objects never have dynamic relocations mapped. */
15667 if (htab->symbian_p)
15669 flagword flags;
15671 flags = bfd_get_section_flags (dynobj, sreloc);
15672 flags &= ~(SEC_LOAD | SEC_ALLOC);
15673 bfd_set_section_flags (dynobj, sreloc, flags);
15677 /* If this is a global symbol, count the number of
15678 relocations we need for this symbol. */
15679 if (h != NULL)
15680 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
15681 else
15683 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15684 if (head == NULL)
15685 return FALSE;
15688 p = *head;
15689 if (p == NULL || p->sec != sec)
15691 bfd_size_type amt = sizeof *p;
15693 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15694 if (p == NULL)
15695 return FALSE;
15696 p->next = *head;
15697 *head = p;
15698 p->sec = sec;
15699 p->count = 0;
15700 p->pc_count = 0;
15703 if (elf32_arm_howto_from_type (r_type)->pc_relative)
15704 p->pc_count += 1;
15705 p->count += 1;
15706 if (h == NULL && htab->fdpic_p && !bfd_link_pic(info)
15707 && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI) {
15708 /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI,
15709 because we assume that every relocation handled here
15710 will become a rofixup. */
15711 fprintf(stderr, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type);
15712 _bfd_error_handler
15713 (_("FDPIC does not yet support %s relocation"
15714 " to become dynamic for executable"),
15715 elf32_arm_howto_table_1[r_type].name);
15716 abort();
15721 return TRUE;
15724 static void
15725 elf32_arm_update_relocs (asection *o,
15726 struct bfd_elf_section_reloc_data *reldata)
15728 void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
15729 void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
15730 const struct elf_backend_data *bed;
15731 _arm_elf_section_data *eado;
15732 struct bfd_link_order *p;
15733 bfd_byte *erela_head, *erela;
15734 Elf_Internal_Rela *irela_head, *irela;
15735 Elf_Internal_Shdr *rel_hdr;
15736 bfd *abfd;
15737 unsigned int count;
15739 eado = get_arm_elf_section_data (o);
15741 if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
15742 return;
15744 abfd = o->owner;
15745 bed = get_elf_backend_data (abfd);
15746 rel_hdr = reldata->hdr;
15748 if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
15750 swap_in = bed->s->swap_reloc_in;
15751 swap_out = bed->s->swap_reloc_out;
15753 else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
15755 swap_in = bed->s->swap_reloca_in;
15756 swap_out = bed->s->swap_reloca_out;
15758 else
15759 abort ();
15761 erela_head = rel_hdr->contents;
15762 irela_head = (Elf_Internal_Rela *) bfd_zmalloc
15763 ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));
15765 erela = erela_head;
15766 irela = irela_head;
15767 count = 0;
15769 for (p = o->map_head.link_order; p; p = p->next)
15771 if (p->type == bfd_section_reloc_link_order
15772 || p->type == bfd_symbol_reloc_link_order)
15774 (*swap_in) (abfd, erela, irela);
15775 erela += rel_hdr->sh_entsize;
15776 irela++;
15777 count++;
15779 else if (p->type == bfd_indirect_link_order)
15781 struct bfd_elf_section_reloc_data *input_reldata;
15782 arm_unwind_table_edit *edit_list, *edit_tail;
15783 _arm_elf_section_data *eadi;
15784 bfd_size_type j;
15785 bfd_vma offset;
15786 asection *i;
15788 i = p->u.indirect.section;
15790 eadi = get_arm_elf_section_data (i);
15791 edit_list = eadi->u.exidx.unwind_edit_list;
15792 edit_tail = eadi->u.exidx.unwind_edit_tail;
15793 offset = o->vma + i->output_offset;
15795 if (eadi->elf.rel.hdr &&
15796 eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
15797 input_reldata = &eadi->elf.rel;
15798 else if (eadi->elf.rela.hdr &&
15799 eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
15800 input_reldata = &eadi->elf.rela;
15801 else
15802 abort ();
15804 if (edit_list)
15806 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15808 arm_unwind_table_edit *edit_node, *edit_next;
15809 bfd_vma bias;
15810 bfd_vma reloc_index;
15812 (*swap_in) (abfd, erela, irela);
15813 reloc_index = (irela->r_offset - offset) / 8;
15815 bias = 0;
15816 edit_node = edit_list;
15817 for (edit_next = edit_list;
15818 edit_next && edit_next->index <= reloc_index;
15819 edit_next = edit_node->next)
15821 bias++;
15822 edit_node = edit_next;
15825 if (edit_node->type != DELETE_EXIDX_ENTRY
15826 || edit_node->index != reloc_index)
15828 irela->r_offset -= bias * 8;
15829 irela++;
15830 count++;
15833 erela += rel_hdr->sh_entsize;
15836 if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
15838 /* New relocation entry. */
15839 asection *text_sec = edit_tail->linked_section;
15840 asection *text_out = text_sec->output_section;
15841 bfd_vma exidx_offset = offset + i->size - 8;
15843 irela->r_addend = 0;
15844 irela->r_offset = exidx_offset;
15845 irela->r_info = ELF32_R_INFO
15846 (text_out->target_index, R_ARM_PREL31);
15847 irela++;
15848 count++;
15851 else
15853 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15855 (*swap_in) (abfd, erela, irela);
15856 erela += rel_hdr->sh_entsize;
15857 irela++;
15860 count += NUM_SHDR_ENTRIES (input_reldata->hdr);
15865 reldata->count = count;
15866 rel_hdr->sh_size = count * rel_hdr->sh_entsize;
15868 erela = erela_head;
15869 irela = irela_head;
15870 while (count > 0)
15872 (*swap_out) (abfd, irela, erela);
15873 erela += rel_hdr->sh_entsize;
15874 irela++;
15875 count--;
15878 free (irela_head);
15880 /* Hashes are no longer valid. */
15881 free (reldata->hashes);
15882 reldata->hashes = NULL;
15885 /* Unwinding tables are not referenced directly. This pass marks them as
15886 required if the corresponding code section is marked. Similarly, ARMv8-M
15887 secure entry functions can only be referenced by SG veneers which are
15888 created after the GC process. They need to be marked in case they reside in
15889 their own section (as would be the case if code was compiled with
15890 -ffunction-sections). */
15892 static bfd_boolean
15893 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
15894 elf_gc_mark_hook_fn gc_mark_hook)
15896 bfd *sub;
15897 Elf_Internal_Shdr **elf_shdrp;
15898 asection *cmse_sec;
15899 obj_attribute *out_attr;
15900 Elf_Internal_Shdr *symtab_hdr;
15901 unsigned i, sym_count, ext_start;
15902 const struct elf_backend_data *bed;
15903 struct elf_link_hash_entry **sym_hashes;
15904 struct elf32_arm_link_hash_entry *cmse_hash;
15905 bfd_boolean again, is_v8m, first_bfd_browse = TRUE;
15907 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
15909 out_attr = elf_known_obj_attributes_proc (info->output_bfd);
15910 is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
15911 && out_attr[Tag_CPU_arch_profile].i == 'M';
15913 /* Marking EH data may cause additional code sections to be marked,
15914 requiring multiple passes. */
15915 again = TRUE;
15916 while (again)
15918 again = FALSE;
15919 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
15921 asection *o;
15923 if (! is_arm_elf (sub))
15924 continue;
15926 elf_shdrp = elf_elfsections (sub);
15927 for (o = sub->sections; o != NULL; o = o->next)
15929 Elf_Internal_Shdr *hdr;
15931 hdr = &elf_section_data (o)->this_hdr;
15932 if (hdr->sh_type == SHT_ARM_EXIDX
15933 && hdr->sh_link
15934 && hdr->sh_link < elf_numsections (sub)
15935 && !o->gc_mark
15936 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
15938 again = TRUE;
15939 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
15940 return FALSE;
15944 /* Mark sections holding ARMv8-M secure entry functions. We mark
15945 all of them, so there is no need for a second pass. */
15946 if (is_v8m && first_bfd_browse)
15948 sym_hashes = elf_sym_hashes (sub);
15949 bed = get_elf_backend_data (sub);
15950 symtab_hdr = &elf_tdata (sub)->symtab_hdr;
15951 sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
15952 ext_start = symtab_hdr->sh_info;
15954 /* Scan symbols. */
15955 for (i = ext_start; i < sym_count; i++)
15957 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
15959 /* Assume it is a special symbol. If not, cmse_scan will
15960 warn about it and the user can do something about it. */
15961 if (ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
15963 cmse_sec = cmse_hash->root.root.u.def.section;
15964 if (!cmse_sec->gc_mark
15965 && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
15966 return FALSE;
15971 first_bfd_browse = FALSE;
15974 return TRUE;
15977 /* Treat mapping symbols as special target symbols. */
15979 static bfd_boolean
15980 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
15982 return bfd_is_arm_special_symbol_name (sym->name,
15983 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
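/* For example, the mapping symbols $a, $t and $d are reported as special,
   so generic tools that honour this hook normally omit them from symbol
   listings.  */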
15986 /* This is a copy of elf_find_function() from elf.c except that
15987 ARM mapping symbols are ignored when looking for function names
15988 and STT_ARM_TFUNC is considered to be a function type. */
15990 static bfd_boolean
15991 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
15992 asymbol ** symbols,
15993 asection * section,
15994 bfd_vma offset,
15995 const char ** filename_ptr,
15996 const char ** functionname_ptr)
15998 const char * filename = NULL;
15999 asymbol * func = NULL;
16000 bfd_vma low_func = 0;
16001 asymbol ** p;
16003 for (p = symbols; *p != NULL; p++)
16005 elf_symbol_type *q;
16007 q = (elf_symbol_type *) *p;
16009 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
16011 default:
16012 break;
16013 case STT_FILE:
16014 filename = bfd_asymbol_name (&q->symbol);
16015 break;
16016 case STT_FUNC:
16017 case STT_ARM_TFUNC:
16018 case STT_NOTYPE:
16019 /* Skip mapping symbols. */
16020 if ((q->symbol.flags & BSF_LOCAL)
16021 && bfd_is_arm_special_symbol_name (q->symbol.name,
16022 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
16023 continue;
16024 /* Fall through. */
16025 if (bfd_get_section (&q->symbol) == section
16026 && q->symbol.value >= low_func
16027 && q->symbol.value <= offset)
16029 func = (asymbol *) q;
16030 low_func = q->symbol.value;
16032 break;
16036 if (func == NULL)
16037 return FALSE;
16039 if (filename_ptr)
16040 *filename_ptr = filename;
16041 if (functionname_ptr)
16042 *functionname_ptr = bfd_asymbol_name (func);
16044 return TRUE;
16048 /* Find the nearest line to a particular section and offset, for error
16049 reporting. This code is a duplicate of the code in elf.c, except
16050 that it uses arm_elf_find_function. */
16052 static bfd_boolean
16053 elf32_arm_find_nearest_line (bfd * abfd,
16054 asymbol ** symbols,
16055 asection * section,
16056 bfd_vma offset,
16057 const char ** filename_ptr,
16058 const char ** functionname_ptr,
16059 unsigned int * line_ptr,
16060 unsigned int * discriminator_ptr)
16062 bfd_boolean found = FALSE;
16064 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
16065 filename_ptr, functionname_ptr,
16066 line_ptr, discriminator_ptr,
16067 dwarf_debug_sections, 0,
16068 & elf_tdata (abfd)->dwarf2_find_line_info))
16070 if (!*functionname_ptr)
16071 arm_elf_find_function (abfd, symbols, section, offset,
16072 *filename_ptr ? NULL : filename_ptr,
16073 functionname_ptr);
16075 return TRUE;
16078 /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
16079 uses DWARF1. */
16081 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
16082 & found, filename_ptr,
16083 functionname_ptr, line_ptr,
16084 & elf_tdata (abfd)->line_info))
16085 return FALSE;
16087 if (found && (*functionname_ptr || *line_ptr))
16088 return TRUE;
16090 if (symbols == NULL)
16091 return FALSE;
16093 if (! arm_elf_find_function (abfd, symbols, section, offset,
16094 filename_ptr, functionname_ptr))
16095 return FALSE;
16097 *line_ptr = 0;
16098 return TRUE;
16101 static bfd_boolean
16102 elf32_arm_find_inliner_info (bfd * abfd,
16103 const char ** filename_ptr,
16104 const char ** functionname_ptr,
16105 unsigned int * line_ptr)
16107 bfd_boolean found;
16108 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
16109 functionname_ptr, line_ptr,
16110 & elf_tdata (abfd)->dwarf2_find_line_info);
16111 return found;
16114 /* Find dynamic relocs for H that apply to read-only sections. */
16116 static asection *
16117 readonly_dynrelocs (struct elf_link_hash_entry *h)
16119 struct elf_dyn_relocs *p;
16121 for (p = elf32_arm_hash_entry (h)->dyn_relocs; p != NULL; p = p->next)
16123 asection *s = p->sec->output_section;
16125 if (s != NULL && (s->flags & SEC_READONLY) != 0)
16126 return p->sec;
16128 return NULL;
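/* Note: maybe_set_textrel below uses this helper to decide whether the
   DF_TEXTREL dynamic flag has to be set.  */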
16131 /* Adjust a symbol defined by a dynamic object and referenced by a
16132 regular object. The current definition is in some section of the
16133 dynamic object, but we're not including those sections. We have to
16134 change the definition to something the rest of the link can
16135 understand. */
16137 static bfd_boolean
16138 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
16139 struct elf_link_hash_entry * h)
16141 bfd * dynobj;
16142 asection *s, *srel;
16143 struct elf32_arm_link_hash_entry * eh;
16144 struct elf32_arm_link_hash_table *globals;
16146 globals = elf32_arm_hash_table (info);
16147 if (globals == NULL)
16148 return FALSE;
16150 dynobj = elf_hash_table (info)->dynobj;
16152 /* Make sure we know what is going on here. */
16153 BFD_ASSERT (dynobj != NULL
16154 && (h->needs_plt
16155 || h->type == STT_GNU_IFUNC
16156 || h->is_weakalias
16157 || (h->def_dynamic
16158 && h->ref_regular
16159 && !h->def_regular)));
16161 eh = (struct elf32_arm_link_hash_entry *) h;
16163 /* If this is a function, put it in the procedure linkage table. We
16164 will fill in the contents of the procedure linkage table later,
16165 when we know the address of the .got section. */
16166 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
16168 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
16169 symbol binds locally. */
16170 if (h->plt.refcount <= 0
16171 || (h->type != STT_GNU_IFUNC
16172 && (SYMBOL_CALLS_LOCAL (info, h)
16173 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16174 && h->root.type == bfd_link_hash_undefweak))))
16176 /* This case can occur if we saw a PLT32 reloc in an input
16177 file, but the symbol was never referred to by a dynamic
16178 object, or if all references were garbage collected. In
16179 such a case, we don't actually need to build a procedure
16180 linkage table, and we can just do a PC24 reloc instead. */
16181 h->plt.offset = (bfd_vma) -1;
16182 eh->plt.thumb_refcount = 0;
16183 eh->plt.maybe_thumb_refcount = 0;
16184 eh->plt.noncall_refcount = 0;
16185 h->needs_plt = 0;
16188 return TRUE;
16190 else
16192 /* It's possible that we incorrectly decided a .plt reloc was
16193 needed for an R_ARM_PC24 or similar reloc to a non-function sym
16194 in check_relocs. We can't decide accurately between function
16195 and non-function syms in check_relocs; objects loaded later in
16196 the link may change h->type. So fix it now. */
16197 h->plt.offset = (bfd_vma) -1;
16198 eh->plt.thumb_refcount = 0;
16199 eh->plt.maybe_thumb_refcount = 0;
16200 eh->plt.noncall_refcount = 0;
16203 /* If this is a weak symbol, and there is a real definition, the
16204 processor independent code will have arranged for us to see the
16205 real definition first, and we can just use the same value. */
16206 if (h->is_weakalias)
16208 struct elf_link_hash_entry *def = weakdef (h);
16209 BFD_ASSERT (def->root.type == bfd_link_hash_defined);
16210 h->root.u.def.section = def->root.u.def.section;
16211 h->root.u.def.value = def->root.u.def.value;
16212 return TRUE;
16215 /* If there are no non-GOT references, we do not need a copy
16216 relocation. */
16217 if (!h->non_got_ref)
16218 return TRUE;
16220 /* This is a reference to a symbol defined by a dynamic object which
16221 is not a function. */
16223 /* If we are creating a shared library, we must presume that the
16224 only references to the symbol are via the global offset table.
16225 For such cases we need not do anything here; the relocations will
16226 be handled correctly by relocate_section. Relocatable executables
16227 can reference data in shared objects directly, so we don't need to
16228 do anything here. */
16229 if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
16230 return TRUE;
16232 /* We must allocate the symbol in our .dynbss section, which will
16233 become part of the .bss section of the executable. There will be
16234 an entry for this symbol in the .dynsym section. The dynamic
16235 object will contain position independent code, so all references
16236 from the dynamic object to this symbol will go through the global
16237 offset table. The dynamic linker will use the .dynsym entry to
16238 determine the address it must put in the global offset table, so
16239 both the dynamic object and the regular object will refer to the
16240 same memory location for the variable. */
16241 /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
16242 linker to copy the initial value out of the dynamic object and into
16243 the runtime process image. We need to remember the offset into the
16244 .rel(a).bss section we are going to use. */
16245 if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
16247 s = globals->root.sdynrelro;
16248 srel = globals->root.sreldynrelro;
16250 else
16252 s = globals->root.sdynbss;
16253 srel = globals->root.srelbss;
16255 if (info->nocopyreloc == 0
16256 && (h->root.u.def.section->flags & SEC_ALLOC) != 0
16257 && h->size != 0)
16259 elf32_arm_allocate_dynrelocs (info, srel, 1);
16260 h->needs_copy = 1;
16263 return _bfd_elf_adjust_dynamic_copy (info, h, s);
16266 /* Allocate space in .plt, .got and associated reloc sections for
16267 dynamic relocs. */
16269 static bfd_boolean
16270 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
16272 struct bfd_link_info *info;
16273 struct elf32_arm_link_hash_table *htab;
16274 struct elf32_arm_link_hash_entry *eh;
16275 struct elf_dyn_relocs *p;
16277 if (h->root.type == bfd_link_hash_indirect)
16278 return TRUE;
16280 eh = (struct elf32_arm_link_hash_entry *) h;
16282 info = (struct bfd_link_info *) inf;
16283 htab = elf32_arm_hash_table (info);
16284 if (htab == NULL)
16285 return FALSE;
16287 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
16288 && h->plt.refcount > 0)
16290 /* Make sure this symbol is output as a dynamic symbol.
16291 Undefined weak syms won't yet be marked as dynamic. */
16292 if (h->dynindx == -1 && !h->forced_local
16293 && h->root.type == bfd_link_hash_undefweak)
16295 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16296 return FALSE;
16299 /* If the call in the PLT entry binds locally, the associated
16300 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16301 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
16302 than the .plt section. */
16303 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
16305 eh->is_iplt = 1;
16306 if (eh->plt.noncall_refcount == 0
16307 && SYMBOL_REFERENCES_LOCAL (info, h))
16308 /* All non-call references can be resolved directly.
16309 This means that they can (and in some cases, must)
16310 resolve directly to the run-time target, rather than
16311 to the PLT. That in turn means that any .got entry
16312 would be equal to the .igot.plt entry, so there's
16313 no point having both. */
16314 h->got.refcount = 0;
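	    /* Zeroing the refcount means no separate .got slot is sized
	       further down; the .igot.plt entry created for the iplt is
	       assumed to serve instead.  */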
16317 if (bfd_link_pic (info)
16318 || eh->is_iplt
16319 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
16321 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
16323 /* If this symbol is not defined in a regular file, and we are
16324 not generating a shared library, then set the symbol to this
16325 location in the .plt. This is required to make function
16326 pointers compare as equal between the normal executable and
16327 the shared library. */
16328 if (! bfd_link_pic (info)
16329 && !h->def_regular)
16331 h->root.u.def.section = htab->root.splt;
16332 h->root.u.def.value = h->plt.offset;
16334 /* Make sure the function is not marked as Thumb, in case
16335 it is the target of an ABS32 relocation, which will
16336 point to the PLT entry. */
16337 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16340 /* VxWorks executables have a second set of relocations for
16341 each PLT entry. They go in a separate relocation section,
16342 which is processed by the kernel loader. */
16343 if (htab->vxworks_p && !bfd_link_pic (info))
16345 /* There is a relocation for the initial PLT entry:
16346 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
16347 if (h->plt.offset == htab->plt_header_size)
16348 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
16350 /* There are two extra relocations for each subsequent
16351 PLT entry: an R_ARM_32 relocation for the GOT entry,
16352 and an R_ARM_32 relocation for the PLT entry. */
16353 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
16356 else
16358 h->plt.offset = (bfd_vma) -1;
16359 h->needs_plt = 0;
16362 else
16364 h->plt.offset = (bfd_vma) -1;
16365 h->needs_plt = 0;
16368 eh = (struct elf32_arm_link_hash_entry *) h;
16369 eh->tlsdesc_got = (bfd_vma) -1;
16371 if (h->got.refcount > 0)
16373 asection *s;
16374 bfd_boolean dyn;
16375 int tls_type = elf32_arm_hash_entry (h)->tls_type;
16376 int indx;
16378 /* Make sure this symbol is output as a dynamic symbol.
16379 Undefined weak syms won't yet be marked as dynamic. */
16380 if (htab->root.dynamic_sections_created && h->dynindx == -1 && !h->forced_local
16381 && h->root.type == bfd_link_hash_undefweak)
16383 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16384 return FALSE;
16387 if (!htab->symbian_p)
16389 s = htab->root.sgot;
16390 h->got.offset = s->size;
16392 if (tls_type == GOT_UNKNOWN)
16393 abort ();
16395 if (tls_type == GOT_NORMAL)
16396 /* Non-TLS symbols need one GOT slot. */
16397 s->size += 4;
16398 else
16400 if (tls_type & GOT_TLS_GDESC)
16402 /* R_ARM_TLS_DESC needs 2 GOT slots. */
16403 eh->tlsdesc_got
16404 = (htab->root.sgotplt->size
16405 - elf32_arm_compute_jump_table_size (htab));
16406 htab->root.sgotplt->size += 8;
16407 h->got.offset = (bfd_vma) -2;
16408 /* plt.got_offset needs to know there's a TLS_DESC
16409 reloc in the middle of .got.plt. */
16410 htab->num_tls_desc++;
16413 if (tls_type & GOT_TLS_GD)
16415 /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16416 consecutive GOT slots. If the symbol is both GD
16417 and GDESC, got.offset may have been
16418 overwritten. */
16419 h->got.offset = s->size;
16420 s->size += 8;
16423 if (tls_type & GOT_TLS_IE)
16424 /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16425 slot. */
16426 s->size += 4;
16429 dyn = htab->root.dynamic_sections_created;
16431 indx = 0;
16432 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
16433 bfd_link_pic (info),
16435 && (!bfd_link_pic (info)
16436 || !SYMBOL_REFERENCES_LOCAL (info, h)))
16437 indx = h->dynindx;
16439 if (tls_type != GOT_NORMAL
16440 && (bfd_link_pic (info) || indx != 0)
16441 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16442 || h->root.type != bfd_link_hash_undefweak))
16444 if (tls_type & GOT_TLS_IE)
16445 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16447 if (tls_type & GOT_TLS_GD)
16448 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16450 if (tls_type & GOT_TLS_GDESC)
16452 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
16453 /* GDESC needs a trampoline to jump to. */
16454 htab->tls_trampoline = -1;
16457 /* Only GD needs it. GDESC just emits one relocation per
16458 2 entries. */
16459 if ((tls_type & GOT_TLS_GD) && indx != 0)
16460 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16462 else if (((indx != -1) || htab->fdpic_p)
16463 && !SYMBOL_REFERENCES_LOCAL (info, h))
16465 if (htab->root.dynamic_sections_created)
16466 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
16467 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16469 else if (h->type == STT_GNU_IFUNC
16470 && eh->plt.noncall_refcount == 0)
16471 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16472 they all resolve dynamically instead. Reserve room for the
16473 GOT entry's R_ARM_IRELATIVE relocation. */
16474 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
16475 else if (bfd_link_pic (info)
16476 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16477 || h->root.type != bfd_link_hash_undefweak))
16478 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
16479 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16480 else if (htab->fdpic_p && tls_type == GOT_NORMAL)
16481 /* Reserve room for rofixup for FDPIC executable. */
16482 /* TLS relocs do not need space since they are completely
16483 resolved. */
16484 htab->srofixup->size += 4;
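	/* Each rofixup is assumed to be a single 32-bit address for the
	   FDPIC loader to adjust at start-up, hence the 4-byte (one fixup)
	   and 8-byte (two fixup) increments used in this function.  */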
16487 else
16488 h->got.offset = (bfd_vma) -1;
16490 /* FDPIC support. */
16491 if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
16493 /* Symbol mustn't be exported. */
16494 if (h->dynindx != -1)
16495 abort();
16497 /* We only allocate one function descriptor with its associated relocation. */
16498 if (eh->fdpic_cnts.funcdesc_offset == -1)
16500 asection *s = htab->root.sgot;
16502 eh->fdpic_cnts.funcdesc_offset = s->size;
16503 s->size += 8;
16504 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16505 if (bfd_link_pic(info))
16506 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16507 else
16508 htab->srofixup->size += 8;
16512 if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
16514 asection *s = htab->root.sgot;
16516 if (htab->root.dynamic_sections_created && h->dynindx == -1
16517 && !h->forced_local)
16518 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16519 return FALSE;
16521 if (h->dynindx == -1)
16523 /* We only allocate one function descriptor with its associated relocation. */
16524 if (eh->fdpic_cnts.funcdesc_offset == -1)
16527 eh->fdpic_cnts.funcdesc_offset = s->size;
16528 s->size += 8;
16529 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16530 if (bfd_link_pic(info))
16531 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16532 else
16533 htab->srofixup->size += 8;
16537 /* Add one entry into the GOT and an R_ARM_FUNCDESC or
16538 R_ARM_RELATIVE/rofixup relocation on it. */
16539 eh->fdpic_cnts.gotfuncdesc_offset = s->size;
16540 s->size += 4;
16541 if (h->dynindx == -1 && !bfd_link_pic(info))
16542 htab->srofixup->size += 4;
16543 else
16544 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16547 if (eh->fdpic_cnts.funcdesc_cnt > 0)
16549 if (htab->root.dynamic_sections_created && h->dynindx == -1
16550 && !h->forced_local)
16551 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16552 return FALSE;
16554 if (h->dynindx == -1)
16556 /* We only allocate one function descriptor with its associated relocation. */
16557 if (eh->fdpic_cnts.funcdesc_offset == -1)
16559 asection *s = htab->root.sgot;
16561 eh->fdpic_cnts.funcdesc_offset = s->size;
16562 s->size += 8;
16563 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16564 if (bfd_link_pic(info))
16565 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16566 else
16567 htab->srofixup->size += 8;
16570 if (h->dynindx == -1 && !bfd_link_pic(info))
16572 /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
16573 htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
16575 else
16577 /* We will need one dynamic reloc per reference; it will be either
16578 R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols. */
16579 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
16580 eh->fdpic_cnts.funcdesc_cnt);
16584 /* Allocate stubs for exported Thumb functions on v4t. */
16585 if (!htab->use_blx && h->dynindx != -1
16586 && h->def_regular
16587 && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
16588 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
16590 struct elf_link_hash_entry * th;
16591 struct bfd_link_hash_entry * bh;
16592 struct elf_link_hash_entry * myh;
16593 char name[1024];
16594 asection *s;
16595 bh = NULL;
16596 /* Create a new symbol to register the real location of the function. */
16597 s = h->root.u.def.section;
16598 sprintf (name, "__real_%s", h->root.root.string);
16599 _bfd_generic_link_add_one_symbol (info, s->owner,
16600 name, BSF_GLOBAL, s,
16601 h->root.u.def.value,
16602 NULL, TRUE, FALSE, &bh);
16604 myh = (struct elf_link_hash_entry *) bh;
16605 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16606 myh->forced_local = 1;
16607 ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
16608 eh->export_glue = myh;
16609 th = record_arm_to_thumb_glue (info, h);
16610 /* Point the symbol at the stub. */
16611 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
16612 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16613 h->root.u.def.section = th->root.u.def.section;
16614 h->root.u.def.value = th->root.u.def.value & ~1;
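	  /* From this point the exported symbol resolves to the ARM-state
	     glue stub (with bit zero clear), while the __real_<name> alias
	     created above still refers to the original Thumb definition.  */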
16617 if (eh->dyn_relocs == NULL)
16618 return TRUE;
16620 /* In the shared -Bsymbolic case, discard space allocated for
16621 dynamic pc-relative relocs against symbols which turn out to be
16622 defined in regular objects. For the normal shared case, discard
16623 space for pc-relative relocs that have become local due to symbol
16624 visibility changes. */
16626 if (bfd_link_pic (info) || htab->root.is_relocatable_executable || htab->fdpic_p)
16628 /* Relocs that use pc_count are PC-relative forms, which will appear
16629 on something like ".long foo - ." or "movw REG, foo - .". We want
16630 calls to protected symbols to resolve directly to the function
16631 rather than going via the plt. If people want function pointer
16632 comparisons to work as expected then they should avoid writing
16633 assembly like ".long foo - .". */
16634 if (SYMBOL_CALLS_LOCAL (info, h))
16636 struct elf_dyn_relocs **pp;
16638 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
16640 p->count -= p->pc_count;
16641 p->pc_count = 0;
16642 if (p->count == 0)
16643 *pp = p->next;
16644 else
16645 pp = &p->next;
16649 if (htab->vxworks_p)
16651 struct elf_dyn_relocs **pp;
16653 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
16655 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
16656 *pp = p->next;
16657 else
16658 pp = &p->next;
16662 /* Also discard relocs on undefined weak syms with non-default
16663 visibility. */
16664 if (eh->dyn_relocs != NULL
16665 && h->root.type == bfd_link_hash_undefweak)
16667 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16668 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16669 eh->dyn_relocs = NULL;
16671 /* Make sure undefined weak symbols are output as a dynamic
16672 symbol in PIEs. */
16673 else if (htab->root.dynamic_sections_created && h->dynindx == -1
16674 && !h->forced_local)
16676 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16677 return FALSE;
16681 else if (htab->root.is_relocatable_executable && h->dynindx == -1
16682 && h->root.type == bfd_link_hash_new)
16684 /* Output absolute symbols so that we can create relocations
16685 against them. For normal symbols we output a relocation
16686 against the section that contains them. */
16687 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16688 return FALSE;
16692 else
16694 /* For the non-shared case, discard space for relocs against
16695 symbols which turn out to need copy relocs or are not
16696 dynamic. */
16698 if (!h->non_got_ref
16699 && ((h->def_dynamic
16700 && !h->def_regular)
16701 || (htab->root.dynamic_sections_created
16702 && (h->root.type == bfd_link_hash_undefweak
16703 || h->root.type == bfd_link_hash_undefined))))
16705 /* Make sure this symbol is output as a dynamic symbol.
16706 Undefined weak syms won't yet be marked as dynamic. */
16707 if (h->dynindx == -1 && !h->forced_local
16708 && h->root.type == bfd_link_hash_undefweak)
16710 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16711 return FALSE;
16714 /* If that succeeded, we know we'll be keeping all the
16715 relocs. */
16716 if (h->dynindx != -1)
16717 goto keep;
16720 eh->dyn_relocs = NULL;
16722 keep: ;
16725 /* Finally, allocate space. */
16726 for (p = eh->dyn_relocs; p != NULL; p = p->next)
16728 asection *sreloc = elf_section_data (p->sec)->sreloc;
16730 if (h->type == STT_GNU_IFUNC
16731 && eh->plt.noncall_refcount == 0
16732 && SYMBOL_REFERENCES_LOCAL (info, h))
16733 elf32_arm_allocate_irelocs (info, sreloc, p->count);
16734 else if (h->dynindx != -1 && (!bfd_link_pic(info) || !info->symbolic || !h->def_regular))
16735 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16736 else if (htab->fdpic_p && !bfd_link_pic(info))
16737 htab->srofixup->size += 4 * p->count;
16738 else
16739 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16742 return TRUE;
16745 /* Set DF_TEXTREL if we find any dynamic relocs that apply to
16746 read-only sections. */
16748 static bfd_boolean
16749 maybe_set_textrel (struct elf_link_hash_entry *h, void *info_p)
16751 asection *sec;
16753 if (h->root.type == bfd_link_hash_indirect)
16754 return TRUE;
16756 sec = readonly_dynrelocs (h);
16757 if (sec != NULL)
16759 struct bfd_link_info *info = (struct bfd_link_info *) info_p;
16761 info->flags |= DF_TEXTREL;
16762 info->callbacks->minfo
16763 (_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
16764 sec->owner, h->root.root.string, sec);
16766 /* Not an error, just cut short the traversal. */
16767 return FALSE;
16770 return TRUE;
16773 void
16774 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16775 int byteswap_code)
16777 struct elf32_arm_link_hash_table *globals;
16779 globals = elf32_arm_hash_table (info);
16780 if (globals == NULL)
16781 return;
16783 globals->byteswap_code = byteswap_code;
16786 /* Set the sizes of the dynamic sections. */
16788 static bfd_boolean
16789 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
16790 struct bfd_link_info * info)
16792 bfd * dynobj;
16793 asection * s;
16794 bfd_boolean plt;
16795 bfd_boolean relocs;
16796 bfd *ibfd;
16797 struct elf32_arm_link_hash_table *htab;
16799 htab = elf32_arm_hash_table (info);
16800 if (htab == NULL)
16801 return FALSE;
16803 dynobj = elf_hash_table (info)->dynobj;
16804 BFD_ASSERT (dynobj != NULL);
16805 check_use_blx (htab);
16807 if (elf_hash_table (info)->dynamic_sections_created)
16809 /* Set the contents of the .interp section to the interpreter. */
16810 if (bfd_link_executable (info) && !info->nointerp)
16812 s = bfd_get_linker_section (dynobj, ".interp");
16813 BFD_ASSERT (s != NULL);
16814 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
16815 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
16819 /* Set up .got offsets for local syms, and space for local dynamic
16820 relocs. */
16821 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16823 bfd_signed_vma *local_got;
16824 bfd_signed_vma *end_local_got;
16825 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
16826 char *local_tls_type;
16827 bfd_vma *local_tlsdesc_gotent;
16828 bfd_size_type locsymcount;
16829 Elf_Internal_Shdr *symtab_hdr;
16830 asection *srel;
16831 bfd_boolean is_vxworks = htab->vxworks_p;
16832 unsigned int symndx;
16833 struct fdpic_local *local_fdpic_cnts;
16835 if (! is_arm_elf (ibfd))
16836 continue;
16838 for (s = ibfd->sections; s != NULL; s = s->next)
16840 struct elf_dyn_relocs *p;
16842 for (p = (struct elf_dyn_relocs *)
16843 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
16845 if (!bfd_is_abs_section (p->sec)
16846 && bfd_is_abs_section (p->sec->output_section))
16848 /* Input section has been discarded, either because
16849 it is a copy of a linkonce section or due to
16850 linker script /DISCARD/, so we'll be discarding
16851 the relocs too. */
16853 else if (is_vxworks
16854 && strcmp (p->sec->output_section->name,
16855 ".tls_vars") == 0)
16857 /* Relocations in vxworks .tls_vars sections are
16858 handled specially by the loader. */
16860 else if (p->count != 0)
16862 srel = elf_section_data (p->sec)->sreloc;
16863 if (htab->fdpic_p && !bfd_link_pic(info))
16864 htab->srofixup->size += 4 * p->count;
16865 else
16866 elf32_arm_allocate_dynrelocs (info, srel, p->count);
16867 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
16868 info->flags |= DF_TEXTREL;
16873 local_got = elf_local_got_refcounts (ibfd);
16874 if (!local_got)
16875 continue;
16877 symtab_hdr = & elf_symtab_hdr (ibfd);
16878 locsymcount = symtab_hdr->sh_info;
16879 end_local_got = local_got + locsymcount;
16880 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
16881 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
16882 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
16883 local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
16884 symndx = 0;
16885 s = htab->root.sgot;
16886 srel = htab->root.srelgot;
16887 for (; local_got < end_local_got;
16888 ++local_got, ++local_iplt_ptr, ++local_tls_type,
16889 ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
16891 *local_tlsdesc_gotent = (bfd_vma) -1;
16892 local_iplt = *local_iplt_ptr;
16894 /* FDPIC support. */
16895 if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
16897 if (local_fdpic_cnts->funcdesc_offset == -1)
16899 local_fdpic_cnts->funcdesc_offset = s->size;
16900 s->size += 8;
16902 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16903 if (bfd_link_pic(info))
16904 elf32_arm_allocate_dynrelocs (info, srel, 1);
16905 else
16906 htab->srofixup->size += 8;
16910 if (local_fdpic_cnts->funcdesc_cnt > 0)
16912 if (local_fdpic_cnts->funcdesc_offset == -1)
16914 local_fdpic_cnts->funcdesc_offset = s->size;
16915 s->size += 8;
16917 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16918 if (bfd_link_pic(info))
16919 elf32_arm_allocate_dynrelocs (info, srel, 1);
16920 else
16921 htab->srofixup->size += 8;
16924 /* We will add n R_ARM_RELATIVE relocations or n rofixups. */
16925 if (bfd_link_pic(info))
16926 elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
16927 else
16928 htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
16931 if (local_iplt != NULL)
16933 struct elf_dyn_relocs *p;
16935 if (local_iplt->root.refcount > 0)
16937 elf32_arm_allocate_plt_entry (info, TRUE,
16938 &local_iplt->root,
16939 &local_iplt->arm);
16940 if (local_iplt->arm.noncall_refcount == 0)
16941 /* All references to the PLT are calls, so all
16942 non-call references can resolve directly to the
16943 run-time target. This means that the .got entry
16944 would be the same as the .igot.plt entry, so there's
16945 no point creating both. */
16946 *local_got = 0;
16948 else
16950 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
16951 local_iplt->root.offset = (bfd_vma) -1;
16954 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
16956 asection *psrel;
16958 psrel = elf_section_data (p->sec)->sreloc;
16959 if (local_iplt->arm.noncall_refcount == 0)
16960 elf32_arm_allocate_irelocs (info, psrel, p->count);
16961 else
16962 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
16965 if (*local_got > 0)
16967 Elf_Internal_Sym *isym;
16969 *local_got = s->size;
16970 if (*local_tls_type & GOT_TLS_GD)
16971 /* TLS_GD relocs need an 8-byte structure in the GOT. */
16972 s->size += 8;
16973 if (*local_tls_type & GOT_TLS_GDESC)
16975 *local_tlsdesc_gotent = htab->root.sgotplt->size
16976 - elf32_arm_compute_jump_table_size (htab);
16977 htab->root.sgotplt->size += 8;
16978 *local_got = (bfd_vma) -2;
16979 /* plt.got_offset needs to know there's a TLS_DESC
16980 reloc in the middle of .got.plt. */
16981 htab->num_tls_desc++;
16983 if (*local_tls_type & GOT_TLS_IE)
16984 s->size += 4;
16986 if (*local_tls_type & GOT_NORMAL)
16988 /* If the symbol is both GD and GDESC, *local_got
16989 may have been overwritten. */
16990 *local_got = s->size;
16991 s->size += 4;
16994 isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
16995 if (isym == NULL)
16996 return FALSE;
16998 /* If all references to an STT_GNU_IFUNC PLT are calls,
16999 then all non-call references, including this GOT entry,
17000 resolve directly to the run-time target. */
17001 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
17002 && (local_iplt == NULL
17003 || local_iplt->arm.noncall_refcount == 0))
17004 elf32_arm_allocate_irelocs (info, srel, 1);
17005 else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
17007 if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
17008 elf32_arm_allocate_dynrelocs (info, srel, 1);
17009 else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
17010 htab->srofixup->size += 4;
17012 if ((bfd_link_pic (info) || htab->fdpic_p)
17013 && *local_tls_type & GOT_TLS_GDESC)
17015 elf32_arm_allocate_dynrelocs (info,
17016 htab->root.srelplt, 1);
17017 htab->tls_trampoline = -1;
17021 else
17022 *local_got = (bfd_vma) -1;
17026 if (htab->tls_ldm_got.refcount > 0)
17028 /* Allocate two GOT entries and one dynamic relocation (if necessary)
17029 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
17030 htab->tls_ldm_got.offset = htab->root.sgot->size;
17031 htab->root.sgot->size += 8;
17032 if (bfd_link_pic (info))
17033 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
17035 else
17036 htab->tls_ldm_got.offset = -1;
17038 /* At the very end of the .rofixup section is a pointer to the GOT;
17039 reserve space for it. */
17040 if (htab->fdpic_p && htab->srofixup != NULL)
17041 htab->srofixup->size += 4;
17043 /* Allocate global sym .plt and .got entries, and space for global
17044 sym dynamic relocs. */
17045 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
17047 /* Here we rummage through the found bfds to collect glue information. */
17048 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
17050 if (! is_arm_elf (ibfd))
17051 continue;
17053 /* Initialise mapping tables for code/data. */
17054 bfd_elf32_arm_init_maps (ibfd);
17056 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
17057 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
17058 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
17059 _bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
17062 /* Allocate space for the glue sections now that we've sized them. */
17063 bfd_elf32_arm_allocate_interworking_sections (info);
17065 /* For every jump slot reserved in the sgotplt, reloc_count is
17066 incremented. However, when we reserve space for TLS descriptors,
17067 it's not incremented, so in order to compute the space reserved
17068 for them, it suffices to multiply the reloc count by the jump
17069 slot size. */
17070 if (htab->root.srelplt)
17071 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);
17073 if (htab->tls_trampoline)
17075 if (htab->root.splt->size == 0)
17076 htab->root.splt->size += htab->plt_header_size;
17078 htab->tls_trampoline = htab->root.splt->size;
17079 htab->root.splt->size += htab->plt_entry_size;
17081 /* If we're not using lazy TLS relocations, don't generate the
17082 PLT and GOT entries they require. */
17083 if (!(info->flags & DF_BIND_NOW))
17085 htab->dt_tlsdesc_got = htab->root.sgot->size;
17086 htab->root.sgot->size += 4;
17088 htab->dt_tlsdesc_plt = htab->root.splt->size;
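/* The lazy TLSDESC trampoline occupies one .plt word per word of the
   dl_tlsdesc_lazy_trampoline template, on top of the GOT word reserved
   just above for the resolver's use.  */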
17089 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
17093 /* The check_relocs and adjust_dynamic_symbol entry points have
17094 determined the sizes of the various dynamic sections. Allocate
17095 memory for them. */
17096 plt = FALSE;
17097 relocs = FALSE;
17098 for (s = dynobj->sections; s != NULL; s = s->next)
17100 const char * name;
17102 if ((s->flags & SEC_LINKER_CREATED) == 0)
17103 continue;
17105 /* It's OK to base decisions on the section name, because none
17106 of the dynobj section names depend upon the input files. */
17107 name = bfd_get_section_name (dynobj, s);
17109 if (s == htab->root.splt)
17111 /* Remember whether there is a PLT. */
17112 plt = s->size != 0;
17114 else if (CONST_STRNEQ (name, ".rel"))
17116 if (s->size != 0)
17118 /* Remember whether there are any reloc sections other
17119 than .rel(a).plt and .rela.plt.unloaded. */
17120 if (s != htab->root.srelplt && s != htab->srelplt2)
17121 relocs = TRUE;
17123 /* We use the reloc_count field as a counter if we need
17124 to copy relocs into the output file. */
17125 s->reloc_count = 0;
17128 else if (s != htab->root.sgot
17129 && s != htab->root.sgotplt
17130 && s != htab->root.iplt
17131 && s != htab->root.igotplt
17132 && s != htab->root.sdynbss
17133 && s != htab->root.sdynrelro
17134 && s != htab->srofixup)
17136 /* It's not one of our sections, so don't allocate space. */
17137 continue;
17140 if (s->size == 0)
17142 /* If we don't need this section, strip it from the
17143 output file. This is mostly to handle .rel(a).bss and
17144 .rel(a).plt. We must create both sections in
17145 create_dynamic_sections, because they must be created
17146 before the linker maps input sections to output
17147 sections. The linker does that before
17148 adjust_dynamic_symbol is called, and it is that
17149 function which decides whether anything needs to go
17150 into these sections. */
17151 s->flags |= SEC_EXCLUDE;
17152 continue;
17155 if ((s->flags & SEC_HAS_CONTENTS) == 0)
17156 continue;
17158 /* Allocate memory for the section contents. */
17159 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
17160 if (s->contents == NULL)
17161 return FALSE;
17164 if (elf_hash_table (info)->dynamic_sections_created)
17166 /* Add some entries to the .dynamic section. We fill in the
17167 values later, in elf32_arm_finish_dynamic_sections, but we
17168 must add the entries now so that we get the correct size for
17169 the .dynamic section. The DT_DEBUG entry is filled in by the
17170 dynamic linker and used by the debugger. */
17171 #define add_dynamic_entry(TAG, VAL) \
17172 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
17174 if (bfd_link_executable (info))
17176 if (!add_dynamic_entry (DT_DEBUG, 0))
17177 return FALSE;
17180 if (plt)
17182 if ( !add_dynamic_entry (DT_PLTGOT, 0)
17183 || !add_dynamic_entry (DT_PLTRELSZ, 0)
17184 || !add_dynamic_entry (DT_PLTREL,
17185 htab->use_rel ? DT_REL : DT_RELA)
17186 || !add_dynamic_entry (DT_JMPREL, 0))
17187 return FALSE;
17189 if (htab->dt_tlsdesc_plt
17190 && (!add_dynamic_entry (DT_TLSDESC_PLT,0)
17191 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
17192 return FALSE;
17195 if (relocs)
17197 if (htab->use_rel)
17199 if (!add_dynamic_entry (DT_REL, 0)
17200 || !add_dynamic_entry (DT_RELSZ, 0)
17201 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
17202 return FALSE;
17204 else
17206 if (!add_dynamic_entry (DT_RELA, 0)
17207 || !add_dynamic_entry (DT_RELASZ, 0)
17208 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
17209 return FALSE;
17213 /* If any dynamic relocs apply to a read-only section,
17214 then we need a DT_TEXTREL entry. */
17215 if ((info->flags & DF_TEXTREL) == 0)
17216 elf_link_hash_traverse (&htab->root, maybe_set_textrel, info);
17218 if ((info->flags & DF_TEXTREL) != 0)
17220 if (!add_dynamic_entry (DT_TEXTREL, 0))
17221 return FALSE;
17223 if (htab->vxworks_p
17224 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
17225 return FALSE;
17227 #undef add_dynamic_entry
17229 return TRUE;
17232 /* Size sections even though they're not dynamic. We use this to set up
17233 _TLS_MODULE_BASE_, if needed. */
17235 static bfd_boolean
17236 elf32_arm_always_size_sections (bfd *output_bfd,
17237 struct bfd_link_info *info)
17239 asection *tls_sec;
17240 struct elf32_arm_link_hash_table *htab;
17242 htab = elf32_arm_hash_table (info);
17244 if (bfd_link_relocatable (info))
17245 return TRUE;
17247 tls_sec = elf_hash_table (info)->tls_sec;
17249 if (tls_sec)
17251 struct elf_link_hash_entry *tlsbase;
17253 tlsbase = elf_link_hash_lookup
17254 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
17256 if (tlsbase)
17258 struct bfd_link_hash_entry *bh = NULL;
17259 const struct elf_backend_data *bed
17260 = get_elf_backend_data (output_bfd);
17262 if (!(_bfd_generic_link_add_one_symbol
17263 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
17264 tls_sec, 0, NULL, FALSE,
17265 bed->collect, &bh)))
17266 return FALSE;
17268 tlsbase->type = STT_TLS;
17269 tlsbase = (struct elf_link_hash_entry *)bh;
17270 tlsbase->def_regular = 1;
17271 tlsbase->other = STV_HIDDEN;
17272 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
17276 if (htab->fdpic_p && !bfd_link_relocatable (info)
17277 && !bfd_elf_stack_segment_size (output_bfd, info,
17278 "__stacksize", DEFAULT_STACK_SIZE))
17279 return FALSE;
17281 return TRUE;
17284 /* Finish up dynamic symbol handling. We set the contents of various
17285 dynamic sections here. */
17287 static bfd_boolean
17288 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
17289 struct bfd_link_info * info,
17290 struct elf_link_hash_entry * h,
17291 Elf_Internal_Sym * sym)
17293 struct elf32_arm_link_hash_table *htab;
17294 struct elf32_arm_link_hash_entry *eh;
17296 htab = elf32_arm_hash_table (info);
17297 if (htab == NULL)
17298 return FALSE;
17300 eh = (struct elf32_arm_link_hash_entry *) h;
17302 if (h->plt.offset != (bfd_vma) -1)
17304 if (!eh->is_iplt)
17306 BFD_ASSERT (h->dynindx != -1);
17307 if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
17308 h->dynindx, 0))
17309 return FALSE;
17312 if (!h->def_regular)
17314 /* Mark the symbol as undefined, rather than as defined in
17315 the .plt section. */
17316 sym->st_shndx = SHN_UNDEF;
17317 /* If the symbol is weak we need to clear the value.
17318 Otherwise, the PLT entry would provide a definition for
17319 the symbol even if the symbol wasn't defined anywhere,
17320 and so the symbol would never be NULL. Leave the value if
17321 there were any relocations where pointer equality matters
17322 (this is a clue for the dynamic linker, to make function
17323 pointer comparisons work between an application and shared
17324 library). */
17325 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
17326 sym->st_value = 0;
17328 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
17330 /* At least one non-call relocation references this .iplt entry,
17331 so the .iplt entry is the function's canonical address. */
17332 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
17333 ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
17334 sym->st_shndx = (_bfd_elf_section_from_bfd_section
17335 (output_bfd, htab->root.iplt->output_section));
17336 sym->st_value = (h->plt.offset
17337 + htab->root.iplt->output_section->vma
17338 + htab->root.iplt->output_offset);
17342 if (h->needs_copy)
17344 asection * s;
17345 Elf_Internal_Rela rel;
17347 /* This symbol needs a copy reloc. Set it up. */
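/* At run time the dynamic linker will copy the symbol's initial value
   from the shared object into the space reserved for it in the
   executable (.dynbss, or .data.rel.ro for read-only-after-relocation
   data).  */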
17348 BFD_ASSERT (h->dynindx != -1
17349 && (h->root.type == bfd_link_hash_defined
17350 || h->root.type == bfd_link_hash_defweak));
17352 rel.r_addend = 0;
17353 rel.r_offset = (h->root.u.def.value
17354 + h->root.u.def.section->output_section->vma
17355 + h->root.u.def.section->output_offset);
17356 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
17357 if (h->root.u.def.section == htab->root.sdynrelro)
17358 s = htab->root.sreldynrelro;
17359 else
17360 s = htab->root.srelbss;
17361 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
17364 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
17365 and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17366 it is relative to the ".got" section. */
17367 if (h == htab->root.hdynamic
17368 || (!htab->fdpic_p && !htab->vxworks_p && h == htab->root.hgot))
17369 sym->st_shndx = SHN_ABS;
17371 return TRUE;
17374 static void
17375 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17376 void *contents,
17377 const unsigned long *template, unsigned count)
17379 unsigned ix;
17381 for (ix = 0; ix != count; ix++)
17383 unsigned long insn = template[ix];
17385 /* Emit mov pc,rx if bx is not permitted. */
17386 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17387 insn = (insn & 0xf000000f) | 0x01a0f000;
17388 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
17392 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17393 other variants, NaCl needs this entry in a static executable's
17394 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17395 zero. For .iplt really only the last bundle is useful, and .iplt
17396 could have a shorter first entry, with each individual PLT entry's
17397 relative branch calculated differently so it targets the last
17398 bundle instead of the instruction before it (labelled .Lplt_tail
17399 above). But it's simpler to keep the size and layout of PLT0
17400 consistent with the dynamic case, at the cost of some dead code at
17401 the start of .iplt and the one dead store to the stack at the start
17402 of .Lplt_tail. */
17403 static void
17404 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17405 asection *plt, bfd_vma got_displacement)
17407 unsigned int i;
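/* The first two template words get the low and high halves of the GOT
   displacement merged in as MOVW/MOVT immediates; the remaining words
   are emitted unchanged.  */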
17409 put_arm_insn (htab, output_bfd,
17410 elf32_arm_nacl_plt0_entry[0]
17411 | arm_movw_immediate (got_displacement),
17412 plt->contents + 0);
17413 put_arm_insn (htab, output_bfd,
17414 elf32_arm_nacl_plt0_entry[1]
17415 | arm_movt_immediate (got_displacement),
17416 plt->contents + 4);
17418 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17419 put_arm_insn (htab, output_bfd,
17420 elf32_arm_nacl_plt0_entry[i],
17421 plt->contents + (i * 4));
17424 /* Finish up the dynamic sections. */
17426 static bfd_boolean
17427 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
17429 bfd * dynobj;
17430 asection * sgot;
17431 asection * sdyn;
17432 struct elf32_arm_link_hash_table *htab;
17434 htab = elf32_arm_hash_table (info);
17435 if (htab == NULL)
17436 return FALSE;
17438 dynobj = elf_hash_table (info)->dynobj;
17440 sgot = htab->root.sgotplt;
17441 /* A broken linker script might have discarded the dynamic sections.
17442 Catch this here so that we do not seg-fault later on. */
17443 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
17444 return FALSE;
17445 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
17447 if (elf_hash_table (info)->dynamic_sections_created)
17449 asection *splt;
17450 Elf32_External_Dyn *dyncon, *dynconend;
17452 splt = htab->root.splt;
17453 BFD_ASSERT (splt != NULL && sdyn != NULL);
17454 BFD_ASSERT (htab->symbian_p || sgot != NULL);
17456 dyncon = (Elf32_External_Dyn *) sdyn->contents;
17457 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
17459 for (; dyncon < dynconend; dyncon++)
17461 Elf_Internal_Dyn dyn;
17462 const char * name;
17463 asection * s;
17465 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
17467 switch (dyn.d_tag)
17469 unsigned int type;
17471 default:
17472 if (htab->vxworks_p
17473 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
17474 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17475 break;
17477 case DT_HASH:
17478 name = ".hash";
17479 goto get_vma_if_bpabi;
17480 case DT_STRTAB:
17481 name = ".dynstr";
17482 goto get_vma_if_bpabi;
17483 case DT_SYMTAB:
17484 name = ".dynsym";
17485 goto get_vma_if_bpabi;
17486 case DT_VERSYM:
17487 name = ".gnu.version";
17488 goto get_vma_if_bpabi;
17489 case DT_VERDEF:
17490 name = ".gnu.version_d";
17491 goto get_vma_if_bpabi;
17492 case DT_VERNEED:
17493 name = ".gnu.version_r";
17494 goto get_vma_if_bpabi;
17496 case DT_PLTGOT:
17497 name = htab->symbian_p ? ".got" : ".got.plt";
17498 goto get_vma;
17499 case DT_JMPREL:
17500 name = RELOC_SECTION (htab, ".plt");
17501 get_vma:
17502 s = bfd_get_linker_section (dynobj, name);
17503 if (s == NULL)
17505 _bfd_error_handler
17506 (_("could not find section %s"), name);
17507 bfd_set_error (bfd_error_invalid_operation);
17508 return FALSE;
17510 if (!htab->symbian_p)
17511 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
17512 else
17513 /* In the BPABI, tags in the PT_DYNAMIC section point
17514 at the file offset, not the memory address, for the
17515 convenience of the post linker. */
17516 dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
17517 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17518 break;
17520 get_vma_if_bpabi:
17521 if (htab->symbian_p)
17522 goto get_vma;
17523 break;
17525 case DT_PLTRELSZ:
17526 s = htab->root.srelplt;
17527 BFD_ASSERT (s != NULL);
17528 dyn.d_un.d_val = s->size;
17529 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17530 break;
17532 case DT_RELSZ:
17533 case DT_RELASZ:
17534 case DT_REL:
17535 case DT_RELA:
17536 /* In the BPABI, the DT_REL tag must point at the file
17537 offset, not the VMA, of the first relocation
17538 section. So, we use code similar to that in
17539 elflink.c, but do not check for SHF_ALLOC on the
17540 relocation section, since relocation sections are
17541 never allocated under the BPABI. PLT relocs are also
17542 included. */
17543 if (htab->symbian_p)
17545 unsigned int i;
17546 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
17547 ? SHT_REL : SHT_RELA);
17548 dyn.d_un.d_val = 0;
17549 for (i = 1; i < elf_numsections (output_bfd); i++)
17551 Elf_Internal_Shdr *hdr
17552 = elf_elfsections (output_bfd)[i];
17553 if (hdr->sh_type == type)
17555 if (dyn.d_tag == DT_RELSZ
17556 || dyn.d_tag == DT_RELASZ)
17557 dyn.d_un.d_val += hdr->sh_size;
17558 else if ((ufile_ptr) hdr->sh_offset
17559 <= dyn.d_un.d_val - 1)
17560 dyn.d_un.d_val = hdr->sh_offset;
17563 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17565 break;
17567 case DT_TLSDESC_PLT:
17568 s = htab->root.splt;
17569 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17570 + htab->dt_tlsdesc_plt);
17571 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17572 break;
17574 case DT_TLSDESC_GOT:
17575 s = htab->root.sgot;
17576 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17577 + htab->dt_tlsdesc_got);
17578 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17579 break;
17581 /* Set the bottom bit of DT_INIT/FINI if the
17582 corresponding function is Thumb. */
17583 case DT_INIT:
17584 name = info->init_function;
17585 goto get_sym;
17586 case DT_FINI:
17587 name = info->fini_function;
17588 get_sym:
17589 /* If it wasn't set by elf_bfd_final_link
17590 then there is nothing to adjust. */
17591 if (dyn.d_un.d_val != 0)
17593 struct elf_link_hash_entry * eh;
17595 eh = elf_link_hash_lookup (elf_hash_table (info), name,
17596 FALSE, FALSE, TRUE);
17597 if (eh != NULL
17598 && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
17599 == ST_BRANCH_TO_THUMB)
17601 dyn.d_un.d_val |= 1;
17602 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17605 break;
17609 /* Fill in the first entry in the procedure linkage table. */
17610 if (splt->size > 0 && htab->plt_header_size)
17612 const bfd_vma *plt0_entry;
17613 bfd_vma got_address, plt_address, got_displacement;
17615 /* Calculate the addresses of the GOT and PLT. */
17616 got_address = sgot->output_section->vma + sgot->output_offset;
17617 plt_address = splt->output_section->vma + splt->output_offset;
17619 if (htab->vxworks_p)
17621 /* The VxWorks GOT is relocated by the dynamic linker.
17622 Therefore, we must emit relocations rather than simply
17623 computing the values now. */
17624 Elf_Internal_Rela rel;
17626 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
17627 put_arm_insn (htab, output_bfd, plt0_entry[0],
17628 splt->contents + 0);
17629 put_arm_insn (htab, output_bfd, plt0_entry[1],
17630 splt->contents + 4);
17631 put_arm_insn (htab, output_bfd, plt0_entry[2],
17632 splt->contents + 8);
17633 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
17635 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
17636 rel.r_offset = plt_address + 12;
17637 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17638 rel.r_addend = 0;
17639 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
17640 htab->srelplt2->contents);
17642 else if (htab->nacl_p)
17643 arm_nacl_put_plt0 (htab, output_bfd, splt,
17644 got_address + 8 - (plt_address + 16));
17645 else if (using_thumb_only (htab))
17647 got_displacement = got_address - (plt_address + 12);
17649 plt0_entry = elf32_thumb2_plt0_entry;
17650 put_arm_insn (htab, output_bfd, plt0_entry[0],
17651 splt->contents + 0);
17652 put_arm_insn (htab, output_bfd, plt0_entry[1],
17653 splt->contents + 4);
17654 put_arm_insn (htab, output_bfd, plt0_entry[2],
17655 splt->contents + 8);
17657 bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
17659 else
17661 got_displacement = got_address - (plt_address + 16);
17663 plt0_entry = elf32_arm_plt0_entry;
17664 put_arm_insn (htab, output_bfd, plt0_entry[0],
17665 splt->contents + 0);
17666 put_arm_insn (htab, output_bfd, plt0_entry[1],
17667 splt->contents + 4);
17668 put_arm_insn (htab, output_bfd, plt0_entry[2],
17669 splt->contents + 8);
17670 put_arm_insn (htab, output_bfd, plt0_entry[3],
17671 splt->contents + 12);
17673 #ifdef FOUR_WORD_PLT
17674 /* The displacement value goes in the otherwise-unused
17675 last word of the second entry. */
17676 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
17677 #else
17678 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
17679 #endif
17683 /* UnixWare sets the entsize of .plt to 4, although that doesn't
17684 really seem like the right value. */
17685 if (splt->output_section->owner == output_bfd)
17686 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
17688 if (htab->dt_tlsdesc_plt)
17690 bfd_vma got_address
17691 = sgot->output_section->vma + sgot->output_offset;
17692 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
17693 + htab->root.sgot->output_offset);
17694 bfd_vma plt_address
17695 = splt->output_section->vma + splt->output_offset;
17697 arm_put_trampoline (htab, output_bfd,
17698 splt->contents + htab->dt_tlsdesc_plt,
17699 dl_tlsdesc_lazy_trampoline, 6);
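/* The trampoline ends with two data words (at offsets 24 and 28) which
   are patched below with the PC-relative, GOT-related offsets it needs
   at run time; the trailing template entries [6] and [7] supply the PC
   bias subtracted from each offset.  */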
17701 bfd_put_32 (output_bfd,
17702 gotplt_address + htab->dt_tlsdesc_got
17703 - (plt_address + htab->dt_tlsdesc_plt)
17704 - dl_tlsdesc_lazy_trampoline[6],
17705 splt->contents + htab->dt_tlsdesc_plt + 24);
17706 bfd_put_32 (output_bfd,
17707 got_address - (plt_address + htab->dt_tlsdesc_plt)
17708 - dl_tlsdesc_lazy_trampoline[7],
17709 splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
17712 if (htab->tls_trampoline)
17714 arm_put_trampoline (htab, output_bfd,
17715 splt->contents + htab->tls_trampoline,
17716 tls_trampoline, 3);
17717 #ifdef FOUR_WORD_PLT
17718 bfd_put_32 (output_bfd, 0x00000000,
17719 splt->contents + htab->tls_trampoline + 12);
17720 #endif
17723 if (htab->vxworks_p
17724 && !bfd_link_pic (info)
17725 && htab->root.splt->size > 0)
17727 /* Correct the .rel(a).plt.unloaded relocations. They will have
17728 incorrect symbol indexes. */
17729 int num_plts;
17730 unsigned char *p;
17732 num_plts = ((htab->root.splt->size - htab->plt_header_size)
17733 / htab->plt_entry_size);
17734 p = htab->srelplt2->contents + RELOC_SIZE (htab);
17736 for (; num_plts; num_plts--)
17738 Elf_Internal_Rela rel;
17740 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17741 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17742 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17743 p += RELOC_SIZE (htab);
17745 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17746 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
17747 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17748 p += RELOC_SIZE (htab);
17753 if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
17754 /* NaCl uses a special first entry in .iplt too. */
17755 arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
17757 /* Fill in the first three entries in the global offset table. */
17758 if (sgot)
17760 if (sgot->size > 0)
17762 if (sdyn == NULL)
17763 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
17764 else
17765 bfd_put_32 (output_bfd,
17766 sdyn->output_section->vma + sdyn->output_offset,
17767 sgot->contents);
17768 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
17769 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
17772 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
17775 /* At the very end of the .rofixup section is a pointer to the GOT. */
17776 if (htab->fdpic_p && htab->srofixup != NULL)
17778 struct elf_link_hash_entry *hgot = htab->root.hgot;
17780 bfd_vma got_value = hgot->root.u.def.value
17781 + hgot->root.u.def.section->output_section->vma
17782 + hgot->root.u.def.section->output_offset;
17784 arm_elf_add_rofixup(output_bfd, htab->srofixup, got_value);
17786 /* Make sure we allocated and generated the same number of fixups. */
17787 BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
17790 return TRUE;
17793 static void
17794 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
17796 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
17797 struct elf32_arm_link_hash_table *globals;
17798 struct elf_segment_map *m;
17800 i_ehdrp = elf_elfheader (abfd);
17802 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17803 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17804 else
17805 _bfd_elf_post_process_headers (abfd, link_info);
17806 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17808 if (link_info)
17810 globals = elf32_arm_hash_table (link_info);
17811 if (globals != NULL && globals->byteswap_code)
17812 i_ehdrp->e_flags |= EF_ARM_BE8;
17814 if (globals->fdpic_p)
17815 i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17818 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17819 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17821 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
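/* Tag_ABI_VFP_args records whether floating-point arguments are passed
   in VFP registers; mirror it into e_flags so that tools and loaders
   can tell hard-float and soft-float binaries apart.  */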
17822 if (abi == AEABI_VFP_args_vfp)
17823 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17824 else
17825 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17828 /* Scan segment to set p_flags attribute if it contains only sections with
17829 SHF_ARM_PURECODE flag. */
17830 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17832 unsigned int j;
17834 if (m->count == 0)
17835 continue;
17836 for (j = 0; j < m->count; j++)
17838 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17839 break;
17841 if (j == m->count)
17843 m->p_flags = PF_X;
17844 m->p_flags_valid = 1;
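/* Only PF_X, with PF_R deliberately clear: a segment made up solely of
   SHF_ARM_PURECODE sections is meant to be execute-only.  */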
17849 static enum elf_reloc_type_class
17850 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17851 const asection *rel_sec ATTRIBUTE_UNUSED,
17852 const Elf_Internal_Rela *rela)
17854 switch ((int) ELF32_R_TYPE (rela->r_info))
17856 case R_ARM_RELATIVE:
17857 return reloc_class_relative;
17858 case R_ARM_JUMP_SLOT:
17859 return reloc_class_plt;
17860 case R_ARM_COPY:
17861 return reloc_class_copy;
17862 case R_ARM_IRELATIVE:
17863 return reloc_class_ifunc;
17864 default:
17865 return reloc_class_normal;
17869 static void
17870 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
17872 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
17875 /* Return TRUE if this is an unwinding table entry. */
17877 static bfd_boolean
17878 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17880 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
17881 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
17885 /* Set the type and flags for an ARM section. We do this based on
17886 the section name, which is a hack, but ought to work. */
17888 static bfd_boolean
17889 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17891 const char * name;
17893 name = bfd_get_section_name (abfd, sec);
17895 if (is_arm_elf_unwind_section_name (abfd, name))
17897 hdr->sh_type = SHT_ARM_EXIDX;
17898 hdr->sh_flags |= SHF_LINK_ORDER;
17901 if (sec->flags & SEC_ELF_PURECODE)
17902 hdr->sh_flags |= SHF_ARM_PURECODE;
17904 return TRUE;
17907 /* Handle an ARM specific section when reading an object file. This is
17908 called when bfd_section_from_shdr finds a section with an unknown
17909 type. */
17911 static bfd_boolean
17912 elf32_arm_section_from_shdr (bfd *abfd,
17913 Elf_Internal_Shdr * hdr,
17914 const char *name,
17915 int shindex)
17917 /* There ought to be a place to keep ELF backend specific flags, but
17918 at the moment there isn't one. We just keep track of the
17919 sections by their name, instead. Fortunately, the ABI gives
17920 names for all the ARM specific sections, so we will probably get
17921 away with this. */
17922 switch (hdr->sh_type)
17924 case SHT_ARM_EXIDX:
17925 case SHT_ARM_PREEMPTMAP:
17926 case SHT_ARM_ATTRIBUTES:
17927 break;
17929 default:
17930 return FALSE;
17933 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17934 return FALSE;
17936 return TRUE;
17939 static _arm_elf_section_data *
17940 get_arm_elf_section_data (asection * sec)
17942 if (sec && sec->owner && is_arm_elf (sec->owner))
17943 return elf32_arm_section_data (sec);
17944 else
17945 return NULL;
17948 typedef struct
17950 void *flaginfo;
17951 struct bfd_link_info *info;
17952 asection *sec;
17953 int sec_shndx;
17954 int (*func) (void *, const char *, Elf_Internal_Sym *,
17955 asection *, struct elf_link_hash_entry *);
17956 } output_arch_syminfo;
17958 enum map_symbol_type
17960 ARM_MAP_ARM,
17961 ARM_MAP_THUMB,
17962 ARM_MAP_DATA
17966 /* Output a single mapping symbol. */
17968 static bfd_boolean
17969 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17970 enum map_symbol_type type,
17971 bfd_vma offset)
17973 static const char *names[3] = {"$a", "$t", "$d"};
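/* Per the ARM ELF ABI, the mapping symbols $a, $t and $d mark ranges of
   ARM code, Thumb code and data respectively.  */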
17974 Elf_Internal_Sym sym;
17976 sym.st_value = osi->sec->output_section->vma
17977 + osi->sec->output_offset
17978 + offset;
17979 sym.st_size = 0;
17980 sym.st_other = 0;
17981 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17982 sym.st_shndx = osi->sec_shndx;
17983 sym.st_target_internal = 0;
17984 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17985 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
17988 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17989 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
17991 static bfd_boolean
17992 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
17993 bfd_boolean is_iplt_entry_p,
17994 union gotplt_union *root_plt,
17995 struct arm_plt_info *arm_plt)
17997 struct elf32_arm_link_hash_table *htab;
17998 bfd_vma addr, plt_header_size;
18000 if (root_plt->offset == (bfd_vma) -1)
18001 return TRUE;
18003 htab = elf32_arm_hash_table (osi->info);
18004 if (htab == NULL)
18005 return FALSE;
18007 if (is_iplt_entry_p)
18009 osi->sec = htab->root.iplt;
18010 plt_header_size = 0;
18012 else
18014 osi->sec = htab->root.splt;
18015 plt_header_size = htab->plt_header_size;
18017 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
18018 (osi->info->output_bfd, osi->sec->output_section));
18020 addr = root_plt->offset & -2;
18021 if (htab->symbian_p)
18023 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
18024 return FALSE;
18025 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
18026 return FALSE;
18028 else if (htab->vxworks_p)
18030 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
18031 return FALSE;
18032 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
18033 return FALSE;
18034 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
18035 return FALSE;
18036 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
18037 return FALSE;
18039 else if (htab->nacl_p)
18041 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
18042 return FALSE;
18044 else if (htab->fdpic_p)
18046 enum map_symbol_type type = using_thumb_only(htab)
18047 ? ARM_MAP_THUMB
18048 : ARM_MAP_ARM;
18050 if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
18051 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
18052 return FALSE;
18053 if (!elf32_arm_output_map_sym (osi, type, addr))
18054 return FALSE;
18055 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
18056 return FALSE;
18057 if (htab->plt_entry_size == 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry))
18058 if (!elf32_arm_output_map_sym (osi, type, addr + 24))
18059 return FALSE;
18061 else if (using_thumb_only (htab))
18063 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
18064 return FALSE;
18066 else
18068 bfd_boolean thumb_stub_p;
18070 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
18071 if (thumb_stub_p)
18073 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
18074 return FALSE;
18076 #ifdef FOUR_WORD_PLT
18077 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
18078 return FALSE;
18079 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
18080 return FALSE;
18081 #else
18082 /* A three-word PLT with no Thumb thunk contains only Arm code,
18083 so we only need to output a mapping symbol for the first PLT entry
18084 and for entries with Thumb thunks. */
18085 if (thumb_stub_p || addr == plt_header_size)
18087 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
18088 return FALSE;
18090 #endif
18093 return TRUE;
18096 /* Output mapping symbols for PLT entries associated with H. */
18098 static bfd_boolean
18099 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
18101 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
18102 struct elf32_arm_link_hash_entry *eh;
18104 if (h->root.type == bfd_link_hash_indirect)
18105 return TRUE;
18107 if (h->root.type == bfd_link_hash_warning)
18108 /* When warning symbols are created, they **replace** the "real"
18109 entry in the hash table, thus we never get to see the real
18110 symbol in a hash traversal. So look at it now. */
18111 h = (struct elf_link_hash_entry *) h->root.u.i.link;
18113 eh = (struct elf32_arm_link_hash_entry *) h;
18114 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
18115 &h->plt, &eh->plt);
18118 /* Bind a veneered symbol to its veneer identified by its hash entry
18119 STUB_ENTRY. The veneered location thus loses its symbol. */
18121 static void
18122 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
18124 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
18126 BFD_ASSERT (hash);
18127 hash->root.root.u.def.section = stub_entry->stub_sec;
18128 hash->root.root.u.def.value = stub_entry->stub_offset;
18129 hash->root.size = stub_entry->stub_size;
18132 /* Output a single local symbol for a generated stub. */
18134 static bfd_boolean
18135 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
18136 bfd_vma offset, bfd_vma size)
18138 Elf_Internal_Sym sym;
18140 sym.st_value = osi->sec->output_section->vma
18141 + osi->sec->output_offset
18142 + offset;
18143 sym.st_size = size;
18144 sym.st_other = 0;
18145 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
18146 sym.st_shndx = osi->sec_shndx;
18147 sym.st_target_internal = 0;
18148 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
18151 static bfd_boolean
18152 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
18153 void * in_arg)
18155 struct elf32_arm_stub_hash_entry *stub_entry;
18156 asection *stub_sec;
18157 bfd_vma addr;
18158 char *stub_name;
18159 output_arch_syminfo *osi;
18160 const insn_sequence *template_sequence;
18161 enum stub_insn_type prev_type;
18162 int size;
18163 int i;
18164 enum map_symbol_type sym_type;
18166 /* Massage our args to the form they really have. */
18167 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
18168 osi = (output_arch_syminfo *) in_arg;
18170 stub_sec = stub_entry->stub_sec;
18172 /* Ensure this stub is attached to the current section being
18173 processed. */
18174 if (stub_sec != osi->sec)
18175 return TRUE;
18177 addr = (bfd_vma) stub_entry->stub_offset;
18178 template_sequence = stub_entry->stub_template;
18180 if (arm_stub_sym_claimed (stub_entry->stub_type))
18181 arm_stub_claim_sym (stub_entry);
18182 else
18184 stub_name = stub_entry->output_name;
18185 switch (template_sequence[0].type)
18187 case ARM_TYPE:
18188 if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
18189 stub_entry->stub_size))
18190 return FALSE;
18191 break;
18192 case THUMB16_TYPE:
18193 case THUMB32_TYPE:
18194 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
18195 stub_entry->stub_size))
18196 return FALSE;
18197 break;
18198 default:
18199 BFD_FAIL ();
18200 return 0;
18204 prev_type = DATA_TYPE;
18205 size = 0;
18206 for (i = 0; i < stub_entry->stub_template_size; i++)
18208 switch (template_sequence[i].type)
18210 case ARM_TYPE:
18211 sym_type = ARM_MAP_ARM;
18212 break;
18214 case THUMB16_TYPE:
18215 case THUMB32_TYPE:
18216 sym_type = ARM_MAP_THUMB;
18217 break;
18219 case DATA_TYPE:
18220 sym_type = ARM_MAP_DATA;
18221 break;
18223 default:
18224 BFD_FAIL ();
18225 return FALSE;
18228 if (template_sequence[i].type != prev_type)
18230 prev_type = template_sequence[i].type;
18231 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
18232 return FALSE;
18235 switch (template_sequence[i].type)
18237 case ARM_TYPE:
18238 case THUMB32_TYPE:
18239 size += 4;
18240 break;
18242 case THUMB16_TYPE:
18243 size += 2;
18244 break;
18246 case DATA_TYPE:
18247 size += 4;
18248 break;
18250 default:
18251 BFD_FAIL ();
18252 return FALSE;
18256 return TRUE;
18259 /* Output mapping symbols for linker generated sections,
18260 and for those data-only sections that do not have a
18261 $d. */
18263 static bfd_boolean
18264 elf32_arm_output_arch_local_syms (bfd *output_bfd,
18265 struct bfd_link_info *info,
18266 void *flaginfo,
18267 int (*func) (void *, const char *,
18268 Elf_Internal_Sym *,
18269 asection *,
18270 struct elf_link_hash_entry *))
18272 output_arch_syminfo osi;
18273 struct elf32_arm_link_hash_table *htab;
18274 bfd_vma offset;
18275 bfd_size_type size;
18276 bfd *input_bfd;
18278 htab = elf32_arm_hash_table (info);
18279 if (htab == NULL)
18280 return FALSE;
18282 check_use_blx (htab);
18284 osi.flaginfo = flaginfo;
18285 osi.info = info;
18286 osi.func = func;
18288 /* Add a $d mapping symbol to data-only sections that
18289 don't have any mapping symbol. This may result in (harmless) redundant
18290 mapping symbols. */
18291 for (input_bfd = info->input_bfds;
18292 input_bfd != NULL;
18293 input_bfd = input_bfd->link.next)
18295 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
18296 for (osi.sec = input_bfd->sections;
18297 osi.sec != NULL;
18298 osi.sec = osi.sec->next)
18300 if (osi.sec->output_section != NULL
18301 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
18302 != 0)
18303 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
18304 == SEC_HAS_CONTENTS
18305 && get_arm_elf_section_data (osi.sec) != NULL
18306 && get_arm_elf_section_data (osi.sec)->mapcount == 0
18307 && osi.sec->size > 0
18308 && (osi.sec->flags & SEC_EXCLUDE) == 0)
18310 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18311 (output_bfd, osi.sec->output_section);
18312 if (osi.sec_shndx != (int)SHN_BAD)
18313 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
18318 /* ARM->Thumb glue. */
18319 if (htab->arm_glue_size > 0)
18321 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18322 ARM2THUMB_GLUE_SECTION_NAME);
18324 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18325 (output_bfd, osi.sec->output_section);
18326 if (bfd_link_pic (info) || htab->root.is_relocatable_executable
18327 || htab->pic_veneer)
18328 size = ARM2THUMB_PIC_GLUE_SIZE;
18329 else if (htab->use_blx)
18330 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
18331 else
18332 size = ARM2THUMB_STATIC_GLUE_SIZE;
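/* Each ARM->Thumb veneer ends with a literal word (the branch target or
   its offset), hence the $d mapping symbol at the veneer's last word.  */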
18334 for (offset = 0; offset < htab->arm_glue_size; offset += size)
18336 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
18337 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
18341 /* Thumb->ARM glue. */
18342 if (htab->thumb_glue_size > 0)
18344 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18345 THUMB2ARM_GLUE_SECTION_NAME);
18347 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18348 (output_bfd, osi.sec->output_section);
18349 size = THUMB2ARM_GLUE_SIZE;
18351 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
18353 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
18354 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
18358 /* ARMv4 BX veneers. */
18359 if (htab->bx_glue_size > 0)
18361 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18362 ARM_BX_GLUE_SECTION_NAME);
18364 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18365 (output_bfd, osi.sec->output_section);
18367 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
18370 /* Long call stubs. */
18371 if (htab->stub_bfd && htab->stub_bfd->sections)
18373 asection* stub_sec;
18375 for (stub_sec = htab->stub_bfd->sections;
18376 stub_sec != NULL;
18377 stub_sec = stub_sec->next)
18379 /* Ignore non-stub sections. */
18380 if (!strstr (stub_sec->name, STUB_SUFFIX))
18381 continue;
18383 osi.sec = stub_sec;
18385 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18386 (output_bfd, osi.sec->output_section);
18388 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
18392 /* Finally, output mapping symbols for the PLT. */
18393 if (htab->root.splt && htab->root.splt->size > 0)
18395 osi.sec = htab->root.splt;
18396 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18397 (output_bfd, osi.sec->output_section));
18399 /* Output mapping symbols for the plt header. SymbianOS does not have a
18400 plt header. */
18401 if (htab->vxworks_p)
18403 /* VxWorks shared libraries have no PLT header. */
18404 if (!bfd_link_pic (info))
18406 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18407 return FALSE;
18408 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18409 return FALSE;
18412 else if (htab->nacl_p)
18414 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18415 return FALSE;
18417 else if (using_thumb_only (htab) && !htab->fdpic_p)
18419 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
18420 return FALSE;
18421 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18422 return FALSE;
18423 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
18424 return FALSE;
18426 else if (!htab->symbian_p && !htab->fdpic_p)
18428 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18429 return FALSE;
18430 #ifndef FOUR_WORD_PLT
18431 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
18432 return FALSE;
18433 #endif
18436 if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
18438 /* NaCl uses a special first entry in .iplt too. */
18439 osi.sec = htab->root.iplt;
18440 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18441 (output_bfd, osi.sec->output_section));
18442 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18443 return FALSE;
18445 if ((htab->root.splt && htab->root.splt->size > 0)
18446 || (htab->root.iplt && htab->root.iplt->size > 0))
18448 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
18449 for (input_bfd = info->input_bfds;
18450 input_bfd != NULL;
18451 input_bfd = input_bfd->link.next)
18453 struct arm_local_iplt_info **local_iplt;
18454 unsigned int i, num_syms;
18456 local_iplt = elf32_arm_local_iplt (input_bfd);
18457 if (local_iplt != NULL)
18459 num_syms = elf_symtab_hdr (input_bfd).sh_info;
18460 for (i = 0; i < num_syms; i++)
18461 if (local_iplt[i] != NULL
18462 && !elf32_arm_output_plt_map_1 (&osi, TRUE,
18463 &local_iplt[i]->root,
18464 &local_iplt[i]->arm))
18465 return FALSE;
18469 if (htab->dt_tlsdesc_plt != 0)
18471 /* Mapping symbols for the lazy tls trampoline. */
18472 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
18473 return FALSE;
18475 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18476 htab->dt_tlsdesc_plt + 24))
18477 return FALSE;
18479 if (htab->tls_trampoline != 0)
18481 /* Mapping symbols for the tls trampoline. */
18482 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
18483 return FALSE;
18484 #ifdef FOUR_WORD_PLT
18485 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18486 htab->tls_trampoline + 12))
18487 return FALSE;
18488 #endif
18491 return TRUE;
18494 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18495 the import library. All SYMCOUNT symbols of ABFD can be examined
18496 from their pointers in SYMS. Pointers of symbols to keep should be
18497 stored contiguously at the beginning of that array.
18499 Returns the number of symbols to keep. */
18501 static unsigned int
18502 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18503 struct bfd_link_info *info,
18504 asymbol **syms, long symcount)
18506 size_t maxnamelen;
18507 char *cmse_name;
18508 long src_count, dst_count = 0;
18509 struct elf32_arm_link_hash_table *htab;
18511 htab = elf32_arm_hash_table (info);
18512 if (!htab->stub_bfd || !htab->stub_bfd->sections)
18513 symcount = 0;
18515 maxnamelen = 128;
18516 cmse_name = (char *) bfd_malloc (maxnamelen);
18517 for (src_count = 0; src_count < symcount; src_count++)
18519 struct elf32_arm_link_hash_entry *cmse_hash;
18520 asymbol *sym;
18521 flagword flags;
18522 char *name;
18523 size_t namelen;
18525 sym = syms[src_count];
18526 flags = sym->flags;
18527 name = (char *) bfd_asymbol_name (sym);
18529 if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18530 continue;
18531 if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18532 continue;
18534 namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18535 if (namelen > maxnamelen)
18537 cmse_name = (char *)
18538 bfd_realloc (cmse_name, namelen);
18539 maxnamelen = namelen;
18541 snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18542 cmse_hash = (struct elf32_arm_link_hash_entry *)
18543 elf_link_hash_lookup (&(htab)->root, cmse_name, FALSE, FALSE, TRUE);
18545 if (!cmse_hash
18546 || (cmse_hash->root.root.type != bfd_link_hash_defined
18547 && cmse_hash->root.root.type != bfd_link_hash_defweak)
18548 || cmse_hash->root.type != STT_FUNC)
18549 continue;
18551 if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
18552 continue;
18554 syms[dst_count++] = sym;
18556 free (cmse_name);
18558 syms[dst_count] = NULL;
18560 return dst_count;
18563 /* Filter symbols of ABFD to include in the import library. All
18564 SYMCOUNT symbols of ABFD can be examined from their pointers in
18565 SYMS. Pointers of symbols to keep should be stored contiguously at
18566 the beginning of that array.
18568 Returns the number of symbols to keep. */
18570 static unsigned int
18571 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18572 struct bfd_link_info *info,
18573 asymbol **syms, long symcount)
18575 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18577 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18578 Development Tools" (ARM-ECM-0359818) mandates the Secure Gateway import
18579 library to be a relocatable object file. */
18580 BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18581 if (globals->cmse_implib)
18582 return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18583 else
18584 return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18587 /* Allocate target specific section data. */
18589 static bfd_boolean
18590 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18592 if (!sec->used_by_bfd)
18594 _arm_elf_section_data *sdata;
18595 bfd_size_type amt = sizeof (*sdata);
18597 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18598 if (sdata == NULL)
18599 return FALSE;
18600 sec->used_by_bfd = sdata;
18603 return _bfd_elf_new_section_hook (abfd, sec);
18607 /* Used to order a list of mapping symbols by address. */
18609 static int
18610 elf32_arm_compare_mapping (const void * a, const void * b)
18612 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18613 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18615 if (amap->vma > bmap->vma)
18616 return 1;
18617 else if (amap->vma < bmap->vma)
18618 return -1;
18619 else if (amap->type > bmap->type)
18620 /* Ensure results do not depend on the host qsort for objects with
18621 multiple mapping symbols at the same address by sorting on type
18622 after vma. */
18623 return 1;
18624 else if (amap->type < bmap->type)
18625 return -1;
18626 else
18627 return 0;
18630 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
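/* Bit 31 must be preserved: in .ARM.exidx entries it is a format flag
   (for instance it marks inline unwind data) rather than part of the
   prel31 offset.  */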
18632 static unsigned long
18633 offset_prel31 (unsigned long addr, bfd_vma offset)
18635 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
18638 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18639 relocations. */
18641 static void
18642 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18644 unsigned long first_word = bfd_get_32 (output_bfd, from);
18645 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18647 /* High bit of first word is supposed to be zero. */
18648 if ((first_word & 0x80000000ul) == 0)
18649 first_word = offset_prel31 (first_word, offset);
18651 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
18652 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
18653 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18654 second_word = offset_prel31 (second_word, offset);
18656 bfd_put_32 (output_bfd, first_word, to);
18657 bfd_put_32 (output_bfd, second_word, to + 4);
18660 /* Data for make_branch_to_a8_stub(). */
18662 struct a8_branch_to_stub_data
18664 asection *writing_section;
18665 bfd_byte *contents;
18669 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18670 places for a particular section. */
18672 static bfd_boolean
18673 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
18674 void *in_arg)
18676 struct elf32_arm_stub_hash_entry *stub_entry;
18677 struct a8_branch_to_stub_data *data;
18678 bfd_byte *contents;
18679 unsigned long branch_insn;
18680 bfd_vma veneered_insn_loc, veneer_entry_loc;
18681 bfd_signed_vma branch_offset;
18682 bfd *abfd;
18683 unsigned int loc;
18685 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
18686 data = (struct a8_branch_to_stub_data *) in_arg;
18688 if (stub_entry->target_section != data->writing_section
18689 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
18690 return TRUE;
18692 contents = data->contents;
18694 /* We use target_section as Cortex-A8 erratum workaround stubs are only
18695 generated when both source and target are in the same section. */
18696 veneered_insn_loc = stub_entry->target_section->output_section->vma
18697 + stub_entry->target_section->output_offset
18698 + stub_entry->source_value;
18700 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
18701 + stub_entry->stub_sec->output_offset
18702 + stub_entry->stub_offset;
18704 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
18705 veneered_insn_loc &= ~3u;
18707 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
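/* The extra -4 accounts for the Thumb PC reading 4 bytes ahead of the
   branch instruction being patched.  */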
18709 abfd = stub_entry->target_section->owner;
18710 loc = stub_entry->source_value;
18712 /* We attempt to avoid this condition by setting stubs_always_after_branch
18713 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
18714 This check is just to be on the safe side... */
18715 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
18717 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
18718 "allocated in unsafe location"), abfd);
18719 return FALSE;
18722 switch (stub_entry->stub_type)
18724 case arm_stub_a8_veneer_b:
18725 case arm_stub_a8_veneer_b_cond:
18726 branch_insn = 0xf0009000;
18727 goto jump24;
18729 case arm_stub_a8_veneer_blx:
18730 branch_insn = 0xf000e800;
18731 goto jump24;
18733 case arm_stub_a8_veneer_bl:
18735 unsigned int i1, j1, i2, j2, s;
18737 branch_insn = 0xf000d000;
18739 jump24:
18740 if (branch_offset < -16777216 || branch_offset > 16777214)
18742 /* There's not much we can do apart from complain if this
18743 happens. */
18744 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
18745 "of range (input file too large)"), abfd);
18746 return FALSE;
18749 /* i1 = not(j1 eor s), so:
18750 not i1 = j1 eor s
18751 j1 = (not i1) eor s. */
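/* The Thumb-2 T4 branch encoding packs the offset S:I1:I2:imm10:imm11:0
   with imm11 in bits [10:0], imm10 in bits [25:16], J2 in bit 11, J1 in
   bit 13 and S in bit 26, which is what the ORs below assemble.  */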
18753 branch_insn |= (branch_offset >> 1) & 0x7ff;
18754 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
18755 i2 = (branch_offset >> 22) & 1;
18756 i1 = (branch_offset >> 23) & 1;
18757 s = (branch_offset >> 24) & 1;
18758 j1 = (!i1) ^ s;
18759 j2 = (!i2) ^ s;
18760 branch_insn |= j2 << 11;
18761 branch_insn |= j1 << 13;
18762 branch_insn |= s << 26;
18764 break;
18766 default:
18767 BFD_FAIL ();
18768 return FALSE;
18771 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
18772 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);
18774 return TRUE;
18777 /* Beginning of stm32l4xx work-around. */
18779 /* Functions encoding instructions necessary for the emission of the
18780 fix-stm32l4xx-629360.
18781 Encoding is extracted from the
18782 ARM (C) Architecture Reference Manual
18783 ARMv7-A and ARMv7-R edition
18784 ARM DDI 0406C.b (ID072512). */
18786 static inline bfd_vma
18787 create_instruction_branch_absolute (int branch_offset)
18789 /* A8.8.18 B (A8-334)
18790 B target_address (Encoding T4). */
18791 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18792 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18793 /* with: I1 = NOT (J1 EOR S), I2 = NOT (J2 EOR S). */
18795 int s = ((branch_offset & 0x1000000) >> 24);
18796 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18797 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18799 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18800 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18802 bfd_vma patched_inst = 0xf0009000
18803 | s << 26 /* S. */
18804 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
18805 | j1 << 13 /* J1. */
18806 | j2 << 11 /* J2. */
18807 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
18809 return patched_inst;
18812 static inline bfd_vma
18813 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18815 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18816 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18817 bfd_vma patched_inst = 0xe8900000
18818 | (/*W=*/wback << 21)
18819 | (base_reg << 16)
18820 | (reg_mask & 0x0000ffff);
18822 return patched_inst;
18825 static inline bfd_vma
18826 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18828 /* A8.8.60 LDMDB/LDMEA (A8-402)
18829 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18830 bfd_vma patched_inst = 0xe9100000
18831 | (/*W=*/wback << 21)
18832 | (base_reg << 16)
18833 | (reg_mask & 0x0000ffff);
18835 return patched_inst;
18838 static inline bfd_vma
18839 create_instruction_mov (int target_reg, int source_reg)
18841 /* A8.8.103 MOV (register) (A8-486)
18842 MOV Rd, Rm (Encoding T1). */
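/* Rd is split across the encoding: bits [2:0] hold Rd<2:0> and bit 7
   holds Rd<3>, while Rm occupies bits [6:3].  */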
18843 bfd_vma patched_inst = 0x4600
18844 | (target_reg & 0x7)
18845 | ((target_reg & 0x8) >> 3) << 7
18846 | (source_reg << 3);
18848 return patched_inst;
18851 static inline bfd_vma
18852 create_instruction_sub (int target_reg, int source_reg, int value)
18854 /* A8.8.221 SUB (immediate) (A8-708)
18855 SUB Rd, Rn, #value (Encoding T3). */
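/* The 12-bit immediate is split into i (bit 26), imm3 (bits [14:12])
   and imm8 (bits [7:0]), as the ORs below show.  */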
18856 bfd_vma patched_inst = 0xf1a00000
18857 | (target_reg << 8)
18858 | (source_reg << 16)
18859 | (/*S=*/0 << 20)
18860 | ((value & 0x800) >> 11) << 26
18861 | ((value & 0x700) >> 8) << 12
18862 | (value & 0x0ff);
18864 return patched_inst;
18867 static inline bfd_vma
18868 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18869 int first_reg)
18871 /* A8.8.332 VLDM (A8-922)
18872 VLDM{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18873 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18874 | (/*W=*/wback << 21)
18875 | (base_reg << 16)
18876 | (num_words & 0x000000ff)
18877 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18878 | (first_reg & 0x00000001) << 22;
18880 return patched_inst;
18883 static inline bfd_vma
18884 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18885 int first_reg)
18887 /* A8.8.332 VLDM (A8-922)
18888 VLDM{MODE} Rn!, {list} (Encoding T1 or T2). */
18889 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18890 | (base_reg << 16)
18891 | (num_words & 0x000000ff)
18892 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
18893 | (first_reg & 0x00000001) << 22;
18895 return patched_inst;
18898 static inline bfd_vma
18899 create_instruction_udf_w (int value)
18901 /* A8.8.247 UDF (A8-758)
18902 Undefined (Encoding T2). */
18903 bfd_vma patched_inst = 0xf7f0a000
18904 | (value & 0x00000fff)
18905 | (value & 0x000f0000) << 16;
18907 return patched_inst;
18910 static inline bfd_vma
18911 create_instruction_udf (int value)
18913 /* A8.8.247 UDF (A8-758)
18914 Undefined (Encoding T1). */
18915 bfd_vma patched_inst = 0xde00
18916 | (value & 0xff);
18918 return patched_inst;
18921 /* Functions writing an instruction in memory, returning the next
18922 memory position to write to. */
18924 static inline bfd_byte *
18925 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18926 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18928 put_thumb2_insn (htab, output_bfd, insn, pt);
18929 return pt + 4;
18932 static inline bfd_byte *
18933 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18934 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18936 put_thumb_insn (htab, output_bfd, insn, pt);
18937 return pt + 2;
18940 /* Function that fills a region of memory with T1 and T2 UDFs, taking
18941 care of alignment. */
18943 static bfd_byte *
18944 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18945 bfd * output_bfd,
18946 const bfd_byte * const base_stub_contents,
18947 bfd_byte * const from_stub_contents,
18948 const bfd_byte * const end_stub_contents)
18950 bfd_byte *current_stub_contents = from_stub_contents;
18952 /* Fill the remainder of the stub with deterministic contents: UDF
18953 instructions.
18954 If the current position is only 2-byte aligned, emit a single T1 UDF
18955 first to realign to a 4-byte boundary, then fill with T2 UDFs. */
18956 if ((current_stub_contents < end_stub_contents)
18957 && !((current_stub_contents - base_stub_contents) % 2)
18958 && ((current_stub_contents - base_stub_contents) % 4))
18959 current_stub_contents =
18960 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18961 create_instruction_udf (0));
18963 for (; current_stub_contents < end_stub_contents;)
18964 current_stub_contents =
18965 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18966 create_instruction_udf_w (0));
18968 return current_stub_contents;
18971 /* Functions writing the stream of instructions equivalent to the
18972 derived sequence for ldmia, ldmdb, vldm respectively. */
18974 static void
18975 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
18976 bfd * output_bfd,
18977 const insn32 initial_insn,
18978 const bfd_byte *const initial_insn_addr,
18979 bfd_byte *const base_stub_contents)
18981 int wback = (initial_insn & 0x00200000) >> 21;
18982 int ri, rn = (initial_insn & 0x000F0000) >> 16;
18983 int insn_all_registers = initial_insn & 0x0000ffff;
18984 int insn_low_registers, insn_high_registers;
18985 int usable_register_mask;
18986 int nb_registers = elf32_arm_popcount (insn_all_registers);
18987 int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
18988 int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
18989 bfd_byte *current_stub_contents = base_stub_contents;
18991 BFD_ASSERT (is_thumb2_ldmia (initial_insn));
18993 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
18994 sequences of 8 registers or fewer, which do not trigger the
18995 hardware issue. */
18996 if (nb_registers <= 8)
18998 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
18999 current_stub_contents =
19000 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19001 initial_insn);
19003 /* B initial_insn_addr+4. */
19004 if (!restore_pc)
19005 current_stub_contents =
19006 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19007 create_instruction_branch_absolute
19008 (initial_insn_addr - current_stub_contents));
19010 /* Fill the remainder of the stub with deterministic contents. */
19011 current_stub_contents =
19012 stm32l4xx_fill_stub_udf (htab, output_bfd,
19013 base_stub_contents, current_stub_contents,
19014 base_stub_contents +
19015 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19017 return;
19020 /* - reg_list[13] == 0. */
19021 BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
19023 /* - reg_list[14] and reg_list[15] must not both be set. */
19024 BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
19026 /* - if (wback==1) reg_list[rn] == 0. */
19027 BFD_ASSERT (!wback || !restore_rn);
19029 /* - nb_registers > 8. */
19030 BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
19032 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
19034 /* In the following algorithm, we split this wide LDM using 2 LDM insns:
19035 - One with the 7 lowest registers (register mask 0x007F)
19036 This LDM will finally contain between 2 and 7 registers
19037 - One with the 7 highest registers (register mask 0xDF80)
19038 This ldm will finally contain between 2 and 7 registers. */
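/* Illustrative example (not taken from a real link): for
   LDMIA r0!, {r1-r10, r12, lr} the register mask is 0x57fe, giving
   insn_low_registers = 0x007e (r1-r6) and
   insn_high_registers = 0x5780 (r7-r10, r12, lr), so the original
   instruction is replaced by two LDMs of at most 7 registers each. */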
19039 insn_low_registers = insn_all_registers & 0x007F;
19040 insn_high_registers = insn_all_registers & 0xDF80;
19042 /* A spare register may be needed during this veneer to temporarily
19043 handle the base register. This register will be restored with the
19044 last LDM operation.
19045 The usable register may be any general purpose register except
19046 PC, SP and LR (register mask 0x1FFF). */
19047 usable_register_mask = 0x1FFF;
19049 /* Generate the stub function. */
19050 if (wback)
19052 /* LDMIA Rn!, {R-low-register-list} : (Encoding T2). */
19053 current_stub_contents =
19054 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19055 create_instruction_ldmia
19056 (rn, /*wback=*/1, insn_low_registers));
19058 /* LDMIA Rn!, {R-high-register-list} : (Encoding T2). */
19059 current_stub_contents =
19060 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19061 create_instruction_ldmia
19062 (rn, /*wback=*/1, insn_high_registers));
19063 if (!restore_pc)
19065 /* B initial_insn_addr+4. */
19066 current_stub_contents =
19067 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19068 create_instruction_branch_absolute
19069 (initial_insn_addr - current_stub_contents));
19072 else /* if (!wback). */
19074 ri = rn;
19076 /* If Rn is not part of the high-register-list, move it there. */
19077 if (!(insn_high_registers & (1 << rn)))
19079 /* Choose a Ri in the high-register-list that will be restored. */
19080 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19082 /* MOV Ri, Rn. */
19083 current_stub_contents =
19084 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19085 create_instruction_mov (ri, rn));
19088 /* LDMIA Ri!, {R-low-register-list} : (Encoding T2). */
19089 current_stub_contents =
19090 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19091 create_instruction_ldmia
19092 (ri, /*wback=*/1, insn_low_registers));
19094 /* LDMIA Ri, {R-high-register-list} : (Encoding T2). */
19095 current_stub_contents =
19096 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19097 create_instruction_ldmia
19098 (ri, /*wback=*/0, insn_high_registers));
19100 if (!restore_pc)
19102 /* B initial_insn_addr+4. */
19103 current_stub_contents =
19104 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19105 create_instruction_branch_absolute
19106 (initial_insn_addr - current_stub_contents));
19110 /* Fill the remainder of the stub with deterministic contents. */
19111 current_stub_contents =
19112 stm32l4xx_fill_stub_udf (htab, output_bfd,
19113 base_stub_contents, current_stub_contents,
19114 base_stub_contents +
19115 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19118 static void
19119 stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
19120 bfd * output_bfd,
19121 const insn32 initial_insn,
19122 const bfd_byte *const initial_insn_addr,
19123 bfd_byte *const base_stub_contents)
19125 int wback = (initial_insn & 0x00200000) >> 21;
19126 int ri, rn = (initial_insn & 0x000f0000) >> 16;
19127 int insn_all_registers = initial_insn & 0x0000ffff;
19128 int insn_low_registers, insn_high_registers;
19129 int usable_register_mask;
19130 int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
19131 int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
19132 int nb_registers = elf32_arm_popcount (insn_all_registers);
19133 bfd_byte *current_stub_contents = base_stub_contents;
19135 BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
19137 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
19138 sequences of 8 registers or fewer, which do not trigger the
19139 hardware issue. */
19140 if (nb_registers <= 8)
19142 /* UNTOUCHED : LDMDB Rn{!}, {R-all-register-list}. */
19143 current_stub_contents =
19144 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19145 initial_insn);
19147 /* B initial_insn_addr+4. */
19148 current_stub_contents =
19149 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19150 create_instruction_branch_absolute
19151 (initial_insn_addr - current_stub_contents));
19153 /* Fill the remainder of the stub with deterministic contents. */
19154 current_stub_contents =
19155 stm32l4xx_fill_stub_udf (htab, output_bfd,
19156 base_stub_contents, current_stub_contents,
19157 base_stub_contents +
19158 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19160 return;
19163 /* - reg_list[13] == 0. */
19164 BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
19167 /* - reg_list[14] and reg_list[15] must not both be set. */
19167 BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
19169 /* - if (wback==1) reg_list[rn] == 0. */
19170 BFD_ASSERT (!wback || !restore_rn);
19172 /* - nb_registers > 8. */
19173 BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
19175 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
19177 /* In the following algorithm, we split this wide LDM using 2 LDM insns:
19178 - One with the 7 lowest registers (register mask 0x007F)
19179 This LDM will finally contain between 2 and 7 registers
19180 - One with the 7 highest registers (register mask 0xDF80)
19181 This ldm will finally contain between 2 and 7 registers. */
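/* Note that, unlike the LDMIA case above, the high-register chunk is
   consumed first: with descending addressing the highest-numbered
   registers live at the highest addresses, so LDMDB must load them
   before the low-register chunk. */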
19182 insn_low_registers = insn_all_registers & 0x007F;
19183 insn_high_registers = insn_all_registers & 0xDF80;
19185 /* A spare register may be needed during this veneer to temporarily
19186 handle the base register. This register will be restored with
19187 the last LDM operation.
19188 The usable register may be any general purpose register except
19189 PC, SP and LR (register mask 0x1FFF). */
19190 usable_register_mask = 0x1FFF;
19192 /* Generate the stub function. */
19193 if (!wback && !restore_pc && !restore_rn)
19195 /* Choose a Ri in the low-register-list that will be restored. */
19196 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19198 /* MOV Ri, Rn. */
19199 current_stub_contents =
19200 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19201 create_instruction_mov (ri, rn));
19203 /* LDMDB Ri!, {R-high-register-list}. */
19204 current_stub_contents =
19205 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19206 create_instruction_ldmdb
19207 (ri, /*wback=*/1, insn_high_registers));
19209 /* LDMDB Ri, {R-low-register-list}. */
19210 current_stub_contents =
19211 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19212 create_instruction_ldmdb
19213 (ri, /*wback=*/0, insn_low_registers));
19215 /* B initial_insn_addr+4. */
19216 current_stub_contents =
19217 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19218 create_instruction_branch_absolute
19219 (initial_insn_addr - current_stub_contents));
19221 else if (wback && !restore_pc && !restore_rn)
19223 /* LDMDB Rn!, {R-high-register-list}. */
19224 current_stub_contents =
19225 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19226 create_instruction_ldmdb
19227 (rn, /*wback=*/1, insn_high_registers));
19229 /* LDMDB Rn!, {R-low-register-list}. */
19230 current_stub_contents =
19231 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19232 create_instruction_ldmdb
19233 (rn, /*wback=*/1, insn_low_registers));
19235 /* B initial_insn_addr+4. */
19236 current_stub_contents =
19237 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19238 create_instruction_branch_absolute
19239 (initial_insn_addr - current_stub_contents));
19241 else if (!wback && restore_pc && !restore_rn)
19243 /* Choose a Ri in the high-register-list that will be restored. */
19244 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19246 /* SUB Ri, Rn, #(4*nb_registers). */
19247 current_stub_contents =
19248 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19249 create_instruction_sub (ri, rn, (4 * nb_registers)));
19251 /* LDMIA Ri!, {R-low-register-list}. */
19252 current_stub_contents =
19253 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19254 create_instruction_ldmia
19255 (ri, /*wback=*/1, insn_low_registers));
19257 /* LDMIA Ri, {R-high-register-list}. */
19258 current_stub_contents =
19259 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19260 create_instruction_ldmia
19261 (ri, /*wback=*/0, insn_high_registers));
19263 else if (wback && restore_pc && !restore_rn)
19265 /* Choose a Ri in the high-register-list that will be restored. */
19266 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19268 /* SUB Rn, Rn, #(4*nb_registers) */
19269 current_stub_contents =
19270 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19271 create_instruction_sub (rn, rn, (4 * nb_registers)));
19273 /* MOV Ri, Rn. */
19274 current_stub_contents =
19275 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19276 create_instruction_mov (ri, rn));
19278 /* LDMIA Ri!, {R-low-register-list}. */
19279 current_stub_contents =
19280 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19281 create_instruction_ldmia
19282 (ri, /*wback=*/1, insn_low_registers));
19284 /* LDMIA Ri, {R-high-register-list}. */
19285 current_stub_contents =
19286 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19287 create_instruction_ldmia
19288 (ri, /*wback=*/0, insn_high_registers));
19290 else if (!wback && !restore_pc && restore_rn)
19292 ri = rn;
19293 if (!(insn_low_registers & (1 << rn)))
19295 /* Choose a Ri in the low-register-list that will be restored. */
19296 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19298 /* MOV Ri, Rn. */
19299 current_stub_contents =
19300 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19301 create_instruction_mov (ri, rn));
19304 /* LDMDB Ri!, {R-high-register-list}. */
19305 current_stub_contents =
19306 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19307 create_instruction_ldmdb
19308 (ri, /*wback=*/1, insn_high_registers));
19310 /* LDMDB Ri, {R-low-register-list}. */
19311 current_stub_contents =
19312 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19313 create_instruction_ldmdb
19314 (ri, /*wback=*/0, insn_low_registers));
19316 /* B initial_insn_addr+4. */
19317 current_stub_contents =
19318 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19319 create_instruction_branch_absolute
19320 (initial_insn_addr - current_stub_contents));
19322 else if (!wback && restore_pc && restore_rn)
19324 ri = rn;
19325 if (!(insn_high_registers & (1 << rn)))
19327 /* Choose a Ri in the high-register-list that will be restored. */
19328 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19331 /* SUB Ri, Rn, #(4*nb_registers). */
19332 current_stub_contents =
19333 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19334 create_instruction_sub (ri, rn, (4 * nb_registers)));
19336 /* LDMIA Ri!, {R-low-register-list}. */
19337 current_stub_contents =
19338 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19339 create_instruction_ldmia
19340 (ri, /*wback=*/1, insn_low_registers));
19342 /* LDMIA Ri, {R-high-register-list}. */
19343 current_stub_contents =
19344 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19345 create_instruction_ldmia
19346 (ri, /*wback=*/0, insn_high_registers));
19348 else if (wback && restore_rn)
19350 /* The assembler should not have accepted this encoding. */
19351 BFD_ASSERT (0 && "Cannot patch an instruction that has "
19352 "undefined behavior.\n");
19355 /* Fill the remainder of the stub with deterministic contents. */
19356 current_stub_contents =
19357 stm32l4xx_fill_stub_udf (htab, output_bfd,
19358 base_stub_contents, current_stub_contents,
19359 base_stub_contents +
19360 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19364 static void
19365 stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
19366 bfd * output_bfd,
19367 const insn32 initial_insn,
19368 const bfd_byte *const initial_insn_addr,
19369 bfd_byte *const base_stub_contents)
19371 int num_words = ((unsigned int) initial_insn << 24) >> 24;
19372 bfd_byte *current_stub_contents = base_stub_contents;
19374 BFD_ASSERT (is_thumb2_vldm (initial_insn));
19376 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
19377 sequences of 8 words or fewer, which do not trigger the
19378 hardware issue. */
19379 if (num_words <= 8)
19381 /* Untouched instruction. */
19382 current_stub_contents =
19383 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19384 initial_insn);
19386 /* B initial_insn_addr+4. */
19387 current_stub_contents =
19388 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19389 create_instruction_branch_absolute
19390 (initial_insn_addr - current_stub_contents));
19392 else
19394 bfd_boolean is_dp = /* DP encoding. */
19395 (initial_insn & 0xfe100f00) == 0xec100b00;
19396 bfd_boolean is_ia_nobang = /* (IA without !). */
19397 (((initial_insn << 7) >> 28) & 0xd) == 0x4;
19398 bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP. */
19399 (((initial_insn << 7) >> 28) & 0xd) == 0x5;
19400 bfd_boolean is_db_bang = /* (DB with !). */
19401 (((initial_insn << 7) >> 28) & 0xd) == 0x9;
19402 int base_reg = ((unsigned int) initial_insn << 12) >> 28;
19403 /* d = UInt (Vd:D). */
19404 int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
19405 | (((unsigned int)initial_insn << 9) >> 31);
19407 /* Compute the number of 8-words chunks needed to split. */
19408 int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
19409 int chunk;
19411 /* The test coverage has been done under the hypothesis
19412 that exactly one of the is_ predicates above is
19413 true. */
19414 BFD_ASSERT ((is_ia_nobang ^ is_ia_bang ^ is_db_bang)
19415 && !(is_ia_nobang & is_ia_bang & is_db_bang));
19417 /* We split the register list into chunks of at most 8 words in
19418 one pass for all cases, then emit the adjustments:
19420 vldm rx, {...}
19421 -> vldm rx!, {8_words_or_less} for each needed 8_word
19422 -> sub rx, rx, #size (list)
19424 vldm rx!, {...}
19425 -> vldm rx!, {8_words_or_less} for each needed 8_word
19426 This also handles the vpop instruction (when rx is sp)
19428 vldmdb rx!, {...}
19429 -> vldmdb rx!, {8_words_or_less} for each needed 8_word. */
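/* Illustrative example: VLDMIA r1, {s0-s20} (21 words, no writeback)
   becomes VLDMIA r1!, {s0-s7}; VLDMIA r1!, {s8-s15};
   VLDMIA r1!, {s16-s20}; SUB r1, r1, #84 (84 = 4 * 21), followed by
   the branch back to the instruction after the original VLDM. */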
19430 for (chunk = 0; chunk < chunks; ++chunk)
19432 bfd_vma new_insn = 0;
19434 if (is_ia_nobang || is_ia_bang)
19436 new_insn = create_instruction_vldmia
19437 (base_reg,
19438 is_dp,
19439 /*wback=*/1,
19440 chunks - (chunk + 1) ?
19441 8 : num_words - chunk * 8,
19442 first_reg + chunk * 8);
19444 else if (is_db_bang)
19446 new_insn = create_instruction_vldmdb
19447 (base_reg,
19448 is_dp,
19449 chunks - (chunk + 1) ?
19450 8 : num_words - chunk * 8,
19451 first_reg + chunk * 8);
19454 if (new_insn)
19455 current_stub_contents =
19456 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19457 new_insn);
19460 /* Only this case requires the base register compensation
19461 subtract. */
19462 if (is_ia_nobang)
19464 current_stub_contents =
19465 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19466 create_instruction_sub
19467 (base_reg, base_reg, 4*num_words));
19470 /* B initial_insn_addr+4. */
19471 current_stub_contents =
19472 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19473 create_instruction_branch_absolute
19474 (initial_insn_addr - current_stub_contents));
19477 /* Fill the remainder of the stub with deterministic contents. */
19478 current_stub_contents =
19479 stm32l4xx_fill_stub_udf (htab, output_bfd,
19480 base_stub_contents, current_stub_contents,
19481 base_stub_contents +
19482 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
19485 static void
19486 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19487 bfd * output_bfd,
19488 const insn32 wrong_insn,
19489 const bfd_byte *const wrong_insn_addr,
19490 bfd_byte *const stub_contents)
19492 if (is_thumb2_ldmia (wrong_insn))
19493 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19494 wrong_insn, wrong_insn_addr,
19495 stub_contents);
19496 else if (is_thumb2_ldmdb (wrong_insn))
19497 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19498 wrong_insn, wrong_insn_addr,
19499 stub_contents);
19500 else if (is_thumb2_vldm (wrong_insn))
19501 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19502 wrong_insn, wrong_insn_addr,
19503 stub_contents);
19506 /* End of stm32l4xx work-around. */
19509 /* Do code byteswapping. Return FALSE afterwards so that the section is
19510 written out as normal. */
19512 static bfd_boolean
19513 elf32_arm_write_section (bfd *output_bfd,
19514 struct bfd_link_info *link_info,
19515 asection *sec,
19516 bfd_byte *contents)
19518 unsigned int mapcount, errcount;
19519 _arm_elf_section_data *arm_data;
19520 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
19521 elf32_arm_section_map *map;
19522 elf32_vfp11_erratum_list *errnode;
19523 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
19524 bfd_vma ptr;
19525 bfd_vma end;
19526 bfd_vma offset = sec->output_section->vma + sec->output_offset;
19527 bfd_byte tmp;
19528 unsigned int i;
19530 if (globals == NULL)
19531 return FALSE;
19533 /* If this section has not been allocated an _arm_elf_section_data
19534 structure then we cannot record anything. */
19535 arm_data = get_arm_elf_section_data (sec);
19536 if (arm_data == NULL)
19537 return FALSE;
19539 mapcount = arm_data->mapcount;
19540 map = arm_data->map;
19541 errcount = arm_data->erratumcount;
19543 if (errcount != 0)
19545 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
19547 for (errnode = arm_data->erratumlist; errnode != 0;
19548 errnode = errnode->next)
19550 bfd_vma target = errnode->vma - offset;
19552 switch (errnode->type)
19554 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
19556 bfd_vma branch_to_veneer;
19557 /* Original condition code of instruction, plus bit mask for
19558 ARM B instruction. */
19559 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
19560 | 0x0a000000;
19562 /* The instruction is before the label. */
19563 target -= 4;
19565 /* Above offset included in -4 below. */
19566 branch_to_veneer = errnode->u.b.veneer->vma
19567 - errnode->vma - 4;
19569 if ((signed) branch_to_veneer < -(1 << 25)
19570 || (signed) branch_to_veneer >= (1 << 25))
19571 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19572 "range"), output_bfd);
19574 insn |= (branch_to_veneer >> 2) & 0xffffff;
19575 contents[endianflip ^ target] = insn & 0xff;
19576 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19577 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19578 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19580 break;
19582 case VFP11_ERRATUM_ARM_VENEER:
19584 bfd_vma branch_from_veneer;
19585 unsigned int insn;
19587 /* Take size of veneer into account. */
19588 branch_from_veneer = errnode->u.v.branch->vma
19589 - errnode->vma - 12;
19591 if ((signed) branch_from_veneer < -(1 << 25)
19592 || (signed) branch_from_veneer >= (1 << 25))
19593 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19594 "range"), output_bfd);
19596 /* Original instruction. */
19597 insn = errnode->u.v.branch->u.b.vfp_insn;
19598 contents[endianflip ^ target] = insn & 0xff;
19599 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19600 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19601 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19603 /* Branch back to insn after original insn. */
19604 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
19605 contents[endianflip ^ (target + 4)] = insn & 0xff;
19606 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
19607 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
19608 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
19610 break;
19612 default:
19613 abort ();
19618 if (arm_data->stm32l4xx_erratumcount != 0)
19620 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
19621 stm32l4xx_errnode != 0;
19622 stm32l4xx_errnode = stm32l4xx_errnode->next)
19624 bfd_vma target = stm32l4xx_errnode->vma - offset;
19626 switch (stm32l4xx_errnode->type)
19628 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
19630 unsigned int insn;
19631 bfd_vma branch_to_veneer =
19632 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
19634 if ((signed) branch_to_veneer < -(1 << 24)
19635 || (signed) branch_to_veneer >= (1 << 24))
19637 bfd_vma out_of_range =
19638 ((signed) branch_to_veneer < -(1 << 24)) ?
19639 - branch_to_veneer - (1 << 24) :
19640 ((signed) branch_to_veneer >= (1 << 24)) ?
19641 branch_to_veneer - (1 << 24) : 0;
19643 _bfd_error_handler
19644 (_("%pB(%#" PRIx64 "): error: "
19645 "cannot create STM32L4XX veneer; "
19646 "jump out of range by %" PRId64 " bytes; "
19647 "cannot encode branch instruction"),
19648 output_bfd,
19649 (uint64_t) (stm32l4xx_errnode->vma - 4),
19650 (int64_t) out_of_range);
19651 continue;
19654 insn = create_instruction_branch_absolute
19655 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
19657 /* The instruction is before the label. */
19658 target -= 4;
19660 put_thumb2_insn (globals, output_bfd,
19661 (bfd_vma) insn, contents + target);
19663 break;
19665 case STM32L4XX_ERRATUM_VENEER:
19667 bfd_byte * veneer;
19668 bfd_byte * veneer_r;
19669 unsigned int insn;
19671 veneer = contents + target;
19672 veneer_r = veneer
19673 + stm32l4xx_errnode->u.b.veneer->vma
19674 - stm32l4xx_errnode->vma - 4;
19676 if ((signed) (veneer_r - veneer -
19677 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
19678 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
19679 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
19680 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
19681 || (signed) (veneer_r - veneer) >= (1 << 24))
19683 _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
19684 "veneer"), output_bfd);
19685 continue;
19688 /* Original instruction. */
19689 insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
19691 stm32l4xx_create_replacing_stub
19692 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
19694 break;
19696 default:
19697 abort ();
19702 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
19704 arm_unwind_table_edit *edit_node
19705 = arm_data->u.exidx.unwind_edit_list;
19706 /* Now, sec->size is the size of the section we will write. The original
19707 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
19708 markers) was sec->rawsize. (If we performed no edits, rawsize will be
19709 zero and we should use size instead.) */
19710 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
19711 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
19712 unsigned int in_index, out_index;
19713 bfd_vma add_to_offsets = 0;
19715 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
19717 if (edit_node)
19719 unsigned int edit_index = edit_node->index;
19721 if (in_index < edit_index && in_index * 8 < input_size)
19723 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19724 contents + in_index * 8, add_to_offsets);
19725 out_index++;
19726 in_index++;
19728 else if (in_index == edit_index
19729 || (in_index * 8 >= input_size
19730 && edit_index == UINT_MAX))
19732 switch (edit_node->type)
19734 case DELETE_EXIDX_ENTRY:
19735 in_index++;
19736 add_to_offsets += 8;
19737 break;
19739 case INSERT_EXIDX_CANTUNWIND_AT_END:
19741 asection *text_sec = edit_node->linked_section;
19742 bfd_vma text_offset = text_sec->output_section->vma
19743 + text_sec->output_offset
19744 + text_sec->size;
19745 bfd_vma exidx_offset = offset + out_index * 8;
19746 unsigned long prel31_offset;
19748 /* Note: this is meant to be equivalent to an
19749 R_ARM_PREL31 relocation. These synthetic
19750 EXIDX_CANTUNWIND markers are not relocated by the
19751 usual BFD method. */
19752 prel31_offset = (text_offset - exidx_offset)
19753 & 0x7ffffffful;
19754 if (bfd_link_relocatable (link_info))
19756 /* A relocation is created for the new
19757 EXIDX_CANTUNWIND entry, so there is no need
19758 to adjust the offset by hand. */
19759 prel31_offset = text_sec->output_offset
19760 + text_sec->size;
19763 /* First address we can't unwind. */
19764 bfd_put_32 (output_bfd, prel31_offset,
19765 &edited_contents[out_index * 8]);
19767 /* Code for EXIDX_CANTUNWIND. */
19768 bfd_put_32 (output_bfd, 0x1,
19769 &edited_contents[out_index * 8 + 4]);
19771 out_index++;
19772 add_to_offsets -= 8;
19774 break;
19777 edit_node = edit_node->next;
19780 else
19782 /* No more edits, copy remaining entries verbatim. */
19783 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19784 contents + in_index * 8, add_to_offsets);
19785 out_index++;
19786 in_index++;
19790 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
19791 bfd_set_section_contents (output_bfd, sec->output_section,
19792 edited_contents,
19793 (file_ptr) sec->output_offset, sec->size);
19795 return TRUE;
19798 /* Fix code to point to Cortex-A8 erratum stubs. */
19799 if (globals->fix_cortex_a8)
19801 struct a8_branch_to_stub_data data;
19803 data.writing_section = sec;
19804 data.contents = contents;
19806 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
19807 & data);
19810 if (mapcount == 0)
19811 return FALSE;
19813 if (globals->byteswap_code)
19815 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
19817 ptr = map[0].vma;
19818 for (i = 0; i < mapcount; i++)
19820 if (i == mapcount - 1)
19821 end = sec->size;
19822 else
19823 end = map[i + 1].vma;
19825 switch (map[i].type)
19827 case 'a':
19828 /* Byte swap code words. */
19829 while (ptr + 3 < end)
19831 tmp = contents[ptr];
19832 contents[ptr] = contents[ptr + 3];
19833 contents[ptr + 3] = tmp;
19834 tmp = contents[ptr + 1];
19835 contents[ptr + 1] = contents[ptr + 2];
19836 contents[ptr + 2] = tmp;
19837 ptr += 4;
19839 break;
19841 case 't':
19842 /* Byte swap code halfwords. */
19843 while (ptr + 1 < end)
19845 tmp = contents[ptr];
19846 contents[ptr] = contents[ptr + 1];
19847 contents[ptr + 1] = tmp;
19848 ptr += 2;
19850 break;
19852 case 'd':
19853 /* Leave data alone. */
19854 break;
19856 ptr = end;
19860 free (map);
19861 arm_data->mapcount = -1;
19862 arm_data->mapsize = 0;
19863 arm_data->map = NULL;
19865 return FALSE;
19868 /* Mangle thumb function symbols as we read them in. */
19870 static bfd_boolean
19871 elf32_arm_swap_symbol_in (bfd * abfd,
19872 const void *psrc,
19873 const void *pshn,
19874 Elf_Internal_Sym *dst)
19876 Elf_Internal_Shdr *symtab_hdr;
19877 const char *name = NULL;
19879 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19880 return FALSE;
19881 dst->st_target_internal = 0;
19883 /* New EABI objects mark thumb function symbols by setting the low bit of
19884 the address. */
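/* For example, a Thumb function symbol read in with st_value 0x8001 is
   stored internally with st_value 0x8000 and a branch type of
   ST_BRANCH_TO_THUMB. */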
19885 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19886 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19888 if (dst->st_value & 1)
19890 dst->st_value &= ~(bfd_vma) 1;
19891 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19892 ST_BRANCH_TO_THUMB);
19894 else
19895 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19897 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19899 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19900 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19902 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19903 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19904 else
19905 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19907 /* Mark CMSE special symbols. */
19908 symtab_hdr = & elf_symtab_hdr (abfd);
19909 if (symtab_hdr->sh_size)
19910 name = bfd_elf_sym_name (abfd, symtab_hdr, dst, NULL);
19911 if (name && CONST_STRNEQ (name, CMSE_PREFIX))
19912 ARM_SET_SYM_CMSE_SPCL (dst->st_target_internal);
19914 return TRUE;
19918 /* Mangle thumb function symbols as we write them out. */
19920 static void
19921 elf32_arm_swap_symbol_out (bfd *abfd,
19922 const Elf_Internal_Sym *src,
19923 void *cdst,
19924 void *shndx)
19926 Elf_Internal_Sym newsym;
19928 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19929 of the address set, as per the new EABI. We do this unconditionally
19930 because objcopy does not set the elf header flags until after
19931 it writes out the symbol table. */
19932 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19934 newsym = *src;
19935 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19936 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19937 if (newsym.st_shndx != SHN_UNDEF)
19939 /* Do this only for defined symbols. At link time, the static
19940 linker simulates the dynamic linker's symbol resolution and
19941 carries over the thumbness of the symbols it finds to the
19942 output symbol table. It's not clear how this happens, but
19943 the thumbness of undefined symbols may well be different at
19944 runtime, and writing '1' for them would be confusing for users
19945 and possibly for the dynamic linker itself. */
19947 newsym.st_value |= 1;
19950 src = &newsym;
19952 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19955 /* Add the PT_ARM_EXIDX program header. */
19957 static bfd_boolean
19958 elf32_arm_modify_segment_map (bfd *abfd,
19959 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19961 struct elf_segment_map *m;
19962 asection *sec;
19964 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19965 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19967 /* If there is already a PT_ARM_EXIDX header, then we do not
19968 want to add another one. This situation arises when running
19969 "strip"; the input binary already has the header. */
19970 m = elf_seg_map (abfd);
19971 while (m && m->p_type != PT_ARM_EXIDX)
19972 m = m->next;
19973 if (!m)
19975 m = (struct elf_segment_map *)
19976 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19977 if (m == NULL)
19978 return FALSE;
19979 m->p_type = PT_ARM_EXIDX;
19980 m->count = 1;
19981 m->sections[0] = sec;
19983 m->next = elf_seg_map (abfd);
19984 elf_seg_map (abfd) = m;
19988 return TRUE;
19991 /* We may add a PT_ARM_EXIDX program header. */
19993 static int
19994 elf32_arm_additional_program_headers (bfd *abfd,
19995 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19997 asection *sec;
19999 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
20000 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
20001 return 1;
20002 else
20003 return 0;
20006 /* Hook called by the linker routine which adds symbols from an object
20007 file. */
20009 static bfd_boolean
20010 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
20011 Elf_Internal_Sym *sym, const char **namep,
20012 flagword *flagsp, asection **secp, bfd_vma *valp)
20014 if (elf32_arm_hash_table (info) == NULL)
20015 return FALSE;
20017 if (elf32_arm_hash_table (info)->vxworks_p
20018 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
20019 flagsp, secp, valp))
20020 return FALSE;
20022 return TRUE;
20025 /* We use this to override swap_symbol_in and swap_symbol_out. */
20026 const struct elf_size_info elf32_arm_size_info =
20028 sizeof (Elf32_External_Ehdr),
20029 sizeof (Elf32_External_Phdr),
20030 sizeof (Elf32_External_Shdr),
20031 sizeof (Elf32_External_Rel),
20032 sizeof (Elf32_External_Rela),
20033 sizeof (Elf32_External_Sym),
20034 sizeof (Elf32_External_Dyn),
20035 sizeof (Elf_External_Note),
20038 32, 2,
20039 ELFCLASS32, EV_CURRENT,
20040 bfd_elf32_write_out_phdrs,
20041 bfd_elf32_write_shdrs_and_ehdr,
20042 bfd_elf32_checksum_contents,
20043 bfd_elf32_write_relocs,
20044 elf32_arm_swap_symbol_in,
20045 elf32_arm_swap_symbol_out,
20046 bfd_elf32_slurp_reloc_table,
20047 bfd_elf32_slurp_symbol_table,
20048 bfd_elf32_swap_dyn_in,
20049 bfd_elf32_swap_dyn_out,
20050 bfd_elf32_swap_reloc_in,
20051 bfd_elf32_swap_reloc_out,
20052 bfd_elf32_swap_reloca_in,
20053 bfd_elf32_swap_reloca_out
20056 static bfd_vma
20057 read_code32 (const bfd *abfd, const bfd_byte *addr)
20059 /* V7 BE8 code is always little endian. */
20060 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
20061 return bfd_getl32 (addr);
20063 return bfd_get_32 (abfd, addr);
20066 static bfd_vma
20067 read_code16 (const bfd *abfd, const bfd_byte *addr)
20069 /* V7 BE8 code is always little endian. */
20070 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
20071 return bfd_getl16 (addr);
20073 return bfd_get_16 (abfd, addr);
20076 /* Return the size of the plt0 entry starting at ADDR,
20077 or (bfd_vma) -1 if the size cannot be determined. */
20079 static bfd_vma
20080 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
20082 bfd_vma first_word;
20083 bfd_vma plt0_size;
20085 first_word = read_code32 (abfd, addr);
20087 if (first_word == elf32_arm_plt0_entry[0])
20088 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
20089 else if (first_word == elf32_thumb2_plt0_entry[0])
20090 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
20091 else
20092 /* We don't yet handle this PLT format. */
20093 return (bfd_vma) -1;
20095 return plt0_size;
20098 /* Return the size of the plt entry starting at offset OFFSET
20099 of the plt section located at address START,
20100 or (bfd_vma) -1 if the size cannot be determined. */
20102 static bfd_vma
20103 elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
20105 bfd_vma first_insn;
20106 bfd_vma plt_size = 0;
20107 const bfd_byte *addr = start + offset;
20109 /* PLT entry size is fixed on Thumb-only platforms. */
20110 if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
20111 return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
20113 /* Respect Thumb stub if necessary. */
20114 if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
20116 plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);
20119 /* Strip immediate from first add. */
20120 first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;
20122 #ifdef FOUR_WORD_PLT
20123 if (first_insn == elf32_arm_plt_entry[0])
20124 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
20125 #else
20126 if (first_insn == elf32_arm_plt_entry_long[0])
20127 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
20128 else if (first_insn == elf32_arm_plt_entry_short[0])
20129 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
20130 #endif
20131 else
20132 /* We don't yet handle this PLT format. */
20133 return (bfd_vma) -1;
20135 return plt_size;
20138 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
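/* For example, an executable whose .rel.plt has an entry for "printf"
   gets a synthetic symbol named "printf@plt" (with "+0x..." appended for
   a non-zero addend) whose value is the offset of the corresponding
   entry within .plt. */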
20140 static long
20141 elf32_arm_get_synthetic_symtab (bfd *abfd,
20142 long symcount ATTRIBUTE_UNUSED,
20143 asymbol **syms ATTRIBUTE_UNUSED,
20144 long dynsymcount,
20145 asymbol **dynsyms,
20146 asymbol **ret)
20148 asection *relplt;
20149 asymbol *s;
20150 arelent *p;
20151 long count, i, n;
20152 size_t size;
20153 Elf_Internal_Shdr *hdr;
20154 char *names;
20155 asection *plt;
20156 bfd_vma offset;
20157 bfd_byte *data;
20159 *ret = NULL;
20161 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
20162 return 0;
20164 if (dynsymcount <= 0)
20165 return 0;
20167 relplt = bfd_get_section_by_name (abfd, ".rel.plt");
20168 if (relplt == NULL)
20169 return 0;
20171 hdr = &elf_section_data (relplt)->this_hdr;
20172 if (hdr->sh_link != elf_dynsymtab (abfd)
20173 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
20174 return 0;
20176 plt = bfd_get_section_by_name (abfd, ".plt");
20177 if (plt == NULL)
20178 return 0;
20180 if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
20181 return -1;
20183 data = plt->contents;
20184 if (data == NULL)
20186 if (!bfd_get_full_section_contents (abfd, (asection *) plt, &data) || data == NULL)
20187 return -1;
20188 bfd_cache_section_contents ((asection *) plt, data);
20191 count = relplt->size / hdr->sh_entsize;
20192 size = count * sizeof (asymbol);
20193 p = relplt->relocation;
20194 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
20196 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
20197 if (p->addend != 0)
20198 size += sizeof ("+0x") - 1 + 8;
20201 s = *ret = (asymbol *) bfd_malloc (size);
20202 if (s == NULL)
20203 return -1;
20205 offset = elf32_arm_plt0_size (abfd, data);
20206 if (offset == (bfd_vma) -1)
20207 return -1;
20209 names = (char *) (s + count);
20210 p = relplt->relocation;
20211 n = 0;
20212 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
20214 size_t len;
20216 bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
20217 if (plt_size == (bfd_vma) -1)
20218 break;
20220 *s = **p->sym_ptr_ptr;
20221 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
20222 we are defining a symbol, ensure one of them is set. */
20223 if ((s->flags & BSF_LOCAL) == 0)
20224 s->flags |= BSF_GLOBAL;
20225 s->flags |= BSF_SYNTHETIC;
20226 s->section = plt;
20227 s->value = offset;
20228 s->name = names;
20229 s->udata.p = NULL;
20230 len = strlen ((*p->sym_ptr_ptr)->name);
20231 memcpy (names, (*p->sym_ptr_ptr)->name, len);
20232 names += len;
20233 if (p->addend != 0)
20235 char buf[30], *a;
20237 memcpy (names, "+0x", sizeof ("+0x") - 1);
20238 names += sizeof ("+0x") - 1;
20239 bfd_sprintf_vma (abfd, buf, p->addend);
20240 for (a = buf; *a == '0'; ++a)
20242 len = strlen (a);
20243 memcpy (names, a, len);
20244 names += len;
20246 memcpy (names, "@plt", sizeof ("@plt"));
20247 names += sizeof ("@plt");
20248 ++s, ++n;
20249 offset += plt_size;
20252 return n;
20255 static bfd_boolean
20256 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
20258 if (hdr->sh_flags & SHF_ARM_PURECODE)
20259 *flags |= SEC_ELF_PURECODE;
20260 return TRUE;
20263 static flagword
20264 elf32_arm_lookup_section_flags (char *flag_name)
20266 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
20267 return SHF_ARM_PURECODE;
20269 return SEC_NO_FLAGS;
20272 static unsigned int
20273 elf32_arm_count_additional_relocs (asection *sec)
20275 struct _arm_elf_section_data *arm_data;
20276 arm_data = get_arm_elf_section_data (sec);
20278 return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
20281 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION, which
20282 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised,
20283 FALSE otherwise. ISECTION is the best-guess matching section from the
20284 input bfd IBFD, but it might be NULL. */
20286 static bfd_boolean
20287 elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
20288 bfd *obfd ATTRIBUTE_UNUSED,
20289 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
20290 Elf_Internal_Shdr *osection)
20292 switch (osection->sh_type)
20294 case SHT_ARM_EXIDX:
20296 Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
20297 Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
20298 unsigned i = 0;
20300 osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
20301 osection->sh_info = 0;
20303 /* The sh_link field must be set to the text section associated with
20304 this index section. Unfortunately the ARM EHABI does not specify
20305 exactly how to determine this association. Our caller does try
20306 to match up OSECTION with its corresponding input section, however,
20307 so that is a good first guess. */
20308 if (isection != NULL
20309 && osection->bfd_section != NULL
20310 && isection->bfd_section != NULL
20311 && isection->bfd_section->output_section != NULL
20312 && isection->bfd_section->output_section == osection->bfd_section
20313 && iheaders != NULL
20314 && isection->sh_link > 0
20315 && isection->sh_link < elf_numsections (ibfd)
20316 && iheaders[isection->sh_link]->bfd_section != NULL
20317 && iheaders[isection->sh_link]->bfd_section->output_section != NULL
20320 for (i = elf_numsections (obfd); i-- > 0;)
20321 if (oheaders[i]->bfd_section
20322 == iheaders[isection->sh_link]->bfd_section->output_section)
20323 break;
20326 if (i == 0)
20328 /* Failing that we have to find a matching section ourselves. If
20329 we had the output section name available we could compare that
20330 with input section names. Unfortunately we don't. So instead
20331 we use a simple heuristic and look for the nearest executable
20332 section before this one. */
20333 for (i = elf_numsections (obfd); i-- > 0;)
20334 if (oheaders[i] == osection)
20335 break;
20336 if (i == 0)
20337 break;
20339 while (i-- > 0)
20340 if (oheaders[i]->sh_type == SHT_PROGBITS
20341 && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
20342 == (SHF_ALLOC | SHF_EXECINSTR))
20343 break;
20346 if (i)
20348 osection->sh_link = i;
20349 /* If the text section was part of a group
20350 then the index section should be too. */
20351 if (oheaders[i]->sh_flags & SHF_GROUP)
20352 osection->sh_flags |= SHF_GROUP;
20353 return TRUE;
20356 break;
20358 case SHT_ARM_PREEMPTMAP:
20359 osection->sh_flags = SHF_ALLOC;
20360 break;
20362 case SHT_ARM_ATTRIBUTES:
20363 case SHT_ARM_DEBUGOVERLAY:
20364 case SHT_ARM_OVERLAYSECTION:
20365 default:
20366 break;
20369 return FALSE;
20372 /* Returns TRUE if NAME is an ARM mapping symbol.
20373 Traditionally the symbols $a, $d and $t have been used.
20374 The ARM ELF standard also defines $x (for A64 code). It also allows a
20375 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20376 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20377 not support them here. $t.x indicates the start of ThumbEE instructions. */
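/* For example, "$a", "$d", "$t", "$x" and suffixed forms such as "$t.x"
   are accepted here, while "$b", "$f", "$p", "$m" and prefixed names are
   rejected. */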
20379 static bfd_boolean
20380 is_arm_mapping_symbol (const char * name)
20382 return name != NULL /* Paranoia. */
20383 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20384 the mapping symbols could have acquired a prefix.
20385 We do not support this here, since such symbols no
20386 longer conform to the ARM ELF ABI. */
20387 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20388 && (name[2] == 0 || name[2] == '.');
20389 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20390 any characters that follow the period are legal characters for the body
20391 of a symbol's name. For now we just assume that this is the case. */
20394 /* Make sure that mapping symbols in object files are not removed via the
20395 "strip --strip-unneeded" tool. These symbols are needed in order to
20396 correctly generate interworking veneers, and for byte swapping code
20397 regions. Once an object file has been linked, it is safe to remove the
20398 symbols as they will no longer be needed. */
20400 static void
20401 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20403 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20404 && sym->section != bfd_abs_section_ptr
20405 && is_arm_mapping_symbol (sym->name))
20406 sym->flags |= BSF_KEEP;
20409 #undef elf_backend_copy_special_section_fields
20410 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20412 #define ELF_ARCH bfd_arch_arm
20413 #define ELF_TARGET_ID ARM_ELF_DATA
20414 #define ELF_MACHINE_CODE EM_ARM
20415 #ifdef __QNXTARGET__
20416 #define ELF_MAXPAGESIZE 0x1000
20417 #else
20418 #define ELF_MAXPAGESIZE 0x10000
20419 #endif
20420 #define ELF_MINPAGESIZE 0x1000
20421 #define ELF_COMMONPAGESIZE 0x1000
20423 #define bfd_elf32_mkobject elf32_arm_mkobject
20425 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20426 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20427 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20428 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20429 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20430 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20431 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20432 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
20433 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20434 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20435 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20436 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20437 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20439 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20440 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
20441 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
20442 #define elf_backend_check_relocs elf32_arm_check_relocs
20443 #define elf_backend_update_relocs elf32_arm_update_relocs
20444 #define elf_backend_relocate_section elf32_arm_relocate_section
20445 #define elf_backend_write_section elf32_arm_write_section
20446 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
20447 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
20448 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
20449 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
20450 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
20451 #define elf_backend_always_size_sections elf32_arm_always_size_sections
20452 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
20453 #define elf_backend_post_process_headers elf32_arm_post_process_headers
20454 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
20455 #define elf_backend_object_p elf32_arm_object_p
20456 #define elf_backend_fake_sections elf32_arm_fake_sections
20457 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
20458 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20459 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
20460 #define elf_backend_size_info elf32_arm_size_info
20461 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20462 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
20463 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
20464 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
20465 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
20466 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
20467 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
20468 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
20470 #define elf_backend_can_refcount 1
20471 #define elf_backend_can_gc_sections 1
20472 #define elf_backend_plt_readonly 1
20473 #define elf_backend_want_got_plt 1
20474 #define elf_backend_want_plt_sym 0
20475 #define elf_backend_want_dynrelro 1
20476 #define elf_backend_may_use_rel_p 1
20477 #define elf_backend_may_use_rela_p 0
20478 #define elf_backend_default_use_rela_p 0
20479 #define elf_backend_dtrel_excludes_plt 1
20481 #define elf_backend_got_header_size 12
20482 #define elf_backend_extern_protected_data 1
20484 #undef elf_backend_obj_attrs_vendor
20485 #define elf_backend_obj_attrs_vendor "aeabi"
20486 #undef elf_backend_obj_attrs_section
20487 #define elf_backend_obj_attrs_section ".ARM.attributes"
20488 #undef elf_backend_obj_attrs_arg_type
20489 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
20490 #undef elf_backend_obj_attrs_section_type
20491 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
20492 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
20493 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
20495 #undef elf_backend_section_flags
20496 #define elf_backend_section_flags elf32_arm_section_flags
20497 #undef elf_backend_lookup_section_flags_hook
20498 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
20500 #define elf_backend_linux_prpsinfo32_ugid16 TRUE
20502 #include "elf32-target.h"
20504 /* Native Client targets. */
20506 #undef TARGET_LITTLE_SYM
20507 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
20508 #undef TARGET_LITTLE_NAME
20509 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
20510 #undef TARGET_BIG_SYM
20511 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
20512 #undef TARGET_BIG_NAME
20513 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
20515 /* Like elf32_arm_link_hash_table_create -- but overrides
20516 appropriately for NaCl. */
20518 static struct bfd_link_hash_table *
20519 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20521 struct bfd_link_hash_table *ret;
20523 ret = elf32_arm_link_hash_table_create (abfd);
20524 if (ret)
20526 struct elf32_arm_link_hash_table *htab
20527 = (struct elf32_arm_link_hash_table *) ret;
20529 htab->nacl_p = 1;
20531 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20532 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20534 return ret;
20537 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20538 really need to use elf32_arm_modify_segment_map. But we do it
20539 anyway just to reduce gratuitous differences with the stock ARM backend. */
20541 static bfd_boolean
20542 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20544 return (elf32_arm_modify_segment_map (abfd, info)
20545 && nacl_modify_segment_map (abfd, info));
20548 static void
20549 elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
20551 elf32_arm_final_write_processing (abfd, linker);
20552 nacl_final_write_processing (abfd, linker);
20555 static bfd_vma
20556 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20557 const arelent *rel ATTRIBUTE_UNUSED)
20559 return plt->vma
20560 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20561 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20564 #undef elf32_bed
20565 #define elf32_bed elf32_arm_nacl_bed
20566 #undef bfd_elf32_bfd_link_hash_table_create
20567 #define bfd_elf32_bfd_link_hash_table_create \
20568 elf32_arm_nacl_link_hash_table_create
20569 #undef elf_backend_plt_alignment
20570 #define elf_backend_plt_alignment 4
20571 #undef elf_backend_modify_segment_map
20572 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
20573 #undef elf_backend_modify_program_headers
20574 #define elf_backend_modify_program_headers nacl_modify_program_headers
20575 #undef elf_backend_final_write_processing
20576 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
20577 #undef bfd_elf32_get_synthetic_symtab
20578 #undef elf_backend_plt_sym_val
20579 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
20580 #undef elf_backend_copy_special_section_fields
20582 #undef ELF_MINPAGESIZE
20583 #undef ELF_COMMONPAGESIZE
20586 #include "elf32-target.h"
20588 /* Reset to defaults. */
20589 #undef elf_backend_plt_alignment
20590 #undef elf_backend_modify_segment_map
20591 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20592 #undef elf_backend_modify_program_headers
20593 #undef elf_backend_final_write_processing
20594 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20595 #undef ELF_MINPAGESIZE
20596 #define ELF_MINPAGESIZE 0x1000
20597 #undef ELF_COMMONPAGESIZE
20598 #define ELF_COMMONPAGESIZE 0x1000
20601 /* FDPIC Targets. */
20603 #undef TARGET_LITTLE_SYM
20604 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
20605 #undef TARGET_LITTLE_NAME
20606 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
20607 #undef TARGET_BIG_SYM
20608 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
20609 #undef TARGET_BIG_NAME
20610 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
20611 #undef elf_match_priority
20612 #define elf_match_priority 128
20613 #undef ELF_OSABI
20614 #define ELF_OSABI ELFOSABI_ARM_FDPIC
20616 /* Like elf32_arm_link_hash_table_create -- but overrides
20617 appropriately for FDPIC. */
20619 static struct bfd_link_hash_table *
20620 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20622 struct bfd_link_hash_table *ret;
20624 ret = elf32_arm_link_hash_table_create (abfd);
20625 if (ret)
20627 struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20629 htab->fdpic_p = 1;
20631 return ret;
20634 /* We need dynamic symbols for every section, since segments can
20635 relocate independently. */
20636 static bfd_boolean
20637 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20638 struct bfd_link_info *info
20639 ATTRIBUTE_UNUSED,
20640 asection *p ATTRIBUTE_UNUSED)
20642 switch (elf_section_data (p)->this_hdr.sh_type)
20644 case SHT_PROGBITS:
20645 case SHT_NOBITS:
20646 /* If sh_type is yet undecided, assume it could be
20647 SHT_PROGBITS/SHT_NOBITS. */
20648 case SHT_NULL:
20649 return FALSE;
20651 /* There shouldn't be section relative relocations
20652 against any other section. */
20653 default:
20654 return TRUE;
20658 #undef elf32_bed
20659 #define elf32_bed elf32_arm_fdpic_bed
20661 #undef bfd_elf32_bfd_link_hash_table_create
20662 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
20664 #undef elf_backend_omit_section_dynsym
20665 #define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
20667 #include "elf32-target.h"
20669 #undef elf_match_priority
20670 #undef ELF_OSABI
20671 #undef elf_backend_omit_section_dynsym
20673 /* VxWorks Targets. */
20675 #undef TARGET_LITTLE_SYM
20676 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
20677 #undef TARGET_LITTLE_NAME
20678 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
20679 #undef TARGET_BIG_SYM
20680 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
20681 #undef TARGET_BIG_NAME
20682 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
20684 /* Like elf32_arm_link_hash_table_create -- but overrides
20685 appropriately for VxWorks. */
20687 static struct bfd_link_hash_table *
20688 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20689 {
20690 struct bfd_link_hash_table *ret;
20692 ret = elf32_arm_link_hash_table_create (abfd);
20693 if (ret)
20694 {
20695 struct elf32_arm_link_hash_table *htab
20696 = (struct elf32_arm_link_hash_table *) ret;
20697 htab->use_rel = 0;
20698 htab->vxworks_p = 1;
20699 }
20700 return ret;
20701 }
20703 static void
20704 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
20705 {
20706 elf32_arm_final_write_processing (abfd, linker);
20707 elf_vxworks_final_write_processing (abfd, linker);
20708 }
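/* Editor's note: illustrative sketch only, not part of the original file.
   The VxWorks hook above layers OS-specific final-write work on top of
   the generic ARM processing by calling both in sequence; the shape of
   that composition, with hypothetical names, is simply:  */
#if 0
static void
example_base_final_write (void *abfd)
{
  (void) abfd;  /* generic ARM post-processing would go here */
}

static void
example_os_final_write (void *abfd)
{
  (void) abfd;  /* OS-specific post-processing would go here */
}

static void
example_combined_final_write (void *abfd)
{
  example_base_final_write (abfd);      /* run the generic pass first */
  example_os_final_write (abfd);        /* then the OS-specific pass */
}
#endif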
20710 #undef elf32_bed
20711 #define elf32_bed elf32_arm_vxworks_bed
20713 #undef bfd_elf32_bfd_link_hash_table_create
20714 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
20715 #undef elf_backend_final_write_processing
20716 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
20717 #undef elf_backend_emit_relocs
20718 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
20720 #undef elf_backend_may_use_rel_p
20721 #define elf_backend_may_use_rel_p 0
20722 #undef elf_backend_may_use_rela_p
20723 #define elf_backend_may_use_rela_p 1
20724 #undef elf_backend_default_use_rela_p
20725 #define elf_backend_default_use_rela_p 1
20726 #undef elf_backend_want_plt_sym
20727 #define elf_backend_want_plt_sym 1
20728 #undef ELF_MAXPAGESIZE
20729 #define ELF_MAXPAGESIZE 0x1000
20731 #include "elf32-target.h"
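/* Editor's note: standalone sketch, not part of the original file.  The
   VxWorks vector above switches the backend from REL to RELA relocations
   (use_rel is cleared in its hash-table creator and the may_use_rela_p /
   default_use_rela_p macros are set).  The on-disk difference is the
   explicit addend field, per the ELF32 specification:  */
#if 0
#include <stdint.h>

typedef struct
{
  uint32_t r_offset;    /* where the relocation applies */
  uint32_t r_info;      /* symbol index (high 24 bits) and type (low 8 bits) */
} example_Elf32_Rel;    /* REL: the addend lives in the relocated field itself */

typedef struct
{
  uint32_t r_offset;
  uint32_t r_info;
  int32_t  r_addend;    /* RELA: the addend is carried in the entry */
} example_Elf32_Rela;
#endif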
20734 /* Merge backend specific data from an object file to the output
20735 object file when linking. */
20737 static bfd_boolean
20738 elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
20739 {
20740 bfd *obfd = info->output_bfd;
20741 flagword out_flags;
20742 flagword in_flags;
20743 bfd_boolean flags_compatible = TRUE;
20744 asection *sec;
20746 /* Check if we have the same endianness. */
20747 if (! _bfd_generic_verify_endian_match (ibfd, info))
20748 return FALSE;
20750 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
20751 return TRUE;
20753 if (!elf32_arm_merge_eabi_attributes (ibfd, info))
20754 return FALSE;
20756 /* The input BFD must have had its flags initialised. */
20757 /* The following seems bogus to me -- The flags are initialized in
20758 the assembler but I don't think an elf_flags_init field is
20759 written into the object. */
20760 /* BFD_ASSERT (elf_flags_init (ibfd)); */
20762 in_flags = elf_elfheader (ibfd)->e_flags;
20763 out_flags = elf_elfheader (obfd)->e_flags;
20765 /* In theory there is no reason why we couldn't handle this. However,
20766 in practice it isn't even close to working and there is no real
20767 reason to want it. */
20768 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
20769 && !(ibfd->flags & DYNAMIC)
20770 && (in_flags & EF_ARM_BE8))
20771 {
20772 _bfd_error_handler (_("error: %pB is already in final BE8 format"),
20773 ibfd);
20774 return FALSE;
20775 }
20777 if (!elf_flags_init (obfd))
20778 {
20779 /* If the input is the default architecture and had the default
20780 flags then do not bother setting the flags for the output
20781 architecture, instead allow future merges to do this. If no
20782 future merges ever set these flags then they will retain their
20783 uninitialised values, which, surprise surprise, correspond
20784 to the default values. */
20785 if (bfd_get_arch_info (ibfd)->the_default
20786 && elf_elfheader (ibfd)->e_flags == 0)
20787 return TRUE;
20789 elf_flags_init (obfd) = TRUE;
20790 elf_elfheader (obfd)->e_flags = in_flags;
20792 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
20793 && bfd_get_arch_info (obfd)->the_default)
20794 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
20796 return TRUE;
20797 }
20799 /* Determine what should happen if the input ARM architecture
20800 does not match the output ARM architecture. */
20801 if (! bfd_arm_merge_machines (ibfd, obfd))
20802 return FALSE;
20804 /* Identical flags must be compatible. */
20805 if (in_flags == out_flags)
20806 return TRUE;
20808 /* Check to see if the input BFD actually contains any sections. If
20809 not, its flags may not have been initialised either, but it
20810 cannot actually cause any incompatibility. Do not short-circuit
20811 dynamic objects; their section list may be emptied by
20812 elf_link_add_object_symbols.
20814 Also check to see if there are no code sections in the input.
20815 In this case there is no need to check for code specific flags.
20816 XXX - do we need to worry about floating-point format compatibility
20817 in data sections? */
20818 if (!(ibfd->flags & DYNAMIC))
20819 {
20820 bfd_boolean null_input_bfd = TRUE;
20821 bfd_boolean only_data_sections = TRUE;
20823 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
20824 {
20825 /* Ignore synthetic glue sections. */
20826 if (strcmp (sec->name, ".glue_7")
20827 && strcmp (sec->name, ".glue_7t"))
20828 {
20829 if ((bfd_get_section_flags (ibfd, sec)
20830 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20831 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20832 only_data_sections = FALSE;
20834 null_input_bfd = FALSE;
20835 break;
20836 }
20837 }
20839 if (null_input_bfd || only_data_sections)
20840 return TRUE;
20841 }
20843 /* Complain about various flag mismatches. */
20844 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
20845 EF_ARM_EABI_VERSION (out_flags)))
20846 {
20847 _bfd_error_handler
20848 (_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
20849 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
20850 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
20851 return FALSE;
20852 }
20854 /* Not sure what needs to be checked for EABI versions >= 1. */
20855 /* VxWorks libraries do not use these flags. */
20856 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
20857 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
20858 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
20859 {
20860 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
20861 {
20862 _bfd_error_handler
20863 (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
20864 ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
20865 obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
20866 flags_compatible = FALSE;
20867 }
20869 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
20870 {
20871 if (in_flags & EF_ARM_APCS_FLOAT)
20872 _bfd_error_handler
20873 (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
20874 ibfd, obfd);
20875 else
20876 _bfd_error_handler
20877 (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
20878 ibfd, obfd);
20880 flags_compatible = FALSE;
20881 }
20883 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
20884 {
20885 if (in_flags & EF_ARM_VFP_FLOAT)
20886 _bfd_error_handler
20887 (_("error: %pB uses %s instructions, whereas %pB does not"),
20888 ibfd, "VFP", obfd);
20889 else
20890 _bfd_error_handler
20891 (_("error: %pB uses %s instructions, whereas %pB does not"),
20892 ibfd, "FPA", obfd);
20894 flags_compatible = FALSE;
20895 }
20897 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
20898 {
20899 if (in_flags & EF_ARM_MAVERICK_FLOAT)
20900 _bfd_error_handler
20901 (_("error: %pB uses %s instructions, whereas %pB does not"),
20902 ibfd, "Maverick", obfd);
20903 else
20904 _bfd_error_handler
20905 (_("error: %pB does not use %s instructions, whereas %pB does"),
20906 ibfd, "Maverick", obfd);
20908 flags_compatible = FALSE;
20909 }
20911 #ifdef EF_ARM_SOFT_FLOAT
20912 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
20913 {
20914 /* We can allow interworking between code that is VFP format
20915 layout, and uses either soft float or integer regs for
20916 passing floating point arguments and results. We already
20917 know that the APCS_FLOAT flags match; similarly for VFP
20918 flags. */
20919 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
20920 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
20921 {
20922 if (in_flags & EF_ARM_SOFT_FLOAT)
20923 _bfd_error_handler
20924 (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
20925 ibfd, obfd);
20926 else
20927 _bfd_error_handler
20928 (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
20929 ibfd, obfd);
20931 flags_compatible = FALSE;
20932 }
20933 }
20934 #endif
20936 /* Interworking mismatch is only a warning. */
20937 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
20938 {
20939 if (in_flags & EF_ARM_INTERWORK)
20940 {
20941 _bfd_error_handler
20942 (_("warning: %pB supports interworking, whereas %pB does not"),
20943 ibfd, obfd);
20944 }
20945 else
20946 {
20947 _bfd_error_handler
20948 (_("warning: %pB does not support interworking, whereas %pB does"),
20949 ibfd, obfd);
20950 }
20951 }
20954 return flags_compatible;
20955 }
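/* Editor's note: standalone sketch, not part of the original file.  The
   checks above all operate on the ELF header's e_flags word; the EABI
   version, for instance, occupies the top byte, which is what the
   ">> 24" in the diagnostics extracts.  The mask below mirrors
   EF_ARM_EABIMASK and is shown only for illustration -- real code should
   use the definitions from include/elf/arm.h.  */
#if 0
#include <stdio.h>

#define EXAMPLE_EABIMASK 0xFF000000u

static unsigned int
example_eabi_version (unsigned int e_flags)
{
  return (e_flags & EXAMPLE_EABIMASK) >> 24;
}

int
main (void)
{
  unsigned int in_flags = 0x05000002u;   /* hypothetical input object */
  unsigned int out_flags = 0x05000000u;  /* hypothetical output so far */

  if (example_eabi_version (in_flags) != example_eabi_version (out_flags))
    printf ("EABI version mismatch: %u vs %u\n",
            example_eabi_version (in_flags), example_eabi_version (out_flags));
  else
    printf ("both objects are EABI version %u\n",
            example_eabi_version (in_flags));
  return 0;
}
#endif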
20958 /* Symbian OS Targets. */
20960 #undef TARGET_LITTLE_SYM
20961 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
20962 #undef TARGET_LITTLE_NAME
20963 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
20964 #undef TARGET_BIG_SYM
20965 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
20966 #undef TARGET_BIG_NAME
20967 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
20969 /* Like elf32_arm_link_hash_table_create -- but overrides
20970 appropriately for Symbian OS. */
20972 static struct bfd_link_hash_table *
20973 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
20974 {
20975 struct bfd_link_hash_table *ret;
20977 ret = elf32_arm_link_hash_table_create (abfd);
20978 if (ret)
20979 {
20980 struct elf32_arm_link_hash_table *htab
20981 = (struct elf32_arm_link_hash_table *)ret;
20982 /* There is no PLT header for Symbian OS. */
20983 htab->plt_header_size = 0;
20984 /* The PLT entries are each one instruction and one word. */
20985 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
20986 htab->symbian_p = 1;
20987 /* Symbian uses armv5t or above, so use_blx is always true. */
20988 htab->use_blx = 1;
20989 htab->root.is_relocatable_executable = 1;
20990 }
20991 return ret;
20992 }
20994 static const struct bfd_elf_special_section
20995 elf32_arm_symbian_special_sections[] =
20996 {
20997 /* In a BPABI executable, the dynamic linking sections do not go in
20998 the loadable read-only segment. The post-linker may wish to
20999 refer to these sections, but they are not part of the final
21000 program image. */
21001 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
21002 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
21003 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
21004 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
21005 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
21006 /* These sections do not need to be writable as the SymbianOS
21007 postlinker will arrange things so that no dynamic relocation is
21008 required. */
21009 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
21010 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
21011 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
21012 { NULL, 0, 0, 0, 0 }
21013 };
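/* Editor's note: illustrative sketch, not part of the original file.
   Each row above pairs a name prefix (STRING_COMMA_LEN supplies the
   string and its length) with the sh_type and sh_flags the section
   should receive; the BPABI dynamic-linking sections get no SHF_ALLOC
   here, and the *_array sections get SHF_ALLOC but not SHF_WRITE,
   matching the comments.  The struct below is a simplified stand-in for
   bfd_elf_special_section, and its field order is an assumption.  */
#if 0
#define EXAMPLE_STRING_COMMA_LEN(STR) (STR), (sizeof (STR) - 1)
#define EXAMPLE_SHT_PROGBITS 1

struct example_special_section
{
  const char *prefix;
  int prefix_length;
  int suffix_length;
  unsigned int type;
  unsigned long attr;
};

/* ".got" with length 4, typed SHT_PROGBITS, and no attributes.  */
static const struct example_special_section example_got_entry =
  { EXAMPLE_STRING_COMMA_LEN (".got"), 0, EXAMPLE_SHT_PROGBITS, 0 };
#endif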
21015 static void
21016 elf32_arm_symbian_begin_write_processing (bfd *abfd,
21017 struct bfd_link_info *link_info)
21018 {
21019 /* BPABI objects are never loaded directly by an OS kernel; they are
21020 processed by a postlinker first, into an OS-specific format. If
21021 the D_PAGED bit is set on the file, BFD will align segments on
21022 page boundaries, so that an OS can directly map the file. With
21023 BPABI objects, that just results in wasted space. In addition,
21024 because we clear the D_PAGED bit, map_sections_to_segments will
21025 recognize that the program headers should not be mapped into any
21026 loadable segment. */
21027 abfd->flags &= ~D_PAGED;
21028 elf32_arm_begin_write_processing (abfd, link_info);
21029 }
21031 static bfd_boolean
21032 elf32_arm_symbian_modify_segment_map (bfd *abfd,
21033 struct bfd_link_info *info)
21034 {
21035 struct elf_segment_map *m;
21036 asection *dynsec;
21038 /* BPABI shared libraries and executables should have a PT_DYNAMIC
21039 segment. However, because the .dynamic section is not marked
21040 with SEC_LOAD, the generic ELF code will not create such a
21041 segment. */
21042 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
21043 if (dynsec)
21044 {
21045 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
21046 if (m->p_type == PT_DYNAMIC)
21047 break;
21049 if (m == NULL)
21050 {
21051 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
21052 m->next = elf_seg_map (abfd);
21053 elf_seg_map (abfd) = m;
21054 }
21055 }
21057 /* Also call the generic arm routine. */
21058 return elf32_arm_modify_segment_map (abfd, info);
21059 }
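/* Editor's note: standalone sketch, not part of the original file.  The
   routine above scans the segment map for an existing PT_DYNAMIC entry
   and prepends a new one when none is found; the list handling reduces
   to the following pattern (types are hypothetical, PT_DYNAMIC is 2 in
   the ELF specification).  */
#if 0
#include <stdlib.h>

#define EXAMPLE_PT_DYNAMIC 2

struct example_seg_map
{
  struct example_seg_map *next;
  unsigned long p_type;
};

static struct example_seg_map *
example_ensure_dynamic_segment (struct example_seg_map **head)
{
  struct example_seg_map *m;

  for (m = *head; m != NULL; m = m->next)
    if (m->p_type == EXAMPLE_PT_DYNAMIC)
      return m;                         /* already present */

  m = calloc (1, sizeof (*m));          /* otherwise create ... */
  if (m == NULL)
    return NULL;
  m->p_type = EXAMPLE_PT_DYNAMIC;
  m->next = *head;                      /* ... and prepend, as above */
  *head = m;
  return m;
}
#endif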
21061 /* Return address for Ith PLT stub in section PLT, for relocation REL
21062 or (bfd_vma) -1 if it should not be included. */
21064 static bfd_vma
21065 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
21066 const arelent *rel ATTRIBUTE_UNUSED)
21067 {
21068 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
21069 }
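/* Editor's note: standalone worked example, not part of the original
   file.  With the Symbian PLT template described above (one instruction
   plus one data word, i.e. two 32-bit words per stub), consecutive stubs
   are 8 bytes apart, so the I'th stub sits at plt->vma + 8 * I.  The
   .plt address below is hypothetical.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long plt_vma = 0x8000;       /* hypothetical .plt start address */
  unsigned long entry_size = 4 * 2;     /* two 32-bit words per stub */
  unsigned long i;

  for (i = 0; i < 3; i++)
    printf ("stub %lu at 0x%lx\n", i, plt_vma + entry_size * i);
  /* Prints 0x8000, 0x8008 and 0x8010.  */
  return 0;
}
#endif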
21071 #undef elf32_bed
21072 #define elf32_bed elf32_arm_symbian_bed
21074 /* The dynamic sections are not allocated on SymbianOS; the postlinker
21075 will process them and then discard them. */
21076 #undef ELF_DYNAMIC_SEC_FLAGS
21077 #define ELF_DYNAMIC_SEC_FLAGS \
21078 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
21080 #undef elf_backend_emit_relocs
21082 #undef bfd_elf32_bfd_link_hash_table_create
21083 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
21084 #undef elf_backend_special_sections
21085 #define elf_backend_special_sections elf32_arm_symbian_special_sections
21086 #undef elf_backend_begin_write_processing
21087 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
21088 #undef elf_backend_final_write_processing
21089 #define elf_backend_final_write_processing elf32_arm_final_write_processing
21091 #undef elf_backend_modify_segment_map
21092 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
21094 /* There is no .got section for BPABI objects, and hence no header. */
21095 #undef elf_backend_got_header_size
21096 #define elf_backend_got_header_size 0
21098 /* Similarly, there is no .got.plt section. */
21099 #undef elf_backend_want_got_plt
21100 #define elf_backend_want_got_plt 0
21102 #undef elf_backend_plt_sym_val
21103 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
21105 #undef elf_backend_may_use_rel_p
21106 #define elf_backend_may_use_rel_p 1
21107 #undef elf_backend_may_use_rela_p
21108 #define elf_backend_may_use_rela_p 0
21109 #undef elf_backend_default_use_rela_p
21110 #define elf_backend_default_use_rela_p 0
21111 #undef elf_backend_want_plt_sym
21112 #define elf_backend_want_plt_sym 0
21113 #undef elf_backend_dtrel_excludes_plt
21114 #define elf_backend_dtrel_excludes_plt 0
21115 #undef ELF_MAXPAGESIZE
21116 #define ELF_MAXPAGESIZE 0x8000
21118 #include "elf32-target.h"