1 //===- Relocations.cpp ----------------------------------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file contains platform-independent functions to process relocations.
10 // I'll describe the overview of this file here.
12 // Simple relocations are easy to handle for the linker. For example,
13 // for R_X86_64_PC64 relocs, the linker just has to fix up locations
14 // with the relative offsets to the target symbols. It would just be
15 // reading records from relocation sections and applying them to output.
17 // But not all relocations are that easy to handle. For example, for
18 // R_386_GOTOFF relocs, the linker has to create new GOT entries for
19 // symbols if they don't exist, and fix up locations with GOT entry
20 // offsets from the beginning of GOT section. So there is more than
21 // fixing addresses in relocation processing.
23 // ELF defines a large number of complex relocations.
25 // The functions in this file analyze relocations and do whatever needs
26 // to be done. It includes, but not limited to, the following.
28 // - create GOT/PLT entries
29 // - create new relocations in .dynsym to let the dynamic linker resolve
30 // them at runtime (since ELF supports dynamic linking, not all
31 // relocations can be resolved at link-time)
32 // - create COPY relocs and reserve space in .bss
33 // - replace expensive relocs (in terms of runtime cost) with cheap ones
34 // - error out infeasible combinations such as PIC and non-relative relocs
36 // Note that the functions in this file don't actually apply relocations
37 // because they don't know about the output file nor the output file buffer.
38 // It instead stores Relocation objects to InputSection's Relocations
39 // vector to let it apply later in InputSection::writeTo.
41 //===----------------------------------------------------------------------===//
43 #include "Relocations.h"
45 #include "InputFiles.h"
46 #include "LinkerScript.h"
47 #include "OutputSections.h"
48 #include "SymbolTable.h"
50 #include "SyntheticSections.h"
53 #include "lld/Common/ErrorHandler.h"
54 #include "lld/Common/Memory.h"
55 #include "llvm/ADT/SmallSet.h"
56 #include "llvm/BinaryFormat/ELF.h"
57 #include "llvm/Demangle/Demangle.h"
58 #include "llvm/Support/Endian.h"
62 using namespace llvm::ELF
;
63 using namespace llvm::object
;
64 using namespace llvm::support::endian
;
66 using namespace lld::elf
;
68 static void printDefinedLocation(ELFSyncStream
&s
, const Symbol
&sym
) {
69 s
<< "\n>>> defined in " << sym
.file
;
72 // Construct a message in the following format.
74 // >>> defined in /home/alice/src/foo.o
75 // >>> referenced by bar.c:12 (/home/alice/src/bar.c:12)
76 // >>> /home/alice/src/bar.o:(.text+0x1)
77 static void printLocation(ELFSyncStream
&s
, InputSectionBase
&sec
,
78 const Symbol
&sym
, uint64_t off
) {
79 printDefinedLocation(s
, sym
);
80 s
<< "\n>>> referenced by ";
82 s
<< sec
.getSrcMsg(sym
, off
);
85 s
<< sec
.getObjMsg(off
);
88 void elf::reportRangeError(Ctx
&ctx
, uint8_t *loc
, const Relocation
&rel
,
89 const Twine
&v
, int64_t min
, uint64_t max
) {
90 ErrorPlace errPlace
= getErrorPlace(ctx
, loc
);
92 diag
<< errPlace
.loc
<< "relocation " << rel
.type
93 << " out of range: " << v
.str() << " is not in [" << min
<< ", " << max
97 if (!rel
.sym
->isSection())
98 diag
<< "; references '" << rel
.sym
<< '\'';
99 else if (auto *d
= dyn_cast
<Defined
>(rel
.sym
))
100 diag
<< "; references section '" << d
->section
->name
<< "'";
102 if (ctx
.arg
.emachine
== EM_X86_64
&& rel
.type
== R_X86_64_PC32
&&
103 rel
.sym
->getOutputSection() &&
104 (rel
.sym
->getOutputSection()->flags
& SHF_X86_64_LARGE
)) {
105 diag
<< "; R_X86_64_PC32 should not reference a section marked "
109 if (!errPlace
.srcLoc
.empty())
110 diag
<< "\n>>> referenced by " << errPlace
.srcLoc
;
111 if (rel
.sym
&& !rel
.sym
->isSection())
112 printDefinedLocation(diag
, *rel
.sym
);
114 if (errPlace
.isec
&& errPlace
.isec
->name
.starts_with(".debug"))
115 diag
<< "; consider recompiling with -fdebug-types-section to reduce size "
119 void elf::reportRangeError(Ctx
&ctx
, uint8_t *loc
, int64_t v
, int n
,
120 const Symbol
&sym
, const Twine
&msg
) {
121 auto diag
= Err(ctx
);
122 diag
<< getErrorPlace(ctx
, loc
).loc
<< msg
<< " is out of range: " << v
123 << " is not in [" << llvm::minIntN(n
) << ", " << llvm::maxIntN(n
) << "]";
124 if (!sym
.getName().empty()) {
125 diag
<< "; references '" << &sym
<< '\'';
126 printDefinedLocation(diag
, sym
);
130 // Build a bitmask with one bit set for each 64 subset of RelExpr.
131 static constexpr uint64_t buildMask() { return 0; }
133 template <typename
... Tails
>
134 static constexpr uint64_t buildMask(int head
, Tails
... tails
) {
135 return (0 <= head
&& head
< 64 ? uint64_t(1) << head
: 0) |
139 // Return true if `Expr` is one of `Exprs`.
140 // There are more than 64 but less than 128 RelExprs, so we divide the set of
141 // exprs into [0, 64) and [64, 128) and represent each range as a constant
142 // 64-bit mask. Then we decide which mask to test depending on the value of
143 // expr and use a simple shift and bitwise-and to test for membership.
144 template <RelExpr
... Exprs
> static bool oneof(RelExpr expr
) {
145 assert(0 <= expr
&& (int)expr
< 128 &&
146 "RelExpr is too large for 128-bit mask!");
149 return (uint64_t(1) << (expr
- 64)) & buildMask((Exprs
- 64)...);
150 return (uint64_t(1) << expr
) & buildMask(Exprs
...);
153 static RelType
getMipsPairType(RelType type
, bool isLocal
) {
158 // In case of global symbol, the R_MIPS_GOT16 relocation does not
159 // have a pair. Each global symbol has a unique entry in the GOT
160 // and a corresponding instruction with help of the R_MIPS_GOT16
161 // relocation loads an address of the symbol. In case of local
162 // symbol, the R_MIPS_GOT16 relocation creates a GOT entry to hold
163 // the high 16 bits of the symbol's value. A paired R_MIPS_LO16
164 // relocations handle low 16 bits of the address. That allows
165 // to allocate only one GOT entry for every 64 KBytes of local data.
166 return isLocal
? R_MIPS_LO16
: R_MIPS_NONE
;
167 case R_MICROMIPS_GOT16
:
168 return isLocal
? R_MICROMIPS_LO16
: R_MIPS_NONE
;
170 return R_MIPS_PCLO16
;
171 case R_MICROMIPS_HI16
:
172 return R_MICROMIPS_LO16
;
178 // True if non-preemptable symbol always has the same value regardless of where
179 // the DSO is loaded.
180 static bool isAbsolute(const Symbol
&sym
) {
181 if (sym
.isUndefWeak())
183 if (const auto *dr
= dyn_cast
<Defined
>(&sym
))
184 return dr
->section
== nullptr; // Absolute symbol.
188 static bool isAbsoluteValue(const Symbol
&sym
) {
189 return isAbsolute(sym
) || sym
.isTls();
192 // Returns true if Expr refers a PLT entry.
193 static bool needsPlt(RelExpr expr
) {
194 return oneof
<R_PLT
, R_PLT_PC
, R_PLT_GOTREL
, R_PLT_GOTPLT
, R_GOTPLT_GOTREL
,
195 R_GOTPLT_PC
, RE_LOONGARCH_PLT_PAGE_PC
, RE_PPC32_PLTREL
,
196 RE_PPC64_CALL_PLT
>(expr
);
199 bool lld::elf::needsGot(RelExpr expr
) {
200 return oneof
<R_GOT
, RE_AARCH64_AUTH_GOT
, RE_AARCH64_AUTH_GOT_PC
, R_GOT_OFF
,
201 RE_MIPS_GOT_LOCAL_PAGE
, RE_MIPS_GOT_OFF
, RE_MIPS_GOT_OFF32
,
202 RE_AARCH64_GOT_PAGE_PC
, RE_AARCH64_AUTH_GOT_PAGE_PC
,
203 RE_AARCH64_AUTH_GOT_PAGE_PC
, R_GOT_PC
, R_GOTPLT
,
204 RE_AARCH64_GOT_PAGE
, RE_LOONGARCH_GOT
, RE_LOONGARCH_GOT_PAGE_PC
>(
208 // True if this expression is of the form Sym - X, where X is a position in the
209 // file (PC, or GOT for example).
210 static bool isRelExpr(RelExpr expr
) {
211 return oneof
<R_PC
, R_GOTREL
, R_GOTPLTREL
, RE_ARM_PCA
, RE_MIPS_GOTREL
,
212 RE_PPC64_CALL
, RE_PPC64_RELAX_TOC
, RE_AARCH64_PAGE_PC
,
213 R_RELAX_GOT_PC
, RE_RISCV_PC_INDIRECT
, RE_PPC64_RELAX_GOT_PC
,
214 RE_LOONGARCH_PAGE_PC
>(expr
);
217 static RelExpr
toPlt(RelExpr expr
) {
219 case RE_LOONGARCH_PAGE_PC
:
220 return RE_LOONGARCH_PLT_PAGE_PC
;
222 return RE_PPC64_CALL_PLT
;
234 static RelExpr
fromPlt(RelExpr expr
) {
235 // We decided not to use a plt. Optimize a reference to the plt to a
236 // reference to the symbol itself.
239 case RE_PPC32_PLTREL
:
241 case RE_LOONGARCH_PLT_PAGE_PC
:
242 return RE_LOONGARCH_PAGE_PC
;
243 case RE_PPC64_CALL_PLT
:
244 return RE_PPC64_CALL
;
256 // Returns true if a given shared symbol is in a read-only segment in a DSO.
257 template <class ELFT
> static bool isReadOnly(SharedSymbol
&ss
) {
258 using Elf_Phdr
= typename
ELFT::Phdr
;
260 // Determine if the symbol is read-only by scanning the DSO's program headers.
261 const auto &file
= cast
<SharedFile
>(*ss
.file
);
262 for (const Elf_Phdr
&phdr
:
263 check(file
.template getObj
<ELFT
>().program_headers()))
264 if ((phdr
.p_type
== ELF::PT_LOAD
|| phdr
.p_type
== ELF::PT_GNU_RELRO
) &&
265 !(phdr
.p_flags
& ELF::PF_W
) && ss
.value
>= phdr
.p_vaddr
&&
266 ss
.value
< phdr
.p_vaddr
+ phdr
.p_memsz
)
271 // Returns symbols at the same offset as a given symbol, including SS itself.
273 // If two or more symbols are at the same offset, and at least one of
274 // them are copied by a copy relocation, all of them need to be copied.
275 // Otherwise, they would refer to different places at runtime.
276 template <class ELFT
>
277 static SmallSet
<SharedSymbol
*, 4> getSymbolsAt(Ctx
&ctx
, SharedSymbol
&ss
) {
278 using Elf_Sym
= typename
ELFT::Sym
;
280 const auto &file
= cast
<SharedFile
>(*ss
.file
);
282 SmallSet
<SharedSymbol
*, 4> ret
;
283 for (const Elf_Sym
&s
: file
.template getGlobalELFSyms
<ELFT
>()) {
284 if (s
.st_shndx
== SHN_UNDEF
|| s
.st_shndx
== SHN_ABS
||
285 s
.getType() == STT_TLS
|| s
.st_value
!= ss
.value
)
287 StringRef name
= check(s
.getName(file
.getStringTable()));
288 Symbol
*sym
= ctx
.symtab
->find(name
);
289 if (auto *alias
= dyn_cast_or_null
<SharedSymbol
>(sym
))
293 // The loop does not check SHT_GNU_verneed, so ret does not contain
294 // non-default version symbols. If ss has a non-default version, ret won't
295 // contain ss. Just add ss unconditionally. If a non-default version alias is
296 // separately copy relocated, it and ss will have different addresses.
297 // Fortunately this case is impractical and fails with GNU ld as well.
302 // When a symbol is copy relocated or we create a canonical plt entry, it is
303 // effectively a defined symbol. In the case of copy relocation the symbol is
304 // in .bss and in the case of a canonical plt entry it is in .plt. This function
305 // replaces the existing symbol with a Defined pointing to the appropriate
307 static void replaceWithDefined(Ctx
&ctx
, Symbol
&sym
, SectionBase
&sec
,
308 uint64_t value
, uint64_t size
) {
310 Defined(ctx
, sym
.file
, StringRef(), sym
.binding
, sym
.stOther
, sym
.type
, value
,
314 sym
.versionId
= old
.versionId
;
315 sym
.isUsedInRegularObj
= true;
316 // A copy relocated alias may need a GOT entry.
317 sym
.flags
.store(old
.flags
.load(std::memory_order_relaxed
) & NEEDS_GOT
,
318 std::memory_order_relaxed
);
321 // Reserve space in .bss or .bss.rel.ro for copy relocation.
323 // The copy relocation is pretty much a hack. If you use a copy relocation
324 // in your program, not only the symbol name but the symbol's size, RW/RO
325 // bit and alignment become part of the ABI. In addition to that, if the
326 // symbol has aliases, the aliases become part of the ABI. That's subtle,
327 // but if you violate that implicit ABI, that can cause very counter-
328 // intuitive consequences.
330 // So, what is the copy relocation? It's for linking non-position
331 // independent code to DSOs. In an ideal world, all references to data
332 // exported by DSOs should go indirectly through GOT. But if object files
333 // are compiled as non-PIC, all data references are direct. There is no
334 // way for the linker to transform the code to use GOT, as machine
335 // instructions are already set in stone in object files. This is where
336 // the copy relocation takes a role.
338 // A copy relocation instructs the dynamic linker to copy data from a DSO
339 // to a specified address (which is usually in .bss) at load-time. If the
340 // static linker (that's us) finds a direct data reference to a DSO
341 // symbol, it creates a copy relocation, so that the symbol can be
342 // resolved as if it were in .bss rather than in a DSO.
344 // As you can see in this function, we create a copy relocation for the
345 // dynamic linker, and the relocation contains not only symbol name but
346 // various other information about the symbol. So, such attributes become a
349 // Note for application developers: I can give you a piece of advice if
350 // you are writing a shared library. You probably should export only
351 // functions from your library. You shouldn't export variables.
353 // As an example what can happen when you export variables without knowing
354 // the semantics of copy relocations, assume that you have an exported
355 // variable of type T. It is an ABI-breaking change to add new members at
356 // end of T even though doing that doesn't change the layout of the
357 // existing members. That's because the space for the new members are not
358 // reserved in .bss unless you recompile the main program. That means they
359 // are likely to overlap with other data that happens to be laid out next
360 // to the variable in .bss. This kind of issue is sometimes very hard to
361 // debug. What's a solution? Instead of exporting a variable V from a DSO,
362 // define an accessor getV().
363 template <class ELFT
> static void addCopyRelSymbol(Ctx
&ctx
, SharedSymbol
&ss
) {
364 // Copy relocation against zero-sized symbol doesn't make sense.
365 uint64_t symSize
= ss
.getSize();
366 if (symSize
== 0 || ss
.alignment
== 0)
367 Err(ctx
) << "cannot create a copy relocation for symbol " << &ss
;
369 // See if this symbol is in a read-only segment. If so, preserve the symbol's
370 // memory protection by reserving space in the .bss.rel.ro section.
371 bool isRO
= isReadOnly
<ELFT
>(ss
);
372 BssSection
*sec
= make
<BssSection
>(ctx
, isRO
? ".bss.rel.ro" : ".bss",
373 symSize
, ss
.alignment
);
374 OutputSection
*osec
= (isRO
? ctx
.in
.bssRelRo
: ctx
.in
.bss
)->getParent();
376 // At this point, sectionBases has been migrated to sections. Append sec to
378 if (osec
->commands
.empty() ||
379 !isa
<InputSectionDescription
>(osec
->commands
.back()))
380 osec
->commands
.push_back(make
<InputSectionDescription
>(""));
381 auto *isd
= cast
<InputSectionDescription
>(osec
->commands
.back());
382 isd
->sections
.push_back(sec
);
383 osec
->commitSection(sec
);
385 // Look through the DSO's dynamic symbol table for aliases and create a
386 // dynamic symbol for each one. This causes the copy relocation to correctly
387 // interpose any aliases.
388 for (SharedSymbol
*sym
: getSymbolsAt
<ELFT
>(ctx
, ss
))
389 replaceWithDefined(ctx
, *sym
, *sec
, 0, sym
->size
);
391 ctx
.mainPart
->relaDyn
->addSymbolReloc(ctx
.target
->copyRel
, *sec
, 0, ss
);
394 // .eh_frame sections are mergeable input sections, so their input
395 // offsets are not linearly mapped to output section. For each input
396 // offset, we need to find a section piece containing the offset and
397 // add the piece's base address to the input offset to compute the
398 // output offset. That isn't cheap.
400 // This class is to speed up the offset computation. When we process
401 // relocations, we access offsets in the monotonically increasing
402 // order. So we can optimize for that access pattern.
404 // For sections other than .eh_frame, this class doesn't do anything.
408 OffsetGetter() = default;
409 explicit OffsetGetter(InputSectionBase
&sec
) {
410 if (auto *eh
= dyn_cast
<EhInputSection
>(&sec
)) {
418 // Translates offsets in input sections to offsets in output sections.
419 // Given offset must increase monotonically. We assume that Piece is
420 // sorted by inputOff.
421 uint64_t get(Ctx
&ctx
, uint64_t off
) {
425 while (j
!= fdes
.end() && j
->inputOff
<= off
)
428 if (j
== fdes
.begin() || j
[-1].inputOff
+ j
[-1].size
<= off
) {
429 while (i
!= cies
.end() && i
->inputOff
<= off
)
431 if (i
== cies
.begin() || i
[-1].inputOff
+ i
[-1].size
<= off
)
432 Fatal(ctx
) << ".eh_frame: relocation is not in any piece";
436 // Offset -1 means that the piece is dead (i.e. garbage collected).
437 if (it
[-1].outputOff
== -1)
439 return it
[-1].outputOff
+ (off
- it
[-1].inputOff
);
443 ArrayRef
<EhSectionPiece
> cies
, fdes
;
444 ArrayRef
<EhSectionPiece
>::iterator i
, j
;
447 // This class encapsulates states needed to scan relocations for one
449 class RelocationScanner
{
451 RelocationScanner(Ctx
&ctx
) : ctx(ctx
) {}
452 template <class ELFT
>
453 void scanSection(InputSectionBase
&s
, bool isEH
= false);
457 InputSectionBase
*sec
;
460 // End of relocations, used by Mips/PPC64.
461 const void *end
= nullptr;
463 template <class RelTy
> RelType
getMipsN32RelType(RelTy
*&rel
) const;
464 template <class ELFT
, class RelTy
>
465 int64_t computeMipsAddend(const RelTy
&rel
, RelExpr expr
, bool isLocal
) const;
466 bool isStaticLinkTimeConstant(RelExpr e
, RelType type
, const Symbol
&sym
,
467 uint64_t relOff
) const;
468 void processAux(RelExpr expr
, RelType type
, uint64_t offset
, Symbol
&sym
,
469 int64_t addend
) const;
470 unsigned handleTlsRelocation(RelExpr expr
, RelType type
, uint64_t offset
,
471 Symbol
&sym
, int64_t addend
);
473 template <class ELFT
, class RelTy
>
474 void scanOne(typename Relocs
<RelTy
>::const_iterator
&i
);
475 template <class ELFT
, class RelTy
> void scan(Relocs
<RelTy
> rels
);
479 // MIPS has an odd notion of "paired" relocations to calculate addends.
480 // For example, if a relocation is of R_MIPS_HI16, there must be a
481 // R_MIPS_LO16 relocation after that, and an addend is calculated using
482 // the two relocations.
483 template <class ELFT
, class RelTy
>
484 int64_t RelocationScanner::computeMipsAddend(const RelTy
&rel
, RelExpr expr
,
485 bool isLocal
) const {
486 if (expr
== RE_MIPS_GOTREL
&& isLocal
)
487 return sec
->getFile
<ELFT
>()->mipsGp0
;
489 // The ABI says that the paired relocation is used only for REL.
490 // See p. 4-17 at ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
491 // This generalises to relocation types with implicit addends.
492 if (RelTy::HasAddend
)
495 RelType type
= rel
.getType(ctx
.arg
.isMips64EL
);
496 RelType pairTy
= getMipsPairType(type
, isLocal
);
497 if (pairTy
== R_MIPS_NONE
)
500 const uint8_t *buf
= sec
->content().data();
501 uint32_t symIndex
= rel
.getSymbol(ctx
.arg
.isMips64EL
);
503 // To make things worse, paired relocations might not be contiguous in
504 // the relocation table, so we need to do linear search. *sigh*
505 for (const RelTy
*ri
= &rel
; ri
!= static_cast<const RelTy
*>(end
); ++ri
)
506 if (ri
->getType(ctx
.arg
.isMips64EL
) == pairTy
&&
507 ri
->getSymbol(ctx
.arg
.isMips64EL
) == symIndex
)
508 return ctx
.target
->getImplicitAddend(buf
+ ri
->r_offset
, pairTy
);
510 Warn(ctx
) << "can't find matching " << pairTy
<< " relocation for " << type
;
514 // Custom error message if Sym is defined in a discarded section.
515 template <class ELFT
>
516 static void maybeReportDiscarded(Ctx
&ctx
, ELFSyncStream
&msg
, Undefined
&sym
) {
517 auto *file
= dyn_cast
<ObjFile
<ELFT
>>(sym
.file
);
518 if (!file
|| !sym
.discardedSecIdx
)
520 ArrayRef
<typename
ELFT::Shdr
> objSections
=
521 file
->template getELFShdrs
<ELFT
>();
523 if (sym
.type
== ELF::STT_SECTION
) {
524 msg
<< "relocation refers to a discarded section: ";
526 file
->getObj().getSectionName(objSections
[sym
.discardedSecIdx
]), file
);
528 msg
<< "relocation refers to a symbol in a discarded section: " << &sym
;
530 msg
<< "\n>>> defined in " << file
;
532 Elf_Shdr_Impl
<ELFT
> elfSec
= objSections
[sym
.discardedSecIdx
- 1];
533 if (elfSec
.sh_type
!= SHT_GROUP
)
536 // If the discarded section is a COMDAT.
537 StringRef signature
= file
->getShtGroupSignature(objSections
, elfSec
);
538 if (const InputFile
*prevailing
=
539 ctx
.symtab
->comdatGroups
.lookup(CachedHashStringRef(signature
))) {
540 msg
<< "\n>>> section group signature: " << signature
541 << "\n>>> prevailing definition is in " << prevailing
;
542 if (sym
.nonPrevailing
) {
543 msg
<< "\n>>> or the symbol in the prevailing group had STB_WEAK "
544 "binding and the symbol in a non-prevailing group had STB_GLOBAL "
545 "binding. Mixing groups with STB_WEAK and STB_GLOBAL binding "
546 "signature is not supported";
551 // Check whether the definition name def is a mangled function name that matches
552 // the reference name ref.
553 static bool canSuggestExternCForCXX(StringRef ref
, StringRef def
) {
554 llvm::ItaniumPartialDemangler d
;
555 std::string name
= def
.str();
556 if (d
.partialDemangle(name
.c_str()))
558 char *buf
= d
.getFunctionName(nullptr, nullptr);
561 bool ret
= ref
== buf
;
566 // Suggest an alternative spelling of an "undefined symbol" diagnostic. Returns
567 // the suggested symbol, which is either in the symbol table, or in the same
569 static const Symbol
*getAlternativeSpelling(Ctx
&ctx
, const Undefined
&sym
,
570 std::string
&pre_hint
,
571 std::string
&post_hint
) {
572 DenseMap
<StringRef
, const Symbol
*> map
;
573 if (sym
.file
->kind() == InputFile::ObjKind
) {
574 auto *file
= cast
<ELFFileBase
>(sym
.file
);
575 // If sym is a symbol defined in a discarded section, maybeReportDiscarded()
576 // will give an error. Don't suggest an alternative spelling.
577 if (sym
.discardedSecIdx
!= 0 &&
578 file
->getSections()[sym
.discardedSecIdx
] == &InputSection::discarded
)
581 // Build a map of local defined symbols.
582 for (const Symbol
*s
: sym
.file
->getSymbols())
583 if (s
->isLocal() && s
->isDefined() && !s
->getName().empty())
584 map
.try_emplace(s
->getName(), s
);
587 auto suggest
= [&](StringRef newName
) -> const Symbol
* {
588 // If defined locally.
589 if (const Symbol
*s
= map
.lookup(newName
))
592 // If in the symbol table and not undefined.
593 if (const Symbol
*s
= ctx
.symtab
->find(newName
))
594 if (!s
->isUndefined())
600 // This loop enumerates all strings of Levenshtein distance 1 as typo
601 // correction candidates and suggests the one that exists as a non-undefined
603 StringRef name
= sym
.getName();
604 for (size_t i
= 0, e
= name
.size(); i
!= e
+ 1; ++i
) {
605 // Insert a character before name[i].
606 std::string newName
= (name
.substr(0, i
) + "0" + name
.substr(i
)).str();
607 for (char c
= '0'; c
<= 'z'; ++c
) {
609 if (const Symbol
*s
= suggest(newName
))
615 // Substitute name[i].
616 newName
= std::string(name
);
617 for (char c
= '0'; c
<= 'z'; ++c
) {
619 if (const Symbol
*s
= suggest(newName
))
623 // Transpose name[i] and name[i+1]. This is of edit distance 2 but it is
626 newName
[i
] = name
[i
+ 1];
627 newName
[i
+ 1] = name
[i
];
628 if (const Symbol
*s
= suggest(newName
))
633 newName
= (name
.substr(0, i
) + name
.substr(i
+ 1)).str();
634 if (const Symbol
*s
= suggest(newName
))
638 // Case mismatch, e.g. Foo vs FOO.
640 if (name
.equals_insensitive(it
.first
))
642 for (Symbol
*sym
: ctx
.symtab
->getSymbols())
643 if (!sym
->isUndefined() && name
.equals_insensitive(sym
->getName()))
646 // The reference may be a mangled name while the definition is not. Suggest a
647 // missing extern "C".
648 if (name
.starts_with("_Z")) {
649 std::string buf
= name
.str();
650 llvm::ItaniumPartialDemangler d
;
651 if (!d
.partialDemangle(buf
.c_str()))
652 if (char *buf
= d
.getFunctionName(nullptr, nullptr)) {
653 const Symbol
*s
= suggest(buf
);
656 pre_hint
= ": extern \"C\" ";
661 const Symbol
*s
= nullptr;
663 if (canSuggestExternCForCXX(name
, it
.first
)) {
668 for (Symbol
*sym
: ctx
.symtab
->getSymbols())
669 if (canSuggestExternCForCXX(name
, sym
->getName())) {
674 pre_hint
= " to declare ";
675 post_hint
= " as extern \"C\"?";
683 static void reportUndefinedSymbol(Ctx
&ctx
, const UndefinedDiag
&undef
,
684 bool correctSpelling
) {
685 Undefined
&sym
= *undef
.sym
;
686 ELFSyncStream
msg(ctx
, DiagLevel::None
);
688 auto visibility
= [&]() {
689 switch (sym
.visibility()) {
701 switch (ctx
.arg
.ekind
) {
703 maybeReportDiscarded
<ELF32LE
>(ctx
, msg
, sym
);
706 maybeReportDiscarded
<ELF32BE
>(ctx
, msg
, sym
);
709 maybeReportDiscarded
<ELF64LE
>(ctx
, msg
, sym
);
712 maybeReportDiscarded
<ELF64BE
>(ctx
, msg
, sym
);
715 llvm_unreachable("");
717 if (msg
.str().empty())
718 msg
<< "undefined " << visibility() << "symbol: " << &sym
;
720 const size_t maxUndefReferences
= 3;
721 for (UndefinedDiag::Loc l
:
722 ArrayRef(undef
.locs
).take_front(maxUndefReferences
)) {
723 InputSectionBase
&sec
= *l
.sec
;
724 uint64_t offset
= l
.offset
;
726 msg
<< "\n>>> referenced by ";
727 // In the absence of line number information, utilize DW_TAG_variable (if
728 // present) for the enclosing symbol (e.g. var in `int *a[] = {&undef};`).
729 Symbol
*enclosing
= sec
.getEnclosingSymbol(offset
);
731 ELFSyncStream
msg1(ctx
, DiagLevel::None
);
732 auto tell
= msg
.tell();
733 msg
<< sec
.getSrcMsg(enclosing
? *enclosing
: sym
, offset
);
734 if (tell
!= msg
.tell())
736 msg
<< sec
.getObjMsg(offset
);
739 if (maxUndefReferences
< undef
.locs
.size())
740 msg
<< "\n>>> referenced " << (undef
.locs
.size() - maxUndefReferences
)
743 if (correctSpelling
) {
744 std::string pre_hint
= ": ", post_hint
;
745 if (const Symbol
*corrected
=
746 getAlternativeSpelling(ctx
, sym
, pre_hint
, post_hint
)) {
747 msg
<< "\n>>> did you mean" << pre_hint
<< corrected
<< post_hint
748 << "\n>>> defined in: " << corrected
->file
;
752 if (sym
.getName().starts_with("_ZTV"))
753 msg
<< "\n>>> the vtable symbol may be undefined because the class is "
754 "missing its key function "
755 "(see https://lld.llvm.org/missingkeyfunction)";
756 if (ctx
.arg
.gcSections
&& ctx
.arg
.zStartStopGC
&&
757 sym
.getName().starts_with("__start_")) {
758 msg
<< "\n>>> the encapsulation symbol needs to be retained under "
759 "--gc-sections properly; consider -z nostart-stop-gc "
760 "(see https://lld.llvm.org/ELF/start-stop-gc)";
764 Warn(ctx
) << msg
.str();
766 ctx
.e
.error(msg
.str(), ErrorTag::SymbolNotFound
, {sym
.getName()});
769 void elf::reportUndefinedSymbols(Ctx
&ctx
) {
770 // Find the first "undefined symbol" diagnostic for each diagnostic, and
771 // collect all "referenced from" lines at the first diagnostic.
772 DenseMap
<Symbol
*, UndefinedDiag
*> firstRef
;
773 for (UndefinedDiag
&undef
: ctx
.undefErrs
) {
774 assert(undef
.locs
.size() == 1);
775 if (UndefinedDiag
*canon
= firstRef
.lookup(undef
.sym
)) {
776 canon
->locs
.push_back(undef
.locs
[0]);
779 firstRef
[undef
.sym
] = &undef
;
782 // Enable spell corrector for the first 2 diagnostics.
783 for (auto [i
, undef
] : llvm::enumerate(ctx
.undefErrs
))
784 if (!undef
.locs
.empty())
785 reportUndefinedSymbol(ctx
, undef
, i
< 2);
788 // Report an undefined symbol if necessary.
789 // Returns true if the undefined symbol will produce an error message.
790 static bool maybeReportUndefined(Ctx
&ctx
, Undefined
&sym
,
791 InputSectionBase
&sec
, uint64_t offset
) {
792 std::lock_guard
<std::mutex
> lock(ctx
.relocMutex
);
793 // If versioned, issue an error (even if the symbol is weak) because we don't
794 // know the defining filename which is required to construct a Verneed entry.
795 if (sym
.hasVersionSuffix
) {
796 ctx
.undefErrs
.push_back({&sym
, {{&sec
, offset
}}, false});
802 bool canBeExternal
= !sym
.isLocal() && sym
.visibility() == STV_DEFAULT
;
803 if (ctx
.arg
.unresolvedSymbols
== UnresolvedPolicy::Ignore
&& canBeExternal
)
806 // clang (as of 2019-06-12) / gcc (as of 8.2.1) PPC64 may emit a .rela.toc
807 // which references a switch table in a discarded .rodata/.text section. The
808 // .toc and the .rela.toc are incorrectly not placed in the comdat. The ELF
809 // spec says references from outside the group to a STB_LOCAL symbol are not
810 // allowed. Work around the bug.
812 // PPC32 .got2 is similar but cannot be fixed. Multiple .got2 is infeasible
813 // because .LC0-.LTOC is not representable if the two labels are in different
815 if (sym
.discardedSecIdx
!= 0 && (sec
.name
== ".got2" || sec
.name
== ".toc"))
819 (ctx
.arg
.unresolvedSymbols
== UnresolvedPolicy::Warn
&& canBeExternal
) ||
820 ctx
.arg
.noinhibitExec
;
821 ctx
.undefErrs
.push_back({&sym
, {{&sec
, offset
}}, isWarning
});
825 // MIPS N32 ABI treats series of successive relocations with the same offset
826 // as a single relocation. The similar approach used by N64 ABI, but this ABI
827 // packs all relocations into the single relocation record. Here we emulate
828 // this for the N32 ABI. Iterate over relocation with the same offset and put
829 // theirs types into the single bit-set.
830 template <class RelTy
>
831 RelType
RelocationScanner::getMipsN32RelType(RelTy
*&rel
) const {
833 uint64_t offset
= rel
->r_offset
;
836 while (rel
!= static_cast<const RelTy
*>(end
) && rel
->r_offset
== offset
)
837 type
|= (rel
++)->getType(ctx
.arg
.isMips64EL
) << (8 * n
++);
841 template <bool shard
= false>
842 static void addRelativeReloc(Ctx
&ctx
, InputSectionBase
&isec
,
843 uint64_t offsetInSec
, Symbol
&sym
, int64_t addend
,
844 RelExpr expr
, RelType type
) {
845 Partition
&part
= isec
.getPartition(ctx
);
847 if (sym
.isTagged()) {
848 std::lock_guard
<std::mutex
> lock(ctx
.relocMutex
);
849 part
.relaDyn
->addRelativeReloc(ctx
.target
->relativeRel
, isec
, offsetInSec
,
850 sym
, addend
, type
, expr
);
851 // With MTE globals, we always want to derive the address tag by `ldg`-ing
852 // the symbol. When we have a RELATIVE relocation though, we no longer have
853 // a reference to the symbol. Because of this, when we have an addend that
854 // puts the result of the RELATIVE relocation out-of-bounds of the symbol
855 // (e.g. the addend is outside of [0, sym.getSize()]), the AArch64 MemtagABI
856 // says we should store the offset to the start of the symbol in the target
857 // field. This is described in further detail in:
858 // https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#841extended-semantics-of-r_aarch64_relative
859 if (addend
< 0 || static_cast<uint64_t>(addend
) >= sym
.getSize())
860 isec
.relocations
.push_back({expr
, type
, offsetInSec
, addend
, &sym
});
864 // Add a relative relocation. If relrDyn section is enabled, and the
865 // relocation offset is guaranteed to be even, add the relocation to
866 // the relrDyn section, otherwise add it to the relaDyn section.
867 // relrDyn sections don't support odd offsets. Also, relrDyn sections
868 // don't store the addend values, so we must write it to the relocated
870 if (part
.relrDyn
&& isec
.addralign
>= 2 && offsetInSec
% 2 == 0) {
871 isec
.addReloc({expr
, type
, offsetInSec
, addend
, &sym
});
873 part
.relrDyn
->relocsVec
[parallel::getThreadIndex()].push_back(
874 {&isec
, isec
.relocs().size() - 1});
876 part
.relrDyn
->relocs
.push_back({&isec
, isec
.relocs().size() - 1});
879 part
.relaDyn
->addRelativeReloc
<shard
>(ctx
.target
->relativeRel
, isec
,
880 offsetInSec
, sym
, addend
, type
, expr
);
883 template <class PltSection
, class GotPltSection
>
884 static void addPltEntry(Ctx
&ctx
, PltSection
&plt
, GotPltSection
&gotPlt
,
885 RelocationBaseSection
&rel
, RelType type
, Symbol
&sym
) {
887 gotPlt
.addEntry(sym
);
888 rel
.addReloc({type
, &gotPlt
, sym
.getGotPltOffset(ctx
),
889 sym
.isPreemptible
? DynamicReloc::AgainstSymbol
890 : DynamicReloc::AddendOnlyWithTargetVA
,
894 void elf::addGotEntry(Ctx
&ctx
, Symbol
&sym
) {
895 ctx
.in
.got
->addEntry(sym
);
896 uint64_t off
= sym
.getGotOffset(ctx
);
898 // If preemptible, emit a GLOB_DAT relocation.
899 if (sym
.isPreemptible
) {
900 ctx
.mainPart
->relaDyn
->addReloc({ctx
.target
->gotRel
, ctx
.in
.got
.get(), off
,
901 DynamicReloc::AgainstSymbol
, sym
, 0,
906 // Otherwise, the value is either a link-time constant or the load base
908 if (!ctx
.arg
.isPic
|| isAbsolute(sym
))
909 ctx
.in
.got
->addConstant({R_ABS
, ctx
.target
->symbolicRel
, off
, 0, &sym
});
911 addRelativeReloc(ctx
, *ctx
.in
.got
, off
, sym
, 0, R_ABS
,
912 ctx
.target
->symbolicRel
);
915 static void addGotAuthEntry(Ctx
&ctx
, Symbol
&sym
) {
916 ctx
.in
.got
->addEntry(sym
);
917 ctx
.in
.got
->addAuthEntry(sym
);
918 uint64_t off
= sym
.getGotOffset(ctx
);
920 // If preemptible, emit a GLOB_DAT relocation.
921 if (sym
.isPreemptible
) {
922 ctx
.mainPart
->relaDyn
->addReloc({R_AARCH64_AUTH_GLOB_DAT
, ctx
.in
.got
.get(),
923 off
, DynamicReloc::AgainstSymbol
, sym
, 0,
928 // Signed GOT requires dynamic relocation.
929 ctx
.in
.got
->getPartition(ctx
).relaDyn
->addReloc(
930 {R_AARCH64_AUTH_RELATIVE
, ctx
.in
.got
.get(), off
,
931 DynamicReloc::AddendOnlyWithTargetVA
, sym
, 0, R_ABS
});
934 static void addTpOffsetGotEntry(Ctx
&ctx
, Symbol
&sym
) {
935 ctx
.in
.got
->addEntry(sym
);
936 uint64_t off
= sym
.getGotOffset(ctx
);
937 if (!sym
.isPreemptible
&& !ctx
.arg
.shared
) {
938 ctx
.in
.got
->addConstant({R_TPREL
, ctx
.target
->symbolicRel
, off
, 0, &sym
});
941 ctx
.mainPart
->relaDyn
->addAddendOnlyRelocIfNonPreemptible(
942 ctx
.target
->tlsGotRel
, *ctx
.in
.got
, off
, sym
, ctx
.target
->symbolicRel
);
945 // Return true if we can define a symbol in the executable that
946 // contains the value/function of a symbol defined in a shared
948 static bool canDefineSymbolInExecutable(Ctx
&ctx
, Symbol
&sym
) {
949 // If the symbol has default visibility the symbol defined in the
950 // executable will preempt it.
951 // Note that we want the visibility of the shared symbol itself, not
952 // the visibility of the symbol in the output file we are producing.
953 if (!sym
.dsoProtected
)
956 // If we are allowed to break address equality of functions, defining
957 // a plt entry will allow the program to call the function in the
958 // .so, but the .so and the executable will no agree on the address
959 // of the function. Similar logic for objects.
960 return ((sym
.isFunc() && ctx
.arg
.ignoreFunctionAddressEquality
) ||
961 (sym
.isObject() && ctx
.arg
.ignoreDataAddressEquality
));
964 // Returns true if a given relocation can be computed at link-time.
965 // This only handles relocation types expected in processAux.
967 // For instance, we know the offset from a relocation to its target at
968 // link-time if the relocation is PC-relative and refers a
969 // non-interposable function in the same executable. This function
970 // will return true for such relocation.
972 // If this function returns false, that means we need to emit a
973 // dynamic relocation so that the relocation will be fixed at load-time.
974 bool RelocationScanner::isStaticLinkTimeConstant(RelExpr e
, RelType type
,
976 uint64_t relOff
) const {
977 // These expressions always compute a constant
979 R_GOTPLT
, R_GOT_OFF
, R_RELAX_HINT
, RE_MIPS_GOT_LOCAL_PAGE
,
980 RE_MIPS_GOTREL
, RE_MIPS_GOT_OFF
, RE_MIPS_GOT_OFF32
, RE_MIPS_GOT_GP_PC
,
981 RE_AARCH64_GOT_PAGE_PC
, RE_AARCH64_AUTH_GOT_PAGE_PC
, R_GOT_PC
,
982 R_GOTONLY_PC
, R_GOTPLTONLY_PC
, R_PLT_PC
, R_PLT_GOTREL
, R_PLT_GOTPLT
,
983 R_GOTPLT_GOTREL
, R_GOTPLT_PC
, RE_PPC32_PLTREL
, RE_PPC64_CALL_PLT
,
984 RE_PPC64_RELAX_TOC
, RE_RISCV_ADD
, RE_AARCH64_GOT_PAGE
,
985 RE_AARCH64_AUTH_GOT
, RE_AARCH64_AUTH_GOT_PC
, RE_LOONGARCH_PLT_PAGE_PC
,
986 RE_LOONGARCH_GOT
, RE_LOONGARCH_GOT_PAGE_PC
>(e
))
989 // These never do, except if the entire file is position dependent or if
990 // only the low bits are used.
991 if (e
== R_GOT
|| e
== R_PLT
)
992 return ctx
.target
->usesOnlyLowPageBits(type
) || !ctx
.arg
.isPic
;
994 // R_AARCH64_AUTH_ABS64 requires a dynamic relocation.
995 if (sym
.isPreemptible
|| e
== RE_AARCH64_AUTH
)
1000 // Constant when referencing a non-preemptible symbol.
1001 if (e
== R_SIZE
|| e
== RE_RISCV_LEB128
)
1004 // For the target and the relocation, we want to know if they are
1005 // absolute or relative.
1006 bool absVal
= isAbsoluteValue(sym
);
1007 bool relE
= isRelExpr(e
);
1008 if (absVal
&& !relE
)
1010 if (!absVal
&& relE
)
1012 if (!absVal
&& !relE
)
1013 return ctx
.target
->usesOnlyLowPageBits(type
);
1015 assert(absVal
&& relE
);
1017 // Allow R_PLT_PC (optimized to R_PC here) to a hidden undefined weak symbol
1018 // in PIC mode. This is a little strange, but it allows us to link function
1019 // calls to such symbols (e.g. glibc/stdlib/exit.c:__run_exit_handlers).
1020 // Normally such a call will be guarded with a comparison, which will load a
1021 // zero from the GOT.
1022 if (sym
.isUndefWeak())
1025 // We set the final symbols values for linker script defined symbols later.
1026 // They always can be computed as a link time constant.
1027 if (sym
.scriptDefined
)
1030 auto diag
= Err(ctx
);
1031 diag
<< "relocation " << type
<< " cannot refer to absolute symbol: " << &sym
;
1032 printLocation(diag
, *sec
, sym
, relOff
);
1036 // The reason we have to do this early scan is as follows
1037 // * To mmap the output file, we need to know the size
1038 // * For that, we need to know how many dynamic relocs we will have.
1039 // It might be possible to avoid this by outputting the file with write:
1040 // * Write the allocated output sections, computing addresses.
1041 // * Apply relocations, recording which ones require a dynamic reloc.
1042 // * Write the dynamic relocations.
1043 // * Write the rest of the file.
1044 // This would have some drawbacks. For example, we would only know if .rela.dyn
1045 // is needed after applying relocations. If it is, it will go after rw and rx
1046 // sections. Given that it is ro, we will need an extra PT_LOAD. This
1047 // complicates things for the dynamic linker and means we would have to reserve
1048 // space for the extra PT_LOAD even if we end up not using it.
1049 void RelocationScanner::processAux(RelExpr expr
, RelType type
, uint64_t offset
,
1050 Symbol
&sym
, int64_t addend
) const {
1051 // If non-ifunc non-preemptible, change PLT to direct call and optimize GOT
1053 const bool isIfunc
= sym
.isGnuIFunc();
1054 if (!sym
.isPreemptible
&& (!isIfunc
|| ctx
.arg
.zIfuncNoplt
)) {
1055 if (expr
!= R_GOT_PC
) {
1056 // The 0x8000 bit of r_addend of R_PPC_PLTREL24 is used to choose call
1057 // stub type. It should be ignored if optimized to R_PC.
1058 if (ctx
.arg
.emachine
== EM_PPC
&& expr
== RE_PPC32_PLTREL
)
1060 // R_HEX_GD_PLT_B22_PCREL (call a@GDPLT) is transformed into
1061 // call __tls_get_addr even if the symbol is non-preemptible.
1062 if (!(ctx
.arg
.emachine
== EM_HEXAGON
&&
1063 (type
== R_HEX_GD_PLT_B22_PCREL
||
1064 type
== R_HEX_GD_PLT_B22_PCREL_X
||
1065 type
== R_HEX_GD_PLT_B32_PCREL_X
)))
1066 expr
= fromPlt(expr
);
1067 } else if (!isAbsoluteValue(sym
)) {
1068 expr
= ctx
.target
->adjustGotPcExpr(type
, addend
,
1069 sec
->content().data() + offset
);
1070 // If the target adjusted the expression to R_RELAX_GOT_PC, we may end up
1071 // needing the GOT if we can't relax everything.
1072 if (expr
== R_RELAX_GOT_PC
)
1073 ctx
.in
.got
->hasGotOffRel
.store(true, std::memory_order_relaxed
);
1077 // We were asked not to generate PLT entries for ifuncs. Instead, pass the
1078 // direct relocation on through.
1079 if (LLVM_UNLIKELY(isIfunc
) && ctx
.arg
.zIfuncNoplt
) {
1080 std::lock_guard
<std::mutex
> lock(ctx
.relocMutex
);
1081 sym
.isExported
= true;
1082 ctx
.mainPart
->relaDyn
->addSymbolReloc(type
, *sec
, offset
, sym
, addend
,
1087 if (needsGot(expr
)) {
1088 if (ctx
.arg
.emachine
== EM_MIPS
) {
1089 // MIPS ABI has special rules to process GOT entries and doesn't
1090 // require relocation entries for them. A special case is TLS
1091 // relocations. In that case dynamic loader applies dynamic
1092 // relocations to initialize TLS GOT entries.
1093 // See "Global Offset Table" in Chapter 5 in the following document
1094 // for detailed description:
1095 // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
1096 ctx
.in
.mipsGot
->addEntry(*sec
->file
, sym
, addend
, expr
);
1097 } else if (!sym
.isTls() || ctx
.arg
.emachine
!= EM_LOONGARCH
) {
1098 // Many LoongArch TLS relocs reuse the RE_LOONGARCH_GOT type, in which
1099 // case the NEEDS_GOT flag shouldn't get set.
1100 if (expr
== RE_AARCH64_AUTH_GOT
|| expr
== RE_AARCH64_AUTH_GOT_PAGE_PC
||
1101 expr
== RE_AARCH64_AUTH_GOT_PC
)
1102 sym
.setFlags(NEEDS_GOT
| NEEDS_GOT_AUTH
);
1104 sym
.setFlags(NEEDS_GOT
| NEEDS_GOT_NONAUTH
);
1106 } else if (needsPlt(expr
)) {
1107 sym
.setFlags(NEEDS_PLT
);
1108 } else if (LLVM_UNLIKELY(isIfunc
)) {
1109 sym
.setFlags(HAS_DIRECT_RELOC
);
1112 // If the relocation is known to be a link-time constant, we know no dynamic
1113 // relocation will be created, pass the control to relocateAlloc() or
1114 // relocateNonAlloc() to resolve it.
1116 // The behavior of an undefined weak reference is implementation defined. For
1117 // non-link-time constants, we resolve relocations statically (let
1118 // relocate{,Non}Alloc() resolve them) for -no-pie and try producing dynamic
1119 // relocations for -pie and -shared.
1121 // The general expectation of -no-pie static linking is that there is no
1122 // dynamic relocation (except IRELATIVE). Emitting dynamic relocations for
1123 // -shared matches the spirit of its -z undefs default. -pie has freedom on
1124 // choices, and we choose dynamic relocations to be consistent with the
1125 // handling of GOT-generating relocations.
1126 if (isStaticLinkTimeConstant(expr
, type
, sym
, offset
) ||
1127 (!ctx
.arg
.isPic
&& sym
.isUndefWeak())) {
1128 sec
->addReloc({expr
, type
, offset
, addend
, &sym
});
1132 // Use a simple -z notext rule that treats all sections except .eh_frame as
1133 // writable. GNU ld does not produce dynamic relocations in .eh_frame (and our
1134 // SectionBase::getOffset would incorrectly adjust the offset).
1136 // For MIPS, we don't implement GNU ld's DW_EH_PE_absptr to DW_EH_PE_pcrel
1137 // conversion. We still emit a dynamic relocation.
1138 bool canWrite
= (sec
->flags
& SHF_WRITE
) ||
1140 (isa
<EhInputSection
>(sec
) && ctx
.arg
.emachine
!= EM_MIPS
));
1142 RelType rel
= ctx
.target
->getDynRel(type
);
1143 if (oneof
<R_GOT
, RE_LOONGARCH_GOT
>(expr
) ||
1144 (rel
== ctx
.target
->symbolicRel
&& !sym
.isPreemptible
)) {
1145 addRelativeReloc
<true>(ctx
, *sec
, offset
, sym
, addend
, expr
, type
);
1149 if (ctx
.arg
.emachine
== EM_MIPS
&& rel
== ctx
.target
->symbolicRel
)
1150 rel
= ctx
.target
->relativeRel
;
1151 std::lock_guard
<std::mutex
> lock(ctx
.relocMutex
);
1152 Partition
&part
= sec
->getPartition(ctx
);
1153 if (ctx
.arg
.emachine
== EM_AARCH64
&& type
== R_AARCH64_AUTH_ABS64
) {
1154 // For a preemptible symbol, we can't use a relative relocation. For an
1155 // undefined symbol, we can't compute offset at link-time and use a
1156 // relative relocation. Use a symbolic relocation instead.
1157 if (sym
.isPreemptible
) {
1158 part
.relaDyn
->addSymbolReloc(type
, *sec
, offset
, sym
, addend
, type
);
1159 } else if (part
.relrAuthDyn
&& sec
->addralign
>= 2 && offset
% 2 == 0) {
1160 // When symbol values are determined in
1161 // finalizeAddressDependentContent, some .relr.auth.dyn relocations
1162 // may be moved to .rela.dyn.
1163 sec
->addReloc({expr
, type
, offset
, addend
, &sym
});
1164 part
.relrAuthDyn
->relocs
.push_back({sec
, sec
->relocs().size() - 1});
1166 part
.relaDyn
->addReloc({R_AARCH64_AUTH_RELATIVE
, sec
, offset
,
1167 DynamicReloc::AddendOnlyWithTargetVA
, sym
,
1172 part
.relaDyn
->addSymbolReloc(rel
, *sec
, offset
, sym
, addend
, type
);
1174 // MIPS ABI turns using of GOT and dynamic relocations inside out.
1175 // While regular ABI uses dynamic relocations to fill up GOT entries
1176 // MIPS ABI requires dynamic linker to fills up GOT entries using
1177 // specially sorted dynamic symbol table. This affects even dynamic
1178 // relocations against symbols which do not require GOT entries
1179 // creation explicitly, i.e. do not have any GOT-relocations. So if
1180 // a preemptible symbol has a dynamic relocation we anyway have
1181 // to create a GOT entry for it.
1182 // If a non-preemptible symbol has a dynamic relocation against it,
1183 // dynamic linker takes it st_value, adds offset and writes down
1184 // result of the dynamic relocation. In case of preemptible symbol
1185 // dynamic linker performs symbol resolution, writes the symbol value
1186 // to the GOT entry and reads the GOT entry when it needs to perform
1187 // a dynamic relocation.
1188 // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf p.4-19
1189 if (ctx
.arg
.emachine
== EM_MIPS
)
1190 ctx
.in
.mipsGot
->addEntry(*sec
->file
, sym
, addend
, expr
);
1195 // When producing an executable, we can perform copy relocations (for
1196 // STT_OBJECT) and canonical PLT (for STT_FUNC) if sym is defined by a DSO.
1197 // Copy relocations/canonical PLT entries are unsupported for
1198 // R_AARCH64_AUTH_ABS64.
1199 if (!ctx
.arg
.shared
&& sym
.isShared() &&
1200 !(ctx
.arg
.emachine
== EM_AARCH64
&& type
== R_AARCH64_AUTH_ABS64
)) {
1201 if (!canDefineSymbolInExecutable(ctx
, sym
)) {
1202 auto diag
= Err(ctx
);
1203 diag
<< "cannot preempt symbol: " << &sym
;
1204 printLocation(diag
, *sec
, sym
, offset
);
1208 if (sym
.isObject()) {
1209 // Produce a copy relocation.
1210 if (auto *ss
= dyn_cast
<SharedSymbol
>(&sym
)) {
1211 if (!ctx
.arg
.zCopyreloc
) {
1212 auto diag
= Err(ctx
);
1213 diag
<< "unresolvable relocation " << type
<< " against symbol '"
1214 << ss
<< "'; recompile with -fPIC or remove '-z nocopyreloc'";
1215 printLocation(diag
, *sec
, sym
, offset
);
1217 sym
.setFlags(NEEDS_COPY
);
1219 sec
->addReloc({expr
, type
, offset
, addend
, &sym
});
1223 // This handles a non PIC program call to function in a shared library. In
1224 // an ideal world, we could just report an error saying the relocation can
1225 // overflow at runtime. In the real world with glibc, crt1.o has a
1226 // R_X86_64_PC32 pointing to libc.so.
1228 // The general idea on how to handle such cases is to create a PLT entry and
1229 // use that as the function value.
1231 // For the static linking part, we just return a plt expr and everything
1232 // else will use the PLT entry as the address.
1234 // The remaining problem is making sure pointer equality still works. We
1235 // need the help of the dynamic linker for that. We let it know that we have
1236 // a direct reference to a so symbol by creating an undefined symbol with a
1237 // non zero st_value. Seeing that, the dynamic linker resolves the symbol to
1238 // the value of the symbol we created. This is true even for got entries, so
1239 // pointer equality is maintained. To avoid an infinite loop, the only entry
1240 // that points to the real function is a dedicated got entry used by the
1241 // plt. That is identified by special relocation types (R_X86_64_JUMP_SLOT,
1242 // R_386_JMP_SLOT, etc).
1244 // For position independent executable on i386, the plt entry requires ebx
1245 // to be set. This causes two problems:
1246 // * If some code has a direct reference to a function, it was probably
1247 // compiled without -fPIE/-fPIC and doesn't maintain ebx.
1248 // * If a library definition gets preempted to the executable, it will have
1249 // the wrong ebx value.
1251 if (ctx
.arg
.pie
&& ctx
.arg
.emachine
== EM_386
) {
1252 auto diag
= Err(ctx
);
1253 diag
<< "symbol '" << &sym
1254 << "' cannot be preempted; recompile with -fPIE";
1255 printLocation(diag
, *sec
, sym
, offset
);
1257 sym
.setFlags(NEEDS_COPY
| NEEDS_PLT
);
1258 sec
->addReloc({expr
, type
, offset
, addend
, &sym
});
1263 auto diag
= Err(ctx
);
1264 diag
<< "relocation " << type
<< " cannot be used against ";
1265 if (sym
.getName().empty())
1266 diag
<< "local symbol";
1268 diag
<< "symbol '" << &sym
<< "'";
1269 diag
<< "; recompile with -fPIC";
1270 printLocation(diag
, *sec
, sym
, offset
);
1273 // This function is similar to the `handleTlsRelocation`. MIPS does not
1274 // support any relaxations for TLS relocations so by factoring out MIPS
1275 // handling in to the separate function we can simplify the code and do not
1276 // pollute other `handleTlsRelocation` by MIPS `ifs` statements.
1277 // Mips has a custom MipsGotSection that handles the writing of GOT entries
1278 // without dynamic relocations.
1279 static unsigned handleMipsTlsRelocation(Ctx
&ctx
, RelType type
, Symbol
&sym
,
1280 InputSectionBase
&c
, uint64_t offset
,
1281 int64_t addend
, RelExpr expr
) {
1282 if (expr
== RE_MIPS_TLSLD
) {
1283 ctx
.in
.mipsGot
->addTlsIndex(*c
.file
);
1284 c
.addReloc({expr
, type
, offset
, addend
, &sym
});
1287 if (expr
== RE_MIPS_TLSGD
) {
1288 ctx
.in
.mipsGot
->addDynTlsEntry(*c
.file
, sym
);
1289 c
.addReloc({expr
, type
, offset
, addend
, &sym
});
1295 // Notes about General Dynamic and Local Dynamic TLS models below. They may
1296 // require the generation of a pair of GOT entries that have associated dynamic
1297 // relocations. The pair of GOT entries created are of the form GOT[e0] Module
1298 // Index (Used to find pointer to TLS block at run-time) GOT[e1] Offset of
1299 // symbol in TLS block.
1301 // Returns the number of relocations processed.
1302 unsigned RelocationScanner::handleTlsRelocation(RelExpr expr
, RelType type
,
1303 uint64_t offset
, Symbol
&sym
,
1305 if (expr
== R_TPREL
|| expr
== R_TPREL_NEG
) {
1306 if (ctx
.arg
.shared
) {
1307 auto diag
= Err(ctx
);
1308 diag
<< "relocation " << type
<< " against " << &sym
1309 << " cannot be used with -shared";
1310 printLocation(diag
, *sec
, sym
, offset
);
1316 if (ctx
.arg
.emachine
== EM_MIPS
)
1317 return handleMipsTlsRelocation(ctx
, type
, sym
, *sec
, offset
, addend
, expr
);
1319 // LoongArch does not yet implement transition from TLSDESC to LE/IE, so
1320 // generate TLSDESC dynamic relocation for the dynamic linker to handle.
1321 if (ctx
.arg
.emachine
== EM_LOONGARCH
&&
1322 oneof
<RE_LOONGARCH_TLSDESC_PAGE_PC
, R_TLSDESC
, R_TLSDESC_PC
,
1323 R_TLSDESC_CALL
>(expr
)) {
1324 if (expr
!= R_TLSDESC_CALL
) {
1325 sym
.setFlags(NEEDS_TLSDESC
);
1326 sec
->addReloc({expr
, type
, offset
, addend
, &sym
});
1331 bool isRISCV
= ctx
.arg
.emachine
== EM_RISCV
;
1333 if (oneof
<RE_AARCH64_TLSDESC_PAGE
, R_TLSDESC
, R_TLSDESC_CALL
, R_TLSDESC_PC
,
1334 R_TLSDESC_GOTPLT
>(expr
) &&
1336 // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a label. Do not
1337 // set NEEDS_TLSDESC on the label.
1338 if (expr
!= R_TLSDESC_CALL
) {
1339 if (!isRISCV
|| type
== R_RISCV_TLSDESC_HI20
)
1340 sym
.setFlags(NEEDS_TLSDESC
);
1341 sec
->addReloc({expr
, type
, offset
, addend
, &sym
});
1346 // ARM, Hexagon, LoongArch and RISC-V do not support GD/LD to IE/LE
1348 // RISC-V supports TLSDESC to IE/LE optimizations.
1349 // For PPC64, if the file has missing R_PPC64_TLSGD/R_PPC64_TLSLD, disable
1350 // optimization as well.
1352 !ctx
.arg
.shared
&& ctx
.arg
.emachine
!= EM_ARM
&&
1353 ctx
.arg
.emachine
!= EM_HEXAGON
&& ctx
.arg
.emachine
!= EM_LOONGARCH
&&
1354 !(isRISCV
&& expr
!= R_TLSDESC_PC
&& expr
!= R_TLSDESC_CALL
) &&
1355 !sec
->file
->ppc64DisableTLSRelax
;
1357 // If we are producing an executable and the symbol is non-preemptable, it
1358 // must be defined and the code sequence can be optimized to use
1361 // ARM and RISC-V do not support any relaxations for TLS relocations, however,
1362 // we can omit the DTPMOD dynamic relocations and resolve them at link time
1363 // because them are always 1. This may be necessary for static linking as
1364 // DTPMOD may not be expected at load time.
1365 bool isLocalInExecutable
= !sym
.isPreemptible
&& !ctx
.arg
.shared
;
1367 // Local Dynamic is for access to module local TLS variables, while still
1368 // being suitable for being dynamically loaded via dlopen. GOT[e0] is the
1369 // module index, with a special value of 0 for the current module. GOT[e1] is
1370 // unused. There only needs to be one module index entry.
1371 if (oneof
<R_TLSLD_GOT
, R_TLSLD_GOTPLT
, R_TLSLD_PC
, R_TLSLD_HINT
>(expr
)) {
1372 // Local-Dynamic relocs can be optimized to Local-Exesec->
1374 sec
->addReloc({ctx
.target
->adjustTlsExpr(type
, R_RELAX_TLS_LD_TO_LE
),
1375 type
, offset
, addend
, &sym
});
1376 return ctx
.target
->getTlsGdRelaxSkip(type
);
1378 if (expr
== R_TLSLD_HINT
)
1380 ctx
.needsTlsLd
.store(true, std::memory_order_relaxed
);
1381 sec
->addReloc({expr
, type
, offset
, addend
, &sym
});
1385 // Local-Dynamic relocs can be optimized to Local-Exesec->
1386 if (expr
== R_DTPREL
) {
1388 expr
= ctx
.target
->adjustTlsExpr(type
, R_RELAX_TLS_LD_TO_LE
);
1389 sec
->addReloc({expr
, type
, offset
, addend
, &sym
});
1393 // Local-Dynamic sequence where offset of tls variable relative to dynamic
1394 // thread pointer is stored in the got. This cannot be optimized to
1396 if (expr
== R_TLSLD_GOT_OFF
) {
1397 sym
.setFlags(NEEDS_GOT_DTPREL
);
1398 sec
->addReloc({expr
, type
, offset
, addend
, &sym
});
1402 if (oneof
<RE_AARCH64_TLSDESC_PAGE
, R_TLSDESC
, R_TLSDESC_CALL
, R_TLSDESC_PC
,
1403 R_TLSDESC_GOTPLT
, R_TLSGD_GOT
, R_TLSGD_GOTPLT
, R_TLSGD_PC
,
1404 RE_LOONGARCH_TLSGD_PAGE_PC
>(expr
)) {
1405 if (!execOptimize
) {
1406 sym
.setFlags(NEEDS_TLSGD
);
1407 sec
->addReloc({expr
, type
, offset
, addend
, &sym
});
1411 // Global-Dynamic/TLSDESC can be optimized to Initial-Exec or Local-Exec
1412 // depending on the symbol being locally defined or not.
1414 // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a non-preemptible
1415 // label, so TLSDESC=>IE will be categorized as R_RELAX_TLS_GD_TO_LE. We fix
1416 // the categorization in RISCV::relocateAllosec->
1417 if (sym
.isPreemptible
) {
1418 sym
.setFlags(NEEDS_TLSGD_TO_IE
);
1419 sec
->addReloc({ctx
.target
->adjustTlsExpr(type
, R_RELAX_TLS_GD_TO_IE
),
1420 type
, offset
, addend
, &sym
});
1422 sec
->addReloc({ctx
.target
->adjustTlsExpr(type
, R_RELAX_TLS_GD_TO_LE
),
1423 type
, offset
, addend
, &sym
});
1425 return ctx
.target
->getTlsGdRelaxSkip(type
);
1428 if (oneof
<R_GOT
, R_GOTPLT
, R_GOT_PC
, RE_AARCH64_GOT_PAGE_PC
,
1429 RE_LOONGARCH_GOT_PAGE_PC
, R_GOT_OFF
, R_TLSIE_HINT
>(expr
)) {
1430 ctx
.hasTlsIe
.store(true, std::memory_order_relaxed
);
1431 // Initial-Exec relocs can be optimized to Local-Exec if the symbol is
1432 // locally defined. This is not supported on SystemZ.
1433 if (execOptimize
&& isLocalInExecutable
&& ctx
.arg
.emachine
!= EM_S390
) {
1434 sec
->addReloc({R_RELAX_TLS_IE_TO_LE
, type
, offset
, addend
, &sym
});
1435 } else if (expr
!= R_TLSIE_HINT
) {
1436 sym
.setFlags(NEEDS_TLSIE
);
1437 // R_GOT needs a relative relocation for PIC on i386 and Hexagon.
1438 if (expr
== R_GOT
&& ctx
.arg
.isPic
&&
1439 !ctx
.target
->usesOnlyLowPageBits(type
))
1440 addRelativeReloc
<true>(ctx
, *sec
, offset
, sym
, addend
, expr
, type
);
1442 sec
->addReloc({expr
, type
, offset
, addend
, &sym
});
1450 template <class ELFT
, class RelTy
>
1451 void RelocationScanner::scanOne(typename Relocs
<RelTy
>::const_iterator
&i
) {
1452 const RelTy
&rel
= *i
;
1453 uint32_t symIndex
= rel
.getSymbol(ctx
.arg
.isMips64EL
);
1454 Symbol
&sym
= sec
->getFile
<ELFT
>()->getSymbol(symIndex
);
1456 if constexpr (ELFT::Is64Bits
|| RelTy::IsCrel
) {
1457 type
= rel
.getType(ctx
.arg
.isMips64EL
);
1460 // CREL is unsupported for MIPS N32.
1461 if (ctx
.arg
.mipsN32Abi
) {
1462 type
= getMipsN32RelType(i
);
1464 type
= rel
.getType(ctx
.arg
.isMips64EL
);
1468 // Get an offset in an output section this relocation is applied to.
1469 uint64_t offset
= getter
.get(ctx
, rel
.r_offset
);
1470 if (offset
== uint64_t(-1))
1474 ctx
.target
->getRelExpr(type
, sym
, sec
->content().data() + offset
);
1475 int64_t addend
= RelTy::HasAddend
1476 ? getAddend
<ELFT
>(rel
)
1477 : ctx
.target
->getImplicitAddend(
1478 sec
->content().data() + rel
.r_offset
, type
);
1479 if (LLVM_UNLIKELY(ctx
.arg
.emachine
== EM_MIPS
))
1480 addend
+= computeMipsAddend
<ELFT
>(rel
, expr
, sym
.isLocal());
1481 else if (ctx
.arg
.emachine
== EM_PPC64
&& ctx
.arg
.isPic
&& type
== R_PPC64_TOC
)
1482 addend
+= getPPC64TocBase(ctx
);
1484 // Ignore R_*_NONE and other marker relocations.
1488 // Error if the target symbol is undefined. Symbol index 0 may be used by
1489 // marker relocations, e.g. R_*_NONE and R_ARM_V4BX. Don't error on them.
1490 if (sym
.isUndefined() && symIndex
!= 0 &&
1491 maybeReportUndefined(ctx
, cast
<Undefined
>(sym
), *sec
, offset
))
1494 if (ctx
.arg
.emachine
== EM_PPC64
) {
1495 // We can separate the small code model relocations into 2 categories:
1496 // 1) Those that access the compiler generated .toc sections.
1497 // 2) Those that access the linker allocated got entries.
1498 // lld allocates got entries to symbols on demand. Since we don't try to
1499 // sort the got entries in any way, we don't have to track which objects
1500 // have got-based small code model relocs. The .toc sections get placed
1501 // after the end of the linker allocated .got section and we do sort those
1502 // so sections addressed with small code model relocations come first.
1503 if (type
== R_PPC64_TOC16
|| type
== R_PPC64_TOC16_DS
)
1504 sec
->file
->ppc64SmallCodeModelTocRelocs
= true;
1506 // Record the TOC entry (.toc + addend) as not relaxable. See the comment in
1507 // InputSectionBase::relocateAlloc().
1508 if (type
== R_PPC64_TOC16_LO
&& sym
.isSection() && isa
<Defined
>(sym
) &&
1509 cast
<Defined
>(sym
).section
->name
== ".toc")
1510 ctx
.ppc64noTocRelax
.insert({&sym
, addend
});
1512 if ((type
== R_PPC64_TLSGD
&& expr
== R_TLSDESC_CALL
) ||
1513 (type
== R_PPC64_TLSLD
&& expr
== R_TLSLD_HINT
)) {
1514 // Skip the error check for CREL, which does not set `end`.
1515 if constexpr (!RelTy::IsCrel
) {
1517 auto diag
= Err(ctx
);
1518 diag
<< "R_PPC64_TLSGD/R_PPC64_TLSLD may not be the last "
1520 printLocation(diag
, *sec
, sym
, offset
);
1525 // Offset the 4-byte aligned R_PPC64_TLSGD by one byte in the NOTOC
1526 // case, so we can discern it later from the toc-case.
1527 if (i
->getType(/*isMips64EL=*/false) == R_PPC64_REL24_NOTOC
)
1532 // If the relocation does not emit a GOT or GOTPLT entry but its computation
1533 // uses their addresses, we need GOT or GOTPLT to be created.
1535 // The 5 types that relative GOTPLT are all x86 and x86-64 specific.
1536 if (oneof
<R_GOTPLTONLY_PC
, R_GOTPLTREL
, R_GOTPLT
, R_PLT_GOTPLT
,
1537 R_TLSDESC_GOTPLT
, R_TLSGD_GOTPLT
>(expr
)) {
1538 ctx
.in
.gotPlt
->hasGotPltOffRel
.store(true, std::memory_order_relaxed
);
1539 } else if (oneof
<R_GOTONLY_PC
, R_GOTREL
, RE_PPC32_PLTREL
, RE_PPC64_TOCBASE
,
1540 RE_PPC64_RELAX_TOC
>(expr
)) {
1541 ctx
.in
.got
->hasGotOffRel
.store(true, std::memory_order_relaxed
);
1544 // Process TLS relocations, including TLS optimizations. Note that
1545 // R_TPREL and R_TPREL_NEG relocations are resolved in processAux.
1547 // Some RISCV TLSDESC relocations reference a local NOTYPE symbol,
1548 // but we need to process them in handleTlsRelocation.
1549 if (sym
.isTls() || oneof
<R_TLSDESC_PC
, R_TLSDESC_CALL
>(expr
)) {
1550 if (unsigned processed
=
1551 handleTlsRelocation(expr
, type
, offset
, sym
, addend
)) {
1557 processAux(expr
, type
, offset
, sym
, addend
);
1560 // R_PPC64_TLSGD/R_PPC64_TLSLD is required to mark `bl __tls_get_addr` for
1561 // General Dynamic/Local Dynamic code sequences. If a GD/LD GOT relocation is
1562 // found but no R_PPC64_TLSGD/R_PPC64_TLSLD is seen, we assume that the
1563 // instructions are generated by very old IBM XL compilers. Work around the
1564 // issue by disabling GD/LD to IE/LE relaxation.
1565 template <class RelTy
>
1566 static void checkPPC64TLSRelax(InputSectionBase
&sec
, Relocs
<RelTy
> rels
) {
1567 // Skip if sec is synthetic (sec.file is null) or if sec has been marked.
1568 if (!sec
.file
|| sec
.file
->ppc64DisableTLSRelax
)
1570 bool hasGDLD
= false;
1571 for (const RelTy
&rel
: rels
) {
1572 RelType type
= rel
.getType(false);
1576 return; // Found a marker
1577 case R_PPC64_GOT_TLSGD16
:
1578 case R_PPC64_GOT_TLSGD16_HA
:
1579 case R_PPC64_GOT_TLSGD16_HI
:
1580 case R_PPC64_GOT_TLSGD16_LO
:
1581 case R_PPC64_GOT_TLSLD16
:
1582 case R_PPC64_GOT_TLSLD16_HA
:
1583 case R_PPC64_GOT_TLSLD16_HI
:
1584 case R_PPC64_GOT_TLSLD16_LO
:
1590 sec
.file
->ppc64DisableTLSRelax
= true;
1593 << ": disable TLS relaxation due to R_PPC64_GOT_TLS* relocations "
1595 "R_PPC64_TLSGD/R_PPC64_TLSLD relocations";
1599 template <class ELFT
, class RelTy
>
1600 void RelocationScanner::scan(Relocs
<RelTy
> rels
) {
1601 // Not all relocations end up in Sec->Relocations, but a lot do.
1602 sec
->relocations
.reserve(rels
.size());
1604 if (ctx
.arg
.emachine
== EM_PPC64
)
1605 checkPPC64TLSRelax
<RelTy
>(*sec
, rels
);
1607 // For EhInputSection, OffsetGetter expects the relocations to be sorted by
1608 // r_offset. In rare cases (.eh_frame pieces are reordered by a linker
1609 // script), the relocations may be unordered.
1610 // On SystemZ, all sections need to be sorted by r_offset, to allow TLS
1611 // relaxation to be handled correctly - see SystemZ::getTlsGdRelaxSkip.
1612 SmallVector
<RelTy
, 0> storage
;
1613 if (isa
<EhInputSection
>(sec
) || ctx
.arg
.emachine
== EM_S390
)
1614 rels
= sortRels(rels
, storage
);
1616 if constexpr (RelTy::IsCrel
) {
1617 for (auto i
= rels
.begin(); i
!= rels
.end();)
1618 scanOne
<ELFT
, RelTy
>(i
);
1620 // The non-CREL code path has additional check for PPC64 TLS.
1621 end
= static_cast<const void *>(rels
.end());
1622 for (auto i
= rels
.begin(); i
!= end
;)
1623 scanOne
<ELFT
, RelTy
>(i
);
1626 // Sort relocations by offset for more efficient searching for
1627 // R_RISCV_PCREL_HI20 and R_PPC64_ADDR64.
1628 if (ctx
.arg
.emachine
== EM_RISCV
||
1629 (ctx
.arg
.emachine
== EM_PPC64
&& sec
->name
== ".toc"))
1630 llvm::stable_sort(sec
->relocs(),
1631 [](const Relocation
&lhs
, const Relocation
&rhs
) {
1632 return lhs
.offset
< rhs
.offset
;
1636 template <class ELFT
>
1637 void RelocationScanner::scanSection(InputSectionBase
&s
, bool isEH
) {
1639 getter
= OffsetGetter(s
);
1640 const RelsOrRelas
<ELFT
> rels
= s
.template relsOrRelas
<ELFT
>(!isEH
);
1641 if (rels
.areRelocsCrel())
1642 scan
<ELFT
>(rels
.crels
);
1643 else if (rels
.areRelocsRel())
1644 scan
<ELFT
>(rels
.rels
);
1646 scan
<ELFT
>(rels
.relas
);
1649 template <class ELFT
> void elf::scanRelocations(Ctx
&ctx
) {
1650 // Scan all relocations. Each relocation goes through a series of tests to
1651 // determine if it needs special treatment, such as creating GOT, PLT,
1652 // copy relocations, etc. Note that relocations for non-alloc sections are
1653 // directly processed by InputSection::relocateNonAlloc.
1655 // Deterministic parallellism needs sorting relocations which is unsuitable
1656 // for -z nocombreloc. MIPS and PPC64 use global states which are not suitable
1658 bool serial
= !ctx
.arg
.zCombreloc
|| ctx
.arg
.emachine
== EM_MIPS
||
1659 ctx
.arg
.emachine
== EM_PPC64
;
1660 parallel::TaskGroup tg
;
1661 auto outerFn
= [&]() {
1662 for (ELFFileBase
*f
: ctx
.objectFiles
) {
1663 auto fn
= [f
, &ctx
]() {
1664 RelocationScanner
scanner(ctx
);
1665 for (InputSectionBase
*s
: f
->getSections()) {
1666 if (s
&& s
->kind() == SectionBase::Regular
&& s
->isLive() &&
1667 (s
->flags
& SHF_ALLOC
) &&
1668 !(s
->type
== SHT_ARM_EXIDX
&& ctx
.arg
.emachine
== EM_ARM
))
1669 scanner
.template scanSection
<ELFT
>(*s
);
1678 RelocationScanner
scanner(ctx
);
1679 for (Partition
&part
: ctx
.partitions
) {
1680 for (EhInputSection
*sec
: part
.ehFrame
->sections
)
1681 scanner
.template scanSection
<ELFT
>(*sec
, /*isEH=*/true);
1682 if (part
.armExidx
&& part
.armExidx
->isLive())
1683 for (InputSection
*sec
: part
.armExidx
->exidxSections
)
1685 scanner
.template scanSection
<ELFT
>(*sec
);
1693 // If `serial` is true, call `spawn` to ensure that `scanner` runs in a thread
1694 // with valid getThreadIndex().
1701 RelocationBaseSection
&elf::getIRelativeSection(Ctx
&ctx
) {
1702 // Prior to Android V, there was a bug that caused RELR relocations to be
1703 // applied after packed relocations. This meant that resolvers referenced by
1704 // IRELATIVE relocations in the packed relocation section would read
1705 // unrelocated globals with RELR relocations when
1706 // --pack-relative-relocs=android+relr is enabled. Work around this by placing
1707 // IRELATIVE in .rela.plt.
1708 return ctx
.arg
.androidPackDynRelocs
? *ctx
.in
.relaPlt
1709 : *ctx
.mainPart
->relaDyn
;
// Returns true when `sym` is a non-preemptible GNU ifunc that has been fully
// handled here (IPLT/IGOT entries and IRELATIVE relocation created as
// requested by `flags`); returns false when ordinary processing should
// continue.
// NOTE(review): several short lines (early `return false;`, `d.type =
// STT_FUNC;`, closing braces, trailing `return true;`) appear to be elided
// from this excerpt; verify against upstream.
static bool handleNonPreemptibleIfunc(Ctx &ctx, Symbol &sym, uint16_t flags) {
  // Handle a reference to a non-preemptible ifunc. These are special in a
  // number of ways:
  //
  // - Unlike most non-preemptible symbols, non-preemptible ifuncs do not have
  //   a fixed value. But assuming that all references to the ifunc are
  //   GOT-generating or PLT-generating, the handling of an ifunc is
  //   relatively straightforward. We create a PLT entry in Iplt, which is
  //   usually at the end of .plt, which makes an indirect call using a
  //   matching GOT entry in igotPlt, which is usually at the end of .got.plt.
  //   The GOT entry is relocated using an IRELATIVE relocation in relaDyn,
  //   which is usually at the end of .rela.dyn.
  //
  // - Despite the fact that an ifunc does not have a fixed value, compilers
  //   that are not passed -fPIC will assume that they do, and will emit
  //   direct (non-GOT-generating, non-PLT-generating) relocations to the
  //   symbol. This means that if a direct relocation to the symbol is
  //   seen, the linker must set a value for the symbol, and this value must
  //   be consistent no matter what type of reference is made to the symbol.
  //   This can be done by creating a PLT entry for the symbol in the way
  //   described above and making it canonical, that is, making all references
  //   point to the PLT entry instead of the resolver. In lld we also store
  //   the address of the PLT entry in the dynamic symbol table, which means
  //   that the symbol will also have the same value in other modules.
  //   Because the value loaded from the GOT needs to be consistent with
  //   the value computed using a direct relocation, a non-preemptible ifunc
  //   may end up with two GOT entries, one in .got.plt that points to the
  //   address returned by the resolver and is used only by the PLT entry,
  //   and another in .got that points to the PLT entry and is used by
  //   GOT-generating relocations.
  //
  // - The fact that these symbols do not have a fixed value makes them an
  //   exception to the general rule that a statically linked executable does
  //   not require any form of dynamic relocation. To handle these relocations
  //   correctly, the IRELATIVE relocations are stored in an array which a
  //   statically linked executable's startup code must enumerate using the
  //   linker-defined symbols __rela?_iplt_{start,end}.
  if (!sym.isGnuIFunc() || sym.isPreemptible || ctx.arg.zIfuncNoplt)
  // Skip unreferenced non-preemptible ifunc.
  if (!(flags & (NEEDS_GOT | NEEDS_PLT | HAS_DIRECT_RELOC)))
  sym.isInIplt = true;

  // Create an Iplt and the associated IRELATIVE relocation pointing to the
  // original section/value pairs. For non-GOT non-PLT relocation case below, we
  // may alter section/value, so create a copy of the symbol to make
  // section/value fixed.
  auto *directSym = makeDefined(cast<Defined>(sym));
  directSym->allocateAux(ctx);
  auto &dyn = getIRelativeSection(ctx);
  addPltEntry(ctx, *ctx.in.iplt, *ctx.in.igotPlt, dyn, ctx.target->iRelativeRel,
  sym.allocateAux(ctx);
  // The canonical symbol reuses the PLT index allocated for the fixed copy.
  ctx.symAux.back().pltIdx = ctx.symAux[directSym->auxIdx].pltIdx;

  if (flags & HAS_DIRECT_RELOC) {
    // Change the value to the IPLT and redirect all references to it.
    auto &d = cast<Defined>(sym);
    d.section = ctx.in.iplt.get();
    d.value = d.getPltIdx(ctx) * ctx.target->ipltEntrySize;
    // It's important to set the symbol type here so that dynamic loaders
    // don't try to call the PLT as if it were an ifunc resolver.
    if (flags & NEEDS_GOT) {
      assert(!(flags & NEEDS_GOT_AUTH) &&
             "R_AARCH64_AUTH_IRELATIVE is not supported yet");
      addGotEntry(ctx, sym);
  } else if (flags & NEEDS_GOT) {
    // Redirect GOT accesses to point to the Igot.
    sym.gotInIgot = true;
// After the scan pass has recorded per-symbol needs in Symbol::flags, create
// the GOT/PLT/copy-relocation/TLS entries each symbol asked for. Applied to
// every global symbol and then to every local symbol of every object file.
// NOTE(review): a number of short lines (early `return;`, `else`, closing
// braces, relocation-call argument continuations) appear to be elided from
// this excerpt; verify against upstream.
void elf::postScanRelocations(Ctx &ctx) {
  auto fn = [&](Symbol &sym) {
    auto flags = sym.flags.load(std::memory_order_relaxed);
    // Non-preemptible ifuncs are fully handled by the helper.
    if (handleNonPreemptibleIfunc(ctx, sym, flags))

    // Defined tagged symbols get an MTE global descriptor.
    if (sym.isTagged() && sym.isDefined())
      ctx.mainPart->memtagGlobalDescriptors->addSymbol(sym);

    if (!sym.needsDynReloc())
    sym.allocateAux(ctx);

    if (flags & NEEDS_GOT) {
      // AUTH and non-AUTH GOT entries are mutually exclusive for one symbol.
      if ((flags & NEEDS_GOT_AUTH) && (flags & NEEDS_GOT_NONAUTH)) {
        auto diag = Err(ctx);
        diag << "both AUTH and non-AUTH GOT entries for '" << sym.getName()
             << "' requested, but only one type of GOT entry per symbol is "
      if (flags & NEEDS_GOT_AUTH)
        addGotAuthEntry(ctx, sym);
        addGotEntry(ctx, sym);
    if (flags & NEEDS_PLT)
      addPltEntry(ctx, *ctx.in.plt, *ctx.in.gotPlt, *ctx.in.relaPlt,
                  ctx.target->pltRel, sym);
    if (flags & NEEDS_COPY) {
      if (sym.isObject()) {
        invokeELFT(addCopyRelSymbol, ctx, cast<SharedSymbol>(sym));
        // NEEDS_COPY is cleared for sym and its aliases so that in
        // later iterations aliases won't cause redundant copies.
        assert(!sym.hasFlag(NEEDS_COPY));
      // Canonical PLT: a function symbol targeted by direct relocations gets
      // its value redirected to its PLT entry.
      assert(sym.isFunc() && sym.hasFlag(NEEDS_PLT));
      if (!sym.isDefined()) {
        replaceWithDefined(ctx, sym, *ctx.in.plt,
                           ctx.target->pltHeaderSize +
                               ctx.target->pltEntrySize * sym.getPltIdx(ctx),
        sym.setFlags(NEEDS_COPY);
        if (ctx.arg.emachine == EM_PPC) {
          // PPC32 canonical PLT entries are at the beginning of .glink
          cast<Defined>(sym).value = ctx.in.plt->headerSize;
          ctx.in.plt->headerSize += 16;
          cast<PPC32GlinkSection>(*ctx.in.plt).canonical_plts.push_back(&sym);

    // TLS entries follow.
    bool isLocalInExecutable = !sym.isPreemptible && !ctx.arg.shared;
    GotSection *got = ctx.in.got.get();

    if (flags & NEEDS_TLSDESC) {
      got->addTlsDescEntry(sym);
      ctx.mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
          ctx.target->tlsDescRel, *got, got->getTlsDescOffset(sym), sym,
          ctx.target->tlsDescRel);
    if (flags & NEEDS_TLSGD) {
      got->addDynTlsEntry(sym);
      uint64_t off = got->getGlobalDynOffset(sym);
      if (isLocalInExecutable)
        // Write one to the GOT slot.
        got->addConstant({R_ADDEND, ctx.target->symbolicRel, off, 1, &sym});
      ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->tlsModuleIndexRel,

      // If the symbol is preemptible we need the dynamic linker to write
      // the offset too (second GOT word of the module-index/offset pair).
      uint64_t offsetOff = off + ctx.arg.wordsize;
      if (sym.isPreemptible)
        ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->tlsOffsetRel, *got,
        got->addConstant({R_ABS, ctx.target->tlsOffsetRel, offsetOff, 0, &sym});
    if (flags & NEEDS_TLSGD_TO_IE) {
      ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->tlsGotRel, *got,
                                            sym.getGotOffset(ctx), sym);
    if (flags & NEEDS_GOT_DTPREL) {
          {R_ABS, ctx.target->tlsOffsetRel, sym.getGotOffset(ctx), 0, &sym});

    if ((flags & NEEDS_TLSIE) && !(flags & NEEDS_TLSGD_TO_IE))
      addTpOffsetGotEntry(ctx, sym);

  // TLS local-dynamic: one shared module-index GOT entry for the whole link.
  GotSection *got = ctx.in.got.get();
  if (ctx.needsTlsLd.load(std::memory_order_relaxed) && got->addTlsIndex()) {
    static Undefined dummy(ctx.internalFile, "", STB_LOCAL, 0, 0);
    ctx.mainPart->relaDyn->addReloc(
        {ctx.target->tlsModuleIndexRel, got, got->getTlsIndexOff()});
    got->addConstant({R_ADDEND, ctx.target->symbolicRel,
                      got->getTlsIndexOff(), 1, &dummy});

  assert(ctx.symAux.size() == 1);
  for (Symbol *sym : ctx.symtab->getSymbols())

  // Local symbols may need the aforementioned non-preemptible ifunc and GOT
  // handling. They don't need regular PLT.
  for (ELFFileBase *file : ctx.objectFiles)
    for (Symbol *sym : file->getLocalSymbols())
// Comparator used by std::merge in mergeThunks: orders InputSections (and
// ThunkSections) by outSecOff, with special handling so a thunk that must
// immediately precede a specific target section sorts directly before it.
// NOTE(review): the `return` statements of the comparison branches appear to
// be elided from this excerpt; verify against upstream.
static bool mergeCmp(const InputSection *a, const InputSection *b) {
  // std::merge requires a strict weak ordering.
  if (a->outSecOff < b->outSecOff)

  // FIXME dyn_cast<ThunkSection> is non-null for any SyntheticSection.
  if (a->outSecOff == b->outSecOff && a != b) {
    auto *ta = dyn_cast<ThunkSection>(a);
    auto *tb = dyn_cast<ThunkSection>(b);

    // Check if Thunk is immediately before any specific Target
    // InputSection for example Mips LA25 Thunks.
    if (ta && ta->getTargetInputSection() == b)

    // Place Thunk Sections without specific targets before
    // non-Thunk Sections.
    if (ta && !tb && !ta->getTargetInputSection())
// Call Fn on every executable InputSection accessed via the linker script
// InputSectionDescription::Sections.
// Only SHF_ALLOC | SHF_EXECINSTR output sections are visited, since only
// executable code can need thunks.
// NOTE(review): the `continue;`, the `fn(os, isd);` call and the closing
// braces appear to be elided from this excerpt; verify against upstream.
static void forEachInputSectionDescription(
    ArrayRef<OutputSection *> outputSections,
    llvm::function_ref<void(OutputSection *, InputSectionDescription *)> fn) {
  for (OutputSection *os : outputSections) {
    if (!(os->flags & SHF_ALLOC) || !(os->flags & SHF_EXECINSTR))
    for (SectionCommand *bc : os->commands)
      if (auto *isd = dyn_cast<InputSectionDescription>(bc))
// ThunkCreator only stores a reference to the link context; all other state
// is default-initialized.
ThunkCreator::ThunkCreator(Ctx &ctx) : ctx(ctx) {}

// Out-of-line destructor — presumably so member containers of types that are
// incomplete in the header can be destroyed here; verify against the header.
ThunkCreator::~ThunkCreator() {}
1953 // Thunk Implementation
// Thunks (sometimes called stubs, veneers or branch islands) are small pieces
// of code that the linker inserts in between a caller and a callee. The thunks
1957 // are added at link time rather than compile time as the decision on whether
1958 // a thunk is needed, such as the caller and callee being out of range, can only
1959 // be made at link time.
1961 // It is straightforward to tell given the current state of the program when a
1962 // thunk is needed for a particular call. The more difficult part is that
1963 // the thunk needs to be placed in the program such that the caller can reach
1964 // the thunk and the thunk can reach the callee; furthermore, adding thunks to
1965 // the program alters addresses, which can mean more thunks etc.
1967 // In lld we have a synthetic ThunkSection that can hold many Thunks.
1968 // The decision to have a ThunkSection act as a container means that we can
1969 // more easily handle the most common case of a single block of contiguous
1970 // Thunks by inserting just a single ThunkSection.
1972 // The implementation of Thunks in lld is split across these areas
1973 // Relocations.cpp : Framework for creating and placing thunks
1974 // Thunks.cpp : The code generated for each supported thunk
1975 // Target.cpp : Target specific hooks that the framework uses to decide when
1977 // Synthetic.cpp : Implementation of ThunkSection
1978 // Writer.cpp : Iteratively call framework until no more Thunks added
1980 // Thunk placement requirements:
1981 // Mips LA25 thunks. These must be placed immediately before the callee section
1982 // We can assume that the caller is in range of the Thunk. These are modelled
1983 // by Thunks that return the section they must precede with
1984 // getTargetInputSection().
1986 // ARM interworking and range extension thunks. These thunks must be placed
1987 // within range of the caller. All implemented ARM thunks can always reach the
1988 // callee as they use an indirect jump via a register that has no range
1991 // Thunk placement algorithm:
1992 // For Mips LA25 ThunkSections; the placement is explicit, it has to be before
1993 // getTargetInputSection().
1995 // For thunks that must be placed within range of the caller there are many
1996 // possible choices given that the maximum range from the caller is usually
1997 // much larger than the average InputSection size. Desirable properties include:
1998 // - Maximize reuse of thunks by multiple callers
1999 // - Minimize number of ThunkSections to simplify insertion
2000 // - Handle impact of already added Thunks on addresses
2001 // - Simple to understand and implement
2003 // In lld for the first pass, we pre-create one or more ThunkSections per
2004 // InputSectionDescription at Target specific intervals. A ThunkSection is
2005 // placed so that the estimated end of the ThunkSection is within range of the
2006 // start of the InputSectionDescription or the previous ThunkSection. For
2008 // InputSectionDescription
2018 // The intention is that we can add a Thunk to a ThunkSection that is well
2019 // spaced enough to service a number of callers without having to do a lot
2020 // of work. An important principle is that it is not an error if a Thunk cannot
2021 // be placed in a pre-created ThunkSection; when this happens we create a new
2022 // ThunkSection placed next to the caller. This allows us to handle the vast
2023 // majority of thunks simply, but also handle rare cases where the branch range
2024 // is smaller than the target specific spacing.
2026 // The algorithm is expected to create all the thunks that are needed in a
2027 // single pass, with a small number of programs needing a second pass due to
2028 // the insertion of thunks in the first pass increasing the offset between
2029 // callers and callees that were only just in range.
2031 // A consequence of allowing new ThunkSections to be created outside of the
2032 // pre-created ThunkSections is that in rare cases calls to Thunks that were in
2033 // range in pass K, are out of range in some pass > K due to the insertion of
2034 // more Thunks in between the caller and callee. When this happens we retarget
2035 // the relocation back to the original target and create another Thunk.
2037 // Remove ThunkSections that are empty, this should only be the initial set
2038 // precreated on pass 0.
2040 // Insert the Thunks for OutputSection OS into their designated place
2041 // in the Sections vector, and recalculate the InputSection output section
2043 // This may invalidate any output section offsets stored outside of InputSection
// Insert the Thunks for OutputSection OS into their designated place
// in the Sections vector, and recalculate the InputSection output section
// offsets.
// This may invalidate any output section offsets stored outside of
// InputSection.
// NOTE(review): a few closing `});` lines and the `mergeCmp` argument of
// std::merge appear to be elided from this excerpt; verify against upstream.
void ThunkCreator::mergeThunks(ArrayRef<OutputSection *> outputSections) {
  forEachInputSectionDescription(
      outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
        if (isd->thunkSections.empty())

        // Remove any zero sized precreated Thunks.
        llvm::erase_if(isd->thunkSections,
                       [](const std::pair<ThunkSection *, uint32_t> &ts) {
                         return ts.first->getSize() == 0;

        // ISD->ThunkSections contains all created ThunkSections, including
        // those inserted in previous passes. Extract the Thunks created this
        // pass and order them in ascending outSecOff.
        std::vector<ThunkSection *> newThunks;
        for (std::pair<ThunkSection *, uint32_t> ts : isd->thunkSections)
          if (ts.second == pass)
            newThunks.push_back(ts.first);
        llvm::stable_sort(newThunks,
                          [](const ThunkSection *a, const ThunkSection *b) {
                            return a->outSecOff < b->outSecOff;

        // Merge sorted vectors of Thunks and InputSections by outSecOff
        SmallVector<InputSection *, 0> tmp;
        tmp.reserve(isd->sections.size() + newThunks.size());

        std::merge(isd->sections.begin(), isd->sections.end(),
                   newThunks.begin(), newThunks.end(), std::back_inserter(tmp),

        isd->sections = std::move(tmp);
// Return the PC bias for a branch relocation: the difference between the
// address of the branch instruction and the PC value the hardware uses when
// computing the branch target. Non-zero only on ARM (Thumb branches listed
// below versus other ARM branches).
// NOTE(review): the `switch` header and the `return` statements appear to be
// elided from this excerpt; verify against upstream.
static int64_t getPCBias(Ctx &ctx, RelType type) {
  if (ctx.arg.emachine != EM_ARM)
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
  case R_ARM_THM_CALL:
2093 // Find or create a ThunkSection within the InputSectionDescription (ISD) that
2094 // is in range of Src. An ISD maps to a range of InputSections described by a
2095 // linker script section pattern such as { .text .text.* }.
2096 ThunkSection
*ThunkCreator::getISDThunkSec(OutputSection
*os
,
2098 InputSectionDescription
*isd
,
2099 const Relocation
&rel
,
2101 // See the comment in getThunk for -pcBias below.
2102 const int64_t pcBias
= getPCBias(ctx
, rel
.type
);
2103 for (std::pair
<ThunkSection
*, uint32_t> tp
: isd
->thunkSections
) {
2104 ThunkSection
*ts
= tp
.first
;
2105 uint64_t tsBase
= os
->addr
+ ts
->outSecOff
- pcBias
;
2106 uint64_t tsLimit
= tsBase
+ ts
->getSize();
2107 if (ctx
.target
->inBranchRange(rel
.type
, src
,
2108 (src
> tsLimit
) ? tsBase
: tsLimit
))
2112 // No suitable ThunkSection exists. This can happen when there is a branch
2113 // with lower range than the ThunkSection spacing or when there are too
2114 // many Thunks. Create a new ThunkSection as close to the InputSection as
2115 // possible. Error if InputSection is so large we cannot place ThunkSection
2116 // anywhere in Range.
2117 uint64_t thunkSecOff
= isec
->outSecOff
;
2118 if (!ctx
.target
->inBranchRange(rel
.type
, src
,
2119 os
->addr
+ thunkSecOff
+ rel
.addend
)) {
2120 thunkSecOff
= isec
->outSecOff
+ isec
->getSize();
2121 if (!ctx
.target
->inBranchRange(rel
.type
, src
,
2122 os
->addr
+ thunkSecOff
+ rel
.addend
))
2123 Fatal(ctx
) << "InputSection too large for range extension thunk "
2124 << isec
->getObjMsg(src
- (os
->addr
<< isec
->outSecOff
));
2126 return addThunkSection(os
, isd
, thunkSecOff
);
// Add a Thunk that needs to be placed in a ThunkSection that immediately
// precedes its Target.
// Returns a cached ThunkSection for `isec` when one exists; otherwise creates
// one at `isec`'s offset inside the InputSectionDescription that contains it.
// NOTE(review): the cache-hit early return, `continue;` statements and the
// final `return ts;` appear to be elided from this excerpt; verify against
// upstream.
ThunkSection *ThunkCreator::getISThunkSec(InputSection *isec) {
  ThunkSection *ts = thunkedSections.lookup(isec);

  // Find InputSectionRange within Target Output Section (TOS) that the
  // InputSection (IS) that we need to precede is in.
  OutputSection *tos = isec->getParent();
  for (SectionCommand *bc : tos->commands) {
    auto *isd = dyn_cast<InputSectionDescription>(bc);
    if (!isd || isd->sections.empty())

    InputSection *first = isd->sections.front();
    InputSection *last = isd->sections.back();

    // Skip descriptions whose [first, last] range does not contain isec.
    if (isec->outSecOff < first->outSecOff || last->outSecOff < isec->outSecOff)

    ts = addThunkSection(tos, isd, isec->outSecOff);
    thunkedSections[isec] = ts;
// Create one or more ThunkSections per OS that can be used to place Thunks.
// We attempt to place the ThunkSections using the following desirable
// properties:
// - Within range of the maximum number of callers
// - Minimise the number of ThunkSections
//
// We follow a simple but conservative heuristic to place ThunkSections at
// offsets that are multiples of a Target specific branch range.
// For an InputSectionDescription that is smaller than the range, a single
// ThunkSection at the end of the range will do.
//
// For an InputSectionDescription that is more than twice the size of the range,
// we place the last ThunkSection at range bytes from the end of the
// InputSectionDescription in order to increase the likelihood that the
// distance from a thunk to its target will be sufficiently small to
// allow for the creation of a short thunk.
// NOTE(review): the declaration of `isdEnd`/`isecLimit`, the early `return;`
// and some closing braces appear to be elided from this excerpt; verify
// against upstream.
void ThunkCreator::createInitialThunkSections(
    ArrayRef<OutputSection *> outputSections) {
  uint32_t thunkSectionSpacing = ctx.target->getThunkSectionSpacing();
  forEachInputSectionDescription(
      outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
        if (isd->sections.empty())

        uint32_t isdBegin = isd->sections.front()->outSecOff;
            isd->sections.back()->outSecOff + isd->sections.back()->getSize();
        // Sentinel: only lowered when the ISD is large enough to need a
        // specially-placed last ThunkSection.
        uint32_t lastThunkLowerBound = -1;
        if (isdEnd - isdBegin > thunkSectionSpacing * 2)
          lastThunkLowerBound = isdEnd - thunkSectionSpacing;

        uint32_t prevIsecLimit = isdBegin;
        uint32_t thunkUpperBound = isdBegin + thunkSectionSpacing;

        for (const InputSection *isec : isd->sections) {
          isecLimit = isec->outSecOff + isec->getSize();
          // Crossing the spacing boundary: place a ThunkSection after the
          // previous InputSection.
          if (isecLimit > thunkUpperBound) {
            addThunkSection(os, isd, prevIsecLimit);
            thunkUpperBound = prevIsecLimit + thunkSectionSpacing;
          if (isecLimit > lastThunkLowerBound)
          prevIsecLimit = isecLimit;
        addThunkSection(os, isd, isecLimit);
// Create a new ThunkSection at offset `off` inside `isd`/`os`, record it for
// this pass, and return it.
// NOTE(review): the final parameter line (`uint64_t off) {`), a closing brace
// and the trailing `return ts;` appear to be elided from this excerpt; verify
// against upstream.
ThunkSection *ThunkCreator::addThunkSection(OutputSection *os,
                                            InputSectionDescription *isd,
  auto *ts = make<ThunkSection>(ctx, os, off);
  ts->partition = os->partition;
  if ((ctx.arg.fixCortexA53Errata843419 || ctx.arg.fixCortexA8) &&
      !isd->sections.empty()) {
    // The errata fixes are sensitive to addresses modulo 4 KiB. When we add
    // thunks we disturb the base addresses of sections placed after the thunks
    // this makes patches we have generated redundant, and may cause us to
    // generate more patches as different instructions are now in sensitive
    // locations. When we generate more patches we may force more branches to
    // go out of range, causing more thunks to be generated. In pathological
    // cases this can cause the address dependent content pass not to converge.
    // We fix this by rounding up the size of the ThunkSection to 4KiB, this
    // limits the insertion of a ThunkSection on the addresses modulo 4 KiB,
    // which means that adding Thunks to the section does not invalidate
    // errata patches for following code.
    // Rounding up the size to 4KiB has consequences for code-size and can
    // trip up linker script defined assertions. For example the linux kernel
    // has an assertion that what LLD represents as an InputSectionDescription
    // does not exceed 4 KiB even if the overall OutputSection is > 128 Mib.
    // We use the heuristic of rounding up the size when both of the following
    // conditions are true:
    // 1.) The OutputSection is larger than the ThunkSectionSpacing. This
    //     accounts for the case where no single InputSectionDescription is
    //     larger than the OutputSection size. This is conservative but simple.
    // 2.) The InputSectionDescription is larger than 4 KiB. This will prevent
    //     any assertion failures that an InputSectionDescription is < 4 KiB
    uint64_t isdSize = isd->sections.back()->outSecOff +
                       isd->sections.back()->getSize() -
                       isd->sections.front()->outSecOff;
    if (os->size > ctx.target->getThunkSectionSpacing() && isdSize > 4096)
      ts->roundUpSizeForErrata = true;
  isd->thunkSections.push_back({ts, pass});
2247 static bool isThunkSectionCompatible(InputSection
*source
,
2248 SectionBase
*target
) {
2249 // We can't reuse thunks in different loadable partitions because they might
2250 // not be loaded. But partition 1 (the main partition) will always be loaded.
2251 if (source
->partition
!= target
->partition
)
2252 return target
->partition
== 1;
// Return a (thunk, isNew) pair for the branch described by (isec, rel, src):
// an existing compatible in-range thunk when one exists, otherwise a freshly
// created one (isNew == true).
// NOTE(review): the `keyAddend}];` continuation, the fallback `if (!thunkVec)`
// and the closing brace appear to be elided from this excerpt; verify against
// upstream.
std::pair<Thunk *, bool> ThunkCreator::getThunk(InputSection *isec,
                                                Relocation &rel, uint64_t src) {
  SmallVector<std::unique_ptr<Thunk>, 0> *thunkVec = nullptr;
  // Arm and Thumb have a PC Bias of 8 and 4 respectively, this is cancelled
  // out in the relocation addend. We compensate for the PC bias so that
  // an Arm and Thumb relocation to the same destination get the same keyAddend,
  // which is usually 0.
  const int64_t pcBias = getPCBias(ctx, rel.type);
  const int64_t keyAddend = rel.addend + pcBias;

  // We use a ((section, offset), addend) pair to find the thunk position if
  // possible so that we create only one thunk for aliased symbols or ICFed
  // sections. There may be multiple relocations sharing the same (section,
  // offset + addend) pair. We may revert the relocation back to its original
  // non-Thunk target, so we cannot fold offset + addend.
  if (auto *d = dyn_cast<Defined>(rel.sym))
    if (!d->isInPlt(ctx) && d->section)
      thunkVec = &thunkedSymbolsBySectionAndAddend[{{d->section, d->value},
    thunkVec = &thunkedSymbols[{rel.sym, keyAddend}];

  // Check existing Thunks for Sym to see if they can be reused
  for (auto &t : *thunkVec)
    if (isThunkSectionCompatible(isec, t->getThunkTargetSym()->section) &&
        t->isCompatibleWith(*isec, rel) &&
        ctx.target->inBranchRange(rel.type, src,
                                  t->getThunkTargetSym()->getVA(ctx, -pcBias)))
      return std::make_pair(t.get(), false);

  // No existing compatible Thunk in range, create a new one
  thunkVec->push_back(addThunk(ctx, *isec, rel));
  return std::make_pair(thunkVec->back().get(), true);
// Return a (landing-pad thunk, isNew) pair for defined symbol `d` with addend
// `a`, creating and caching the landing pad on first request.
// NOTE(review): the signature continuation (second parameter line), an
// `if (isNew)` guard and the closing brace appear to be elided from this
// excerpt; verify against upstream.
std::pair<Thunk *, bool> ThunkCreator::getSyntheticLandingPad(Defined &d,
  auto [it, isNew] = landingPadsBySectionAndAddend.try_emplace(
      {{d.section, d.value}, a}, nullptr);
    it->second = addLandingPadThunk(ctx, d, a);
  return {it->second.get(), isNew};
// Return true if the relocation target is an in range Thunk.
// Return false if the relocation is not to a Thunk. If the relocation target
// was originally to a Thunk, but is no longer in range we revert the
// relocation back to its original non-Thunk target.
// NOTE(review): the `return true;`/`return false;` statements and closing
// braces appear to be elided from this excerpt; verify against upstream.
bool ThunkCreator::normalizeExistingThunk(Relocation &rel, uint64_t src) {
  if (Thunk *t = thunks.lookup(rel.sym)) {
    if (ctx.target->inBranchRange(rel.type, src,
                                  rel.sym->getVA(ctx, rel.addend)))
    // Out of range: restore the original destination and addend.
    rel.sym = &t->destination;
    rel.addend = t->addend;
    if (rel.sym->isInPlt(ctx))
      rel.expr = toPlt(rel.expr);
// When indirect branches are restricted, such as AArch64 BTI Thunks may need
// to target a linker generated landing pad instead of the target. This needs
// to be done once per pass as the need for a BTI thunk is dependent whether
// a thunk is short or long. We iterate over all the thunks to make sure we
// catch thunks that have been created but are no longer live. Non-live thunks
// are not reachable via normalizeExistingThunk() but are still written.
// Returns true when a new landing pad was inserted (addresses changed).
// NOTE(review): the `continue;`, the declarations of `lpt`/`isNew` and an
// `if (isNew)` guard appear to be elided from this excerpt; verify against
// upstream.
bool ThunkCreator::addSyntheticLandingPads() {
  bool addressesChanged = false;
  for (Thunk *t : allThunks) {
    if (!t->needsSyntheticLandingPad())
    auto &dr = cast<Defined>(t->destination);
    std::tie(lpt, isNew) = getSyntheticLandingPad(dr, t->addend);
    // A new landing pad is placed immediately before its destination section.
    addressesChanged = true;
    getISThunkSec(cast<InputSection>(dr.section))->addThunk(lpt);
    t->landingPad = lpt->getThunkTargetSym();
  return addressesChanged;
// Process all relocations from the InputSections that have been assigned
// to InputSectionDescriptions and redirect through Thunks if needed. The
// function should be called iteratively until it returns false.
//
// Pre conditions:
// All InputSections that may need a Thunk are reachable from
// OutputSectionCommands.
//
// All OutputSections have an address and all InputSections have an offset
// within the OutputSection.
//
// The offsets between caller (relocation place) and callee
// (relocation target) will not be modified outside of createThunks().
//
// Post conditions:
// If return value is true then ThunkSections have been inserted into
// OutputSections. All relocations that needed a Thunk based on the information
// available to createThunks() on entry have been redirected to a Thunk. Note
// that adding Thunks changes offsets between caller and callee so more Thunks
// may be required.
//
// If return value is false then no more Thunks are needed, and createThunks has
// made no changes. If the target requires range extension thunks, currently
// ARM, then any future change in offset between caller and callee risks a
// relocation out of range error.
// NOTE(review): several short lines (`continue;`, declarations of `t`/
// `isNew`/`ts`, `else`, `ts->addThunk(t);`, closing braces) appear to be
// elided from this excerpt; verify against upstream.
bool ThunkCreator::createThunks(uint32_t pass,
                                ArrayRef<OutputSection *> outputSections) {
  bool addressesChanged = false;

  // Precreate spaced ThunkSections only on the first pass.
  if (pass == 0 && ctx.target->getThunkSectionSpacing())
    createInitialThunkSections(outputSections);

  // BTI landing pads are only a concern on AArch64.
  if (ctx.arg.emachine == EM_AARCH64)
    addressesChanged = addSyntheticLandingPads();

  // Create all the Thunks and insert them into synthetic ThunkSections. The
  // ThunkSections are later inserted back into InputSectionDescriptions.
  // We separate the creation of ThunkSections from the insertion of the
  // ThunkSections as ThunkSections are not always inserted into the same
  // InputSectionDescription as the caller.
  forEachInputSectionDescription(
      outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
        for (InputSection *isec : isd->sections)
          for (Relocation &rel : isec->relocs()) {
            uint64_t src = isec->getVA(rel.offset);

            // If we are a relocation to an existing Thunk, check if it is
            // still in range. If not then Rel will be altered to point to its
            // original target so another Thunk can be generated.
            if (pass > 0 && normalizeExistingThunk(rel, src))

            if (!ctx.target->needsThunk(rel.expr, rel.type, isec->file, src,
                                        *rel.sym, rel.addend))
            std::tie(t, isNew) = getThunk(isec, rel, src);

            // Find or create a ThunkSection for the new Thunk
            if (auto *tis = t->getTargetInputSection())
              ts = getISThunkSec(tis);
              ts = getISDThunkSec(os, isec, isd, rel, src);
            thunks[t->getThunkTargetSym()] = t;
            allThunks.push_back(t);

            // Redirect relocation to Thunk, we never go via the PLT to a Thunk
            rel.sym = t->getThunkTargetSym();
            rel.expr = fromPlt(rel.expr);

            // On AArch64 and PPC, a jump/call relocation may be encoded as
            // STT_SECTION + non-zero addend, clear the addend after
            // redirection (MIPS keeps its addend).
            if (ctx.arg.emachine != EM_MIPS)
              rel.addend = -getPCBias(ctx, rel.type);

        for (auto &p : isd->thunkSections)
          addressesChanged |= p.first->assignOffsets();

  for (auto &p : thunkedSections)
    addressesChanged |= p.second->assignOffsets();

  // Merge all created synthetic ThunkSections back into OutputSection
  mergeThunks(outputSections);
  return addressesChanged;
// The following aid in the conversion of call x@GDPLT to call __tls_get_addr
// hexagonNeedsTLSSymbol scans for relocations that would require a call to
// __tls_get_addr.
// hexagonTLSSymbolUpdate rebinds the relocation to __tls_get_addr.
// NOTE(review): the early-exit/closing-brace lines of the lambda appear to be
// elided from this excerpt; verify against upstream.
bool elf::hexagonNeedsTLSSymbol(ArrayRef<OutputSection *> outputSections) {
  bool needTlsSymbol = false;
  forEachInputSectionDescription(
      outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
        for (InputSection *isec : isd->sections)
          for (Relocation &rel : isec->relocs())
            if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) {
              needTlsSymbol = true;
  return needTlsSymbol;
// Rebind GDPLT-style TLS relocations to the __tls_get_addr symbol, creating a
// single PLT entry for it on first use.
// NOTE(review): the null-check early return, the `if (needEntry)` guard, the
// rebinding statements and closing braces appear to be elided from this
// excerpt; verify against upstream.
void elf::hexagonTLSSymbolUpdate(Ctx &ctx) {
  Symbol *sym = ctx.symtab->find("__tls_get_addr");
  bool needEntry = true;
  forEachInputSectionDescription(
      ctx.outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
        for (InputSection *isec : isd->sections)
          for (Relocation &rel : isec->relocs())
            if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) {
              // Allocate the PLT entry only once, for the first matching
              // relocation.
              sym->allocateAux(ctx);
              addPltEntry(ctx, *ctx.in.plt, *ctx.in.gotPlt, *ctx.in.relaPlt,
                          ctx.target->pltRel, *sym);
// Return true if `osec` is one of the sections a NOCROSSREFS(_TO) command
// forbids references into. The two `return` statements correspond to the
// NOCROSSREFS_TO form (only the first listed section is the "to" target) and
// the plain NOCROSSREFS form (any listed section).
// NOTE(review): the condition line selecting between the two returns appears
// to be elided from this excerpt; verify against upstream.
static bool matchesRefTo(const NoCrossRefCommand &cmd, StringRef osec) {
    return cmd.outputSections[0] == osec;
  return llvm::is_contained(cmd.outputSections, osec);
// Scan the relocations `rels` of one input section `sec` (placed in output
// section `osec`) and report an error for every reference that lands in a
// section forbidden by the NOCROSSREFS(_TO) command `cmd`.
// NOTE(review): the `continue;` after the legality check and the closing
// braces appear to be elided from this excerpt; verify against upstream.
template <class ELFT, class Rels>
static void scanCrossRefs(Ctx &ctx, const NoCrossRefCommand &cmd,
                          OutputSection *osec, InputSection *sec, Rels rels) {
  for (const auto &r : rels) {
    Symbol &sym = sec->file->getSymbol(r.getSymbol(ctx.arg.isMips64EL));
    // A legal cross-reference is when the destination output section is
    // nullptr, osec for a self-reference, or a section that is described by the
    // NOCROSSREFS/NOCROSSREFS_TO command.
    auto *dstOsec = sym.getOutputSection();
    if (!dstOsec || dstOsec == osec || !matchesRefTo(cmd, dstOsec->name))

    // Name the offending destination: the symbol itself, or its section for
    // section symbols.
    std::string toSymName;
    if (!sym.isSection())
      toSymName = toStr(ctx, sym);
    else if (auto *d = dyn_cast<Defined>(&sym))
      toSymName = d->section->name;
    Err(ctx) << sec->getLocation(r.r_offset)
             << ": prohibited cross reference from '" << osec->name << "' to '"
             << toSymName << "' in '" << dstOsec->name << "'";
// For each output section described by at least one NOCROSSREFS(_TO) command,
// scan relocations from its input sections for prohibited cross references.
// NOTE(review): `continue;` statements and closing braces appear to be elided
// from this excerpt; verify against upstream.
template <class ELFT> void elf::checkNoCrossRefs(Ctx &ctx) {
  for (OutputSection *osec : ctx.outputSections) {
    for (const NoCrossRefCommand &noxref : ctx.script->noCrossRefs) {
      // Skip commands that do not cover osec, and the NOCROSSREFS_TO source
      // section itself.
      if (!llvm::is_contained(noxref.outputSections, osec->name) ||
          (noxref.toFirst && noxref.outputSections[0] == osec->name))
      for (SectionCommand *cmd : osec->commands) {
        auto *isd = dyn_cast<InputSectionDescription>(cmd);
        // Sections are independent, so each one can be scanned in parallel.
        parallelForEach(isd->sections, [&](InputSection *sec) {
          invokeOnRelocs(*sec, scanCrossRefs<ELFT>, ctx, noxref, osec, sec);
// Explicit template instantiations for each of the four supported ELF
// variants (32/64-bit, little/big endian).
template void elf::scanRelocations<ELF32LE>(Ctx &);
template void elf::scanRelocations<ELF32BE>(Ctx &);
template void elf::scanRelocations<ELF64LE>(Ctx &);
template void elf::scanRelocations<ELF64BE>(Ctx &);

template void elf::checkNoCrossRefs<ELF32LE>(Ctx &);
template void elf::checkNoCrossRefs<ELF32BE>(Ctx &);
template void elf::checkNoCrossRefs<ELF64LE>(Ctx &);
template void elf::checkNoCrossRefs<ELF64BE>(Ctx &);