[lldb] Fix warning: 'sprintf' is deprecated in RNBSocketTest
[llvm-project.git] / lld / ELF / Relocations.cpp
blob605321b3cc9e3f2d237df68dd6076fcc3f1411ae
1 //===- Relocations.cpp ----------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains platform-independent functions to process relocations.
10 // I'll describe the overview of this file here.
12 // Simple relocations are easy to handle for the linker. For example,
13 // for R_X86_64_PC64 relocs, the linker just has to fix up locations
14 // with the relative offsets to the target symbols. It would just be
15 // reading records from relocation sections and applying them to output.
17 // But not all relocations are that easy to handle. For example, for
18 // R_386_GOTOFF relocs, the linker has to create new GOT entries for
19 // symbols if they don't exist, and fix up locations with GOT entry
20 // offsets from the beginning of GOT section. So there is more than
21 // fixing addresses in relocation processing.
23 // ELF defines a large number of complex relocations.
25 // The functions in this file analyze relocations and do whatever needs
// to be done. It includes, but is not limited to, the following.
28 // - create GOT/PLT entries
29 // - create new relocations in .dynsym to let the dynamic linker resolve
30 // them at runtime (since ELF supports dynamic linking, not all
31 // relocations can be resolved at link-time)
32 // - create COPY relocs and reserve space in .bss
33 // - replace expensive relocs (in terms of runtime cost) with cheap ones
34 // - error out infeasible combinations such as PIC and non-relative relocs
// Note that the functions in this file don't actually apply relocations
// because they don't know about the output file or the output file buffer.
38 // It instead stores Relocation objects to InputSection's Relocations
39 // vector to let it apply later in InputSection::writeTo.
41 //===----------------------------------------------------------------------===//
43 #include "Relocations.h"
44 #include "Config.h"
45 #include "InputFiles.h"
46 #include "LinkerScript.h"
47 #include "OutputSections.h"
48 #include "SymbolTable.h"
49 #include "Symbols.h"
50 #include "SyntheticSections.h"
51 #include "Target.h"
52 #include "Thunks.h"
53 #include "lld/Common/ErrorHandler.h"
54 #include "lld/Common/Memory.h"
55 #include "llvm/ADT/SmallSet.h"
56 #include "llvm/BinaryFormat/ELF.h"
57 #include "llvm/Demangle/Demangle.h"
58 #include "llvm/Support/Endian.h"
59 #include <algorithm>
61 using namespace llvm;
62 using namespace llvm::ELF;
63 using namespace llvm::object;
64 using namespace llvm::support::endian;
65 using namespace lld;
66 using namespace lld::elf;
68 static std::optional<std::string> getLinkerScriptLocation(Ctx &ctx,
69 const Symbol &sym) {
70 for (SectionCommand *cmd : ctx.script->sectionCommands)
71 if (auto *assign = dyn_cast<SymbolAssignment>(cmd))
72 if (assign->sym == &sym)
73 return assign->location;
74 return std::nullopt;
77 static void printDefinedLocation(ELFSyncStream &s, const Symbol &sym) {
78 s << "\n>>> defined in ";
79 if (sym.file)
80 return void(s << sym.file);
81 if (std::optional<std::string> loc = getLinkerScriptLocation(s.ctx, sym))
82 return void(s << *loc);
85 // Construct a message in the following format.
87 // >>> defined in /home/alice/src/foo.o
88 // >>> referenced by bar.c:12 (/home/alice/src/bar.c:12)
89 // >>> /home/alice/src/bar.o:(.text+0x1)
90 static void printLocation(ELFSyncStream &s, InputSectionBase &sec,
91 const Symbol &sym, uint64_t off) {
92 printDefinedLocation(s, sym);
93 s << "\n>>> referenced by ";
94 auto tell = s.tell();
95 s << sec.getSrcMsg(sym, off);
96 if (tell != s.tell())
97 s << "\n>>> ";
98 s << sec.getObjMsg(off);
// Report a relocation whose computed value `v` does not fit in the range
// [min, max] required by relocation type `rel.type`. Emits the faulting
// location, the referenced symbol/section, and machine-specific hints.
void elf::reportRangeError(Ctx &ctx, uint8_t *loc, const Relocation &rel,
                           const Twine &v, int64_t min, uint64_t max) {
  ErrorPlace errPlace = getErrorPlace(ctx, loc);
  auto diag = Err(ctx);
  diag << errPlace.loc << "relocation " << rel.type
       << " out of range: " << v.str() << " is not in [" << min << ", " << max
       << ']';

  if (rel.sym) {
    // Prefer the symbol name; for section symbols name the section instead.
    if (!rel.sym->isSection())
      diag << "; references '" << rel.sym << '\'';
    else if (auto *d = dyn_cast<Defined>(rel.sym))
      diag << "; references section '" << d->section->name << "'";

    // x86-64 medium/large code model hint: a 32-bit PC-relative reference
    // into a SHF_X86_64_LARGE section is expected to overflow.
    if (ctx.arg.emachine == EM_X86_64 && rel.type == R_X86_64_PC32 &&
        rel.sym->getOutputSection() &&
        (rel.sym->getOutputSection()->flags & SHF_X86_64_LARGE)) {
      diag << "; R_X86_64_PC32 should not reference a section marked "
              "SHF_X86_64_LARGE";
    }
  }
  if (!errPlace.srcLoc.empty())
    diag << "\n>>> referenced by " << errPlace.srcLoc;
  if (rel.sym && !rel.sym->isSection())
    printDefinedLocation(diag, *rel.sym);

  // Overflows in .debug_* are often caused by huge monolithic debug info.
  if (errPlace.isec && errPlace.isec->name.starts_with(".debug"))
    diag << "; consider recompiling with -fdebug-types-section to reduce size "
            "of debug sections";
}
// Report that value `v` does not fit in an n-bit signed field. `msg` names
// the entity being encoded (e.g. a thunk offset); `sym`, when named, is the
// referenced symbol whose definition site is appended to the diagnostic.
void elf::reportRangeError(Ctx &ctx, uint8_t *loc, int64_t v, int n,
                           const Symbol &sym, const Twine &msg) {
  auto diag = Err(ctx);
  diag << getErrorPlace(ctx, loc).loc << msg << " is out of range: " << v
       << " is not in [" << llvm::minIntN(n) << ", " << llvm::maxIntN(n) << "]";
  if (!sym.getName().empty()) {
    diag << "; references '" << &sym << '\'';
    printDefinedLocation(diag, sym);
  }
}
// Build a 64-bit mask with one bit set for each argument value in [0, 64);
// values outside that range contribute no bits. The zero-argument overload
// terminates the recursion.
static constexpr uint64_t buildMask() { return 0; }

template <typename... Tails>
static constexpr uint64_t buildMask(int head, Tails... tails) {
  uint64_t bit = (head >= 0 && head < 64) ? uint64_t(1) << head : uint64_t(0);
  return bit | buildMask(tails...);
}
// Return true if `Expr` is one of `Exprs`.
// There are more than 64 but less than 128 RelExprs, so we divide the set of
// exprs into [0, 64) and [64, 128) and represent each range as a constant
// 64-bit mask. Then we decide which mask to test depending on the value of
// expr and use a simple shift and bitwise-and to test for membership.
template <RelExpr... Exprs> static bool oneof(RelExpr expr) {
  assert(0 <= expr && (int)expr < 128 &&
         "RelExpr is too large for 128-bit mask!");

  // Shift the query and the candidate set into the same 64-value window;
  // buildMask drops candidates that fall outside [0, 64) after the shift.
  if (expr >= 64)
    return (uint64_t(1) << (expr - 64)) & buildMask((Exprs - 64)...);
  return (uint64_t(1) << expr) & buildMask(Exprs...);
}
// Returns the relocation type expected to be paired with `type` to form a
// complete addend (MIPS splits 32-bit addends across HI16/LO16 pairs).
// Returns R_MIPS_NONE when `type` needs no pair.
static RelType getMipsPairType(RelType type, bool isLocal) {
  switch (type) {
  case R_MIPS_HI16:
    return R_MIPS_LO16;
  case R_MIPS_GOT16:
    // In case of global symbol, the R_MIPS_GOT16 relocation does not
    // have a pair. Each global symbol has a unique entry in the GOT
    // and a corresponding instruction with help of the R_MIPS_GOT16
    // relocation loads an address of the symbol. In case of local
    // symbol, the R_MIPS_GOT16 relocation creates a GOT entry to hold
    // the high 16 bits of the symbol's value. A paired R_MIPS_LO16
    // relocations handle low 16 bits of the address. That allows
    // to allocate only one GOT entry for every 64 KBytes of local data.
    return isLocal ? R_MIPS_LO16 : R_MIPS_NONE;
  case R_MICROMIPS_GOT16:
    return isLocal ? R_MICROMIPS_LO16 : R_MIPS_NONE;
  case R_MIPS_PCHI16:
    return R_MIPS_PCLO16;
  case R_MICROMIPS_HI16:
    return R_MICROMIPS_LO16;
  default:
    return R_MIPS_NONE;
  }
}
191 // True if non-preemptable symbol always has the same value regardless of where
192 // the DSO is loaded.
193 static bool isAbsolute(const Symbol &sym) {
194 if (sym.isUndefWeak())
195 return true;
196 if (const auto *dr = dyn_cast<Defined>(&sym))
197 return dr->section == nullptr; // Absolute symbol.
198 return false;
201 static bool isAbsoluteValue(const Symbol &sym) {
202 return isAbsolute(sym) || sym.isTls();
// Returns true if Expr refers a PLT entry.
static bool needsPlt(RelExpr expr) {
  return oneof<R_PLT, R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT, R_GOTPLT_GOTREL,
               R_GOTPLT_PC, R_LOONGARCH_PLT_PAGE_PC, R_PPC32_PLTREL,
               R_PPC64_CALL_PLT>(expr);
}
// Returns true if Expr requires a GOT entry to be created for the symbol.
bool lld::elf::needsGot(RelExpr expr) {
  return oneof<R_GOT, R_GOT_OFF, R_MIPS_GOT_LOCAL_PAGE, R_MIPS_GOT_OFF,
               R_MIPS_GOT_OFF32, R_AARCH64_GOT_PAGE_PC, R_GOT_PC, R_GOTPLT,
               R_AARCH64_GOT_PAGE, R_LOONGARCH_GOT, R_LOONGARCH_GOT_PAGE_PC>(
      expr);
}
// True if this expression is of the form Sym - X, where X is a position in the
// file (PC, or GOT for example).
static bool isRelExpr(RelExpr expr) {
  return oneof<R_PC, R_GOTREL, R_GOTPLTREL, R_ARM_PCA, R_MIPS_GOTREL,
               R_PPC64_CALL, R_PPC64_RELAX_TOC, R_AARCH64_PAGE_PC,
               R_RELAX_GOT_PC, R_RISCV_PC_INDIRECT, R_PPC64_RELAX_GOT_PC,
               R_LOONGARCH_PAGE_PC>(expr);
}
228 static RelExpr toPlt(RelExpr expr) {
229 switch (expr) {
230 case R_LOONGARCH_PAGE_PC:
231 return R_LOONGARCH_PLT_PAGE_PC;
232 case R_PPC64_CALL:
233 return R_PPC64_CALL_PLT;
234 case R_PC:
235 return R_PLT_PC;
236 case R_ABS:
237 return R_PLT;
238 case R_GOTREL:
239 return R_PLT_GOTREL;
240 default:
241 return expr;
245 static RelExpr fromPlt(RelExpr expr) {
246 // We decided not to use a plt. Optimize a reference to the plt to a
247 // reference to the symbol itself.
248 switch (expr) {
249 case R_PLT_PC:
250 case R_PPC32_PLTREL:
251 return R_PC;
252 case R_LOONGARCH_PLT_PAGE_PC:
253 return R_LOONGARCH_PAGE_PC;
254 case R_PPC64_CALL_PLT:
255 return R_PPC64_CALL;
256 case R_PLT:
257 return R_ABS;
258 case R_PLT_GOTPLT:
259 return R_GOTPLTREL;
260 case R_PLT_GOTREL:
261 return R_GOTREL;
262 default:
263 return expr;
// Returns true if a given shared symbol is in a read-only segment in a DSO.
template <class ELFT> static bool isReadOnly(SharedSymbol &ss) {
  using Elf_Phdr = typename ELFT::Phdr;

  // Determine if the symbol is read-only by scanning the DSO's program headers.
  // PT_GNU_RELRO is included: such data is read-only after relocation, so a
  // copy of it belongs in .bss.rel.ro rather than plain .bss.
  const auto &file = cast<SharedFile>(*ss.file);
  for (const Elf_Phdr &phdr :
       check(file.template getObj<ELFT>().program_headers()))
    if ((phdr.p_type == ELF::PT_LOAD || phdr.p_type == ELF::PT_GNU_RELRO) &&
        !(phdr.p_flags & ELF::PF_W) && ss.value >= phdr.p_vaddr &&
        ss.value < phdr.p_vaddr + phdr.p_memsz)
      return true;
  return false;
}
// Returns symbols at the same offset as a given symbol, including SS itself.
//
// If two or more symbols are at the same offset, and at least one of
// them are copied by a copy relocation, all of them need to be copied.
// Otherwise, they would refer to different places at runtime.
template <class ELFT>
static SmallSet<SharedSymbol *, 4> getSymbolsAt(Ctx &ctx, SharedSymbol &ss) {
  using Elf_Sym = typename ELFT::Sym;

  const auto &file = cast<SharedFile>(*ss.file);

  SmallSet<SharedSymbol *, 4> ret;
  for (const Elf_Sym &s : file.template getGlobalELFSyms<ELFT>()) {
    // Skip undefined/absolute/TLS entries and symbols at other addresses.
    if (s.st_shndx == SHN_UNDEF || s.st_shndx == SHN_ABS ||
        s.getType() == STT_TLS || s.st_value != ss.value)
      continue;
    StringRef name = check(s.getName(file.getStringTable()));
    Symbol *sym = ctx.symtab->find(name);
    if (auto *alias = dyn_cast_or_null<SharedSymbol>(sym))
      ret.insert(alias);
  }

  // The loop does not check SHT_GNU_verneed, so ret does not contain
  // non-default version symbols. If ss has a non-default version, ret won't
  // contain ss. Just add ss unconditionally. If a non-default version alias is
  // separately copy relocated, it and ss will have different addresses.
  // Fortunately this case is impractical and fails with GNU ld as well.
  ret.insert(&ss);
  return ret;
}
// When a symbol is copy relocated or we create a canonical plt entry, it is
// effectively a defined symbol. In the case of copy relocation the symbol is
// in .bss and in the case of a canonical plt entry it is in .plt. This function
// replaces the existing symbol with a Defined pointing to the appropriate
// location.
static void replaceWithDefined(Ctx &ctx, Symbol &sym, SectionBase &sec,
                               uint64_t value, uint64_t size) {
  // Save the original so selected attributes can be restored after overwrite.
  Symbol old = sym;
  Defined(ctx, sym.file, StringRef(), sym.binding, sym.stOther, sym.type, value,
          size, &sec)
      .overwrite(sym);

  sym.versionId = old.versionId;
  sym.exportDynamic = true;
  sym.isUsedInRegularObj = true;
  // A copy relocated alias may need a GOT entry.
  sym.flags.store(old.flags.load(std::memory_order_relaxed) & NEEDS_GOT,
                  std::memory_order_relaxed);
}
// Reserve space in .bss or .bss.rel.ro for copy relocation.
//
// The copy relocation is pretty much a hack. If you use a copy relocation
// in your program, not only the symbol name but the symbol's size, RW/RO
// bit and alignment become part of the ABI. In addition to that, if the
// symbol has aliases, the aliases become part of the ABI. That's subtle,
// but if you violate that implicit ABI, that can cause very counter-
// intuitive consequences.
//
// So, what is the copy relocation? It's for linking non-position
// independent code to DSOs. In an ideal world, all references to data
// exported by DSOs should go indirectly through GOT. But if object files
// are compiled as non-PIC, all data references are direct. There is no
// way for the linker to transform the code to use GOT, as machine
// instructions are already set in stone in object files. This is where
// the copy relocation takes a role.
//
// A copy relocation instructs the dynamic linker to copy data from a DSO
// to a specified address (which is usually in .bss) at load-time. If the
// static linker (that's us) finds a direct data reference to a DSO
// symbol, it creates a copy relocation, so that the symbol can be
// resolved as if it were in .bss rather than in a DSO.
//
// As you can see in this function, we create a copy relocation for the
// dynamic linker, and the relocation contains not only symbol name but
// various other information about the symbol. So, such attributes become a
// part of the ABI.
//
// Note for application developers: I can give you a piece of advice if
// you are writing a shared library. You probably should export only
// functions from your library. You shouldn't export variables.
//
// As an example what can happen when you export variables without knowing
// the semantics of copy relocations, assume that you have an exported
// variable of type T. It is an ABI-breaking change to add new members at
// end of T even though doing that doesn't change the layout of the
// existing members. That's because the space for the new members are not
// reserved in .bss unless you recompile the main program. That means they
// are likely to overlap with other data that happens to be laid out next
// to the variable in .bss. This kind of issue is sometimes very hard to
// debug. What's a solution? Instead of exporting a variable V from a DSO,
// define an accessor getV().
template <class ELFT> static void addCopyRelSymbol(Ctx &ctx, SharedSymbol &ss) {
  // Copy relocation against zero-sized symbol doesn't make sense.
  uint64_t symSize = ss.getSize();
  if (symSize == 0 || ss.alignment == 0)
    Err(ctx) << "cannot create a copy relocation for symbol " << &ss;

  // See if this symbol is in a read-only segment. If so, preserve the symbol's
  // memory protection by reserving space in the .bss.rel.ro section.
  bool isRO = isReadOnly<ELFT>(ss);
  BssSection *sec = make<BssSection>(ctx, isRO ? ".bss.rel.ro" : ".bss",
                                     symSize, ss.alignment);
  OutputSection *osec = (isRO ? ctx.in.bssRelRo : ctx.in.bss)->getParent();

  // At this point, sectionBases has been migrated to sections. Append sec to
  // sections.
  if (osec->commands.empty() ||
      !isa<InputSectionDescription>(osec->commands.back()))
    osec->commands.push_back(make<InputSectionDescription>(""));
  auto *isd = cast<InputSectionDescription>(osec->commands.back());
  isd->sections.push_back(sec);
  osec->commitSection(sec);

  // Look through the DSO's dynamic symbol table for aliases and create a
  // dynamic symbol for each one. This causes the copy relocation to correctly
  // interpose any aliases.
  for (SharedSymbol *sym : getSymbolsAt<ELFT>(ctx, ss))
    replaceWithDefined(ctx, *sym, *sec, 0, sym->size);

  ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->copyRel, *sec, 0, ss);
}
// .eh_frame sections are mergeable input sections, so their input
// offsets are not linearly mapped to output section. For each input
// offset, we need to find a section piece containing the offset and
// add the piece's base address to the input offset to compute the
// output offset. That isn't cheap.
//
// This class is to speed up the offset computation. When we process
// relocations, we access offsets in the monotonically increasing
// order. So we can optimize for that access pattern.
//
// For sections other than .eh_frame, this class doesn't do anything.
namespace {
class OffsetGetter {
public:
  OffsetGetter() = default;
  explicit OffsetGetter(InputSectionBase &sec) {
    // Only .eh_frame sections get piece tables; others pass offsets through.
    if (auto *eh = dyn_cast<EhInputSection>(&sec)) {
      cies = eh->cies;
      fdes = eh->fdes;
      i = cies.begin();
      j = fdes.begin();
    }
  }

  // Translates offsets in input sections to offsets in output sections.
  // Given offset must increase monotonically. We assume that Piece is
  // sorted by inputOff.
  uint64_t get(Ctx &ctx, uint64_t off) {
    if (cies.empty())
      return off;

    // Advance the FDE cursor past pieces that start at or before `off`;
    // j[-1] is then the candidate FDE piece containing the offset.
    while (j != fdes.end() && j->inputOff <= off)
      ++j;
    auto it = j;
    if (j == fdes.begin() || j[-1].inputOff + j[-1].size <= off) {
      // Not inside an FDE piece; try the CIE pieces the same way.
      while (i != cies.end() && i->inputOff <= off)
        ++i;
      if (i == cies.begin() || i[-1].inputOff + i[-1].size <= off)
        Fatal(ctx) << ".eh_frame: relocation is not in any piece";
      it = i;
    }

    // Offset -1 means that the piece is dead (i.e. garbage collected).
    if (it[-1].outputOff == -1)
      return -1;
    return it[-1].outputOff + (off - it[-1].inputOff);
  }

private:
  ArrayRef<EhSectionPiece> cies, fdes;
  ArrayRef<EhSectionPiece>::iterator i, j;
};
// This class encapsulates states needed to scan relocations for one
// InputSectionBase.
class RelocationScanner {
public:
  RelocationScanner(Ctx &ctx) : ctx(ctx) {}
  // Entry point: scan all relocations of section `s`. `isEH` selects
  // .eh_frame handling (piece-based offset translation).
  template <class ELFT>
  void scanSection(InputSectionBase &s, bool isEH = false);

private:
  Ctx &ctx;
  // The section currently being scanned (set by scanSection).
  InputSectionBase *sec;
  OffsetGetter getter;

  // End of relocations, used by Mips/PPC64.
  const void *end = nullptr;

  template <class RelTy> RelType getMipsN32RelType(RelTy *&rel) const;
  template <class ELFT, class RelTy>
  int64_t computeMipsAddend(const RelTy &rel, RelExpr expr, bool isLocal) const;
  bool isStaticLinkTimeConstant(RelExpr e, RelType type, const Symbol &sym,
                                uint64_t relOff) const;
  void processAux(RelExpr expr, RelType type, uint64_t offset, Symbol &sym,
                  int64_t addend) const;
  unsigned handleTlsRelocation(RelExpr expr, RelType type, uint64_t offset,
                               Symbol &sym, int64_t addend);

  template <class ELFT, class RelTy>
  void scanOne(typename Relocs<RelTy>::const_iterator &i);
  template <class ELFT, class RelTy> void scan(Relocs<RelTy> rels);
};
} // namespace
// MIPS has an odd notion of "paired" relocations to calculate addends.
// For example, if a relocation is of R_MIPS_HI16, there must be a
// R_MIPS_LO16 relocation after that, and an addend is calculated using
// the two relocations.
template <class ELFT, class RelTy>
int64_t RelocationScanner::computeMipsAddend(const RelTy &rel, RelExpr expr,
                                             bool isLocal) const {
  if (expr == R_MIPS_GOTREL && isLocal)
    return sec->getFile<ELFT>()->mipsGp0;

  // The ABI says that the paired relocation is used only for REL.
  // See p. 4-17 at ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
  // This generalises to relocation types with implicit addends.
  if (RelTy::HasAddend)
    return 0;

  RelType type = rel.getType(ctx.arg.isMips64EL);
  RelType pairTy = getMipsPairType(type, isLocal);
  if (pairTy == R_MIPS_NONE)
    return 0;

  const uint8_t *buf = sec->content().data();
  uint32_t symIndex = rel.getSymbol(ctx.arg.isMips64EL);

  // To make things worse, paired relocations might not be contiguous in
  // the relocation table, so we need to do linear search. *sigh*
  for (const RelTy *ri = &rel; ri != static_cast<const RelTy *>(end); ++ri)
    if (ri->getType(ctx.arg.isMips64EL) == pairTy &&
        ri->getSymbol(ctx.arg.isMips64EL) == symIndex)
      return ctx.target->getImplicitAddend(buf + ri->r_offset, pairTy);

  Warn(ctx) << "can't find matching " << pairTy << " relocation for " << type;
  return 0;
}
// Custom error message if Sym is defined in a discarded section.
template <class ELFT>
static void maybeReportDiscarded(Ctx &ctx, ELFSyncStream &msg, Undefined &sym) {
  auto *file = dyn_cast_or_null<ObjFile<ELFT>>(sym.file);
  if (!file || !sym.discardedSecIdx)
    return;
  ArrayRef<typename ELFT::Shdr> objSections =
      file->template getELFShdrs<ELFT>();

  if (sym.type == ELF::STT_SECTION) {
    msg << "relocation refers to a discarded section: ";
    msg << CHECK2(
        file->getObj().getSectionName(objSections[sym.discardedSecIdx]), file);
  } else {
    msg << "relocation refers to a symbol in a discarded section: " << &sym;
  }
  msg << "\n>>> defined in " << file;

  // discardedSecIdx is the section index; the section preceding a group
  // member at index N is checked for SHT_GROUP to detect COMDAT discards.
  Elf_Shdr_Impl<ELFT> elfSec = objSections[sym.discardedSecIdx - 1];
  if (elfSec.sh_type != SHT_GROUP)
    return;

  // If the discarded section is a COMDAT.
  StringRef signature = file->getShtGroupSignature(objSections, elfSec);
  if (const InputFile *prevailing =
          ctx.symtab->comdatGroups.lookup(CachedHashStringRef(signature))) {
    msg << "\n>>> section group signature: " << signature
        << "\n>>> prevailing definition is in " << prevailing;
    if (sym.nonPrevailing) {
      msg << "\n>>> or the symbol in the prevailing group had STB_WEAK "
             "binding and the symbol in a non-prevailing group had STB_GLOBAL "
             "binding. Mixing groups with STB_WEAK and STB_GLOBAL binding "
             "signature is not supported";
    }
  }
}
563 // Check whether the definition name def is a mangled function name that matches
564 // the reference name ref.
565 static bool canSuggestExternCForCXX(StringRef ref, StringRef def) {
566 llvm::ItaniumPartialDemangler d;
567 std::string name = def.str();
568 if (d.partialDemangle(name.c_str()))
569 return false;
570 char *buf = d.getFunctionName(nullptr, nullptr);
571 if (!buf)
572 return false;
573 bool ret = ref == buf;
574 free(buf);
575 return ret;
// Suggest an alternative spelling of an "undefined symbol" diagnostic. Returns
// the suggested symbol, which is either in the symbol table, or in the same
// file of sym.
static const Symbol *getAlternativeSpelling(Ctx &ctx, const Undefined &sym,
                                            std::string &pre_hint,
                                            std::string &post_hint) {
  DenseMap<StringRef, const Symbol *> map;
  if (sym.file && sym.file->kind() == InputFile::ObjKind) {
    auto *file = cast<ELFFileBase>(sym.file);
    // If sym is a symbol defined in a discarded section, maybeReportDiscarded()
    // will give an error. Don't suggest an alternative spelling.
    if (file && sym.discardedSecIdx != 0 &&
        file->getSections()[sym.discardedSecIdx] == &InputSection::discarded)
      return nullptr;

    // Build a map of local defined symbols.
    for (const Symbol *s : sym.file->getSymbols())
      if (s->isLocal() && s->isDefined() && !s->getName().empty())
        map.try_emplace(s->getName(), s);
  }

  // Returns a symbol named `newName` if one is defined locally in sym's file
  // or defined (not undefined) in the global symbol table.
  auto suggest = [&](StringRef newName) -> const Symbol * {
    // If defined locally.
    if (const Symbol *s = map.lookup(newName))
      return s;

    // If in the symbol table and not undefined.
    if (const Symbol *s = ctx.symtab->find(newName))
      if (!s->isUndefined())
        return s;

    return nullptr;
  };

  // This loop enumerates all strings of Levenshtein distance 1 as typo
  // correction candidates and suggests the one that exists as a non-undefined
  // symbol.
  StringRef name = sym.getName();
  for (size_t i = 0, e = name.size(); i != e + 1; ++i) {
    // Insert a character before name[i].
    std::string newName = (name.substr(0, i) + "0" + name.substr(i)).str();
    // Candidate characters span ASCII '0'..'z', covering digits, letters,
    // and common identifier punctuation such as '_'.
    for (char c = '0'; c <= 'z'; ++c) {
      newName[i] = c;
      if (const Symbol *s = suggest(newName))
        return s;
    }
    if (i == e)
      break;

    // Substitute name[i].
    newName = std::string(name);
    for (char c = '0'; c <= 'z'; ++c) {
      newName[i] = c;
      if (const Symbol *s = suggest(newName))
        return s;
    }

    // Transpose name[i] and name[i+1]. This is of edit distance 2 but it is
    // common.
    if (i + 1 < e) {
      newName[i] = name[i + 1];
      newName[i + 1] = name[i];
      if (const Symbol *s = suggest(newName))
        return s;
    }

    // Delete name[i].
    newName = (name.substr(0, i) + name.substr(i + 1)).str();
    if (const Symbol *s = suggest(newName))
      return s;
  }

  // Case mismatch, e.g. Foo vs FOO.
  for (auto &it : map)
    if (name.equals_insensitive(it.first))
      return it.second;
  for (Symbol *sym : ctx.symtab->getSymbols())
    if (!sym->isUndefined() && name.equals_insensitive(sym->getName()))
      return sym;

  // The reference may be a mangled name while the definition is not. Suggest a
  // missing extern "C".
  if (name.starts_with("_Z")) {
    std::string buf = name.str();
    llvm::ItaniumPartialDemangler d;
    if (!d.partialDemangle(buf.c_str()))
      if (char *buf = d.getFunctionName(nullptr, nullptr)) {
        const Symbol *s = suggest(buf);
        free(buf);
        if (s) {
          pre_hint = ": extern \"C\" ";
          return s;
        }
      }
  } else {
    // The reference is unmangled: look for a mangled definition that
    // demangles to the same function name.
    const Symbol *s = nullptr;
    for (auto &it : map)
      if (canSuggestExternCForCXX(name, it.first)) {
        s = it.second;
        break;
      }
    if (!s)
      for (Symbol *sym : ctx.symtab->getSymbols())
        if (canSuggestExternCForCXX(name, sym->getName())) {
          s = sym;
          break;
        }
    if (s) {
      pre_hint = " to declare ";
      post_hint = " as extern \"C\"?";
      return s;
    }
  }

  return nullptr;
}
695 static void reportUndefinedSymbol(Ctx &ctx, const UndefinedDiag &undef,
696 bool correctSpelling) {
697 Undefined &sym = *undef.sym;
698 ELFSyncStream msg(ctx, DiagLevel::None);
700 auto visibility = [&]() {
701 switch (sym.visibility()) {
702 case STV_INTERNAL:
703 return "internal ";
704 case STV_HIDDEN:
705 return "hidden ";
706 case STV_PROTECTED:
707 return "protected ";
708 default:
709 return "";
713 switch (ctx.arg.ekind) {
714 case ELF32LEKind:
715 maybeReportDiscarded<ELF32LE>(ctx, msg, sym);
716 break;
717 case ELF32BEKind:
718 maybeReportDiscarded<ELF32BE>(ctx, msg, sym);
719 break;
720 case ELF64LEKind:
721 maybeReportDiscarded<ELF64LE>(ctx, msg, sym);
722 break;
723 case ELF64BEKind:
724 maybeReportDiscarded<ELF64BE>(ctx, msg, sym);
725 break;
726 default:
727 llvm_unreachable("");
729 if (msg.str().empty())
730 msg << "undefined " << visibility() << "symbol: " << &sym;
732 const size_t maxUndefReferences = 3;
733 for (UndefinedDiag::Loc l :
734 ArrayRef(undef.locs).take_front(maxUndefReferences)) {
735 InputSectionBase &sec = *l.sec;
736 uint64_t offset = l.offset;
738 msg << "\n>>> referenced by ";
739 // In the absence of line number information, utilize DW_TAG_variable (if
740 // present) for the enclosing symbol (e.g. var in `int *a[] = {&undef};`).
741 Symbol *enclosing = sec.getEnclosingSymbol(offset);
743 ELFSyncStream msg1(ctx, DiagLevel::None);
744 auto tell = msg.tell();
745 msg << sec.getSrcMsg(enclosing ? *enclosing : sym, offset);
746 if (tell != msg.tell())
747 msg << "\n>>> ";
748 msg << sec.getObjMsg(offset);
751 if (maxUndefReferences < undef.locs.size())
752 msg << "\n>>> referenced " << (undef.locs.size() - maxUndefReferences)
753 << " more times";
755 if (correctSpelling) {
756 std::string pre_hint = ": ", post_hint;
757 if (const Symbol *corrected =
758 getAlternativeSpelling(ctx, sym, pre_hint, post_hint)) {
759 msg << "\n>>> did you mean" << pre_hint << corrected << post_hint;
760 if (corrected->file)
761 msg << "\n>>> defined in: " << corrected->file;
765 if (sym.getName().starts_with("_ZTV"))
766 msg << "\n>>> the vtable symbol may be undefined because the class is "
767 "missing its key function "
768 "(see https://lld.llvm.org/missingkeyfunction)";
769 if (ctx.arg.gcSections && ctx.arg.zStartStopGC &&
770 sym.getName().starts_with("__start_")) {
771 msg << "\n>>> the encapsulation symbol needs to be retained under "
772 "--gc-sections properly; consider -z nostart-stop-gc "
773 "(see https://lld.llvm.org/ELF/start-stop-gc)";
776 if (undef.isWarning)
777 Warn(ctx) << msg.str();
778 else
779 ctx.e.error(msg.str(), ErrorTag::SymbolNotFound, {sym.getName()});
void elf::reportUndefinedSymbols(Ctx &ctx) {
  // Find the first "undefined symbol" diagnostic for each symbol, and
  // collect all "referenced from" lines at the first diagnostic.
  DenseMap<Symbol *, UndefinedDiag *> firstRef;
  for (UndefinedDiag &undef : ctx.undefErrs) {
    assert(undef.locs.size() == 1);
    if (UndefinedDiag *canon = firstRef.lookup(undef.sym)) {
      // Fold this reference into the first diagnostic for the symbol and
      // empty this one so it is skipped below.
      canon->locs.push_back(undef.locs[0]);
      undef.locs.clear();
    } else
      firstRef[undef.sym] = &undef;
  }

  // Enable spell corrector for the first 2 diagnostics.
  for (auto [i, undef] : llvm::enumerate(ctx.undefErrs))
    if (!undef.locs.empty())
      reportUndefinedSymbol(ctx, undef, i < 2);
}
// Report an undefined symbol if necessary.
// Returns true if the undefined symbol will produce an error message.
static bool maybeReportUndefined(Ctx &ctx, Undefined &sym,
                                 InputSectionBase &sec, uint64_t offset) {
  // Serializes pushes onto ctx.undefErrs from the relocation scan.
  std::lock_guard<std::mutex> lock(ctx.relocMutex);
  // If versioned, issue an error (even if the symbol is weak) because we don't
  // know the defining filename which is required to construct a Verneed entry.
  if (sym.hasVersionSuffix) {
    ctx.undefErrs.push_back({&sym, {{&sec, offset}}, false});
    return true;
  }
  if (sym.isWeak())
    return false;

  bool canBeExternal = !sym.isLocal() && sym.visibility() == STV_DEFAULT;
  if (ctx.arg.unresolvedSymbols == UnresolvedPolicy::Ignore && canBeExternal)
    return false;

  // clang (as of 2019-06-12) / gcc (as of 8.2.1) PPC64 may emit a .rela.toc
  // which references a switch table in a discarded .rodata/.text section. The
  // .toc and the .rela.toc are incorrectly not placed in the comdat. The ELF
  // spec says references from outside the group to a STB_LOCAL symbol are not
  // allowed. Work around the bug.
  //
  // PPC32 .got2 is similar but cannot be fixed. Multiple .got2 is infeasible
  // because .LC0-.LTOC is not representable if the two labels are in different
  // .got2
  if (sym.discardedSecIdx != 0 && (sec.name == ".got2" || sec.name == ".toc"))
    return false;

  bool isWarning =
      (ctx.arg.unresolvedSymbols == UnresolvedPolicy::Warn && canBeExternal) ||
      ctx.arg.noinhibitExec;
  ctx.undefErrs.push_back({&sym, {{&sec, offset}}, isWarning});
  return !isWarning;
}
// MIPS N32 ABI treats series of successive relocations with the same offset
// as a single relocation. A similar approach is used by the N64 ABI, but
// that ABI packs all relocations into a single relocation record. Here we
// emulate this for the N32 ABI. Iterate over relocations with the same
// offset and put their types into a single bit-set.
template <class RelTy>
RelType RelocationScanner::getMipsN32RelType(RelTy *&rel) const {
  uint32_t type = 0;
  uint64_t offset = rel->r_offset;

  // Pack successive 8-bit type codes into `type`, low byte first; `rel` is
  // advanced past the consumed records.
  int n = 0;
  while (rel != static_cast<const RelTy *>(end) && rel->r_offset == offset)
    type |= (rel++)->getType(ctx.arg.isMips64EL) << (8 * n++);
  return type;
}
// Record a dynamic relative relocation for `sym` at isec+offsetInSec,
// choosing between .relr.dyn (compact) and .rela.dyn. `shard` selects the
// per-thread relocation vectors used by the parallel scan.
template <bool shard = false>
static void addRelativeReloc(Ctx &ctx, InputSectionBase &isec,
                             uint64_t offsetInSec, Symbol &sym, int64_t addend,
                             RelExpr expr, RelType type) {
  Partition &part = isec.getPartition(ctx);

  if (sym.isTagged()) {
    std::lock_guard<std::mutex> lock(ctx.relocMutex);
    part.relaDyn->addRelativeReloc(ctx.target->relativeRel, isec, offsetInSec,
                                   sym, addend, type, expr);
    // With MTE globals, we always want to derive the address tag by `ldg`-ing
    // the symbol. When we have a RELATIVE relocation though, we no longer have
    // a reference to the symbol. Because of this, when we have an addend that
    // puts the result of the RELATIVE relocation out-of-bounds of the symbol
    // (e.g. the addend is outside of [0, sym.getSize()]), the AArch64 MemtagABI
    // says we should store the offset to the start of the symbol in the target
    // field. This is described in further detail in:
    // https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#841extended-semantics-of-r_aarch64_relative
    if (addend < 0 || static_cast<uint64_t>(addend) >= sym.getSize())
      isec.relocations.push_back({expr, type, offsetInSec, addend, &sym});
    return;
  }

  // Add a relative relocation. If relrDyn section is enabled, and the
  // relocation offset is guaranteed to be even, add the relocation to
  // the relrDyn section, otherwise add it to the relaDyn section.
  // relrDyn sections don't support odd offsets. Also, relrDyn sections
  // don't store the addend values, so we must write it to the relocated
  // address.
  if (part.relrDyn && isec.addralign >= 2 && offsetInSec % 2 == 0) {
    isec.addReloc({expr, type, offsetInSec, addend, &sym});
    if (shard)
      part.relrDyn->relocsVec[parallel::getThreadIndex()].push_back(
          {&isec, isec.relocs().size() - 1});
    else
      part.relrDyn->relocs.push_back({&isec, isec.relocs().size() - 1});
    return;
  }
  part.relaDyn->addRelativeReloc<shard>(ctx.target->relativeRel, isec,
                                        offsetInSec, sym, addend, type, expr);
}
896 template <class PltSection, class GotPltSection>
897 static void addPltEntry(Ctx &ctx, PltSection &plt, GotPltSection &gotPlt,
898 RelocationBaseSection &rel, RelType type, Symbol &sym) {
899 plt.addEntry(sym);
900 gotPlt.addEntry(sym);
901 rel.addReloc({type, &gotPlt, sym.getGotPltOffset(ctx),
902 sym.isPreemptible ? DynamicReloc::AgainstSymbol
903 : DynamicReloc::AddendOnlyWithTargetVA,
904 sym, 0, R_ABS});
907 void elf::addGotEntry(Ctx &ctx, Symbol &sym) {
908 ctx.in.got->addEntry(sym);
909 uint64_t off = sym.getGotOffset(ctx);
911 // If preemptible, emit a GLOB_DAT relocation.
912 if (sym.isPreemptible) {
913 ctx.mainPart->relaDyn->addReloc({ctx.target->gotRel, ctx.in.got.get(), off,
914 DynamicReloc::AgainstSymbol, sym, 0,
915 R_ABS});
916 return;
919 // Otherwise, the value is either a link-time constant or the load base
920 // plus a constant.
921 if (!ctx.arg.isPic || isAbsolute(sym))
922 ctx.in.got->addConstant({R_ABS, ctx.target->symbolicRel, off, 0, &sym});
923 else
924 addRelativeReloc(ctx, *ctx.in.got, off, sym, 0, R_ABS,
925 ctx.target->symbolicRel);
928 static void addTpOffsetGotEntry(Ctx &ctx, Symbol &sym) {
929 ctx.in.got->addEntry(sym);
930 uint64_t off = sym.getGotOffset(ctx);
931 if (!sym.isPreemptible && !ctx.arg.shared) {
932 ctx.in.got->addConstant({R_TPREL, ctx.target->symbolicRel, off, 0, &sym});
933 return;
935 ctx.mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
936 ctx.target->tlsGotRel, *ctx.in.got, off, sym, ctx.target->symbolicRel);
939 // Return true if we can define a symbol in the executable that
940 // contains the value/function of a symbol defined in a shared
941 // library.
942 static bool canDefineSymbolInExecutable(Ctx &ctx, Symbol &sym) {
943 // If the symbol has default visibility the symbol defined in the
944 // executable will preempt it.
945 // Note that we want the visibility of the shared symbol itself, not
946 // the visibility of the symbol in the output file we are producing.
947 if (!sym.dsoProtected)
948 return true;
950 // If we are allowed to break address equality of functions, defining
951 // a plt entry will allow the program to call the function in the
952 // .so, but the .so and the executable will no agree on the address
953 // of the function. Similar logic for objects.
954 return ((sym.isFunc() && ctx.arg.ignoreFunctionAddressEquality) ||
955 (sym.isObject() && ctx.arg.ignoreDataAddressEquality));
// Returns true if a given relocation can be computed at link-time.
// This only handles relocation types expected in processAux.
//
// For instance, we know the offset from a relocation to its target at
// link-time if the relocation is PC-relative and refers a
// non-interposable function in the same executable. This function
// will return true for such relocation.
//
// If this function returns false, that means we need to emit a
// dynamic relocation so that the relocation will be fixed at load-time.
bool RelocationScanner::isStaticLinkTimeConstant(RelExpr e, RelType type,
                                                 const Symbol &sym,
                                                 uint64_t relOff) const {
  // These expressions always compute a constant: they are offsets into
  // linker-synthesized tables (GOT/PLT/TOC) or relaxation hints.
  if (oneof<R_GOTPLT, R_GOT_OFF, R_RELAX_HINT, R_MIPS_GOT_LOCAL_PAGE,
            R_MIPS_GOTREL, R_MIPS_GOT_OFF, R_MIPS_GOT_OFF32, R_MIPS_GOT_GP_PC,
            R_AARCH64_GOT_PAGE_PC, R_GOT_PC, R_GOTONLY_PC, R_GOTPLTONLY_PC,
            R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT, R_GOTPLT_GOTREL, R_GOTPLT_PC,
            R_PPC32_PLTREL, R_PPC64_CALL_PLT, R_PPC64_RELAX_TOC, R_RISCV_ADD,
            R_AARCH64_GOT_PAGE, R_LOONGARCH_PLT_PAGE_PC, R_LOONGARCH_GOT,
            R_LOONGARCH_GOT_PAGE_PC>(e))
    return true;

  // These never do, except if the entire file is position dependent or if
  // only the low bits are used.
  if (e == R_GOT || e == R_PLT)
    return ctx.target->usesOnlyLowPageBits(type) || !ctx.arg.isPic;

  // R_AARCH64_AUTH_ABS64 requires a dynamic relocation.
  if (sym.isPreemptible || e == R_AARCH64_AUTH)
    return false;
  // Everything below concerns PIC output only; non-PIC values are constants.
  if (!ctx.arg.isPic)
    return true;

  // Constant when referencing a non-preemptible symbol.
  if (e == R_SIZE || e == R_RISCV_LEB128)
    return true;

  // For the target and the relocation, we want to know if they are
  // absolute or relative.
  bool absVal = isAbsoluteValue(sym);
  bool relE = isRelExpr(e);
  if (absVal && !relE)
    return true;
  if (!absVal && relE)
    return true;
  if (!absVal && !relE)
    return ctx.target->usesOnlyLowPageBits(type);

  // Only the mixed absolute-target / relative-expression case remains.
  assert(absVal && relE);

  // Allow R_PLT_PC (optimized to R_PC here) to a hidden undefined weak symbol
  // in PIC mode. This is a little strange, but it allows us to link function
  // calls to such symbols (e.g. glibc/stdlib/exit.c:__run_exit_handlers).
  // Normally such a call will be guarded with a comparison, which will load a
  // zero from the GOT.
  if (sym.isUndefWeak())
    return true;

  // We set the final symbols values for linker script defined symbols later.
  // They always can be computed as a link time constant.
  if (sym.scriptDefined)
    return true;

  // Otherwise this combination is an error; report it but still claim the
  // relocation is constant so no dynamic relocation is emitted for it.
  auto diag = Err(ctx);
  diag << "relocation " << type << " cannot refer to absolute symbol: " << &sym;
  printLocation(diag, *sec, sym, relOff);
  return true;
}
// The reason we have to do this early scan is as follows
// * To mmap the output file, we need to know the size
// * For that, we need to know how many dynamic relocs we will have.
// It might be possible to avoid this by outputting the file with write:
// * Write the allocated output sections, computing addresses.
// * Apply relocations, recording which ones require a dynamic reloc.
// * Write the dynamic relocations.
// * Write the rest of the file.
// This would have some drawbacks. For example, we would only know if .rela.dyn
// is needed after applying relocations. If it is, it will go after rw and rx
// sections. Given that it is ro, we will need an extra PT_LOAD. This
// complicates things for the dynamic linker and means we would have to reserve
// space for the extra PT_LOAD even if we end up not using it.
//
// Decide, for one non-TLS relocation, whether it is resolved statically,
// needs GOT/PLT entries, a dynamic relocation, a copy relocation, or is an
// error. Records the outcome via symbol flags and section/dynamic reloc lists.
void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset,
                                   Symbol &sym, int64_t addend) const {
  // If non-ifunc non-preemptible, change PLT to direct call and optimize GOT
  // indirection.
  const bool isIfunc = sym.isGnuIFunc();
  if (!sym.isPreemptible && (!isIfunc || ctx.arg.zIfuncNoplt)) {
    if (expr != R_GOT_PC) {
      // The 0x8000 bit of r_addend of R_PPC_PLTREL24 is used to choose call
      // stub type. It should be ignored if optimized to R_PC.
      if (ctx.arg.emachine == EM_PPC && expr == R_PPC32_PLTREL)
        addend &= ~0x8000;
      // R_HEX_GD_PLT_B22_PCREL (call a@GDPLT) is transformed into
      // call __tls_get_addr even if the symbol is non-preemptible.
      if (!(ctx.arg.emachine == EM_HEXAGON &&
            (type == R_HEX_GD_PLT_B22_PCREL ||
             type == R_HEX_GD_PLT_B22_PCREL_X ||
             type == R_HEX_GD_PLT_B32_PCREL_X)))
        expr = fromPlt(expr);
    } else if (!isAbsoluteValue(sym)) {
      expr = ctx.target->adjustGotPcExpr(type, addend,
                                         sec->content().data() + offset);
      // If the target adjusted the expression to R_RELAX_GOT_PC, we may end up
      // needing the GOT if we can't relax everything.
      if (expr == R_RELAX_GOT_PC)
        ctx.in.got->hasGotOffRel.store(true, std::memory_order_relaxed);
    }
  }

  // We were asked not to generate PLT entries for ifuncs. Instead, pass the
  // direct relocation on through.
  if (LLVM_UNLIKELY(isIfunc) && ctx.arg.zIfuncNoplt) {
    std::lock_guard<std::mutex> lock(ctx.relocMutex);
    sym.exportDynamic = true;
    ctx.mainPart->relaDyn->addSymbolReloc(type, *sec, offset, sym, addend,
                                          type);
    return;
  }

  if (needsGot(expr)) {
    if (ctx.arg.emachine == EM_MIPS) {
      // MIPS ABI has special rules to process GOT entries and doesn't
      // require relocation entries for them. A special case is TLS
      // relocations. In that case dynamic loader applies dynamic
      // relocations to initialize TLS GOT entries.
      // See "Global Offset Table" in Chapter 5 in the following document
      // for detailed description:
      // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
      ctx.in.mipsGot->addEntry(*sec->file, sym, addend, expr);
    } else if (!sym.isTls() || ctx.arg.emachine != EM_LOONGARCH) {
      // Many LoongArch TLS relocs reuse the R_LOONGARCH_GOT type, in which
      // case the NEEDS_GOT flag shouldn't get set.
      sym.setFlags(NEEDS_GOT);
    }
  } else if (needsPlt(expr)) {
    sym.setFlags(NEEDS_PLT);
  } else if (LLVM_UNLIKELY(isIfunc)) {
    sym.setFlags(HAS_DIRECT_RELOC);
  }

  // If the relocation is known to be a link-time constant, we know no dynamic
  // relocation will be created, pass the control to relocateAlloc() or
  // relocateNonAlloc() to resolve it.
  //
  // The behavior of an undefined weak reference is implementation defined. For
  // non-link-time constants, we resolve relocations statically (let
  // relocate{,Non}Alloc() resolve them) for -no-pie and try producing dynamic
  // relocations for -pie and -shared.
  //
  // The general expectation of -no-pie static linking is that there is no
  // dynamic relocation (except IRELATIVE). Emitting dynamic relocations for
  // -shared matches the spirit of its -z undefs default. -pie has freedom on
  // choices, and we choose dynamic relocations to be consistent with the
  // handling of GOT-generating relocations.
  if (isStaticLinkTimeConstant(expr, type, sym, offset) ||
      (!ctx.arg.isPic && sym.isUndefWeak())) {
    sec->addReloc({expr, type, offset, addend, &sym});
    return;
  }

  // Use a simple -z notext rule that treats all sections except .eh_frame as
  // writable. GNU ld does not produce dynamic relocations in .eh_frame (and our
  // SectionBase::getOffset would incorrectly adjust the offset).
  //
  // For MIPS, we don't implement GNU ld's DW_EH_PE_absptr to DW_EH_PE_pcrel
  // conversion. We still emit a dynamic relocation.
  bool canWrite = (sec->flags & SHF_WRITE) ||
                  !(ctx.arg.zText ||
                    (isa<EhInputSection>(sec) && ctx.arg.emachine != EM_MIPS));
  if (canWrite) {
    RelType rel = ctx.target->getDynRel(type);
    if (oneof<R_GOT, R_LOONGARCH_GOT>(expr) ||
        (rel == ctx.target->symbolicRel && !sym.isPreemptible)) {
      addRelativeReloc<true>(ctx, *sec, offset, sym, addend, expr, type);
      return;
    }
    if (rel != 0) {
      if (ctx.arg.emachine == EM_MIPS && rel == ctx.target->symbolicRel)
        rel = ctx.target->relativeRel;
      std::lock_guard<std::mutex> lock(ctx.relocMutex);
      Partition &part = sec->getPartition(ctx);
      if (ctx.arg.emachine == EM_AARCH64 && type == R_AARCH64_AUTH_ABS64) {
        // For a preemptible symbol, we can't use a relative relocation. For an
        // undefined symbol, we can't compute offset at link-time and use a
        // relative relocation. Use a symbolic relocation instead.
        if (sym.isPreemptible) {
          part.relaDyn->addSymbolReloc(type, *sec, offset, sym, addend, type);
        } else if (part.relrAuthDyn && sec->addralign >= 2 && offset % 2 == 0) {
          // When symbol values are determined in
          // finalizeAddressDependentContent, some .relr.auth.dyn relocations
          // may be moved to .rela.dyn.
          sec->addReloc({expr, type, offset, addend, &sym});
          part.relrAuthDyn->relocs.push_back({sec, sec->relocs().size() - 1});
        } else {
          part.relaDyn->addReloc({R_AARCH64_AUTH_RELATIVE, sec, offset,
                                  DynamicReloc::AddendOnlyWithTargetVA, sym,
                                  addend, R_ABS});
        }
        return;
      }
      part.relaDyn->addSymbolReloc(rel, *sec, offset, sym, addend, type);

      // MIPS ABI turns using of GOT and dynamic relocations inside out.
      // While regular ABI uses dynamic relocations to fill up GOT entries
      // MIPS ABI requires dynamic linker to fills up GOT entries using
      // specially sorted dynamic symbol table. This affects even dynamic
      // relocations against symbols which do not require GOT entries
      // creation explicitly, i.e. do not have any GOT-relocations. So if
      // a preemptible symbol has a dynamic relocation we anyway have
      // to create a GOT entry for it.
      // If a non-preemptible symbol has a dynamic relocation against it,
      // dynamic linker takes it st_value, adds offset and writes down
      // result of the dynamic relocation. In case of preemptible symbol
      // dynamic linker performs symbol resolution, writes the symbol value
      // to the GOT entry and reads the GOT entry when it needs to perform
      // a dynamic relocation.
      // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf p.4-19
      if (ctx.arg.emachine == EM_MIPS)
        ctx.in.mipsGot->addEntry(*sec->file, sym, addend, expr);
      return;
    }
  }

  // When producing an executable, we can perform copy relocations (for
  // STT_OBJECT) and canonical PLT (for STT_FUNC) if sym is defined by a DSO.
  // Copy relocations/canonical PLT entries are unsupported for
  // R_AARCH64_AUTH_ABS64.
  if (!ctx.arg.shared && sym.isShared() &&
      !(ctx.arg.emachine == EM_AARCH64 && type == R_AARCH64_AUTH_ABS64)) {
    if (!canDefineSymbolInExecutable(ctx, sym)) {
      auto diag = Err(ctx);
      diag << "cannot preempt symbol: " << &sym;
      printLocation(diag, *sec, sym, offset);
      return;
    }

    if (sym.isObject()) {
      // Produce a copy relocation.
      if (auto *ss = dyn_cast<SharedSymbol>(&sym)) {
        if (!ctx.arg.zCopyreloc) {
          auto diag = Err(ctx);
          diag << "unresolvable relocation " << type << " against symbol '"
               << ss << "'; recompile with -fPIC or remove '-z nocopyreloc'";
          printLocation(diag, *sec, sym, offset);
        }
        sym.setFlags(NEEDS_COPY);
      }
      sec->addReloc({expr, type, offset, addend, &sym});
      return;
    }

    // This handles a non PIC program call to function in a shared library. In
    // an ideal world, we could just report an error saying the relocation can
    // overflow at runtime. In the real world with glibc, crt1.o has a
    // R_X86_64_PC32 pointing to libc.so.
    //
    // The general idea on how to handle such cases is to create a PLT entry and
    // use that as the function value.
    //
    // For the static linking part, we just return a plt expr and everything
    // else will use the PLT entry as the address.
    //
    // The remaining problem is making sure pointer equality still works. We
    // need the help of the dynamic linker for that. We let it know that we have
    // a direct reference to a so symbol by creating an undefined symbol with a
    // non zero st_value. Seeing that, the dynamic linker resolves the symbol to
    // the value of the symbol we created. This is true even for got entries, so
    // pointer equality is maintained. To avoid an infinite loop, the only entry
    // that points to the real function is a dedicated got entry used by the
    // plt. That is identified by special relocation types (R_X86_64_JUMP_SLOT,
    // R_386_JMP_SLOT, etc).
    //
    // For position independent executable on i386, the plt entry requires ebx
    // to be set. This causes two problems:
    // * If some code has a direct reference to a function, it was probably
    //   compiled without -fPIE/-fPIC and doesn't maintain ebx.
    // * If a library definition gets preempted to the executable, it will have
    //   the wrong ebx value.
    if (sym.isFunc()) {
      if (ctx.arg.pie && ctx.arg.emachine == EM_386) {
        auto diag = Err(ctx);
        diag << "symbol '" << &sym
             << "' cannot be preempted; recompile with -fPIE";
        printLocation(diag, *sec, sym, offset);
      }
      sym.setFlags(NEEDS_COPY | NEEDS_PLT);
      sec->addReloc({expr, type, offset, addend, &sym});
      return;
    }
  }

  // No strategy applied: the relocation cannot be represented. Report it.
  auto diag = Err(ctx);
  diag << "relocation " << type << " cannot be used against ";
  if (sym.getName().empty())
    diag << "local symbol";
  else
    diag << "symbol '" << &sym << "'";
  diag << "; recompile with -fPIC";
  printLocation(diag, *sec, sym, offset);
}
1261 // This function is similar to the `handleTlsRelocation`. MIPS does not
1262 // support any relaxations for TLS relocations so by factoring out MIPS
1263 // handling in to the separate function we can simplify the code and do not
1264 // pollute other `handleTlsRelocation` by MIPS `ifs` statements.
1265 // Mips has a custom MipsGotSection that handles the writing of GOT entries
1266 // without dynamic relocations.
1267 static unsigned handleMipsTlsRelocation(Ctx &ctx, RelType type, Symbol &sym,
1268 InputSectionBase &c, uint64_t offset,
1269 int64_t addend, RelExpr expr) {
1270 if (expr == R_MIPS_TLSLD) {
1271 ctx.in.mipsGot->addTlsIndex(*c.file);
1272 c.addReloc({expr, type, offset, addend, &sym});
1273 return 1;
1275 if (expr == R_MIPS_TLSGD) {
1276 ctx.in.mipsGot->addDynTlsEntry(*c.file, sym);
1277 c.addReloc({expr, type, offset, addend, &sym});
1278 return 1;
1280 return 0;
// Notes about General Dynamic and Local Dynamic TLS models below. They may
// require the generation of a pair of GOT entries that have associated dynamic
// relocations. The pair of GOT entries created are of the form GOT[e0] Module
// Index (Used to find pointer to TLS block at run-time) GOT[e1] Offset of
// symbol in TLS block.
//
// Returns the number of relocations processed (0 means "not handled here";
// the caller then falls through to processAux).
unsigned RelocationScanner::handleTlsRelocation(RelExpr expr, RelType type,
                                                uint64_t offset, Symbol &sym,
                                                int64_t addend) {
  // Local-Exec style accesses are invalid in shared objects: the TP offset is
  // only fixed in the executable.
  if (expr == R_TPREL || expr == R_TPREL_NEG) {
    if (ctx.arg.shared) {
      auto diag = Err(ctx);
      diag << "relocation " << type << " against " << &sym
           << " cannot be used with -shared";
      printLocation(diag, *sec, sym, offset);
      return 1;
    }
    return 0;
  }

  if (ctx.arg.emachine == EM_MIPS)
    return handleMipsTlsRelocation(ctx, type, sym, *sec, offset, addend, expr);

  // LoongArch does not yet implement transition from TLSDESC to LE/IE, so
  // generate TLSDESC dynamic relocation for the dynamic linker to handle.
  if (ctx.arg.emachine == EM_LOONGARCH &&
      oneof<R_LOONGARCH_TLSDESC_PAGE_PC, R_TLSDESC, R_TLSDESC_PC,
            R_TLSDESC_CALL>(expr)) {
    if (expr != R_TLSDESC_CALL) {
      sym.setFlags(NEEDS_TLSDESC);
      sec->addReloc({expr, type, offset, addend, &sym});
    }
    return 1;
  }

  bool isRISCV = ctx.arg.emachine == EM_RISCV;

  if (oneof<R_AARCH64_TLSDESC_PAGE, R_TLSDESC, R_TLSDESC_CALL, R_TLSDESC_PC,
            R_TLSDESC_GOTPLT>(expr) &&
      ctx.arg.shared) {
    // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a label. Do not
    // set NEEDS_TLSDESC on the label.
    if (expr != R_TLSDESC_CALL) {
      if (!isRISCV || type == R_RISCV_TLSDESC_HI20)
        sym.setFlags(NEEDS_TLSDESC);
      sec->addReloc({expr, type, offset, addend, &sym});
    }
    return 1;
  }

  // ARM, Hexagon, LoongArch and RISC-V do not support GD/LD to IE/LE
  // optimizations.
  // RISC-V supports TLSDESC to IE/LE optimizations.
  // For PPC64, if the file has missing R_PPC64_TLSGD/R_PPC64_TLSLD, disable
  // optimization as well.
  bool execOptimize =
      !ctx.arg.shared && ctx.arg.emachine != EM_ARM &&
      ctx.arg.emachine != EM_HEXAGON && ctx.arg.emachine != EM_LOONGARCH &&
      !(isRISCV && expr != R_TLSDESC_PC && expr != R_TLSDESC_CALL) &&
      !sec->file->ppc64DisableTLSRelax;

  // If we are producing an executable and the symbol is non-preemptible, it
  // must be defined and the code sequence can be optimized to use Local-Exec.
  //
  // ARM and RISC-V do not support any relaxations for TLS relocations,
  // however, we can omit the DTPMOD dynamic relocations and resolve them at
  // link time because they are always 1. This may be necessary for static
  // linking as DTPMOD may not be expected at load time.
  bool isLocalInExecutable = !sym.isPreemptible && !ctx.arg.shared;

  // Local Dynamic is for access to module local TLS variables, while still
  // being suitable for being dynamically loaded via dlopen. GOT[e0] is the
  // module index, with a special value of 0 for the current module. GOT[e1] is
  // unused. There only needs to be one module index entry.
  if (oneof<R_TLSLD_GOT, R_TLSLD_GOTPLT, R_TLSLD_PC, R_TLSLD_HINT>(expr)) {
    // Local-Dynamic relocs can be optimized to Local-Exec.
    if (execOptimize) {
      sec->addReloc({ctx.target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE),
                     type, offset, addend, &sym});
      return ctx.target->getTlsGdRelaxSkip(type);
    }
    if (expr == R_TLSLD_HINT)
      return 1;
    ctx.needsTlsLd.store(true, std::memory_order_relaxed);
    sec->addReloc({expr, type, offset, addend, &sym});
    return 1;
  }

  // Local-Dynamic relocs can be optimized to Local-Exec.
  if (expr == R_DTPREL) {
    if (execOptimize)
      expr = ctx.target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE);
    sec->addReloc({expr, type, offset, addend, &sym});
    return 1;
  }

  // Local-Dynamic sequence where offset of tls variable relative to dynamic
  // thread pointer is stored in the got. This cannot be optimized to
  // Local-Exec.
  if (expr == R_TLSLD_GOT_OFF) {
    sym.setFlags(NEEDS_GOT_DTPREL);
    sec->addReloc({expr, type, offset, addend, &sym});
    return 1;
  }

  if (oneof<R_AARCH64_TLSDESC_PAGE, R_TLSDESC, R_TLSDESC_CALL, R_TLSDESC_PC,
            R_TLSDESC_GOTPLT, R_TLSGD_GOT, R_TLSGD_GOTPLT, R_TLSGD_PC,
            R_LOONGARCH_TLSGD_PAGE_PC>(expr)) {
    if (!execOptimize) {
      sym.setFlags(NEEDS_TLSGD);
      sec->addReloc({expr, type, offset, addend, &sym});
      return 1;
    }

    // Global-Dynamic/TLSDESC can be optimized to Initial-Exec or Local-Exec
    // depending on the symbol being locally defined or not.
    //
    // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a non-preemptible
    // label, so TLSDESC=>IE will be categorized as R_RELAX_TLS_GD_TO_LE. We fix
    // the categorization in RISCV::relocateAlloc().
    if (sym.isPreemptible) {
      sym.setFlags(NEEDS_TLSGD_TO_IE);
      sec->addReloc({ctx.target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_IE),
                     type, offset, addend, &sym});
    } else {
      sec->addReloc({ctx.target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_LE),
                     type, offset, addend, &sym});
    }
    return ctx.target->getTlsGdRelaxSkip(type);
  }

  if (oneof<R_GOT, R_GOTPLT, R_GOT_PC, R_AARCH64_GOT_PAGE_PC,
            R_LOONGARCH_GOT_PAGE_PC, R_GOT_OFF, R_TLSIE_HINT>(expr)) {
    ctx.hasTlsIe.store(true, std::memory_order_relaxed);
    // Initial-Exec relocs can be optimized to Local-Exec if the symbol is
    // locally defined. This is not supported on SystemZ.
    if (execOptimize && isLocalInExecutable && ctx.arg.emachine != EM_S390) {
      sec->addReloc({R_RELAX_TLS_IE_TO_LE, type, offset, addend, &sym});
    } else if (expr != R_TLSIE_HINT) {
      sym.setFlags(NEEDS_TLSIE);
      // R_GOT needs a relative relocation for PIC on i386 and Hexagon.
      if (expr == R_GOT && ctx.arg.isPic &&
          !ctx.target->usesOnlyLowPageBits(type))
        addRelativeReloc<true>(ctx, *sec, offset, sym, addend, expr, type);
      else
        sec->addReloc({expr, type, offset, addend, &sym});
    }
    return 1;
  }

  return 0;
}
// Scan a single relocation record and advance `i` past every record consumed
// (one record normally; several for MIPS N32 packed records or TLS sequences
// whose relaxation skips following relocations).
template <class ELFT, class RelTy>
void RelocationScanner::scanOne(typename Relocs<RelTy>::const_iterator &i) {
  const RelTy &rel = *i;
  uint32_t symIndex = rel.getSymbol(ctx.arg.isMips64EL);
  Symbol &sym = sec->getFile<ELFT>()->getSymbol(symIndex);
  RelType type;
  if constexpr (ELFT::Is64Bits || RelTy::IsCrel) {
    type = rel.getType(ctx.arg.isMips64EL);
    ++i;
  } else {
    // CREL is unsupported for MIPS N32.
    if (ctx.arg.mipsN32Abi) {
      // Packs the types of all records sharing this r_offset and advances `i`
      // past them.
      type = getMipsN32RelType(i);
    } else {
      type = rel.getType(ctx.arg.isMips64EL);
      ++i;
    }
  }
  // Get an offset in an output section this relocation is applied to.
  uint64_t offset = getter.get(ctx, rel.r_offset);
  // uint64_t(-1) marks a relocation that should be skipped (e.g. in a dead
  // .eh_frame piece).
  if (offset == uint64_t(-1))
    return;

  RelExpr expr =
      ctx.target->getRelExpr(type, sym, sec->content().data() + offset);
  int64_t addend = RelTy::HasAddend
                       ? getAddend<ELFT>(rel)
                       : ctx.target->getImplicitAddend(
                             sec->content().data() + rel.r_offset, type);
  if (LLVM_UNLIKELY(ctx.arg.emachine == EM_MIPS))
    addend += computeMipsAddend<ELFT>(rel, expr, sym.isLocal());
  else if (ctx.arg.emachine == EM_PPC64 && ctx.arg.isPic && type == R_PPC64_TOC)
    addend += getPPC64TocBase(ctx);

  // Ignore R_*_NONE and other marker relocations.
  if (expr == R_NONE)
    return;

  // Error if the target symbol is undefined. Symbol index 0 may be used by
  // marker relocations, e.g. R_*_NONE and R_ARM_V4BX. Don't error on them.
  if (sym.isUndefined() && symIndex != 0 &&
      maybeReportUndefined(ctx, cast<Undefined>(sym), *sec, offset))
    return;

  if (ctx.arg.emachine == EM_PPC64) {
    // We can separate the small code model relocations into 2 categories:
    // 1) Those that access the compiler generated .toc sections.
    // 2) Those that access the linker allocated got entries.
    // lld allocates got entries to symbols on demand. Since we don't try to
    // sort the got entries in any way, we don't have to track which objects
    // have got-based small code model relocs. The .toc sections get placed
    // after the end of the linker allocated .got section and we do sort those
    // so sections addressed with small code model relocations come first.
    if (type == R_PPC64_TOC16 || type == R_PPC64_TOC16_DS)
      sec->file->ppc64SmallCodeModelTocRelocs = true;

    // Record the TOC entry (.toc + addend) as not relaxable. See the comment in
    // InputSectionBase::relocateAlloc().
    if (type == R_PPC64_TOC16_LO && sym.isSection() && isa<Defined>(sym) &&
        cast<Defined>(sym).section->name == ".toc")
      ctx.ppc64noTocRelax.insert({&sym, addend});

    if ((type == R_PPC64_TLSGD && expr == R_TLSDESC_CALL) ||
        (type == R_PPC64_TLSLD && expr == R_TLSLD_HINT)) {
      // Skip the error check for CREL, which does not set `end`.
      if constexpr (!RelTy::IsCrel) {
        if (i == end) {
          auto diag = Err(ctx);
          diag << "R_PPC64_TLSGD/R_PPC64_TLSLD may not be the last "
                  "relocation";
          printLocation(diag, *sec, sym, offset);
          return;
        }
      }

      // Offset the 4-byte aligned R_PPC64_TLSGD by one byte in the NOTOC
      // case, so we can discern it later from the toc-case.
      if (i->getType(/*isMips64EL=*/false) == R_PPC64_REL24_NOTOC)
        ++offset;
    }
  }

  // If the relocation does not emit a GOT or GOTPLT entry but its computation
  // uses their addresses, we need GOT or GOTPLT to be created.
  //
  // The 5 types that relative GOTPLT are all x86 and x86-64 specific.
  if (oneof<R_GOTPLTONLY_PC, R_GOTPLTREL, R_GOTPLT, R_PLT_GOTPLT,
            R_TLSDESC_GOTPLT, R_TLSGD_GOTPLT>(expr)) {
    ctx.in.gotPlt->hasGotPltOffRel.store(true, std::memory_order_relaxed);
  } else if (oneof<R_GOTONLY_PC, R_GOTREL, R_PPC32_PLTREL, R_PPC64_TOCBASE,
                   R_PPC64_RELAX_TOC>(expr)) {
    ctx.in.got->hasGotOffRel.store(true, std::memory_order_relaxed);
  }

  // Process TLS relocations, including TLS optimizations. Note that
  // R_TPREL and R_TPREL_NEG relocations are resolved in processAux.
  //
  // Some RISCV TLSDESC relocations reference a local NOTYPE symbol,
  // but we need to process them in handleTlsRelocation.
  if (sym.isTls() || oneof<R_TLSDESC_PC, R_TLSDESC_CALL>(expr)) {
    if (unsigned processed =
            handleTlsRelocation(expr, type, offset, sym, addend)) {
      // `i` was already advanced once; skip any extra records the TLS
      // relaxation consumed.
      i += processed - 1;
      return;
    }
  }

  processAux(expr, type, offset, sym, addend);
}
1548 // R_PPC64_TLSGD/R_PPC64_TLSLD is required to mark `bl __tls_get_addr` for
1549 // General Dynamic/Local Dynamic code sequences. If a GD/LD GOT relocation is
1550 // found but no R_PPC64_TLSGD/R_PPC64_TLSLD is seen, we assume that the
1551 // instructions are generated by very old IBM XL compilers. Work around the
1552 // issue by disabling GD/LD to IE/LE relaxation.
1553 template <class RelTy>
1554 static void checkPPC64TLSRelax(InputSectionBase &sec, Relocs<RelTy> rels) {
1555 // Skip if sec is synthetic (sec.file is null) or if sec has been marked.
1556 if (!sec.file || sec.file->ppc64DisableTLSRelax)
1557 return;
1558 bool hasGDLD = false;
1559 for (const RelTy &rel : rels) {
1560 RelType type = rel.getType(false);
1561 switch (type) {
1562 case R_PPC64_TLSGD:
1563 case R_PPC64_TLSLD:
1564 return; // Found a marker
1565 case R_PPC64_GOT_TLSGD16:
1566 case R_PPC64_GOT_TLSGD16_HA:
1567 case R_PPC64_GOT_TLSGD16_HI:
1568 case R_PPC64_GOT_TLSGD16_LO:
1569 case R_PPC64_GOT_TLSLD16:
1570 case R_PPC64_GOT_TLSLD16_HA:
1571 case R_PPC64_GOT_TLSLD16_HI:
1572 case R_PPC64_GOT_TLSLD16_LO:
1573 hasGDLD = true;
1574 break;
1577 if (hasGDLD) {
1578 sec.file->ppc64DisableTLSRelax = true;
1579 Warn(sec.file->ctx)
1580 << sec.file
1581 << ": disable TLS relaxation due to R_PPC64_GOT_TLS* relocations "
1582 "without "
1583 "R_PPC64_TLSGD/R_PPC64_TLSLD relocations";
// Scan every relocation of the current section (`sec`), dispatching each to
// scanOne. Handles pre-sorting requirements and post-scan sorting.
template <class ELFT, class RelTy>
void RelocationScanner::scan(Relocs<RelTy> rels) {
  // Not all relocations end up in Sec->Relocations, but a lot do.
  sec->relocations.reserve(rels.size());

  if (ctx.arg.emachine == EM_PPC64)
    checkPPC64TLSRelax<RelTy>(*sec, rels);

  // For EhInputSection, OffsetGetter expects the relocations to be sorted by
  // r_offset. In rare cases (.eh_frame pieces are reordered by a linker
  // script), the relocations may be unordered.
  // On SystemZ, all sections need to be sorted by r_offset, to allow TLS
  // relaxation to be handled correctly - see SystemZ::getTlsGdRelaxSkip.
  SmallVector<RelTy, 0> storage;
  if (isa<EhInputSection>(sec) || ctx.arg.emachine == EM_S390)
    rels = sortRels(rels, storage);

  if constexpr (RelTy::IsCrel) {
    for (auto i = rels.begin(); i != rels.end();)
      scanOne<ELFT, RelTy>(i);
  } else {
    // The non-CREL code path has additional check for PPC64 TLS.
    // Record the end iterator in the member `end` so that scanOne (and
    // getMipsN32RelType) can detect running off the record list.
    end = static_cast<const void *>(rels.end());
    for (auto i = rels.begin(); i != end;)
      scanOne<ELFT, RelTy>(i);
  }

  // Sort relocations by offset for more efficient searching for
  // R_RISCV_PCREL_HI20 and R_PPC64_ADDR64.
  if (ctx.arg.emachine == EM_RISCV ||
      (ctx.arg.emachine == EM_PPC64 && sec->name == ".toc"))
    llvm::stable_sort(sec->relocs(),
                      [](const Relocation &lhs, const Relocation &rhs) {
                        return lhs.offset < rhs.offset;
                      });
}
// Entry point for scanning a single input section: stash scanner state, then
// dispatch to scan() with whichever relocation encoding the section uses
// (CREL, REL or RELA).
template <class ELFT>
void RelocationScanner::scanSection(InputSectionBase &s, bool isEH) {
  sec = &s;
  getter = OffsetGetter(s);
  const RelsOrRelas<ELFT> rels = s.template relsOrRelas<ELFT>(!isEH);
  if (rels.areRelocsCrel())
    scan<ELFT>(rels.crels);
  else if (rels.areRelocsRel())
    scan<ELFT>(rels.rels);
  else
    scan<ELFT>(rels.relas);
template <class ELFT> void elf::scanRelocations(Ctx &ctx) {
  // Scan all relocations. Each relocation goes through a series of tests to
  // determine if it needs special treatment, such as creating GOT, PLT,
  // copy relocations, etc. Note that relocations for non-alloc sections are
  // directly processed by InputSection::relocateNonAlloc.

  // Deterministic parallelism needs sorting relocations which is unsuitable
  // for -z nocombreloc. MIPS and PPC64 use global states which are not suitable
  // for parallelism.
  bool serial = !ctx.arg.zCombreloc || ctx.arg.emachine == EM_MIPS ||
                ctx.arg.emachine == EM_PPC64;
  parallel::TaskGroup tg;
  // Scan every live SHF_ALLOC regular section of every object file, either
  // inline (serial) or as one spawned task per file.
  auto outerFn = [&]() {
    for (ELFFileBase *f : ctx.objectFiles) {
      auto fn = [f, &ctx]() {
        RelocationScanner scanner(ctx);
        for (InputSectionBase *s : f->getSections()) {
          // SHT_ARM_EXIDX sections on ARM are scanned via scanEH below, not
          // here.
          if (s && s->kind() == SectionBase::Regular && s->isLive() &&
              (s->flags & SHF_ALLOC) &&
              !(s->type == SHT_ARM_EXIDX && ctx.arg.emachine == EM_ARM))
            scanner.template scanSection<ELFT>(*s);
      if (serial)
        fn();
      else
        tg.spawn(fn);
    // .eh_frame and .ARM.exidx sections belong to partitions and are scanned
    // separately from the per-file loop above.
    auto scanEH = [&] {
      RelocationScanner scanner(ctx);
      for (Partition &part : ctx.partitions) {
        for (EhInputSection *sec : part.ehFrame->sections)
          scanner.template scanSection<ELFT>(*sec, /*isEH=*/true);
        if (part.armExidx && part.armExidx->isLive())
          for (InputSection *sec : part.armExidx->exidxSections)
            if (sec->isLive())
              scanner.template scanSection<ELFT>(*sec);
    if (serial)
      scanEH();
    else
      tg.spawn(scanEH);
  // If `serial` is true, call `spawn` to ensure that `scanner` runs in a thread
  // with valid getThreadIndex().
  if (serial)
    tg.spawn(outerFn);
  else
    outerFn();
// Returns true when sym is a non-preemptible ifunc that has been fully
// handled here (or needs no handling); false means the caller must continue
// with normal dynamic-relocation processing.
static bool handleNonPreemptibleIfunc(Ctx &ctx, Symbol &sym, uint16_t flags) {
  // Handle a reference to a non-preemptible ifunc. These are special in a
  // few ways:
  //
  // - Unlike most non-preemptible symbols, non-preemptible ifuncs do not have
  //   a fixed value. But assuming that all references to the ifunc are
  //   GOT-generating or PLT-generating, the handling of an ifunc is
  //   relatively straightforward. We create a PLT entry in Iplt, which is
  //   usually at the end of .plt, which makes an indirect call using a
  //   matching GOT entry in igotPlt, which is usually at the end of .got.plt.
  //   The GOT entry is relocated using an IRELATIVE relocation in relaDyn,
  //   which is usually at the end of .rela.dyn.
  //
  // - Despite the fact that an ifunc does not have a fixed value, compilers
  //   that are not passed -fPIC will assume that they do, and will emit
  //   direct (non-GOT-generating, non-PLT-generating) relocations to the
  //   symbol. This means that if a direct relocation to the symbol is
  //   seen, the linker must set a value for the symbol, and this value must
  //   be consistent no matter what type of reference is made to the symbol.
  //   This can be done by creating a PLT entry for the symbol in the way
  //   described above and making it canonical, that is, making all references
  //   point to the PLT entry instead of the resolver. In lld we also store
  //   the address of the PLT entry in the dynamic symbol table, which means
  //   that the symbol will also have the same value in other modules.
  //   Because the value loaded from the GOT needs to be consistent with
  //   the value computed using a direct relocation, a non-preemptible ifunc
  //   may end up with two GOT entries, one in .got.plt that points to the
  //   address returned by the resolver and is used only by the PLT entry,
  //   and another in .got that points to the PLT entry and is used by
  //   GOT-generating relocations.
  //
  // - The fact that these symbols do not have a fixed value makes them an
  //   exception to the general rule that a statically linked executable does
  //   not require any form of dynamic relocation. To handle these relocations
  //   correctly, the IRELATIVE relocations are stored in an array which a
  //   statically linked executable's startup code must enumerate using the
  //   linker-defined symbols __rela?_iplt_{start,end}.
  if (!sym.isGnuIFunc() || sym.isPreemptible || ctx.arg.zIfuncNoplt)
    return false;
  // Skip unreferenced non-preemptible ifunc.
  if (!(flags & (NEEDS_GOT | NEEDS_PLT | HAS_DIRECT_RELOC)))
    return true;

  sym.isInIplt = true;

  // Create an Iplt and the associated IRELATIVE relocation pointing to the
  // original section/value pairs. For non-GOT non-PLT relocation case below, we
  // may alter section/value, so create a copy of the symbol to make
  // section/value fixed.
  //
  // Prior to Android V, there was a bug that caused RELR relocations to be
  // applied after packed relocations. This meant that resolvers referenced by
  // IRELATIVE relocations in the packed relocation section would read
  // unrelocated globals with RELR relocations when
  // --pack-relative-relocs=android+relr is enabled. Work around this by placing
  // IRELATIVE in .rela.plt.
  auto *directSym = makeDefined(cast<Defined>(sym));
  directSym->allocateAux(ctx);
  auto &dyn =
      ctx.arg.androidPackDynRelocs ? *ctx.in.relaPlt : *ctx.mainPart->relaDyn;
  addPltEntry(ctx, *ctx.in.iplt, *ctx.in.igotPlt, dyn, ctx.target->iRelativeRel,
              *directSym);
  sym.allocateAux(ctx);
  // Share the PLT index with the fixed copy so both symbols resolve to the
  // same Iplt entry.
  ctx.symAux.back().pltIdx = ctx.symAux[directSym->auxIdx].pltIdx;

  if (flags & HAS_DIRECT_RELOC) {
    // Change the value to the IPLT and redirect all references to it.
    auto &d = cast<Defined>(sym);
    d.section = ctx.in.iplt.get();
    d.value = d.getPltIdx(ctx) * ctx.target->ipltEntrySize;
    d.size = 0;
    // It's important to set the symbol type here so that dynamic loaders
    // don't try to call the PLT as if it were an ifunc resolver.
    d.type = STT_FUNC;

    if (flags & NEEDS_GOT)
      addGotEntry(ctx, sym);
  } else if (flags & NEEDS_GOT) {
    // Redirect GOT accesses to point to the Igot.
    sym.gotInIgot = true;
  return true;
// After scanning, materialize what the scan phase recorded in each symbol's
// flags: GOT/PLT entries, copy relocations / canonical PLTs, and TLS GOT
// entries, for both global and local symbols.
void elf::postScanRelocations(Ctx &ctx) {
  auto fn = [&](Symbol &sym) {
    auto flags = sym.flags.load(std::memory_order_relaxed);
    if (handleNonPreemptibleIfunc(ctx, sym, flags))
      return;

    if (sym.isTagged() && sym.isDefined())
      ctx.mainPart->memtagGlobalDescriptors->addSymbol(sym);

    if (!sym.needsDynReloc())
      return;
    sym.allocateAux(ctx);

    if (flags & NEEDS_GOT)
      addGotEntry(ctx, sym);
    if (flags & NEEDS_PLT)
      addPltEntry(ctx, *ctx.in.plt, *ctx.in.gotPlt, *ctx.in.relaPlt,
                  ctx.target->pltRel, sym);
    if (flags & NEEDS_COPY) {
      if (sym.isObject()) {
        invokeELFT(addCopyRelSymbol, ctx, cast<SharedSymbol>(sym));
        // NEEDS_COPY is cleared for sym and its aliases so that in
        // later iterations aliases won't cause redundant copies.
        assert(!sym.hasFlag(NEEDS_COPY));
      } else {
        // Canonical PLT: give the function a definition at its PLT entry so
        // direct references see a consistent address.
        assert(sym.isFunc() && sym.hasFlag(NEEDS_PLT));
        if (!sym.isDefined()) {
          replaceWithDefined(ctx, sym, *ctx.in.plt,
                             ctx.target->pltHeaderSize +
                                 ctx.target->pltEntrySize * sym.getPltIdx(ctx),
          sym.setFlags(NEEDS_COPY);
          if (ctx.arg.emachine == EM_PPC) {
            // PPC32 canonical PLT entries are at the beginning of .glink
            cast<Defined>(sym).value = ctx.in.plt->headerSize;
            ctx.in.plt->headerSize += 16;
            cast<PPC32GlinkSection>(*ctx.in.plt).canonical_plts.push_back(&sym);

    if (!sym.isTls())
      return;
    // A symbol bound in this module of a non-shared link needs no dynamic
    // module-index relocation; the value can be written at link time.
    bool isLocalInExecutable = !sym.isPreemptible && !ctx.arg.shared;
    GotSection *got = ctx.in.got.get();

    if (flags & NEEDS_TLSDESC) {
      got->addTlsDescEntry(sym);
      ctx.mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
          ctx.target->tlsDescRel, *got, got->getTlsDescOffset(sym), sym,
          ctx.target->tlsDescRel);
    if (flags & NEEDS_TLSGD) {
      got->addDynTlsEntry(sym);
      uint64_t off = got->getGlobalDynOffset(sym);
      if (isLocalInExecutable)
        // Write one to the GOT slot.
        got->addConstant({R_ADDEND, ctx.target->symbolicRel, off, 1, &sym});
      else
        ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->tlsModuleIndexRel,
                                              *got, off, sym);

      // If the symbol is preemptible we need the dynamic linker to write
      // the offset too.
      uint64_t offsetOff = off + ctx.arg.wordsize;
      if (sym.isPreemptible)
        ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->tlsOffsetRel, *got,
                                              offsetOff, sym);
      else
        got->addConstant({R_ABS, ctx.target->tlsOffsetRel, offsetOff, 0, &sym});
    if (flags & NEEDS_TLSGD_TO_IE) {
      got->addEntry(sym);
      ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->tlsGotRel, *got,
                                            sym.getGotOffset(ctx), sym);
    if (flags & NEEDS_GOT_DTPREL) {
      got->addEntry(sym);
      got->addConstant(
          {R_ABS, ctx.target->tlsOffsetRel, sym.getGotOffset(ctx), 0, &sym});

    if ((flags & NEEDS_TLSIE) && !(flags & NEEDS_TLSGD_TO_IE))
      addTpOffsetGotEntry(ctx, sym);

  // TLS LD module-index slot, shared by all local-dynamic accesses; a dummy
  // local symbol stands in for the (unused) symbol operand.
  GotSection *got = ctx.in.got.get();
  if (ctx.needsTlsLd.load(std::memory_order_relaxed) && got->addTlsIndex()) {
    static Undefined dummy(ctx.internalFile, "", STB_LOCAL, 0, 0);
    if (ctx.arg.shared)
      ctx.mainPart->relaDyn->addReloc(
          {ctx.target->tlsModuleIndexRel, got, got->getTlsIndexOff()});
    else
      got->addConstant({R_ADDEND, ctx.target->symbolicRel,
                        got->getTlsIndexOff(), 1, &dummy});

  assert(ctx.symAux.size() == 1);
  for (Symbol *sym : ctx.symtab->getSymbols())
    fn(*sym);

  // Local symbols may need the aforementioned non-preemptible ifunc and GOT
  // handling. They don't need regular PLT.
  for (ELFFileBase *file : ctx.objectFiles)
    for (Symbol *sym : file->getLocalSymbols())
      fn(*sym);
1882 static bool mergeCmp(const InputSection *a, const InputSection *b) {
1883 // std::merge requires a strict weak ordering.
1884 if (a->outSecOff < b->outSecOff)
1885 return true;
1887 // FIXME dyn_cast<ThunkSection> is non-null for any SyntheticSection.
1888 if (a->outSecOff == b->outSecOff && a != b) {
1889 auto *ta = dyn_cast<ThunkSection>(a);
1890 auto *tb = dyn_cast<ThunkSection>(b);
1892 // Check if Thunk is immediately before any specific Target
1893 // InputSection for example Mips LA25 Thunks.
1894 if (ta && ta->getTargetInputSection() == b)
1895 return true;
1897 // Place Thunk Sections without specific targets before
1898 // non-Thunk Sections.
1899 if (ta && !tb && !ta->getTargetInputSection())
1900 return true;
1903 return false;
1906 // Call Fn on every executable InputSection accessed via the linker script
1907 // InputSectionDescription::Sections.
1908 static void forEachInputSectionDescription(
1909 ArrayRef<OutputSection *> outputSections,
1910 llvm::function_ref<void(OutputSection *, InputSectionDescription *)> fn) {
1911 for (OutputSection *os : outputSections) {
1912 if (!(os->flags & SHF_ALLOC) || !(os->flags & SHF_EXECINSTR))
1913 continue;
1914 for (SectionCommand *bc : os->commands)
1915 if (auto *isd = dyn_cast<InputSectionDescription>(bc))
1916 fn(os, isd);
1920 ThunkCreator::ThunkCreator(Ctx &ctx) : ctx(ctx) {}
1922 ThunkCreator::~ThunkCreator() {}
1924 // Thunk Implementation
1926 // Thunks (sometimes called stubs, veneers or branch islands) are small pieces
// of code that the linker inserts in between a caller and a callee. The thunks
1928 // are added at link time rather than compile time as the decision on whether
1929 // a thunk is needed, such as the caller and callee being out of range, can only
1930 // be made at link time.
1932 // It is straightforward to tell given the current state of the program when a
1933 // thunk is needed for a particular call. The more difficult part is that
1934 // the thunk needs to be placed in the program such that the caller can reach
1935 // the thunk and the thunk can reach the callee; furthermore, adding thunks to
1936 // the program alters addresses, which can mean more thunks etc.
1938 // In lld we have a synthetic ThunkSection that can hold many Thunks.
1939 // The decision to have a ThunkSection act as a container means that we can
1940 // more easily handle the most common case of a single block of contiguous
1941 // Thunks by inserting just a single ThunkSection.
1943 // The implementation of Thunks in lld is split across these areas
1944 // Relocations.cpp : Framework for creating and placing thunks
1945 // Thunks.cpp : The code generated for each supported thunk
1946 // Target.cpp : Target specific hooks that the framework uses to decide when
1947 // a thunk is used
1948 // Synthetic.cpp : Implementation of ThunkSection
1949 // Writer.cpp : Iteratively call framework until no more Thunks added
1951 // Thunk placement requirements:
1952 // Mips LA25 thunks. These must be placed immediately before the callee section
1953 // We can assume that the caller is in range of the Thunk. These are modelled
1954 // by Thunks that return the section they must precede with
1955 // getTargetInputSection().
1957 // ARM interworking and range extension thunks. These thunks must be placed
1958 // within range of the caller. All implemented ARM thunks can always reach the
1959 // callee as they use an indirect jump via a register that has no range
1960 // restrictions.
1962 // Thunk placement algorithm:
1963 // For Mips LA25 ThunkSections; the placement is explicit, it has to be before
1964 // getTargetInputSection().
1966 // For thunks that must be placed within range of the caller there are many
1967 // possible choices given that the maximum range from the caller is usually
1968 // much larger than the average InputSection size. Desirable properties include:
1969 // - Maximize reuse of thunks by multiple callers
1970 // - Minimize number of ThunkSections to simplify insertion
1971 // - Handle impact of already added Thunks on addresses
1972 // - Simple to understand and implement
1974 // In lld for the first pass, we pre-create one or more ThunkSections per
1975 // InputSectionDescription at Target specific intervals. A ThunkSection is
1976 // placed so that the estimated end of the ThunkSection is within range of the
1977 // start of the InputSectionDescription or the previous ThunkSection. For
1978 // example:
1979 // InputSectionDescription
1980 // Section 0
1981 // ...
1982 // Section N
1983 // ThunkSection 0
1984 // Section N + 1
1985 // ...
1986 // Section N + K
1987 // Thunk Section 1
1989 // The intention is that we can add a Thunk to a ThunkSection that is well
1990 // spaced enough to service a number of callers without having to do a lot
1991 // of work. An important principle is that it is not an error if a Thunk cannot
1992 // be placed in a pre-created ThunkSection; when this happens we create a new
1993 // ThunkSection placed next to the caller. This allows us to handle the vast
1994 // majority of thunks simply, but also handle rare cases where the branch range
1995 // is smaller than the target specific spacing.
1997 // The algorithm is expected to create all the thunks that are needed in a
1998 // single pass, with a small number of programs needing a second pass due to
1999 // the insertion of thunks in the first pass increasing the offset between
2000 // callers and callees that were only just in range.
2002 // A consequence of allowing new ThunkSections to be created outside of the
2003 // pre-created ThunkSections is that in rare cases calls to Thunks that were in
2004 // range in pass K, are out of range in some pass > K due to the insertion of
2005 // more Thunks in between the caller and callee. When this happens we retarget
2006 // the relocation back to the original target and create another Thunk.
2008 // Remove ThunkSections that are empty, this should only be the initial set
2009 // precreated on pass 0.
// Insert the Thunks for OutputSection OS into their designated place
// in the Sections vector, and recalculate the InputSection output section
// offsets.
// This may invalidate any output section offsets stored outside of InputSection
void ThunkCreator::mergeThunks(ArrayRef<OutputSection *> outputSections) {
  forEachInputSectionDescription(
      outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
        if (isd->thunkSections.empty())
          return;

        // Remove any zero sized precreated Thunks.
        llvm::erase_if(isd->thunkSections,
                       [](const std::pair<ThunkSection *, uint32_t> &ts) {
                         return ts.first->getSize() == 0;

        // ISD->ThunkSections contains all created ThunkSections, including
        // those inserted in previous passes. Extract the Thunks created this
        // pass and order them in ascending outSecOff.
        std::vector<ThunkSection *> newThunks;
        for (std::pair<ThunkSection *, uint32_t> ts : isd->thunkSections)
          if (ts.second == pass)
            newThunks.push_back(ts.first);
        llvm::stable_sort(newThunks,
                          [](const ThunkSection *a, const ThunkSection *b) {
                            return a->outSecOff < b->outSecOff;

        // Merge sorted vectors of Thunks and InputSections by outSecOff
        SmallVector<InputSection *, 0> tmp;
        tmp.reserve(isd->sections.size() + newThunks.size());

        std::merge(isd->sections.begin(), isd->sections.end(),
                   newThunks.begin(), newThunks.end(), std::back_inserter(tmp),
                   mergeCmp);

        isd->sections = std::move(tmp);
2051 static int64_t getPCBias(Ctx &ctx, RelType type) {
2052 if (ctx.arg.emachine != EM_ARM)
2053 return 0;
2054 switch (type) {
2055 case R_ARM_THM_JUMP19:
2056 case R_ARM_THM_JUMP24:
2057 case R_ARM_THM_CALL:
2058 return 4;
2059 default:
2060 return 8;
2064 // Find or create a ThunkSection within the InputSectionDescription (ISD) that
2065 // is in range of Src. An ISD maps to a range of InputSections described by a
2066 // linker script section pattern such as { .text .text.* }.
2067 ThunkSection *ThunkCreator::getISDThunkSec(OutputSection *os,
2068 InputSection *isec,
2069 InputSectionDescription *isd,
2070 const Relocation &rel,
2071 uint64_t src) {
2072 // See the comment in getThunk for -pcBias below.
2073 const int64_t pcBias = getPCBias(ctx, rel.type);
2074 for (std::pair<ThunkSection *, uint32_t> tp : isd->thunkSections) {
2075 ThunkSection *ts = tp.first;
2076 uint64_t tsBase = os->addr + ts->outSecOff - pcBias;
2077 uint64_t tsLimit = tsBase + ts->getSize();
2078 if (ctx.target->inBranchRange(rel.type, src,
2079 (src > tsLimit) ? tsBase : tsLimit))
2080 return ts;
2083 // No suitable ThunkSection exists. This can happen when there is a branch
2084 // with lower range than the ThunkSection spacing or when there are too
2085 // many Thunks. Create a new ThunkSection as close to the InputSection as
2086 // possible. Error if InputSection is so large we cannot place ThunkSection
2087 // anywhere in Range.
2088 uint64_t thunkSecOff = isec->outSecOff;
2089 if (!ctx.target->inBranchRange(rel.type, src,
2090 os->addr + thunkSecOff + rel.addend)) {
2091 thunkSecOff = isec->outSecOff + isec->getSize();
2092 if (!ctx.target->inBranchRange(rel.type, src,
2093 os->addr + thunkSecOff + rel.addend))
2094 Fatal(ctx) << "InputSection too large for range extension thunk "
2095 << isec->getObjMsg(src - (os->addr << isec->outSecOff));
2097 return addThunkSection(os, isd, thunkSecOff);
// Add a Thunk that needs to be placed in a ThunkSection that immediately
// precedes its Target.
ThunkSection *ThunkCreator::getISThunkSec(InputSection *isec) {
  // Reuse a previously created preceding ThunkSection for this target.
  ThunkSection *ts = thunkedSections.lookup(isec);
  if (ts)
    return ts;

  // Find InputSectionRange within Target Output Section (TOS) that the
  // InputSection (IS) that we need to precede is in.
  OutputSection *tos = isec->getParent();
  for (SectionCommand *bc : tos->commands) {
    auto *isd = dyn_cast<InputSectionDescription>(bc);
    if (!isd || isd->sections.empty())
      continue;

    InputSection *first = isd->sections.front();
    InputSection *last = isd->sections.back();

    // Skip ISDs that do not contain isec (checked via output-section offsets).
    if (isec->outSecOff < first->outSecOff || last->outSecOff < isec->outSecOff)
      continue;

    // Place the new ThunkSection at isec's own offset so it ends up
    // immediately before it; cache the result for reuse.
    ts = addThunkSection(tos, isd, isec->outSecOff);
    thunkedSections[isec] = ts;
    return ts;

  return nullptr;
// Create one or more ThunkSections per OS that can be used to place Thunks.
// We attempt to place the ThunkSections using the following desirable
// properties:
// - Within range of the maximum number of callers
// - Minimise the number of ThunkSections
//
// We follow a simple but conservative heuristic to place ThunkSections at
// offsets that are multiples of a Target specific branch range.
// For an InputSectionDescription that is smaller than the range, a single
// ThunkSection at the end of the range will do.
//
// For an InputSectionDescription that is more than twice the size of the range,
// we place the last ThunkSection at range bytes from the end of the
// InputSectionDescription in order to increase the likelihood that the
// distance from a thunk to its target will be sufficiently small to
// allow for the creation of a short thunk.
void ThunkCreator::createInitialThunkSections(
    ArrayRef<OutputSection *> outputSections) {
  uint32_t thunkSectionSpacing = ctx.target->getThunkSectionSpacing();
  forEachInputSectionDescription(
      outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
        if (isd->sections.empty())
          return;

        uint32_t isdBegin = isd->sections.front()->outSecOff;
        uint32_t isdEnd =
            isd->sections.back()->outSecOff + isd->sections.back()->getSize();
        // -1 (unsigned max) means "no bound"; only ISDs more than twice the
        // spacing get a bound for the final ThunkSection.
        uint32_t lastThunkLowerBound = -1;
        if (isdEnd - isdBegin > thunkSectionSpacing * 2)
          lastThunkLowerBound = isdEnd - thunkSectionSpacing;

        // isecLimit carries the last InputSection's end offset out of the
        // loop below; it is the placement for the final ThunkSection.
        uint32_t isecLimit;
        uint32_t prevIsecLimit = isdBegin;
        uint32_t thunkUpperBound = isdBegin + thunkSectionSpacing;

        for (const InputSection *isec : isd->sections) {
          isecLimit = isec->outSecOff + isec->getSize();
          // Crossing a spacing boundary: place a ThunkSection right after the
          // previous InputSection.
          if (isecLimit > thunkUpperBound) {
            addThunkSection(os, isd, prevIsecLimit);
            thunkUpperBound = prevIsecLimit + thunkSectionSpacing;
          if (isecLimit > lastThunkLowerBound)
            break;
          prevIsecLimit = isecLimit;
        addThunkSection(os, isd, isecLimit);
// Create a ThunkSection at offset off within isd, tagged with the current
// pass number, and register it in isd->thunkSections.
ThunkSection *ThunkCreator::addThunkSection(OutputSection *os,
                                            InputSectionDescription *isd,
                                            uint64_t off) {
  auto *ts = make<ThunkSection>(ctx, os, off);
  ts->partition = os->partition;
  if ((ctx.arg.fixCortexA53Errata843419 || ctx.arg.fixCortexA8) &&
      !isd->sections.empty()) {
    // The errata fixes are sensitive to addresses modulo 4 KiB. When we add
    // thunks we disturb the base addresses of sections placed after the thunks
    // this makes patches we have generated redundant, and may cause us to
    // generate more patches as different instructions are now in sensitive
    // locations. When we generate more patches we may force more branches to
    // go out of range, causing more thunks to be generated. In pathological
    // cases this can cause the address dependent content pass not to converge.
    // We fix this by rounding up the size of the ThunkSection to 4KiB, this
    // limits the insertion of a ThunkSection on the addresses modulo 4 KiB,
    // which means that adding Thunks to the section does not invalidate
    // errata patches for following code.
    // Rounding up the size to 4KiB has consequences for code-size and can
    // trip up linker script defined assertions. For example the linux kernel
    // has an assertion that what LLD represents as an InputSectionDescription
    // does not exceed 4 KiB even if the overall OutputSection is > 128 Mib.
    // We use the heuristic of rounding up the size when both of the following
    // conditions are true:
    // 1.) The OutputSection is larger than the ThunkSectionSpacing. This
    //     accounts for the case where no single InputSectionDescription is
    //     larger than the OutputSection size. This is conservative but simple.
    // 2.) The InputSectionDescription is larger than 4 KiB. This will prevent
    //     any assertion failures that an InputSectionDescription is < 4 KiB
    //     in size.
    uint64_t isdSize = isd->sections.back()->outSecOff +
                       isd->sections.back()->getSize() -
                       isd->sections.front()->outSecOff;
    if (os->size > ctx.target->getThunkSectionSpacing() && isdSize > 4096)
      ts->roundUpSizeForErrata = true;
  isd->thunkSections.push_back({ts, pass});
  return ts;
2218 static bool isThunkSectionCompatible(InputSection *source,
2219 SectionBase *target) {
2220 // We can't reuse thunks in different loadable partitions because they might
2221 // not be loaded. But partition 1 (the main partition) will always be loaded.
2222 if (source->partition != target->partition)
2223 return target->partition == 1;
2224 return true;
// Find a reusable Thunk for (rel.sym, addend) reachable from src, or create a
// new one. The bool in the result is true when the Thunk is newly created.
std::pair<Thunk *, bool> ThunkCreator::getThunk(InputSection *isec,
                                                Relocation &rel, uint64_t src) {
  // Bucket of existing thunks for this target key; selected below.
  SmallVector<std::unique_ptr<Thunk>, 0> *thunkVec = nullptr;
  // Arm and Thumb have a PC Bias of 8 and 4 respectively, this is cancelled
  // out in the relocation addend. We compensate for the PC bias so that
  // an Arm and Thumb relocation to the same destination get the same keyAddend,
  // which is usually 0.
  const int64_t pcBias = getPCBias(ctx, rel.type);
  const int64_t keyAddend = rel.addend + pcBias;

  // We use a ((section, offset), addend) pair to find the thunk position if
  // possible so that we create only one thunk for aliased symbols or ICFed
  // sections. There may be multiple relocations sharing the same (section,
  // offset + addend) pair. We may revert the relocation back to its original
  // non-Thunk target, so we cannot fold offset + addend.
  if (auto *d = dyn_cast<Defined>(rel.sym))
    if (!d->isInPlt(ctx) && d->section)
      thunkVec = &thunkedSymbolsBySectionAndAddend[{{d->section, d->value},
                                                    keyAddend}];
  if (!thunkVec)
    thunkVec = &thunkedSymbols[{rel.sym, keyAddend}];

  // Check existing Thunks for Sym to see if they can be reused
  for (auto &t : *thunkVec)
    if (isThunkSectionCompatible(isec, t->getThunkTargetSym()->section) &&
        t->isCompatibleWith(*isec, rel) &&
        ctx.target->inBranchRange(rel.type, src,
                                  t->getThunkTargetSym()->getVA(ctx, -pcBias)))
      return std::make_pair(t.get(), false);

  // No existing compatible Thunk in range, create a new one
  thunkVec->push_back(addThunk(ctx, *isec, rel));
  return std::make_pair(thunkVec->back().get(), true);
2262 std::pair<Thunk *, bool> ThunkCreator::getSyntheticLandingPad(Defined &d,
2263 int64_t a) {
2264 auto [it, isNew] = landingPadsBySectionAndAddend.try_emplace(
2265 {{d.section, d.value}, a}, nullptr);
2266 if (isNew)
2267 it->second = addLandingPadThunk(ctx, d, a);
2268 return {it->second.get(), isNew};
// Return true if the relocation target is an in range Thunk.
// Return false if the relocation is not to a Thunk. If the relocation target
// was originally to a Thunk, but is no longer in range we revert the
// relocation back to its original non-Thunk target.
bool ThunkCreator::normalizeExistingThunk(Relocation &rel, uint64_t src) {
  if (Thunk *t = thunks.lookup(rel.sym)) {
    if (ctx.target->inBranchRange(rel.type, src,
                                  rel.sym->getVA(ctx, rel.addend)))
      return true;
    // Thunk is out of range: retarget the relocation to the thunk's original
    // destination/addend so a new Thunk can be created for it.
    rel.sym = &t->destination;
    rel.addend = t->addend;
    if (rel.sym->isInPlt(ctx))
      rel.expr = toPlt(rel.expr);
  return false;
2288 // When indirect branches are restricted, such as AArch64 BTI Thunks may need
2289 // to target a linker generated landing pad instead of the target. This needs
2290 // to be done once per pass as the need for a BTI thunk is dependent whether
2291 // a thunk is short or long. We iterate over all the thunks to make sure we
2292 // catch thunks that have been created but are no longer live. Non-live thunks
2293 // are not reachable via normalizeExistingThunk() but are still written.
2294 bool ThunkCreator::addSyntheticLandingPads() {
2295 bool addressesChanged = false;
2296 for (Thunk *t : allThunks) {
2297 if (!t->needsSyntheticLandingPad())
2298 continue;
2299 Thunk *lpt;
2300 bool isNew;
2301 auto &dr = cast<Defined>(t->destination);
2302 std::tie(lpt, isNew) = getSyntheticLandingPad(dr, t->addend);
2303 if (isNew) {
2304 addressesChanged = true;
2305 getISThunkSec(cast<InputSection>(dr.section))->addThunk(lpt);
2307 t->landingPad = lpt->getThunkTargetSym();
2309 return addressesChanged;
// Process all relocations from the InputSections that have been assigned
// to InputSectionDescriptions and redirect through Thunks if needed. The
// function should be called iteratively until it returns false.
//
// PreConditions:
// All InputSections that may need a Thunk are reachable from
// OutputSectionCommands.
//
// All OutputSections have an address and all InputSections have an offset
// within the OutputSection.
//
// The offsets between caller (relocation place) and callee
// (relocation target) will not be modified outside of createThunks().
//
// PostConditions:
// If return value is true then ThunkSections have been inserted into
// OutputSections. All relocations that needed a Thunk based on the information
// available to createThunks() on entry have been redirected to a Thunk. Note
// that adding Thunks changes offsets between caller and callee so more Thunks
// may be required.
//
// If return value is false then no more Thunks are needed, and createThunks has
// made no changes. If the target requires range extension thunks, currently
// ARM, then any future change in offset between caller and callee risks a
// relocation out of range error.
bool ThunkCreator::createThunks(uint32_t pass,
                                ArrayRef<OutputSection *> outputSections) {
  this->pass = pass;
  bool addressesChanged = false;

  if (pass == 0 && ctx.target->getThunkSectionSpacing())
    createInitialThunkSections(outputSections);

  if (ctx.arg.emachine == EM_AARCH64)
    addressesChanged = addSyntheticLandingPads();

  // Create all the Thunks and insert them into synthetic ThunkSections. The
  // ThunkSections are later inserted back into InputSectionDescriptions.
  // We separate the creation of ThunkSections from the insertion of the
  // ThunkSections as ThunkSections are not always inserted into the same
  // InputSectionDescription as the caller.
  forEachInputSectionDescription(
      outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
        for (InputSection *isec : isd->sections)
          for (Relocation &rel : isec->relocs()) {
            uint64_t src = isec->getVA(rel.offset);

            // If we are a relocation to an existing Thunk, check if it is
            // still in range. If not then Rel will be altered to point to its
            // original target so another Thunk can be generated.
            if (pass > 0 && normalizeExistingThunk(rel, src))
              continue;

            if (!ctx.target->needsThunk(rel.expr, rel.type, isec->file, src,
                                        *rel.sym, rel.addend))
              continue;

            Thunk *t;
            bool isNew;
            std::tie(t, isNew) = getThunk(isec, rel, src);

            if (isNew) {
              // Find or create a ThunkSection for the new Thunk
              ThunkSection *ts;
              if (auto *tis = t->getTargetInputSection())
                ts = getISThunkSec(tis);
              else
                ts = getISDThunkSec(os, isec, isd, rel, src);
              ts->addThunk(t);
              thunks[t->getThunkTargetSym()] = t;
              allThunks.push_back(t);

            // Redirect relocation to Thunk, we never go via the PLT to a Thunk
            rel.sym = t->getThunkTargetSym();
            rel.expr = fromPlt(rel.expr);

            // On AArch64 and PPC, a jump/call relocation may be encoded as
            // STT_SECTION + non-zero addend, clear the addend after
            // redirection.
            if (ctx.arg.emachine != EM_MIPS)
              rel.addend = -getPCBias(ctx, rel.type);

        // Recompute thunk offsets; any change forces another pass.
        for (auto &p : isd->thunkSections)
          addressesChanged |= p.first->assignOffsets();

  for (auto &p : thunkedSections)
    addressesChanged |= p.second->assignOffsets();

  // Merge all created synthetic ThunkSections back into OutputSection
  mergeThunks(outputSections);
  return addressesChanged;
2408 // The following aid in the conversion of call x@GDPLT to call __tls_get_addr
// hexagonNeedsTLSSymbol scans for relocations that would require a call to
2410 // __tls_get_addr.
2411 // hexagonTLSSymbolUpdate rebinds the relocation to __tls_get_addr.
2412 bool elf::hexagonNeedsTLSSymbol(ArrayRef<OutputSection *> outputSections) {
2413 bool needTlsSymbol = false;
2414 forEachInputSectionDescription(
2415 outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
2416 for (InputSection *isec : isd->sections)
2417 for (Relocation &rel : isec->relocs())
2418 if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) {
2419 needTlsSymbol = true;
2420 return;
2423 return needTlsSymbol;
2426 void elf::hexagonTLSSymbolUpdate(Ctx &ctx) {
2427 Symbol *sym = ctx.symtab->find("__tls_get_addr");
2428 if (!sym)
2429 return;
2430 bool needEntry = true;
2431 forEachInputSectionDescription(
2432 ctx.outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
2433 for (InputSection *isec : isd->sections)
2434 for (Relocation &rel : isec->relocs())
2435 if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) {
2436 if (needEntry) {
2437 sym->allocateAux(ctx);
2438 addPltEntry(ctx, *ctx.in.plt, *ctx.in.gotPlt, *ctx.in.relaPlt,
2439 ctx.target->pltRel, *sym);
2440 needEntry = false;
2442 rel.sym = sym;
2447 static bool matchesRefTo(const NoCrossRefCommand &cmd, StringRef osec) {
2448 if (cmd.toFirst)
2449 return cmd.outputSections[0] == osec;
2450 return llvm::is_contained(cmd.outputSections, osec);
2453 template <class ELFT, class Rels>
2454 static void scanCrossRefs(Ctx &ctx, const NoCrossRefCommand &cmd,
2455 OutputSection *osec, InputSection *sec, Rels rels) {
2456 for (const auto &r : rels) {
2457 Symbol &sym = sec->file->getSymbol(r.getSymbol(ctx.arg.isMips64EL));
2458 // A legal cross-reference is when the destination output section is
2459 // nullptr, osec for a self-reference, or a section that is described by the
2460 // NOCROSSREFS/NOCROSSREFS_TO command.
2461 auto *dstOsec = sym.getOutputSection();
2462 if (!dstOsec || dstOsec == osec || !matchesRefTo(cmd, dstOsec->name))
2463 continue;
2465 std::string toSymName;
2466 if (!sym.isSection())
2467 toSymName = toStr(ctx, sym);
2468 else if (auto *d = dyn_cast<Defined>(&sym))
2469 toSymName = d->section->name;
2470 Err(ctx) << sec->getLocation(r.r_offset)
2471 << ": prohibited cross reference from '" << osec->name << "' to '"
2472 << toSymName << "' in '" << dstOsec->name << "'";
2476 // For each output section described by at least one NOCROSSREFS(_TO) command,
2477 // scan relocations from its input sections for prohibited cross references.
2478 template <class ELFT> void elf::checkNoCrossRefs(Ctx &ctx) {
2479 for (OutputSection *osec : ctx.outputSections) {
2480 for (const NoCrossRefCommand &noxref : ctx.script->noCrossRefs) {
2481 if (!llvm::is_contained(noxref.outputSections, osec->name) ||
2482 (noxref.toFirst && noxref.outputSections[0] == osec->name))
2483 continue;
2484 for (SectionCommand *cmd : osec->commands) {
2485 auto *isd = dyn_cast<InputSectionDescription>(cmd);
2486 if (!isd)
2487 continue;
2488 parallelForEach(isd->sections, [&](InputSection *sec) {
2489 invokeOnRelocs(*sec, scanCrossRefs<ELFT>, ctx, noxref, osec, sec);
// Explicit instantiations for the four supported ELF variants
// (32/64-bit x little/big-endian), so the definitions in this file link
// against callers in other translation units.
template void elf::scanRelocations<ELF32LE>(Ctx &);
template void elf::scanRelocations<ELF32BE>(Ctx &);
template void elf::scanRelocations<ELF64LE>(Ctx &);
template void elf::scanRelocations<ELF64BE>(Ctx &);

template void elf::checkNoCrossRefs<ELF32LE>(Ctx &);
template void elf::checkNoCrossRefs<ELF32BE>(Ctx &);
template void elf::checkNoCrossRefs<ELF64LE>(Ctx &);
template void elf::checkNoCrossRefs<ELF64BE>(Ctx &);