// NOTE: The following three lines are gitweb extraction artifacts (commit
// subject line, repository path, and blob hash). They are preserved here as a
// comment so the file remains valid C++:
// [mlir][py] Enable loading only specified dialects during creation. (#121421)
// [llvm-project.git] / lld / ELF / InputSection.cpp
// blobefa7ba3e7cb06386b9cf809a46f00eb34b492d98
1 //===- InputSection.cpp ---------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
9 #include "InputSection.h"
10 #include "Config.h"
11 #include "InputFiles.h"
12 #include "OutputSections.h"
13 #include "Relocations.h"
14 #include "SymbolTable.h"
15 #include "Symbols.h"
16 #include "SyntheticSections.h"
17 #include "Target.h"
18 #include "lld/Common/CommonLinkerContext.h"
19 #include "lld/Common/DWARF.h"
20 #include "llvm/Support/Compiler.h"
21 #include "llvm/Support/Compression.h"
22 #include "llvm/Support/Endian.h"
23 #include "llvm/Support/xxhash.h"
24 #include <algorithm>
25 #include <mutex>
26 #include <optional>
27 #include <vector>
29 using namespace llvm;
30 using namespace llvm::ELF;
31 using namespace llvm::object;
32 using namespace llvm::support;
33 using namespace llvm::support::endian;
34 using namespace llvm::sys;
35 using namespace lld;
36 using namespace lld::elf;
38 // Returns a string to construct an error message.
39 std::string elf::toStr(Ctx &ctx, const InputSectionBase *sec) {
40 return (toStr(ctx, sec->file) + ":(" + sec->name + ")").str();
43 const ELFSyncStream &elf::operator<<(const ELFSyncStream &s,
44 const InputSectionBase *sec) {
45 return s << toStr(s.ctx, sec);
48 template <class ELFT>
49 static ArrayRef<uint8_t> getSectionContents(ObjFile<ELFT> &file,
50 const typename ELFT::Shdr &hdr) {
51 if (hdr.sh_type == SHT_NOBITS)
52 return ArrayRef<uint8_t>(nullptr, hdr.sh_size);
53 return check(file.getObj().getSectionContents(hdr));
56 InputSectionBase::InputSectionBase(InputFile *file, StringRef name,
57 uint32_t type, uint64_t flags, uint32_t link,
58 uint32_t info, uint32_t addralign,
59 uint32_t entsize, ArrayRef<uint8_t> data,
60 Kind sectionKind)
61 : SectionBase(sectionKind, file, name, type, flags, link, info, addralign,
62 entsize),
63 bss(0), decodedCrel(0), keepUnique(0), nopFiller(0),
64 content_(data.data()), size(data.size()) {
65 // In order to reduce memory allocation, we assume that mergeable
66 // sections are smaller than 4 GiB, which is not an unreasonable
67 // assumption as of 2017.
68 if (sectionKind == SectionBase::Merge && content().size() > UINT32_MAX)
69 ErrAlways(getCtx()) << this << ": section too large";
71 // The ELF spec states that a value of 0 means the section has
72 // no alignment constraints.
73 uint32_t v = std::max<uint32_t>(addralign, 1);
74 if (!isPowerOf2_64(v))
75 Fatal(getCtx()) << this << ": sh_addralign is not a power of 2";
76 this->addralign = v;
78 // If SHF_COMPRESSED is set, parse the header. The legacy .zdebug format is no
79 // longer supported.
80 if (flags & SHF_COMPRESSED) {
81 Ctx &ctx = file->ctx;
82 invokeELFT(parseCompressedHeader, ctx);
86 // SHF_INFO_LINK and SHF_GROUP are normally resolved and not copied to the
87 // output section. However, for relocatable linking without
88 // --force-group-allocation, the SHF_GROUP flag and section groups are retained.
89 static uint64_t getFlags(Ctx &ctx, uint64_t flags) {
90 flags &= ~(uint64_t)SHF_INFO_LINK;
91 if (ctx.arg.resolveGroups)
92 flags &= ~(uint64_t)SHF_GROUP;
93 return flags;
96 template <class ELFT>
97 InputSectionBase::InputSectionBase(ObjFile<ELFT> &file,
98 const typename ELFT::Shdr &hdr,
99 StringRef name, Kind sectionKind)
100 : InputSectionBase(&file, name, hdr.sh_type,
101 getFlags(file.ctx, hdr.sh_flags), hdr.sh_link,
102 hdr.sh_info, hdr.sh_addralign, hdr.sh_entsize,
103 getSectionContents(file, hdr), sectionKind) {
104 // We reject object files having insanely large alignments even though
105 // they are allowed by the spec. I think 4GB is a reasonable limitation.
106 // We might want to relax this in the future.
107 if (hdr.sh_addralign > UINT32_MAX)
108 Fatal(getCtx()) << &file << ": section sh_addralign is too large";
111 size_t InputSectionBase::getSize() const {
112 if (auto *s = dyn_cast<SyntheticSection>(this))
113 return s->getSize();
114 return size - bytesDropped;
117 template <class ELFT>
118 static void decompressAux(Ctx &ctx, const InputSectionBase &sec, uint8_t *out,
119 size_t size) {
120 auto *hdr = reinterpret_cast<const typename ELFT::Chdr *>(sec.content_);
121 auto compressed = ArrayRef<uint8_t>(sec.content_, sec.compressedSize)
122 .slice(sizeof(typename ELFT::Chdr));
123 if (Error e = hdr->ch_type == ELFCOMPRESS_ZLIB
124 ? compression::zlib::decompress(compressed, out, size)
125 : compression::zstd::decompress(compressed, out, size))
126 Fatal(ctx) << &sec << ": decompress failed: " << std::move(e);
129 void InputSectionBase::decompress() const {
130 Ctx &ctx = getCtx();
131 uint8_t *buf = makeThreadLocalN<uint8_t>(size);
132 invokeELFT(decompressAux, ctx, *this, buf, size);
133 content_ = buf;
134 compressed = false;
137 template <class ELFT>
138 RelsOrRelas<ELFT> InputSectionBase::relsOrRelas(bool supportsCrel) const {
139 if (relSecIdx == 0)
140 return {};
141 RelsOrRelas<ELFT> ret;
142 auto *f = cast<ObjFile<ELFT>>(file);
143 typename ELFT::Shdr shdr = f->template getELFShdrs<ELFT>()[relSecIdx];
144 if (shdr.sh_type == SHT_CREL) {
145 // Return an iterator if supported by caller.
146 if (supportsCrel) {
147 ret.crels = Relocs<typename ELFT::Crel>(
148 (const uint8_t *)f->mb.getBufferStart() + shdr.sh_offset);
149 return ret;
151 InputSectionBase *const &relSec = f->getSections()[relSecIdx];
152 // Otherwise, allocate a buffer to hold the decoded RELA relocations. When
153 // called for the first time, relSec is null (without --emit-relocs) or an
154 // InputSection with false decodedCrel.
155 if (!relSec || !cast<InputSection>(relSec)->decodedCrel) {
156 auto *sec = makeThreadLocal<InputSection>(*f, shdr, name);
157 f->cacheDecodedCrel(relSecIdx, sec);
158 sec->type = SHT_RELA;
159 sec->decodedCrel = true;
161 RelocsCrel<ELFT::Is64Bits> entries(sec->content_);
162 sec->size = entries.size() * sizeof(typename ELFT::Rela);
163 auto *relas = makeThreadLocalN<typename ELFT::Rela>(entries.size());
164 sec->content_ = reinterpret_cast<uint8_t *>(relas);
165 for (auto [i, r] : llvm::enumerate(entries)) {
166 relas[i].r_offset = r.r_offset;
167 relas[i].setSymbolAndType(r.r_symidx, r.r_type, false);
168 relas[i].r_addend = r.r_addend;
171 ret.relas = {ArrayRef(
172 reinterpret_cast<const typename ELFT::Rela *>(relSec->content_),
173 relSec->size / sizeof(typename ELFT::Rela))};
174 return ret;
177 const void *content = f->mb.getBufferStart() + shdr.sh_offset;
178 size_t size = shdr.sh_size;
179 if (shdr.sh_type == SHT_REL) {
180 ret.rels = {ArrayRef(reinterpret_cast<const typename ELFT::Rel *>(content),
181 size / sizeof(typename ELFT::Rel))};
182 } else {
183 assert(shdr.sh_type == SHT_RELA);
184 ret.relas = {
185 ArrayRef(reinterpret_cast<const typename ELFT::Rela *>(content),
186 size / sizeof(typename ELFT::Rela))};
188 return ret;
191 Ctx &SectionBase::getCtx() const { return file->ctx; }
193 uint64_t SectionBase::getOffset(uint64_t offset) const {
194 switch (kind()) {
195 case Output: {
196 auto *os = cast<OutputSection>(this);
197 // For output sections we treat offset -1 as the end of the section.
198 return offset == uint64_t(-1) ? os->size : offset;
200 case Class:
201 llvm_unreachable("section classes do not have offsets");
202 case Regular:
203 case Synthetic:
204 case Spill:
205 return cast<InputSection>(this)->outSecOff + offset;
206 case EHFrame: {
207 // Two code paths may reach here. First, clang_rt.crtbegin.o and GCC
208 // crtbeginT.o may reference the start of an empty .eh_frame to identify the
209 // start of the output .eh_frame. Just return offset.
211 // Second, InputSection::copyRelocations on .eh_frame. Some pieces may be
212 // discarded due to GC/ICF. We should compute the output section offset.
213 const EhInputSection *es = cast<EhInputSection>(this);
214 if (!es->content().empty())
215 if (InputSection *isec = es->getParent())
216 return isec->outSecOff + es->getParentOffset(offset);
217 return offset;
219 case Merge:
220 const MergeInputSection *ms = cast<MergeInputSection>(this);
221 if (InputSection *isec = ms->getParent())
222 return isec->outSecOff + ms->getParentOffset(offset);
223 return ms->getParentOffset(offset);
225 llvm_unreachable("invalid section kind");
228 uint64_t SectionBase::getVA(uint64_t offset) const {
229 const OutputSection *out = getOutputSection();
230 return (out ? out->addr : 0) + getOffset(offset);
233 OutputSection *SectionBase::getOutputSection() {
234 InputSection *sec;
235 if (auto *isec = dyn_cast<InputSection>(this))
236 sec = isec;
237 else if (auto *ms = dyn_cast<MergeInputSection>(this))
238 sec = ms->getParent();
239 else if (auto *eh = dyn_cast<EhInputSection>(this))
240 sec = eh->getParent();
241 else
242 return cast<OutputSection>(this);
243 return sec ? sec->getParent() : nullptr;
246 // When a section is compressed, `rawData` consists with a header followed
247 // by zlib-compressed data. This function parses a header to initialize
248 // `uncompressedSize` member and remove the header from `rawData`.
249 template <typename ELFT>
250 void InputSectionBase::parseCompressedHeader(Ctx &ctx) {
251 flags &= ~(uint64_t)SHF_COMPRESSED;
253 // New-style header
254 if (content().size() < sizeof(typename ELFT::Chdr)) {
255 ErrAlways(ctx) << this << ": corrupted compressed section";
256 return;
259 auto *hdr = reinterpret_cast<const typename ELFT::Chdr *>(content().data());
260 if (hdr->ch_type == ELFCOMPRESS_ZLIB) {
261 if (!compression::zlib::isAvailable())
262 ErrAlways(ctx) << this
263 << " is compressed with ELFCOMPRESS_ZLIB, but lld is "
264 "not built with zlib support";
265 } else if (hdr->ch_type == ELFCOMPRESS_ZSTD) {
266 if (!compression::zstd::isAvailable())
267 ErrAlways(ctx) << this
268 << " is compressed with ELFCOMPRESS_ZSTD, but lld is "
269 "not built with zstd support";
270 } else {
271 ErrAlways(ctx) << this << ": unsupported compression type ("
272 << uint32_t(hdr->ch_type) << ")";
273 return;
276 compressed = true;
277 compressedSize = size;
278 size = hdr->ch_size;
279 addralign = std::max<uint32_t>(hdr->ch_addralign, 1);
282 InputSection *InputSectionBase::getLinkOrderDep() const {
283 assert(flags & SHF_LINK_ORDER);
284 if (!link)
285 return nullptr;
286 return cast<InputSection>(file->getSections()[link]);
289 // Find a symbol that encloses a given location.
290 Defined *InputSectionBase::getEnclosingSymbol(uint64_t offset,
291 uint8_t type) const {
292 if (file->isInternal())
293 return nullptr;
294 for (Symbol *b : file->getSymbols())
295 if (Defined *d = dyn_cast<Defined>(b))
296 if (d->section == this && d->value <= offset &&
297 offset < d->value + d->size && (type == 0 || type == d->type))
298 return d;
299 return nullptr;
302 // Returns an object file location string. Used to construct an error message.
303 std::string InputSectionBase::getLocation(uint64_t offset) const {
304 std::string secAndOffset =
305 (name + "+0x" + Twine::utohexstr(offset) + ")").str();
307 std::string filename = toStr(getCtx(), file);
308 if (Defined *d = getEnclosingFunction(offset))
309 return filename + ":(function " + toStr(getCtx(), *d) + ": " + secAndOffset;
311 return filename + ":(" + secAndOffset;
314 static void printFileLine(const ELFSyncStream &s, StringRef path,
315 unsigned line) {
316 StringRef filename = path::filename(path);
317 s << filename << ':' << line;
318 if (filename != path)
319 s << " (" << path << ':' << line << ')';
322 // Print an error message that looks like this:
324 // foo.c:42 (/home/alice/possibly/very/long/path/foo.c:42)
325 const ELFSyncStream &elf::operator<<(const ELFSyncStream &s,
326 InputSectionBase::SrcMsg &&msg) {
327 auto &sec = msg.sec;
328 if (sec.file->kind() != InputFile::ObjKind)
329 return s;
330 auto &file = cast<ELFFileBase>(*sec.file);
332 // First, look up the DWARF line table.
333 ArrayRef<InputSectionBase *> sections = file.getSections();
334 auto it = llvm::find(sections, &sec);
335 uint64_t sectionIndex = it != sections.end()
336 ? it - sections.begin()
337 : object::SectionedAddress::UndefSection;
338 DWARFCache *dwarf = file.getDwarf();
339 if (auto info = dwarf->getDILineInfo(msg.offset, sectionIndex))
340 printFileLine(s, info->FileName, info->Line);
341 else if (auto fileLine = dwarf->getVariableLoc(msg.sym.getName()))
342 // If it failed, look up again as a variable.
343 printFileLine(s, fileLine->first, fileLine->second);
344 else
345 // File.sourceFile contains STT_FILE symbol, and that is a last resort.
346 s << file.sourceFile;
347 return s;
350 // Returns a filename string along with an optional section name. This
351 // function is intended to be used for constructing an error
352 // message. The returned message looks like this:
354 // path/to/foo.o:(function bar)
356 // or
358 // path/to/foo.o:(function bar) in archive path/to/bar.a
359 const ELFSyncStream &elf::operator<<(const ELFSyncStream &s,
360 InputSectionBase::ObjMsg &&msg) {
361 auto *sec = msg.sec;
362 s << sec->file->getName() << ":(";
364 // Find a symbol that encloses a given location. getObjMsg may be called
365 // before ObjFile::initSectionsAndLocalSyms where local symbols are
366 // initialized.
367 if (Defined *d = sec->getEnclosingSymbol(msg.offset))
368 s << d;
369 else
370 s << sec->name << "+0x" << Twine::utohexstr(msg.offset);
371 s << ')';
372 if (!sec->file->archiveName.empty())
373 s << (" in archive " + sec->file->archiveName).str();
374 return s;
// Creates an empty placeholder section that may later receive (spill) the
// contents of `source` if its original memory region overflows; remembers the
// InputSectionDescription it belongs to.
// NOTE(review): `source.addralign` is passed both as the addralign and the
// entsize argument of InputSection's ctor — confirm whether `source.entsize`
// was intended for the second argument.
PotentialSpillSection::PotentialSpillSection(const InputSectionBase &source,
                                             InputSectionDescription &isd)
    : InputSection(source.file, source.name, source.type, source.flags,
                   source.addralign, source.addralign, {}, SectionBase::Spill),
      isd(&isd) {}
// Sentinel object representing a discarded section; code compares against its
// address (see the `this == &InputSection::discarded` assertion in the
// constructor) rather than its contents.
InputSection InputSection::discarded(nullptr, "", 0, 0, 0, 0,
                                     ArrayRef<uint8_t>());
386 InputSection::InputSection(InputFile *f, StringRef name, uint32_t type,
387 uint64_t flags, uint32_t addralign, uint32_t entsize,
388 ArrayRef<uint8_t> data, Kind k)
389 : InputSectionBase(f, name, type, flags,
390 /*link=*/0, /*info=*/0, addralign, /*entsize=*/entsize,
391 data, k) {
392 assert(f || this == &InputSection::discarded);
// Constructs a regular input section directly from an ELF section header.
template <class ELFT>
InputSection::InputSection(ObjFile<ELFT> &f, const typename ELFT::Shdr &header,
                           StringRef name)
    : InputSectionBase(f, header, name, InputSectionBase::Regular) {}
400 // Copy SHT_GROUP section contents. Used only for the -r option.
401 template <class ELFT> void InputSection::copyShtGroup(uint8_t *buf) {
402 // ELFT::Word is the 32-bit integral type in the target endianness.
403 using u32 = typename ELFT::Word;
404 ArrayRef<u32> from = getDataAs<u32>();
405 auto *to = reinterpret_cast<u32 *>(buf);
407 // The first entry is not a section number but a flag.
408 *to++ = from[0];
410 // Adjust section numbers because section numbers in an input object files are
411 // different in the output. We also need to handle combined or discarded
412 // members.
413 ArrayRef<InputSectionBase *> sections = file->getSections();
414 DenseSet<uint32_t> seen;
415 for (uint32_t idx : from.slice(1)) {
416 OutputSection *osec = sections[idx]->getOutputSection();
417 if (osec && seen.insert(osec->sectionIndex).second)
418 *to++ = osec->sectionIndex;
422 InputSectionBase *InputSection::getRelocatedSection() const {
423 if (file->isInternal() || !isStaticRelSecType(type))
424 return nullptr;
425 ArrayRef<InputSectionBase *> sections = file->getSections();
426 return sections[info];
429 template <class ELFT, class RelTy>
430 void InputSection::copyRelocations(Ctx &ctx, uint8_t *buf) {
431 if (ctx.arg.relax && !ctx.arg.relocatable &&
432 (ctx.arg.emachine == EM_RISCV || ctx.arg.emachine == EM_LOONGARCH)) {
433 // On LoongArch and RISC-V, relaxation might change relocations: copy
434 // from internal ones that are updated by relaxation.
435 InputSectionBase *sec = getRelocatedSection();
436 copyRelocations<ELFT, RelTy>(
437 ctx, buf,
438 llvm::make_range(sec->relocations.begin(), sec->relocations.end()));
439 } else {
440 // Convert the raw relocations in the input section into Relocation objects
441 // suitable to be used by copyRelocations below.
442 struct MapRel {
443 Ctx &ctx;
444 const ObjFile<ELFT> &file;
445 Relocation operator()(const RelTy &rel) const {
446 // RelExpr is not used so set to a dummy value.
447 return Relocation{R_NONE, rel.getType(ctx.arg.isMips64EL), rel.r_offset,
448 getAddend<ELFT>(rel), &file.getRelocTargetSym(rel)};
452 using RawRels = ArrayRef<RelTy>;
453 using MapRelIter =
454 llvm::mapped_iterator<typename RawRels::iterator, MapRel>;
455 auto mapRel = MapRel{ctx, *getFile<ELFT>()};
456 RawRels rawRels = getDataAs<RelTy>();
457 auto rels = llvm::make_range(MapRelIter(rawRels.begin(), mapRel),
458 MapRelIter(rawRels.end(), mapRel));
459 copyRelocations<ELFT, RelTy>(ctx, buf, rels);
463 // This is used for -r and --emit-relocs. We can't use memcpy to copy
464 // relocations because we need to update symbol table offset and section index
465 // for each relocation. So we copy relocations one by one.
466 template <class ELFT, class RelTy, class RelIt>
467 void InputSection::copyRelocations(Ctx &ctx, uint8_t *buf,
468 llvm::iterator_range<RelIt> rels) {
469 const TargetInfo &target = *ctx.target;
470 InputSectionBase *sec = getRelocatedSection();
471 (void)sec->contentMaybeDecompress(); // uncompress if needed
473 for (const Relocation &rel : rels) {
474 RelType type = rel.type;
475 const ObjFile<ELFT> *file = getFile<ELFT>();
476 Symbol &sym = *rel.sym;
478 auto *p = reinterpret_cast<typename ELFT::Rela *>(buf);
479 buf += sizeof(RelTy);
481 if (RelTy::HasAddend)
482 p->r_addend = rel.addend;
484 // Output section VA is zero for -r, so r_offset is an offset within the
485 // section, but for --emit-relocs it is a virtual address.
486 p->r_offset = sec->getVA(rel.offset);
487 p->setSymbolAndType(ctx.in.symTab->getSymbolIndex(sym), type,
488 ctx.arg.isMips64EL);
490 if (sym.type == STT_SECTION) {
491 // We combine multiple section symbols into only one per
492 // section. This means we have to update the addend. That is
493 // trivial for Elf_Rela, but for Elf_Rel we have to write to the
494 // section data. We do that by adding to the Relocation vector.
496 // .eh_frame is horribly special and can reference discarded sections. To
497 // avoid having to parse and recreate .eh_frame, we just replace any
498 // relocation in it pointing to discarded sections with R_*_NONE, which
499 // hopefully creates a frame that is ignored at runtime. Also, don't warn
500 // on .gcc_except_table and debug sections.
502 // See the comment in maybeReportUndefined for PPC32 .got2 and PPC64 .toc
503 auto *d = dyn_cast<Defined>(&sym);
504 if (!d) {
505 if (!isDebugSection(*sec) && sec->name != ".eh_frame" &&
506 sec->name != ".gcc_except_table" && sec->name != ".got2" &&
507 sec->name != ".toc") {
508 uint32_t secIdx = cast<Undefined>(sym).discardedSecIdx;
509 Elf_Shdr_Impl<ELFT> sec = file->template getELFShdrs<ELFT>()[secIdx];
510 Warn(ctx) << "relocation refers to a discarded section: "
511 << CHECK2(file->getObj().getSectionName(sec), file)
512 << "\n>>> referenced by " << getObjMsg(p->r_offset);
514 p->setSymbolAndType(0, 0, false);
515 continue;
517 SectionBase *section = d->section;
518 assert(section->isLive());
520 int64_t addend = rel.addend;
521 const uint8_t *bufLoc = sec->content().begin() + rel.offset;
522 if (!RelTy::HasAddend)
523 addend = target.getImplicitAddend(bufLoc, type);
525 if (ctx.arg.emachine == EM_MIPS &&
526 target.getRelExpr(type, sym, bufLoc) == RE_MIPS_GOTREL) {
527 // Some MIPS relocations depend on "gp" value. By default,
528 // this value has 0x7ff0 offset from a .got section. But
529 // relocatable files produced by a compiler or a linker
530 // might redefine this default value and we must use it
531 // for a calculation of the relocation result. When we
532 // generate EXE or DSO it's trivial. Generating a relocatable
533 // output is more difficult case because the linker does
534 // not calculate relocations in this mode and loses
535 // individual "gp" values used by each input object file.
536 // As a workaround we add the "gp" value to the relocation
537 // addend and save it back to the file.
538 addend += sec->getFile<ELFT>()->mipsGp0;
541 if (RelTy::HasAddend)
542 p->r_addend =
543 sym.getVA(ctx, addend) - section->getOutputSection()->addr;
544 // For SHF_ALLOC sections relocated by REL, append a relocation to
545 // sec->relocations so that relocateAlloc transitively called by
546 // writeSections will update the implicit addend. Non-SHF_ALLOC sections
547 // utilize relocateNonAlloc to process raw relocations and do not need
548 // this sec->relocations change.
549 else if (ctx.arg.relocatable && (sec->flags & SHF_ALLOC) &&
550 type != target.noneRel)
551 sec->addReloc({R_ABS, type, rel.offset, addend, &sym});
552 } else if (ctx.arg.emachine == EM_PPC && type == R_PPC_PLTREL24 &&
553 p->r_addend >= 0x8000 && sec->file->ppc32Got2) {
554 // Similar to R_MIPS_GPREL{16,32}. If the addend of R_PPC_PLTREL24
555 // indicates that r30 is relative to the input section .got2
556 // (r_addend>=0x8000), after linking, r30 should be relative to the output
557 // section .got2 . To compensate for the shift, adjust r_addend by
558 // ppc32Got->outSecOff.
559 p->r_addend += sec->file->ppc32Got2->outSecOff;
564 // The ARM and AArch64 ABI handle pc-relative relocations to undefined weak
565 // references specially. The general rule is that the value of the symbol in
566 // this context is the address of the place P. A further special case is that
567 // branch relocations to an undefined weak reference resolve to the next
568 // instruction.
569 static uint32_t getARMUndefinedRelativeWeakVA(RelType type, uint32_t a,
570 uint32_t p) {
571 switch (type) {
572 // Unresolved branch relocations to weak references resolve to next
573 // instruction, this will be either 2 or 4 bytes on from P.
574 case R_ARM_THM_JUMP8:
575 case R_ARM_THM_JUMP11:
576 return p + 2 + a;
577 case R_ARM_CALL:
578 case R_ARM_JUMP24:
579 case R_ARM_PC24:
580 case R_ARM_PLT32:
581 case R_ARM_PREL31:
582 case R_ARM_THM_JUMP19:
583 case R_ARM_THM_JUMP24:
584 return p + 4 + a;
585 case R_ARM_THM_CALL:
586 // We don't want an interworking BLX to ARM
587 return p + 5 + a;
588 // Unresolved non branch pc-relative relocations
589 // R_ARM_TARGET2 which can be resolved relatively is not present as it never
590 // targets a weak-reference.
591 case R_ARM_MOVW_PREL_NC:
592 case R_ARM_MOVT_PREL:
593 case R_ARM_REL32:
594 case R_ARM_THM_ALU_PREL_11_0:
595 case R_ARM_THM_MOVW_PREL_NC:
596 case R_ARM_THM_MOVT_PREL:
597 case R_ARM_THM_PC12:
598 return p + a;
599 // p + a is unrepresentable as negative immediates can't be encoded.
600 case R_ARM_THM_PC8:
601 return p;
603 llvm_unreachable("ARM pc-relative relocation expected\n");
606 // The comment above getARMUndefinedRelativeWeakVA applies to this function.
607 static uint64_t getAArch64UndefinedRelativeWeakVA(uint64_t type, uint64_t p) {
608 switch (type) {
609 // Unresolved branch relocations to weak references resolve to next
610 // instruction, this is 4 bytes on from P.
611 case R_AARCH64_CALL26:
612 case R_AARCH64_CONDBR19:
613 case R_AARCH64_JUMP26:
614 case R_AARCH64_TSTBR14:
615 return p + 4;
616 // Unresolved non branch pc-relative relocations
617 case R_AARCH64_PREL16:
618 case R_AARCH64_PREL32:
619 case R_AARCH64_PREL64:
620 case R_AARCH64_ADR_PREL_LO21:
621 case R_AARCH64_LD_PREL_LO19:
622 case R_AARCH64_PLT32:
623 return p;
625 llvm_unreachable("AArch64 pc-relative relocation expected\n");
628 static uint64_t getRISCVUndefinedRelativeWeakVA(uint64_t type, uint64_t p) {
629 switch (type) {
630 case R_RISCV_BRANCH:
631 case R_RISCV_JAL:
632 case R_RISCV_CALL:
633 case R_RISCV_CALL_PLT:
634 case R_RISCV_RVC_BRANCH:
635 case R_RISCV_RVC_JUMP:
636 case R_RISCV_PLT32:
637 return p;
638 default:
639 return 0;
643 // ARM SBREL relocations are of the form S + A - B where B is the static base
644 // The ARM ABI defines base to be "addressing origin of the output segment
645 // defining the symbol S". We defined the "addressing origin"/static base to be
646 // the base of the PT_LOAD segment containing the Sym.
647 // The procedure call standard only defines a Read Write Position Independent
648 // RWPI variant so in practice we should expect the static base to be the base
649 // of the RW segment.
650 static uint64_t getARMStaticBase(const Symbol &sym) {
651 OutputSection *os = sym.getOutputSection();
652 if (!os || !os->ptLoad || !os->ptLoad->firstSec)
653 Fatal(os->ctx) << "SBREL relocation to " << sym.getName()
654 << " without static base";
655 return os->ptLoad->firstSec->addr;
658 // For RE_RISCV_PC_INDIRECT (R_RISCV_PCREL_LO12_{I,S}), the symbol actually
659 // points the corresponding R_RISCV_PCREL_HI20 relocation, and the target VA
660 // is calculated using PCREL_HI20's symbol.
662 // This function returns the R_RISCV_PCREL_HI20 relocation from the
663 // R_RISCV_PCREL_LO12 relocation.
664 static Relocation *getRISCVPCRelHi20(Ctx &ctx, const InputSectionBase *loSec,
665 const Relocation &loReloc) {
666 uint64_t addend = loReloc.addend;
667 Symbol *sym = loReloc.sym;
669 const Defined *d = cast<Defined>(sym);
670 if (!d->section) {
671 Err(ctx) << loSec->getLocation(loReloc.offset)
672 << ": R_RISCV_PCREL_LO12 relocation points to an absolute symbol: "
673 << sym->getName();
674 return nullptr;
676 InputSection *hiSec = cast<InputSection>(d->section);
678 if (hiSec != loSec)
679 Err(ctx) << loSec->getLocation(loReloc.offset)
680 << ": R_RISCV_PCREL_LO12 relocation points to a symbol '"
681 << sym->getName() << "' in a different section '" << hiSec->name
682 << "'";
684 if (addend != 0)
685 Warn(ctx) << loSec->getLocation(loReloc.offset)
686 << ": non-zero addend in R_RISCV_PCREL_LO12 relocation to "
687 << hiSec->getObjMsg(d->value) << " is ignored";
689 // Relocations are sorted by offset, so we can use std::equal_range to do
690 // binary search.
691 Relocation hiReloc;
692 hiReloc.offset = d->value;
693 auto range =
694 std::equal_range(hiSec->relocs().begin(), hiSec->relocs().end(), hiReloc,
695 [](const Relocation &lhs, const Relocation &rhs) {
696 return lhs.offset < rhs.offset;
699 for (auto it = range.first; it != range.second; ++it)
700 if (it->type == R_RISCV_PCREL_HI20 || it->type == R_RISCV_GOT_HI20 ||
701 it->type == R_RISCV_TLS_GD_HI20 || it->type == R_RISCV_TLS_GOT_HI20)
702 return &*it;
704 Err(ctx) << loSec->getLocation(loReloc.offset)
705 << ": R_RISCV_PCREL_LO12 relocation points to "
706 << hiSec->getObjMsg(d->value)
707 << " without an associated R_RISCV_PCREL_HI20 relocation";
708 return nullptr;
711 // A TLS symbol's virtual address is relative to the TLS segment. Add a
712 // target-specific adjustment to produce a thread-pointer-relative offset.
713 static int64_t getTlsTpOffset(Ctx &ctx, const Symbol &s) {
714 // On targets that support TLSDESC, _TLS_MODULE_BASE_@tpoff = 0.
715 if (&s == ctx.sym.tlsModuleBase)
716 return 0;
718 // There are 2 TLS layouts. Among targets we support, x86 uses TLS Variant 2
719 // while most others use Variant 1. At run time TP will be aligned to p_align.
721 // Variant 1. TP will be followed by an optional gap (which is the size of 2
722 // pointers on ARM/AArch64, 0 on other targets), followed by alignment
723 // padding, then the static TLS blocks. The alignment padding is added so that
724 // (TP + gap + padding) is congruent to p_vaddr modulo p_align.
726 // Variant 2. Static TLS blocks, followed by alignment padding are placed
727 // before TP. The alignment padding is added so that (TP - padding -
728 // p_memsz) is congruent to p_vaddr modulo p_align.
729 PhdrEntry *tls = ctx.tlsPhdr;
730 if (!tls) // Reported an error in getSymVA
731 return 0;
732 switch (ctx.arg.emachine) {
733 // Variant 1.
734 case EM_ARM:
735 case EM_AARCH64:
736 return s.getVA(ctx, 0) + ctx.arg.wordsize * 2 +
737 ((tls->p_vaddr - ctx.arg.wordsize * 2) & (tls->p_align - 1));
738 case EM_MIPS:
739 case EM_PPC:
740 case EM_PPC64:
741 // Adjusted Variant 1. TP is placed with a displacement of 0x7000, which is
742 // to allow a signed 16-bit offset to reach 0x1000 of TCB/thread-library
743 // data and 0xf000 of the program's TLS segment.
744 return s.getVA(ctx, 0) + (tls->p_vaddr & (tls->p_align - 1)) - 0x7000;
745 case EM_LOONGARCH:
746 case EM_RISCV:
747 // See the comment in handleTlsRelocation. For TLSDESC=>IE,
748 // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} also reach here. While
749 // `tls` may be null, the return value is ignored.
750 if (s.type != STT_TLS)
751 return 0;
752 return s.getVA(ctx, 0) + (tls->p_vaddr & (tls->p_align - 1));
754 // Variant 2.
755 case EM_HEXAGON:
756 case EM_S390:
757 case EM_SPARCV9:
758 case EM_386:
759 case EM_X86_64:
760 return s.getVA(ctx, 0) - tls->p_memsz -
761 ((-tls->p_vaddr - tls->p_memsz) & (tls->p_align - 1));
762 default:
763 llvm_unreachable("unhandled ctx.arg.emachine");
767 uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r,
768 uint64_t p) const {
769 int64_t a = r.addend;
770 switch (r.expr) {
771 case R_ABS:
772 case R_DTPREL:
773 case R_RELAX_TLS_LD_TO_LE_ABS:
774 case R_RELAX_GOT_PC_NOPIC:
775 case RE_AARCH64_AUTH:
776 case RE_RISCV_ADD:
777 case RE_RISCV_LEB128:
778 return r.sym->getVA(ctx, a);
779 case R_ADDEND:
780 return a;
781 case R_RELAX_HINT:
782 return 0;
783 case RE_ARM_SBREL:
784 return r.sym->getVA(ctx, a) - getARMStaticBase(*r.sym);
785 case R_GOT:
786 case RE_AARCH64_AUTH_GOT:
787 case R_RELAX_TLS_GD_TO_IE_ABS:
788 return r.sym->getGotVA(ctx) + a;
789 case RE_LOONGARCH_GOT:
790 // The LoongArch TLS GD relocs reuse the R_LARCH_GOT_PC_LO12 reloc r.type
791 // for their page offsets. The arithmetics are different in the TLS case
792 // so we have to duplicate some logic here.
793 if (r.sym->hasFlag(NEEDS_TLSGD) && r.type != R_LARCH_TLS_IE_PC_LO12)
794 // Like RE_LOONGARCH_TLSGD_PAGE_PC but taking the absolute value.
795 return ctx.in.got->getGlobalDynAddr(*r.sym) + a;
796 return r.sym->getGotVA(ctx) + a;
797 case R_GOTONLY_PC:
798 return ctx.in.got->getVA() + a - p;
799 case R_GOTPLTONLY_PC:
800 return ctx.in.gotPlt->getVA() + a - p;
801 case R_GOTREL:
802 case RE_PPC64_RELAX_TOC:
803 return r.sym->getVA(ctx, a) - ctx.in.got->getVA();
804 case R_GOTPLTREL:
805 return r.sym->getVA(ctx, a) - ctx.in.gotPlt->getVA();
806 case R_GOTPLT:
807 case R_RELAX_TLS_GD_TO_IE_GOTPLT:
808 return r.sym->getGotVA(ctx) + a - ctx.in.gotPlt->getVA();
809 case R_TLSLD_GOT_OFF:
810 case R_GOT_OFF:
811 case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
812 return r.sym->getGotOffset(ctx) + a;
813 case RE_AARCH64_GOT_PAGE_PC:
814 case RE_AARCH64_AUTH_GOT_PAGE_PC:
815 case RE_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
816 return getAArch64Page(r.sym->getGotVA(ctx) + a) - getAArch64Page(p);
817 case RE_AARCH64_GOT_PAGE:
818 return r.sym->getGotVA(ctx) + a - getAArch64Page(ctx.in.got->getVA());
819 case R_GOT_PC:
820 case RE_AARCH64_AUTH_GOT_PC:
821 case R_RELAX_TLS_GD_TO_IE:
822 return r.sym->getGotVA(ctx) + a - p;
823 case R_GOTPLT_GOTREL:
824 return r.sym->getGotPltVA(ctx) + a - ctx.in.got->getVA();
825 case R_GOTPLT_PC:
826 return r.sym->getGotPltVA(ctx) + a - p;
827 case RE_LOONGARCH_GOT_PAGE_PC:
828 if (r.sym->hasFlag(NEEDS_TLSGD))
829 return getLoongArchPageDelta(ctx.in.got->getGlobalDynAddr(*r.sym) + a, p,
830 r.type);
831 return getLoongArchPageDelta(r.sym->getGotVA(ctx) + a, p, r.type);
832 case RE_MIPS_GOTREL:
833 return r.sym->getVA(ctx, a) - ctx.in.mipsGot->getGp(file);
834 case RE_MIPS_GOT_GP:
835 return ctx.in.mipsGot->getGp(file) + a;
836 case RE_MIPS_GOT_GP_PC: {
837 // R_MIPS_LO16 expression has RE_MIPS_GOT_GP_PC r.type iif the target
838 // is _gp_disp symbol. In that case we should use the following
839 // formula for calculation "AHL + GP - P + 4". For details see p. 4-19 at
840 // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
841 // microMIPS variants of these relocations use slightly different
842 // expressions: AHL + GP - P + 3 for %lo() and AHL + GP - P - 1 for %hi()
843 // to correctly handle less-significant bit of the microMIPS symbol.
844 uint64_t v = ctx.in.mipsGot->getGp(file) + a - p;
845 if (r.type == R_MIPS_LO16 || r.type == R_MICROMIPS_LO16)
846 v += 4;
847 if (r.type == R_MICROMIPS_LO16 || r.type == R_MICROMIPS_HI16)
848 v -= 1;
849 return v;
851 case RE_MIPS_GOT_LOCAL_PAGE:
852 // If relocation against MIPS local symbol requires GOT entry, this entry
853 // should be initialized by 'page address'. This address is high 16-bits
854 // of sum the symbol's value and the addend.
855 return ctx.in.mipsGot->getVA() +
856 ctx.in.mipsGot->getPageEntryOffset(file, *r.sym, a) -
857 ctx.in.mipsGot->getGp(file);
858 case RE_MIPS_GOT_OFF:
859 case RE_MIPS_GOT_OFF32:
860 // In case of MIPS if a GOT relocation has non-zero addend this addend
861 // should be applied to the GOT entry content not to the GOT entry offset.
862 // That is why we use separate expression r.type.
863 return ctx.in.mipsGot->getVA() +
864 ctx.in.mipsGot->getSymEntryOffset(file, *r.sym, a) -
865 ctx.in.mipsGot->getGp(file);
866 case RE_MIPS_TLSGD:
867 return ctx.in.mipsGot->getVA() +
868 ctx.in.mipsGot->getGlobalDynOffset(file, *r.sym) -
869 ctx.in.mipsGot->getGp(file);
870 case RE_MIPS_TLSLD:
871 return ctx.in.mipsGot->getVA() + ctx.in.mipsGot->getTlsIndexOffset(file) -
872 ctx.in.mipsGot->getGp(file);
873 case RE_AARCH64_PAGE_PC: {
874 uint64_t val = r.sym->isUndefWeak() ? p + a : r.sym->getVA(ctx, a);
875 return getAArch64Page(val) - getAArch64Page(p);
877 case RE_RISCV_PC_INDIRECT: {
878 if (const Relocation *hiRel = getRISCVPCRelHi20(ctx, this, r))
879 return getRelocTargetVA(ctx, *hiRel, r.sym->getVA(ctx));
880 return 0;
882 case RE_LOONGARCH_PAGE_PC:
883 return getLoongArchPageDelta(r.sym->getVA(ctx, a), p, r.type);
884 case R_PC:
885 case RE_ARM_PCA: {
886 uint64_t dest;
887 if (r.expr == RE_ARM_PCA)
888 // Some PC relative ARM (Thumb) relocations align down the place.
889 p = p & 0xfffffffc;
890 if (r.sym->isUndefined()) {
891 // On ARM and AArch64 a branch to an undefined weak resolves to the next
892 // instruction, otherwise the place. On RISC-V, resolve an undefined weak
893 // to the same instruction to cause an infinite loop (making the user
894 // aware of the issue) while ensuring no overflow.
895 // Note: if the symbol is hidden, its binding has been converted to local,
896 // so we just check isUndefined() here.
897 if (ctx.arg.emachine == EM_ARM)
898 dest = getARMUndefinedRelativeWeakVA(r.type, a, p);
899 else if (ctx.arg.emachine == EM_AARCH64)
900 dest = getAArch64UndefinedRelativeWeakVA(r.type, p) + a;
901 else if (ctx.arg.emachine == EM_PPC)
902 dest = p;
903 else if (ctx.arg.emachine == EM_RISCV)
904 dest = getRISCVUndefinedRelativeWeakVA(r.type, p) + a;
905 else
906 dest = r.sym->getVA(ctx, a);
907 } else {
908 dest = r.sym->getVA(ctx, a);
910 return dest - p;
912 case R_PLT:
913 return r.sym->getPltVA(ctx) + a;
914 case R_PLT_PC:
915 case RE_PPC64_CALL_PLT:
916 return r.sym->getPltVA(ctx) + a - p;
917 case RE_LOONGARCH_PLT_PAGE_PC:
918 return getLoongArchPageDelta(r.sym->getPltVA(ctx) + a, p, r.type);
919 case R_PLT_GOTPLT:
920 return r.sym->getPltVA(ctx) + a - ctx.in.gotPlt->getVA();
921 case R_PLT_GOTREL:
922 return r.sym->getPltVA(ctx) + a - ctx.in.got->getVA();
923 case RE_PPC32_PLTREL:
924 // R_PPC_PLTREL24 uses the addend (usually 0 or 0x8000) to indicate r30
925 // stores _GLOBAL_OFFSET_TABLE_ or .got2+0x8000. The addend is ignored for
926 // target VA computation.
927 return r.sym->getPltVA(ctx) - p;
928 case RE_PPC64_CALL: {
929 uint64_t symVA = r.sym->getVA(ctx, a);
930 // If we have an undefined weak symbol, we might get here with a symbol
931 // address of zero. That could overflow, but the code must be unreachable,
932 // so don't bother doing anything at all.
933 if (!symVA)
934 return 0;
936 // PPC64 V2 ABI describes two entry points to a function. The global entry
937 // point is used for calls where the caller and callee (may) have different
938 // TOC base pointers and r2 needs to be modified to hold the TOC base for
939 // the callee. For local calls the caller and callee share the same
940 // TOC base and so the TOC pointer initialization code should be skipped by
941 // branching to the local entry point.
942 return symVA - p +
943 getPPC64GlobalEntryToLocalEntryOffset(ctx, r.sym->stOther);
945 case RE_PPC64_TOCBASE:
946 return getPPC64TocBase(ctx) + a;
947 case R_RELAX_GOT_PC:
948 case RE_PPC64_RELAX_GOT_PC:
949 return r.sym->getVA(ctx, a) - p;
950 case R_RELAX_TLS_GD_TO_LE:
951 case R_RELAX_TLS_IE_TO_LE:
952 case R_RELAX_TLS_LD_TO_LE:
953 case R_TPREL:
954 // It is not very clear what to return if the symbol is undefined. With
955 // --noinhibit-exec, even a non-weak undefined reference may reach here.
956 // Just return A, which matches R_ABS, and the behavior of some dynamic
957 // loaders.
958 if (r.sym->isUndefined())
959 return a;
960 return getTlsTpOffset(ctx, *r.sym) + a;
961 case R_RELAX_TLS_GD_TO_LE_NEG:
962 case R_TPREL_NEG:
963 if (r.sym->isUndefined())
964 return a;
965 return -getTlsTpOffset(ctx, *r.sym) + a;
966 case R_SIZE:
967 return r.sym->getSize() + a;
968 case R_TLSDESC:
969 return ctx.in.got->getTlsDescAddr(*r.sym) + a;
970 case R_TLSDESC_PC:
971 return ctx.in.got->getTlsDescAddr(*r.sym) + a - p;
972 case R_TLSDESC_GOTPLT:
973 return ctx.in.got->getTlsDescAddr(*r.sym) + a - ctx.in.gotPlt->getVA();
974 case RE_AARCH64_TLSDESC_PAGE:
975 return getAArch64Page(ctx.in.got->getTlsDescAddr(*r.sym) + a) -
976 getAArch64Page(p);
977 case RE_LOONGARCH_TLSDESC_PAGE_PC:
978 return getLoongArchPageDelta(ctx.in.got->getTlsDescAddr(*r.sym) + a, p,
979 r.type);
980 case R_TLSGD_GOT:
981 return ctx.in.got->getGlobalDynOffset(*r.sym) + a;
982 case R_TLSGD_GOTPLT:
983 return ctx.in.got->getGlobalDynAddr(*r.sym) + a - ctx.in.gotPlt->getVA();
984 case R_TLSGD_PC:
985 return ctx.in.got->getGlobalDynAddr(*r.sym) + a - p;
986 case RE_LOONGARCH_TLSGD_PAGE_PC:
987 return getLoongArchPageDelta(ctx.in.got->getGlobalDynAddr(*r.sym) + a, p,
988 r.type);
989 case R_TLSLD_GOTPLT:
990 return ctx.in.got->getVA() + ctx.in.got->getTlsIndexOff() + a -
991 ctx.in.gotPlt->getVA();
992 case R_TLSLD_GOT:
993 return ctx.in.got->getTlsIndexOff() + a;
994 case R_TLSLD_PC:
995 return ctx.in.got->getTlsIndexVA() + a - p;
996 default:
997 llvm_unreachable("invalid expression");
// This function applies relocations to sections without SHF_ALLOC bit.
// Such sections are never mapped to memory at runtime. Debug sections are
// an example. Relocations in non-alloc sections are much easier to
// handle than in allocated sections because it will never need complex
// treatment such as GOT or PLT (because at runtime no one refers them).
// So, we handle relocations for non-alloc sections directly in this
// function as a performance optimization.
template <class ELFT, class RelTy>
void InputSection::relocateNonAlloc(Ctx &ctx, uint8_t *buf,
                                    Relocs<RelTy> rels) {
  // Width of the address type in bits; used to sign-extend computed values.
  const unsigned bits = sizeof(typename ELFT::uint) * 8;
  const TargetInfo &target = *ctx.target;
  const auto emachine = ctx.arg.emachine;
  const bool isDebug = isDebugSection(*this);
  const bool isDebugLine = isDebug && name == ".debug_line";
  // Default tombstone value for relocations referencing discarded/folded
  // symbols in debug sections; see the long comment further down.
  std::optional<uint64_t> tombstone;
  if (isDebug) {
    if (name == ".debug_loc" || name == ".debug_ranges")
      tombstone = 1;
    else if (name == ".debug_names")
      tombstone = UINT64_MAX; // tombstone value
    else
      tombstone = 0;
  // -z dead-reloc-in-nonalloc= overrides the default tombstone. The last
  // matching pattern wins, hence the reverse iteration.
  for (const auto &patAndValue : llvm::reverse(ctx.arg.deadRelocInNonAlloc))
    if (patAndValue.first.match(this->name)) {
      tombstone = patAndValue.second;
      break;

  const InputFile *f = this->file;
  for (auto it = rels.begin(), end = rels.end(); it != end; ++it) {
    const RelTy &rel = *it;
    const RelType type = rel.getType(ctx.arg.isMips64EL);
    const uint64_t offset = rel.r_offset;
    uint8_t *bufLoc = buf + offset;
    int64_t addend = getAddend<ELFT>(rel);
    if (!RelTy::HasAddend)
      addend += target.getImplicitAddend(bufLoc, type);

    Symbol &sym = f->getRelocTargetSym(rel);
    RelExpr expr = target.getRelExpr(type, sym, bufLoc);
    if (expr == R_NONE)
      continue;
    auto *ds = dyn_cast<Defined>(&sym);

    // R_RISCV_SET_ULEB128 must be immediately followed by a matching
    // R_RISCV_SUB_ULEB128 at the same offset; the pair encodes a ULEB128
    // difference which is patched in place.
    if (emachine == EM_RISCV && type == R_RISCV_SET_ULEB128) {
      if (++it != end &&
          it->getType(/*isMips64EL=*/false) == R_RISCV_SUB_ULEB128 &&
          it->r_offset == offset) {
        uint64_t val;
        if (!ds && tombstone) {
          val = *tombstone;
        } else {
          val = sym.getVA(ctx, addend) -
                (f->getRelocTargetSym(*it).getVA(ctx) + getAddend<ELFT>(*it));
        // The rewritten ULEB128 must fit in the space of the existing one.
        if (overwriteULEB128(bufLoc, val) >= 0x80)
          Err(ctx) << getLocation(offset) << ": ULEB128 value " << val
                   << " exceeds available space; references '" << &sym << "'";
        continue;
      // NOTE(review): the message says "R_RISCV_SUB_SET128" while the paired
      // type checked above is R_RISCV_SUB_ULEB128 — looks like a typo; confirm.
      Err(ctx) << getLocation(offset)
               << ": R_RISCV_SET_ULEB128 not paired with R_RISCV_SUB_SET128";
      return;

    if (tombstone && (expr == R_ABS || expr == R_DTPREL)) {
      // Resolve relocations in .debug_* referencing (discarded symbols or ICF
      // folded section symbols) to a tombstone value. Resolving to addend is
      // unsatisfactory because the result address range may collide with a
      // valid range of low address, or leave multiple CUs claiming ownership of
      // the same range of code, which may confuse consumers.
      //
      // To address the problems, we use -1 as a tombstone value for most
      // .debug_* sections. We have to ignore the addend because we don't want
      // to resolve an address attribute (which may have a non-zero addend) to
      // -1+addend (wrap around to a low address).
      //
      // R_DTPREL type relocations represent an offset into the dynamic thread
      // vector. The computed value is st_value plus a non-negative offset.
      // Negative values are invalid, so -1 can be used as the tombstone value.
      //
      // If the referenced symbol is relative to a discarded section (due to
      // --gc-sections, COMDAT, etc), it has been converted to a Undefined.
      // `ds->folded` catches the ICF folded case. However, resolving a
      // relocation in .debug_line to -1 would stop debugger users from setting
      // breakpoints on the folded-in function, so exclude .debug_line.
      //
      // For pre-DWARF-v5 .debug_loc and .debug_ranges, -1 is a reserved value
      // (base address selection entry), use 1 (which is used by GNU ld for
      // .debug_ranges).
      //
      // TODO To reduce disruption, we use 0 instead of -1 as the tombstone
      // value. Enable -1 in a future release.
      if (!ds || (ds->folded && !isDebugLine)) {
        // If -z dead-reloc-in-nonalloc= is specified, respect it.
        uint64_t value = SignExtend64<bits>(*tombstone);
        // For a 32-bit local TU reference in .debug_names, X86_64::relocate
        // requires that the unsigned value for R_X86_64_32 is truncated to
        // 32-bit. Other 64-bit targets's don't discern signed/unsigned 32-bit
        // absolute relocations and do not need this change.
        if (emachine == EM_X86_64 && type == R_X86_64_32)
          value = static_cast<uint32_t>(value);
        target.relocateNoSym(bufLoc, type, value);
        continue;

    // For a relocatable link, content relocated by relocation types with an
    // explicit addend, such as RELA, remain unchanged and we can stop here.
    // While content relocated by relocation types with an implicit addend, such
    // as REL, needs the implicit addend updated.
    if (ctx.arg.relocatable && (RelTy::HasAddend || sym.type != STT_SECTION))
      continue;

    // R_ABS/R_DTPREL and some other relocations can be used from non-SHF_ALLOC
    // sections.
    if (LLVM_LIKELY(expr == R_ABS) || expr == R_DTPREL || expr == R_GOTPLTREL ||
        expr == RE_RISCV_ADD || expr == RE_ARM_SBREL) {
      target.relocateNoSym(bufLoc, type,
                           SignExtend64<bits>(sym.getVA(ctx, addend)));
      continue;

    if (expr == R_SIZE) {
      target.relocateNoSym(bufLoc, type,
                           SignExtend64<bits>(sym.getSize() + addend));
      continue;

    // If the control reaches here, we found a PC-relative relocation in a
    // non-ALLOC section. Since non-ALLOC section is not loaded into memory
    // at runtime, the notion of PC-relative doesn't make sense here. So,
    // this is a usage error. However, GNU linkers historically accept such
    // relocations without any errors and relocate them as if they were at
    // address 0. For bug-compatibility, we accept them with warnings. We
    // know Steel Bank Common Lisp as of 2018 have this bug.
    //
    // GCC 8.0 or earlier have a bug that they emit R_386_GOTPC relocations
    // against _GLOBAL_OFFSET_TABLE_ for .debug_info. The bug has been fixed in
    // 2017 (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82630), but we need to
    // keep this bug-compatible code for a while.
    bool isErr = expr != R_PC && !(emachine == EM_386 && type == R_386_GOTPC);
    ELFSyncStream diag(ctx, isErr && !ctx.arg.noinhibitExec
                                ? DiagLevel::Err
                                : DiagLevel::Warn);
    diag << getLocation(offset) << ": has non-ABS relocation " << type
         << " against symbol '" << &sym << "'";
    if (!isErr)
      target.relocateNoSym(
          bufLoc, type,
          SignExtend64<bits>(sym.getVA(ctx, addend - offset - outSecOff)));
1159 template <class ELFT>
1160 void InputSectionBase::relocate(Ctx &ctx, uint8_t *buf, uint8_t *bufEnd) {
1161 if ((flags & SHF_EXECINSTR) && LLVM_UNLIKELY(getFile<ELFT>()->splitStack))
1162 adjustSplitStackFunctionPrologues<ELFT>(ctx, buf, bufEnd);
1164 if (flags & SHF_ALLOC) {
1165 ctx.target->relocateAlloc(*this, buf);
1166 return;
1169 auto *sec = cast<InputSection>(this);
1170 // For a relocatable link, also call relocateNonAlloc() to rewrite applicable
1171 // locations with tombstone values.
1172 invokeOnRelocs(*sec, sec->relocateNonAlloc<ELFT>, ctx, buf);
1175 // For each function-defining prologue, find any calls to __morestack,
1176 // and replace them with calls to __morestack_non_split.
1177 static void switchMorestackCallsToMorestackNonSplit(
1178 Ctx &ctx, DenseSet<Defined *> &prologues,
1179 SmallVector<Relocation *, 0> &morestackCalls) {
1181 // If the target adjusted a function's prologue, all calls to
1182 // __morestack inside that function should be switched to
1183 // __morestack_non_split.
1184 Symbol *moreStackNonSplit = ctx.symtab->find("__morestack_non_split");
1185 if (!moreStackNonSplit) {
1186 ErrAlways(ctx) << "mixing split-stack objects requires a definition of "
1187 "__morestack_non_split";
1188 return;
1191 // Sort both collections to compare addresses efficiently.
1192 llvm::sort(morestackCalls, [](const Relocation *l, const Relocation *r) {
1193 return l->offset < r->offset;
1195 std::vector<Defined *> functions(prologues.begin(), prologues.end());
1196 llvm::sort(functions, [](const Defined *l, const Defined *r) {
1197 return l->value < r->value;
1200 auto it = morestackCalls.begin();
1201 for (Defined *f : functions) {
1202 // Find the first call to __morestack within the function.
1203 while (it != morestackCalls.end() && (*it)->offset < f->value)
1204 ++it;
1205 // Adjust all calls inside the function.
1206 while (it != morestackCalls.end() && (*it)->offset < f->value + f->size) {
1207 (*it)->sym = moreStackNonSplit;
1208 ++it;
1213 static bool enclosingPrologueAttempted(uint64_t offset,
1214 const DenseSet<Defined *> &prologues) {
1215 for (Defined *f : prologues)
1216 if (f->value <= offset && offset < f->value + f->size)
1217 return true;
1218 return false;
// If a function compiled for split stack calls a function not
// compiled for split stack, then the caller needs its prologue
// adjusted to ensure that the called function will have enough stack
// available. Find those functions, and adjust their prologues.
template <class ELFT>
void InputSectionBase::adjustSplitStackFunctionPrologues(Ctx &ctx, uint8_t *buf,
                                                         uint8_t *end) {
  // Functions whose prologue we already attempted to adjust, and the calls to
  // __morestack that may need retargeting afterwards.
  DenseSet<Defined *> prologues;
  SmallVector<Relocation *, 0> morestackCalls;

  for (Relocation &rel : relocs()) {
    // Ignore calls into the split-stack api.
    if (rel.sym->getName().starts_with("__morestack")) {
      if (rel.sym->getName() == "__morestack")
        morestackCalls.push_back(&rel);
      continue;

    // A relocation to non-function isn't relevant. Sometimes
    // __morestack is not marked as a function, so this check comes
    // after the name check.
    if (rel.sym->type != STT_FUNC)
      continue;

    // If the callee's-file was compiled with split stack, nothing to do. In
    // this context, a "Defined" symbol is one "defined by the binary currently
    // being produced". So an "undefined" symbol might be provided by a shared
    // library. It is not possible to tell how such symbols were compiled, so be
    // conservative.
    // NOTE(review): the `!isec` in the innermost condition is redundant — the
    // enclosing `if (InputSection *isec = ...)` already guarantees non-null.
    if (Defined *d = dyn_cast<Defined>(rel.sym))
      if (InputSection *isec = cast_or_null<InputSection>(d->section))
        if (!isec || !isec->getFile<ELFT>() || isec->getFile<ELFT>()->splitStack)
          continue;

    // Only attempt each enclosing function once.
    if (enclosingPrologueAttempted(rel.offset, prologues))
      continue;

    if (Defined *f = getEnclosingFunction(rel.offset)) {
      prologues.insert(f);
      if (ctx.target->adjustPrologueForCrossSplitStack(buf + f->value, end,
                                                       f->stOther))
        continue;
      // The target could not adjust the prologue; only an error if the file
      // doesn't already acknowledge mixing (someNoSplitStack).
      if (!getFile<ELFT>()->someNoSplitStack)
        Err(ctx)
            << this << ": " << f->getName() << " (with -fsplit-stack) calls "
            << rel.sym->getName()
            << " (without -fsplit-stack), but couldn't adjust its prologue";

  if (ctx.target->needsMoreStackNonSplit)
    switchMorestackCallsToMorestackNonSplit(ctx, prologues, morestackCalls);
// Write this section's contents to `buf` in the output file and apply its
// relocations. Handles the special cases (SHT_NOBITS, relocation sections for
// -r/--emit-relocs, SHT_GROUP, and compressed sections) before the plain
// copy-then-relocate path.
template <class ELFT> void InputSection::writeTo(Ctx &ctx, uint8_t *buf) {
  // SHT_NOBITS (.bss-like) sections occupy no file space; nothing to write.
  if (LLVM_UNLIKELY(type == SHT_NOBITS))
    return;
  // If -r or --emit-relocs is given, then an InputSection
  // may be a relocation section.
  if (LLVM_UNLIKELY(type == SHT_RELA)) {
    copyRelocations<ELFT, typename ELFT::Rela>(ctx, buf);
    return;
  if (LLVM_UNLIKELY(type == SHT_REL)) {
    copyRelocations<ELFT, typename ELFT::Rel>(ctx, buf);
    return;

  // If -r is given, we may have a SHT_GROUP section.
  if (LLVM_UNLIKELY(type == SHT_GROUP)) {
    copyShtGroup<ELFT>(buf);
    return;

  // If this is a compressed section, uncompress section contents directly
  // to the buffer.
  if (compressed) {
    // content_ starts with the compression header (Chdr); the payload follows.
    auto *hdr = reinterpret_cast<const typename ELFT::Chdr *>(content_);
    auto compressed = ArrayRef<uint8_t>(content_, compressedSize)
                          .slice(sizeof(typename ELFT::Chdr));
    size_t size = this->size;
    if (Error e = hdr->ch_type == ELFCOMPRESS_ZLIB
                      ? compression::zlib::decompress(compressed, buf, size)
                      : compression::zstd::decompress(compressed, buf, size))
      Fatal(ctx) << this << ": decompress failed: " << std::move(e);
    uint8_t *bufEnd = buf + size;
    relocate<ELFT>(ctx, buf, bufEnd);
    return;

  // Copy section contents from source object file to output file
  // and then apply relocations.
  memcpy(buf, content().data(), content().size());
  relocate<ELFT>(ctx, buf, buf + content().size());
1317 void InputSection::replace(InputSection *other) {
1318 addralign = std::max(addralign, other->addralign);
1320 // When a section is replaced with another section that was allocated to
1321 // another partition, the replacement section (and its associated sections)
1322 // need to be placed in the main partition so that both partitions will be
1323 // able to access it.
1324 if (partition != other->partition) {
1325 partition = 1;
1326 for (InputSection *isec : dependentSections)
1327 isec->partition = 1;
1330 other->repl = repl;
1331 other->markDead();
// Construct an .eh_frame input section from an object file's section header;
// tagging it EHFrame routes it through the .eh_frame-specific handling.
template <class ELFT>
EhInputSection::EhInputSection(ObjFile<ELFT> &f,
                               const typename ELFT::Shdr &header,
                               StringRef name)
    : InputSectionBase(f, header, name, InputSectionBase::EHFrame) {}
1340 SyntheticSection *EhInputSection::getParent() const {
1341 return cast_or_null<SyntheticSection>(parent);
1344 // .eh_frame is a sequence of CIE or FDE records.
1345 // This function splits an input section into records and returns them.
1346 template <class ELFT> void EhInputSection::split() {
1347 const RelsOrRelas<ELFT> rels = relsOrRelas<ELFT>(/*supportsCrel=*/false);
1348 // getReloc expects the relocations to be sorted by r_offset. See the comment
1349 // in scanRelocs.
1350 if (rels.areRelocsRel()) {
1351 SmallVector<typename ELFT::Rel, 0> storage;
1352 split<ELFT>(sortRels(rels.rels, storage));
1353 } else {
1354 SmallVector<typename ELFT::Rela, 0> storage;
1355 split<ELFT>(sortRels(rels.relas, storage));
// Parse the section contents into CIE and FDE pieces. `rels` must be sorted by
// r_offset; each piece records the index of the first relocation targeting it.
template <class ELFT, class RelTy>
void EhInputSection::split(ArrayRef<RelTy> rels) {
  ArrayRef<uint8_t> d = content();
  // First parse error encountered; reported once after the loop.
  const char *msg = nullptr;
  unsigned relI = 0;
  while (!d.empty()) {
    if (d.size() < 4) {
      msg = "CIE/FDE too small";
      break;
    // Each record starts with a 4-byte length that excludes the length field
    // itself.
    uint64_t size = endian::read32<ELFT::Endianness>(d.data());
    if (size == 0) // ZERO terminator
      break;
    // The next 4 bytes distinguish CIEs (id == 0) from FDEs (CIE pointer).
    uint32_t id = endian::read32<ELFT::Endianness>(d.data() + 4);
    size += 4;
    if (LLVM_UNLIKELY(size > d.size())) {
      // If it is 0xFFFFFFFF, the next 8 bytes contain the size instead,
      // but we do not support that format yet.
      msg = size == UINT32_MAX + uint64_t(4)
                ? "CIE/FDE too large"
                : "CIE/FDE ends past the end of the section";
      break;

    // Find the first relocation that points to [off,off+size). Relocations
    // have been sorted by r_offset.
    const uint64_t off = d.data() - content().data();
    while (relI != rels.size() && rels[relI].r_offset < off)
      ++relI;
    unsigned firstRel = -1;
    if (relI != rels.size() && rels[relI].r_offset < off + size)
      firstRel = relI;
    (id == 0 ? cies : fdes).emplace_back(off, this, size, firstRel);
    d = d.slice(size);
  if (msg)
    Err(file->ctx) << "corrupted .eh_frame: " << msg << "\n>>> defined in "
                   << getObjMsg(d.data() - content().data());
1399 // Return the offset in an output section for a given input offset.
1400 uint64_t EhInputSection::getParentOffset(uint64_t offset) const {
1401 auto it = partition_point(
1402 fdes, [=](EhSectionPiece p) { return p.inputOff <= offset; });
1403 if (it == fdes.begin() || it[-1].inputOff + it[-1].size <= offset) {
1404 it = partition_point(
1405 cies, [=](EhSectionPiece p) { return p.inputOff <= offset; });
1406 if (it == cies.begin()) // invalid piece
1407 return offset;
1409 if (it[-1].outputOff == -1) // invalid piece
1410 return offset - it[-1].inputOff;
1411 return it[-1].outputOff + (offset - it[-1].inputOff);
1414 static size_t findNull(StringRef s, size_t entSize) {
1415 for (unsigned i = 0, n = s.size(); i != n; i += entSize) {
1416 const char *b = s.begin() + i;
1417 if (std::all_of(b, b + entSize, [](char c) { return c == 0; }))
1418 return i;
1420 llvm_unreachable("");
1423 // Split SHF_STRINGS section. Such section is a sequence of
1424 // null-terminated strings.
1425 void MergeInputSection::splitStrings(StringRef s, size_t entSize) {
1426 const bool live = !(flags & SHF_ALLOC) || !getCtx().arg.gcSections;
1427 const char *p = s.data(), *end = s.data() + s.size();
1428 if (!std::all_of(end - entSize, end, [](char c) { return c == 0; }))
1429 Fatal(getCtx()) << this << ": string is not null terminated";
1430 if (entSize == 1) {
1431 // Optimize the common case.
1432 do {
1433 size_t size = strlen(p);
1434 pieces.emplace_back(p - s.begin(), xxh3_64bits(StringRef(p, size)), live);
1435 p += size + 1;
1436 } while (p != end);
1437 } else {
1438 do {
1439 size_t size = findNull(StringRef(p, end - p), entSize);
1440 pieces.emplace_back(p - s.begin(), xxh3_64bits(StringRef(p, size)), live);
1441 p += size + entSize;
1442 } while (p != end);
1446 // Split non-SHF_STRINGS section. Such section is a sequence of
1447 // fixed size records.
1448 void MergeInputSection::splitNonStrings(ArrayRef<uint8_t> data,
1449 size_t entSize) {
1450 size_t size = data.size();
1451 assert((size % entSize) == 0);
1452 const bool live = !(flags & SHF_ALLOC) || !getCtx().arg.gcSections;
1454 pieces.resize_for_overwrite(size / entSize);
1455 for (size_t i = 0, j = 0; i != size; i += entSize, j++)
1456 pieces[j] = {i, (uint32_t)xxh3_64bits(data.slice(i, entSize)), live};
// Construct a mergeable (SHF_MERGE) input section from an object file's
// section header.
template <class ELFT>
MergeInputSection::MergeInputSection(ObjFile<ELFT> &f,
                                     const typename ELFT::Shdr &header,
                                     StringRef name)
    : InputSectionBase(f, header, name, InputSectionBase::Merge) {}
// Construct a linker-internal mergeable section (backed by ctx.internalFile
// rather than a real object file). Alignment defaults to entsize.
MergeInputSection::MergeInputSection(Ctx &ctx, StringRef name, uint32_t type,
                                     uint64_t flags, uint64_t entsize,
                                     ArrayRef<uint8_t> data)
    : InputSectionBase(ctx.internalFile, name, type, flags, /*link=*/0,
                       /*info=*/0,
                       /*addralign=*/entsize, entsize, data,
                       SectionBase::Merge) {}
1473 // This function is called after we obtain a complete list of input sections
1474 // that need to be linked. This is responsible to split section contents
1475 // into small chunks for further processing.
1477 // Note that this function is called from parallelForEach. This must be
1478 // thread-safe (i.e. no memory allocation from the pools).
1479 void MergeInputSection::splitIntoPieces() {
1480 assert(pieces.empty());
1482 if (flags & SHF_STRINGS)
1483 splitStrings(toStringRef(contentMaybeDecompress()), entsize);
1484 else
1485 splitNonStrings(contentMaybeDecompress(), entsize);
1488 SectionPiece &MergeInputSection::getSectionPiece(uint64_t offset) {
1489 if (content().size() <= offset)
1490 Fatal(getCtx()) << this << ": offset is outside the section";
1491 return partition_point(
1492 pieces, [=](SectionPiece p) { return p.inputOff <= offset; })[-1];
1495 // Return the offset in an output section for a given input offset.
1496 uint64_t MergeInputSection::getParentOffset(uint64_t offset) const {
1497 const SectionPiece &piece = getSectionPiece(offset);
1498 return piece.outputOff + (offset - piece.inputOff);
// Explicit template instantiations for the four supported ELF flavors
// (32/64-bit, little/big endian).
template InputSection::InputSection(ObjFile<ELF32LE> &, const ELF32LE::Shdr &,
                                    StringRef);
template InputSection::InputSection(ObjFile<ELF32BE> &, const ELF32BE::Shdr &,
                                    StringRef);
template InputSection::InputSection(ObjFile<ELF64LE> &, const ELF64LE::Shdr &,
                                    StringRef);
template InputSection::InputSection(ObjFile<ELF64BE> &, const ELF64BE::Shdr &,
                                    StringRef);

template void InputSection::writeTo<ELF32LE>(Ctx &, uint8_t *);
template void InputSection::writeTo<ELF32BE>(Ctx &, uint8_t *);
template void InputSection::writeTo<ELF64LE>(Ctx &, uint8_t *);
template void InputSection::writeTo<ELF64BE>(Ctx &, uint8_t *);

template RelsOrRelas<ELF32LE>
InputSectionBase::relsOrRelas<ELF32LE>(bool) const;
template RelsOrRelas<ELF32BE>
InputSectionBase::relsOrRelas<ELF32BE>(bool) const;
template RelsOrRelas<ELF64LE>
InputSectionBase::relsOrRelas<ELF64LE>(bool) const;
template RelsOrRelas<ELF64BE>
InputSectionBase::relsOrRelas<ELF64BE>(bool) const;

template MergeInputSection::MergeInputSection(ObjFile<ELF32LE> &,
                                              const ELF32LE::Shdr &, StringRef);
template MergeInputSection::MergeInputSection(ObjFile<ELF32BE> &,
                                              const ELF32BE::Shdr &, StringRef);
template MergeInputSection::MergeInputSection(ObjFile<ELF64LE> &,
                                              const ELF64LE::Shdr &, StringRef);
template MergeInputSection::MergeInputSection(ObjFile<ELF64BE> &,
                                              const ELF64BE::Shdr &, StringRef);

template EhInputSection::EhInputSection(ObjFile<ELF32LE> &,
                                        const ELF32LE::Shdr &, StringRef);
template EhInputSection::EhInputSection(ObjFile<ELF32BE> &,
                                        const ELF32BE::Shdr &, StringRef);
template EhInputSection::EhInputSection(ObjFile<ELF64LE> &,
                                        const ELF64LE::Shdr &, StringRef);
template EhInputSection::EhInputSection(ObjFile<ELF64BE> &,
                                        const ELF64BE::Shdr &, StringRef);

template void EhInputSection::split<ELF32LE>();
template void EhInputSection::split<ELF32BE>();
template void EhInputSection::split<ELF64LE>();
template void EhInputSection::split<ELF64BE>();