//===- OutputSections.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "OutputSections.h"
#include "Config.h"
#include "InputFiles.h"
#include "LinkerScript.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/Arrays.h"
#include "lld/Common/Memory.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Config/llvm-config.h" // LLVM_ENABLE_ZLIB
#include "llvm/Support/Compression.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TimeProfiler.h"
#if LLVM_ENABLE_ZLIB
// Avoid introducing max as a macro from Windows headers.
#define NOMINMAX
#include <zlib.h>
#endif
#if LLVM_ENABLE_ZSTD
#include <zstd.h>
#endif

using namespace llvm;
using namespace llvm::dwarf;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;
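
// Map this output section's SHF_* flags to the PF_* flags of the program
// header that will contain it. Sections are readable by default, except on
// Arm, where SHF_ARM_PURECODE marks execute-only code that must not be
// mapped readable.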
uint32_t OutputSection::getPhdrFlags() const {
  uint32_t ret = 0;
  if (config->emachine != EM_ARM || !(flags & SHF_ARM_PURECODE))
    ret |= PF_R;
  if (flags & SHF_WRITE)
    ret |= PF_W;
  if (flags & SHF_EXECINSTR)
    ret |= PF_X;
  return ret;
}

template <class ELFT>
void OutputSection::writeHeaderTo(typename ELFT::Shdr *shdr) {
  shdr->sh_entsize = entsize;
  shdr->sh_addralign = addralign;
  shdr->sh_type = type;
  shdr->sh_offset = offset;
  shdr->sh_flags = flags;
  shdr->sh_info = info;
  shdr->sh_link = link;
  shdr->sh_addr = addr;
  shdr->sh_size = size;
  shdr->sh_name = shName;
}

OutputSection::OutputSection(StringRef name, uint32_t type, uint64_t flags)
    : SectionBase(Output, name, flags, /*Entsize*/ 0, /*Alignment*/ 1, type,
                  /*Info*/ 0, /*Link*/ 0) {}

// We allow sections of the types listed below to be merged into a single
// progbits section. This is typically done by linker scripts. Merging nobits
// and progbits will force disk space to be allocated for nobits sections.
// Other types don't require any special treatment on top of progbits, so
// there doesn't seem to be any harm in merging them.
//
// NOTE: clang since rL252300 emits SHT_X86_64_UNWIND .eh_frame sections. Allow
// them to be merged into SHT_PROGBITS .eh_frame (GNU as .cfi_*).
static bool canMergeToProgbits(unsigned type) {
  return type == SHT_NOBITS || type == SHT_PROGBITS || type == SHT_INIT_ARRAY ||
         type == SHT_PREINIT_ARRAY || type == SHT_FINI_ARRAY ||
         type == SHT_NOTE ||
         (type == SHT_X86_64_UNWIND && config->emachine == EM_X86_64);
}

// Record that isec will be placed in the OutputSection. isec does not become
// permanent until finalizeInputSections() is called. The function should not
// be used after finalizeInputSections() is called. If you need to add an
// InputSection post finalizeInputSections(), then you must do the following:
//
// 1. Find or create an InputSectionDescription to hold InputSection.
// 2. Add the InputSection to the InputSectionDescription::sections.
// 3. Call commitSection(isec).
void OutputSection::recordSection(InputSectionBase *isec) {
  partition = isec->partition;
  isec->parent = this;
  if (commands.empty() || !isa<InputSectionDescription>(commands.back()))
    commands.push_back(make<InputSectionDescription>(""));
  auto *isd = cast<InputSectionDescription>(commands.back());
  isd->sectionBases.push_back(isec);
}

// Update fields (type, flags, alignment, etc) according to the InputSection
// isec. Also check whether the InputSection flags and type are consistent with
// other InputSections.
void OutputSection::commitSection(InputSection *isec) {
  if (LLVM_UNLIKELY(type != isec->type)) {
    if (!hasInputSections && !typeIsSet) {
      type = isec->type;
    } else if (isStaticRelSecType(type) && isStaticRelSecType(isec->type) &&
               (type == SHT_CREL) != (isec->type == SHT_CREL)) {
      // Combine mixed SHT_REL[A] and SHT_CREL to SHT_CREL.
      type = SHT_CREL;
      if (type == SHT_REL) {
        if (name.consume_front(".rel"))
          name = saver().save(".crel" + name);
      } else if (name.consume_front(".rela")) {
        name = saver().save(".crel" + name);
      }
    } else {
      if (typeIsSet || !canMergeToProgbits(type) ||
          !canMergeToProgbits(isec->type)) {
        // The (NOLOAD) directive changes the section type to SHT_NOBITS; the
        // intention is that the contents at that address are provided by some
        // other means. Some projects (e.g.
        // https://github.com/ClangBuiltLinux/linux/issues/1597) rely on this
        // behavior. Other types get an error.
        if (type != SHT_NOBITS) {
          errorOrWarn("section type mismatch for " + isec->name + "\n>>> " +
                      toString(isec) + ": " +
                      getELFSectionTypeName(config->emachine, isec->type) +
                      "\n>>> output section " + name + ": " +
                      getELFSectionTypeName(config->emachine, type));
        }
      }
      if (!typeIsSet)
        type = SHT_PROGBITS;
    }
  }
  if (!hasInputSections) {
    // If IS is the first section to be added to this section,
    // initialize type, entsize and flags from isec.
    hasInputSections = true;
    entsize = isec->entsize;
    flags = isec->flags;
  } else {
    // Otherwise, check if new type or flags are compatible with existing ones.
    if ((flags ^ isec->flags) & SHF_TLS)
      error("incompatible section flags for " + name + "\n>>> " +
            toString(isec) + ": 0x" + utohexstr(isec->flags) +
            "\n>>> output section " + name + ": 0x" + utohexstr(flags));
  }

  isec->parent = this;
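
  // Merge flags: most flags are combined with bitwise OR, but on Arm
  // SHF_ARM_PURECODE is AND-combined, since the output may be marked
  // execute-only only if every contributing input section is.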
  uint64_t andMask =
      config->emachine == EM_ARM ? (uint64_t)SHF_ARM_PURECODE : 0;
  uint64_t orMask = ~andMask;
  uint64_t andFlags = (flags & isec->flags) & andMask;
  uint64_t orFlags = (flags | isec->flags) & orMask;
  flags = andFlags | orFlags;
  if (nonAlloc)
    flags &= ~(uint64_t)SHF_ALLOC;

  addralign = std::max(addralign, isec->addralign);

  // If this section contains a table of fixed-size entries, sh_entsize
  // holds the element size. If it contains elements of different size we
  // set sh_entsize to 0.
  if (entsize != isec->entsize)
    entsize = 0;
}

static MergeSyntheticSection *createMergeSynthetic(StringRef name,
                                                   uint32_t type,
                                                   uint64_t flags,
                                                   uint32_t addralign) {
  if ((flags & SHF_STRINGS) && config->optimize >= 2)
    return make<MergeTailSection>(name, type, flags, addralign);
  return make<MergeNoTailSection>(name, type, flags, addralign);
}

// This function scans over the InputSectionBase list sectionBases to create
// InputSectionDescription::sections.
//
// It removes MergeInputSections from the input section array and adds
// new synthetic sections at the location of the first input section
// that it replaces. It then finalizes each synthetic section in order
// to compute an output offset for each piece of each input section.
void OutputSection::finalizeInputSections(LinkerScript *script) {
  std::vector<MergeSyntheticSection *> mergeSections;
  for (SectionCommand *cmd : commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    isd->sections.reserve(isd->sectionBases.size());
    for (InputSectionBase *s : isd->sectionBases) {
      MergeInputSection *ms = dyn_cast<MergeInputSection>(s);
      if (!ms) {
        isd->sections.push_back(cast<InputSection>(s));
        continue;
      }

      // We do not want to handle sections that are not alive, so just remove
      // them instead of trying to merge.
      if (!ms->isLive())
        continue;

      auto i = llvm::find_if(mergeSections, [=](MergeSyntheticSection *sec) {
        // While we could create a single synthetic section for two different
        // values of Entsize, it is better to take Entsize into consideration.
        //
        // With a single synthetic section no two pieces with different Entsize
        // could be equal, so we may as well have two sections.
        //
        // Using Entsize in here also allows us to propagate it to the
        // synthetic section.
        //
        // SHF_STRINGS sections with different alignments should not be merged.
        return sec->flags == ms->flags && sec->entsize == ms->entsize &&
               (sec->addralign == ms->addralign || !(sec->flags & SHF_STRINGS));
      });
      if (i == mergeSections.end()) {
        MergeSyntheticSection *syn =
            createMergeSynthetic(s->name, ms->type, ms->flags, ms->addralign);
        mergeSections.push_back(syn);
        i = std::prev(mergeSections.end());
        syn->entsize = ms->entsize;
        isd->sections.push_back(syn);
        // The merge synthetic section inherits the potential spill locations
        // of its first contained section.
        auto it = script->potentialSpillLists.find(ms);
        if (it != script->potentialSpillLists.end())
          script->potentialSpillLists.try_emplace(syn, it->second);
      }
      (*i)->addSection(ms);
    }

    // sectionBases should not be used from this point onwards. Clear it to
    // catch misuses.
    isd->sectionBases.clear();

    // Some input sections may be removed from the list after ICF.
    for (InputSection *s : isd->sections)
      commitSection(s);
  }
  for (auto *ms : mergeSections)
    ms->finalizeContents();
}
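
// Sorts the input sections by the given order predicate. The sort is stable:
// sections with equal keys keep their original relative order, which later
// sorting passes (e.g. sortInitFini) rely on.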
static void sortByOrder(MutableArrayRef<InputSection *> in,
                        llvm::function_ref<int(InputSectionBase *s)> order) {
  std::vector<std::pair<int, InputSection *>> v;
  for (InputSection *s : in)
    v.emplace_back(order(s), s);
  llvm::stable_sort(v, less_first());

  for (size_t i = 0; i < v.size(); ++i)
    in[i] = v[i].second;
}

uint64_t elf::getHeaderSize() {
  if (config->oFormatBinary)
    return 0;
  return ctx.out.elfHeader->size + ctx.out.programHeaders->size;
}

void OutputSection::sort(llvm::function_ref<int(InputSectionBase *s)> order) {
  assert(isLive());
  for (SectionCommand *b : commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(b))
      sortByOrder(isd->sections, order);
}
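
// Fills [buf, buf + size) with target NOP instructions. ctx.target->nopInstrs
// holds NOP encodings indexed by length: the assert below relies on entry [i]
// being exactly i + 1 bytes. We emit as many maximal-length NOPs as fit, then
// one shorter NOP covering the remainder.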
static void nopInstrFill(uint8_t *buf, size_t size) {
  if (size == 0)
    return;
  unsigned i = 0;
  std::vector<std::vector<uint8_t>> nopFiller = *ctx.target->nopInstrs;
  unsigned num = size / nopFiller.back().size();
  for (unsigned c = 0; c < num; ++c) {
    memcpy(buf + i, nopFiller.back().data(), nopFiller.back().size());
    i += nopFiller.back().size();
  }
  unsigned remaining = size - i;
  if (!remaining)
    return;
  assert(nopFiller[remaining - 1].size() == remaining);
  memcpy(buf + i, nopFiller[remaining - 1].data(), remaining);
}

// Fill [Buf, Buf + Size) with Filler.
// This is used for linker script "=fillexp" command.
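// The loop stops one word early (i + 4 < size) so that the trailing memcpy
// always writes the final 1-4 bytes, covering sizes that are not a multiple
// of 4.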
static void fill(uint8_t *buf, size_t size,
                 const std::array<uint8_t, 4> &filler) {
  size_t i = 0;
  for (; i + 4 < size; i += 4)
    memcpy(buf + i, filler.data(), 4);
  memcpy(buf + i, filler.data(), size - i);
}

#if LLVM_ENABLE_ZLIB
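// Compress one shard with raw DEFLATE (negative windowBits, so no zlib header
// or trailer). This lets independently compressed shards be concatenated into
// a single zlib stream: the 2-byte header and the combined Adler-32 checksum
// are supplied later in writeTo().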
static SmallVector<uint8_t, 0> deflateShard(ArrayRef<uint8_t> in, int level,
                                            int flush) {
  // 15 and 8 are default. windowBits=-15 is negative to generate raw deflate
  // data with no zlib header or trailer.
  z_stream s = {};
  auto res = deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY);
  if (res != 0) {
    errorOrWarn("--compress-sections: deflateInit2 returned " + Twine(res));
    return {};
  }
  s.next_in = const_cast<uint8_t *>(in.data());
  s.avail_in = in.size();

  // Allocate a buffer of half of the input size, and grow it by 1.5x if
  // insufficient.
  SmallVector<uint8_t, 0> out;
  size_t pos = 0;
  out.resize_for_overwrite(std::max<size_t>(in.size() / 2, 64));
  do {
    if (pos == out.size())
      out.resize_for_overwrite(out.size() * 3 / 2);
    s.next_out = out.data() + pos;
    s.avail_out = out.size() - pos;
    (void)deflate(&s, flush);
    pos = s.next_out - out.data();
  } while (s.avail_out == 0);
  assert(s.avail_in == 0);

  out.truncate(pos);
  deflateEnd(&s);
  return out;
}
#endif

// Compress certain non-SHF_ALLOC sections:
//
// * (if --compress-debug-sections is specified) non-empty .debug_* sections
// * (if --compress-sections is specified) matched sections
template <class ELFT> void OutputSection::maybeCompress() {
  using Elf_Chdr = typename ELFT::Chdr;
  (void)sizeof(Elf_Chdr);

  DebugCompressionType ctype = DebugCompressionType::None;
  size_t compressedSize = sizeof(Elf_Chdr);
  unsigned level = 0; // default compression level
  if (!(flags & SHF_ALLOC) && config->compressDebugSections &&
      name.starts_with(".debug_"))
    ctype = *config->compressDebugSections;
  for (auto &[glob, t, l] : config->compressSections)
    if (glob.match(name))
      std::tie(ctype, level) = {t, l};
  if (ctype == DebugCompressionType::None)
    return;
  if (flags & SHF_ALLOC) {
    errorOrWarn("--compress-sections: section '" + name +
                "' with the SHF_ALLOC flag cannot be compressed");
    return;
  }

  llvm::TimeTraceScope timeScope("Compress sections");
  auto buf = std::make_unique<uint8_t[]>(size);
  // Write uncompressed data to a temporary zero-initialized buffer.
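  // The TaskGroup is scoped to a block so that all spawned write tasks
  // complete before compression reads the buffer.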
  {
    parallel::TaskGroup tg;
    writeTo<ELFT>(buf.get(), tg);
  }
  // The generic ABI specifies "The sh_size and sh_addralign fields of the
  // section header for a compressed section reflect the requirements of the
  // compressed section." However, 1-byte alignment has been widely accepted
  // and utilized for a long time. Removing alignment padding is particularly
  // useful when there are many compressed output sections.
  addralign = 1;

  // Split input into 1-MiB shards so that shards can be compressed in
  // parallel.
  [[maybe_unused]] constexpr size_t shardSize = 1 << 20;
  auto shardsIn = split(ArrayRef<uint8_t>(buf.get(), size), shardSize);
  const size_t numShards = shardsIn.size();
  auto shardsOut = std::make_unique<SmallVector<uint8_t, 0>[]>(numShards);

#if LLVM_ENABLE_ZSTD
  // Use ZSTD's streaming compression API. See
  // http://facebook.github.io/zstd/zstd_manual.html "Streaming compression -
  // HowTo".
  if (ctype == DebugCompressionType::Zstd) {
    parallelFor(0, numShards, [&](size_t i) {
      SmallVector<uint8_t, 0> out;
      ZSTD_CCtx *cctx = ZSTD_createCCtx();
      ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
      ZSTD_inBuffer zib = {shardsIn[i].data(), shardsIn[i].size(), 0};
      ZSTD_outBuffer zob = {nullptr, 0, 0};
      size_t size;
      do {
        // Allocate a buffer of a quarter of the input size, and grow it by
        // 1.5x if insufficient.
        if (zob.pos == zob.size) {
          out.resize_for_overwrite(
              zob.size ? zob.size * 3 / 2 : std::max<size_t>(zib.size / 4, 64));
          zob = {out.data(), out.size(), zob.pos};
        }
        size = ZSTD_compressStream2(cctx, &zob, &zib, ZSTD_e_end);
        assert(!ZSTD_isError(size));
      } while (size != 0);
      out.truncate(zob.pos);
      ZSTD_freeCCtx(cctx);
      shardsOut[i] = std::move(out);
    });
    compressed.type = ELFCOMPRESS_ZSTD;
    for (size_t i = 0; i != numShards; ++i)
      compressedSize += shardsOut[i].size();
  }
#endif

#if LLVM_ENABLE_ZLIB
  // We chose 1 (Z_BEST_SPEED) as the default compression level because it is
  // fast and provides decent compression ratios.
  if (ctype == DebugCompressionType::Zlib) {
    if (!level)
      level = Z_BEST_SPEED;

    // Compress shards and compute Adler-32 checksums. Use Z_SYNC_FLUSH for all
    // shards but the last to flush the output to a byte boundary to be
    // concatenated with the next shard.
    auto shardsAdler = std::make_unique<uint32_t[]>(numShards);
    parallelFor(0, numShards, [&](size_t i) {
      shardsOut[i] = deflateShard(shardsIn[i], level,
                                  i != numShards - 1 ? Z_SYNC_FLUSH : Z_FINISH);
      shardsAdler[i] = adler32(1, shardsIn[i].data(), shardsIn[i].size());
    });

    // Update section size and combine Adler-32 checksums.
    uint32_t checksum = 1; // Initial Adler-32 value
    compressedSize += 2;   // zlib header (CMF and FLG bytes)
    for (size_t i = 0; i != numShards; ++i) {
      compressedSize += shardsOut[i].size();
      checksum = adler32_combine(checksum, shardsAdler[i], shardsIn[i].size());
    }
    compressedSize += 4; // checksum
    compressed.type = ELFCOMPRESS_ZLIB;
    compressed.checksum = checksum;
  }
#endif

  if (compressedSize >= size)
    return;
  compressed.uncompressedSize = size;
  compressed.shards = std::move(shardsOut);
  compressed.numShards = numShards;
  size = compressedSize;
  flags |= SHF_COMPRESSED;
}

static void writeInt(uint8_t *buf, uint64_t data, uint64_t size) {
  if (size == 1)
    *buf = data;
  else if (size == 2)
    write16(buf, data);
  else if (size == 4)
    write32(buf, data);
  else if (size == 8)
    write64(buf, data);
  else
    llvm_unreachable("unsupported Size argument");
}

template <class ELFT>
void OutputSection::writeTo(uint8_t *buf, parallel::TaskGroup &tg) {
  llvm::TimeTraceScope timeScope("Write sections", name);
  if (type == SHT_NOBITS)
    return;
  if (type == SHT_CREL && !(flags & SHF_ALLOC)) {
    buf += encodeULEB128(crelHeader, buf);
    memcpy(buf, crelBody.data(), crelBody.size());
    return;
  }

  // If the section is compressed due to
  // --compress-debug-sections/--compress-sections, the content is already
  // known.
  if (compressed.shards) {
    auto *chdr = reinterpret_cast<typename ELFT::Chdr *>(buf);
    chdr->ch_type = compressed.type;
    chdr->ch_size = compressed.uncompressedSize;
    chdr->ch_addralign = addralign;
    buf += sizeof(*chdr);

    auto offsets = std::make_unique<size_t[]>(compressed.numShards);
    if (compressed.type == ELFCOMPRESS_ZLIB) {
      buf[0] = 0x78;  // CMF
      buf[1] = 0x01;  // FLG: best speed
      offsets[0] = 2; // zlib header
      write32be(buf + (size - sizeof(*chdr) - 4), compressed.checksum);
    }
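
    // A valid zlib stream is thus reassembled in place: the 2-byte header
    // above, the concatenated raw-deflate shards copied below, and the
    // combined Adler-32 checksum in the final 4 bytes.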

    // Compute shard offsets.
    for (size_t i = 1; i != compressed.numShards; ++i)
      offsets[i] = offsets[i - 1] + compressed.shards[i - 1].size();
    parallelFor(0, compressed.numShards, [&](size_t i) {
      memcpy(buf + offsets[i], compressed.shards[i].data(),
             compressed.shards[i].size());
    });
    return;
  }

  // Write leading padding.
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  std::array<uint8_t, 4> filler = getFiller();
  bool nonZeroFiller = read32(filler.data()) != 0;
  if (nonZeroFiller)
    fill(buf, sections.empty() ? size : sections[0]->outSecOff, filler);

  auto fn = [=](size_t begin, size_t end) {
    size_t numSections = sections.size();
    for (size_t i = begin; i != end; ++i) {
      InputSection *isec = sections[i];
      if (auto *s = dyn_cast<SyntheticSection>(isec))
        s->writeTo(buf + isec->outSecOff);
      else
        isec->writeTo<ELFT>(buf + isec->outSecOff);

      // When in Arm BE8 mode, the linker has to convert the big-endian
      // instructions to little-endian, leaving the data big-endian.
      if (config->emachine == EM_ARM && !config->isLE && config->armBe8 &&
          (flags & SHF_EXECINSTR))
        convertArmInstructionstoBE8(isec, buf + isec->outSecOff);

      // Fill gaps between sections.
      if (nonZeroFiller) {
        uint8_t *start = buf + isec->outSecOff + isec->getSize();
        uint8_t *end;
        if (i + 1 == numSections)
          end = buf + size;
        else
          end = buf + sections[i + 1]->outSecOff;
        if (isec->nopFiller) {
          assert(ctx.target->nopInstrs);
          nopInstrFill(start, end - start);
        } else
          fill(start, end - start, filler);
      }
    }
  };

  // If there is any BYTE()-family command (rare), write the section content
  // first, then process BYTE to overwrite the filler content. The write is
  // serial due to the limitation of llvm/Support/Parallel.h.
  bool written = false;
  size_t numSections = sections.size();
  for (SectionCommand *cmd : commands)
    if (auto *data = dyn_cast<ByteCommand>(cmd)) {
      if (!std::exchange(written, true))
        fn(0, numSections);
      writeInt(buf + data->offset, data->expression().getValue(), data->size);
    }
  if (written || !numSections)
    return;

  // There is no data command. Write content asynchronously to overlap the
  // write time with other output sections. Note, if a linker script specifies
  // overlapping output sections (needs --noinhibit-exec or --no-check-sections
  // to suppress the error), the output may be non-deterministic.
  const size_t taskSizeLimit = 4 << 20;
  for (size_t begin = 0, i = 0, taskSize = 0;;) {
    taskSize += sections[i]->getSize();
    bool done = ++i == numSections;
    if (done || taskSize >= taskSizeLimit) {
      tg.spawn([=] { fn(begin, i); });
      if (done)
        break;
      begin = i;
      taskSize = 0;
    }
  }
}

static void finalizeShtGroup(OutputSection *os, InputSection *section) {
  // sh_link field for SHT_GROUP sections should contain the section index of
  // the symbol table.
  os->link = in.symTab->getParent()->sectionIndex;

  if (!section)
    return;

  // sh_info then contains the index of an entry in the symbol table section
  // which provides the signature of the section group.
  ArrayRef<Symbol *> symbols = section->file->getSymbols();
  os->info = in.symTab->getSymbolIndex(*symbols[section->info]);

  // Some group members may be combined or discarded, so we need to compute
  // the new size. The content will be rewritten in InputSection::copyShtGroup.
  DenseSet<uint32_t> seen;
  ArrayRef<InputSectionBase *> sections = section->file->getSections();
  for (const uint32_t &idx : section->getDataAs<uint32_t>().slice(1))
    if (OutputSection *osec = sections[read32(&idx)]->getOutputSection())
      seen.insert(osec->sectionIndex);
  os->size = (1 + seen.size()) * sizeof(uint32_t);
}
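
// Encode one relocation in the CREL format. Each field is delta-encoded
// against the previous relocation: the three low bits of the leading byte
// flag whether symidx, type, and addend changed, and the remaining bits hold
// the offset delta (with 0x80 marking a ULEB128 continuation for large
// deltas). Unchanged fields are omitted entirely.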
template <class uint>
LLVM_ATTRIBUTE_ALWAYS_INLINE static void
encodeOneCrel(raw_svector_ostream &os, Elf_Crel<sizeof(uint) == 8> &out,
              uint offset, const Symbol &sym, uint32_t type, uint addend) {
  const auto deltaOffset = static_cast<uint64_t>(offset - out.r_offset);
  out.r_offset = offset;
  int64_t symidx = in.symTab->getSymbolIndex(sym);
  if (sym.type == STT_SECTION) {
    auto *d = dyn_cast<Defined>(&sym);
    if (d) {
      SectionBase *section = d->section;
      assert(section->isLive());
      addend = sym.getVA(addend) - section->getOutputSection()->addr;
    } else {
      // Encode R_*_NONE(symidx=0).
      symidx = type = addend = 0;
    }
  }

  // Similar to llvm::ELF::encodeCrel.
  uint8_t b = deltaOffset * 8 + (out.r_symidx != symidx) +
              (out.r_type != type ? 2 : 0) +
              (uint(out.r_addend) != addend ? 4 : 0);
  if (deltaOffset < 0x10) {
    os << char(b);
  } else {
    os << char(b | 0x80);
    encodeULEB128(deltaOffset >> 4, os);
  }
  if (b & 1) {
    encodeSLEB128(static_cast<int32_t>(symidx - out.r_symidx), os);
    out.r_symidx = symidx;
  }
  if (b & 2) {
    encodeSLEB128(static_cast<int32_t>(type - out.r_type), os);
    out.r_type = type;
  }
  if (b & 4) {
    encodeSLEB128(std::make_signed_t<uint>(addend - out.r_addend), os);
    out.r_addend = addend;
  }
}

template <class ELFT>
static size_t relToCrel(raw_svector_ostream &os, Elf_Crel<ELFT::Is64Bits> &out,
                        InputSection *relSec, InputSectionBase *sec) {
  const auto &file = *cast<ELFFileBase>(relSec->file);
  if (relSec->type == SHT_REL) {
    // REL conversion is complex and not supported yet.
    errorOrWarn(toString(relSec) + ": REL cannot be converted to CREL");
    return 0;
  }
  auto rels = relSec->getDataAs<typename ELFT::Rela>();
  for (auto rel : rels) {
    encodeOneCrel<typename ELFT::uint>(
        os, out, sec->getVA(rel.r_offset), file.getRelocTargetSym(rel),
        rel.getType(config->isMips64EL), getAddend<ELFT>(rel));
  }
  return rels.size();
}

// Compute the content of a non-alloc CREL section due to -r or --emit-relocs.
// Input CREL sections are decoded while REL[A] need to be converted.
template <bool is64> void OutputSection::finalizeNonAllocCrel() {
  using uint = typename Elf_Crel_Impl<is64>::uint;
  raw_svector_ostream os(crelBody);
  uint64_t totalCount = 0;
  Elf_Crel<is64> out{};
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  for (InputSection *relSec : isd->sections) {
    const auto &file = *cast<ELFFileBase>(relSec->file);
    InputSectionBase *sec = relSec->getRelocatedSection();
    if (relSec->type == SHT_CREL) {
      RelocsCrel<is64> entries(relSec->content_);
      totalCount += entries.size();
      for (Elf_Crel_Impl<is64> r : entries) {
        encodeOneCrel<uint>(os, out, uint(sec->getVA(r.r_offset)),
                            file.getSymbol(r.r_symidx), r.r_type, r.r_addend);
      }
      continue;
    }

    // Convert REL[A] to CREL.
    if constexpr (is64) {
      totalCount += config->isLE ? relToCrel<ELF64LE>(os, out, relSec, sec)
                                 : relToCrel<ELF64BE>(os, out, relSec, sec);
    } else {
      totalCount += config->isLE ? relToCrel<ELF32LE>(os, out, relSec, sec)
                                 : relToCrel<ELF32BE>(os, out, relSec, sec);
    }
  }

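  // The section leads with a ULEB128 header that packs the relocation count
  // into the high bits (count * 8); adding 4 sets the explicit-addend flag
  // and leaves the low offset-shift bits zero.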
  crelHeader = totalCount * 8 + 4;
  size = getULEB128Size(crelHeader) + crelBody.size();
}

void OutputSection::finalize() {
  InputSection *first = getFirstInputSection(this);

  if (flags & SHF_LINK_ORDER) {
    // We must preserve the link order dependency of sections with the
    // SHF_LINK_ORDER flag. The dependency is indicated by the sh_link field.
    // We need to translate the InputSection sh_link to the OutputSection
    // sh_link; all InputSections in the OutputSection have the same
    // dependency.
    if (auto *ex = dyn_cast<ARMExidxSyntheticSection>(first))
      link = ex->getLinkOrderDep()->getParent()->sectionIndex;
    else if (first->flags & SHF_LINK_ORDER)
      if (auto *d = first->getLinkOrderDep())
        link = d->getParent()->sectionIndex;
  }

  if (type == SHT_GROUP) {
    finalizeShtGroup(this, first);
    return;
  }

  if (!config->copyRelocs || !isStaticRelSecType(type))
    return;

  // Skip if 'first' is synthetic, i.e. not a section created by --emit-relocs.
  // Normally 'type' was changed by 'first', so 'first' should be non-null.
  // However, if the output section is .rela.dyn, 'type' can be set by the
  // empty synthetic .rela.plt and 'first' can be null.
  if (!first || isa<SyntheticSection>(first))
    return;

  link = in.symTab->getParent()->sectionIndex;
  // sh_info for SHT_REL[A] sections should contain the section header index of
  // the section to which the relocation applies.
  InputSectionBase *s = first->getRelocatedSection();
  info = s->getOutputSection()->sectionIndex;
  flags |= SHF_INFO_LINK;
  // Finalize the content of non-alloc CREL.
  if (type == SHT_CREL) {
    if (config->is64)
      finalizeNonAllocCrel<true>();
    else
      finalizeNonAllocCrel<false>();
  }
}

// Returns true if S is in one of the many forms the compiler driver may pass
// crtbegin files.
//
// Gcc uses any of crtbegin[<empty>|S|T].o.
// Clang uses Gcc's plus clang_rt.crtbegin[-<arch>|<empty>].o.
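//
// For example, "crtbeginS.o" and "clang_rt.crtbegin-x86_64.o" both match
// "crtbegin", while "crtbegin12.o" does not (more than one character after
// the prefix).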
static bool isCrt(StringRef s, StringRef beginEnd) {
  s = sys::path::filename(s);
  if (!s.consume_back(".o"))
    return false;
  if (s.consume_front("clang_rt."))
    return s.consume_front(beginEnd);
  return s.consume_front(beginEnd) && s.size() <= 1;
}

// .ctors and .dtors are sorted by this order:
//
// 1. .ctors/.dtors in crtbegin (which contains a sentinel value -1).
// 2. The section is named ".ctors" or ".dtors" (priority: 65536).
// 3. The section has an optional priority value in the form of ".ctors.N" or
//    ".dtors.N" where N is a number in the form of %05u (priority: 65535-N).
// 4. .ctors/.dtors in crtend (which contains a sentinel value 0).
//
// For 2 and 3, the sections are sorted by priority from high to low, e.g.
// .ctors (65536), .ctors.00100 (65435), .ctors.00200 (65335). In GNU ld's
// internal linker scripts, the sorting is by string comparison which can
// achieve the same goal given the optional priority values are of the same
// length.
//
// In an ideal world, we don't need this function because .init_array and
// .ctors are duplicate features (and .init_array is newer). However, there
// are too many real-world use cases of .ctors, so we had no choice but to
// support them with these rather ad-hoc semantics.
static bool compCtors(const InputSection *a, const InputSection *b) {
  bool beginA = isCrt(a->file->getName(), "crtbegin");
  bool beginB = isCrt(b->file->getName(), "crtbegin");
  if (beginA != beginB)
    return beginA;
  bool endA = isCrt(a->file->getName(), "crtend");
  bool endB = isCrt(b->file->getName(), "crtend");
  if (endA != endB)
    return endB;
  return getPriority(a->name) > getPriority(b->name);
}

// Sorts input sections by the special rules for .ctors and .dtors.
// Unfortunately, the rules are different from the ones for .{init,fini}_array.
// Read the comment above.
void OutputSection::sortCtorsDtors() {
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  llvm::stable_sort(isd->sections, compCtors);
}

// If an input string is in the form of "foo.N" where N is a number, return N
// (65535-N if .ctors.N or .dtors.N). Otherwise, return 65536, which is one
// greater than the lowest priority.
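//
// For example, getPriority(".init_array.00050") is 50, while
// getPriority(".ctors.00050") is 65485.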
int elf::getPriority(StringRef s) {
  size_t pos = s.rfind('.');
  if (pos == StringRef::npos)
    return 65536;
  int v = 65536;
  if (to_integer(s.substr(pos + 1), v, 10) &&
      (pos == 6 && (s.starts_with(".ctors") || s.starts_with(".dtors"))))
    v = 65535 - v;
  return v;
}

InputSection *elf::getFirstInputSection(const OutputSection *os) {
  for (SectionCommand *cmd : os->commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
      if (!isd->sections.empty())
        return isd->sections[0];
  return nullptr;
}
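
// Returns the concatenation of all InputSectionDescription section arrays in
// os. In the common single-description case that array is returned directly;
// storage is populated only when several arrays must be concatenated.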
ArrayRef<InputSection *>
elf::getInputSections(const OutputSection &os,
                      SmallVector<InputSection *, 0> &storage) {
  ArrayRef<InputSection *> ret;
  storage.clear();
  for (SectionCommand *cmd : os.commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    if (ret.empty()) {
      ret = isd->sections;
    } else {
      if (storage.empty())
        storage.assign(ret.begin(), ret.end());
      storage.insert(storage.end(), isd->sections.begin(),
                     isd->sections.end());
    }
  }
  return storage.empty() ? ret : ArrayRef(storage);
}

// Sorts input sections by section name suffixes, so that .foo.N comes
// before .foo.M if N < M. Used to sort .{init,fini}_array.N sections.
// We want to keep the original order if the priorities are the same
// because the compiler keeps the original initialization order in a
// translation unit and we need to respect that.
// For more detail, read the section of GCC's manual about init_priority.
void OutputSection::sortInitFini() {
  // Sort sections by priority.
  sort([](InputSectionBase *s) { return getPriority(s->name); });
}
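
// Returns the 4-byte fill pattern: the linker script =fillexp value if one
// was given, otherwise a trap instruction for executable sections (so that
// control flow falling into padding traps instead of executing arbitrary
// bytes), and zeros elsewhere.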
std::array<uint8_t, 4> OutputSection::getFiller() {
  if (filler)
    return *filler;
  if (flags & SHF_EXECINSTR)
    return ctx.target->trapInstr;
  return {0, 0, 0, 0};
}

void OutputSection::checkDynRelAddends(const uint8_t *bufStart) {
  assert(config->writeAddends && config->checkDynamicRelocs);
  assert(isStaticRelSecType(type));
  SmallVector<InputSection *, 0> storage;
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  parallelFor(0, sections.size(), [&](size_t i) {
    // When linking with -r or --emit-relocs we might also call this function
    // for input .rel[a].<sec> sections which we simply pass through to the
    // output. We skip over those and only look at the synthetic relocation
    // sections created during linking.
    const auto *sec = dyn_cast<RelocationBaseSection>(sections[i]);
    if (!sec)
      return;
    for (const DynamicReloc &rel : sec->relocs) {
      int64_t addend = rel.addend;
      const OutputSection *relOsec = rel.inputSec->getOutputSection();
      assert(relOsec != nullptr && "missing output section for relocation");
      // Some targets have NOBITS synthetic sections with dynamic relocations
      // with non-zero addends. Skip such sections.
      if (is_contained({EM_PPC, EM_PPC64}, config->emachine) &&
          (rel.inputSec == in.ppc64LongBranchTarget.get() ||
           rel.inputSec == in.igotPlt.get()))
        continue;
      const uint8_t *relocTarget =
          bufStart + relOsec->offset + rel.inputSec->getOffset(rel.offsetInSec);
      // For SHT_NOBITS the written addend is always zero.
      int64_t writtenAddend =
          relOsec->type == SHT_NOBITS
              ? 0
              : ctx.target->getImplicitAddend(relocTarget, rel.type);
      if (addend != writtenAddend)
        internalLinkerError(
            getErrorLocation(relocTarget),
            "wrote incorrect addend value 0x" + utohexstr(writtenAddend) +
                " instead of 0x" + utohexstr(addend) +
                " for dynamic relocation " + toString(rel.type) +
                " at offset 0x" + utohexstr(rel.getOffset()) +
                (rel.sym ? " against symbol " + toString(*rel.sym) : ""));
    }
  });
}

template void OutputSection::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64BE>(ELF64BE::Shdr *Shdr);

template void OutputSection::writeTo<ELF32LE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF32BE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64LE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64BE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);

template void OutputSection::maybeCompress<ELF32LE>();
template void OutputSection::maybeCompress<ELF32BE>();
template void OutputSection::maybeCompress<ELF64LE>();
template void OutputSection::maybeCompress<ELF64BE>();