//===- OutputSections.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "OutputSections.h"
#include "Config.h"
#include "InputFiles.h"
#include "LinkerScript.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/Arrays.h"
#include "lld/Common/Memory.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Config/llvm-config.h" // LLVM_ENABLE_ZLIB
#include "llvm/Support/Compression.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TimeProfiler.h"
#if LLVM_ENABLE_ZLIB
// Avoid introducing max as a macro from Windows headers.
#define NOMINMAX
#include <zlib.h>
#endif
#if LLVM_ENABLE_ZSTD
#include <zstd.h>
#endif

using namespace llvm;
using namespace llvm::dwarf;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

uint8_t *Out::bufferStart;
PhdrEntry *Out::tlsPhdr;
OutputSection *Out::elfHeader;
OutputSection *Out::programHeaders;
OutputSection *Out::preinitArray;
OutputSection *Out::initArray;
OutputSection *Out::finiArray;

SmallVector<OutputSection *, 0> elf::outputSections;

uint32_t OutputSection::getPhdrFlags() const {
  uint32_t ret = 0;
  if (config->emachine != EM_ARM || !(flags & SHF_ARM_PURECODE))
    ret |= PF_R;
  if (flags & SHF_WRITE)
    ret |= PF_W;
  if (flags & SHF_EXECINSTR)
    ret |= PF_X;
  return ret;
}

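// getPhdrFlags() example: a section with SHF_ALLOC|SHF_EXECINSTR maps to
// PF_R|PF_X, while an ARM SHF_ARM_PURECODE (execute-only) section drops PF_R
// and maps to just PF_X.
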
template <class ELFT>
void OutputSection::writeHeaderTo(typename ELFT::Shdr *shdr) {
  shdr->sh_entsize = entsize;
  shdr->sh_addralign = addralign;
  shdr->sh_type = type;
  shdr->sh_offset = offset;
  shdr->sh_flags = flags;
  shdr->sh_info = info;
  shdr->sh_link = link;
  shdr->sh_addr = addr;
  shdr->sh_size = size;
  shdr->sh_name = shName;
}

OutputSection::OutputSection(StringRef name, uint32_t type, uint64_t flags)
    : SectionBase(Output, name, flags, /*Entsize*/ 0, /*Alignment*/ 1, type,
                  /*Info*/ 0, /*Link*/ 0) {}

// We allow sections of the types listed below to be merged into a
// single progbits section. This is typically done by linker
// scripts. Merging nobits and progbits will force disk space
// to be allocated for nobits sections. Other ones don't require
// any special treatment on top of progbits, so there doesn't
// seem to be a harm in merging them.
//
// NOTE: clang since rL252300 emits SHT_X86_64_UNWIND .eh_frame sections. Allow
// them to be merged into SHT_PROGBITS .eh_frame (GNU as .cfi_*).
static bool canMergeToProgbits(unsigned type) {
  return type == SHT_NOBITS || type == SHT_PROGBITS || type == SHT_INIT_ARRAY ||
         type == SHT_PREINIT_ARRAY || type == SHT_FINI_ARRAY ||
         (type == SHT_X86_64_UNWIND && config->emachine == EM_X86_64);
}

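// For example (illustrative), an output description such as
//   .data : { *(.data) *(.bss) }
// places SHT_NOBITS input into a SHT_PROGBITS output section; the merged
// section stays SHT_PROGBITS, so the former .bss bytes occupy file space.
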
// Record that isec will be placed in the OutputSection. isec does not become
// permanent until finalizeInputSections() is called. The function should not be
// used after finalizeInputSections() is called. If you need to add an
// InputSection post finalizeInputSections(), then you must do the following:
//
// 1. Find or create an InputSectionDescription to hold InputSection.
// 2. Add the InputSection to the InputSectionDescription::sections.
// 3. Call commitSection(isec).
void OutputSection::recordSection(InputSectionBase *isec) {
  partition = isec->partition;
  isec->parent = this;
  if (commands.empty() || !isa<InputSectionDescription>(commands.back()))
    commands.push_back(make<InputSectionDescription>(""));
  auto *isd = cast<InputSectionDescription>(commands.back());
  isd->sectionBases.push_back(isec);
}

// Update fields (type, flags, alignment, etc) according to the InputSection
// isec. Also check whether the InputSection flags and type are consistent with
// other InputSections.
void OutputSection::commitSection(InputSection *isec) {
  if (LLVM_UNLIKELY(type != isec->type)) {
    if (hasInputSections || typeIsSet) {
      if (typeIsSet || !canMergeToProgbits(type) ||
          !canMergeToProgbits(isec->type)) {
        // The (NOLOAD) changes the section type to SHT_NOBITS, the intention is
        // that the contents at that address are provided by some other means.
        // Some projects (e.g.
        // https://github.com/ClangBuiltLinux/linux/issues/1597) rely on the
        // behavior. Other types get an error.
        if (type != SHT_NOBITS) {
          errorOrWarn("section type mismatch for " + isec->name + "\n>>> " +
                      toString(isec) + ": " +
                      getELFSectionTypeName(config->emachine, isec->type) +
                      "\n>>> output section " + name + ": " +
                      getELFSectionTypeName(config->emachine, type));
        }
      }
      if (!typeIsSet)
        type = SHT_PROGBITS;
    } else {
      type = isec->type;
    }
  }
  if (!hasInputSections) {
    // If IS is the first section to be added to this section,
    // initialize type, entsize and flags from isec.
    hasInputSections = true;
    entsize = isec->entsize;
    flags = isec->flags;
  } else {
    // Otherwise, check if new type or flags are compatible with existing ones.
    if ((flags ^ isec->flags) & SHF_TLS)
      error("incompatible section flags for " + name + "\n>>> " +
            toString(isec) + ": 0x" + utohexstr(isec->flags) +
            "\n>>> output section " + name + ": 0x" + utohexstr(flags));
  }

  isec->parent = this;
  uint64_t andMask =
      config->emachine == EM_ARM ? (uint64_t)SHF_ARM_PURECODE : 0;
  uint64_t orMask = ~andMask;
  uint64_t andFlags = (flags & isec->flags) & andMask;
  uint64_t orFlags = (flags | isec->flags) & orMask;
  flags = andFlags | orFlags;
  if (nonAlloc)
    flags &= ~(uint64_t)SHF_ALLOC;

  addralign = std::max(addralign, isec->addralign);

  // If this section contains a table of fixed-size entries, sh_entsize
  // holds the element size. If it contains elements of different size we
  // set sh_entsize to 0.
  if (entsize != isec->entsize)
    entsize = 0;
}

static MergeSyntheticSection *createMergeSynthetic(StringRef name,
                                                   uint32_t type,
                                                   uint64_t flags,
                                                   uint32_t addralign) {
  if ((flags & SHF_STRINGS) && config->optimize >= 2)
    return make<MergeTailSection>(name, type, flags, addralign);
  return make<MergeNoTailSection>(name, type, flags, addralign);
}

// This function scans over the InputSectionBase list sectionBases to create
// InputSectionDescription::sections.
//
// It removes MergeInputSections from the input section array and adds
// new synthetic sections at the location of the first input section
// that it replaces. It then finalizes each synthetic section in order
// to compute an output offset for each piece of each input section.
void OutputSection::finalizeInputSections() {
  std::vector<MergeSyntheticSection *> mergeSections;
  for (SectionCommand *cmd : commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    isd->sections.reserve(isd->sectionBases.size());
    for (InputSectionBase *s : isd->sectionBases) {
      MergeInputSection *ms = dyn_cast<MergeInputSection>(s);
      if (!ms) {
        isd->sections.push_back(cast<InputSection>(s));
        continue;
      }

      // We do not want to handle sections that are not alive, so just remove
      // them instead of trying to merge.
      if (!ms->isLive())
        continue;

      auto i = llvm::find_if(mergeSections, [=](MergeSyntheticSection *sec) {
        // While we could create a single synthetic section for two different
        // values of Entsize, it is better to take Entsize into consideration.
        //
        // With a single synthetic section no two pieces with different Entsize
        // could be equal, so we may as well have two sections.
        //
        // Using Entsize in here also allows us to propagate it to the synthetic
        // section.
        //
        // SHF_STRINGS sections with different alignments should not be merged.
        return sec->flags == ms->flags && sec->entsize == ms->entsize &&
               (sec->addralign == ms->addralign || !(sec->flags & SHF_STRINGS));
      });
      if (i == mergeSections.end()) {
        MergeSyntheticSection *syn =
            createMergeSynthetic(s->name, ms->type, ms->flags, ms->addralign);
        mergeSections.push_back(syn);
        i = std::prev(mergeSections.end());
        syn->entsize = ms->entsize;
        isd->sections.push_back(syn);
      }
      (*i)->addSection(ms);
    }

    // sectionBases should not be used from this point onwards. Clear it to
    // catch misuses.
    isd->sectionBases.clear();

    // Some input sections may be removed from the list after ICF.
    for (InputSection *s : isd->sections)
      commitSection(s);
  }
  for (auto *ms : mergeSections)
    ms->finalizeContents();
}

static void sortByOrder(MutableArrayRef<InputSection *> in,
                        llvm::function_ref<int(InputSectionBase *s)> order) {
  std::vector<std::pair<int, InputSection *>> v;
  for (InputSection *s : in)
    v.emplace_back(order(s), s);
  llvm::stable_sort(v, less_first());

  for (size_t i = 0; i < v.size(); ++i)
    in[i] = v[i].second;
}

uint64_t elf::getHeaderSize() {
  if (config->oFormatBinary)
    return 0;
  return Out::elfHeader->size + Out::programHeaders->size;
}

void OutputSection::sort(llvm::function_ref<int(InputSectionBase *s)> order) {
  for (SectionCommand *b : commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(b))
      sortByOrder(isd->sections, order);
}

static void nopInstrFill(uint8_t *buf, size_t size) {
  if (size == 0)
    return;
  unsigned i = 0;
  std::vector<std::vector<uint8_t>> nopFiller = *target->nopInstrs;
  unsigned num = size / nopFiller.back().size();
  for (unsigned c = 0; c < num; ++c) {
    memcpy(buf + i, nopFiller.back().data(), nopFiller.back().size());
    i += nopFiller.back().size();
  }
  unsigned remaining = size - i;
  if (!remaining)
    return;
  assert(nopFiller[remaining - 1].size() == remaining);
  memcpy(buf + i, nopFiller[remaining - 1].data(), remaining);
}

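// For example, a 7-byte gap whose largest nop encoding is 4 bytes receives one
// 4-byte nop followed by the dedicated 3-byte encoding nopFiller[2], so the
// padding decodes as whole instructions rather than repeated single bytes.
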
// Fill [Buf, Buf + Size) with Filler.
// This is used for linker script "=fillexp" command.
static void fill(uint8_t *buf, size_t size,
                 const std::array<uint8_t, 4> &filler) {
  size_t i = 0;
  for (; i + 4 < size; i += 4)
    memcpy(buf + i, filler.data(), 4);
  memcpy(buf + i, filler.data(), size - i);
}

#if LLVM_ENABLE_ZLIB
static SmallVector<uint8_t, 0> deflateShard(ArrayRef<uint8_t> in, int level,
                                            int flush) {
  // 15 and 8 are default. windowBits=-15 is negative to generate raw deflate
  // data with no zlib header or trailer.
  z_stream s = {};
  deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY);
  s.next_in = const_cast<uint8_t *>(in.data());
  s.avail_in = in.size();

  // Allocate a buffer of half of the input size, and grow it by 1.5x if
  // insufficient.
  SmallVector<uint8_t, 0> out;
  size_t pos = 0;
  out.resize_for_overwrite(std::max<size_t>(in.size() / 2, 64));
  do {
    if (pos == out.size())
      out.resize_for_overwrite(out.size() * 3 / 2);
    s.next_out = out.data() + pos;
    s.avail_out = out.size() - pos;
    (void)deflate(&s, flush);
    pos = s.next_out - out.data();
  } while (s.avail_out == 0);
  assert(s.avail_in == 0);

  out.truncate(pos);
  deflateEnd(&s);
  return out;
}
#endif

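// Because each shard is a raw deflate stream (windowBits=-15, no zlib header
// or trailer) and Z_SYNC_FLUSH pads it to a byte boundary, maybeCompress() and
// writeTo() below can concatenate the shards behind a hand-written two-byte
// zlib header and a single combined Adler-32 trailer.
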
// Compress section contents if this section contains debug info.
template <class ELFT> void OutputSection::maybeCompress() {
  using Elf_Chdr = typename ELFT::Chdr;
  (void)sizeof(Elf_Chdr);

  // Compress only DWARF debug sections.
  if (config->compressDebugSections == DebugCompressionType::None ||
      (flags & SHF_ALLOC) || !name.starts_with(".debug_") || size == 0)
    return;

  llvm::TimeTraceScope timeScope("Compress debug sections");
  compressed.uncompressedSize = size;
  auto buf = std::make_unique<uint8_t[]>(size);
  // Write uncompressed data to a temporary zero-initialized buffer.
  {
    parallel::TaskGroup tg;
    writeTo<ELFT>(buf.get(), tg);
  }

#if LLVM_ENABLE_ZSTD
  // Use ZSTD's streaming compression API which permits parallel workers working
  // on the stream. See http://facebook.github.io/zstd/zstd_manual.html
  // "Streaming compression - HowTo".
  if (config->compressDebugSections == DebugCompressionType::Zstd) {
    // Allocate a buffer of half of the input size, and grow it by 1.5x if
    // insufficient.
    compressed.shards = std::make_unique<SmallVector<uint8_t, 0>[]>(1);
    SmallVector<uint8_t, 0> &out = compressed.shards[0];
    out.resize_for_overwrite(std::max<size_t>(size / 2, 32));
    size_t pos = 0;

    ZSTD_CCtx *cctx = ZSTD_createCCtx();
    // Ignore error if zstd was not built with ZSTD_MULTITHREAD.
    (void)ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers,
                                 parallel::strategy.compute_thread_count());
    ZSTD_outBuffer zob = {out.data(), out.size(), 0};
    ZSTD_EndDirective directive = ZSTD_e_continue;
    const size_t blockSize = ZSTD_CStreamInSize();
    do {
      const size_t n = std::min(static_cast<size_t>(size - pos), blockSize);
      if (n == size - pos)
        directive = ZSTD_e_end;
      ZSTD_inBuffer zib = {buf.get() + pos, n, 0};
      size_t bytesRemaining = 0;
      while (zib.pos != zib.size ||
             (directive == ZSTD_e_end && bytesRemaining != 0)) {
        if (zob.pos == zob.size) {
          out.resize_for_overwrite(out.size() * 3 / 2);
          zob.dst = out.data();
          zob.size = out.size();
        }
        bytesRemaining = ZSTD_compressStream2(cctx, &zob, &zib, directive);
        assert(!ZSTD_isError(bytesRemaining));
      }
      pos += n;
    } while (directive != ZSTD_e_end);
    ZSTD_freeCCtx(cctx);
    out.resize(zob.pos);

    size = sizeof(Elf_Chdr) + out.size();
    flags |= SHF_COMPRESSED;
    return;
  }
#endif

#if LLVM_ENABLE_ZLIB
  // We chose 1 (Z_BEST_SPEED) as the default compression level because it is
  // the fastest. If -O2 is given, we use level 6 to compress debug info more by
  // ~15%. We found that level 7 to 9 doesn't make much difference (~1% more
  // compression) while they take significant amount of time (~2x), so level 6
  // seems enough.
  const int level = config->optimize >= 2 ? 6 : Z_BEST_SPEED;

  // Split input into 1-MiB shards.
  constexpr size_t shardSize = 1 << 20;
  auto shardsIn = split(ArrayRef<uint8_t>(buf.get(), size), shardSize);
  const size_t numShards = shardsIn.size();

  // Compress shards and compute Adler-32 checksums. Use Z_SYNC_FLUSH for all
  // shards but the last to flush the output to a byte boundary to be
  // concatenated with the next shard.
  auto shardsOut = std::make_unique<SmallVector<uint8_t, 0>[]>(numShards);
  auto shardsAdler = std::make_unique<uint32_t[]>(numShards);
  parallelFor(0, numShards, [&](size_t i) {
    shardsOut[i] = deflateShard(shardsIn[i], level,
                                i != numShards - 1 ? Z_SYNC_FLUSH : Z_FINISH);
    shardsAdler[i] = adler32(1, shardsIn[i].data(), shardsIn[i].size());
  });

  // Update section size and combine Adler-32 checksums.
  uint32_t checksum = 1;       // Initial Adler-32 value
  size = sizeof(Elf_Chdr) + 2; // Elf_Chdr and zlib header
  for (size_t i = 0; i != numShards; ++i) {
    size += shardsOut[i].size();
    checksum = adler32_combine(checksum, shardsAdler[i], shardsIn[i].size());
  }
  size += 4; // checksum

  compressed.shards = std::move(shardsOut);
  compressed.numShards = numShards;
  compressed.checksum = checksum;
  flags |= SHF_COMPRESSED;
#endif
}

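// For the zlib path, the bytes later emitted by writeTo() are laid out as:
//   Elf_Chdr | 0x78 0x01 (zlib header) | deflate shard 0 ... shard N-1 |
//   big-endian Adler-32 of the uncompressed data
// which is exactly what the size computed above accounts for. For zstd, the
// payload is a single zstd frame following the Elf_Chdr.
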
static void writeInt(uint8_t *buf, uint64_t data, uint64_t size) {
  if (size == 1)
    *buf = data;
  else if (size == 2)
    write16(buf, data);
  else if (size == 4)
    write32(buf, data);
  else if (size == 8)
    write64(buf, data);
  else
    llvm_unreachable("unsupported Size argument");
}

template <class ELFT>
void OutputSection::writeTo(uint8_t *buf, parallel::TaskGroup &tg) {
  llvm::TimeTraceScope timeScope("Write sections", name);
  if (type == SHT_NOBITS)
    return;

  // If --compress-debug-section is specified and if this is a debug section,
  // we've already compressed section contents. If that's the case,
  // just write it down.
  if (compressed.shards) {
    auto *chdr = reinterpret_cast<typename ELFT::Chdr *>(buf);
    chdr->ch_size = compressed.uncompressedSize;
    chdr->ch_addralign = addralign;
    buf += sizeof(*chdr);
    if (config->compressDebugSections == DebugCompressionType::Zstd) {
      chdr->ch_type = ELFCOMPRESS_ZSTD;
      memcpy(buf, compressed.shards[0].data(), compressed.shards[0].size());
      return;
    }
    chdr->ch_type = ELFCOMPRESS_ZLIB;

    // Compute shard offsets.
    auto offsets = std::make_unique<size_t[]>(compressed.numShards);
    offsets[0] = 2; // zlib header
    for (size_t i = 1; i != compressed.numShards; ++i)
      offsets[i] = offsets[i - 1] + compressed.shards[i - 1].size();

    buf[0] = 0x78; // CMF
    buf[1] = 0x01; // FLG: best speed
    parallelFor(0, compressed.numShards, [&](size_t i) {
      memcpy(buf + offsets[i], compressed.shards[i].data(),
             compressed.shards[i].size());
    });

    write32be(buf + (size - sizeof(*chdr) - 4), compressed.checksum);
    return;
  }

  // Write leading padding.
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  std::array<uint8_t, 4> filler = getFiller();
  bool nonZeroFiller = read32(filler.data()) != 0;
  if (nonZeroFiller)
    fill(buf, sections.empty() ? size : sections[0]->outSecOff, filler);

  auto fn = [=](size_t begin, size_t end) {
    size_t numSections = sections.size();
    for (size_t i = begin; i != end; ++i) {
      InputSection *isec = sections[i];
      if (auto *s = dyn_cast<SyntheticSection>(isec))
        s->writeTo(buf + isec->outSecOff);
      else
        isec->writeTo<ELFT>(buf + isec->outSecOff);

      // When in Arm BE8 mode, the linker has to convert the big-endian
      // instructions to little-endian, leaving the data big-endian.
      if (config->emachine == EM_ARM && !config->isLE && config->armBe8 &&
          (flags & SHF_EXECINSTR))
        convertArmInstructionstoBE8(isec, buf + isec->outSecOff);

      // Fill gaps between sections.
      if (nonZeroFiller) {
        uint8_t *start = buf + isec->outSecOff + isec->getSize();
        uint8_t *end;
        if (i + 1 == numSections)
          end = buf + size;
        else
          end = buf + sections[i + 1]->outSecOff;
        if (isec->nopFiller) {
          assert(target->nopInstrs);
          nopInstrFill(start, end - start);
        } else
          fill(start, end - start, filler);
      }
    }
  };

  // If there is any BYTE()-family command (rare), write the section content
  // first then process BYTE to overwrite the filler content. The write is
  // serial due to the limitation of llvm/Support/Parallel.h.
  bool written = false;
  size_t numSections = sections.size();
  for (SectionCommand *cmd : commands)
    if (auto *data = dyn_cast<ByteCommand>(cmd)) {
      if (!std::exchange(written, true))
        fn(0, numSections);
      writeInt(buf + data->offset, data->expression().getValue(), data->size);
    }
  if (written || !numSections)
    return;

  // There is no data command. Write content asynchronously to overlap the write
  // time with other output sections. Note, if a linker script specifies
  // overlapping output sections (needs --noinhibit-exec or --no-check-sections
  // to suppress the error), the output may be non-deterministic.
  const size_t taskSizeLimit = 4 << 20;
  for (size_t begin = 0, i = 0, taskSize = 0;;) {
    taskSize += sections[i]->getSize();
    bool done = ++i == numSections;
    if (done || taskSize >= taskSizeLimit) {
      tg.spawn([=] { fn(begin, i); });
      if (done)
        break;
      begin = i;
      taskSize = 0;
    }
  }
}

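// Note: with the 4 MiB taskSizeLimit above, an output section whose input
// sections total roughly 10 MiB is written by about three spawned tasks, each
// covering a contiguous run of input sections.
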
static void finalizeShtGroup(OutputSection *os, InputSection *section) {
  // sh_link field for SHT_GROUP sections should contain the section index of
  // the symbol table.
  os->link = in.symTab->getParent()->sectionIndex;

  if (!section)
    return;

  // sh_info then contains the index of an entry in the symbol table section
  // which provides the signature of the section group.
  ArrayRef<Symbol *> symbols = section->file->getSymbols();
  os->info = in.symTab->getSymbolIndex(symbols[section->info]);

  // Some group members may be combined or discarded, so we need to compute the
  // new size. The content will be rewritten in InputSection::copyShtGroup.
  DenseSet<uint32_t> seen;
  ArrayRef<InputSectionBase *> sections = section->file->getSections();
  for (const uint32_t &idx : section->getDataAs<uint32_t>().slice(1))
    if (OutputSection *osec = sections[read32(&idx)]->getOutputSection())
      seen.insert(osec->sectionIndex);
  os->size = (1 + seen.size()) * sizeof(uint32_t);
}

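// An SHT_GROUP body is one 32-bit flags word (usually GRP_COMDAT) followed by
// one 32-bit section header index per member, hence the (1 + seen.size())
// words computed above.
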
void OutputSection::finalize() {
  InputSection *first = getFirstInputSection(this);

  if (flags & SHF_LINK_ORDER) {
    // We must preserve the link order dependency of sections with the
    // SHF_LINK_ORDER flag. The dependency is indicated by the sh_link field. We
    // need to translate the InputSection sh_link to the OutputSection sh_link;
    // all InputSections in the OutputSection have the same dependency.
    if (auto *ex = dyn_cast<ARMExidxSyntheticSection>(first))
      link = ex->getLinkOrderDep()->getParent()->sectionIndex;
    else if (first->flags & SHF_LINK_ORDER)
      if (auto *d = first->getLinkOrderDep())
        link = d->getParent()->sectionIndex;
  }

  if (type == SHT_GROUP) {
    finalizeShtGroup(this, first);
    return;
  }

  if (!config->copyRelocs || (type != SHT_RELA && type != SHT_REL))
    return;

  // Skip if 'first' is synthetic, i.e. not a section created by --emit-relocs.
  // Normally 'type' was changed by 'first' so 'first' should be non-null.
  // However, if the output section is .rela.dyn, 'type' can be set by the empty
  // synthetic .rela.plt and first can be null.
  if (!first || isa<SyntheticSection>(first))
    return;

  link = in.symTab->getParent()->sectionIndex;
  // sh_info for SHT_REL[A] sections should contain the section header index of
  // the section to which the relocation applies.
  InputSectionBase *s = first->getRelocatedSection();
  info = s->getOutputSection()->sectionIndex;
  flags |= SHF_INFO_LINK;
}

// Returns true if S is in one of the many forms the compiler driver may pass
// crtbegin files.
//
// Gcc uses any of crtbegin[<empty>|S|T].o.
// Clang uses Gcc's plus clang_rt.crtbegin[-<arch>|<empty>].o.
static bool isCrt(StringRef s, StringRef beginEnd) {
  s = sys::path::filename(s);
  if (!s.consume_back(".o"))
    return false;
  if (s.consume_front("clang_rt."))
    return s.consume_front(beginEnd);
  return s.consume_front(beginEnd) && s.size() <= 1;
}

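// For example, "crtbeginS.o" and "clang_rt.crtbegin-x86_64.o" both satisfy
// isCrt(..., "crtbegin"), while "mycrtbegin.o" does not.
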
// .ctors and .dtors are sorted by this order:
//
// 1. .ctors/.dtors in crtbegin (which contains a sentinel value -1).
// 2. The section is named ".ctors" or ".dtors" (priority: 65536).
// 3. The section has an optional priority value in the form of ".ctors.N" or
//    ".dtors.N" where N is a number in the form of %05u (priority: 65535-N).
// 4. .ctors/.dtors in crtend (which contains a sentinel value 0).
//
// For 2 and 3, the sections are sorted by priority from high to low, e.g.
// .ctors (65536), .ctors.00100 (65435), .ctors.00200 (65335). In GNU ld's
// internal linker scripts, the sorting is by string comparison which can
// achieve the same goal given the optional priority values are of the same
// length.
//
// In an ideal world, we don't need this function because .init_array and
// .ctors are duplicate features (and .init_array is newer.) However, there
// are too many real-world use cases of .ctors, so we had no choice but to
// support them with these rather ad-hoc semantics.
static bool compCtors(const InputSection *a, const InputSection *b) {
  bool beginA = isCrt(a->file->getName(), "crtbegin");
  bool beginB = isCrt(b->file->getName(), "crtbegin");
  if (beginA != beginB)
    return beginA;
  bool endA = isCrt(a->file->getName(), "crtend");
  bool endB = isCrt(b->file->getName(), "crtend");
  if (endA != endB)
    return endB;
  return getPriority(a->name) > getPriority(b->name);
}

// Sorts input sections by the special rules for .ctors and .dtors.
// Unfortunately, the rules are different from the ones for .{init,fini}_array.
// Read the comment above.
void OutputSection::sortCtorsDtors() {
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  llvm::stable_sort(isd->sections, compCtors);
}

// If an input string is in the form of "foo.N" where N is a number, return N
// (65535-N if .ctors.N or .dtors.N). Otherwise, returns 65536, which is one
// greater than the lowest priority.
int elf::getPriority(StringRef s) {
  size_t pos = s.rfind('.');
  if (pos == StringRef::npos)
    return 65536;
  int v = 65536;
  if (to_integer(s.substr(pos + 1), v, 10) &&
      (pos == 6 && (s.starts_with(".ctors") || s.starts_with(".dtors"))))
    v = 65535 - v;
  return v;
}

InputSection *elf::getFirstInputSection(const OutputSection *os) {
  for (SectionCommand *cmd : os->commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
      if (!isd->sections.empty())
        return isd->sections[0];
  return nullptr;
}

ArrayRef<InputSection *>
elf::getInputSections(const OutputSection &os,
                      SmallVector<InputSection *, 0> &storage) {
  ArrayRef<InputSection *> ret;
  storage.clear();
  for (SectionCommand *cmd : os.commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    if (ret.empty()) {
      ret = isd->sections;
    } else {
      if (storage.empty())
        storage.assign(ret.begin(), ret.end());
      storage.insert(storage.end(), isd->sections.begin(), isd->sections.end());
    }
  }
  return storage.empty() ? ret : ArrayRef(storage);
}

// Sorts input sections by section name suffixes, so that .foo.N comes
// before .foo.M if N < M. Used to sort .{init,fini}_array.N sections.
// We want to keep the original order if the priorities are the same
// because the compiler keeps the original initialization order in a
// translation unit and we need to respect that.
// For more detail, read the section of the GCC's manual about init_priority.
void OutputSection::sortInitFini() {
  // Sort sections by priority.
  sort([](InputSectionBase *s) { return getPriority(s->name); });
}

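// For example, .init_array.00001 (priority 1) is placed before
// .init_array.00010 (priority 10), and both come before plain .init_array
// (priority 65536).
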
std::array<uint8_t, 4> OutputSection::getFiller() {
  if (filler)
    return *filler;
  if (flags & SHF_EXECINSTR)
    return target->trapInstr;
  return {};
}

void OutputSection::checkDynRelAddends(const uint8_t *bufStart) {
  assert(config->writeAddends && config->checkDynamicRelocs);
  assert(type == SHT_REL || type == SHT_RELA);
  SmallVector<InputSection *, 0> storage;
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  parallelFor(0, sections.size(), [&](size_t i) {
    // When linking with -r or --emit-relocs we might also call this function
    // for input .rel[a].<sec> sections which we simply pass through to the
    // output. We skip over those and only look at the synthetic relocation
    // sections created during linking.
    const auto *sec = dyn_cast<RelocationBaseSection>(sections[i]);
    if (!sec)
      return;
    for (const DynamicReloc &rel : sec->relocs) {
      int64_t addend = rel.addend;
      const OutputSection *relOsec = rel.inputSec->getOutputSection();
      assert(relOsec != nullptr && "missing output section for relocation");
      // Some targets have NOBITS synthetic sections with dynamic relocations
      // with non-zero addends. Skip such sections.
      if (is_contained({EM_PPC, EM_PPC64}, config->emachine) &&
          (rel.inputSec == in.ppc64LongBranchTarget.get() ||
           rel.inputSec == in.igotPlt.get()))
        continue;
      const uint8_t *relocTarget =
          bufStart + relOsec->offset + rel.inputSec->getOffset(rel.offsetInSec);
      // For SHT_NOBITS the written addend is always zero.
      int64_t writtenAddend =
          relOsec->type == SHT_NOBITS
              ? 0
              : target->getImplicitAddend(relocTarget, rel.type);
      if (addend != writtenAddend)
        internalLinkerError(
            getErrorLocation(relocTarget),
            "wrote incorrect addend value 0x" + utohexstr(writtenAddend) +
                " instead of 0x" + utohexstr(addend) +
                " for dynamic relocation " + toString(rel.type) +
                " at offset 0x" + utohexstr(rel.getOffset()) +
                (rel.sym ? " against symbol " + toString(*rel.sym) : ""));
    }
  });
}

template void OutputSection::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64BE>(ELF64BE::Shdr *Shdr);

template void OutputSection::writeTo<ELF32LE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF32BE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64LE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64BE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);

template void OutputSection::maybeCompress<ELF32LE>();
template void OutputSection::maybeCompress<ELF32BE>();
template void OutputSection::maybeCompress<ELF64LE>();
template void OutputSection::maybeCompress<ELF64BE>();