//===- OutputSections.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "OutputSections.h"
#include "Config.h"
#include "InputFiles.h"
#include "LinkerScript.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/Arrays.h"
#include "lld/Common/Memory.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Config/llvm-config.h" // LLVM_ENABLE_ZLIB, LLVM_ENABLE_ZSTD
#include "llvm/Support/Compression.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TimeProfiler.h"
#if LLVM_ENABLE_ZLIB
// Avoid introducing max as a macro from Windows headers.
#define NOMINMAX
#include <zlib.h>
#endif
#if LLVM_ENABLE_ZSTD
#include <zstd.h>
#endif
using namespace llvm;
using namespace llvm::dwarf;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;
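// Map output section flags to program header flags. For example, a typical
// .text section (SHF_ALLOC | SHF_EXECINSTR) yields PF_R | PF_X, while an Arm
// SHF_ARM_PURECODE section omits PF_R and becomes execute-only.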
uint32_t OutputSection::getPhdrFlags() const {
  uint32_t ret = 0;
  if (ctx.arg.emachine != EM_ARM || !(flags & SHF_ARM_PURECODE))
    ret |= PF_R;
  if (flags & SHF_WRITE)
    ret |= PF_W;
  if (flags & SHF_EXECINSTR)
    ret |= PF_X;
  return ret;
}
template <class ELFT>
void OutputSection::writeHeaderTo(typename ELFT::Shdr *shdr) {
  shdr->sh_entsize = entsize;
  shdr->sh_addralign = addralign;
  shdr->sh_type = type;
  shdr->sh_offset = offset;
  shdr->sh_flags = flags;
  shdr->sh_info = info;
  shdr->sh_link = link;
  shdr->sh_addr = addr;
  shdr->sh_size = size;
  shdr->sh_name = shName;
}
OutputSection::OutputSection(Ctx &ctx, StringRef name, uint32_t type,
                             uint64_t flags)
    : SectionBase(Output, ctx.internalFile, name, flags, /*entsize=*/0,
                  /*addralign=*/1, type,
                  /*info=*/0, /*link=*/0),
      ctx(ctx) {}
uint64_t OutputSection::getLMA() const {
  return ptLoad ? addr + ptLoad->lmaOffset : addr;
}
// We allow sections of types listed below to be merged into a
// single progbits section. This is typically done by linker
// scripts. Merging nobits and progbits will force disk space
// to be allocated for nobits sections. Other ones don't require
// any special treatment on top of progbits, so there doesn't
// seem to be any harm in merging them.
//
// NOTE: clang since rL252300 emits SHT_X86_64_UNWIND .eh_frame sections. Allow
// them to be merged into SHT_PROGBITS .eh_frame (GNU as .cfi_*).
static bool canMergeToProgbits(Ctx &ctx, unsigned type) {
  return type == SHT_NOBITS || type == SHT_PROGBITS || type == SHT_INIT_ARRAY ||
         type == SHT_PREINIT_ARRAY || type == SHT_FINI_ARRAY ||
         type == SHT_NOTE ||
         (type == SHT_X86_64_UNWIND && ctx.arg.emachine == EM_X86_64);
}
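// For example, a linker script rule such as ".data : { *(.data) *(.bss) }"
// mixes SHT_PROGBITS and SHT_NOBITS inputs; both pass canMergeToProgbits, so
// they can be combined (forcing file space to be allocated for the nobits
// part).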
// Record that isec will be placed in the OutputSection. isec does not become
// permanent until finalizeInputSections() is called. The function should not
// be used after finalizeInputSections() is called. If you need to add an
// InputSection post finalizeInputSections(), then you must do the following:
//
// 1. Find or create an InputSectionDescription to hold InputSection.
// 2. Add the InputSection to the InputSectionDescription::sections.
// 3. Call commitSection(isec).
void OutputSection::recordSection(InputSectionBase *isec) {
  partition = isec->partition;
  isec->parent = this;
  if (commands.empty() || !isa<InputSectionDescription>(commands.back()))
    commands.push_back(make<InputSectionDescription>(""));
  auto *isd = cast<InputSectionDescription>(commands.back());
  isd->sectionBases.push_back(isec);
}
// Update fields (type, flags, alignment, etc) according to the InputSection
// isec. Also check whether the InputSection flags and type are consistent with
// other InputSections.
void OutputSection::commitSection(InputSection *isec) {
  if (LLVM_UNLIKELY(type != isec->type)) {
    if (!hasInputSections && !typeIsSet) {
      type = isec->type;
    } else if (isStaticRelSecType(type) && isStaticRelSecType(isec->type) &&
               (type == SHT_CREL) != (isec->type == SHT_CREL)) {
      // Combine mixed SHT_REL[A] and SHT_CREL to SHT_CREL.
      if (type == SHT_REL) {
        if (name.consume_front(".rel"))
          name = ctx.saver.save(".crel" + name);
      } else if (name.consume_front(".rela")) {
        name = ctx.saver.save(".crel" + name);
      }
      type = SHT_CREL;
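      // For example, combining a SHT_RELA .rela.text with a SHT_CREL input
      // renames the output section to .crel.text.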
    } else {
      if (typeIsSet || !canMergeToProgbits(ctx, type) ||
          !canMergeToProgbits(ctx, isec->type)) {
        // The (NOLOAD) changes the section type to SHT_NOBITS; the intention
        // is that the contents at that address are provided by some other
        // means. Some projects (e.g.
        // https://github.com/ClangBuiltLinux/linux/issues/1597) rely on this
        // behavior. Other types get an error.
        if (type != SHT_NOBITS) {
          Err(ctx) << "section type mismatch for " << isec->name << "\n>>> "
                   << isec << ": "
                   << getELFSectionTypeName(ctx.arg.emachine, isec->type)
                   << "\n>>> output section " << name << ": "
                   << getELFSectionTypeName(ctx.arg.emachine, type);
        }
      }
      if (!typeIsSet)
        type = SHT_PROGBITS;
    }
  }
  if (!hasInputSections) {
    // If IS is the first section to be added to this section,
    // initialize type, entsize and flags from isec.
    hasInputSections = true;
    entsize = isec->entsize;
    flags = isec->flags;
  } else {
    // Otherwise, check if new type or flags are compatible with existing ones.
    if ((flags ^ isec->flags) & SHF_TLS)
      ErrAlways(ctx) << "incompatible section flags for " << name << "\n>>> "
                     << isec << ": 0x" << utohexstr(isec->flags)
                     << "\n>>> output section " << name << ": 0x"
                     << utohexstr(flags);
  }
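  // AND the Arm SHF_ARM_PURECODE bit (it must be set on every input to
  // survive) and OR all remaining flags.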
  uint64_t andMask =
      ctx.arg.emachine == EM_ARM ? (uint64_t)SHF_ARM_PURECODE : 0;
  uint64_t orMask = ~andMask;
  uint64_t andFlags = (flags & isec->flags) & andMask;
  uint64_t orFlags = (flags | isec->flags) & orMask;
  flags = andFlags | orFlags;
  if (nonAlloc)
    flags &= ~(uint64_t)SHF_ALLOC;

  addralign = std::max(addralign, isec->addralign);

  // If this section contains a table of fixed-size entries, sh_entsize
  // holds the element size. If it contains elements of different size we
  // set sh_entsize to 0.
  if (entsize != isec->entsize)
    entsize = 0;
}
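// Example for the sh_entsize update above: if every piece of .rodata.cst8 has
// entsize 8, sh_entsize stays 8; adding a piece with entsize 4 resets it to 0.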
static MergeSyntheticSection *createMergeSynthetic(Ctx &ctx, StringRef name,
                                                   uint32_t type,
                                                   uint64_t flags,
                                                   uint32_t addralign) {
  if ((flags & SHF_STRINGS) && ctx.arg.optimize >= 2)
    return make<MergeTailSection>(ctx, name, type, flags, addralign);
  return make<MergeNoTailSection>(ctx, name, type, flags, addralign);
}
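// With -O2, SHF_STRINGS sections use tail merging: a string that is a suffix
// of another (e.g. "bar" and "foobar") can share storage with the longer
// string's tail.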
// This function scans over the InputSectionBase list sectionBases to create
// InputSectionDescription::sections.
//
// It removes MergeInputSections from the input section array and adds
// new synthetic sections at the location of the first input section
// that it replaces. It then finalizes each synthetic section in order
// to compute an output offset for each piece of each input section.
void OutputSection::finalizeInputSections() {
  auto *script = ctx.script;
  std::vector<MergeSyntheticSection *> mergeSections;
  for (SectionCommand *cmd : commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    isd->sections.reserve(isd->sectionBases.size());
    for (InputSectionBase *s : isd->sectionBases) {
      MergeInputSection *ms = dyn_cast<MergeInputSection>(s);
      if (!ms) {
        isd->sections.push_back(cast<InputSection>(s));
        continue;
      }

      // We do not want to handle sections that are not alive, so just remove
      // them instead of trying to merge.
      if (!ms->isLive())
        continue;

      auto i = llvm::find_if(mergeSections, [=](MergeSyntheticSection *sec) {
        // While we could create a single synthetic section for two different
        // values of Entsize, it is better to take Entsize into consideration.
        //
        // With a single synthetic section no two pieces with different Entsize
        // could be equal, so we may as well have two sections.
        //
        // Using Entsize in here also allows us to propagate it to the
        // synthetic section.
        //
        // SHF_STRINGS sections with different alignments should not be merged.
        return sec->flags == ms->flags && sec->entsize == ms->entsize &&
               (sec->addralign == ms->addralign || !(sec->flags & SHF_STRINGS));
      });
      if (i == mergeSections.end()) {
        MergeSyntheticSection *syn = createMergeSynthetic(
            ctx, s->name, ms->type, ms->flags, ms->addralign);
        mergeSections.push_back(syn);
        i = std::prev(mergeSections.end());
        syn->entsize = ms->entsize;
        isd->sections.push_back(syn);
        // The merge synthetic section inherits the potential spill locations
        // of its first contained section.
        auto it = script->potentialSpillLists.find(ms);
        if (it != script->potentialSpillLists.end())
          script->potentialSpillLists.try_emplace(syn, it->second);
      }
      (*i)->addSection(ms);
    }

    // sectionBases should not be used from this point onwards. Clear it to
    // catch misuses.
    isd->sectionBases.clear();

    // Some input sections may be removed from the list after ICF.
    for (InputSection *s : isd->sections)
      commitSection(s);
  }
  for (auto *ms : mergeSections)
    ms->finalizeContents();
}
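// For example, .rodata.str1.1 (entsize 1) and .rodata.str2.2 (entsize 2)
// pieces share SHF_MERGE | SHF_STRINGS flags but differ in entsize, so they
// end up in two distinct merge synthetic sections.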
static void sortByOrder(MutableArrayRef<InputSection *> in,
                        llvm::function_ref<int(InputSectionBase *s)> order) {
  std::vector<std::pair<int, InputSection *>> v;
  for (InputSection *s : in)
    v.emplace_back(order(s), s);
  llvm::stable_sort(v, less_first());

  for (size_t i = 0; i < v.size(); ++i)
    in[i] = v[i].second;
}
uint64_t elf::getHeaderSize(Ctx &ctx) {
  if (ctx.arg.oFormatBinary)
    return 0;
  return ctx.out.elfHeader->size + ctx.out.programHeaders->size;
}
void OutputSection::sort(llvm::function_ref<int(InputSectionBase *s)> order) {
  assert(isLive());
  for (SectionCommand *b : commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(b))
      sortByOrder(isd->sections, order);
}
static void nopInstrFill(Ctx &ctx, uint8_t *buf, size_t size) {
  if (size == 0)
    return;
  unsigned i = 0;
  std::vector<std::vector<uint8_t>> nopFiller = *ctx.target->nopInstrs;
  unsigned num = size / nopFiller.back().size();
  for (unsigned c = 0; c < num; ++c) {
    memcpy(buf + i, nopFiller.back().data(), nopFiller.back().size());
    i += nopFiller.back().size();
  }
  unsigned remaining = size - i;
  if (!remaining)
    return;
  assert(nopFiller[remaining - 1].size() == remaining);
  memcpy(buf + i, nopFiller[remaining - 1].data(), remaining);
}
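// Note: nopInstrs is a table indexed by length: nopFiller[n - 1] is an n-byte
// no-op sequence, so the remaining tail of a gap (shorter than the largest
// no-op) is filled with exactly one table entry.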
// Fill [Buf, Buf + Size) with Filler.
// This is used for the linker script "=fillexp" command.
static void fill(uint8_t *buf, size_t size,
                 const std::array<uint8_t, 4> &filler) {
  size_t i = 0;
  for (; i + 4 < size; i += 4)
    memcpy(buf + i, filler.data(), 4);
  memcpy(buf + i, filler.data(), size - i);
}
#if LLVM_ENABLE_ZLIB
static SmallVector<uint8_t, 0> deflateShard(Ctx &ctx, ArrayRef<uint8_t> in,
                                            int level, int flush) {
  // 15 and 8 are default. windowBits=-15 is negative to generate raw deflate
  // data with no zlib header or trailer.
  z_stream s = {};
  auto res = deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY);
  if (res != 0) {
    Err(ctx) << "--compress-sections: deflateInit2 returned " << res;
    return {};
  }
  s.next_in = const_cast<uint8_t *>(in.data());
  s.avail_in = in.size();

  // Allocate a buffer of half of the input size, and grow it by 1.5x if
  // insufficient.
  SmallVector<uint8_t, 0> out;
  size_t pos = 0;
  out.resize_for_overwrite(std::max<size_t>(in.size() / 2, 64));
  do {
    if (pos == out.size())
      out.resize_for_overwrite(out.size() * 3 / 2);
    s.next_out = out.data() + pos;
    s.avail_out = out.size() - pos;
    (void)deflate(&s, flush);
    pos = s.next_out - out.data();
  } while (s.avail_out == 0);
  assert(s.avail_in == 0);

  out.truncate(pos);
  deflateEnd(&s);
  return out;
}
#endif
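// Note: raw deflate output (negative windowBits) plus Z_SYNC_FLUSH ends each
// shard at a byte boundary, so independently compressed shards can simply be
// concatenated; only the last shard uses Z_FINISH to terminate the stream.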
// Compress certain non-SHF_ALLOC sections:
//
// * (if --compress-debug-sections is specified) non-empty .debug_* sections
// * (if --compress-sections is specified) matched sections
template <class ELFT> void OutputSection::maybeCompress(Ctx &ctx) {
  using Elf_Chdr = typename ELFT::Chdr;
  (void)sizeof(Elf_Chdr);

  DebugCompressionType ctype = DebugCompressionType::None;
  size_t compressedSize = sizeof(Elf_Chdr);
  unsigned level = 0; // default compression level
  if (!(flags & SHF_ALLOC) && ctx.arg.compressDebugSections &&
      name.starts_with(".debug_"))
    ctype = *ctx.arg.compressDebugSections;
  for (auto &[glob, t, l] : ctx.arg.compressSections)
    if (glob.match(name))
      std::tie(ctype, level) = {t, l};
  if (ctype == DebugCompressionType::None)
    return;
  if (flags & SHF_ALLOC) {
    Err(ctx) << "--compress-sections: section '" << name
             << "' with the SHF_ALLOC flag cannot be compressed";
    return;
  }

  llvm::TimeTraceScope timeScope("Compress sections");
  auto buf = std::make_unique<uint8_t[]>(size);
  // Write uncompressed data to a temporary zero-initialized buffer.
  {
    parallel::TaskGroup tg;
    writeTo<ELFT>(ctx, buf.get(), tg);
  }

  // The generic ABI specifies "The sh_size and sh_addralign fields of the
  // section header for a compressed section reflect the requirements of the
  // compressed section." However, 1-byte alignment has been widely accepted
  // and utilized for a long time. Removing alignment padding is particularly
  // useful when there are many compressed output sections.
  addralign = 1;
  // Split input into 1-MiB shards.
  [[maybe_unused]] constexpr size_t shardSize = 1 << 20;
  auto shardsIn = split(ArrayRef<uint8_t>(buf.get(), size), shardSize);
  const size_t numShards = shardsIn.size();
  auto shardsOut = std::make_unique<SmallVector<uint8_t, 0>[]>(numShards);
#if LLVM_ENABLE_ZSTD
  // Use ZSTD's streaming compression API. See
  // http://facebook.github.io/zstd/zstd_manual.html "Streaming compression -
  // HowTo".
  if (ctype == DebugCompressionType::Zstd) {
    parallelFor(0, numShards, [&](size_t i) {
      SmallVector<uint8_t, 0> out;
      ZSTD_CCtx *cctx = ZSTD_createCCtx();
      ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
      ZSTD_inBuffer zib = {shardsIn[i].data(), shardsIn[i].size(), 0};
      ZSTD_outBuffer zob = {nullptr, 0, 0};
      size_t size;
      do {
        // Allocate a buffer of a quarter of the input size, and grow it by
        // 1.5x if insufficient.
        if (zob.pos == zob.size) {
          out.resize_for_overwrite(
              zob.size ? zob.size * 3 / 2 : std::max<size_t>(zib.size / 4, 64));
          zob = {out.data(), out.size(), zob.pos};
        }
        size = ZSTD_compressStream2(cctx, &zob, &zib, ZSTD_e_end);
        assert(!ZSTD_isError(size));
      } while (size != 0);
      out.truncate(zob.pos);
      ZSTD_freeCCtx(cctx);
      shardsOut[i] = std::move(out);
    });
    compressed.type = ELFCOMPRESS_ZSTD;
    for (size_t i = 0; i != numShards; ++i)
      compressedSize += shardsOut[i].size();
  }
#endif
#if LLVM_ENABLE_ZLIB
  // We chose 1 (Z_BEST_SPEED) as the default compression level because it is
  // fast and provides decent compression ratios.
  if (ctype == DebugCompressionType::Zlib) {
    if (!level)
      level = Z_BEST_SPEED;

    // Compress shards and compute Adler-32 checksums. Use Z_SYNC_FLUSH for all
    // shards but the last to flush the output to a byte boundary to be
    // concatenated with the next shard.
    auto shardsAdler = std::make_unique<uint32_t[]>(numShards);
    parallelFor(0, numShards, [&](size_t i) {
      shardsOut[i] = deflateShard(ctx, shardsIn[i], level,
                                  i != numShards - 1 ? Z_SYNC_FLUSH : Z_FINISH);
      shardsAdler[i] = adler32(1, shardsIn[i].data(), shardsIn[i].size());
    });

    // Update section size and combine Adler-32 checksums.
    uint32_t checksum = 1; // Initial Adler-32 value
    compressedSize += 2;   // Elf_Chdr and zlib header
    for (size_t i = 0; i != numShards; ++i) {
      compressedSize += shardsOut[i].size();
      checksum = adler32_combine(checksum, shardsAdler[i], shardsIn[i].size());
    }
    compressedSize += 4; // checksum
    compressed.type = ELFCOMPRESS_ZLIB;
    compressed.checksum = checksum;
  }
#endif
  if (compressedSize >= size)
    return;
  compressed.uncompressedSize = size;
  compressed.shards = std::move(shardsOut);
  compressed.numShards = numShards;
  size = compressedSize;
  flags |= SHF_COMPRESSED;
}
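// Example sizing for zlib: compressedSize = sizeof(Elf_Chdr) + 2 (zlib CMF and
// FLG bytes) + the deflate shards + 4 (Adler-32 trailer); the compressed form
// is kept only when this total is smaller than the uncompressed size.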
static void writeInt(Ctx &ctx, uint8_t *buf, uint64_t data, uint64_t size) {
  if (size == 1)
    *buf = data;
  else if (size == 2)
    write16(ctx, buf, data);
  else if (size == 4)
    write32(ctx, buf, data);
  else if (size == 8)
    write64(ctx, buf, data);
  else
    llvm_unreachable("unsupported Size argument");
}
template <class ELFT>
void OutputSection::writeTo(Ctx &ctx, uint8_t *buf, parallel::TaskGroup &tg) {
  llvm::TimeTraceScope timeScope("Write sections", name);
  if (type == SHT_NOBITS)
    return;
  if (type == SHT_CREL && !(flags & SHF_ALLOC)) {
    buf += encodeULEB128(crelHeader, buf);
    memcpy(buf, crelBody.data(), crelBody.size());
    return;
  }
  // If the section is compressed due to
  // --compress-debug-section/--compress-sections, the content is already known.
  if (compressed.shards) {
    auto *chdr = reinterpret_cast<typename ELFT::Chdr *>(buf);
    chdr->ch_type = compressed.type;
    chdr->ch_size = compressed.uncompressedSize;
    chdr->ch_addralign = addralign;
    buf += sizeof(*chdr);

    auto offsets = std::make_unique<size_t[]>(compressed.numShards);
    if (compressed.type == ELFCOMPRESS_ZLIB) {
      buf[0] = 0x78;  // CMF
      buf[1] = 0x01;  // FLG: best speed
      offsets[0] = 2; // zlib header
      write32be(buf + (size - sizeof(*chdr) - 4), compressed.checksum);
    }
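    // The zlib stream layout is: the 2-byte header (CMF, FLG), the
    // concatenated raw deflate shards, and a big-endian Adler-32 checksum in
    // the last 4 bytes.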
    // Compute shard offsets.
    for (size_t i = 1; i != compressed.numShards; ++i)
      offsets[i] = offsets[i - 1] + compressed.shards[i - 1].size();
    parallelFor(0, compressed.numShards, [&](size_t i) {
      memcpy(buf + offsets[i], compressed.shards[i].data(),
             compressed.shards[i].size());
    });
    return;
  }
  // Write leading padding.
  SmallVector<InputSection *, 0> storage;
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  std::array<uint8_t, 4> filler = getFiller(ctx);
  bool nonZeroFiller = read32(ctx, filler.data()) != 0;
  if (nonZeroFiller)
    fill(buf, sections.empty() ? size : sections[0]->outSecOff, filler);
  auto fn = [=, &ctx](size_t begin, size_t end) {
    size_t numSections = sections.size();
    for (size_t i = begin; i != end; ++i) {
      InputSection *isec = sections[i];
      if (auto *s = dyn_cast<SyntheticSection>(isec))
        s->writeTo(buf + isec->outSecOff);
      else
        isec->writeTo<ELFT>(ctx, buf + isec->outSecOff);

      // When in Arm BE8 mode, the linker has to convert the big-endian
      // instructions to little-endian, leaving the data big-endian.
      if (ctx.arg.emachine == EM_ARM && !ctx.arg.isLE && ctx.arg.armBe8 &&
          (flags & SHF_EXECINSTR))
        convertArmInstructionstoBE8(ctx, isec, buf + isec->outSecOff);
      // Fill gaps between sections.
      if (nonZeroFiller) {
        uint8_t *start = buf + isec->outSecOff + isec->getSize();
        uint8_t *end;
        if (i + 1 == numSections)
          end = buf + size;
        else
          end = buf + sections[i + 1]->outSecOff;
        if (isec->nopFiller) {
          assert(ctx.target->nopInstrs);
          nopInstrFill(ctx, start, end - start);
        } else
          fill(start, end - start, filler);
      }
    }
  };
  // If there is any BYTE()-family command (rare), write the section content
  // first then process BYTE to overwrite the filler content. The write is
  // serial due to the limitation of llvm/Support/Parallel.h.
  bool written = false;
  size_t numSections = sections.size();
  for (SectionCommand *cmd : commands)
    if (auto *data = dyn_cast<ByteCommand>(cmd)) {
      if (!std::exchange(written, true))
        fn(0, numSections);
      writeInt(ctx, buf + data->offset, data->expression().getValue(),
               data->size);
    }
  if (written || !numSections)
    return;
  // There is no data command. Write content asynchronously to overlap the
  // write time with other output sections. Note, if a linker script specifies
  // overlapping output sections (needs --noinhibit-exec or --no-check-sections
  // to suppress the error), the output may be non-deterministic.
  const size_t taskSizeLimit = 4 << 20;
  for (size_t begin = 0, i = 0, taskSize = 0;;) {
    taskSize += sections[i]->getSize();
    bool done = ++i == numSections;
    if (done || taskSize >= taskSizeLimit) {
      tg.spawn([=] { fn(begin, i); });
      if (done)
        break;
      begin = i;
      taskSize = 0;
    }
  }
}
static void finalizeShtGroup(Ctx &ctx, OutputSection *os,
                             InputSection *section) {
  // sh_link field for SHT_GROUP sections should contain the section index of
  // the symbol table.
  os->link = ctx.in.symTab->getParent()->sectionIndex;

  if (!section)
    return;

  // sh_info then contains the index of an entry in the symbol table section
  // which provides the signature of the section group.
  ArrayRef<Symbol *> symbols = section->file->getSymbols();
  os->info = ctx.in.symTab->getSymbolIndex(*symbols[section->info]);

  // Some group members may be combined or discarded, so we need to compute the
  // new size. The content will be rewritten in InputSection::copyShtGroup.
  DenseSet<uint32_t> seen;
  ArrayRef<InputSectionBase *> sections = section->file->getSections();
  for (const uint32_t &idx : section->getDataAs<uint32_t>().slice(1))
    if (OutputSection *osec = sections[read32(ctx, &idx)]->getOutputSection())
      seen.insert(osec->sectionIndex);
  os->size = (1 + seen.size()) * sizeof(uint32_t);
}
template <class uint>
LLVM_ATTRIBUTE_ALWAYS_INLINE static void
encodeOneCrel(Ctx &ctx, raw_svector_ostream &os,
              Elf_Crel<sizeof(uint) == 8> &out, uint offset, const Symbol &sym,
              uint32_t type, uint addend) {
  const auto deltaOffset = static_cast<uint64_t>(offset - out.r_offset);
  out.r_offset = offset;
  int64_t symidx = ctx.in.symTab->getSymbolIndex(sym);
  if (sym.type == STT_SECTION) {
    auto *d = dyn_cast<Defined>(&sym);
    if (d) {
      SectionBase *section = d->section;
      assert(section->isLive());
      addend = sym.getVA(ctx, addend) - section->getOutputSection()->addr;
    } else {
      // Encode R_*_NONE(symidx=0).
      symidx = type = addend = 0;
    }
  }

  // Similar to llvm::ELF::encodeCrel.
  uint8_t b = deltaOffset * 8 + (out.r_symidx != symidx) +
              (out.r_type != type ? 2 : 0) +
              (uint(out.r_addend) != addend ? 4 : 0);
  if (deltaOffset < 0x10) {
    os << char(b);
  } else {
    os << char(b | 0x80);
    encodeULEB128(deltaOffset >> 4, os);
  }
  if (b & 1) {
    encodeSLEB128(static_cast<int32_t>(symidx - out.r_symidx), os);
    out.r_symidx = symidx;
  }
  if (b & 2) {
    encodeSLEB128(static_cast<int32_t>(type - out.r_type), os);
    out.r_type = type;
  }
  if (b & 4) {
    encodeSLEB128(std::make_signed_t<uint>(addend - out.r_addend), os);
    out.r_addend = addend;
  }
}
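// Example: two relocations 8 bytes apart with identical symbol, type, and
// addend encode the second entry as the single byte 0x40 (deltaOffset 8
// shifted left by 3; no delta flag bits set).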
template <class ELFT>
static size_t relToCrel(Ctx &ctx, raw_svector_ostream &os,
                        Elf_Crel<ELFT::Is64Bits> &out, InputSection *relSec,
                        InputSectionBase *sec) {
  const auto &file = *cast<ELFFileBase>(relSec->file);
  if (relSec->type == SHT_REL) {
    // REL conversion is complex and not supported yet.
    Err(ctx) << relSec << ": REL cannot be converted to CREL";
    return 0;
  }
  auto rels = relSec->getDataAs<typename ELFT::Rela>();
  for (auto rel : rels)
    encodeOneCrel<typename ELFT::uint>(
        ctx, os, out, sec->getVA(rel.r_offset), file.getRelocTargetSym(rel),
        rel.getType(ctx.arg.isMips64EL), getAddend<ELFT>(rel));
  return rels.size();
}
// Compute the content of a non-alloc CREL section due to -r or --emit-relocs.
// Input CREL sections are decoded while REL[A] need to be converted.
template <bool is64> void OutputSection::finalizeNonAllocCrel(Ctx &ctx) {
  using uint = typename Elf_Crel_Impl<is64>::uint;
  raw_svector_ostream os(crelBody);
  uint64_t totalCount = 0;
  Elf_Crel<is64> out{};
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  for (InputSection *relSec : isd->sections) {
    const auto &file = *cast<ELFFileBase>(relSec->file);
    InputSectionBase *sec = relSec->getRelocatedSection();
    if (relSec->type == SHT_CREL) {
      RelocsCrel<is64> entries(relSec->content_);
      totalCount += entries.size();
      for (Elf_Crel_Impl<is64> r : entries)
        encodeOneCrel<uint>(ctx, os, out, uint(sec->getVA(r.r_offset)),
                            file.getSymbol(r.r_symidx), r.r_type, r.r_addend);
      continue;
    }

    // Convert REL[A] to CREL.
    if constexpr (is64) {
      totalCount += ctx.arg.isLE
                        ? relToCrel<ELF64LE>(ctx, os, out, relSec, sec)
                        : relToCrel<ELF64BE>(ctx, os, out, relSec, sec);
    } else {
      totalCount += ctx.arg.isLE
                        ? relToCrel<ELF32LE>(ctx, os, out, relSec, sec)
                        : relToCrel<ELF32BE>(ctx, os, out, relSec, sec);
    }
  }

  crelHeader = totalCount * 8 + 4;
  size = getULEB128Size(crelHeader) + crelBody.size();
}
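// The ULEB128-encoded CREL header packs the relocation count and flags:
// totalCount * 8 + 4 sets the flag bit meaning the entries carry explicit
// addends. For example, 3 relocations yield a header of 28, a single byte.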
void OutputSection::finalize(Ctx &ctx) {
  InputSection *first = getFirstInputSection(this);

  if (flags & SHF_LINK_ORDER) {
    // We must preserve the link order dependency of sections with the
    // SHF_LINK_ORDER flag. The dependency is indicated by the sh_link field.
    // We need to translate the InputSection sh_link to the OutputSection
    // sh_link, as all InputSections in the OutputSection have the same
    // dependency.
    if (auto *ex = dyn_cast<ARMExidxSyntheticSection>(first))
      link = ex->getLinkOrderDep()->getParent()->sectionIndex;
    else if (first->flags & SHF_LINK_ORDER)
      if (auto *d = first->getLinkOrderDep())
        link = d->getParent()->sectionIndex;
  }

  if (type == SHT_GROUP) {
    finalizeShtGroup(ctx, this, first);
    return;
  }

  if (!ctx.arg.copyRelocs || !isStaticRelSecType(type))
    return;

  // Skip if 'first' is synthetic, i.e. not a section created by --emit-relocs.
  // Normally 'type' was changed by 'first' so 'first' should be non-null.
  // However, if the output section is .rela.dyn, 'type' can be set by the
  // empty synthetic .rela.plt and first can be null.
  if (!first || isa<SyntheticSection>(first))
    return;

  link = ctx.in.symTab->getParent()->sectionIndex;
  // sh_info for SHT_REL[A] sections should contain the section header index of
  // the section to which the relocation applies.
  InputSectionBase *s = first->getRelocatedSection();
  info = s->getOutputSection()->sectionIndex;
  flags |= SHF_INFO_LINK;
  // Finalize the content of non-alloc CREL.
  if (type == SHT_CREL) {
    if (ctx.arg.is64)
      finalizeNonAllocCrel<true>(ctx);
    else
      finalizeNonAllocCrel<false>(ctx);
  }
}
// Returns true if S is in one of the many forms the compiler driver may pass
// crtbegin and crtend files.
//
// Gcc uses any of crtbegin[<empty>|S|T].o.
// Clang uses Gcc's plus clang_rt.crtbegin[-<arch>|<empty>].o.
static bool isCrt(StringRef s, StringRef beginEnd) {
  s = sys::path::filename(s);
  if (!s.consume_back(".o"))
    return false;
  if (s.consume_front("clang_rt."))
    return s.consume_front(beginEnd);
  return s.consume_front(beginEnd) && s.size() <= 1;
}
// .ctors and .dtors are sorted by this order:
//
// 1. .ctors/.dtors in crtbegin (which contains a sentinel value -1).
// 2. The section is named ".ctors" or ".dtors" (priority: 65536).
// 3. The section has an optional priority value in the form of ".ctors.N" or
//    ".dtors.N" where N is a number in the form of %05u (priority: 65535-N).
// 4. .ctors/.dtors in crtend (which contains a sentinel value 0).
//
// For 2 and 3, the sections are sorted by priority from high to low, e.g.
// .ctors (65536), .ctors.00100 (65435), .ctors.00200 (65335). In GNU ld's
// internal linker scripts, the sorting is by string comparison which can
// achieve the same goal given the optional priority values are of the same
// length.
//
// In an ideal world, we don't need this function because .init_array and
// .ctors are duplicate features (and .init_array is newer). However, there
// are too many real-world use cases of .ctors, so we had no choice but to
// support that with this rather ad-hoc semantics.
static bool compCtors(const InputSection *a, const InputSection *b) {
  bool beginA = isCrt(a->file->getName(), "crtbegin");
  bool beginB = isCrt(b->file->getName(), "crtbegin");
  if (beginA != beginB)
    return beginA;
  bool endA = isCrt(a->file->getName(), "crtend");
  bool endB = isCrt(b->file->getName(), "crtend");
  if (endA != endB)
    return endB;
  return getPriority(a->name) > getPriority(b->name);
}
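// Example result: crtbegin's .ctors comes first, then .ctors (priority 65536),
// .ctors.00100 (65435), .ctors.00200 (65335), and finally crtend's .ctors.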
// Sorts input sections by the special rules for .ctors and .dtors.
// Unfortunately, the rules are different from the ones for .{init,fini}_array.
// Read the comment above.
void OutputSection::sortCtorsDtors() {
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  llvm::stable_sort(isd->sections, compCtors);
}
// If an input string is in the form of "foo.N" where N is a number, return N
// (65535-N if .ctors.N or .dtors.N). Otherwise, returns 65536, which is one
// greater than the lowest priority.
int elf::getPriority(StringRef s) {
  size_t pos = s.rfind('.');
  if (pos == StringRef::npos)
    return 65536;
  int v = 65536;
  if (to_integer(s.substr(pos + 1), v, 10) &&
      (pos == 6 && (s.starts_with(".ctors") || s.starts_with(".dtors"))))
    v = 65535 - v;
  return v;
}
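// For example, getPriority(".init_array.00100") returns 100, while
// getPriority(".ctors.00100") returns 65535 - 100 = 65435.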
InputSection *elf::getFirstInputSection(const OutputSection *os) {
  for (SectionCommand *cmd : os->commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
      if (!isd->sections.empty())
        return isd->sections[0];
  return nullptr;
}
ArrayRef<InputSection *>
elf::getInputSections(const OutputSection &os,
                      SmallVector<InputSection *, 0> &storage) {
  ArrayRef<InputSection *> ret;
  storage.clear();
  for (SectionCommand *cmd : os.commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    if (ret.empty()) {
      ret = isd->sections;
    } else {
      if (storage.empty())
        storage.assign(ret.begin(), ret.end());
      storage.insert(storage.end(), isd->sections.begin(), isd->sections.end());
    }
  }
  return storage.empty() ? ret : ArrayRef(storage);
}
// Sorts input sections by section name suffixes, so that .foo.N comes
// before .foo.M if N < M. Used to sort .{init,fini}_array.N sections.
// We want to keep the original order if the priorities are the same
// because the compiler keeps the original initialization order in a
// translation unit and we need to respect that.
// For more detail, read the section of the GCC's manual about init_priority.
void OutputSection::sortInitFini() {
  // Sort sections by priority.
  sort([](InputSectionBase *s) { return getPriority(s->name); });
}
std::array<uint8_t, 4> OutputSection::getFiller(Ctx &ctx) {
  if (filler)
    return *filler;
  if (flags & SHF_EXECINSTR)
    return ctx.target->trapInstr;
  return {0, 0, 0, 0};
}
void OutputSection::checkDynRelAddends(Ctx &ctx) {
  assert(ctx.arg.writeAddends && ctx.arg.checkDynamicRelocs);
  assert(isStaticRelSecType(type));
  SmallVector<InputSection *, 0> storage;
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  parallelFor(0, sections.size(), [&](size_t i) {
    // When linking with -r or --emit-relocs we might also call this function
    // for input .rel[a].<sec> sections which we simply pass through to the
    // output. We skip over those and only look at the synthetic relocation
    // sections created during linking.
    if (!SyntheticSection::classof(sections[i]) ||
        !is_contained({ELF::SHT_REL, ELF::SHT_RELA, ELF::SHT_RELR},
                      sections[i]->type))
      return;
    const auto *sec = cast<RelocationBaseSection>(sections[i]);
    for (const DynamicReloc &rel : sec->relocs) {
      int64_t addend = rel.addend;
      const OutputSection *relOsec = rel.inputSec->getOutputSection();
      assert(relOsec != nullptr && "missing output section for relocation");
      // Some targets have NOBITS synthetic sections with dynamic relocations
      // with non-zero addends. Skip such sections.
      if (is_contained({EM_PPC, EM_PPC64}, ctx.arg.emachine) &&
          (rel.inputSec == ctx.in.ppc64LongBranchTarget.get() ||
           rel.inputSec == ctx.in.igotPlt.get()))
        continue;
      const uint8_t *relocTarget = ctx.bufferStart + relOsec->offset +
                                   rel.inputSec->getOffset(rel.offsetInSec);
      // For SHT_NOBITS the written addend is always zero.
      int64_t writtenAddend =
          relOsec->type == SHT_NOBITS
              ? 0
              : ctx.target->getImplicitAddend(relocTarget, rel.type);
      if (addend != writtenAddend)
        InternalErr(ctx, relocTarget)
            << "wrote incorrect addend value 0x" << utohexstr(writtenAddend)
            << " instead of 0x" << utohexstr(addend)
            << " for dynamic relocation " << rel.type << " at offset 0x"
            << utohexstr(rel.getOffset())
            << (rel.sym ? " against symbol " + rel.sym->getName() : "");
    }
  });
}
template void OutputSection::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64BE>(ELF64BE::Shdr *Shdr);

template void OutputSection::writeTo<ELF32LE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF32BE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64LE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64BE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);

template void OutputSection::maybeCompress<ELF32LE>(Ctx &);
template void OutputSection::maybeCompress<ELF32BE>(Ctx &);
template void OutputSection::maybeCompress<ELF64LE>(Ctx &);
template void OutputSection::maybeCompress<ELF64BE>(Ctx &);