//===- ConcatOutputSection.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ConcatOutputSection.h"
#include "Config.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/CommonLinkerContext.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TimeProfiler.h"

using namespace llvm;
using namespace llvm::MachO;
using namespace lld;
using namespace lld::macho;

MapVector<NamePair, ConcatOutputSection *> macho::concatOutputSections;

void ConcatOutputSection::addInput(ConcatInputSection *input) {
  assert(input->parent == this);
  if (inputs.empty()) {
    align = input->align;
    flags = input->getFlags();
  } else {
    align = std::max(align, input->align);
    finalizeFlags(input);
  }
  inputs.push_back(input);
}

// Branch-range extension can be implemented in two ways, either through ...
//
// (1) Branch islands: Single branch instructions (also of limited range),
//     that might be chained in multiple hops to reach the desired
//     destination. On ARM64, as many as 16 branch islands are needed to hop
//     between opposite ends of a 2 GiB program. LD64 uses branch islands
//     exclusively, even when it needs excessive hops.
//
// (2) Thunks: Instruction(s) to load the destination address into a scratch
//     register, followed by a register-indirect branch. Thunks are
//     constructed to reach any arbitrary address, so need not be chained.
//     Although thunks need not be chained, a program might need multiple
//     thunks to the same destination distributed throughout a large program
//     so that all call sites can have one within range.
//
// The optimal approach is to mix islands for destinations within two hops,
// and use thunks for destinations at greater distance. For now, we only
// implement thunks. TODO: Add support for branch islands.
//
// Internally -- as expressed in LLD's data structures -- a
// branch-range-extension thunk consists of:
//
// (1) a new Defined symbol for the thunk named
//     <FUNCTION>.thunk.<SEQUENCE>, which references ...
// (2) a new InputSection, which contains ...
// (3.1) new data for the instructions to load & branch to the far address +
// (3.2) new Relocs on instructions to load the far address, which reference ...
// (4.1) the existing Defined symbol for the real function in __text, or
// (4.2) the existing DylibSymbol for the real function in a dylib
//
// Nearly-optimal thunk-placement algorithm features:
//
// * Single pass: O(n) on the number of call sites.
//
// * Accounts for the exact space overhead of thunks - no heuristics.
//
// * Exploits the full range of call instructions - forward & backward.
//
// Data:
//
// * DenseMap<Symbol *, ThunkInfo> thunkMap: Maps the function symbol
//   to its thunk bookkeeper.
//
// * struct ThunkInfo (bookkeeper): Call instructions have limited range, and
//   distant call sites might be unable to reach the same thunk, so multiple
//   thunks are necessary to serve all call sites in a very large program. A
//   thunkInfo stores state for all thunks associated with a particular
//   function:
//   (a) thunk symbol,
//   (b) input section containing stub code, and
//   (c) sequence number for the active thunk incarnation.
//   When an old thunk goes out of range, we increment the sequence number and
//   create a new thunk named <FUNCTION>.thunk.<SEQUENCE>.
//
// * A thunk consists of
//   (a) a Defined symbol pointing to
//   (b) an InputSection holding machine code (similar to a MachO stub), and
//   (c) relocs referencing the real function for fixing up the stub code
//   (see the sketch at the end of this comment).
//
// * std::vector<InputSection *> MergedInputSection::thunks: A vector parallel
//   to the inputs vector. We store new thunks via cheap vector append, rather
//   than costly insertion into the inputs vector.
//
// Control Flow:
//
// * During address assignment, MergedInputSection::finalize() examines call
//   sites by ascending address and creates thunks. When a function is beyond
//   the range of a call site, we need a thunk. Place it at the largest
//   available forward address from the call site. Call sites increase
//   monotonically and thunks are always placed as far forward as possible;
//   thus, we place thunks at monotonically increasing addresses. Once a thunk
//   is placed, it and all previous input-section addresses are final.
//
// * ConcatInputSection::finalize() and ConcatInputSection::writeTo() merge
//   the inputs and thunks vectors (both ordered by ascending address), which
//   is simple and cheap.
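//
// As an illustrative sketch (the authoritative bytes are emitted by the
// per-arch TargetInfo::populateThunk implementations), an arm64 thunk is a
// three-instruction stub that materializes the callee's address in a scratch
// register and branches through it:
//
//   <FUNCTION>.thunk.<SEQUENCE>:
//     adrp x16, <FUNCTION>@page
//     add  x16, x16, <FUNCTION>@pageoff
//     br   x16
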
DenseMap<Symbol *, ThunkInfo> lld::macho::thunkMap;

// Determine whether we need thunks, which depends on the target arch -- RISC
// (i.e., ARM) generally does because it has limited-range branch/call
// instructions, whereas CISC (i.e., x86) generally doesn't. RISC only needs
// thunks for programs so large that branch source & destination addresses
// might differ by more than the range of the branch instruction(s).
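// For example, arm64's b/bl instructions encode a signed 26-bit word offset,
// giving a reach of roughly +/-128 MiB, so thunks only matter once __text
// (plus whatever it must reach beyond itself) outgrows that range.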
bool TextOutputSection::needsThunks() const {
  if (!target->usesThunks())
    return false;
  uint64_t isecAddr = addr;
  for (ConcatInputSection *isec : inputs)
    isecAddr = alignToPowerOf2(isecAddr, isec->align) + isec->getSize();
  // Other sections besides __text might be small enough to pass this
  // test but nevertheless need thunks for calling into other sections.
  // An imperfect heuristic to use in this case is that if a section
  // we've already processed in this segment needs thunks, so do the
  // rest.
  bool needsThunks = parent && parent->needsThunks;
  if (!needsThunks &&
      isecAddr - addr + in.stubs->getSize() <=
          std::min(target->backwardBranchRange, target->forwardBranchRange))
    return false;
  // Yes, this program is large enough to need thunks.
  if (parent)
    parent->needsThunks = true;
  for (ConcatInputSection *isec : inputs) {
    for (Reloc &r : isec->relocs) {
      if (!target->hasAttr(r.type, RelocAttrBits::BRANCH))
        continue;
      auto *sym = r.referent.get<Symbol *>();
      // Pre-populate the thunkMap and memoize call site counts for every
      // InputSection and ThunkInfo. We do this for the benefit of
      // estimateStubsInRangeVA().
      ThunkInfo &thunkInfo = thunkMap[sym];
      // Knowing the ThunkInfo call site count will help us know whether or
      // not we might need to create more thunks for this referent when we
      // estimate the distance to __stubs in estimateStubsInRangeVA().
      ++thunkInfo.callSiteCount;
      // We can avoid work on InputSections that have no BRANCH relocs.
      isec->hasCallSites = true;
    }
  }
  return true;
}

// Since __stubs is placed after __text, we must estimate the address
// beyond which stubs are within range of a simple forward branch.
// This is called exactly once, when the last input section has been finalized.
uint64_t TextOutputSection::estimateStubsInRangeVA(size_t callIdx) const {
  // Tally the functions which still have call sites remaining to process,
  // which yields the maximum number of thunks we might yet place.
  size_t maxPotentialThunks = 0;
  for (auto &tp : thunkMap) {
    ThunkInfo &ti = tp.second;
    // This overcounts: Only sections that are in forward jump range from the
    // currently-active section get finalized, and all input sections are
    // finalized when estimateStubsInRangeVA() is called. So only backward
    // jumps will need thunks, but we count all jumps.
    if (ti.callSitesUsed < ti.callSiteCount)
      maxPotentialThunks += 1;
  }
  // Tally the total size of input sections remaining to process.
  uint64_t isecVA = inputs[callIdx]->getVA();
  uint64_t isecEnd = isecVA;
  for (size_t i = callIdx; i < inputs.size(); i++) {
    InputSection *isec = inputs[i];
    isecEnd = alignToPowerOf2(isecEnd, isec->align) + isec->getSize();
  }
  // Estimate the address after which call sites can safely call stubs
  // directly rather than through intermediary thunks.
  uint64_t forwardBranchRange = target->forwardBranchRange;
  assert(isecEnd > forwardBranchRange &&
         "should not run thunk insertion if all code fits in jump range");
  assert(isecEnd - isecVA <= forwardBranchRange &&
         "should only finalize sections in jump range");
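  // Worst case, every function that still has unprocessed call sites needs
  // one more thunk, and all of those thunks land between isecEnd and __stubs.
  // A call site at or beyond
  //   isecEnd + maxPotentialThunks * thunkSize + stubsSize - forwardBranchRange
  // can therefore reach even the far end of __stubs with a plain forward
  // branch.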
  uint64_t stubsInRangeVA = isecEnd + maxPotentialThunks * target->thunkSize +
                            in.stubs->getSize() - forwardBranchRange;
  log("thunks = " + std::to_string(thunkMap.size()) +
      ", potential = " + std::to_string(maxPotentialThunks) +
      ", stubs = " + std::to_string(in.stubs->getSize()) + ", isecVA = " +
      utohexstr(isecVA) + ", threshold = " + utohexstr(stubsInRangeVA) +
      ", isecEnd = " + utohexstr(isecEnd) +
      ", tail = " + utohexstr(isecEnd - isecVA) +
      ", slop = " + utohexstr(forwardBranchRange - (isecEnd - isecVA)));
  return stubsInRangeVA;
}

void ConcatOutputSection::finalizeOne(ConcatInputSection *isec) {
  size = alignToPowerOf2(size, isec->align);
  fileSize = alignToPowerOf2(fileSize, isec->align);
  isec->outSecOff = size;
  isec->isFinal = true;
  size += isec->getSize();
  fileSize += isec->getFileSize();
}

void ConcatOutputSection::finalizeContents() {
  for (ConcatInputSection *isec : inputs)
    finalizeOne(isec);
}

void TextOutputSection::finalize() {
  if (!needsThunks()) {
    for (ConcatInputSection *isec : inputs)
      finalizeOne(isec);
    return;
  }

  uint64_t forwardBranchRange = target->forwardBranchRange;
  uint64_t backwardBranchRange = target->backwardBranchRange;
  uint64_t stubsInRangeVA = TargetInfo::outOfRangeVA;
  size_t thunkSize = target->thunkSize;
  size_t relocCount = 0;
  size_t callSiteCount = 0;
  size_t thunkCallCount = 0;
  size_t thunkCount = 0;

  // Walk all sections in order. Finalize all sections that are less than
  // forwardBranchRange in front of it.
  // isecVA is the address of the current section.
  // addr + size is the start address of the first non-finalized section.
  size_t finalIdx = 0; // index of the first non-finalized input section
  // inputs[finalIdx] is for finalization (address-assignment).
  //
  // Kick off by ensuring that the first input section has an address.
  for (size_t callIdx = 0, endIdx = inputs.size(); callIdx < endIdx;
       ++callIdx) {
    if (finalIdx == callIdx)
      finalizeOne(inputs[finalIdx++]);
    ConcatInputSection *isec = inputs[callIdx];
    assert(isec->isFinal);
    uint64_t isecVA = isec->getVA();

    // Assign addresses up to the forward branch-range limit.
    // Every call instruction needs a small number of bytes (on Arm64: 4),
    // and each inserted thunk needs a slightly larger number of bytes
    // (on Arm64: 12). If a section starts with a branch instruction and
    // contains several branch instructions in succession, then the distance
    // from the current position to the position where the thunks are inserted
    // grows. So leave room for a bunch of thunks.
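    // (With arm64's 12-byte thunks, this reserves 256 * 12 = 3 KiB of
    // headroom.)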
    unsigned slop = 256 * thunkSize;
    while (finalIdx < endIdx) {
      uint64_t expectedNewSize =
          alignToPowerOf2(addr + size, inputs[finalIdx]->align) +
          inputs[finalIdx]->getSize();
      if (expectedNewSize >= isecVA + forwardBranchRange - slop)
        break;
      finalizeOne(inputs[finalIdx++]);
    }

    if (!isec->hasCallSites)
      continue;

    if (finalIdx == endIdx && stubsInRangeVA == TargetInfo::outOfRangeVA) {
      // When we have finalized all input sections, __stubs (destined
      // to follow __text) comes within range of forward branches and
      // we can estimate the threshold address after which we can
      // reach any stub with a forward branch. Note that although it
      // sits in the middle of a loop, this code executes only once.
      // It is in the loop because we need to call it at the proper
      // time: the earliest call site from which the end of __text
      // (and start of __stubs) comes within range of a forward branch.
      stubsInRangeVA = estimateStubsInRangeVA(callIdx);
    }
    // Process relocs by ascending address, i.e., ascending offset within isec.
    std::vector<Reloc> &relocs = isec->relocs;
    // FIXME: This property does not hold for object files produced by ld64's
    // pre-linking.
    assert(is_sorted(relocs,
                     [](Reloc &a, Reloc &b) { return a.offset > b.offset; }));
    for (Reloc &r : reverse(relocs)) {
      ++relocCount;
      if (!target->hasAttr(r.type, RelocAttrBits::BRANCH))
        continue;
      ++callSiteCount;
      // Calculate branch reachability boundaries.
      uint64_t callVA = isecVA + r.offset;
      uint64_t lowVA =
          backwardBranchRange < callVA ? callVA - backwardBranchRange : 0;
      uint64_t highVA = callVA + forwardBranchRange;
      // Calculate our call referent address.
      auto *funcSym = r.referent.get<Symbol *>();
      ThunkInfo &thunkInfo = thunkMap[funcSym];
      // The referent is not reachable, so we need to use a thunk ...
      if (funcSym->isInStubs() && callVA >= stubsInRangeVA) {
        assert(callVA != TargetInfo::outOfRangeVA);
        // ... Oh, wait! We are close enough to the end that __stubs
        // are now within range of a simple forward branch.
        continue;
      }
      uint64_t funcVA = funcSym->resolveBranchVA();
      ++thunkInfo.callSitesUsed;
      if (lowVA <= funcVA && funcVA <= highVA) {
        // The referent is reachable with a simple call instruction.
        continue;
      }
      ++thunkInfo.thunkCallCount;
      ++thunkCallCount;
      // If an existing thunk is reachable, use it ...
      if (thunkInfo.sym != nullptr) {
        uint64_t thunkVA = thunkInfo.isec->getVA();
        if (lowVA <= thunkVA && thunkVA <= highVA) {
          r.referent = thunkInfo.sym;
          continue;
        }
      }
      // ... otherwise, create a new thunk.
      if (addr + size > highVA) {
        // There were too many consecutive branch instructions for `slop`
        // above. If you hit this: For the current algorithm, just bumping up
        // slop above and trying again is probably simplest. (See also PR51578
        // discussion.)
        fatal(Twine(__FUNCTION__) + ": FIXME: thunk range overrun");
      }
      thunkInfo.isec =
          makeSyntheticInputSection(isec->getSegName(), isec->getName());
      thunkInfo.isec->parent = this;
      assert(thunkInfo.isec->live);

      StringRef thunkName = saver().save(funcSym->getName() + ".thunk." +
                                         std::to_string(thunkInfo.sequence++));
      if (!isa<Defined>(funcSym) || cast<Defined>(funcSym)->isExternal()) {
        r.referent = thunkInfo.sym = symtab->addDefined(
            thunkName, /*file=*/nullptr, thunkInfo.isec, /*value=*/0,
            thunkSize, /*isWeakDef=*/false, /*isPrivateExtern=*/true,
            /*isReferencedDynamically=*/false, /*noDeadStrip=*/false,
            /*isWeakDefCanBeHidden=*/false);
      } else {
        r.referent = thunkInfo.sym = make<Defined>(
            thunkName, /*file=*/nullptr, thunkInfo.isec, /*value=*/0,
            thunkSize, /*isWeakDef=*/false, /*isExternal=*/false,
            /*isPrivateExtern=*/true, /*includeInSymtab=*/true,
            /*isReferencedDynamically=*/false, /*noDeadStrip=*/false,
            /*isWeakDefCanBeHidden=*/false);
      }
      thunkInfo.sym->used = true;
      target->populateThunk(thunkInfo.isec, funcSym);
      finalizeOne(thunkInfo.isec);
      thunks.push_back(thunkInfo.isec);
      ++thunkCount;
    }
  }

  log("thunks for " + parent->name + "," + name +
      ": funcs = " + std::to_string(thunkMap.size()) +
      ", relocs = " + std::to_string(relocCount) +
      ", all calls = " + std::to_string(callSiteCount) +
      ", thunk calls = " + std::to_string(thunkCallCount) +
      ", thunks = " + std::to_string(thunkCount));
}

void ConcatOutputSection::writeTo(uint8_t *buf) const {
  for (ConcatInputSection *isec : inputs)
    isec->writeTo(buf + isec->outSecOff);
}

void TextOutputSection::writeTo(uint8_t *buf) const {
  // Merge input sections from the thunk & ordinary vectors.
  size_t i = 0, ie = inputs.size();
  size_t t = 0, te = thunks.size();
  while (i < ie || t < te) {
    while (i < ie && (t == te || inputs[i]->empty() ||
                      inputs[i]->outSecOff < thunks[t]->outSecOff)) {
      inputs[i]->writeTo(buf + inputs[i]->outSecOff);
      ++i;
    }
    while (t < te && (i == ie || thunks[t]->outSecOff < inputs[i]->outSecOff)) {
      thunks[t]->writeTo(buf + thunks[t]->outSecOff);
      ++t;
    }
  }
}

void ConcatOutputSection::finalizeFlags(InputSection *input) {
  switch (sectionType(input->getFlags())) {
  default /*type-unspec'ed*/:
    // FIXME: Add additional logic here when supporting emitting obj files.
    break;
  case S_4BYTE_LITERALS:
  case S_8BYTE_LITERALS:
  case S_16BYTE_LITERALS:
  case S_CSTRING_LITERALS:
  case S_ZEROFILL:
  case S_LAZY_SYMBOL_POINTERS:
  case S_MOD_TERM_FUNC_POINTERS:
  case S_THREAD_LOCAL_REGULAR:
  case S_THREAD_LOCAL_ZEROFILL:
  case S_THREAD_LOCAL_VARIABLES:
  case S_THREAD_LOCAL_INIT_FUNCTION_POINTERS:
  case S_THREAD_LOCAL_VARIABLE_POINTERS:
  case S_NON_LAZY_SYMBOL_POINTERS:
  case S_SYMBOL_STUBS:
    flags |= input->getFlags();
    break;
  }
}

ConcatOutputSection *
ConcatOutputSection::getOrCreateForInput(const InputSection *isec) {
  NamePair names = maybeRenameSection({isec->getSegName(), isec->getName()});
  ConcatOutputSection *&osec = concatOutputSections[names];
  if (!osec) {
    if (isec->getSegName() == segment_names::text &&
        isec->getName() != section_names::gccExceptTab &&
        isec->getName() != section_names::ehFrame)
      osec = make<TextOutputSection>(names.second);
    else
      osec = make<ConcatOutputSection>(names.second);
  }
  return osec;
}

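// Apply any user-requested renames to the segment/section name pair;
// config->sectionRenameMap is populated from the ld64-compatible
// -rename_section family of driver flags.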
NamePair macho::maybeRenameSection(NamePair key) {
  auto newNames = config->sectionRenameMap.find(key);
  if (newNames != config->sectionRenameMap.end())
    return newNames->second;
  return key;
}