//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <climits>

using namespace llvm;
#define DEBUG_TYPE "inline-cost"

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

static cl::opt<int>
    DefaultThreshold("inlinedefault-threshold", cl::Hidden, cl::init(225),
                     cl::ZeroOrMore,
                     cl::desc("Default amount of inlining to perform"));
static cl::opt<bool> PrintInstructionComments(
    "print-instruction-comments", cl::Hidden, cl::init(false),
    cl::desc("Prints comments for instructions based on inline cost analysis"));

static cl::opt<int> InlineThreshold(
    "inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
    cl::desc("Control the amount of inlining to perform (default = 225)"));

static cl::opt<int> HintThreshold(
    "inlinehint-threshold", cl::Hidden, cl::init(325), cl::ZeroOrMore,
    cl::desc("Threshold for inlining functions with inline hint"));

static cl::opt<int>
    ColdCallSiteThreshold("inline-cold-callsite-threshold", cl::Hidden,
                          cl::init(45), cl::ZeroOrMore,
                          cl::desc("Threshold for inlining cold callsites"));

static cl::opt<bool> InlineEnableCostBenefitAnalysis(
    "inline-enable-cost-benefit-analysis", cl::Hidden, cl::init(false),
    cl::desc("Enable the cost-benefit analysis for the inliner"));

static cl::opt<int> InlineSavingsMultiplier(
    "inline-savings-multiplier", cl::Hidden, cl::init(8), cl::ZeroOrMore,
    cl::desc("Multiplier to multiply cycle savings by during inlining"));

static cl::opt<int>
    InlineSizeAllowance("inline-size-allowance", cl::Hidden, cl::init(100),
                        cl::ZeroOrMore,
                        cl::desc("The maximum size of a callee that gets "
                                 "inlined without sufficient cycle savings"));

// We introduce this threshold to help performance of instrumentation based
// PGO before we actually hook up inliner with analysis passes such as BPI and
// BFI.
static cl::opt<int> ColdThreshold(
    "inlinecold-threshold", cl::Hidden, cl::init(45), cl::ZeroOrMore,
    cl::desc("Threshold for inlining functions with cold attribute"));

static cl::opt<int>
    HotCallSiteThreshold("hot-callsite-threshold", cl::Hidden, cl::init(3000),
                         cl::ZeroOrMore,
                         cl::desc("Threshold for hot callsites"));

static cl::opt<int> LocallyHotCallSiteThreshold(
    "locally-hot-callsite-threshold", cl::Hidden, cl::init(525), cl::ZeroOrMore,
    cl::desc("Threshold for locally hot callsites"));

static cl::opt<int> ColdCallSiteRelFreq(
    "cold-callsite-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
    cl::desc("Maximum block frequency, expressed as a percentage of caller's "
             "entry frequency, for a callsite to be cold in the absence of "
             "profile information."));

static cl::opt<int> HotCallSiteRelFreq(
    "hot-callsite-rel-freq", cl::Hidden, cl::init(60), cl::ZeroOrMore,
    cl::desc("Minimum block frequency, expressed as a multiple of caller's "
             "entry frequency, for a callsite to be hot in the absence of "
             "profile information."));

static cl::opt<int> CallPenalty(
    "inline-call-penalty", cl::Hidden, cl::init(25),
    cl::desc("Call penalty that is applied per callsite when inlining"));

static cl::opt<bool> OptComputeFullInlineCost(
    "inline-cost-full", cl::Hidden, cl::init(false), cl::ZeroOrMore,
    cl::desc("Compute the full inline cost of a call site even when the cost "
             "exceeds the threshold."));

static cl::opt<bool> InlineCallerSupersetNoBuiltin(
    "inline-caller-superset-nobuiltin", cl::Hidden, cl::init(true),
    cl::desc("Allow inlining when caller has a superset of callee's nobuiltin "
             "attributes."));

static cl::opt<bool> DisableGEPConstOperand(
    "disable-gep-const-evaluation", cl::Hidden, cl::init(false),
    cl::desc("Disables evaluation of GetElementPtr with constant operands"));
namespace {
class InlineCostCallAnalyzer;

// This struct is used to store information about inline cost of a
// particular instruction.
struct InstructionCostDetail {
  int CostBefore = 0;
  int CostAfter = 0;
  int ThresholdBefore = 0;
  int ThresholdAfter = 0;

  int getThresholdDelta() const { return ThresholdAfter - ThresholdBefore; }

  int getCostDelta() const { return CostAfter - CostBefore; }

  bool hasThresholdChanged() const { return ThresholdAfter != ThresholdBefore; }
};
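// Illustrative example (values are hypothetical, not from the analysis):
// with CostBefore = 100, CostAfter = 105, and an unchanged threshold of 225,
// getCostDelta() returns 5 and hasThresholdChanged() returns false.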
class InlineCostAnnotationWriter : public AssemblyAnnotationWriter {
private:
  InlineCostCallAnalyzer *const ICCA;

public:
  InlineCostAnnotationWriter(InlineCostCallAnalyzer *ICCA) : ICCA(ICCA) {}
  virtual void emitInstructionAnnot(const Instruction *I,
                                    formatted_raw_ostream &OS) override;
};
/// Carry out call site analysis, in order to evaluate inlinability.
/// NOTE: the type is currently used as implementation detail of functions such
/// as llvm::getInlineCost. Note the function_ref constructor parameters - the
/// expectation is that they come from the outer scope, from the wrapper
/// functions. If we want to support constructing CallAnalyzer objects where
/// lambdas are provided inline at construction, or where the object needs to
/// otherwise survive past the scope of the provided functions, we need to
/// revisit the argument types.
class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

protected:
  virtual ~CallAnalyzer() {}

  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;

  /// Getter for the cache of @llvm.assume intrinsics.
  function_ref<AssumptionCache &(Function &)> GetAssumptionCache;

  /// Getter for BlockFrequencyInfo.
  function_ref<BlockFrequencyInfo &(Function &)> GetBFI;

  /// Profile summary information.
  ProfileSummaryInfo *PSI;

  /// The called function.
  Function &F;

  // Cache the DataLayout since we use it a lot.
  const DataLayout &DL;

  /// The OptimizationRemarkEmitter available for this compilation.
  OptimizationRemarkEmitter *ORE;

  /// The candidate callsite being analyzed. Please do not use this to do
  /// analysis in the caller function; we want the inline cost query to be
  /// easily cacheable. Instead, use the cover function paramHasAttr.
  CallBase &CandidateCall;
  /// Extension points for handling callsite features.
  // Called before a basic block was analyzed.
  virtual void onBlockStart(const BasicBlock *BB) {}

  /// Called after a basic block was analyzed.
  virtual void onBlockAnalyzed(const BasicBlock *BB) {}

  /// Called before an instruction was analyzed.
  virtual void onInstructionAnalysisStart(const Instruction *I) {}

  /// Called after an instruction was analyzed.
  virtual void onInstructionAnalysisFinish(const Instruction *I) {}

  /// Called at the end of the analysis of the callsite. Return the outcome of
  /// the analysis, i.e. 'InlineResult(true)' if the inlining may happen, or
  /// the reason it can't.
  virtual InlineResult finalizeAnalysis() { return InlineResult::success(); }

  /// Called when we're about to start processing a basic block, and every time
  /// we are done processing an instruction. Return true if there is no point in
  /// continuing the analysis (e.g. we've determined already the call site is
  /// too expensive to inline).
  virtual bool shouldStop() { return false; }

  /// Called before the analysis of the callee body starts (with callsite
  /// contexts propagated). It checks callsite-specific information. Return a
  /// reason analysis can't continue if that's the case, or 'true' if it may
  /// continue.
  virtual InlineResult onAnalysisStart() { return InlineResult::success(); }

  /// Called if the analysis engine decides SROA cannot be done for the given
  /// alloca.
  virtual void onDisableSROA(AllocaInst *Arg) {}

  /// Called when the analysis engine determines load elimination won't happen.
  virtual void onDisableLoadElimination() {}

  /// Called to account for a call.
  virtual void onCallPenalty() {}

  /// Called to account for the expectation the inlining would result in a load
  /// elimination.
  virtual void onLoadEliminationOpportunity() {}

  /// Called to account for the cost of argument setup for the Call in the
  /// callee's body (not the callsite currently under analysis).
  virtual void onCallArgumentSetup(const CallBase &Call) {}

  /// Called to account for a load relative intrinsic.
  virtual void onLoadRelativeIntrinsic() {}

  /// Called to account for a lowered call.
  virtual void onLoweredCall(Function *F, CallBase &Call, bool IsIndirectCall) {
  }

  /// Account for a jump table of given size. Return false to stop further
  /// processing the switch instruction.
  virtual bool onJumpTable(unsigned JumpTableSize) { return true; }

  /// Account for a case cluster of given size. Return false to stop further
  /// processing of the instruction.
  virtual bool onCaseCluster(unsigned NumCaseCluster) { return true; }

  /// Called at the end of processing a switch instruction, with the given
  /// number of case clusters.
  virtual void onFinalizeSwitch(unsigned JumpTableSize,
                                unsigned NumCaseCluster) {}

  /// Called to account for any other instruction not specifically accounted
  /// for.
  virtual void onMissedSimplification() {}

  /// Start accounting potential benefits due to SROA for the given alloca.
  virtual void onInitializeSROAArg(AllocaInst *Arg) {}

  /// Account SROA savings for the AllocaInst value.
  virtual void onAggregateSROAUse(AllocaInst *V) {}

  bool handleSROA(Value *V, bool DoNotDisable) {
    // Check for SROA candidates in comparisons.
    if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
      if (DoNotDisable) {
        onAggregateSROAUse(SROAArg);
        return true;
      }
      disableSROAForArg(SROAArg);
    }
    return false;
  }
  bool IsCallerRecursive = false;
  bool IsRecursiveCall = false;
  bool ExposesReturnsTwice = false;
  bool HasDynamicAlloca = false;
  bool ContainsNoDuplicateCall = false;
  bool HasReturn = false;
  bool HasIndirectBr = false;
  bool HasUninlineableIntrinsic = false;
  bool InitsVargArgs = false;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize = 0;
  unsigned NumInstructions = 0;
  unsigned NumVectorInstructions = 0;
  /// While we walk the potentially-inlined instructions, we build up and
  /// maintain a mapping of simplified values specific to this callsite. The
  /// idea is to propagate any special information we have about arguments to
  /// this call through the inlinable section of the function, and account for
  /// likely simplifications post-inlining. The most important aspect we track
  /// is CFG altering simplifications -- when we prove a basic block dead, that
  /// can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;

  /// Keep track of the values which map back (through function arguments) to
  /// allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, AllocaInst *> SROAArgValues;

  /// Keep track of Allocas for which we believe we may get SROA optimization.
  DenseSet<AllocaInst *> EnabledSROAAllocas;

  /// Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt>> ConstantOffsetPtrs;

  /// Keep track of dead blocks due to the constant arguments.
  SetVector<BasicBlock *> DeadBlocks;

  /// The mapping of the blocks to their known unique successors due to the
  /// constant arguments.
  DenseMap<BasicBlock *, BasicBlock *> KnownSuccessors;

  /// Model the elimination of repeated loads that is expected to happen
  /// whenever we simplify away the stores that would otherwise cause them to be
  /// loads.
  bool EnableLoadElimination;
  SmallPtrSet<Value *, 16> LoadAddrSet;

  AllocaInst *getSROAArgForValueOrNull(Value *V) const {
    auto It = SROAArgValues.find(V);
    if (It == SROAArgValues.end() || EnabledSROAAllocas.count(It->second) == 0)
      return nullptr;
    return It->second;
  }
  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  void disableSROAForArg(AllocaInst *SROAArg);
  void disableSROA(Value *V);
  void findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB);
  void disableLoadElimination();
  bool isGEPFree(GetElementPtrInst &GEP);
  bool canFoldInboundsGEP(GetElementPtrInst &I);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallBase &Call);
  template <typename Callable>
  bool simplifyInstruction(Instruction &I, Callable Evaluate);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  /// Return true if the given argument to the function being considered for
  /// inlining has the given attribute set either at the call site or the
  /// function declaration. Primarily used to inspect call site specific
  /// attributes since these can be more precise than the ones on the callee
  /// itself.
  bool paramHasAttr(Argument *A, Attribute::AttrKind Attr);
  /// Return true if the given value is known non null within the callee if
  /// inlined through this particular callsite.
  bool isKnownNonNullInCallee(Value *V);

  /// Return true if size growth is allowed when inlining the callee at \p Call.
  bool allowSizeGrowth(CallBase &Call);

  // Custom analysis routines.
  InlineResult analyzeBlock(BasicBlock *BB,
                            SmallPtrSetImpl<const Value *> &EphValues);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *);
  void visit(Module &);
  void visit(Function *);
  void visit(Function &);
  void visit(BasicBlock *);
  void visit(BasicBlock &);
  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitCmpInst(CmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitFNeg(UnaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallBase(CallBase &Call);
  bool visitReturnInst(ReturnInst &RI);
  bool visitBranchInst(BranchInst &BI);
  bool visitSelectInst(SelectInst &SI);
  bool visitSwitchInst(SwitchInst &SI);
  bool visitIndirectBrInst(IndirectBrInst &IBI);
  bool visitResumeInst(ResumeInst &RI);
  bool visitCleanupReturnInst(CleanupReturnInst &RI);
  bool visitCatchReturnInst(CatchReturnInst &RI);
  bool visitUnreachableInst(UnreachableInst &I);
public:
  CallAnalyzer(Function &Callee, CallBase &Call, const TargetTransformInfo &TTI,
               function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
               function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
               ProfileSummaryInfo *PSI = nullptr,
               OptimizationRemarkEmitter *ORE = nullptr)
      : TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
        PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()), ORE(ORE),
        CandidateCall(Call), EnableLoadElimination(true) {}

  InlineResult analyze();

  Optional<Constant *> getSimplifiedValue(Instruction *I) {
    if (SimplifiedValues.find(I) != SimplifiedValues.end())
      return SimplifiedValues[I];
    return None;
  }
  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs = 0;
  unsigned NumConstantOffsetPtrArgs = 0;
  unsigned NumAllocaArgs = 0;
  unsigned NumConstantPtrCmps = 0;
  unsigned NumConstantPtrDiffs = 0;
  unsigned NumInstructionsSimplified = 0;
};
// Considering forming a binary search, we should find the number of nodes
// which is same as the number of comparisons when lowered. For a given
// number of clusters, n, we can define a recursive function, f(n), to find
// the number of nodes in the tree. The recursion is:
//   f(n) = 1 + f(n/2) + f(n - n/2), when n > 3,
//   f(n) = n, when n <= 3.
// This leads to a binary tree where the leaf should be either f(2) or f(3)
// when n > 3. So, the number of comparisons from leaves should be n, while
// the number of non-leaf nodes should be:
//   2^(log2(n) - 1) - 1
//   = 2^log2(n) * 2^-1 - 1
//   = n / 2 - 1.
// Considering comparisons from leaf and non-leaf nodes, we can estimate the
// number of comparisons in a simple closed form:
//   n + n / 2 - 1 = n * 3 / 2 - 1.
int64_t getExpectedNumberOfCompare(int NumCaseCluster) {
  return 3 * static_cast<int64_t>(NumCaseCluster) / 2 - 1;
}
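// Worked example of the closed form above: for n = 8 case clusters, the
// lowered tree performs 8 leaf comparisons plus 8/2 - 1 = 3 non-leaf
// comparisons, so getExpectedNumberOfCompare(8) = 3 * 8 / 2 - 1 = 11.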
/// FIXME: if it is necessary to derive from InlineCostCallAnalyzer, note
/// the FIXME in onLoweredCall, when instantiating an InlineCostCallAnalyzer
class InlineCostCallAnalyzer final : public CallAnalyzer {
  const int CostUpperBound = INT_MAX - InlineConstants::InstrCost - 1;
  const bool ComputeFullInlineCost;
  int LoadEliminationCost = 0;
  /// Bonus to be applied when percentage of vector instructions in callee is
  /// high (see more details in updateThreshold).
  int VectorBonus = 0;
  /// Bonus to be applied when the callee has only one reachable basic block.
  int SingleBBBonus = 0;

  /// Tunable parameters that control the analysis.
  const InlineParams &Params;

  // This DenseMap stores the delta change in cost and threshold after
  // accounting for the given instruction. The map is filled only with the
  // flag PrintInstructionComments on.
  DenseMap<const Instruction *, InstructionCostDetail> InstructionCostDetailMap;

  /// Upper bound for the inlining cost. Bonuses are being applied to account
  /// for speculative "expected profit" of the inlining decision.
  int Threshold = 0;

  /// Attempt to evaluate indirect calls to boost their inline cost.
  const bool BoostIndirectCalls;

  /// Ignore the threshold when finalizing analysis.
  const bool IgnoreThreshold;

  // True if the cost-benefit-analysis-based inliner is enabled.
  const bool CostBenefitAnalysisEnabled;

  /// Inlining cost measured in abstract units, accounts for all the
  /// instructions expected to be executed for a given function invocation.
  /// Instructions that are statically proven to be dead based on call-site
  /// arguments are not counted here.
  int Cost = 0;

  // The cumulative cost at the beginning of the basic block being analyzed. At
  // the end of analyzing each basic block, "Cost - CostAtBBStart" represents
  // the size of that basic block.
  int CostAtBBStart = 0;

  // The static size of live but cold basic blocks. This is "static" in the
  // sense that it's not weighted by profile counts at all.
  int ColdSize = 0;

  // Whether inlining is decided by cost-benefit analysis.
  bool DecidedByCostBenefit = false;

  // The cost-benefit pair computed by cost-benefit analysis.
  Optional<CostBenefitPair> CostBenefit = None;

  bool SingleBB = true;

  unsigned SROACostSavings = 0;
  unsigned SROACostSavingsLost = 0;

  /// The mapping of caller Alloca values to their accumulated cost savings. If
  /// we have to disable SROA for one of the allocas, this tells us how much
  /// cost must be added.
  DenseMap<AllocaInst *, int> SROAArgCosts;
  /// Return true if \p Call is a cold callsite.
  bool isColdCallSite(CallBase &Call, BlockFrequencyInfo *CallerBFI);

  /// Update Threshold based on callsite properties such as callee
  /// attributes and callee hotness for PGO builds. The Callee is explicitly
  /// passed to support analyzing indirect calls whose target is inferred by
  /// analysis.
  void updateThreshold(CallBase &Call, Function &Callee);
  /// Return a higher threshold if \p Call is a hot callsite.
  Optional<int> getHotCallSiteThreshold(CallBase &Call,
                                        BlockFrequencyInfo *CallerBFI);
  /// Handle a capped 'int' increment for Cost.
  void addCost(int64_t Inc, int64_t UpperBound = INT_MAX) {
    assert(UpperBound > 0 && UpperBound <= INT_MAX && "invalid upper bound");
    Cost = std::min<int>(UpperBound, Cost + Inc);
  }
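  // Saturation example for addCost above (hypothetical values): with
  // Cost = 90, a call addCost(25, /*UpperBound=*/100) computes
  // std::min<int>(100, 115) and leaves Cost at 100 rather than 115.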
  void onDisableSROA(AllocaInst *Arg) override {
    auto CostIt = SROAArgCosts.find(Arg);
    if (CostIt == SROAArgCosts.end())
      return;
    addCost(CostIt->second);
    SROACostSavings -= CostIt->second;
    SROACostSavingsLost += CostIt->second;
    SROAArgCosts.erase(CostIt);
  }

  void onDisableLoadElimination() override {
    addCost(LoadEliminationCost);
    LoadEliminationCost = 0;
  }

  void onCallPenalty() override { addCost(CallPenalty); }

  void onCallArgumentSetup(const CallBase &Call) override {
    // Pay the price of the argument setup. We account for the average 1
    // instruction per call argument setup here.
    addCost(Call.arg_size() * InlineConstants::InstrCost);
  }

  void onLoadRelativeIntrinsic() override {
    // This is normally lowered to 4 LLVM instructions.
    addCost(3 * InlineConstants::InstrCost);
  }
  void onLoweredCall(Function *F, CallBase &Call,
                     bool IsIndirectCall) override {
    // We account for the average 1 instruction per call argument setup here.
    addCost(Call.arg_size() * InlineConstants::InstrCost);

    // If we have a constant that we are calling as a function, we can peer
    // through it and see the function target. This happens not infrequently
    // during devirtualization and so we want to give it a hefty bonus for
    // inlining, but cap that bonus in the event that inlining wouldn't pan out.
    // Pretend to inline the function, with a custom threshold.
    if (IsIndirectCall && BoostIndirectCalls) {
      auto IndirectCallParams = Params;
      IndirectCallParams.DefaultThreshold =
          InlineConstants::IndirectCallThreshold;
      /// FIXME: if InlineCostCallAnalyzer is derived from, this may need
      /// to instantiate the derived class.
      InlineCostCallAnalyzer CA(*F, Call, IndirectCallParams, TTI,
                                GetAssumptionCache, GetBFI, PSI, ORE, false);
      if (CA.analyze().isSuccess()) {
        // We were able to inline the indirect call! Subtract the cost from the
        // threshold to get the bonus we want to apply, but don't go below zero.
        Cost -= std::max(0, CA.getThreshold() - CA.getCost());
      }
    } else
      // Otherwise simply add the cost for merely making the call.
      addCost(CallPenalty);
  }
  void onFinalizeSwitch(unsigned JumpTableSize,
                        unsigned NumCaseCluster) override {
    // If suitable for a jump table, consider the cost for the table size and
    // branch to destination.
    // Maximum valid cost increased in this function.
    if (JumpTableSize) {
      int64_t JTCost =
          static_cast<int64_t>(JumpTableSize) * InlineConstants::InstrCost +
          4 * InlineConstants::InstrCost;

      addCost(JTCost, static_cast<int64_t>(CostUpperBound));
      return;
    }

    if (NumCaseCluster <= 3) {
      // Suppose a comparison includes one compare and one conditional branch.
      addCost(NumCaseCluster * 2 * InlineConstants::InstrCost);
      return;
    }

    int64_t ExpectedNumberOfCompare =
        getExpectedNumberOfCompare(NumCaseCluster);
    int64_t SwitchCost =
        ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;

    addCost(SwitchCost, static_cast<int64_t>(CostUpperBound));
  }
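  // Cost illustration for the paths in onFinalizeSwitch above: a switch
  // lowered to a 16-entry jump table costs (16 + 4) * InstrCost, while 8
  // case clusters without a jump table cost (3 * 8 / 2 - 1) * 2 * InstrCost
  // = 22 * InstrCost (one compare plus one branch per expected comparison).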
  void onMissedSimplification() override {
    addCost(InlineConstants::InstrCost);
  }

  void onInitializeSROAArg(AllocaInst *Arg) override {
    assert(Arg != nullptr &&
           "Should not initialize SROA costs for null value.");
    SROAArgCosts[Arg] = 0;
  }

  void onAggregateSROAUse(AllocaInst *SROAArg) override {
    auto CostIt = SROAArgCosts.find(SROAArg);
    assert(CostIt != SROAArgCosts.end() &&
           "expected this argument to have a cost");
    CostIt->second += InlineConstants::InstrCost;
    SROACostSavings += InlineConstants::InstrCost;
  }
  void onBlockStart(const BasicBlock *BB) override { CostAtBBStart = Cost; }

  void onBlockAnalyzed(const BasicBlock *BB) override {
    if (CostBenefitAnalysisEnabled) {
      // Keep track of the static size of live but cold basic blocks. For now,
      // we define a cold basic block to be one that's never executed.
      assert(GetBFI && "GetBFI must be available");
      BlockFrequencyInfo *BFI = &(GetBFI(F));
      assert(BFI && "BFI must be available");
      auto ProfileCount = BFI->getBlockProfileCount(BB);
      assert(ProfileCount.hasValue());
      if (ProfileCount.getValue() == 0)
        ColdSize += Cost - CostAtBBStart;
    }

    auto *TI = BB->getTerminator();
    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }
  void onInstructionAnalysisStart(const Instruction *I) override {
    // This function is called to store the initial cost of inlining before
    // the given instruction was assessed.
    if (!PrintInstructionComments)
      return;
    InstructionCostDetailMap[I].CostBefore = Cost;
    InstructionCostDetailMap[I].ThresholdBefore = Threshold;
  }

  void onInstructionAnalysisFinish(const Instruction *I) override {
    // This function is called to find new values of cost and threshold after
    // the instruction has been assessed.
    if (!PrintInstructionComments)
      return;
    InstructionCostDetailMap[I].CostAfter = Cost;
    InstructionCostDetailMap[I].ThresholdAfter = Threshold;
  }
  bool isCostBenefitAnalysisEnabled() {
    if (!PSI || !PSI->hasProfileSummary())
      return false;

    if (!GetBFI)
      return false;

    if (InlineEnableCostBenefitAnalysis.getNumOccurrences()) {
      // Honor the explicit request from the user.
      if (!InlineEnableCostBenefitAnalysis)
        return false;
    } else {
      // Otherwise, require instrumentation profile.
      if (!PSI->hasInstrumentationProfile())
        return false;
    }

    auto *Caller = CandidateCall.getParent()->getParent();
    if (!Caller->getEntryCount())
      return false;

    BlockFrequencyInfo *CallerBFI = &(GetBFI(*Caller));
    if (!CallerBFI)
      return false;

    // For now, limit to hot call site.
    if (!PSI->isHotCallSite(CandidateCall, CallerBFI))
      return false;

    // Make sure we have a nonzero entry count.
    auto EntryCount = F.getEntryCount();
    if (!EntryCount || !EntryCount.getCount())
      return false;

    BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
    if (!CalleeBFI)
      return false;

    return true;
  }
  // Determine whether we should inline the given call site, taking into account
  // both the size cost and the cycle savings. Return None if we don't have
  // sufficient profiling information to decide.
  Optional<bool> costBenefitAnalysis() {
    if (!CostBenefitAnalysisEnabled)
      return None;

    // buildInlinerPipeline in the pass builder sets HotCallSiteThreshold to 0
    // for the prelink phase of the AutoFDO + ThinLTO build. Honor the logic by
    // falling back to the cost-based metric.
    // TODO: Improve this hacky condition.
    if (Threshold == 0)
      return None;

    assert(GetBFI);
    BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
    assert(CalleeBFI);

    // The cycle savings expressed as the sum of InlineConstants::InstrCost
    // multiplied by the estimated dynamic count of each instruction we can
    // avoid. Savings come from the call site cost, such as argument setup and
    // the call instruction, as well as the instructions that are folded.
    //
    // We use 128-bit APInt here to avoid potential overflow. This variable
    // should stay well below 10^^24 (or 2^^80) in practice. This "worst" case
    // assumes that we can avoid or fold a billion instructions, each with a
    // profile count of 10^^15 -- roughly the number of cycles for a 24-hour
    // period on a 4GHz machine.
    APInt CycleSavings(128, 0);
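    // Scale check for the comment above: 10^9 avoided instructions, each with
    // a profile count of 10^15, contribute on the order of 10^24 units, while
    // a 128-bit APInt can represent values up to 2^128 - 1 (~3.4 * 10^38), so
    // the accumulation below cannot overflow in practice.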
    for (auto &BB : F) {
      APInt CurrentSavings(128, 0);
      for (auto &I : BB) {
        if (BranchInst *BI = dyn_cast<BranchInst>(&I)) {
          // Count a conditional branch as savings if it becomes unconditional.
          if (BI->isConditional() &&
              dyn_cast_or_null<ConstantInt>(
                  SimplifiedValues.lookup(BI->getCondition()))) {
            CurrentSavings += InlineConstants::InstrCost;
          }
        } else if (Value *V = dyn_cast<Value>(&I)) {
          // Count an instruction as savings if we can fold it.
          if (SimplifiedValues.count(V)) {
            CurrentSavings += InlineConstants::InstrCost;
          }
        }
      }

      auto ProfileCount = CalleeBFI->getBlockProfileCount(&BB);
      assert(ProfileCount.hasValue());
      CurrentSavings *= ProfileCount.getValue();
      CycleSavings += CurrentSavings;
    }
    // Compute the cycle savings per call.
    auto EntryProfileCount = F.getEntryCount();
    assert(EntryProfileCount.hasValue() && EntryProfileCount.getCount());
    auto EntryCount = EntryProfileCount.getCount();
    CycleSavings += EntryCount / 2;
    CycleSavings = CycleSavings.udiv(EntryCount);

    // Compute the total savings for the call site.
    auto *CallerBB = CandidateCall.getParent();
    BlockFrequencyInfo *CallerBFI = &(GetBFI(*(CallerBB->getParent())));
    CycleSavings += getCallsiteCost(this->CandidateCall, DL);
    CycleSavings *= CallerBFI->getBlockProfileCount(CallerBB).getValue();

    // Remove the cost of the cold basic blocks.
    int Size = Cost - ColdSize;

    // Allow tiny callees to be inlined regardless of whether they meet the
    // savings threshold.
    Size = Size > InlineSizeAllowance ? Size - InlineSizeAllowance : 1;

    CostBenefit.emplace(APInt(128, Size), CycleSavings);

    // Return true if the savings justify the cost of inlining. Specifically,
    // we evaluate the following inequality:
    //
    //  CycleSavings      PSI->getOrCompHotCountThreshold()
    // --------------  >=  ---------------------------------
    //       Size               InlineSavingsMultiplier
    //
    // Note that the left hand side is specific to a call site. The right hand
    // side is a constant for the entire executable.
    APInt LHS = CycleSavings;
    LHS *= InlineSavingsMultiplier;
    APInt RHS(128, PSI->getOrCompHotCountThreshold());
    RHS *= Size;
    return LHS.uge(RHS);
  }
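  // Worked example of the inequality above with hypothetical numbers: if
  // CycleSavings = 4000, Size = 50, InlineSavingsMultiplier = 8 (the
  // default), and the hot count threshold is 500, we compare
  // 4000 * 8 = 32000 against 500 * 50 = 25000, so the callee is inlined.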
  InlineResult finalizeAnalysis() override {
    // Loops generally act a lot like calls in that they act like barriers to
    // movement, require a certain amount of setup, etc. So when optimising for
    // size, we penalise any call sites that perform loops. We do this after all
    // other costs here, so will likely only be dealing with relatively small
    // functions (and hence DT and LI will hopefully be cheap).
    auto *Caller = CandidateCall.getFunction();
    if (Caller->hasMinSize()) {
      DominatorTree DT(F);
      LoopInfo LI(DT);
      int NumLoops = 0;
      for (Loop *L : LI) {
        // Ignore loops that will not be executed
        if (DeadBlocks.count(L->getHeader()))
          continue;
        NumLoops++;
      }
      addCost(NumLoops * InlineConstants::LoopPenalty);
    }

    // We applied the maximum possible vector bonus at the beginning. Now,
    // subtract the excess bonus, if any, from the Threshold before
    // comparing against Cost.
    if (NumVectorInstructions <= NumInstructions / 10)
      Threshold -= VectorBonus;
    else if (NumVectorInstructions <= NumInstructions / 2)
      Threshold -= VectorBonus / 2;

    if (auto Result = costBenefitAnalysis()) {
      DecidedByCostBenefit = true;
      if (Result.getValue())
        return InlineResult::success();
      else
        return InlineResult::failure("Cost over threshold.");
    }

    if (IgnoreThreshold || Cost < std::max(1, Threshold))
      return InlineResult::success();
    return InlineResult::failure("Cost over threshold.");
  }
  bool shouldStop() override {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    return !IgnoreThreshold && Cost >= Threshold && !ComputeFullInlineCost;
  }
  void onLoadEliminationOpportunity() override {
    LoadEliminationCost += InlineConstants::InstrCost;
  }
  InlineResult onAnalysisStart() override {
    // Perform some tweaks to the cost and threshold based on the direct
    // callsite information.

    // We want to more aggressively inline vector-dense kernels, so up the
    // threshold, and we'll lower it if the % of vector instructions gets too
    // low. Note that these bonuses are somewhat arbitrary and evolved over
    // time by accident as much as because they are principled bonuses.
    //
    // FIXME: It would be nice to remove all such bonuses. At least it would be
    // nice to base the bonus values on something more scientific.
    assert(NumInstructions == 0);
    assert(NumVectorInstructions == 0);

    // Update the threshold based on callsite properties
    updateThreshold(CandidateCall, F);

    // While Threshold depends on commandline options that can take negative
    // values, we want to enforce the invariant that the computed threshold and
    // bonuses are non-negative.
    assert(Threshold >= 0);
    assert(SingleBBBonus >= 0);
    assert(VectorBonus >= 0);

    // Speculatively apply all possible bonuses to Threshold. If cost exceeds
    // this Threshold any time, and cost cannot decrease, we can stop processing
    // the rest of the function body.
    Threshold += (SingleBBBonus + VectorBonus);

    // Give out bonuses for the callsite, as the instructions setting them up
    // will be gone after inlining.
    addCost(-getCallsiteCost(this->CandidateCall, DL));

    // If this function uses the coldcc calling convention, prefer not to inline
    // it.
    if (F.getCallingConv() == CallingConv::Cold)
      Cost += InlineConstants::ColdccPenalty;

    // Check if we're done. This can happen due to bonuses and penalties.
    if (Cost >= Threshold && !ComputeFullInlineCost)
      return InlineResult::failure("high cost");

    return InlineResult::success();
  }
public:
  InlineCostCallAnalyzer(
      Function &Callee, CallBase &Call, const InlineParams &Params,
      const TargetTransformInfo &TTI,
      function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
      function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
      ProfileSummaryInfo *PSI = nullptr,
      OptimizationRemarkEmitter *ORE = nullptr, bool BoostIndirect = true,
      bool IgnoreThreshold = false)
      : CallAnalyzer(Callee, Call, TTI, GetAssumptionCache, GetBFI, PSI, ORE),
        ComputeFullInlineCost(OptComputeFullInlineCost ||
                              Params.ComputeFullInlineCost || ORE ||
                              isCostBenefitAnalysisEnabled()),
        Params(Params), Threshold(Params.DefaultThreshold),
        BoostIndirectCalls(BoostIndirect), IgnoreThreshold(IgnoreThreshold),
        CostBenefitAnalysisEnabled(isCostBenefitAnalysisEnabled()),
        Writer(this) {}

  /// Annotation Writer for instruction details
  InlineCostAnnotationWriter Writer;
  // Prints the same analysis as dump(), but its definition is not dependent
  // on the build.
  void print();

  Optional<InstructionCostDetail> getCostDetails(const Instruction *I) {
    if (InstructionCostDetailMap.find(I) != InstructionCostDetailMap.end())
      return InstructionCostDetailMap[I];
    return None;
  }
  virtual ~InlineCostCallAnalyzer() {}
  int getThreshold() const { return Threshold; }
  int getCost() const { return Cost; }
  Optional<CostBenefitPair> getCostBenefitPair() { return CostBenefit; }
  bool wasDecidedByCostBenefit() const { return DecidedByCostBenefit; }
};
class InlineCostFeaturesAnalyzer final : public CallAnalyzer {
private:
  InlineCostFeatures Cost = {};

  // FIXME: These constants are taken from the heuristic-based cost visitor.
  // These should be removed entirely in a later revision to avoid reliance on
  // heuristics in the ML inliner.
  static constexpr int JTCostMultiplier = 4;
  static constexpr int CaseClusterCostMultiplier = 2;
  static constexpr int SwitchCostMultiplier = 2;

  // FIXME: These are taken from the heuristic-based cost visitor: we should
  // eventually abstract these to the CallAnalyzer to avoid duplication.
  unsigned SROACostSavingOpportunities = 0;
  int VectorBonus = 0;
  int SingleBBBonus = 0;
  int Threshold = 5;

  DenseMap<AllocaInst *, unsigned> SROACosts;
  void increment(InlineCostFeatureIndex Feature, int64_t Delta = 1) {
    Cost[static_cast<size_t>(Feature)] += Delta;
  }

  void set(InlineCostFeatureIndex Feature, int64_t Value) {
    Cost[static_cast<size_t>(Feature)] = Value;
  }
  void onDisableSROA(AllocaInst *Arg) override {
    auto CostIt = SROACosts.find(Arg);
    if (CostIt == SROACosts.end())
      return;

    increment(InlineCostFeatureIndex::SROALosses, CostIt->second);
    SROACostSavingOpportunities -= CostIt->second;
    SROACosts.erase(CostIt);
  }

  void onDisableLoadElimination() override {
    set(InlineCostFeatureIndex::LoadElimination, 1);
  }

  void onCallPenalty() override {
    increment(InlineCostFeatureIndex::CallPenalty, CallPenalty);
  }

  void onCallArgumentSetup(const CallBase &Call) override {
    increment(InlineCostFeatureIndex::CallArgumentSetup,
              Call.arg_size() * InlineConstants::InstrCost);
  }

  void onLoadRelativeIntrinsic() override {
    increment(InlineCostFeatureIndex::LoadRelativeIntrinsic,
              3 * InlineConstants::InstrCost);
  }
  void onLoweredCall(Function *F, CallBase &Call,
                     bool IsIndirectCall) override {
    increment(InlineCostFeatureIndex::LoweredCallArgSetup,
              Call.arg_size() * InlineConstants::InstrCost);

    if (IsIndirectCall) {
      InlineParams IndirectCallParams = {/* DefaultThreshold*/ 0,
                                         /*HintThreshold*/ {},
                                         /*ColdThreshold*/ {},
                                         /*OptSizeThreshold*/ {},
                                         /*OptMinSizeThreshold*/ {},
                                         /*HotCallSiteThreshold*/ {},
                                         /*LocallyHotCallSiteThreshold*/ {},
                                         /*ColdCallSiteThreshold*/ {},
                                         /*ComputeFullInlineCost*/ true,
                                         /*EnableDeferral*/ true};
      IndirectCallParams.DefaultThreshold =
          InlineConstants::IndirectCallThreshold;

      InlineCostCallAnalyzer CA(*F, Call, IndirectCallParams, TTI,
                                GetAssumptionCache, GetBFI, PSI, ORE, false,
                                true);
      if (CA.analyze().isSuccess()) {
        increment(InlineCostFeatureIndex::NestedInlineCostEstimate,
                  CA.getCost());
        increment(InlineCostFeatureIndex::NestedInlines, 1);
      }
    } else {
      onCallPenalty();
    }
  }
,
1046 unsigned NumCaseCluster
) override
{
1048 if (JumpTableSize
) {
1050 static_cast<int64_t>(JumpTableSize
) * InlineConstants::InstrCost
+
1051 JTCostMultiplier
* InlineConstants::InstrCost
;
1052 increment(InlineCostFeatureIndex::JumpTablePenalty
, JTCost
);
1056 if (NumCaseCluster
<= 3) {
1057 increment(InlineCostFeatureIndex::CaseClusterPenalty
,
1058 NumCaseCluster
* CaseClusterCostMultiplier
*
1059 InlineConstants::InstrCost
);
1063 int64_t ExpectedNumberOfCompare
=
1064 getExpectedNumberOfCompare(NumCaseCluster
);
1066 int64_t SwitchCost
= ExpectedNumberOfCompare
* SwitchCostMultiplier
*
1067 InlineConstants::InstrCost
;
1068 increment(InlineCostFeatureIndex::SwitchPenalty
, SwitchCost
);
  void onMissedSimplification() override {
    increment(InlineCostFeatureIndex::UnsimplifiedCommonInstructions,
              InlineConstants::InstrCost);
  }

  void onInitializeSROAArg(AllocaInst *Arg) override { SROACosts[Arg] = 0; }
  void onAggregateSROAUse(AllocaInst *Arg) override {
    SROACosts.find(Arg)->second += InlineConstants::InstrCost;
    SROACostSavingOpportunities += InlineConstants::InstrCost;
  }
  void onBlockAnalyzed(const BasicBlock *BB) override {
    if (BB->getTerminator()->getNumSuccessors() > 1)
      set(InlineCostFeatureIndex::IsMultipleBlocks, 1);
    Threshold -= SingleBBBonus;
  }
  InlineResult finalizeAnalysis() override {
    auto *Caller = CandidateCall.getFunction();
    if (Caller->hasMinSize()) {
      DominatorTree DT(F);
      LoopInfo LI(DT);
      for (Loop *L : LI) {
        // Ignore loops that will not be executed
        if (DeadBlocks.count(L->getHeader()))
          continue;
        increment(InlineCostFeatureIndex::NumLoops,
                  InlineConstants::LoopPenalty);
      }
    }
    set(InlineCostFeatureIndex::DeadBlocks, DeadBlocks.size());
    set(InlineCostFeatureIndex::SimplifiedInstructions,
        NumInstructionsSimplified);
    set(InlineCostFeatureIndex::ConstantArgs, NumConstantArgs);
    set(InlineCostFeatureIndex::ConstantOffsetPtrArgs,
        NumConstantOffsetPtrArgs);
    set(InlineCostFeatureIndex::SROASavings, SROACostSavingOpportunities);

    if (NumVectorInstructions <= NumInstructions / 10)
      Threshold -= VectorBonus;
    else if (NumVectorInstructions <= NumInstructions / 2)
      Threshold -= VectorBonus / 2;

    set(InlineCostFeatureIndex::Threshold, Threshold);

    return InlineResult::success();
  }
  bool shouldStop() override { return false; }
  void onLoadEliminationOpportunity() override {
    increment(InlineCostFeatureIndex::LoadElimination, 1);
  }
  InlineResult onAnalysisStart() override {
    increment(InlineCostFeatureIndex::CallSiteCost,
              -1 * getCallsiteCost(this->CandidateCall, DL));

    set(InlineCostFeatureIndex::ColdCcPenalty,
        (F.getCallingConv() == CallingConv::Cold));

    // FIXME: we shouldn't repeat this logic in both the Features and Cost
    // analyzer - instead, we should abstract it to a common method in the
    // CallAnalyzer.
    int SingleBBBonusPercent = 50;
    int VectorBonusPercent = TTI.getInlinerVectorBonusPercent();
    Threshold += TTI.adjustInliningThreshold(&CandidateCall);
    Threshold *= TTI.getInliningThresholdMultiplier();
    SingleBBBonus = Threshold * SingleBBBonusPercent / 100;
    VectorBonus = Threshold * VectorBonusPercent / 100;
    Threshold += (SingleBBBonus + VectorBonus);

    return InlineResult::success();
  }
public:
  InlineCostFeaturesAnalyzer(
      const TargetTransformInfo &TTI,
      function_ref<AssumptionCache &(Function &)> &GetAssumptionCache,
      function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
      ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE, Function &Callee,
      CallBase &Call)
      : CallAnalyzer(Callee, Call, TTI, GetAssumptionCache, GetBFI, PSI) {}

  const InlineCostFeatures &features() const { return Cost; }
};

} // namespace
/// Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

void CallAnalyzer::disableSROAForArg(AllocaInst *SROAArg) {
  onDisableSROA(SROAArg);
  EnabledSROAAllocas.erase(SROAArg);
  disableLoadElimination();
}
void InlineCostAnnotationWriter::emitInstructionAnnot(
    const Instruction *I, formatted_raw_ostream &OS) {
  // The cost of inlining of the given instruction is printed always.
  // The threshold delta is printed only when it is non-zero. It happens
  // when we decided to give a bonus at a particular instruction.
  Optional<InstructionCostDetail> Record = ICCA->getCostDetails(I);
  if (!Record)
    OS << "; No analysis for the instruction";
  else {
    OS << "; cost before = " << Record->CostBefore
       << ", cost after = " << Record->CostAfter
       << ", threshold before = " << Record->ThresholdBefore
       << ", threshold after = " << Record->ThresholdAfter << ", ";
    OS << "cost delta = " << Record->getCostDelta();
    if (Record->hasThresholdChanged())
      OS << ", threshold delta = " << Record->getThresholdDelta();
  }
  auto C = ICCA->getSimplifiedValue(const_cast<Instruction *>(I));
  if (C) {
    OS << ", simplified to ";
    C.getValue()->print(OS, true);
  }
  OS << "\n";
}
/// If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
    disableSROAForArg(SROAArg);
  }
}

void CallAnalyzer::disableLoadElimination() {
  if (EnableLoadElimination) {
    onDisableLoadElimination();
    EnableLoadElimination = false;
  }
}
/// Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  unsigned IntPtrWidth = DL.getIndexTypeSizeInBits(GEP.getType());
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero())
      continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}
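// Offset illustration for the routine above (hypothetical IR): for
//   %f = getelementptr inbounds {i32, i64}, ptr %p, i64 1, i32 1
// the array index contributes 1 * sizeof({i32, i64}) = 16 bytes on a typical
// 64-bit target, and the struct index adds the field offset of 8 bytes, so
// Offset accumulates 24.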
/// Use TTI to check whether a GEP is free.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
  SmallVector<Value *, 4> Operands;
  Operands.push_back(GEP.getOperand(0));
  for (const Use &Op : GEP.indices())
    if (Constant *SimpleOp = SimplifiedValues.lookup(Op))
      Operands.push_back(SimpleOp);
    else
      Operands.push_back(Op);
  return TTI.getUserCost(&GEP, Operands,
                         TargetTransformInfo::TCK_SizeAndLatency) ==
         TargetTransformInfo::TCC_Free;
}
bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  disableSROA(I.getOperand(0));

  // Check whether inlining will turn a dynamic alloca into a static
  // alloca and handle that case.
  if (I.isArrayAllocation()) {
    Constant *Size = SimplifiedValues.lookup(I.getArraySize());
    if (auto *AllocSize = dyn_cast_or_null<ConstantInt>(Size)) {
      // Sometimes a dynamic alloca could be converted into a static alloca
      // after this constant prop, and become a huge static alloca on an
      // unconditional CFG path. Avoid inlining if this is going to happen above
      // a threshold.
      // FIXME: If the threshold is removed or lowered too much, we could end up
      // being too pessimistic and prevent inlining non-problematic code. This
      // could result in unintended perf regressions. A better overall strategy
      // is needed to track stack usage during inlining.
      Type *Ty = I.getAllocatedType();
      AllocatedSize = SaturatingMultiplyAdd(
          AllocSize->getLimitedValue(),
          DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
      if (AllocatedSize > InlineConstants::MaxSimplifiedDynamicAllocaToInline)
        HasDynamicAlloca = true;
      return false;
    }
  }

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize =
        SaturatingAdd(DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
  }

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  if (!I.isStaticAlloca())
    HasDynamicAlloca = true;

  return false;
}
bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  // FIXME: Pointer sizes may differ between different address spaces, so do we
  // need to use correct address space in the call to getPointerSizeInBits here?
  // Or could we skip the getPointerSizeInBits call completely? As far as I can
  // see the ZeroOffset is used as a dummy value, so we can probably use any
  // bit width for the ZeroOffset?
  APInt ZeroOffset = APInt::getNullValue(DL.getPointerSizeInBits(0));
  bool CheckSROA = I.getType()->isPointerTy();

  // Track the constant or pointer with constant offset we've seen so far.
  Constant *FirstC = nullptr;
  std::pair<Value *, APInt> FirstBaseAndOffset = {nullptr, ZeroOffset};
  Value *FirstV = nullptr;

  for (unsigned i = 0, e = I.getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = I.getIncomingBlock(i);
    // If the incoming block is dead, skip the incoming block.
    if (DeadBlocks.count(Pred))
      continue;
    // If the parent block of phi is not the known successor of the incoming
    // block, skip the incoming block.
    BasicBlock *KnownSuccessor = KnownSuccessors[Pred];
    if (KnownSuccessor && KnownSuccessor != I.getParent())
      continue;

    Value *V = I.getIncomingValue(i);
    // If the incoming value is this phi itself, skip the incoming value.
    if (&I == V)
      continue;

    Constant *C = dyn_cast<Constant>(V);
    if (!C)
      C = SimplifiedValues.lookup(V);

    std::pair<Value *, APInt> BaseAndOffset = {nullptr, ZeroOffset};
    if (!C && CheckSROA)
      BaseAndOffset = ConstantOffsetPtrs.lookup(V);

    if (!C && !BaseAndOffset.first)
      // The incoming value is neither a constant nor a pointer with constant
      // offset, exit early.
      return true;

    if (FirstC) {
      if (FirstC == C)
        // If we've seen a constant incoming value before and it is the same
        // constant we see this time, continue checking the next incoming value.
        continue;
      // Otherwise early exit because we either see a different constant or saw
      // a constant before but we have a pointer with constant offset this time.
      return true;
    }

    if (FirstV) {
      // The same logic as above, but check pointer with constant offset here.
      if (FirstBaseAndOffset == BaseAndOffset)
        continue;
      return true;
    }

    if (C) {
      // This is the 1st time we've seen a constant, record it.
      FirstC = C;
      continue;
    }

    // The remaining case is that this is the 1st time we've seen a pointer with
    // constant offset, record it.
    FirstV = V;
    FirstBaseAndOffset = BaseAndOffset;
  }

  // Check if we can map phi to a constant.
  if (FirstC) {
    SimplifiedValues[&I] = FirstC;
    return true;
  }

  // Check if we can map phi to a pointer with constant offset.
  if (FirstBaseAndOffset.first) {
    ConstantOffsetPtrs[&I] = FirstBaseAndOffset;

    if (auto *SROAArg = getSROAArgForValueOrNull(FirstV))
      SROAArgValues[&I] = SROAArg;
  }

  return true;
}
/// Check we can fold GEPs of constant-offset call site argument pointers.
/// This requires target data and inbounds GEPs.
///
/// \return true if the specified GEP can be folded.
bool CallAnalyzer::canFoldInboundsGEP(GetElementPtrInst &I) {
  // Check if we have a base + offset for the pointer.
  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(I.getPointerOperand());
  if (!BaseAndOffset.first)
    return false;

  // Check if the offset of this GEP is constant, and if so accumulate it
  // into Offset.
  if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second))
    return false;

  // Add the result as a new mapping to Base + Offset.
  ConstantOffsetPtrs[&I] = BaseAndOffset;

  return true;
}
bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  auto *SROAArg = getSROAArgForValueOrNull(I.getPointerOperand());

  // Lambda to check whether a GEP's indices are all constant.
  auto IsGEPOffsetConstant = [&](GetElementPtrInst &GEP) {
    for (const Use &Op : GEP.indices())
      if (!isa<Constant>(Op) && !SimplifiedValues.lookup(Op))
        return false;
    return true;
  };

  if (!DisableGEPConstOperand)
    if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
          SmallVector<Constant *, 2> Indices;
          for (unsigned int Index = 1; Index < COps.size(); ++Index)
            Indices.push_back(COps[Index]);
          return ConstantExpr::getGetElementPtr(
              I.getSourceElementType(), COps[0], Indices, I.isInBounds());
        }))
      return true;

  if ((I.isInBounds() && canFoldInboundsGEP(I)) || IsGEPOffsetConstant(I)) {
    if (SROAArg)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROAArg)
    disableSROAForArg(SROAArg);
  return isGEPFree(I);
}
/// Simplify \p I if its operands are constants and update SimplifiedValues.
/// \p Evaluate is a callable specific to instruction type that evaluates the
/// instruction when all the operands are constants.
template <typename Callable>
bool CallAnalyzer::simplifyInstruction(Instruction &I, Callable Evaluate) {
  SmallVector<Constant *, 2> COps;
  for (Value *Op : I.operands()) {
    Constant *COp = dyn_cast<Constant>(Op);
    if (!COp)
      COp = SimplifiedValues.lookup(Op);
    if (!COp)
      return false;
    COps.push_back(COp);
  }
  auto *C = Evaluate(COps);
  if (!C)
    return false;
  SimplifiedValues[&I] = C;
  return true;
}
bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getBitCast(COps[0], I.getType());
      }))
    return true;

  // Track base/offsets through casts
  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}
bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getPtrToInt(COps[0], I.getType());
      }))
    return true;

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  unsigned AS = I.getOperand(0)->getType()->getPointerAddressSpace();
  if (IntegerSize == DL.getPointerSizeInBits(AS)) {
    std::pair<Value *, APInt> BaseAndOffset =
        ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
    SROAArgValues[&I] = SROAArg;

  return TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
         TargetTransformInfo::TCC_Free;
}
bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getIntToPtr(COps[0], I.getType());
      }))
    return true;

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (IntegerSize <= DL.getPointerTypeSizeInBits(I.getType())) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  if (auto *SROAArg = getSROAArgForValueOrNull(Op))
    SROAArgValues[&I] = SROAArg;

  return TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
         TargetTransformInfo::TCC_Free;
}
bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getCast(I.getOpcode(), COps[0], I.getType());
      }))
    return true;

  // Disable SROA in the face of arbitrary casts we don't explicitly list
  // elsewhere.
  disableSROA(I.getOperand(0));

  // If this is a floating-point cast, and the target says this operation
  // is expensive, this may eventually become a library call. Treat the cost
  // as such.
  switch (I.getOpcode()) {
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    if (TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive)
      onCallPenalty();
    break;
  default:
    break;
  }

  return TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
         TargetTransformInfo::TCC_Free;
}
bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) {
  return CandidateCall.paramHasAttr(A->getArgNo(), Attr);
}
bool CallAnalyzer::isKnownNonNullInCallee(Value *V) {
  // Does the *call site* have the NonNull attribute set on an argument? We
  // use the attribute on the call site to memoize any analysis done in the
  // caller. This will also trip if the callee function has a non-null
  // parameter attribute, but that's a less interesting case because hopefully
  // the callee would already have been simplified based on that.
  if (Argument *A = dyn_cast<Argument>(V))
    if (paramHasAttr(A, Attribute::NonNull))
      return true;

  // Is this an alloca in the caller? This is distinct from the attribute case
  // above because attributes aren't updated within the inliner itself and we
  // always want to catch the alloca derived case.
  if (isAllocaDerivedArg(V))
    // We can actually predict the result of comparisons between an
    // alloca-derived value and null. Note that this fires regardless of
    // SROA firing.
    return true;

  return false;
}
bool CallAnalyzer::allowSizeGrowth(CallBase &Call) {
  // If the normal destination of the invoke or the parent block of the call
  // site is unreachable-terminated, there is little point in inlining this
  // unless there is literally zero cost.
  // FIXME: Note that it is possible that an unreachable-terminated block has a
  // hot entry. For example, in the scenario below, inlining hot_call_X() may
  // be beneficial:
  // main() {
  //   hot_call_1();
  //   ...
  //   hot_call_N();
  //   exit(0);
  // }
  // For now, we are not handling this corner case here as it is rare in real
  // code. In future, we should elaborate this based on BPI and BFI in more
  // general threshold adjusting heuristics in updateThreshold().
  if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
    if (isa<UnreachableInst>(II->getNormalDest()->getTerminator()))
      return false;
  } else if (isa<UnreachableInst>(Call.getParent()->getTerminator()))
    return false;

  return true;
}
bool InlineCostCallAnalyzer::isColdCallSite(CallBase &Call,
                                            BlockFrequencyInfo *CallerBFI) {
  // If a global profile summary is available, then the callsite's coldness is
  // determined based on that.
  if (PSI && PSI->hasProfileSummary())
    return PSI->isColdCallSite(Call, CallerBFI);

  // Otherwise we need BFI to be available.
  if (!CallerBFI)
    return false;

  // Determine if the callsite is cold relative to caller's entry. We could
  // potentially cache the computation of scaled entry frequency, but the added
  // complexity is not worth it unless this scaling shows up high in the
  // profiles.
  const BranchProbability ColdProb(ColdCallSiteRelFreq, 100);
  auto CallSiteBB = Call.getParent();
  auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB);
  auto CallerEntryFreq =
      CallerBFI->getBlockFreq(&(Call.getCaller()->getEntryBlock()));
  return CallSiteFreq < CallerEntryFreq * ColdProb;
}
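// Worked example (editor's sketch, assuming the default ColdCallSiteRelFreq
// of 2): ColdProb is 2/100, so a call site whose block frequency is 1 in a
// caller with entry frequency 100 satisfies 1 < 100 * 2% = 2 and is treated
// as cold, making it subject to the smaller ColdCallSiteThreshold.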
Optional<int>
InlineCostCallAnalyzer::getHotCallSiteThreshold(CallBase &Call,
                                                BlockFrequencyInfo *CallerBFI) {

  // If a global profile summary is available, then the callsite's hotness is
  // determined based on that.
  if (PSI && PSI->hasProfileSummary() && PSI->isHotCallSite(Call, CallerBFI))
    return Params.HotCallSiteThreshold;

  // Otherwise we need BFI to be available and to have a locally hot callsite
  // threshold.
  if (!CallerBFI || !Params.LocallyHotCallSiteThreshold)
    return None;

  // Determine if the callsite is hot relative to caller's entry. We could
  // potentially cache the computation of scaled entry frequency, but the added
  // complexity is not worth it unless this scaling shows up high in the
  // profiles.
  auto CallSiteBB = Call.getParent();
  auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB).getFrequency();
  auto CallerEntryFreq = CallerBFI->getEntryFreq();
  if (CallSiteFreq >= CallerEntryFreq * HotCallSiteRelFreq)
    return Params.LocallyHotCallSiteThreshold;

  // Otherwise treat it normally.
  return None;
}
void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
  // If no size growth is allowed for this inlining, set Threshold to 0.
  if (!allowSizeGrowth(Call)) {
    Threshold = 0;
    return;
  }

  Function *Caller = Call.getCaller();

  // return min(A, B) if B is valid.
  auto MinIfValid = [](int A, Optional<int> B) {
    return B ? std::min(A, B.getValue()) : A;
  };

  // return max(A, B) if B is valid.
  auto MaxIfValid = [](int A, Optional<int> B) {
    return B ? std::max(A, B.getValue()) : A;
  };

  // Various bonus percentages. These are multiplied by Threshold to get the
  // bonus values.
  // SingleBBBonus: This bonus is applied if the callee has a single reachable
  // basic block at the given callsite context. This is speculatively applied
  // and withdrawn if more than one basic block is seen.
  //
  // LastCallToStaticBonus: This large bonus is applied to ensure the inlining
  // of the last call to a static function as inlining such functions is
  // guaranteed to reduce code size.
  //
  // These bonus percentages may be set to 0 based on properties of the caller
  // and the callsite.
  int SingleBBBonusPercent = 50;
  int VectorBonusPercent = TTI.getInlinerVectorBonusPercent();
  int LastCallToStaticBonus = InlineConstants::LastCallToStaticBonus;

  // Lambda to set all the above bonus and bonus percentages to 0.
  auto DisallowAllBonuses = [&]() {
    SingleBBBonusPercent = 0;
    VectorBonusPercent = 0;
    LastCallToStaticBonus = 0;
  };

  // Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available
  // and reduce the threshold if the caller has the necessary attribute.
  if (Caller->hasMinSize()) {
    Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold);
    // For minsize, we want to disable the single BB bonus and the vector
    // bonuses, but not the last-call-to-static bonus. Inlining the last call
    // to a static function will, at the minimum, eliminate the parameter setup
    // and call/return instructions.
    SingleBBBonusPercent = 0;
    VectorBonusPercent = 0;
  } else if (Caller->hasOptSize())
    Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);

  // Adjust the threshold based on the inlinehint attribute and profile-based
  // hotness information if the caller does not have the MinSize attribute.
  if (!Caller->hasMinSize()) {
    if (Callee.hasFnAttribute(Attribute::InlineHint))
      Threshold = MaxIfValid(Threshold, Params.HintThreshold);

    // FIXME: After switching to the new passmanager, simplify the logic below
    // by checking only the callsite hotness/coldness as we will reliably
    // have local profile information.
    //
    // Callsite hotness and coldness can be determined if sample profile is
    // used (which adds hotness metadata to calls) or if caller's
    // BlockFrequencyInfo is available.
    BlockFrequencyInfo *CallerBFI = GetBFI ? &(GetBFI(*Caller)) : nullptr;
    auto HotCallSiteThreshold = getHotCallSiteThreshold(Call, CallerBFI);
    if (!Caller->hasOptSize() && HotCallSiteThreshold) {
      LLVM_DEBUG(dbgs() << "Hot callsite.\n");
      // FIXME: This should update the threshold only if it exceeds the
      // current threshold, but AutoFDO + ThinLTO currently relies on this
      // behavior to prevent inlining of hot callsites during the ThinLTO
      // compile phase.
      Threshold = HotCallSiteThreshold.getValue();
    } else if (isColdCallSite(Call, CallerBFI)) {
      LLVM_DEBUG(dbgs() << "Cold callsite.\n");
      // Do not apply bonuses for a cold callsite including the
      // LastCallToStatic bonus. While this bonus might result in code size
      // reduction, it can cause the size of a non-cold caller to increase,
      // preventing it from being inlined.
      DisallowAllBonuses();
      Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold);
    } else if (PSI) {
      // Use the callee's global profile information only if we have no way of
      // determining this via callsite information.
      if (PSI->isFunctionEntryHot(&Callee)) {
        LLVM_DEBUG(dbgs() << "Hot callee.\n");
        // If callsite hotness can not be determined, we may still know
        // that the callee is hot and treat it as a weaker hint for threshold
        // increase.
        Threshold = MaxIfValid(Threshold, Params.HintThreshold);
      } else if (PSI->isFunctionEntryCold(&Callee)) {
        LLVM_DEBUG(dbgs() << "Cold callee.\n");
        // Do not apply bonuses for a cold callee including the
        // LastCallToStatic bonus. While this bonus might result in code size
        // reduction, it can cause the size of a non-cold caller to increase,
        // preventing it from being inlined.
        DisallowAllBonuses();
        Threshold = MinIfValid(Threshold, Params.ColdThreshold);
      }
    }
  }

  Threshold += TTI.adjustInliningThreshold(&Call);

  // Finally, take the target-specific inlining threshold multiplier into
  // account.
  Threshold *= TTI.getInliningThresholdMultiplier();

  SingleBBBonus = Threshold * SingleBBBonusPercent / 100;
  VectorBonus = Threshold * VectorBonusPercent / 100;

  bool OnlyOneCallAndLocalLinkage =
      F.hasLocalLinkage() && F.hasOneUse() && &F == Call.getCalledFunction();
  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically. It may seem odd to update
  // Cost in updateThreshold, but the bonus depends on the logic in this
  // method.
  if (OnlyOneCallAndLocalLinkage)
    Cost -= LastCallToStaticBonus;
}
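// Worked example (editor's sketch, not part of the pass logic): with the
// default threshold of 225, SingleBBBonusPercent = 50, a vector bonus percent
// of 150 (a common TTI default), and a threshold multiplier of 1, the
// speculative bonuses come out to SingleBBBonus = 225 * 50 / 100 = 112 and
// VectorBonus = 225 * 150 / 100 = 337; both are withdrawn later if the callee
// turns out not to qualify.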
bool CallAnalyzer::visitCmpInst(CmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getCompare(I.getPredicate(), COps[0], COps[1]);
      }))
    return true;

  if (I.getOpcode() == Instruction::FCmp)
    return false;

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // if we know the value (argument) can't be null.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)) &&
      isKnownNonNullInCallee(I.getOperand(0))) {
    bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
    SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                      : ConstantInt::getFalse(I.getType());
    return true;
  }

  return handleSROA(I.getOperand(0), isa<ConstantPointerNull>(I.getOperand(1)));
}
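// Illustrative sketch (editor's example, hypothetical IR): when both operands
// track the same base in ConstantOffsetPtrs,
//
//   %a = getelementptr inbounds i32, i32* %base, i64 1
//   %b = getelementptr inbounds i32, i32* %base, i64 2
//   %c = icmp ult i32* %a, %b
//
// the compare folds to the result of comparing the byte offsets 4 and 8, so
// it contributes no cost after inlining.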
bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}
bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Constant *CLHS = dyn_cast<Constant>(LHS);
  if (!CLHS)
    CLHS = SimplifiedValues.lookup(LHS);
  Constant *CRHS = dyn_cast<Constant>(RHS);
  if (!CRHS)
    CRHS = SimplifiedValues.lookup(RHS);

  Value *SimpleV = nullptr;
  if (auto FI = dyn_cast<FPMathOperator>(&I))
    SimpleV = SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS,
                            CRHS ? CRHS : RHS, FI->getFastMathFlags(), DL);
  else
    SimpleV =
        SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS, DL);

  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
    SimplifiedValues[&I] = C;

  if (SimpleV)
    return true;

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  // If the instruction is floating point, and the target says this operation
  // is expensive, this may eventually become a library call. Treat the cost
  // as such. Unless it's fneg, which can be implemented with an xor.
  using namespace llvm::PatternMatch;
  if (I.getType()->isFloatingPointTy() &&
      TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive &&
      !match(&I, m_FNeg(m_Value())))
    onCallPenalty();

  return false;
}
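// Illustrative sketch (editor's example): if SimplifiedValues records that
// the argument %x is the constant 4, then a callee instruction such as
//
//   %y = add i32 %x, 8
//
// simplifies to the constant 12, is recorded in SimplifiedValues, and is
// charged nothing; its users may then fold in turn.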
bool CallAnalyzer::visitFNeg(UnaryOperator &I) {
  Value *Op = I.getOperand(0);
  Constant *COp = dyn_cast<Constant>(Op);
  if (!COp)
    COp = SimplifiedValues.lookup(Op);

  Value *SimpleV = SimplifyFNegInst(
      COp ? COp : Op, cast<FPMathOperator>(I).getFastMathFlags(), DL);

  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
    SimplifiedValues[&I] = C;

  if (SimpleV)
    return true;

  // Disable any SROA on arguments to arbitrary, unsimplified fneg.
  disableSROA(Op);

  return false;
}
bool CallAnalyzer::visitLoad(LoadInst &I) {
  if (handleSROA(I.getPointerOperand(), I.isSimple()))
    return true;

  // If the data is already loaded from this address and hasn't been clobbered
  // by any stores or calls, this load is likely to be redundant and can be
  // eliminated.
  if (EnableLoadElimination &&
      !LoadAddrSet.insert(I.getPointerOperand()).second && I.isUnordered()) {
    onLoadEliminationOpportunity();
    return true;
  }

  return false;
}
bool CallAnalyzer::visitStore(StoreInst &I) {
  if (handleSROA(I.getPointerOperand(), I.isSimple()))
    return true;

  // The store can potentially clobber loads and prevent repeated loads from
  // being eliminated.
  // FIXME:
  // 1. We can probably keep an initial set of eliminatable loads subtracted
  // from the cost even when we finally see a store. We just need to disable
  // *further* accumulation of elimination savings.
  // 2. We should probably at some point thread MemorySSA for the callee into
  // this and then use that to actually compute *really* precise savings.
  disableLoadElimination();
  return false;
}
bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
  // Constant folding for extract value is trivial.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getExtractValue(COps[0], I.getIndices());
      }))
    return true;

  // SROA can't look through these, but they may be free.
  return Base::visitExtractValue(I);
}
bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
  // Constant folding for insert value is trivial.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getInsertValue(/*AggregateOperand*/ COps[0],
                                            /*InsertedValueOperand*/ COps[1],
                                            I.getIndices());
      }))
    return true;

  // SROA can't look through these, but they may be free.
  return Base::visitInsertValue(I);
}
/// Try to simplify a call site.
///
/// Takes a concrete function and callsite and tries to actually simplify it by
/// analyzing the arguments and call itself with instsimplify. Returns true if
/// it has simplified the callsite to some other entity (a constant), making it
/// free.
bool CallAnalyzer::simplifyCallSite(Function *F, CallBase &Call) {
  // FIXME: Using the instsimplify logic directly for this is inefficient
  // because we have to continually rebuild the argument list even when no
  // simplifications can be performed. Until that is fixed with remapping
  // inside of instsimplify, directly constant fold calls here.
  if (!canConstantFoldCallTo(&Call, F))
    return false;

  // Try to re-map the arguments to constants.
  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(Call.arg_size());
  for (Value *I : Call.args()) {
    Constant *C = dyn_cast<Constant>(I);
    if (!C)
      C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(I));
    if (!C)
      return false; // This argument doesn't map to a constant.

    ConstantArgs.push_back(C);
  }
  if (Constant *C = ConstantFoldCall(&Call, F, ConstantArgs)) {
    SimplifiedValues[&Call] = C;
    return true;
  }

  return false;
}
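// Illustrative sketch (editor's example, hypothetical IR): a callsite such as
//
//   %r = call double @llvm.sqrt.f64(double 4.0)
//
// passes canConstantFoldCallTo, folds to the constant 2.0 via
// ConstantFoldCall, and is therefore recorded as simplified and free.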
bool CallAnalyzer::visitCallBase(CallBase &Call) {
  if (Call.hasFnAttr(Attribute::ReturnsTwice) &&
      !F.hasFnAttribute(Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }
  if (isa<CallInst>(Call) && cast<CallInst>(Call).cannotDuplicate())
    ContainsNoDuplicateCall = true;

  Value *Callee = Call.getCalledOperand();
  Function *F = dyn_cast_or_null<Function>(Callee);
  bool IsIndirectCall = !F;
  if (IsIndirectCall) {
    // Check if this happens to be an indirect function call to a known
    // function in this inline context. If not, we've done all we can.
    F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
    if (!F) {
      onCallArgumentSetup(Call);

      if (!Call.onlyReadsMemory())
        disableLoadElimination();
      return Base::visitCallBase(Call);
    }
  }

  assert(F && "Expected a call to a known function");

  // When we have a concrete function, first try to simplify it directly.
  if (simplifyCallSite(F, Call))
    return true;

  // Next check if it is an intrinsic we know about.
  // FIXME: Lift this into part of the InstVisitor.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Call)) {
    switch (II->getIntrinsicID()) {
    default:
      if (!Call.onlyReadsMemory() && !isAssumeLikeIntrinsic(II))
        disableLoadElimination();
      return Base::visitCallBase(Call);

    case Intrinsic::load_relative:
      onLoadRelativeIntrinsic();
      return false;

    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      disableLoadElimination();
      // SROA can usually chew through these intrinsics, but they aren't free.
      return false;
    case Intrinsic::icall_branch_funnel:
    case Intrinsic::localescape:
      HasUninlineableIntrinsic = true;
      return false;
    case Intrinsic::vastart:
      InitsVargArgs = true;
      return false;
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      if (auto *SROAArg = getSROAArgForValueOrNull(II->getOperand(0)))
        SROAArgValues[II] = SROAArg;
      return true;
    }
  }

  if (F == Call.getFunction()) {
    // This flag will fully abort the analysis, so don't bother with anything
    // else.
    IsRecursiveCall = true;
    return false;
  }

  if (TTI.isLoweredToCall(F)) {
    onLoweredCall(F, Call, IsIndirectCall);
  }

  if (!(Call.onlyReadsMemory() || (IsIndirectCall && F->onlyReadsMemory())))
    disableLoadElimination();
  return Base::visitCallBase(Call);
}
bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
  // At least one return instruction will be free after inlining.
  bool Free = !HasReturn;
  HasReturn = true;
  return Free;
}
bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
  // We model unconditional branches as essentially free -- they really
  // shouldn't exist at all, but handling them makes the behavior of the
  // inliner more regular and predictable. Interestingly, conditional branches
  // which will fold away are also free.
  return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
         dyn_cast_or_null<ConstantInt>(
             SimplifiedValues.lookup(BI.getCondition()));
}
bool CallAnalyzer::visitSelectInst(SelectInst &SI) {
  bool CheckSROA = SI.getType()->isPointerTy();
  Value *TrueVal = SI.getTrueValue();
  Value *FalseVal = SI.getFalseValue();

  Constant *TrueC = dyn_cast<Constant>(TrueVal);
  if (!TrueC)
    TrueC = SimplifiedValues.lookup(TrueVal);
  Constant *FalseC = dyn_cast<Constant>(FalseVal);
  if (!FalseC)
    FalseC = SimplifiedValues.lookup(FalseVal);
  Constant *CondC =
      dyn_cast_or_null<Constant>(SimplifiedValues.lookup(SI.getCondition()));

  if (!CondC) {
    // Select C, X, X => X
    if (TrueC == FalseC && TrueC) {
      SimplifiedValues[&SI] = TrueC;
      return true;
    }

    if (!CheckSROA)
      return Base::visitSelectInst(SI);

    std::pair<Value *, APInt> TrueBaseAndOffset =
        ConstantOffsetPtrs.lookup(TrueVal);
    std::pair<Value *, APInt> FalseBaseAndOffset =
        ConstantOffsetPtrs.lookup(FalseVal);
    if (TrueBaseAndOffset == FalseBaseAndOffset && TrueBaseAndOffset.first) {
      ConstantOffsetPtrs[&SI] = TrueBaseAndOffset;

      if (auto *SROAArg = getSROAArgForValueOrNull(TrueVal))
        SROAArgValues[&SI] = SROAArg;
      return true;
    }

    return Base::visitSelectInst(SI);
  }

  // Select condition is a constant.
  Value *SelectedV = CondC->isAllOnesValue()  ? TrueVal
                     : (CondC->isNullValue()) ? FalseVal
                                              : nullptr;
  if (!SelectedV) {
    // Condition is a vector constant that is not all 1s or all 0s. If all
    // operands are constants, ConstantExpr::getSelect() can handle the cases
    // such as select vectors.
    if (TrueC && FalseC) {
      if (auto *C = ConstantExpr::getSelect(CondC, TrueC, FalseC)) {
        SimplifiedValues[&SI] = C;
        return true;
      }
    }
    return Base::visitSelectInst(SI);
  }

  // Condition is either all 1s or all 0s. SI can be simplified.
  if (Constant *SelectedC = dyn_cast<Constant>(SelectedV)) {
    SimplifiedValues[&SI] = SelectedC;
    return true;
  }

  if (!CheckSROA)
    return true;

  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(SelectedV);
  if (BaseAndOffset.first) {
    ConstantOffsetPtrs[&SI] = BaseAndOffset;

    if (auto *SROAArg = getSROAArgForValueOrNull(SelectedV))
      SROAArgValues[&SI] = SROAArg;
  }

  return true;
}
bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
  // We model unconditional switches as free, see the comments on handling
  // branches.
  if (isa<ConstantInt>(SI.getCondition()))
    return true;
  if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
    if (isa<ConstantInt>(V))
      return true;

  // Assume the most general case where the switch is lowered into
  // either a jump table, bit test, or a balanced binary tree consisting of
  // case clusters without merging adjacent clusters with the same
  // destination. We do not consider the switches that are lowered with a mix
  // of jump table/bit test/binary search tree. The cost of the switch is
  // proportional to the size of the tree or the size of jump table range.
  //
  // NB: We convert large switches which are just used to initialize large phi
  // nodes to lookup tables instead in simplifycfg, so this shouldn't prevent
  // inlining those. It will prevent inlining in cases where the optimization
  // does not (yet) fire.

  unsigned JumpTableSize = 0;
  BlockFrequencyInfo *BFI = GetBFI ? &(GetBFI(F)) : nullptr;
  unsigned NumCaseCluster =
      TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize, PSI, BFI);

  onFinalizeSwitch(JumpTableSize, NumCaseCluster);
  return false;
}
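// Illustrative sketch (editor's example): a dense switch over the cases 0..15
// is typically estimated as one jump-table cluster (JumpTableSize = 16,
// NumCaseCluster = 1), whereas 16 widely scattered cases may form 16 clusters
// costed as a balanced binary search tree; the exact clustering is up to the
// target's TTI implementation.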
bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddress's (in static global initializers
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function which is extremely undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions with
  // indirectbr's as long as no other function or global references the
  // blockaddress of a block within the current function.
  HasIndirectBr = true;
  return false;
}
bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a resume instruction.
  return false;
}
bool CallAnalyzer::visitCleanupReturnInst(CleanupReturnInst &CRI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a cleanupret instruction.
  return false;
}
bool CallAnalyzer::visitCatchReturnInst(CatchReturnInst &CRI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a catchret instruction.
  return false;
}
bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
  // FIXME: It might be reasonable to discount the cost of instructions leading
  // to unreachable as they have the lowest possible impact on both runtime and
  // code size.
  return true; // No actual code is needed for unreachable.
}
bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
      TargetTransformInfo::TCC_Free)
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (const Use &Op : I.operands())
    disableSROA(Op);

  return false;
}
/// Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible to inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
InlineResult
CallAnalyzer::analyzeBlock(BasicBlock *BB,
                           SmallPtrSetImpl<const Value *> &EphValues) {
  for (Instruction &I : *BB) {
    // FIXME: Currently, the number of instructions in a function, regardless
    // of our ability to simplify them during inlining to constants or dead
    // code, is actually used by the vector bonus heuristic. As long as that's
    // true, we have to special case debug intrinsics here to prevent
    // differences in inlining due to debug symbols. Eventually, the number of
    // unsimplified instructions shouldn't factor into the cost computation,
    // but until then, hack around it here.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    // Skip pseudo-probes.
    if (isa<PseudoProbeInst>(I))
      continue;

    // Skip ephemeral values.
    if (EphValues.count(&I))
      continue;

    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I.getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    onInstructionAnalysisStart(&I);

    if (Base::visit(&I))
      ++NumInstructionsSimplified;
    else
      onMissedSimplification();

    onInstructionAnalysisFinish(&I);
    using namespace ore;
    // If visiting this instruction detected an uninlinable pattern, abort.
    InlineResult IR = InlineResult::success();
    if (IsRecursiveCall)
      IR = InlineResult::failure("recursive");
    else if (ExposesReturnsTwice)
      IR = InlineResult::failure("exposes returns twice");
    else if (HasDynamicAlloca)
      IR = InlineResult::failure("dynamic alloca");
    else if (HasIndirectBr)
      IR = InlineResult::failure("indirect branch");
    else if (HasUninlineableIntrinsic)
      IR = InlineResult::failure("uninlinable intrinsic");
    else if (InitsVargArgs)
      IR = InlineResult::failure("varargs");
    if (!IR.isSuccess()) {
      if (ORE)
        ORE->emit([&]() {
          return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
                                          &CandidateCall)
                 << NV("Callee", &F) << " has uninlinable pattern ("
                 << NV("InlineResult", IR.getFailureReason())
                 << ") and cost is not fully computed";
        });
      return IR;
    }

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) {
      auto IR = InlineResult::failure(
          "recursive and allocates too much stack space");
      if (ORE)
        ORE->emit([&]() {
          return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
                                          &CandidateCall)
                 << NV("Callee", &F) << " is "
                 << NV("InlineResult", IR.getFailureReason())
                 << ". Cost is not fully computed";
        });
      return IR;
    }

    if (shouldStop())
      return InlineResult::failure(
          "Call site analysis is not favorable to inlining.");
  }

  return InlineResult::success();
}
/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns null if V is not a pointer, and returns the constant '0' if there
/// are no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!V->getType()->isPointerTy())
    return nullptr;

  unsigned AS = V->getType()->getPointerAddressSpace();
  unsigned IntPtrWidth = DL.getIndexSizeInBits(AS);
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return nullptr;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

  Type *IdxPtrTy = DL.getIndexType(V->getType());
  return cast<ConstantInt>(ConstantInt::get(IdxPtrTy, Offset));
}
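// Illustrative sketch (editor's example, hypothetical IR): given
//
//   %p = getelementptr inbounds [4 x i32], [4 x i32]* %base, i64 0, i64 2
//   %q = bitcast i32* %p to i8*
//
// calling this on %q rewrites V to %base and returns the constant 8, the
// accumulated in-bounds byte offset.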
/// Find dead blocks due to deleted CFG edges during inlining.
///
/// If we know the successor of the current block, \p CurrBB, has to be \p
/// NextBB, the other successors of \p CurrBB are dead if these successors have
/// no live incoming CFG edges. If one block is found to be dead, we can
/// continue growing the dead block list by checking the successors of the dead
/// blocks to see if all their incoming edges are dead or not.
void CallAnalyzer::findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB) {
  auto IsEdgeDead = [&](BasicBlock *Pred, BasicBlock *Succ) {
    // A CFG edge is dead if the predecessor is dead or the predecessor has a
    // known successor which is not the one under exam.
    return (DeadBlocks.count(Pred) ||
            (KnownSuccessors[Pred] && KnownSuccessors[Pred] != Succ));
  };

  auto IsNewlyDead = [&](BasicBlock *BB) {
    // If all the edges to a block are dead, the block is also dead.
    return (!DeadBlocks.count(BB) &&
            llvm::all_of(predecessors(BB),
                         [&](BasicBlock *P) { return IsEdgeDead(P, BB); }));
  };

  for (BasicBlock *Succ : successors(CurrBB)) {
    if (Succ == NextBB || !IsNewlyDead(Succ))
      continue;
    SmallVector<BasicBlock *, 4> NewDead;
    NewDead.push_back(Succ);
    while (!NewDead.empty()) {
      BasicBlock *Dead = NewDead.pop_back_val();
      if (DeadBlocks.insert(Dead))
        // Continue growing the dead block lists.
        for (BasicBlock *S : successors(Dead))
          if (IsNewlyDead(S))
            NewDead.push_back(S);
    }
  }
}
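// Illustrative sketch (editor's example): if the terminator of %entry is a
// conditional branch whose condition simplified to true, NextBB is the taken
// successor and the untaken successor is newly dead unless it has another
// live predecessor; blocks reachable only through it are then marked dead
// transitively and never enter the cost walk.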
/// Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
InlineResult CallAnalyzer::analyze() {
  ++NumCallsAnalyzed;

  auto Result = onAnalysisStart();
  if (!Result.isSuccess())
    return Result;

  if (F.empty())
    return InlineResult::success();

  Function *Caller = CandidateCall.getFunction();
  // Check if the caller function is recursive itself.
  for (User *U : Caller->users()) {
    CallBase *Call = dyn_cast<CallBase>(U);
    if (Call && Call->getFunction() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  auto CAI = CandidateCall.arg_begin();
  for (Argument &FAI : F.args()) {
    assert(CAI != CandidateCall.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[&FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[&FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (auto *SROAArg = dyn_cast<AllocaInst>(PtrArg)) {
        SROAArgValues[&FAI] = SROAArg;
        onInitializeSROAArg(SROAArg);
        EnabledSROAAllocas.insert(SROAArg);
      }
    }
    ++CAI;
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // FIXME: If a caller has multiple calls to a callee, we end up recomputing
  // the ephemeral values multiple times (and they're completely determined by
  // the callee, so this is purely duplicate work).
  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(&F, &GetAssumptionCache(F), EphValues);

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, prioritizing for small iterations because we exit after
  // crossing our threshold, we use a small-size optimized SetVector.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16>>
      BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());

  // Note that we *must not* cache the size, this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    if (shouldStop())
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Disallow inlining a blockaddress with uses other than strictly callbr.
    // A blockaddress only has defined behavior for an indirect branch in the
    // same function, and we do not currently support inlining indirect
    // branches. But, the inliner may not see an indirect branch that ends up
    // being dead code at a particular call site. If the blockaddress escapes
    // the function, e.g., via a global variable, inlining may lead to an
    // invalid cross-function reference.
    // FIXME: pr/39560: continue relaxing this overt restriction.
    if (BB->hasAddressTaken())
      for (User *U : BlockAddress::get(&*BB)->users())
        if (!isa<CallBrInst>(*U))
          return InlineResult::failure("blockaddress used outside of callbr");

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    InlineResult IR = analyzeBlock(BB, EphValues);
    if (!IR.isSuccess())
      return IR;

    Instruction *TI = BB->getTerminator();

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond =
                dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BasicBlock *NextBB = BI->getSuccessor(SimpleCond->isZero() ? 1 : 0);
          BBWorklist.insert(NextBB);
          KnownSuccessors[BB] = NextBB;
          findDeadBlocks(BB, NextBB);
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond =
              dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BasicBlock *NextBB = SI->findCaseValue(SimpleCond)->getCaseSuccessor();
        BBWorklist.insert(NextBB);
        KnownSuccessors[BB] = NextBB;
        findDeadBlocks(BB, NextBB);
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    onBlockAnalyzed(BB);
  }

  bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
                                    &F == CandidateCall.getCalledFunction();
  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the caller (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return InlineResult::failure("noduplicate");

  return finalizeAnalysis();
}
void InlineCostCallAnalyzer::print() {
#define DEBUG_PRINT_STAT(x) dbgs() << "      " #x ": " << x << "\n"
  if (PrintInstructionComments)
    F.print(dbgs(), &Writer);
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(NumInstructions);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
  DEBUG_PRINT_STAT(LoadEliminationCost);
  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
  DEBUG_PRINT_STAT(Cost);
  DEBUG_PRINT_STAT(Threshold);
#undef DEBUG_PRINT_STAT
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Dump stats about this call's analysis.
LLVM_DUMP_METHOD void InlineCostCallAnalyzer::dump() { print(); }
#endif
/// Test that there are no attribute conflicts between Caller and Callee
/// that prevent inlining.
static bool functionsHaveCompatibleAttributes(
    Function *Caller, Function *Callee, TargetTransformInfo &TTI,
    function_ref<const TargetLibraryInfo &(Function &)> &GetTLI) {
  // Note that CalleeTLI must be a copy not a reference. The legacy pass manager
  // caches the most recently created TLI in the TargetLibraryInfoWrapperPass
  // object, and always returns the same object (which is overwritten on each
  // GetTLI call). Therefore we copy the first result.
  auto CalleeTLI = GetTLI(*Callee);
  return TTI.areInlineCompatible(Caller, Callee) &&
         GetTLI(*Caller).areInlineCompatible(CalleeTLI,
                                             InlineCallerSupersetNoBuiltin) &&
         AttributeFuncs::areInlineCompatible(*Caller, *Callee);
}
int llvm::getCallsiteCost(CallBase &Call, const DataLayout &DL) {
  int Cost = 0;
  for (unsigned I = 0, E = Call.arg_size(); I != E; ++I) {
    if (Call.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
      unsigned TypeSize = DL.getTypeSizeInBits(Call.getParamByValType(I));
      unsigned AS = PTy->getAddressSpace();
      unsigned PointerSize = DL.getPointerSizeInBits(AS);
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);

      Cost += 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost += InlineConstants::InstrCost;
    }
  }
  // The call instruction also disappears after inlining.
  Cost += InlineConstants::InstrCost + CallPenalty;
  return Cost;
}
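// Worked example (editor's sketch): a byval argument of 128 bits copied with
// 64-bit pointers needs ceil(128 / 64) = 2 stores, costed as
// 2 * 2 * InlineConstants::InstrCost; the call instruction itself then adds
// InstrCost plus CallPenalty before the total is returned.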
InlineCost llvm::getInlineCost(
    CallBase &Call, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
    function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
    ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
  return getInlineCost(Call, Call.getCalledFunction(), Params, CalleeTTI,
                       GetAssumptionCache, GetTLI, GetBFI, PSI, ORE);
}
Optional<int> llvm::getInliningCostEstimate(
    CallBase &Call, TargetTransformInfo &CalleeTTI,
    function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
    ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
  const InlineParams Params = {/* DefaultThreshold*/ 0,
                               /*HintThreshold*/ {},
                               /*ColdThreshold*/ {},
                               /*OptSizeThreshold*/ {},
                               /*OptMinSizeThreshold*/ {},
                               /*HotCallSiteThreshold*/ {},
                               /*LocallyHotCallSiteThreshold*/ {},
                               /*ColdCallSiteThreshold*/ {},
                               /*ComputeFullInlineCost*/ true,
                               /*EnableDeferral*/ true};

  InlineCostCallAnalyzer CA(*Call.getCalledFunction(), Call, Params, CalleeTTI,
                            GetAssumptionCache, GetBFI, PSI, ORE, true,
                            /*IgnoreThreshold*/ true);
  auto R = CA.analyze();
  if (!R.isSuccess())
    return None;
  return CA.getCost();
}
Optional<InlineCostFeatures> llvm::getInliningCostFeatures(
    CallBase &Call, TargetTransformInfo &CalleeTTI,
    function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
    ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
  InlineCostFeaturesAnalyzer CFA(CalleeTTI, GetAssumptionCache, GetBFI, PSI,
                                 ORE, *Call.getCalledFunction(), Call);
  auto R = CFA.analyze();
  if (!R.isSuccess())
    return None;
  return CFA.features();
}
Optional<InlineResult> llvm::getAttributeBasedInliningDecision(
    CallBase &Call, Function *Callee, TargetTransformInfo &CalleeTTI,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {

  // Cannot inline indirect calls.
  if (!Callee)
    return InlineResult::failure("indirect call");

  // When a callee coroutine function is inlined into a caller coroutine
  // function before the coro-split pass, the coro-early pass cannot handle it
  // quite well. So we won't inline a coroutine function that has not yet been
  // split.
  if (Callee->isPresplitCoroutine())
    return InlineResult::failure("unsplited coroutine call");

  // Never inline calls with byval arguments that do not have the alloca
  // address space. Since byval arguments can be replaced with a copy to an
  // alloca, the inlined code would need to be adjusted to handle that the
  // argument is in the alloca address space (so it is a little bit complicated
  // to solve).
  unsigned AllocaAS = Callee->getParent()->getDataLayout().getAllocaAddrSpace();
  for (unsigned I = 0, E = Call.arg_size(); I != E; ++I)
    if (Call.isByValArgument(I)) {
      PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
      if (PTy->getAddressSpace() != AllocaAS)
        return InlineResult::failure("byval arguments without alloca"
                                     " address space");
    }

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (Call.hasFnAttr(Attribute::AlwaysInline)) {
    auto IsViable = isInlineViable(*Callee);
    if (IsViable.isSuccess())
      return InlineResult::success();
    return InlineResult::failure(IsViable.getFailureReason());
  }

  // Never inline functions with conflicting attributes (unless callee has
  // always-inline attribute).
  Function *Caller = Call.getCaller();
  if (!functionsHaveCompatibleAttributes(Caller, Callee, CalleeTTI, GetTLI))
    return InlineResult::failure("conflicting attributes");

  // Don't inline this call if the caller has the optnone attribute.
  if (Caller->hasOptNone())
    return InlineResult::failure("optnone attribute");

  // Don't inline a function that treats null pointer as valid into a caller
  // that does not have this attribute.
  if (!Caller->nullPointerIsDefined() && Callee->nullPointerIsDefined())
    return InlineResult::failure("nullptr definitions incompatible");

  // Don't inline functions which can be interposed at link-time.
  if (Callee->isInterposable())
    return InlineResult::failure("interposable");

  // Don't inline functions marked noinline.
  if (Callee->hasFnAttribute(Attribute::NoInline))
    return InlineResult::failure("noinline function attribute");

  // Don't inline call sites marked noinline.
  if (Call.isNoInline())
    return InlineResult::failure("noinline call site attribute");

  // Don't inline functions if one does not have any stack protector attribute
  // but the other does.
  if (Caller->hasStackProtectorFnAttr() && !Callee->hasStackProtectorFnAttr())
    return InlineResult::failure(
        "stack protected caller but callee requested no stack protector");
  if (Callee->hasStackProtectorFnAttr() && !Caller->hasStackProtectorFnAttr())
    return InlineResult::failure(
        "stack protected callee but caller requested no stack protector");

  return None;
}
InlineCost llvm::getInlineCost(
    CallBase &Call, Function *Callee, const InlineParams &Params,
    TargetTransformInfo &CalleeTTI,
    function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
    ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {

  auto UserDecision =
      llvm::getAttributeBasedInliningDecision(Call, Callee, CalleeTTI, GetTLI);

  if (UserDecision.hasValue()) {
    if (UserDecision->isSuccess())
      return llvm::InlineCost::getAlways("always inline attribute");
    return llvm::InlineCost::getNever(UserDecision->getFailureReason());
  }

  LLVM_DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
                          << "... (caller:" << Call.getCaller()->getName()
                          << ")\n");

  InlineCostCallAnalyzer CA(*Callee, Call, Params, CalleeTTI,
                            GetAssumptionCache, GetBFI, PSI, ORE);
  InlineResult ShouldInline = CA.analyze();

  LLVM_DEBUG(CA.dump());

  // Always make the cost-benefit-based decision explicit.
  // We use always/never here since the threshold is not meaningful,
  // as it's not what drives cost-benefit analysis.
  if (CA.wasDecidedByCostBenefit()) {
    if (ShouldInline.isSuccess())
      return InlineCost::getAlways("benefit over cost",
                                   CA.getCostBenefitPair());
    return InlineCost::getNever("cost over benefit", CA.getCostBenefitPair());
  }

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline.isSuccess() && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever(ShouldInline.getFailureReason());
  if (ShouldInline.isSuccess() && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways("empty function");

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}
InlineResult llvm::isInlineViable(Function &F) {
  bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
  for (BasicBlock &BB : F) {
    // Disallow inlining of functions which contain indirect branches.
    if (isa<IndirectBrInst>(BB.getTerminator()))
      return InlineResult::failure("contains indirect branches");

    // Disallow inlining of blockaddresses which are used by non-callbr
    // instructions.
    if (BB.hasAddressTaken())
      for (User *U : BlockAddress::get(&BB)->users())
        if (!isa<CallBrInst>(*U))
          return InlineResult::failure("blockaddress used outside of callbr");

    for (auto &II : BB) {
      CallBase *Call = dyn_cast<CallBase>(&II);
      if (!Call)
        continue;

      // Disallow recursive calls.
      Function *Callee = Call->getCalledFunction();
      if (&F == Callee)
        return InlineResult::failure("recursive call");

      // Disallow calls which expose returns-twice to a function not previously
      // attributed as such.
      if (!ReturnsTwice && isa<CallInst>(Call) &&
          cast<CallInst>(Call)->canReturnTwice())
        return InlineResult::failure("exposes returns-twice attribute");

      if (Callee)
        switch (Callee->getIntrinsicID()) {
        default:
          break;
        case llvm::Intrinsic::icall_branch_funnel:
          // Disallow inlining of @llvm.icall.branch.funnel because current
          // backend can't separate call targets from call arguments.
          return InlineResult::failure(
              "disallowed inlining of @llvm.icall.branch.funnel");
        case llvm::Intrinsic::localescape:
          // Disallow inlining functions that call @llvm.localescape. Doing
          // this correctly would require major changes to the inliner.
          return InlineResult::failure(
              "disallowed inlining of @llvm.localescape");
        case llvm::Intrinsic::vastart:
          // Disallow inlining of functions that initialize VarArgs with
          // va_start.
          return InlineResult::failure(
              "contains VarArgs initialized with va_start");
        }
    }
  }

  return InlineResult::success();
}
// APIs to create InlineParams based on command line flags and/or other
// parameters.

InlineParams llvm::getInlineParams(int Threshold) {
  InlineParams Params;

  // This field is the threshold to use for a callee by default. This is
  // derived from one or more of:
  //  * optimization or size-optimization levels,
  //  * a value passed to the createFunctionInliningPass function, or
  //  * the -inline-threshold flag.
  //  If the -inline-threshold flag is explicitly specified, that is used
  //  irrespective of anything else.
  if (InlineThreshold.getNumOccurrences() > 0)
    Params.DefaultThreshold = InlineThreshold;
  else
    Params.DefaultThreshold = Threshold;

  // Set the HintThreshold knob from the -inlinehint-threshold.
  Params.HintThreshold = HintThreshold;

  // Set the HotCallSiteThreshold knob from the -hot-callsite-threshold.
  Params.HotCallSiteThreshold = HotCallSiteThreshold;

  // If the -locally-hot-callsite-threshold is explicitly specified, use it to
  // populate LocallyHotCallSiteThreshold. Later, we populate
  // Params.LocallyHotCallSiteThreshold from -locally-hot-callsite-threshold if
  // we know that the optimization level is O3 (in the getInlineParams variant
  // that takes the opt and size levels).
  // FIXME: Remove this check (and make the assignment unconditional) after
  // addressing size regression issues at O2.
  if (LocallyHotCallSiteThreshold.getNumOccurrences() > 0)
    Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;

  // Set the ColdCallSiteThreshold knob from the
  // -inline-cold-callsite-threshold.
  Params.ColdCallSiteThreshold = ColdCallSiteThreshold;

  // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
  // -inlinehint-threshold commandline option is not explicitly given. If that
  // option is present, then its value applies even for callees with size and
  // minsize attributes.
  // If the -inline-threshold is not specified, set the ColdThreshold from the
  // -inlinecold-threshold even if it is not explicitly passed. If
  // -inline-threshold is specified, then -inlinecold-threshold needs to be
  // explicitly specified to set the ColdThreshold knob.
  if (InlineThreshold.getNumOccurrences() == 0) {
    Params.OptMinSizeThreshold = InlineConstants::OptMinSizeThreshold;
    Params.OptSizeThreshold = InlineConstants::OptSizeThreshold;
    Params.ColdThreshold = ColdThreshold;
  } else if (ColdThreshold.getNumOccurrences() > 0) {
    Params.ColdThreshold = ColdThreshold;
  }
  return Params;
}
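// Usage sketch (editor's example): passing -inline-threshold=500 forces
// Params.DefaultThreshold to 500 regardless of the Threshold argument, leaves
// OptSizeThreshold/OptMinSizeThreshold unset so size-optimized callers keep
// the explicit value, and honors -inlinecold-threshold only when that flag is
// itself given explicitly.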
InlineParams llvm::getInlineParams() {
  return getInlineParams(DefaultThreshold);
}
// Compute the default threshold for inlining based on the opt level and the
// size opt level.
static int computeThresholdFromOptLevels(unsigned OptLevel,
                                         unsigned SizeOptLevel) {
  if (OptLevel > 2)
    return InlineConstants::OptAggressiveThreshold;
  if (SizeOptLevel == 1) // -Os
    return InlineConstants::OptSizeThreshold;
  if (SizeOptLevel == 2) // -Oz
    return InlineConstants::OptMinSizeThreshold;
  return DefaultThreshold;
}
InlineParams llvm::getInlineParams(unsigned OptLevel, unsigned SizeOptLevel) {
  auto Params =
      getInlineParams(computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
  // At O3, use the value of the -locally-hot-callsite-threshold option to
  // populate Params.LocallyHotCallSiteThreshold. Below O3, this flag has
  // effect only when it is specified explicitly.
  if (OptLevel > 2)
    Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
  return Params;
}
PreservedAnalyses
InlineCostAnnotationPrinterPass::run(Function &F,
                                     FunctionAnalysisManager &FAM) {
  PrintInstructionComments = true;
  std::function<AssumptionCache &(Function &)> GetAssumptionCache =
      [&](Function &F) -> AssumptionCache & {
    return FAM.getResult<AssumptionAnalysis>(F);
  };
  Module *M = F.getParent();
  ProfileSummaryInfo PSI(*M);
  DataLayout DL(M);
  TargetTransformInfo TTI(DL);
  // FIXME: Redesign the usage of InlineParams to expand the scope of this pass.
  // In the current implementation, the type of InlineParams doesn't matter as
  // the pass serves only for verification of the inliner's decisions.
  // We can add a flag which determines InlineParams for this run. Right now,
  // the default InlineParams are used.
  const InlineParams Params = llvm::getInlineParams();
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      if (CallInst *CI = dyn_cast<CallInst>(&I)) {
        Function *CalledFunction = CI->getCalledFunction();
        if (!CalledFunction || CalledFunction->isDeclaration())
          continue;
        OptimizationRemarkEmitter ORE(CalledFunction);
        InlineCostCallAnalyzer ICCA(*CalledFunction, *CI, Params, TTI,
                                    GetAssumptionCache, nullptr, &PSI, &ORE);
        ICCA.analyze();
        OS << "      Analyzing call of " << CalledFunction->getName()
           << "... (caller:" << CI->getCaller()->getName() << ")\n";
        ICCA.print();
        OS << "\n";
      }
    }
  }
  return PreservedAnalyses::all();
}