//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SetVector.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/StringExtras.h"
20 #include "llvm/ADT/iterator_range.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/BlockFrequencyInfo.h"
24 #include "llvm/Analysis/CallGraph.h"
25 #include "llvm/Analysis/CaptureTracking.h"
26 #include "llvm/Analysis/InstructionSimplify.h"
27 #include "llvm/Analysis/MemoryProfileInfo.h"
28 #include "llvm/Analysis/ObjCARCAnalysisUtils.h"
29 #include "llvm/Analysis/ObjCARCUtil.h"
30 #include "llvm/Analysis/ProfileSummaryInfo.h"
31 #include "llvm/Analysis/ValueTracking.h"
32 #include "llvm/Analysis/VectorUtils.h"
33 #include "llvm/IR/AttributeMask.h"
34 #include "llvm/IR/Argument.h"
35 #include "llvm/IR/BasicBlock.h"
36 #include "llvm/IR/CFG.h"
37 #include "llvm/IR/Constant.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/DebugInfo.h"
41 #include "llvm/IR/DebugInfoMetadata.h"
42 #include "llvm/IR/DebugLoc.h"
43 #include "llvm/IR/DerivedTypes.h"
44 #include "llvm/IR/Dominators.h"
45 #include "llvm/IR/EHPersonalities.h"
46 #include "llvm/IR/Function.h"
47 #include "llvm/IR/IRBuilder.h"
48 #include "llvm/IR/InlineAsm.h"
49 #include "llvm/IR/InstrTypes.h"
50 #include "llvm/IR/Instruction.h"
51 #include "llvm/IR/Instructions.h"
52 #include "llvm/IR/IntrinsicInst.h"
53 #include "llvm/IR/Intrinsics.h"
54 #include "llvm/IR/LLVMContext.h"
55 #include "llvm/IR/MDBuilder.h"
56 #include "llvm/IR/Metadata.h"
57 #include "llvm/IR/Module.h"
58 #include "llvm/IR/Type.h"
59 #include "llvm/IR/User.h"
60 #include "llvm/IR/Value.h"
61 #include "llvm/Support/Casting.h"
62 #include "llvm/Support/CommandLine.h"
63 #include "llvm/Support/ErrorHandling.h"
64 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
65 #include "llvm/Transforms/Utils/Cloning.h"
66 #include "llvm/Transforms/Utils/Local.h"
67 #include "llvm/Transforms/Utils/ValueMapper.h"
#define DEBUG_TYPE "inline-function"

using namespace llvm;
using namespace llvm::memprof;
using ProfileCount = Function::ProfileCount;

static cl::opt<bool>
    EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
                            cl::Hidden,
                            cl::desc("Convert noalias attributes to metadata "
                                     "during inlining."));

static cl::opt<bool>
    UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
                        cl::init(true),
                        cl::desc("Use the llvm.experimental.noalias.scope.decl "
                                 "intrinsic during inlining."));

// Disabled by default, because the added alignment assumptions may increase
// compile-time and block optimizations. This option is not suitable for use
// with frontends that emit comprehensive parameter alignment annotations.
static cl::opt<bool> PreserveAlignmentAssumptions(
    "preserve-alignment-assumptions-during-inlining", cl::init(false),
    cl::Hidden,
    cl::desc("Convert align attributes to assumptions during inlining."));

static cl::opt<unsigned> InlinerAttributeWindow(
    "max-inst-checked-for-throw-during-inlining", cl::Hidden,
    cl::desc("the maximum number of instructions analyzed for may throw during "
             "attribute inference in inlined body"),
    cl::init(4));

namespace {
  /// A class for recording information about inlining a landing pad.
  class LandingPadInliningInfo {
    /// Destination of the invoke's unwind.
    BasicBlock *OuterResumeDest;

    /// Destination for the callee's resume.
    BasicBlock *InnerResumeDest = nullptr;

    /// LandingPadInst associated with the invoke.
    LandingPadInst *CallerLPad = nullptr;

    /// PHI for EH values from landingpad insts.
    PHINode *InnerEHValuesPHI = nullptr;

    SmallVector<Value *, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
        : OuterResumeDest(II->getUnwindDest()) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst *> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
} // end anonymous namespace
/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
      OuterResumeDest->splitBasicBlock(SplitPoint,
                                       OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body");
    InnerPHI->insertBefore(InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI =
      PHINode::Create(CallerLPad->getType(), PHICapacity, "eh.lpad-body");
  InnerEHValuesPHI->insertBefore(InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  return InnerResumeDest;
}
/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // matches the values we saved from the original invoke's unwind edge.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}
/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;
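
// A note on UnwindDestMemoTy: it caches the result of unwind-destination
// queries. The mapped value is either the EH pad instruction the key unwinds
// to, ConstantTokenNone when it unwinds to the caller, or null when no
// definitive answer has been found yet (see getUnwindDestToken below).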
/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap. When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this. We can check its catchpads'
        // descendants, since they might include a cleanuppad with an
        // "unwinds to caller" cleanupret, which can be trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad. Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad.
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup. In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued its
    // children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad. Record this in the memo map, and check to see if the
    // original EHPad being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}
/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
/// return that pad instruction. If it unwinds to caller, return
/// ConstantTokenNone. If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke. Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer. Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up. To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees. The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;
  // No information is available for this EHPad from itself or any of its
  // descendants. An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information. Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors. If that
    // were the case, then we should also have recorded the lack of information
    // for the descendant that we're coming from. So assert that we don't
    // find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below. Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've found
      // that it is a funclet that does have information about unwinding to
      // a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad. This local unwind
      // gives us no information about EHPad. Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad. If it has an entry in
    // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
    // added on this invocation of getUnwindDestToken; if a previous invocation
    // recorded nullptr, it would have had to prove that the ancestors of
    // UselessPad, which include LastUselessPad, had no information, and that
    // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
    // LastUselessPad, which would imply that EHPad was mapped to nullptr in
    // the MemoMap on that invocation, which isn't the case if we got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here). Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have no
    // unwind edges or unwind to a sibling).
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert(
              (!isa<InvokeInst>(U) ||
               (getParentPad(
                    cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                CatchPad)) &&
              "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(
                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                 UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}
/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (Instruction &I : llvm::make_early_inc_range(*BB)) {
    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(&I);

    if (!CI || CI->doesNotThrow())
      continue;

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
    // invokes. The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental_deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet. If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB. Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects. So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}
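
// Illustration (not from the original source) of the rewrite performed by
// HandleCallsInBlockInlinedThroughInvoke above: a potentially-throwing call
// inside the inlined body such as
//   %r = call i32 @may_throw()
// is split out of its block and becomes
//   %r = invoke i32 @may_throw()
//           to label %cont unwind label %unwind.dest
// where %unwind.dest is the UnwindEdge block passed in by the caller of this
// helper (the inlined invoke's landing pad or EH pad block), and %cont holds
// the remainder of the original block.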
/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst *, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) by removing the predecessor edge for the original invoke block.
  InvokeDest->removePredecessor(II->getParent());
}
/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  BasicBlock *InvokeBB = II->getParent();
  for (PHINode &PHI : UnwindDest->phis()) {
    // Save the value to use for this edge.
    UnwindDestPHIValues.push_back(PHI.getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given basic
  // block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet. If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB. Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects. So when we see such a call, leave it
          // as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee. It may or may not have a
          // descendant that definitively has an unwind to caller. In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // "unwind to caller".
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map. This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) by removing the predecessor edge for the original invoke block.
  UnwindDest->removePredecessor(InvokeBB);
}
static bool haveCommonPrefix(MDNode *MIBStackContext,
                             MDNode *CallsiteStackContext) {
  assert(MIBStackContext->getNumOperands() > 0 &&
         CallsiteStackContext->getNumOperands() > 0);
  // Because of the context trimming performed during matching, the callsite
  // context could have more stack ids than the MIB. We match up to the end of
  // the shortest stack context.
  for (auto MIBStackIter = MIBStackContext->op_begin(),
            CallsiteStackIter = CallsiteStackContext->op_begin();
       MIBStackIter != MIBStackContext->op_end() &&
       CallsiteStackIter != CallsiteStackContext->op_end();
       MIBStackIter++, CallsiteStackIter++) {
    auto *Val1 = mdconst::dyn_extract<ConstantInt>(*MIBStackIter);
    auto *Val2 = mdconst::dyn_extract<ConstantInt>(*CallsiteStackIter);
    assert(Val1 && Val2);
    if (Val1->getZExtValue() != Val2->getZExtValue())
      return false;
  }
  return true;
}
static void removeMemProfMetadata(CallBase *Call) {
  Call->setMetadata(LLVMContext::MD_memprof, nullptr);
}

static void removeCallsiteMetadata(CallBase *Call) {
  Call->setMetadata(LLVMContext::MD_callsite, nullptr);
}

static void updateMemprofMetadata(CallBase *CI,
                                  const std::vector<Metadata *> &MIBList) {
  assert(!MIBList.empty());
  // Remove existing memprof, which will either be replaced or may not be needed
  // if we are able to use a single allocation type function attribute.
  removeMemProfMetadata(CI);
  CallStackTrie CallStack;
  for (Metadata *MIB : MIBList)
    CallStack.addCallStack(cast<MDNode>(MIB));
  bool MemprofMDAttached = CallStack.buildAndAttachMIBMetadata(CI);
  assert(MemprofMDAttached == CI->hasMetadata(LLVMContext::MD_memprof));
  if (!MemprofMDAttached)
    // If we used a function attribute remove the callsite metadata as well.
    removeCallsiteMetadata(CI);
}
// Update the metadata on the inlined copy ClonedCall of a call OrigCall in the
// inlined callee body, based on the callsite metadata InlinedCallsiteMD from
// the call that was inlined.
static void propagateMemProfHelper(const CallBase *OrigCall,
                                   CallBase *ClonedCall,
                                   MDNode *InlinedCallsiteMD) {
  MDNode *OrigCallsiteMD = ClonedCall->getMetadata(LLVMContext::MD_callsite);
  MDNode *ClonedCallsiteMD = nullptr;
  // Check if the call originally had callsite metadata, and update it for the
  // new call in the inlined body.
  if (OrigCallsiteMD) {
    // The cloned call's context is now the concatenation of the original call's
    // callsite metadata and the callsite metadata on the call where it was
    // inlined.
    ClonedCallsiteMD = MDNode::concatenate(OrigCallsiteMD, InlinedCallsiteMD);
    ClonedCall->setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);
  }

  // Update any memprof metadata on the cloned call.
  MDNode *OrigMemProfMD = ClonedCall->getMetadata(LLVMContext::MD_memprof);
  if (!OrigMemProfMD)
    return;
  // We currently expect that allocations with memprof metadata also have
  // callsite metadata for the allocation's part of the context.
  assert(OrigCallsiteMD);

  // New call's MIB list.
  std::vector<Metadata *> NewMIBList;

  // For each MIB metadata, check if its call stack context starts with the
  // new clone's callsite metadata. If so, that MIB goes onto the cloned call in
  // the inlined body. If not, it stays on the out-of-line original call.
  for (auto &MIBOp : OrigMemProfMD->operands()) {
    MDNode *MIB = dyn_cast<MDNode>(MIBOp);
    // Stack is first operand of MIB.
    MDNode *StackMD = getMIBStackNode(MIB);
    assert(StackMD);
    // See if the new cloned callsite context matches this profiled context.
    if (haveCommonPrefix(StackMD, ClonedCallsiteMD))
      // Add it to the cloned call's MIB list.
      NewMIBList.push_back(MIB);
  }
  if (NewMIBList.empty()) {
    removeMemProfMetadata(ClonedCall);
    removeCallsiteMetadata(ClonedCall);
    return;
  }
  if (NewMIBList.size() < OrigMemProfMD->getNumOperands())
    updateMemprofMetadata(ClonedCall, NewMIBList);
}
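
// Illustrative example (not from the original source): if the original
// allocation call carried !callsite stack ids {A} and the call site it was
// inlined through carried {B}, the cloned call's context becomes {A, B}. Any
// MIB whose profiled stack context matches that prefix is kept on the clone;
// the remaining MIBs stay with the out-of-line original call, as described in
// the comments above.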
// Update memprof related metadata (!memprof and !callsite) based on the
// inlining of Callee into the callsite at CB. The updates include merging the
// inlined callee's callsite metadata with that of the inlined call,
// and moving the subset of any memprof contexts to the inlined callee
// allocations if they match the new inlined call stack.
static void
propagateMemProfMetadata(Function *Callee, CallBase &CB,
                         bool ContainsMemProfMetadata,
                         const ValueMap<const Value *, WeakTrackingVH> &VMap) {
  MDNode *CallsiteMD = CB.getMetadata(LLVMContext::MD_callsite);
  // Only need to update if the inlined callsite had callsite metadata, or if
  // there was any memprof metadata inlined.
  if (!CallsiteMD && !ContainsMemProfMetadata)
    return;

  // Propagate metadata onto the cloned calls in the inlined callee.
  for (const auto &Entry : VMap) {
    // See if this is a call that has been inlined and remapped, and not
    // simplified away in the process.
    auto *OrigCall = dyn_cast_or_null<CallBase>(Entry.first);
    auto *ClonedCall = dyn_cast_or_null<CallBase>(Entry.second);
    if (!OrigCall || !ClonedCall)
      continue;
    // If the inlined callsite did not have any callsite metadata, then it isn't
    // involved in any profiled call contexts, and we can remove any memprof
    // metadata on the cloned call.
    if (!CallsiteMD) {
      removeMemProfMetadata(ClonedCall);
      removeCallsiteMetadata(ClonedCall);
      continue;
    }

    propagateMemProfHelper(OrigCall, ClonedCall, CallsiteMD);
  }
}
/// When inlining a call site that has !llvm.mem.parallel_loop_access,
/// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
/// be propagated to all memory-accessing cloned instructions.
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart,
                                      Function::iterator FEnd) {
  MDNode *MemParallelLoopAccess =
      CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
  MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
  if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
    return;

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // This metadata is only relevant for instructions that access memory.
      if (!I.mayReadOrWriteMemory())
        continue;

      if (MemParallelLoopAccess) {
        // TODO: This probably should not overwrite MemParallelLoopAccess.
        MemParallelLoopAccess = MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
            MemParallelLoopAccess);
        I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                      MemParallelLoopAccess);
      }

      if (AccessGroup)
        I.setMetadata(LLVMContext::MD_access_group, uniteAccessGroups(
            I.getMetadata(LLVMContext::MD_access_group), AccessGroup));

      if (AliasScope)
        I.setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));

      if (NoAlias)
        I.setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_noalias), NoAlias));
    }
  }
}
/// Bundle operands of the inlined function must be added to inlined call sites.
static void PropagateOperandBundles(Function::iterator InlinedBB,
                                    Instruction *CallSiteEHPad) {
  for (Instruction &II : llvm::make_early_inc_range(*InlinedBB)) {
    CallBase *I = dyn_cast<CallBase>(&II);
    if (!I)
      continue;
    // Skip call sites which already have a "funclet" bundle.
    if (I->getOperandBundle(LLVMContext::OB_funclet))
      continue;
    // Skip call sites which are nounwind intrinsics (as long as they don't
    // lower into regular function calls in the course of IR transformations).
    auto *CalledFn =
        dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
    if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow() &&
        !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
      continue;

    SmallVector<OperandBundleDef, 1> OpBundles;
    I->getOperandBundlesAsDefs(OpBundles);
    OpBundles.emplace_back("funclet", CallSiteEHPad);

    Instruction *NewInst = CallBase::Create(I, OpBundles, I);
    NewInst->takeName(I);
    I->replaceAllUsesWith(NewInst);
    I->eraseFromParent();
  }
}
namespace {
/// Utility for cloning !noalias and !alias.scope metadata. When a code region
/// using scoped alias metadata is inlined, the aliasing relationships may not
/// hold between the two versions. It is necessary to create a deep clone of
/// the metadata, putting the two versions in separate scope domains.
class ScopedAliasMetadataDeepCloner {
  using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;
  SetVector<const MDNode *> MD;
  MetadataMap MDMap;
  void addRecursiveMetadataUses();

public:
  ScopedAliasMetadataDeepCloner(const Function *F);

  /// Create a new clone of the scoped alias metadata, which will be used by
  /// subsequent remap() calls.
  void clone();

  /// Remap instructions in the given range from the original to the cloned
  /// metadata.
  void remap(Function::iterator FStart, Function::iterator FEnd);
};
} // namespace
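
// Intended usage (a sketch of how the inliner later in this file drives this
// class): construct the cloner over the callee before its body is cloned,
// call clone() once to build the new scope nodes, and then call remap() over
// the freshly inlined blocks so they refer to the cloned scopes.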
ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
    const Function *F) {
  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);

      // We also need to clone the metadata in noalias intrinsics.
      if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        MD.insert(Decl->getScopeList());
    }
  }
  addRecursiveMetadataUses();
}
void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (const Metadata *Op : M->operands())
      if (const MDNode *OpMD = dyn_cast<MDNode>(Op))
        if (MD.insert(OpMD))
          Queue.push_back(OpMD);
  }
}
void ScopedAliasMetadataDeepCloner::clone() {
  assert(MDMap.empty() && "clone() already called ?");

  SmallVector<TempMDTuple, 16> DummyNodes;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), std::nullopt));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  SmallVector<Metadata *, 4> NewOps;
  for (const MDNode *I : MD) {
    for (const Metadata *Op : I->operands()) {
      if (const MDNode *M = dyn_cast<MDNode>(Op))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(Op));
    }

    MDNode *NewM = MDNode::get(I->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
    NewOps.clear();
  }
}
void ScopedAliasMetadataDeepCloner::remap(Function::iterator FStart,
                                          Function::iterator FEnd) {
  if (MDMap.empty())
    return; // Nothing to do.

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // TODO: The null checks for the MDMap.lookup() results should no longer
      // be necessary.
      if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_alias_scope, MNew);

      if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_noalias, MNew);

      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
          Decl->setScopeList(MNew);
    }
  }
}
/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR,
                                  ClonedCodeInfo &InlinedFunctionInfo) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CB.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function &>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = std::string(CalledFunc->getName());
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));

    if (UseNoAliasIntrinsic) {
      // Introduce a llvm.experimental.noalias.scope.decl for the noalias
      // argument.
      MDNode *AScopeList = MDNode::get(CalledFunc->getContext(), NewScope);
      auto *NoAliasDecl =
          IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);
      // Ignore the result for now. The result will be used when the
      // llvm.noalias intrinsic is introduced.
      (void)NoAliasDecl;
    }
  }
  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI || InlinedFunctionInfo.isSimplified(I, NI))
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (const auto *Call = dyn_cast<CallBase>(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (Call->doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          MemoryEffects ME = CalleeAAR->getMemoryEffects(Call);

          // We'll retain this knowledge without additional metadata.
          if (ME.onlyAccessesInaccessibleMem())
            continue;

          if (ME.onlyAccessesArgPointees())
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : Call->args()) {
          // Only care about pointer arguments. If a noalias argument is
          // accessed through a non-pointer argument, it must be captured
          // first (e.g. via ptrtoint), and we protect against captures below.
          if (!Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (const Value *V : PtrArgs) {
        SmallVector<const Value *, 4> Objects;
        getUnderlyingObjects(V, Objects, /* LI = */ nullptr);

        for (const Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool RequiresNoCaptureBefore = false, UsesAliasingPtr = false,
           UsesUnknownObject = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        if (isEscapeSource(V)) {
          // An escape source can only alias with a noalias argument if it has
          // been captured beforehand.
          RequiresNoCaptureBefore = true;
        } else if (!isa<Argument>(V) && !isIdentifiedObject(V)) {
          // If this is neither an escape source, nor some identified object
          // (which cannot directly alias a noalias argument), nor some other
          // argument (which, by definition, also cannot alias a noalias
          // argument), conservatively do not make any assumptions.
          UsesUnknownObject = true;
        }
      }

      // Nothing we can do if the used underlying object cannot be reliably
      // determined.
      if (UsesUnknownObject)
        continue;

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        RequiresNoCaptureBefore = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (ObjSet.contains(A))
          continue; // May be based on a noalias argument.

        // It might be tempting to skip the PointerMayBeCapturedBefore check if
        // A->hasNoCaptureAttr() is true, but this is incorrect because
        // nocapture only guarantees that no copies outlive the function, not
        // that the value cannot be locally captured.
        if (!RequiresNoCaptureBefore ||
            !PointerMayBeCapturedBefore(A, /* ReturnCaptures */ false,
                                        /* StoreCaptures */ false, I, &DT))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}
static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin,
                                                   ReturnInst *End) {
  assert(Begin->getParent() == End->getParent() &&
         "Expected to be in same basic block!");
  auto BeginIt = Begin->getIterator();
  assert(BeginIt != End->getIterator() && "Non-empty BB has empty iterator");
  return !llvm::isGuaranteedToTransferExecutionToSuccessor(
      ++BeginIt, End->getIterator(), InlinerAttributeWindow + 1);
}
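
// Note: the scan above is bounded by the InlinerAttributeWindow option, so the
// attribute-propagation check below stays cheap even in long basic blocks.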
// Only allow these whitelisted attributes to be propagated back to the
// callee. This is because other attributes may only be valid on the call
// itself, i.e. attributes such as signext and zeroext.

// Attributes that are always okay to propagate, because violating them results
// in immediate UB.
static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB) {
  AttrBuilder Valid(CB.getContext());
  if (auto DerefBytes = CB.getRetDereferenceableBytes())
    Valid.addDereferenceableAttr(DerefBytes);
  if (auto DerefOrNullBytes = CB.getRetDereferenceableOrNullBytes())
    Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
  if (CB.hasRetAttr(Attribute::NoAlias))
    Valid.addAttribute(Attribute::NoAlias);
  if (CB.hasRetAttr(Attribute::NoUndef))
    Valid.addAttribute(Attribute::NoUndef);
  return Valid;
}

// Attributes that need additional checks as propagating them may change
// behavior or cause new UB.
static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB) {
  AttrBuilder Valid(CB.getContext());
  if (CB.hasRetAttr(Attribute::NonNull))
    Valid.addAttribute(Attribute::NonNull);
  if (CB.hasRetAttr(Attribute::Alignment))
    Valid.addAlignmentAttr(CB.getRetAlign());
  return Valid;
}

static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
  AttrBuilder ValidUB = IdentifyValidUBGeneratingAttributes(CB);
  AttrBuilder ValidPG = IdentifyValidPoisonGeneratingAttributes(CB);
  if (!ValidUB.hasAttributes() && !ValidPG.hasAttributes())
    return;
  auto *CalledFunction = CB.getCalledFunction();
  auto &Context = CalledFunction->getContext();

  for (auto &BB : *CalledFunction) {
    auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!RI || !isa<CallBase>(RI->getOperand(0)))
      continue;
    auto *RetVal = cast<CallBase>(RI->getOperand(0));
    // Check that the cloned RetVal exists and is a call, otherwise we cannot
    // add the attributes on the cloned RetVal. Simplification during inlining
    // could have transformed the cloned instruction.
    auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
    if (!NewRetVal)
      continue;

    // Backward propagation of attributes to the returned value may be incorrect
    // if it is control flow dependent.
    // Consider:
    // @callee {
    //  %rv = call @foo()
    //  %rv2 = call @bar()
    //  if (%rv2 != null)
    //    return %rv2
    //  if (%rv == null)
    //    exit()
    //  return %rv
    // }
    // caller() {
    //   %val = call nonnull @callee()
    // }
    // Here we cannot add the nonnull attribute on either foo or bar. So, we
    // limit the check to cases where RetVal and RI are in the same basic block
    // and there are no throwing/exiting instructions between them.
    if (RI->getParent() != RetVal->getParent() ||
        MayContainThrowingOrExitingCallAfterCB(RetVal, RI))
      continue;

    // Add to the existing attributes of NewRetVal, i.e. the cloned call
    // instruction.
    // NB! When we have the same attribute already existing on NewRetVal, but
    // with a differing value, the AttributeList's merge API honours the already
    // existing attribute value (i.e. attributes such as dereferenceable,
    // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
    AttributeList AL = NewRetVal->getAttributes();
    if (ValidUB.getDereferenceableBytes() < AL.getRetDereferenceableBytes())
      ValidUB.removeAttribute(Attribute::Dereferenceable);
    if (ValidUB.getDereferenceableOrNullBytes() <
        AL.getRetDereferenceableOrNullBytes())
      ValidUB.removeAttribute(Attribute::DereferenceableOrNull);
    AttributeList NewAL = AL.addRetAttributes(Context, ValidUB);
    // Attributes that may generate poison returns are a bit tricky. If we
    // propagate them, other uses of the callsite might have their behavior
    // change or cause UB (if they have noundef) because of the new potential
    // poison.
    // Take the following three cases:
    //
    // 1)
    // define nonnull ptr @foo() {
    //   %p = call ptr @bar()
    //   call void @use(ptr %p) willreturn nounwind
    //   ret ptr %p
    // }
    //
    // 2)
    // define noundef nonnull ptr @foo() {
    //   %p = call ptr @bar()
    //   call void @use(ptr %p) willreturn nounwind
    //   ret ptr %p
    // }
    //
    // 3)
    // define nonnull ptr @foo() {
    //   %p = call noundef ptr @bar()
    //   ret ptr %p
    // }
    //
    // In case 1, we can't propagate nonnull because poison value in @use may
    // change behavior or trigger UB.
    // In case 2, we don't need to be concerned about propagating nonnull, as
    // any new poison at @use will trigger UB anyways.
    // In case 3, we can never propagate nonnull because it may create UB due to
    // the noundef on @bar.
    if (ValidPG.getAlignment().valueOrOne() < AL.getRetAlignment().valueOrOne())
      ValidPG.removeAttribute(Attribute::Alignment);
    if (ValidPG.hasAttributes()) {
      // If the callsite has `noundef`, then a poison due to violating the
      // return attribute will create UB anyways so we can always propagate.
      // Otherwise, if the return value (callee to be inlined) has `noundef`, we
      // can't propagate as a new poison return will cause UB.
      // Finally, check if the return value has no uses whose behavior may
      // change/may cause UB if we potentially return poison. At the moment this
      // is implemented overly conservatively with a single-use check.
      // TODO: Update the single-use check to iterate through uses and only bail
      // if we have a potentially dangerous use.
      if (CB.hasRetAttr(Attribute::NoUndef) ||
          (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))
        NewAL = NewAL.addRetAttributes(Context, ValidPG);
    }
    NewRetVal->setAttributes(NewAL);
  }
}
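
// Hedged sketch of the net effect (hypothetical functions @outer/@mid):
// inlining
//   %v = call noundef nonnull ptr @mid()   ; @mid simply returns call @inner()
// clones @mid's body into the caller and, because the checks above pass, the
// cloned inner call becomes
//   %v.i = call noundef nonnull ptr @inner()
// so later passes can exploit the attributes at the new definition point.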

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
    return;

  AssumptionCache *AC = &IFI.GetAssumptionCache(*CB.getCaller());
  auto &DL = CB.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CB.getCalledFunction();
  for (Argument &Arg : CalledFunc->args()) {
    if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||
        Arg.hasNUses(0))
      continue;
    MaybeAlign Alignment = Arg.getParamAlign();
    if (!Alignment)
      continue;

    if (!DTCalculated) {
      DT.recalculate(*CB.getCaller());
      DTCalculated = true;
    }
    // If we can already prove the asserted alignment in the context of the
    // caller, then don't bother inserting the assumption.
    Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
    if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= *Alignment)
      continue;

    CallInst *NewAsmp = IRBuilder<>(&CB).CreateAlignmentAssumption(
        DL, ArgVal, Alignment->value());
    AC->registerAssumption(cast<AssumeInst>(NewAsmp));
  }
}
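
// Illustrative only (hypothetical IR): for a callee parameter declared as
//   define void @callee(ptr align 32 %p)
// the code above inserts, in front of the call site, something like
//   call void @llvm.assume(i1 true) [ "align"(ptr %arg, i64 32) ]
// so the alignment fact survives even though the parameter attribute itself
// disappears when the call is replaced by the inlined body.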

static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
                                    Module *M, BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI,
                                    Function *CalledFunc) {
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size =
      Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  CallInst *CI = Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
                                      /*SrcAlign*/ Align(1), Size);

  // The verifier requires that all calls of debug-info-bearing functions
  // from debug-info-bearing functions have a debug location (for inlining
  // purposes). Assign a dummy location to satisfy the constraint.
  if (!CI->getDebugLoc() && InsertBlock->getParent()->getSubprogram())
    if (DISubprogram *SP = CalledFunc->getSubprogram())
      CI->setDebugLoc(DILocation::get(SP->getContext(), 0, 0, SP));
}
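
// Sketch of the emitted initialization (hypothetical types/names): for a
// byval argument of type %struct.S with store size 24, the builder above
// produces, at the top of the inlined entry block,
//   call void @llvm.memcpy.p0.p0.i64(ptr %byval.copy, ptr %actual.arg,
//                                    i64 24, i1 false)
// using alignment 1 on both sides and leaving later passes to raise it.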

/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Type *ByValType, Value *Arg,
                                  Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  MaybeAlign ByValAlignment) {
  Function *Caller = TheCall->getFunction();
  const DataLayout &DL = Caller->getParent()->getDataLayout();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment.valueOrOne() == 1)
      return Arg;

    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, *ByValAlignment, DL, TheCall, AC) >=
        *ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  Align Alignment = DL.getPrefTypeAlign(ByValType);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  if (ByValAlignment)
    Alignment = std::max(Alignment, *ByValAlignment);

  AllocaInst *NewAlloca = new AllocaInst(ByValType, DL.getAllocaAddrSpace(),
                                         nullptr, Alignment, Arg->getName());
  NewAlloca->insertBefore(Caller->begin()->begin());
  IFI.StaticAllocas.push_back(NewAlloca);

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}
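
// Rough illustration (made-up types) of the two outcomes above: for
//   call void @callee(ptr byval(%struct.S) align 8 %p)
// a readonly callee lets %p be reused directly, while otherwise a fresh
//   %p.copy = alloca %struct.S, align 8
// is created in the caller's entry block and later filled by the explicit
// memcpy emitted in HandleByValArgumentInit.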

// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users())
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
      if (II->isLifetimeStartOrEnd())
        return true;
  return false;
}

// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy =
      PointerType::get(Ty->getContext(), Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
/// block. Allocas used in inalloca calls and allocas of dynamic array size
/// cannot be static.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
}

/// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
/// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
                               LLVMContext &Ctx,
                               DenseMap<const MDNode *, MDNode *> &IANodes) {
  auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
  return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),
                         OrigDL.getScope(), IA);
}
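
// Hedged example of the effect (line numbers invented): an instruction with
//   !dbg !DILocation(line: 7, scope: !calleeScope)
// inlined at a call site located at line 42 becomes
//   !dbg !DILocation(line: 7, scope: !calleeScope, inlinedAt: <line 42 loc>)
// and the IANodes cache keeps the cloned instructions sharing one inlined-at
// node instead of minting a distinct chain per instruction.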

/// Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall, bool CalleeHasDebugInfo) {
  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused, without
  // this every instruction's inlined-at chain would become distinct from each
  // other.
  DenseMap<const MDNode *, MDNode *> IANodes;

  // Check if we are not generating inline line tables and want to use
  // the call site location instead.
  bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");

  // Helper-util for updating the metadata attached to an instruction.
  auto UpdateInst = [&](Instruction &I) {
    // Loop metadata needs to be updated so that the start and end locs
    // reference inlined-at locations.
    auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
                              &IANodes](Metadata *MD) -> Metadata * {
      if (auto *Loc = dyn_cast_or_null<DILocation>(MD))
        return inlineDebugLoc(Loc, InlinedAtNode, Ctx, IANodes).get();
      return MD;
    };
    updateLoopMetadataDebugLocations(I, updateLoopInfoLoc);

    if (!NoInlineLineTables)
      if (DebugLoc DL = I.getDebugLoc()) {
        DebugLoc IDL =
            inlineDebugLoc(DL, InlinedAtNode, I.getContext(), IANodes);
        I.setDebugLoc(IDL);
        return;
      }

    if (CalleeHasDebugInfo && !NoInlineLineTables)
      return;

    // If the inlined instruction has no line number, or if inline info
    // is not being generated, make it look as if it originates from the call
    // location. This is important for ((__always_inline, __nodebug__))
    // functions which must use caller location for all instructions in their
    // function body.

    // Don't update static allocas, as they may get moved later.
    if (auto *AI = dyn_cast<AllocaInst>(&I))
      if (allocaWouldBeStaticInEntry(AI))
        return;

    // Do not force a debug loc for pseudo probes, since they do not need to
    // be debuggable, and also they are expected to have a zero/null dwarf
    // discriminator at this point which could be violated otherwise.
    if (isa<PseudoProbeInst>(I))
      return;

    I.setDebugLoc(TheCallDL);
  };

  // Helper-util for updating debug-info records attached to instructions.
  auto UpdateDPV = [&](DPValue *DPV) {
    assert(DPV->getDebugLoc() && "Debug Value must have debug loc");
    if (NoInlineLineTables) {
      DPV->setDebugLoc(TheCallDL);
      return;
    }
    DebugLoc DL = DPV->getDebugLoc();
    DebugLoc IDL =
        inlineDebugLoc(DL, InlinedAtNode,
                       DPV->getMarker()->getParent()->getContext(), IANodes);
    DPV->setDebugLoc(IDL);
  };

  // Iterate over all instructions, updating metadata and debug-info records.
  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE;
         ++BI) {
      UpdateInst(*BI);
      for (DPValue &DPV : BI->getDbgValueRange()) {
        UpdateDPV(&DPV);
      }
    }

    // Remove debug info intrinsics if we're not keeping inline info.
    if (NoInlineLineTables) {
      BasicBlock::iterator BI = FI->begin();
      while (BI != FI->end()) {
        if (isa<DbgInfoIntrinsic>(BI)) {
          BI = BI->eraseFromParent();
          continue;
        } else {
          BI->dropDbgValues();
          ++BI;
        }
      }
    }
  }
}

#undef DEBUG_TYPE
#define DEBUG_TYPE "assignment-tracking"
/// Find Alloca and linked DbgAssignIntrinsic for locals escaped by \p CB.
static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL,
                                                 const CallBase &CB) {
  at::StorageToVarsMap EscapedLocals;
  SmallPtrSet<const Value *, 4> SeenBases;

  LLVM_DEBUG(
      errs() << "# Finding caller local variables escaped by callee\n");
  for (const Value *Arg : CB.args()) {
    LLVM_DEBUG(errs() << "INSPECT: " << *Arg << "\n");
    if (!Arg->getType()->isPointerTy()) {
      LLVM_DEBUG(errs() << " | SKIP: Not a pointer\n");
      continue;
    }

    const Instruction *I = dyn_cast<Instruction>(Arg);
    if (!I) {
      LLVM_DEBUG(errs() << " | SKIP: Not result of instruction\n");
      continue;
    }

    // Walk back to the base storage.
    assert(Arg->getType()->isPtrOrPtrVectorTy());
    APInt TmpOffset(DL.getIndexTypeSizeInBits(Arg->getType()), 0, false);
    const AllocaInst *Base = dyn_cast<AllocaInst>(
        Arg->stripAndAccumulateConstantOffsets(DL, TmpOffset, true));
    if (!Base) {
      LLVM_DEBUG(errs() << " | SKIP: Couldn't walk back to base storage\n");
      continue;
    }

    LLVM_DEBUG(errs() << " | BASE: " << *Base << "\n");
    // We only need to process each base address once - skip any duplicates.
    if (!SeenBases.insert(Base).second)
      continue;

    // Find all local variables associated with the backing storage.
    auto CollectAssignsForStorage = [&](auto *DbgAssign) {
      // Skip variables from inlined functions - they are not local variables.
      if (DbgAssign->getDebugLoc().getInlinedAt())
        return;
      LLVM_DEBUG(errs() << " > DEF : " << *DbgAssign << "\n");
      EscapedLocals[Base].insert(at::VarRecord(DbgAssign));
    };
    for_each(at::getAssignmentMarkers(Base), CollectAssignsForStorage);
    for_each(at::getDPVAssignmentMarkers(Base), CollectAssignsForStorage);
  }
  return EscapedLocals;
}

static void trackInlinedStores(Function::iterator Start, Function::iterator End,
                               const CallBase &CB) {
  LLVM_DEBUG(errs() << "trackInlinedStores into "
                    << Start->getParent()->getName() << " from "
                    << CB.getCalledFunction()->getName() << "\n");
  std::unique_ptr<DataLayout> DL = std::make_unique<DataLayout>(CB.getModule());
  at::trackAssignments(Start, End, collectEscapedLocals(*DL, CB), *DL);
}

/// Update inlined instructions' DIAssignID metadata. We need to do this
/// otherwise a function inlined more than once into the same function
/// will cause DIAssignID to be shared by many instructions.
static void fixupAssignments(Function::iterator Start, Function::iterator End) {
  // Map {Old, New} metadata. Not used directly - use GetNewID.
  DenseMap<DIAssignID *, DIAssignID *> Map;
  auto GetNewID = [&Map](Metadata *Old) {
    DIAssignID *OldID = cast<DIAssignID>(Old);
    if (DIAssignID *NewID = Map.lookup(OldID))
      return NewID;
    DIAssignID *NewID = DIAssignID::getDistinct(OldID->getContext());
    Map[OldID] = NewID;
    return NewID;
  };
  // Loop over all the inlined instructions. If we find a DIAssignID
  // attachment or use, replace it with a new version.
  for (auto BBI = Start; BBI != End; ++BBI) {
    for (Instruction &I : *BBI) {
      for (DPValue &DPV : I.getDbgValueRange()) {
        if (DPV.isDbgAssign())
          DPV.setAssignId(GetNewID(DPV.getAssignID()));
      }
      if (auto *ID = I.getMetadata(LLVMContext::MD_DIAssignID))
        I.setMetadata(LLVMContext::MD_DIAssignID, GetNewID(ID));
      else if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(&I))
        DAI->setAssignId(GetNewID(DAI->getAssignID()));
    }
  }
}
#undef DEBUG_TYPE
#define DEBUG_TYPE "inline-function"

/// Update the block frequencies of the caller after a callee has been inlined.
///
/// Each block cloned into the caller has its block frequency scaled by the
/// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
/// callee's entry block gets the same frequency as the callsite block and the
/// relative frequencies of all cloned blocks remain the same after cloning.
static void updateCallerBFI(BasicBlock *CallSiteBlock,
                            const ValueToValueMapTy &VMap,
                            BlockFrequencyInfo *CallerBFI,
                            BlockFrequencyInfo *CalleeBFI,
                            const BasicBlock &CalleeEntryBlock) {
  SmallPtrSet<BasicBlock *, 16> ClonedBBs;
  for (auto Entry : VMap) {
    if (!isa<BasicBlock>(Entry.first) || !Entry.second)
      continue;
    auto *OrigBB = cast<BasicBlock>(Entry.first);
    auto *ClonedBB = cast<BasicBlock>(Entry.second);
    BlockFrequency Freq = CalleeBFI->getBlockFreq(OrigBB);
    if (!ClonedBBs.insert(ClonedBB).second) {
      // Multiple blocks in the callee might get mapped to one cloned block in
      // the caller since we prune the callee as we clone it. When that happens,
      // we want to use the maximum among the original blocks' frequencies.
      BlockFrequency NewFreq = CallerBFI->getBlockFreq(ClonedBB);
      if (NewFreq > Freq)
        Freq = NewFreq;
    }
    CallerBFI->setBlockFreq(ClonedBB, Freq);
  }
  BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
  CallerBFI->setBlockFreqAndScale(
      EntryClone, CallerBFI->getBlockFreq(CallSiteBlock), ClonedBBs);
}
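
// Worked example (numbers invented): if the callee entry block had frequency
// 100 and one of its loop blocks 400, and the call site block has frequency
// 50, then after scaling the cloned entry gets 50 and the cloned loop block
// 200, preserving the 1:4 ratio while anchoring the clone to the call site.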

/// Update the branch metadata for cloned call instructions.
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
                              const ProfileCount &CalleeEntryCount,
                              const CallBase &TheCall, ProfileSummaryInfo *PSI,
                              BlockFrequencyInfo *CallerBFI) {
  if (CalleeEntryCount.isSynthetic() || CalleeEntryCount.getCount() < 1)
    return;
  auto CallSiteCount =
      PSI ? PSI->getProfileCount(TheCall, CallerBFI) : std::nullopt;
  int64_t CallCount =
      std::min(CallSiteCount.value_or(0), CalleeEntryCount.getCount());
  updateProfileCallee(Callee, -CallCount, &VMap);
}

void llvm::updateProfileCallee(
    Function *Callee, int64_t EntryDelta,
    const ValueMap<const Value *, WeakTrackingVH> *VMap) {
  auto CalleeCount = Callee->getEntryCount();
  if (!CalleeCount)
    return;

  const uint64_t PriorEntryCount = CalleeCount->getCount();

  // Since CallSiteCount is an estimate, it could exceed the original callee
  // count and has to be set to 0 so guard against underflow.
  const uint64_t NewEntryCount =
      (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
          ? 0
          : PriorEntryCount + EntryDelta;

  // During inlining ?
  if (VMap) {
    uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
    for (auto Entry : *VMap)
      if (isa<CallInst>(Entry.first))
        if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
          CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
  }

  if (EntryDelta) {
    Callee->setEntryCount(NewEntryCount);

    for (BasicBlock &BB : *Callee)
      // No need to update the callsite if it is pruned during inlining.
      if (!VMap || VMap->count(&BB))
        for (Instruction &I : BB)
          if (CallInst *CI = dyn_cast<CallInst>(&I))
            CI->updateProfWeight(NewEntryCount, PriorEntryCount);
  }
}
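
// Worked example (counts invented): with a prior callee entry count of 1000
// and a call-site count of 300, EntryDelta is -300, the remaining callee keeps
// 700, and the calls cloned into the caller get their branch_weights scaled by
// 300/1000 so the two copies together still account for the original profile.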

/// An operand bundle "clang.arc.attachedcall" on a call indicates the call
/// result is implicitly consumed by a call to retainRV or claimRV immediately
/// after the call. This function inlines the retainRV/claimRV calls.
///
/// There are three cases to consider:
///
/// 1. If there is a call to autoreleaseRV that takes a pointer to the returned
///    object in the callee return block, the autoreleaseRV call and the
///    retainRV/claimRV call in the caller cancel out. If the call in the caller
///    is a claimRV call, a call to objc_release is emitted.
///
/// 2. If there is a call in the callee return block that doesn't have operand
///    bundle "clang.arc.attachedcall", the operand bundle on the original call
///    is transferred to the call in the callee.
///
/// 3. Otherwise, a call to objc_retain is inserted if the call in the caller is
///    a retainRV call.
static void
inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
                           const SmallVectorImpl<ReturnInst *> &Returns) {
  Module *Mod = CB.getModule();
  assert(objcarc::isRetainOrClaimRV(RVCallKind) && "unexpected ARC function");
  bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
       IsUnsafeClaimRV = !IsRetainRV;

  for (auto *RI : Returns) {
    Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
    bool InsertRetainCall = IsRetainRV;
    IRBuilder<> Builder(RI->getContext());

    // Walk backwards through the basic block looking for either a matching
    // autoreleaseRV call or an unannotated call.
    auto InstRange = llvm::make_range(++(RI->getIterator().getReverse()),
                                      RI->getParent()->rend());
    for (Instruction &I : llvm::make_early_inc_range(InstRange)) {
      // Ignore casts.
      if (isa<CastInst>(I))
        continue;

      if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
        if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
            !II->hasNUses(0) ||
            objcarc::GetRCIdentityRoot(II->getOperand(0)) != RetOpnd)
          break;

        // If we've found a matching autoreleaseRV call:
        // - If claimRV is attached to the call, insert a call to objc_release
        //   and erase the autoreleaseRV call.
        // - If retainRV is attached to the call, just erase the autoreleaseRV
        //   call.
        if (IsUnsafeClaimRV) {
          Builder.SetInsertPoint(II);
          Function *IFn =
              Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
          Builder.CreateCall(IFn, RetOpnd, "");
        }
        II->eraseFromParent();
        InsertRetainCall = false;
        break;
      }

      auto *CI = dyn_cast<CallInst>(&I);
      if (!CI)
        break;

      if (objcarc::GetRCIdentityRoot(CI) != RetOpnd ||
          objcarc::hasAttachedCallOpBundle(CI))
        break;

      // If we've found an unannotated call that defines RetOpnd, add a
      // "clang.arc.attachedcall" operand bundle.
      Value *BundleArgs[] = {*objcarc::getAttachedARCFunction(&CB)};
      OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
      auto *NewCall = CallBase::addOperandBundle(
          CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI);
      NewCall->copyMetadata(*CI);
      CI->replaceAllUsesWith(NewCall);
      CI->eraseFromParent();
      InsertRetainCall = false;
      break;
    }

    if (InsertRetainCall) {
      // The retainRV is attached to the call and we've failed to find a
      // matching autoreleaseRV or an annotated call in the callee. Emit a call
      // to objc_retain.
      Builder.SetInsertPoint(RI);
      Function *IFn = Intrinsic::getDeclaration(Mod, Intrinsic::objc_retain);
      Builder.CreateCall(IFn, RetOpnd, "");
    }
  }
}

/// This function inlines the called function into the basic block of the
/// caller. This returns false if it is not possible to inline this call.
/// The program is still in a well defined state if this occurs though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
                                        bool MergeAttributes,
                                        AAResults *CalleeAAR,
                                        bool InsertLifetime,
                                        Function *ForwardVarArgsTo) {
  assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");

  // FIXME: we don't inline callbr yet.
  if (isa<CallBrInst>(CB))
    return InlineResult::failure("We don't inline callbr yet.");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  Function *CalledFunc = CB.getCalledFunction();
  if (!CalledFunc ||              // Can't inline external function or indirect
      CalledFunc->isDeclaration()) // call!
    return InlineResult::failure("external or indirect");

  // The inliner does not know how to inline through calls with operand bundles
  // in general ...
  Value *ConvergenceControlToken = nullptr;
  if (CB.hasOperandBundles()) {
    for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
      auto OBUse = CB.getOperandBundleAt(i);
      uint32_t Tag = OBUse.getTagID();
      // ... but it knows how to inline through "deopt" operand bundles ...
      if (Tag == LLVMContext::OB_deopt)
        continue;
      // ... and "funclet" operand bundles.
      if (Tag == LLVMContext::OB_funclet)
        continue;
      if (Tag == LLVMContext::OB_clang_arc_attachedcall)
        continue;
      if (Tag == LLVMContext::OB_kcfi)
        continue;
      if (Tag == LLVMContext::OB_convergencectrl) {
        ConvergenceControlToken = OBUse.Inputs[0].get();
        continue;
      }

      return InlineResult::failure("unsupported operand bundle");
    }
  }

  // FIXME: The check below is redundant and incomplete. According to spec, if a
  // convergent call is missing a token, then the caller is using uncontrolled
  // convergence. If the callee has an entry intrinsic, then the callee is using
  // controlled convergence, and the call cannot be inlined. A proper
  // implementation of this check requires a whole new analysis that identifies
  // convergence in every function. For now, we skip that and just do this one
  // cursory check. The underlying assumption is that in a compiler flow that
  // fully implements convergence control tokens, there is no mixing of
  // controlled and uncontrolled convergent operations in the whole program.
  if (CB.isConvergent()) {
    auto *I = CalledFunc->getEntryBlock().getFirstNonPHI();
    if (auto *IntrinsicCall = dyn_cast<IntrinsicInst>(I)) {
      if (IntrinsicCall->getIntrinsicID() ==
          Intrinsic::experimental_convergence_entry) {
        if (!ConvergenceControlToken) {
          return InlineResult::failure(
              "convergent call needs convergencectrl operand");
        }
      }
    }
  }

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CB.doesNotThrow();

  BasicBlock *OrigBB = CB.getParent();
  Function *Caller = OrigBB->getParent();

  // Do not inline strictfp function into non-strictfp one. It would require
  // conversion of all FP operations in host function to constrained intrinsics.
  if (CalledFunc->getAttributes().hasFnAttr(Attribute::StrictFP) &&
      !Caller->getAttributes().hasFnAttr(Attribute::StrictFP)) {
    return InlineResult::failure("incompatible strictfp attributes");
  }

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return InlineResult::failure("incompatible GC");
  }

  // Get the personality function from the callee if it contains a landing pad.
  Constant *CalledPersonality =
      CalledFunc->hasPersonalityFn()
          ? CalledFunc->getPersonalityFn()->stripPointerCasts()
          : nullptr;

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  Constant *CallerPersonality =
      Caller->hasPersonalityFn()
          ? Caller->getPersonalityFn()->stripPointerCasts()
          : nullptr;
  if (CalledPersonality) {
    if (!CallerPersonality)
      Caller->setPersonalityFn(CalledPersonality);
    // If the personality functions match, then we can perform the
    // inlining. Otherwise, we can't inline.
    // TODO: This isn't 100% true. Some personality functions are proper
    //       supersets of others and can be used in place of the other.
    else if (CalledPersonality != CallerPersonality)
      return InlineResult::failure("incompatible personality");
  }

  // We need to figure out which funclet the callsite was in so that we may
  // properly nest the callee.
  Instruction *CallSiteEHPad = nullptr;
  if (CallerPersonality) {
    EHPersonality Personality = classifyEHPersonality(CallerPersonality);
    if (isScopedEHPersonality(Personality)) {
      std::optional<OperandBundleUse> ParentFunclet =
          CB.getOperandBundle(LLVMContext::OB_funclet);
      if (ParentFunclet)
        CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());

      // OK, the inlining site is legal. What about the target function?
      if (CallSiteEHPad) {
        if (Personality == EHPersonality::MSVC_CXX) {
          // The MSVC personality cannot tolerate catches getting inlined into
          // cleanup funclets.
          if (isa<CleanupPadInst>(CallSiteEHPad)) {
            // Ok, the call site is within a cleanuppad. Let's check the callee
            // for catchpads.
            for (const BasicBlock &CalledBB : *CalledFunc) {
              if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
                return InlineResult::failure("catch in cleanup funclet");
            }
          }
        } else if (isAsynchronousEHPersonality(Personality)) {
          // SEH is even less tolerant, there may not be any sort of exceptional
          // funclet in the callee.
          for (const BasicBlock &CalledBB : *CalledFunc) {
            if (CalledBB.isEHPad())
              return InlineResult::failure("SEH in cleanup funclet");
          }
        }
      }
    }
  }

  // Determine if we are dealing with a call in an EHPad which does not unwind
  // to caller.
  bool EHPadForCallUnwindsLocally = false;
  if (CallSiteEHPad && isa<CallInst>(CB)) {
    UnwindDestMemoTy FuncletUnwindMap;
    Value *CallSiteUnwindDestToken =
        getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);

    EHPadForCallUnwindsLocally =
        CallSiteUnwindDestToken &&
        !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = --Caller->end();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst *, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    struct ByValInit {
      Value *Dst;
      Value *Src;
      Type *Ty;
    };
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<ByValInit, 4> ByValInits;

    // When inlining a function that contains noalias scope metadata,
    // this metadata needs to be cloned so that the inlined blocks
    // have different "unique scopes" at every call site.
    // Track the metadata that must be cloned. Do this before other changes to
    // the function, so that we do not get in trouble when inlining caller ==
    // callee.
    ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());

    auto &DL = Caller->getParent()->getDataLayout();

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    auto AI = CB.arg_begin();
    unsigned ArgNo = 0;
    for (Function::arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments actually inlined, we need to make the copy implied
      // by them explicit. However, we don't do this if the callee is readonly
      // or readnone, because the copy would be unneeded: the callee doesn't
      // modify the struct.
      if (CB.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(CB.getParamByValType(ArgNo), ActualArg,
                                        &CB, CalledFunc, IFI,
                                        CalledFunc->getParamAlign(ArgNo));
        if (ActualArg != *AI)
          ByValInits.push_back(
              {ActualArg, (Value *)*AI, CB.getParamByValType(ArgNo)});
      }

      VMap[&*I] = ActualArg;
    }

    // TODO: Remove this when users have been updated to the assume bundles.
    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CB, IFI);

    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;

    /// Preserve all attributes of the call and its parameters.
    salvageKnowledge(&CB, AC);

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo);
    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Insert retainRV/claimRV runtime calls.
    objcarc::ARCInstKind RVCallKind = objcarc::getAttachedARCFunctionKind(&CB);
    if (RVCallKind != objcarc::ARCInstKind::None)
      inlineRetainOrClaimRVCalls(CB, RVCallKind, Returns);

    // Updated caller/callee profiles only when requested. For sample loader
    // inlining, the context-sensitive inlinee profile doesn't need to be
    // subtracted from callee profile, and the inlined clone also doesn't need
    // to be scaled based on call site count.
    if (IFI.UpdateProfile) {
      if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
        // Update the BFI of blocks cloned into the caller.
        updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
                        CalledFunc->front());

      if (auto Profile = CalledFunc->getEntryCount())
        updateCallProfile(CalledFunc, VMap, *Profile, CB, IFI.PSI,
                          IFI.CallerBFI);
    }

    // Inject byval arguments initialization.
    for (ByValInit &Init : ByValInits)
      HandleByValArgumentInit(Init.Ty, Init.Dst, Init.Src, Caller->getParent(),
                              &*FirstNewBlock, IFI, CalledFunc);

    std::optional<OperandBundleUse> ParentDeopt =
        CB.getOperandBundle(LLVMContext::OB_deopt);
    if (ParentDeopt) {
      SmallVector<OperandBundleDef, 2> OpDefs;

      for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
        CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
        if (!ICS)
          continue; // instruction was DCE'd or RAUW'ed to undef

        OpDefs.clear();

        OpDefs.reserve(ICS->getNumOperandBundles());

        for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
             ++COBi) {
          auto ChildOB = ICS->getOperandBundleAt(COBi);
          if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
            // If the inlined call has other operand bundles, let them be
            // added as well.
            OpDefs.emplace_back(ChildOB);
            continue;
          }

          // It may be useful to separate this logic (of handling operand
          // bundles) out to a separate "policy" component if this gets crowded.
          // Prepend the parent's deoptimization continuation to the newly
          // inlined call's deoptimization continuation.
          std::vector<Value *> MergedDeoptArgs;
          MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
                                  ChildOB.Inputs.size());

          llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
          llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);

          OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
        }

        Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS);

        // Note: the RAUW does the appropriate fixup in VMap, so we need to do
        // this even if the call returns void.
        ICS->replaceAllUsesWith(NewI);

        ICS->eraseFromParent();
      }
    }

    // For 'nodebug' functions, the associated DISubprogram is always null.
    // Conservatively avoid propagating the callsite debug location to
    // instructions inlined from a function whose DISubprogram is not null.
    fixupLineNumbers(Caller, FirstNewBlock, &CB,
                     CalledFunc->getSubprogram() != nullptr);

    if (isAssignmentTrackingEnabled(*Caller->getParent())) {
      // Interpret inlined stores to caller-local variables as assignments.
      trackInlinedStores(FirstNewBlock, Caller->end(), CB);

      // Update DIAssignID metadata attachments and uses so that they are
      // unique to this inlined instance.
      fixupAssignments(FirstNewBlock, Caller->end());
    }

    // Now clone the inlined noalias scope metadata.
    SAMetadataCloner.clone();
    SAMetadataCloner.remap(FirstNewBlock, Caller->end());

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR, InlinedFunctionInfo);

    // Clone return attributes on the callsite into the calls within the inlined
    // function which feed into its return value.
    AddReturnAttributes(CB, VMap);

    propagateMemProfMetadata(CalledFunc, CB,
                             InlinedFunctionInfo.ContainsMemProfMetadata, VMap);

    // Propagate metadata on the callsite if necessary.
    PropagateCallSiteMetadata(CB, FirstNewBlock, Caller->end());

    // Register any cloned assumptions.
    if (IFI.GetAssumptionCache)
      for (BasicBlock &NewBlock :
           make_range(FirstNewBlock->getIterator(), Caller->end()))
        for (Instruction &I : NewBlock)
          if (auto *II = dyn_cast<AssumeInst>(&I))
            IFI.GetAssumptionCache(*Caller).registerAssumption(II);
  }

  if (ConvergenceControlToken) {
    auto *I = FirstNewBlock->getFirstNonPHI();
    if (auto *IntrinsicCall = dyn_cast<IntrinsicInst>(I)) {
      if (IntrinsicCall->getIntrinsicID() ==
          Intrinsic::experimental_convergence_entry) {
        IntrinsicCall->replaceAllUsesWith(ConvergenceControlToken);
        IntrinsicCall->eraseFromParent();
      }
    }
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller. First
  // calculate which instruction they should be inserted before. We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!allocaWouldBeStaticInEntry(AI))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             !cast<AllocaInst>(I)->use_empty() &&
             allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,
                                     AI->getIterator(), I);
    }
  }

  SmallVector<Value*,4> VarArgsToForward;
  SmallVector<AttributeSet, 4> VarArgsAttrs;
  for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
       i < CB.arg_size(); i++) {
    VarArgsToForward.push_back(CB.getArgOperand(i));
    VarArgsAttrs.push_back(CB.getAttributes().getParamAttrs(i));
  }

  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(&CB))
      CallSiteTailKind = CI->getTailCallKind();

    // For inlining purposes, the "notail" marker is the same as no marker.
    if (CallSiteTailKind == CallInst::TCK_NoTail)
      CallSiteTailKind = CallInst::TCK_None;

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : llvm::make_early_inc_range(*BB)) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // Forward varargs from inlined call site to calls to the
        // ForwardVarArgsTo function, if requested, and to musttail calls.
        if (!VarArgsToForward.empty() &&
            ((ForwardVarArgsTo &&
              CI->getCalledFunction() == ForwardVarArgsTo) ||
             CI->isMustTailCall())) {
          // Collect attributes for non-vararg parameters.
          AttributeList Attrs = CI->getAttributes();
          SmallVector<AttributeSet, 8> ArgAttrs;
          if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
            for (unsigned ArgNo = 0;
                 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
              ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
          }

          // Add VarArg attributes.
          ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
          Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
                                     Attrs.getRetAttrs(), ArgAttrs);
          // Add VarArgs to existing parameters.
          SmallVector<Value *, 6> Params(CI->args());
          Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
          CallInst *NewCI = CallInst::Create(
              CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
          NewCI->setDebugLoc(CI->getDebugLoc());
          NewCI->setAttributes(Attrs);
          NewCI->setCallingConv(CI->getCallingConv());
          CI->replaceAllUsesWith(NewCI);
          CI->eraseFromParent();
          CI = NewCI;
        }

        if (Function *F = CI->getCalledFunction())
          InlinedDeoptimizeCalls |=
              F->getIntrinsicID() == Intrinsic::experimental_deoptimize;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth. For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'. If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail. Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        //
        // Inlined notail calls should remain notail calls.
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        if (ChildTCK != CallInst::TCK_NoTail)
          ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Call sites inlined through a 'nounwind' call site should be
        // 'nounwind' as well. However, avoid marking call sites explicitly
        // where possible. This helps expose more opportunities for CSE after
        // inlining, commonly when the callee is an intrinsic.
        if (MarkNoUnwind && !CI->doesNotThrow())
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static alloca's, scoping them to the
  // function we just inlined.
  // We need to insert lifetime intrinsics even at O0 to avoid invalid
  // access caused by multithreaded coroutines. The check
  // `Caller->isPresplitCoroutine()` would affect AlwaysInliner at O0 only.
  if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
      !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];
      // Don't mark swifterror allocas. They can't have bitcast uses.
      if (AI->isSwiftError())
        continue;

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
              dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getParent()->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

        // Don't add markers for zero-sized allocas.
        if (AllocaArraySize == 0)
          continue;

        // Check that array size doesn't saturate uint64_t and doesn't
        // overflow when it's multiplied by type size.
        if (!AllocaTypeSize.isScalable() &&
            AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
            std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
                AllocaTypeSize.getFixedValue()) {
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
        // call and a return. The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        if (InlinedDeoptimizeCalls &&
            RI->getParent()->getTerminatingDeoptimizeCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }
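
  // Hedged sketch of the result (hypothetical alloca): a 64-byte static
  // alloca inlined from the callee ends up bracketed as
  //   call void @llvm.lifetime.start.p0(i64 64, ptr %buf.i)
  //   ...inlined body...
  //   call void @llvm.lifetime.end.p0(i64 64, ptr %buf.i)
  // so the stack slot can be reused outside the inlined region.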

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
                             .CreateStackSave("savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail or deoptimize
      // call and a return. The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
        continue;
      IRBuilder<>(RI).CreateStackRestore(SavedPtr);
    }
  }
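
  // Illustration only (made-up IR): an inlined body with a dynamic alloca is
  // wrapped as
  //   %savedstack = call ptr @llvm.stacksave.p0()
  //   %vla.i = alloca i32, i64 %n
  //   ...
  //   call void @llvm.stackrestore.p0(ptr %savedstack)
  // so the caller's stack does not keep growing across repeated inlined
  // executions.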

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions. This is sensitive to which
  // funclet pads were top-level in the inlinee, so must be done before
  // rewriting the "parent pad" links.
  if (auto *II = dyn_cast<InvokeInst>(&CB)) {
    BasicBlock *UnwindDest = II->getUnwindDest();
    Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
    if (isa<LandingPadInst>(FirstNonPHI)) {
      HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    } else {
      HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    }
  }

  // Update the lexical scopes of the new funclets and callsites.
  // Anything that had 'none' as its parent is now nested inside the callsite's
  // EHPad.
  if (CallSiteEHPad) {
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB) {
      // Add bundle operands to inlined call sites.
      PropagateOperandBundles(BB, CallSiteEHPad);

      // It is problematic if the inlinee has a cleanupret which unwinds to
      // caller and we inline it into a call site which doesn't unwind but into
      // an EH pad that does. Such an edge must be dynamically unreachable.
      // As such, we replace the cleanupret with unreachable.
      if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
        if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
          changeToUnreachable(CleanupRet);

      Instruction *I = BB->getFirstNonPHI();
      if (!I->isEHPad())
        continue;

      if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
        if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
          CatchSwitch->setParentPad(CallSiteEHPad);
      } else {
        auto *FPI = cast<FuncletPadInst>(I);
        if (isa<ConstantTokenNone>(FPI->getParentPad()))
          FPI->setParentPad(CallSiteEHPad);
      }
    }
  }

  if (InlinedDeoptimizeCalls) {
    // We need to at least remove the deoptimizing returns from the Return set,
    // so that the control flow from those returns does not get merged into the
    // caller (but terminate it instead). If the caller's return type does not
    // match the callee's return type, we also need to change the return type of
    // the intrinsic.
    if (Caller->getReturnType() == CB.getType()) {
      llvm::erase_if(Returns, [](ReturnInst *RI) {
        return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
      });
    } else {
      SmallVector<ReturnInst *, 8> NormalReturns;
      Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
          Caller->getParent(), Intrinsic::experimental_deoptimize,
          {Caller->getReturnType()});

      for (ReturnInst *RI : Returns) {
        CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
        if (!DeoptCall) {
          NormalReturns.push_back(RI);
          continue;
        }

        // The calling convention on the deoptimize call itself may be bogus,
        // since the code we're inlining may have undefined behavior (and may
        // never actually execute at runtime); but all
        // @llvm.experimental.deoptimize declarations have to have the same
        // calling convention in a well-formed module.
        auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
        NewDeoptIntrinsic->setCallingConv(CallingConv);
        auto *CurBB = RI->getParent();
        RI->eraseFromParent();

        SmallVector<Value *, 4> CallArgs(DeoptCall->args());

        SmallVector<OperandBundleDef, 1> OpBundles;
        DeoptCall->getOperandBundlesAsDefs(OpBundles);
        auto DeoptAttributes = DeoptCall->getAttributes();
        DeoptCall->eraseFromParent();
        assert(!OpBundles.empty() &&
               "Expected at least the deopt operand bundle");

        IRBuilder<> Builder(CurBB);
        CallInst *NewDeoptCall =
            Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
        NewDeoptCall->setCallingConv(CallingConv);
        NewDeoptCall->setAttributes(DeoptAttributes);
        if (NewDeoptCall->getType()->isVoidTy())
          Builder.CreateRetVoid();
        else
          Builder.CreateRet(NewDeoptCall);
        // Since the ret type is changed, remove the incompatible attributes.
        NewDeoptCall->removeRetAttrs(
            AttributeFuncs::typeIncompatible(NewDeoptCall->getType()));
      }

      // Leave behind the normal returns so we can merge control flow.
      std::swap(Returns, NormalReturns);
    }
  }

  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // Now that all of the transforms on the inlined code have taken place but
  // before we splice the inlined code into the CFG and lose track of which
  // blocks were actually inlined, collect the call sites. We only do this if
  // call graph updates weren't requested, as those provide value handle based
  // tracking of inlined call sites instead. Calls to intrinsics are not
  // collected because they are not inlineable.
  if (InlinedFunctionInfo.ContainsCalls) {
    // Otherwise just collect the raw call sites that were inlined.
    for (BasicBlock &NewBB :
         make_range(FirstNewBlock->getIterator(), Caller->end()))
      for (Instruction &I : NewBB)
        if (auto *CB = dyn_cast<CallBase>(&I))
          if (!(CB->getCalledFunction() &&
                CB->getCalledFunction()->isIntrinsic()))
            IFI.InlinedCallSites.push_back(CB);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->splice(CB.getIterator(), &*FirstNewBlock, FirstNewBlock->begin(),
                   FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->back().eraseFromParent();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), &CB);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!CB.use_empty()) {
      ReturnInst *R = Returns[0];
      if (&CB == R->getReturnValue())
        CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
      else
        CB.replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    CB.eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    if (MergeAttributes)
      AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);

    // We are now done with the inlining.
    return InlineResult::success();
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), &CB);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and make the invoke case more
    // symmetric to the call case.
    AfterCallBB =
        OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
                                CalledFunc->getName() + ".exit");

  } else { // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
                                          CalledFunc->getName() + ".exit");
  }

  if (IFI.CallerBFI) {
    // Copy original BB's block frequency to AfterCallBB
    IFI.CallerBFI->setBlockFreq(AfterCallBB,
                                IFI.CallerBFI->getBlockFreq(OrigBB));
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  Instruction *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, &*FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->splice(AfterCallBB->getIterator(), Caller, FirstNewBlock,
                 Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!CB.use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), CB.getName());
      PHI->insertBefore(AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      CB.replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!CB.use_empty()) {
      if (&CB == Returns[0]->getReturnValue())
        CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
      else
        CB.replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->splice(AfterCallBB->begin(), ReturnBB);

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction now and empty ReturnBB now.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!CB.use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  CB.eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_empty(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->splice(Br->getIterator(), CalleeEntry);

  // Remove the unconditional branch.
  Br->eraseFromParent();

  // Now we can remove the CalleeEntry block, which is now empty.
  CalleeEntry->eraseFromParent();

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = simplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  if (MergeAttributes)
    AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);

  return InlineResult::success();
}