//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Intrinsics.h"
#include "llvm/Attributes.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(CI), IFI);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(II), IFI);
}

/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes.  This function analyzes BB to see if there are any calls, and if
/// so, it rewrites them to be invokes that jump to InvokeDest and fills in the
/// PHI nodes in that block with the values specified in InvokeDestPHIValues.
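///
/// For example (an illustrative IR sketch, not taken from this file), a call
/// that may throw, such as
///
///   %r = call i32 @f(i32 %x)
///
/// is rewritten to
///
///   %r = invoke i32 @f(i32 %x)
///           to label %bb.noexc unwind label %invoke.dest
///
/// so that an exception unwinding out of @f transfers control to the inlined
/// invoke's exception destination.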
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   BasicBlock *InvokeDest,
                           const SmallVectorImpl<Value*> &InvokeDestPHIValues) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);
    if (CI == 0) continue;

    // If this call cannot unwind, don't convert it to an invoke.
    if (CI->doesNotThrow())
      continue;
    // Convert this function call into an invoke instruction.
    // First, split the basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Next, create the new invoke instruction, inserting it at the end
    // of the old basic block.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II =
      InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
                         InvokeArgs.begin(), InvokeArgs.end(),
                         CI->getName(), BB->getTerminator());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());
    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();
    Split->getInstList().pop_front();  // Delete the original call.
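    // (The invoke now both performs the call and branches to Split, so the
    // original call and the branch added by splitBasicBlock are redundant.)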
    // Update any PHI nodes in the exceptional block to indicate that
    // there is now a new entry in them.
    unsigned i = 0;
    for (BasicBlock::iterator I = InvokeDest->begin();
         isa<PHINode>(I); ++I, ++i)
      cast<PHINode>(I)->addIncoming(InvokeDestPHIValues[i], BB);

    // This basic block is now complete, the caller will continue scanning the
    // next one.
    return;
  }
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();
  SmallVector<Value*, 8> InvokeDestPHIValues;

  // If there are PHI nodes in the unwind destination block, we need to
  // keep track of which values came into them from this invoke, then remove
  // the entry for this block.
  BasicBlock *InvokeBlock = II->getParent();
  for (BasicBlock::iterator I = InvokeDest->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    // Save the value to use for this edge.
    InvokeDestPHIValues.push_back(PN->getIncomingValueForBlock(InvokeBlock));
  }

  Function *Caller = FirstNewBlock->getParent();
  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.  If the code doesn't have calls or unwinds, we know there is
  // nothing to rewrite.
  if (!InlinedCodeInfo.ContainsCalls && !InlinedCodeInfo.ContainsUnwinds) {
    // Now that everything is happy, we have one final detail.  The PHI nodes
    // in the exception destination block still have entries due to the
    // original invoke instruction.  Eliminate these entries (which might even
    // delete the PHI node) now.
    InvokeDest->removePredecessor(II->getParent());
    return;
  }
  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, InvokeDest,
                                             InvokeDestPHIValues);

    if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      // An UnwindInst requires special handling when it gets inlined into an
      // invoke site.  Once this happens, we know that the unwind would cause
      // a control transfer to the invoke exception destination, so we can
      // transform it into a direct branch to the exception destination.
      BranchInst::Create(InvokeDest, UI);

      // Delete the unwind instruction!
      UI->eraseFromParent();
      // Update any PHI nodes in the exceptional block to indicate that
      // there is now a new entry in them.
      unsigned i = 0;
      for (BasicBlock::iterator I = InvokeDest->begin();
           isa<PHINode>(I); ++I, ++i) {
        PHINode *PN = cast<PHINode>(I);
        PN->addIncoming(InvokeDestPHIValues[i], BB);
      }
    }
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }
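  // (When the caller and callee are the same function, the loop below adds
  // edges to CallerNode == CalleeNode while iterating over its call list;
  // copying the edges into CallCache first keeps the iterators valid.)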
  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == 0)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add.  Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (NewCall == 0) continue;
    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer.  If this
    // happens, set the callee of the new call site to a more precise
    // destination.  This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (I->second->getFunction() == 0)
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);
        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
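///
/// For example (an illustrative IR sketch, not taken from this file), inlining
/// a call such as
///
///   call void @f(%struct.T* byval %p)
///
/// introduces a fresh alloca in the caller plus a memcpy from %p into it, so
/// the inlined body mutates its own private copy, just as the callee would
/// have.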
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  const Type *AggTy = cast<PointerType>(Arg->getType())->getElementType();
  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory.  In this case, it is safe to elide the copy
  // and temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
                                   IFI.TD) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment.  This is
    // bad for code quality, but rarely happens and is required for
    // correctness.
  }
  LLVMContext &Context = Arg->getContext();

  const Type *VoidPtrTy = Type::getInt8PtrTy(Context);

  // Create the alloca.  If we have TargetData, use nice alignment.
  unsigned Align = 1;
  if (IFI.TD)
    Align = IFI.TD->getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);
  Function *Caller = TheCall->getParent()->getParent();

  Value *NewAlloca = new AllocaInst(AggTy, 0, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
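  // (Inserting the alloca at the top of the caller's entry block makes it a
  // static alloca, which later passes such as mem2reg know how to promote.)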
  const Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
  Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                 Intrinsic::memcpy,
                                                 Tys, 3);
  Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
  Value *SrcCast = new BitCastInst(Arg, VoidPtrTy, "tmp", TheCall);
  Value *Size;
  if (IFI.TD == 0)
    Size = ConstantExpr::getSizeOf(AggTy);
  else
    Size = ConstantInt::get(Type::getInt64Ty(Context),
                            IFI.TD->getTypeStoreSize(AggTy));
  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer.  Other optimizations can infer
  // better alignment.
  Value *CallArgs[] = {
    DestCast, SrcCast, Size,
    ConstantInt::get(Type::getInt32Ty(Context), 1),
    ConstantInt::getFalse(Context) // isVolatile
  };
  CallInst *TheMemCpy =
    CallInst::Create(MemCpyFn, CallArgs, CallArgs+5, "", TheCall);
  // If we have a call graph, update it.
  if (CallGraph *CG = IFI.CG) {
    CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
    CallGraphNode *CallerNode = (*CG)[Caller];
    CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
  }

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// InlineFunction - This function inlines the called function into the basic
// block of the caller.  This returns false if it is not possible to inline
// this call.  The program is still in a well defined state if this occurs
// though.
//
// Note that this only does one level of inlining.  For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// exists in the instruction stream.  Similarly this will inline a recursive
// function by one level.
//
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
  Instruction *TheCall = CS.getInstruction();
  LLVMContext &Context = TheCall->getContext();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();
  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||          // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee is not a tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();
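  // (In LLVM IR the 'tail' marker promises that the call does not access the
  // caller's stack frame; inlining into a non-tail position can break that
  // promise, which is why the marker is conservatively dropped later on.)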
  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to
  //     the caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }
  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;
  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CalledFunc->paramHasAttr(ArgNo+1, Attribute::ByVal)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));

        // Calls that we inline may use the new alloca, so we need to clear
        // their 'tail' flags if HandleByValArgument introduced a new alloca
        // and the callee has calls.
        MustClearTailCallFlags |= ActualArg != *AI;
      }

      VMap[I] = ActualArg;
    }
    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.TD, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;
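    // (LastBlock was the caller's final block before cloning, so the block
    // immediately after it is the entry block of the freshly inlined body.)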
    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
  }
  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before.  We
  // insert the instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (AI == 0) continue;

      // If the alloca is now dead, remove it.  This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);
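      // (Only allocas with a constant array size count as static stack slots;
      // moving them into the caller's entry block keeps them promotable by
      // passes such as mem2reg, which only considers entry-block allocas.)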
      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block.  Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }
  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
      Intrinsic::getDeclaration(M, Intrinsic::stackrestore);
    // If we are preserving the callgraph, add edges to the stacksave/restore
    // functions for the calls we insert.
    CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
    if (CallGraph *CG = IFI.CG) {
      StackSaveCGN    = CG->getOrInsertFunction(StackSave);
      StackRestoreCGN = CG->getOrInsertFunction(StackRestore);
      CallerNode = (*CG)[Caller];
    }
    // Insert the llvm.stacksave.
    CallInst *SavedPtr = CallInst::Create(StackSave, "savedstack",
                                          FirstNewBlock->begin());
    if (IFI.CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", Returns[i]);
      if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
    }

    // Count the number of StackRestore calls we insert.
    unsigned NumStackRestores = Returns.size();
    // If we are inlining an invoke instruction, insert restores before each
    // unwind.  These unwinds will be rewritten into branches later.
    if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
      for (Function::iterator BB = FirstNewBlock, E = Caller->end();
           BB != E; ++BB)
        if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
          CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", UI);
          if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
          ++NumStackRestores;
        }
    }
  }
  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code.  Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }
  // If we are inlining through a 'nounwind' call site then any inlined
  // 'unwind' instructions are unreachable.
  if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
        new UnreachableInst(Context, Term);
        BB->getInstList().erase(Term);
      }
    }
  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any inlined 'unwind' instructions into branches to the invoke exception
  // destination, and call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);
  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();
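    // (This is the common fast path for small callees: no control-flow
    // stitching is needed, so the shape of the caller's CFG is preserved.)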
    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      BranchInst::Create(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }

    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }
  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to
    // be updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }
  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);
  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
  const Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = 0;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }
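    // (Each return site becomes a branch to AfterCallBB below, so the PHI
    // merges one return value per predecessor edge.)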
    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }
    // Add a branch to the merge point and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst::Create(AfterCallBB, RI);
      RI->eraseFromParent();
    }
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }
    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();
  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code of the entry block into the calling block, right before
  // the unconditional branch.
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes.

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);
  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef).  If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    if (Value *V = SimplifyInstruction(PHI, IFI.TD)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}