//===- LowerInvoke.cpp - Eliminate Invoke & Unwind instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation is designed for use by code generators which do not yet
// support stack unwinding. This pass supports two models of exception handling
// lowering, the 'cheap' support and the 'expensive' support.
//
// 'Cheap' exception handling support gives the program the ability to execute
// any program which does not "throw an exception", by turning 'invoke'
// instructions into calls and by turning 'unwind' instructions into calls to
// abort(). If the program does dynamically use the unwind instruction, the
// program will print a message then abort.
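//
// For example, under the 'cheap' model an invoke such as (a sketch; the
// block names are illustrative):
//
//     invoke void @foo() to label %normal unwind label %lpad
//
// is rewritten to:
//
//     call void @foo()
//     br label %normal
//
// and %lpad simply loses this predecessor.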
//
// 'Expensive' exception handling support gives the full exception handling
// support to the program at the cost of making the 'invoke' instruction
// really expensive. It basically inserts setjmp/longjmp calls to emulate the
// exception handling as necessary.
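//
// Conceptually (a C-level sketch of the emitted control flow, not the
// literal output), each function containing invokes becomes:
//
//     push a fresh jmp_buf onto the global llvm.sjljeh.jblist;
//     if (setjmp(buf) == 0)
//       run the original code, with every invoke lowered to a call that
//       records its invoke number in a volatile slot;
//     else
//       switch on the recorded number to reach the matching landing pad;
//
// and every unwind becomes a longjmp to the jmp_buf at the head of the list,
// or a call to abort() if the list is empty.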
//
// Because the 'expensive' support slows down programs a lot, and EH is only
// used for a subset of the programs, it must be specifically enabled by an
// option.
//
// Note that after this pass runs the CFG is not entirely accurate (exceptional
// control flow edges are not correct anymore) so only very simple things should
// be done after the lowerinvoke pass has run (like generation of native code).
// This should not be used as a general purpose "my LLVM-to-LLVM pass doesn't
// support the invoke instruction yet" lowering pass.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "lowerinvoke"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include <csetjmp>
#include <set>
using namespace llvm;

STATISTIC(NumInvokes, "Number of invokes replaced");
STATISTIC(NumUnwinds, "Number of unwinds replaced");
STATISTIC(NumSpilled, "Number of registers live across unwind edges");

static cl::opt<bool> ExpensiveEHSupport("enable-correct-eh-support",
 cl::desc("Make the -lowerinvoke pass insert expensive, but correct, EH code"));

namespace {
  class LowerInvoke : public FunctionPass {
    // Used for both models.
    Constant *AbortFn;

    // Used for expensive EH support.
    StructType *JBLinkTy;
    GlobalVariable *JBListHead;
    Constant *SetJmpFn, *LongJmpFn, *StackSaveFn, *StackRestoreFn;
    bool useExpensiveEHSupport;

    // We peek in TLI to grab the target's jmp_buf size and alignment
    const TargetLowering *TLI;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit LowerInvoke(const TargetLowering *tli = NULL,
                         bool useExpensiveEHSupport = ExpensiveEHSupport)
      : FunctionPass(ID), useExpensiveEHSupport(useExpensiveEHSupport),
        TLI(tli) {
      initializeLowerInvokePass(*PassRegistry::getPassRegistry());
    }
    bool doInitialization(Module &M);
    bool runOnFunction(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      // This is a cluster of orthogonal Transforms
      AU.addPreserved("mem2reg");
      AU.addPreservedID(LowerSwitchID);
    }

  private:
    bool insertCheapEHSupport(Function &F);
    void splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*>&Invokes);
    void rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
                                AllocaInst *InvokeNum, AllocaInst *StackPtr,
                                SwitchInst *CatchSwitch);
    bool insertExpensiveEHSupport(Function &F);
  };
}

char LowerInvoke::ID = 0;
INITIALIZE_PASS(LowerInvoke, "lowerinvoke",
                "Lower invoke and unwind, for unwindless code generators",
                false, false)

char &llvm::LowerInvokePassID = LowerInvoke::ID;

// Public Interface To the LowerInvoke pass.
FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI) {
  return new LowerInvoke(TLI, ExpensiveEHSupport);
}
FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI,
                                          bool useExpensiveEHSupport) {
  return new LowerInvoke(TLI, useExpensiveEHSupport);
}

// doInitialization - Make sure that there is a prototype for abort in the
// current module.
bool LowerInvoke::doInitialization(Module &M) {
  const Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext());
  if (useExpensiveEHSupport) {
    // Insert a type for the linked list of jump buffers.
    unsigned JBSize = TLI ? TLI->getJumpBufSize() : 0;
    JBSize = JBSize ? JBSize : 200;
    Type *JmpBufTy = ArrayType::get(VoidPtrTy, JBSize);

    JBLinkTy = StructType::createNamed(M.getContext(), "llvm.sjljeh.jmpbufty");
    Type *Elts[] = { JmpBufTy, PointerType::getUnqual(JBLinkTy) };
    JBLinkTy->setBody(Elts);

    const Type *PtrJBList = PointerType::getUnqual(JBLinkTy);

    // Now that we've done that, insert the jmpbuf list head global, unless it
    // already exists.
    if (!(JBListHead = M.getGlobalVariable("llvm.sjljeh.jblist", PtrJBList))) {
      JBListHead = new GlobalVariable(M, PtrJBList, false,
                                      GlobalValue::LinkOnceAnyLinkage,
                                      Constant::getNullValue(PtrJBList),
                                      "llvm.sjljeh.jblist");
    }

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
                         !defined(setjmp_undefined_for_msvc)
#  pragma push_macro("setjmp")
#  undef setjmp
#  define setjmp_undefined_for_msvc
#endif

    SetJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::setjmp);

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
   // let's return it to _setjmp state
#  pragma pop_macro("setjmp")
#  undef setjmp_undefined_for_msvc
#endif

    LongJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::longjmp);
    StackSaveFn = Intrinsic::getDeclaration(&M, Intrinsic::stacksave);
    StackRestoreFn = Intrinsic::getDeclaration(&M, Intrinsic::stackrestore);
  }

  // We need the 'abort' function for both models.
  AbortFn = M.getOrInsertFunction("abort", Type::getVoidTy(M.getContext()),
                                  (Type *)0);
  return true;
}

bool LowerInvoke::insertCheapEHSupport(Function &F) {
  bool Changed = false;
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
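      // An invoke's last three operands are not call arguments; they are the
      // callee and the two destination blocks, hence the 'op_end() - 3'.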
      SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
      // Insert a normal call instruction...
      CallInst *NewCall = CallInst::Create(II->getCalledValue(),
                                           CallArgs.begin(), CallArgs.end(),
                                           "", II);
      NewCall->takeName(II);
      NewCall->setCallingConv(II->getCallingConv());
      NewCall->setAttributes(II->getAttributes());
      NewCall->setDebugLoc(II->getDebugLoc());
      II->replaceAllUsesWith(NewCall);

      // Insert an unconditional branch to the normal destination.
      BranchInst::Create(II->getNormalDest(), II);

      // Remove any PHI node entries from the exception destination.
      II->getUnwindDest()->removePredecessor(BB);

      // Remove the invoke instruction now.
      BB->getInstList().erase(II);

      ++NumInvokes; Changed = true;
    } else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      // Insert a call to abort()
      CallInst::Create(AbortFn, "", UI)->setTailCall();

      // Insert a return instruction. This really should be a "barrier", as it
      // is unreachable.
      ReturnInst::Create(F.getContext(),
                         F.getReturnType()->isVoidTy() ?
                          0 : Constant::getNullValue(F.getReturnType()), UI);

      // Remove the unwind instruction now.
      BB->getInstList().erase(UI);

      ++NumUnwinds; Changed = true;
    }
  return Changed;
}

/// rewriteExpensiveInvoke - Insert code and hack the function to replace the
/// specified invoke instruction with a call.
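/// The call is bracketed by volatile stores to InvokeNum (this invoke's
/// number before it, zero after it) and by a stack-pointer save/restore;
/// the unwind destination becomes a case of CatchSwitch keyed on InvokeNo.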
void LowerInvoke::rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
                                         AllocaInst *InvokeNum,
                                         AllocaInst *StackPtr,
                                         SwitchInst *CatchSwitch) {
  ConstantInt *InvokeNoC = ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                            InvokeNo);

  // If the unwind edge has phi nodes, split the edge.
  if (isa<PHINode>(II->getUnwindDest()->begin())) {
    SplitCriticalEdge(II, 1, this);

    // If there are any phi nodes left, they must have a single predecessor.
    while (PHINode *PN = dyn_cast<PHINode>(II->getUnwindDest()->begin())) {
      PN->replaceAllUsesWith(PN->getIncomingValue(0));
      PN->eraseFromParent();
    }
  }

  // Insert a store of the invoke num before the invoke and store zero into the
  // location afterward.
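  // (While unwinding, a nonzero InvokeNum tells the catch block's switch which
  // landing pad to branch to; zero identifies a plain call, whose unwind must
  // keep propagating to the caller.)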
  new StoreInst(InvokeNoC, InvokeNum, true, II);  // volatile

  // Insert a store of the stack ptr before the invoke, so we can restore it
  // later in the exception case.
  CallInst *StackSaveRet = CallInst::Create(StackSaveFn, "ssret", II);
  new StoreInst(StackSaveRet, StackPtr, true, II);  // volatile

  BasicBlock::iterator NI = II->getNormalDest()->getFirstNonPHI();
  // nonvolatile.
  new StoreInst(Constant::getNullValue(Type::getInt32Ty(II->getContext())),
                InvokeNum, false, NI);

  Instruction *StackPtrLoad =
    new LoadInst(StackPtr, "stackptr.restore", true,
                 II->getUnwindDest()->getFirstNonPHI());
  CallInst::Create(StackRestoreFn, StackPtrLoad, "")->insertAfter(StackPtrLoad);

  // Add a switch case to our unwind block.
  CatchSwitch->addCase(InvokeNoC, II->getUnwindDest());

  // Insert a normal call instruction.
  SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
  CallInst *NewCall = CallInst::Create(II->getCalledValue(),
                                       CallArgs.begin(), CallArgs.end(), "",
                                       II);
  NewCall->takeName(II);
  NewCall->setCallingConv(II->getCallingConv());
  NewCall->setAttributes(II->getAttributes());
  NewCall->setDebugLoc(II->getDebugLoc());
  II->replaceAllUsesWith(NewCall);

  // Replace the invoke with an uncond branch.
  BranchInst::Create(II->getNormalDest(), NewCall->getParent());
  II->eraseFromParent();
}

/// MarkBlocksLiveIn - Insert BB and all of its predecessors into LiveBBs until
/// we reach blocks we've already seen.
static void MarkBlocksLiveIn(BasicBlock *BB, std::set<BasicBlock*> &LiveBBs) {
  if (!LiveBBs.insert(BB).second) return;  // already been here.

  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
    MarkBlocksLiveIn(*PI, LiveBBs);
}

// First thing we need to do is scan the whole function for values that are
// live across unwind edges. Each value that is live across an unwind edge
// we spill into a stack location, guaranteeing that there is nothing live
// across the unwind edge. This process also splits all critical edges
// coming out of invoke's.
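// (Rationale: after a longjmp, values that lived in registers are
// unpredictable, so anything live across an unwind edge must be kept in a
// stack slot instead; DemoteRegToStack below provides exactly that.)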
void LowerInvoke::
splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes) {
  // First step, split all critical edges from invoke instructions.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    InvokeInst *II = Invokes[i];
    SplitCriticalEdge(II, 0, this);
    SplitCriticalEdge(II, 1, this);
    assert(!isa<PHINode>(II->getNormalDest()) &&
           !isa<PHINode>(II->getUnwindDest()) &&
           "critical edge splitting left single entry phi nodes?");
  }

  Function *F = Invokes.back()->getParent()->getParent();

  // To avoid having to handle incoming arguments specially, we lower each arg
  // to a copy instruction in the entry block. This ensures that the argument
  // value itself cannot be live across the entry block.
  BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
  while (isa<AllocaInst>(AfterAllocaInsertPt) &&
         isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
    ++AfterAllocaInsertPt;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI) {
    const Type *Ty = AI->getType();
    // Aggregate types can't be cast, but are legal argument types, so we have
    // to handle them differently. We use an extract/insert pair as a
    // lightweight method to achieve the same goal.
    if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
      Instruction *EI = ExtractValueInst::Create(AI, 0, "", AfterAllocaInsertPt);
      Instruction *NI = InsertValueInst::Create(AI, EI, 0);
      NI->insertAfter(EI);
      AI->replaceAllUsesWith(NI);
      // Set the operand of the instructions back to the argument.
      EI->setOperand(0, AI);
      NI->setOperand(0, AI);
    } else {
      // This is always a no-op cast because we're casting AI to AI->getType()
      // so src and destination types are identical. BitCast is the only
      // possibility.
      CastInst *NC = new BitCastInst(
        AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
      AI->replaceAllUsesWith(NC);
      // Set the operand of the cast instruction back to the argument.
      // Normally it's forbidden to replace a CastInst's operand because it
      // could cause the opcode to reflect an illegal conversion. However,
      // we're replacing it here with the same value it was constructed with.
      // We do this because the above replaceAllUsesWith() clobbered the
      // operand, but we want this one to remain.
      NC->setOperand(0, AI);
    }
  }

  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      // Ignore obvious cases we don't have to handle. In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block. Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      SmallVector<Instruction*,16> Users;
      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
           UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Scan all of the uses and see if the live range is live across an
      // unwind edge. If we find a use live across an invoke edge, create an
      // alloca and spill the value.
      std::set<InvokeInst*> InvokesWithStoreInserted;

      // Find all of the blocks that this value is live in.
      std::set<BasicBlock*> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          NeedsSpill = true;
        }
      }

      // If we decided we need a spill, do it.
      if (NeedsSpill) {
        ++NumSpilled;
        DemoteRegToStack(*Inst, true);
      }
    }
}

bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
  SmallVector<ReturnInst*,16> Returns;
  SmallVector<UnwindInst*,16> Unwinds;
  SmallVector<InvokeInst*,16> Invokes;

  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
      // Remember all return instructions in case we insert an invoke into this
      // function.
      Returns.push_back(RI);
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      Invokes.push_back(II);
    } else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      Unwinds.push_back(UI);
    }

  if (Unwinds.empty() && Invokes.empty()) return false;

  NumInvokes += Invokes.size();
  NumUnwinds += Unwinds.size();

  // TODO: This is not an optimal way to do this. In particular, this always
  // inserts setjmp calls into the entries of functions with invoke instructions
  // even though there are possibly paths through the function that do not
  // execute any invokes. In particular, for functions with early exits, e.g.
  // the 'addMove' method in hexxagon, it would be nice to not have to do the
  // setjmp stuff on the early exit path. This requires a bit of dataflow, but
  // would not be too hard to do.

  // If we have an invoke instruction, insert a setjmp that dominates all
  // invokes. After the setjmp, use a cond branch that goes to the original
  // code path on zero, and to a designated 'catch' block on nonzero.
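  //
  // In outline (using the names created below), the rewritten entry becomes:
  //
  //   entry:        push the local 'jblink' buffer onto llvm.sjljeh.jblist
  //                 %sjret = setjmp(buffer)
  //                 %notunwind = icmp eq %sjret, 0
  //                 br i1 %notunwind, label %setjmp.cont, label %setjmp.catch
  //   setjmp.catch: switch on the volatile 'invokenum' slot, defaulting to a
  //                 block that simply unwinds further.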
  Value *OldJmpBufPtr = 0;
  if (!Invokes.empty()) {
    // First thing we need to do is scan the whole function for values that are
    // live across unwind edges. Each value that is live across an unwind edge
    // we spill into a stack location, guaranteeing that there is nothing live
    // across the unwind edge. This process also splits all critical edges
    // coming out of invoke's.
    splitLiveRangesLiveAcrossInvokes(Invokes);

    BasicBlock *EntryBB = F.begin();

    // Create an alloca for the incoming jump buffer ptr and the new jump buffer
    // that needs to be restored on all exits from the function. This is an
    // alloca because the value needs to be live across invokes.
    unsigned Align = TLI ? TLI->getJumpBufAlignment() : 0;
    AllocaInst *JmpBuf =
      new AllocaInst(JBLinkTy, 0, Align,
                     "jblink", F.begin()->begin());

    Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
                     ConstantInt::get(Type::getInt32Ty(F.getContext()), 1) };
    OldJmpBufPtr = GetElementPtrInst::Create(JmpBuf, &Idx[0], &Idx[2],
                                             "OldBuf",
                                             EntryBB->getTerminator());

    // Copy the JBListHead to the alloca.
    Value *OldBuf = new LoadInst(JBListHead, "oldjmpbufptr", true,
                                 EntryBB->getTerminator());
    new StoreInst(OldBuf, OldJmpBufPtr, true, EntryBB->getTerminator());

    // Add the new jumpbuf to the list.
    new StoreInst(JmpBuf, JBListHead, true, EntryBB->getTerminator());

    // Create the catch block. The catch block is basically a big switch
    // statement that goes to all of the invoke catch blocks.
    BasicBlock *CatchBB =
      BasicBlock::Create(F.getContext(), "setjmp.catch", &F);

    // Create an alloca which keeps track of the stack pointer before every
    // invoke, this allows us to properly restore the stack pointer after
    // long jumping.
    AllocaInst *StackPtr = new AllocaInst(Type::getInt8PtrTy(F.getContext()), 0,
                                          "stackptr", EntryBB->begin());

    // Create an alloca which keeps track of which invoke is currently
    // executing. For normal calls it contains zero.
    AllocaInst *InvokeNum = new AllocaInst(Type::getInt32Ty(F.getContext()), 0,
                                           "invokenum", EntryBB->begin());
    new StoreInst(ConstantInt::get(Type::getInt32Ty(F.getContext()), 0),
                  InvokeNum, true, EntryBB->getTerminator());

    // Insert a load in the Catch block, and a switch on its value. By default,
    // we go to a block that just does an unwind (which is the correct action
    // for a standard call).
    BasicBlock *UnwindBB = BasicBlock::Create(F.getContext(), "unwindbb", &F);
    Unwinds.push_back(new UnwindInst(F.getContext(), UnwindBB));

    Value *CatchLoad = new LoadInst(InvokeNum, "invoke.num", true, CatchBB);
    SwitchInst *CatchSwitch =
      SwitchInst::Create(CatchLoad, UnwindBB, Invokes.size(), CatchBB);

    // Now that things are set up, insert the setjmp call itself.

    // Split the entry block to insert the conditional branch for the setjmp.
    BasicBlock *ContBlock = EntryBB->splitBasicBlock(EntryBB->getTerminator(),
                                                     "setjmp.cont");

    Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 0);
    Value *JmpBufPtr = GetElementPtrInst::Create(JmpBuf, &Idx[0], &Idx[2],
                                                 "TheJmpBuf",
                                                 EntryBB->getTerminator());
    JmpBufPtr = new BitCastInst(JmpBufPtr,
                                Type::getInt8PtrTy(F.getContext()),
                                "tmp", EntryBB->getTerminator());
    Value *SJRet = CallInst::Create(SetJmpFn, JmpBufPtr, "sjret",
                                    EntryBB->getTerminator());

    // Compare the return value to zero.
    Value *IsNormal = new ICmpInst(EntryBB->getTerminator(),
                                   ICmpInst::ICMP_EQ, SJRet,
                                   Constant::getNullValue(SJRet->getType()),
                                   "notunwind");
    // Nuke the uncond branch.
    EntryBB->getTerminator()->eraseFromParent();

    // Put in a new condbranch in its place.
    BranchInst::Create(ContBlock, CatchBB, IsNormal, EntryBB);

    // At this point, we are all set up; rewrite each invoke instruction.
    for (unsigned i = 0, e = Invokes.size(); i != e; ++i)
      rewriteExpensiveInvoke(Invokes[i], i+1, InvokeNum, StackPtr, CatchSwitch);
  }

  // We know that there is at least one unwind.

  // Create three new blocks, the block to load the jmpbuf ptr and compare
  // against null, the block to do the longjmp, and the error block for if it
  // is null. Add them at the end of the function because they are not hot.
  BasicBlock *UnwindHandler = BasicBlock::Create(F.getContext(),
                                                 "dounwind", &F);
  BasicBlock *UnwindBlock = BasicBlock::Create(F.getContext(), "unwind", &F);
  BasicBlock *TermBlock = BasicBlock::Create(F.getContext(), "unwinderror", &F);

  // If this function contains an invoke, restore the old jumpbuf ptr.
  Value *BufPtr;
  if (OldJmpBufPtr) {
    // Before the return, insert a copy from the saved value to the new value.
    BufPtr = new LoadInst(OldJmpBufPtr, "oldjmpbufptr", UnwindHandler);
    new StoreInst(BufPtr, JBListHead, UnwindHandler);
  } else {
    BufPtr = new LoadInst(JBListHead, "ehlist", UnwindHandler);
  }

  // Load the JBList; if it's null, then there was no catch!
  Value *NotNull = new ICmpInst(*UnwindHandler, ICmpInst::ICMP_NE, BufPtr,
                                Constant::getNullValue(BufPtr->getType()),
                                "notnull");
  BranchInst::Create(UnwindBlock, TermBlock, NotNull, UnwindHandler);

  // Create the block to do the longjmp.
  // Get a pointer to the jmpbuf and longjmp.
  Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
                   ConstantInt::get(Type::getInt32Ty(F.getContext()), 0) };
  Idx[0] = GetElementPtrInst::Create(BufPtr, &Idx[0], &Idx[2], "JmpBuf",
                                     UnwindBlock);
  Idx[0] = new BitCastInst(Idx[0],
                           Type::getInt8PtrTy(F.getContext()),
                           "tmp", UnwindBlock);
  Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 1);
  CallInst::Create(LongJmpFn, &Idx[0], &Idx[2], "", UnwindBlock);
  new UnreachableInst(F.getContext(), UnwindBlock);

  // Set up the term block ("throw without a catch").
  new UnreachableInst(F.getContext(), TermBlock);

  // Insert a call to abort().
  CallInst::Create(AbortFn, "",
                   TermBlock->getTerminator())->setTailCall();

  // Replace all unwinds with a branch to the unwind handler.
  for (unsigned i = 0, e = Unwinds.size(); i != e; ++i) {
    BranchInst::Create(UnwindHandler, Unwinds[i]);
    Unwinds[i]->eraseFromParent();
  }

  // Finally, for any returns from this function, if this function contains an
  // invoke, restore the old jmpbuf pointer to its input value.
  if (OldJmpBufPtr) {
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *R = Returns[i];

      // Before the return, insert a copy from the saved value to the new value.
      Value *OldBuf = new LoadInst(OldJmpBufPtr, "oldjmpbufptr", true, R);
      new StoreInst(OldBuf, JBListHead, true, R);
    }
  }

  return true;
}

bool LowerInvoke::runOnFunction(Function &F) {
  if (useExpensiveEHSupport)
    return insertExpensiveEHSupport(F);
  else
    return insertCheapEHSupport(F);
}