//===- LowerInvoke.cpp - Eliminate Invoke & Unwind instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation is designed for use by code generators which do not yet
// support stack unwinding.  This pass supports two models of exception
// handling lowering: the 'cheap' support and the 'expensive' support.
//
// 'Cheap' exception handling support gives the program the ability to execute
// any program which does not "throw an exception", by turning 'invoke'
// instructions into calls and by turning 'unwind' instructions into calls to
// abort().  If the program does dynamically use the 'unwind' instruction, the
// program will print a message then abort.
//
// 'Expensive' exception handling support gives full exception handling
// support to the program at the cost of making the 'invoke' instruction
// really expensive.  It basically inserts setjmp/longjmp calls to emulate
// exception handling as necessary.
//
// Because the 'expensive' support slows down programs a lot, and EH is only
// used by a subset of programs, it must be specifically enabled by an option.
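//
// For illustration, the 'cheap' model lowers an invoke roughly like this
// (schematic IR, not taken from a real test case):
//
//   invoke void @foo() to label %normal unwind label %lpad
//     ==>
//   call void @foo()
//   br label %normal
//
// and each 'unwind' becomes a call to abort() followed by a return (of a
// null value for non-void functions, since that point is unreachable).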
//
// Note that after this pass runs the CFG is not entirely accurate (exceptional
// control flow edges are not correct anymore) so only very simple things
// should be done after the lowerinvoke pass has run (like generation of native
// code).  This should not be used as a general purpose "my LLVM-to-LLVM pass
// doesn't support the invoke instruction yet" lowering pass.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "lowerinvoke"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include <csetjmp>
#include <set>
using namespace llvm;

STATISTIC(NumInvokes, "Number of invokes replaced");
STATISTIC(NumUnwinds, "Number of unwinds replaced");
STATISTIC(NumSpilled, "Number of registers live across unwind edges");

static cl::opt<bool> ExpensiveEHSupport("enable-correct-eh-support",
 cl::desc("Make the -lowerinvoke pass insert expensive, but correct, EH code"));

namespace {
  class LowerInvoke : public FunctionPass {
    // Used for both models.
    Constant *AbortFn;

    // Used for expensive EH support.
    const Type *JBLinkTy;
    GlobalVariable *JBListHead;
    Constant *SetJmpFn, *LongJmpFn, *StackSaveFn, *StackRestoreFn;
    bool useExpensiveEHSupport;

    // We peek in TLI to grab the target's jmp_buf size and alignment.
    const TargetLowering *TLI;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit LowerInvoke(const TargetLowering *tli = NULL,
                         bool useExpensiveEHSupport = ExpensiveEHSupport)
      : FunctionPass(ID), useExpensiveEHSupport(useExpensiveEHSupport),
        TLI(tli) {
      initializeLowerInvokePass(*PassRegistry::getPassRegistry());
    }
    bool doInitialization(Module &M);
    bool runOnFunction(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      // This is a cluster of orthogonal Transforms.
      AU.addPreserved("mem2reg");
      AU.addPreservedID(LowerSwitchID);
    }

  private:
    bool insertCheapEHSupport(Function &F);
    void splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes);
    void rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
                                AllocaInst *InvokeNum, AllocaInst *StackPtr,
                                SwitchInst *CatchSwitch);
    bool insertExpensiveEHSupport(Function &F);
  };
}

char LowerInvoke::ID = 0;
INITIALIZE_PASS(LowerInvoke, "lowerinvoke",
                "Lower invoke and unwind, for unwindless code generators",
                false, false)

char &llvm::LowerInvokePassID = LowerInvoke::ID;

// Public Interface To the LowerInvoke pass.
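// A typical client (sketch only; e.g. a backend that cannot unwind yet)
// might do:
//   PM.add(createLowerInvokePass(TM.getTargetLowering()));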
FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI) {
  return new LowerInvoke(TLI, ExpensiveEHSupport);
}
FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI,
                                          bool useExpensiveEHSupport) {
  return new LowerInvoke(TLI, useExpensiveEHSupport);
}

// doInitialization - Make sure that there is a prototype for abort in the
// current module.
bool LowerInvoke::doInitialization(Module &M) {
  const Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext());
  if (useExpensiveEHSupport) {
    // Insert a type for the linked list of jump buffers.
    unsigned JBSize = TLI ? TLI->getJumpBufSize() : 0;
    JBSize = JBSize ? JBSize : 200;
    const Type *JmpBufTy = ArrayType::get(VoidPtrTy, JBSize);

    { // The type is recursive, so use a type holder.
      std::vector<const Type*> Elements;
      Elements.push_back(JmpBufTy);
      OpaqueType *OT = OpaqueType::get(M.getContext());
      Elements.push_back(PointerType::getUnqual(OT));
      PATypeHolder JBLType(StructType::get(M.getContext(), Elements));
      OT->refineAbstractTypeTo(JBLType.get());  // Complete the cycle.
      JBLinkTy = JBLType.get();
      M.addTypeName("llvm.sjljeh.jmpbufty", JBLinkTy);
    }
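
    // The node type built above is effectively:
    //   %llvm.sjljeh.jmpbufty = type { [JBSize x i8*], %llvm.sjljeh.jmpbufty* }
    // i.e. a jmp_buf-sized array plus a link to the next buffer on the chain.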

    const Type *PtrJBList = PointerType::getUnqual(JBLinkTy);

    // Now that we've done that, insert the jmpbuf list head global, unless it
    // already exists.
    if (!(JBListHead = M.getGlobalVariable("llvm.sjljeh.jblist", PtrJBList))) {
      JBListHead = new GlobalVariable(M, PtrJBList, false,
                                      GlobalValue::LinkOnceAnyLinkage,
                                      Constant::getNullValue(PtrJBList),
                                      "llvm.sjljeh.jblist");
    }

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
    !defined(setjmp_undefined_for_msvc)
#  pragma push_macro("setjmp")
#  undef setjmp
#  define setjmp_undefined_for_msvc
#endif

    SetJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::setjmp);

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
    // let's return setjmp to its _setjmp state
#  pragma pop_macro("setjmp")
#  undef setjmp_undefined_for_msvc
#endif

    LongJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::longjmp);
    StackSaveFn = Intrinsic::getDeclaration(&M, Intrinsic::stacksave);
    StackRestoreFn = Intrinsic::getDeclaration(&M, Intrinsic::stackrestore);
  }

  // We need the 'abort' function for both models.
  AbortFn = M.getOrInsertFunction("abort", Type::getVoidTy(M.getContext()),
                                  (Type *)0);
  return true;
}

bool LowerInvoke::insertCheapEHSupport(Function &F) {
  bool Changed = false;
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
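      // Note: the last three operands of an invoke are not call arguments
      // (they are the callee and the two destination blocks), so strip them.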
      SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
      // Insert a normal call instruction...
      CallInst *NewCall = CallInst::Create(II->getCalledValue(),
                                           CallArgs.begin(), CallArgs.end(),
                                           "", II);
      NewCall->takeName(II);
      NewCall->setCallingConv(II->getCallingConv());
      NewCall->setAttributes(II->getAttributes());
      NewCall->setDebugLoc(II->getDebugLoc());
      II->replaceAllUsesWith(NewCall);

      // Insert an unconditional branch to the normal destination.
      BranchInst::Create(II->getNormalDest(), II);

      // Remove any PHI node entries from the exception destination.
      II->getUnwindDest()->removePredecessor(BB);

      // Remove the invoke instruction now.
      BB->getInstList().erase(II);

      ++NumInvokes; Changed = true;
    } else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      // Insert a call to abort().
      CallInst::Create(AbortFn, "", UI)->setTailCall();

      // Insert a return instruction.  This really should be a "barrier", as it
      // is unreachable.
      ReturnInst::Create(F.getContext(),
                         F.getReturnType()->isVoidTy() ?
                           0 : Constant::getNullValue(F.getReturnType()), UI);

      // Remove the unwind instruction now.
      BB->getInstList().erase(UI);

      ++NumUnwinds; Changed = true;
    }
  return Changed;
}

/// rewriteExpensiveInvoke - Insert code and hack the function to replace the
/// specified invoke instruction with a call.
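///
/// Schematically, invoke number N becomes (stack save/restore omitted;
/// illustrative IR only):
///
///   store volatile i32 N, i32* %invokenum
///   call ...                        ; the old invoke, now a plain call
///   br label %normal.dest
///   ; at the top of the normal dest:
///   store i32 0, i32* %invokenum
///
/// and CatchSwitch gains a case mapping N to the invoke's unwind dest.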
void LowerInvoke::rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
                                         AllocaInst *InvokeNum,
                                         AllocaInst *StackPtr,
                                         SwitchInst *CatchSwitch) {
  ConstantInt *InvokeNoC = ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                            InvokeNo);

  // If the unwind edge has phi nodes, split the edge.
  if (isa<PHINode>(II->getUnwindDest()->begin())) {
    SplitCriticalEdge(II, 1, this);

    // If there are any phi nodes left, they must have a single predecessor.
    while (PHINode *PN = dyn_cast<PHINode>(II->getUnwindDest()->begin())) {
      PN->replaceAllUsesWith(PN->getIncomingValue(0));
      PN->eraseFromParent();
    }
  }

  // Insert a store of the invoke num before the invoke and store zero into the
  // location afterward.
  new StoreInst(InvokeNoC, InvokeNum, true, II);  // volatile

  // Insert a store of the stack ptr before the invoke, so we can restore it
  // later in the exception case.
  CallInst *StackSaveRet = CallInst::Create(StackSaveFn, "ssret", II);
  new StoreInst(StackSaveRet, StackPtr, true, II);  // volatile

  BasicBlock::iterator NI = II->getNormalDest()->getFirstNonPHI();
  // nonvolatile.
  new StoreInst(Constant::getNullValue(Type::getInt32Ty(II->getContext())),
                InvokeNum, false, NI);

  Instruction *StackPtrLoad =
    new LoadInst(StackPtr, "stackptr.restore", true,
                 II->getUnwindDest()->getFirstNonPHI());
  CallInst::Create(StackRestoreFn, StackPtrLoad, "")->insertAfter(StackPtrLoad);

  // Add a switch case to our unwind block.
  CatchSwitch->addCase(InvokeNoC, II->getUnwindDest());

  // Insert a normal call instruction.
  SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
  CallInst *NewCall = CallInst::Create(II->getCalledValue(),
                                       CallArgs.begin(), CallArgs.end(), "",
                                       II);
  NewCall->takeName(II);
  NewCall->setCallingConv(II->getCallingConv());
  NewCall->setAttributes(II->getAttributes());
  NewCall->setDebugLoc(II->getDebugLoc());
  II->replaceAllUsesWith(NewCall);

  // Replace the invoke with an uncond branch.
  BranchInst::Create(II->getNormalDest(), NewCall->getParent());
  II->eraseFromParent();
}

/// MarkBlocksLiveIn - Insert BB and all of its predecessors into LiveBBs until
/// we reach blocks we've already seen.
static void MarkBlocksLiveIn(BasicBlock *BB, std::set<BasicBlock*> &LiveBBs) {
  if (!LiveBBs.insert(BB).second) return;  // already been here.

  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
    MarkBlocksLiveIn(*PI, LiveBBs);
}

// First thing we need to do is scan the whole function for values that are
// live across unwind edges.  Each value that is live across an unwind edge
// we spill into a stack location, guaranteeing that there is nothing live
// across the unwind edge.  This process also splits all critical edges
// coming out of invoke's.
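//
// Spilling matters because, as with ordinary setjmp/longjmp code in C, only
// memory is guaranteed to hold its value when the entry-block setjmp returns
// a second time; values kept in registers may be clobbered.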
void LowerInvoke::
splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes) {
  // First step, split all critical edges from invoke instructions.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    InvokeInst *II = Invokes[i];
    SplitCriticalEdge(II, 0, this);
    SplitCriticalEdge(II, 1, this);
    assert(!isa<PHINode>(II->getNormalDest()->begin()) &&
           !isa<PHINode>(II->getUnwindDest()->begin()) &&
           "critical edge splitting left single entry phi nodes?");
  }

  Function *F = Invokes.back()->getParent()->getParent();

  // To avoid having to handle incoming arguments specially, we lower each arg
  // to a copy instruction in the entry block.  This ensures that the argument
  // value itself cannot be live across the entry block.
  BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
  while (isa<AllocaInst>(AfterAllocaInsertPt) &&
         isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
    ++AfterAllocaInsertPt;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI) {
    const Type *Ty = AI->getType();
    // Aggregate types can't be cast, but are legal argument types, so we have
    // to handle them differently.  We use an extract/insert pair as a
    // lightweight method to achieve the same goal.
    if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
      Instruction *EI = ExtractValueInst::Create(AI, 0, "", AfterAllocaInsertPt);
      Instruction *NI = InsertValueInst::Create(AI, EI, 0);
      NI->insertAfter(EI);
      AI->replaceAllUsesWith(NI);
      // Set the operand of the instructions back to the argument.
      EI->setOperand(0, AI);
      NI->setOperand(0, AI);
    } else {
      // This is always a no-op cast because we're casting AI to AI->getType(),
      // so the source and destination types are identical.  BitCast is the
      // only possibility.
      CastInst *NC = new BitCastInst(
        AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
      AI->replaceAllUsesWith(NC);
      // Set the operand of the cast instruction back to the argument.
      // Normally it's forbidden to replace a CastInst's operand because it
      // could cause the opcode to reflect an illegal conversion.  However,
      // we're replacing it here with the same value it was constructed with.
      // We do this because the above replaceAllUsesWith() clobbered the
      // operand, but we want this one to remain.
      NC->setOperand(0, AI);
    }
  }

  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      // Ignore obvious cases we don't have to handle.  In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block.  Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      SmallVector<Instruction*,16> Users;
      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
           UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Scan all of the uses and see if the live range is live across an
      // unwind edge.  If we find a use live across an invoke edge, create an
      // alloca and spill the value.
      std::set<InvokeInst*> InvokesWithStoreInserted;

      // Find all of the blocks that this value is live in.
      std::set<BasicBlock*> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          NeedsSpill = true;
        }
      }

      // If we decided we need a spill, do it.
      if (NeedsSpill) {
        ++NumSpilled;
        DemoteRegToStack(*Inst, true);
      }
    }
}

bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
  SmallVector<ReturnInst*,16> Returns;
  SmallVector<UnwindInst*,16> Unwinds;
  SmallVector<InvokeInst*,16> Invokes;

  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
      // Remember all return instructions in case we insert an invoke into this
      // function.
      Returns.push_back(RI);
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      Invokes.push_back(II);
    } else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      Unwinds.push_back(UI);
    }

  if (Unwinds.empty() && Invokes.empty()) return false;

  NumInvokes += Invokes.size();
  NumUnwinds += Unwinds.size();

  // TODO: This is not an optimal way to do this.  In particular, this always
  // inserts setjmp calls into the entries of functions with invoke
  // instructions, even though there are possibly paths through the function
  // that do not execute any invokes.  In particular, for functions with early
  // exits, e.g. the 'addMove' method in hexxagon, it would be nice to not have
  // to do the setjmp stuff on the early exit path.  This requires a bit of
  // dataflow, but would not be too hard to do.

  // If we have an invoke instruction, insert a setjmp that dominates all
  // invokes.  After the setjmp, use a cond branch that goes to the original
  // code path on zero, and to a designated 'catch' block on nonzero.
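  //
  // Schematically, the entry block ends up as (illustrative IR only):
  //
  //   entry:
  //     ...allocas; push the new jmpbuf onto the llvm.sjljeh.jblist chain...
  //     %sjret = call i32 @llvm.setjmp(i8* %buf)
  //     %notunwind = icmp eq i32 %sjret, 0
  //     br i1 %notunwind, label %setjmp.cont, label %setjmp.catch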
  Value *OldJmpBufPtr = 0;
  if (!Invokes.empty()) {
    // First thing we need to do is scan the whole function for values that are
    // live across unwind edges.  Each value that is live across an unwind edge
    // we spill into a stack location, guaranteeing that there is nothing live
    // across the unwind edge.  This process also splits all critical edges
    // coming out of invoke's.
    splitLiveRangesLiveAcrossInvokes(Invokes);

    BasicBlock *EntryBB = F.begin();

    // Create an alloca for the incoming jump buffer ptr and the new jump
    // buffer that needs to be restored on all exits from the function.  This
    // is an alloca because the value needs to be live across invokes.
    unsigned Align = TLI ? TLI->getJumpBufAlignment() : 0;
    AllocaInst *JmpBuf =
      new AllocaInst(JBLinkTy, 0, Align,
                     "jblink", F.begin()->begin());

    Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
                     ConstantInt::get(Type::getInt32Ty(F.getContext()), 1) };
    OldJmpBufPtr = GetElementPtrInst::Create(JmpBuf, &Idx[0], &Idx[2],
                                             "OldBuf",
                                             EntryBB->getTerminator());

    // Copy the JBListHead to the alloca.
    Value *OldBuf = new LoadInst(JBListHead, "oldjmpbufptr", true,
                                 EntryBB->getTerminator());
    new StoreInst(OldBuf, OldJmpBufPtr, true, EntryBB->getTerminator());

    // Add the new jumpbuf to the list.
    new StoreInst(JmpBuf, JBListHead, true, EntryBB->getTerminator());

    // Create the catch block.  The catch block is basically a big switch
    // statement that goes to all of the invoke catch blocks.
    BasicBlock *CatchBB =
      BasicBlock::Create(F.getContext(), "setjmp.catch", &F);

    // Create an alloca which keeps track of the stack pointer before every
    // invoke; this allows us to properly restore the stack pointer after
    // long jumping.
    AllocaInst *StackPtr = new AllocaInst(Type::getInt8PtrTy(F.getContext()), 0,
                                          "stackptr", EntryBB->begin());

    // Create an alloca which keeps track of which invoke is currently
    // executing.  For normal calls it contains zero.
    AllocaInst *InvokeNum = new AllocaInst(Type::getInt32Ty(F.getContext()), 0,
                                           "invokenum", EntryBB->begin());
    new StoreInst(ConstantInt::get(Type::getInt32Ty(F.getContext()), 0),
                  InvokeNum, true, EntryBB->getTerminator());

    // Insert a load in the Catch block, and a switch on its value.  By
    // default, we go to a block that just does an unwind (which is the correct
    // action for a standard call).
    BasicBlock *UnwindBB = BasicBlock::Create(F.getContext(), "unwindbb", &F);
    Unwinds.push_back(new UnwindInst(F.getContext(), UnwindBB));

    Value *CatchLoad = new LoadInst(InvokeNum, "invoke.num", true, CatchBB);
    SwitchInst *CatchSwitch =
      SwitchInst::Create(CatchLoad, UnwindBB, Invokes.size(), CatchBB);

    // Now that things are set up, insert the setjmp call itself.

    // Split the entry block to insert the conditional branch for the setjmp.
    BasicBlock *ContBlock = EntryBB->splitBasicBlock(EntryBB->getTerminator(),
                                                     "setjmp.cont");

    Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 0);
    Value *JmpBufPtr = GetElementPtrInst::Create(JmpBuf, &Idx[0], &Idx[2],
                                                 "TheJmpBuf",
                                                 EntryBB->getTerminator());
    JmpBufPtr = new BitCastInst(JmpBufPtr,
                                Type::getInt8PtrTy(F.getContext()),
                                "tmp", EntryBB->getTerminator());
    Value *SJRet = CallInst::Create(SetJmpFn, JmpBufPtr, "sjret",
                                    EntryBB->getTerminator());

    // Compare the return value to zero.
    Value *IsNormal = new ICmpInst(EntryBB->getTerminator(),
                                   ICmpInst::ICMP_EQ, SJRet,
                                   Constant::getNullValue(SJRet->getType()),
                                   "notunwind");
    // Nuke the uncond branch.
    EntryBB->getTerminator()->eraseFromParent();

    // Put in a new condbranch in its place.
    BranchInst::Create(ContBlock, CatchBB, IsNormal, EntryBB);

    // At this point, we are all set up; rewrite each invoke instruction.
    for (unsigned i = 0, e = Invokes.size(); i != e; ++i)
      rewriteExpensiveInvoke(Invokes[i], i+1, InvokeNum, StackPtr, CatchSwitch);
  }

  // We know that there is at least one unwind.

  // Create three new blocks: the block to load the jmpbuf ptr and compare it
  // against null, the block to do the longjmp, and the error block for if the
  // pointer is null.  Add them at the end of the function because they are
  // not hot.
  BasicBlock *UnwindHandler = BasicBlock::Create(F.getContext(),
                                                 "dounwind", &F);
  BasicBlock *UnwindBlock = BasicBlock::Create(F.getContext(), "unwind", &F);
  BasicBlock *TermBlock = BasicBlock::Create(F.getContext(), "unwinderror", &F);

  // If this function contains an invoke, restore the old jumpbuf ptr.
  Value *BufPtr;
  if (OldJmpBufPtr) {
    // Before the return, insert a copy from the saved value to the new value.
    BufPtr = new LoadInst(OldJmpBufPtr, "oldjmpbufptr", UnwindHandler);
    new StoreInst(BufPtr, JBListHead, UnwindHandler);
  } else {
    BufPtr = new LoadInst(JBListHead, "ehlist", UnwindHandler);
  }

  // Load the JBList; if it's null, then there was no catch!
  Value *NotNull = new ICmpInst(*UnwindHandler, ICmpInst::ICMP_NE, BufPtr,
                                Constant::getNullValue(BufPtr->getType()),
                                "notnull");
  BranchInst::Create(UnwindBlock, TermBlock, NotNull, UnwindHandler);

  // Create the block to do the longjmp.
  // Get a pointer to the jmpbuf and longjmp.
  Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
                   ConstantInt::get(Type::getInt32Ty(F.getContext()), 0) };
  Idx[0] = GetElementPtrInst::Create(BufPtr, &Idx[0], &Idx[2], "JmpBuf",
                                     UnwindBlock);
  Idx[0] = new BitCastInst(Idx[0],
                           Type::getInt8PtrTy(F.getContext()),
                           "tmp", UnwindBlock);
  Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 1);
  CallInst::Create(LongJmpFn, &Idx[0], &Idx[2], "", UnwindBlock);
  new UnreachableInst(F.getContext(), UnwindBlock);

  // Set up the term block ("throw without a catch").
  new UnreachableInst(F.getContext(), TermBlock);

  // Insert a call to abort().
  CallInst::Create(AbortFn, "",
                   TermBlock->getTerminator())->setTailCall();

  // Replace all unwinds with a branch to the unwind handler.
  for (unsigned i = 0, e = Unwinds.size(); i != e; ++i) {
    BranchInst::Create(UnwindHandler, Unwinds[i]);
    Unwinds[i]->eraseFromParent();
  }

  // Finally, for any returns from this function, if this function contains an
  // invoke, restore the old jmpbuf pointer to its input value.
  if (OldJmpBufPtr) {
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *R = Returns[i];

      // Before the return, insert a copy from the saved value to the new
      // value.
      Value *OldBuf = new LoadInst(OldJmpBufPtr, "oldjmpbufptr", true, R);
      new StoreInst(OldBuf, JBListHead, true, R);
    }
  }

  return true;
}

bool LowerInvoke::runOnFunction(Function &F) {
  if (useExpensiveEHSupport)
    return insertExpensiveEHSupport(F);
  else
    return insertCheapEHSupport(F);
}