//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGDebugInfo.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/InlineAsm.h"
#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//
31 void CodeGenFunction::EmitStopPoint(const Stmt
*S
) {
32 if (CGDebugInfo
*DI
= getDebugInfo()) {
33 DI
->setLocation(S
->getLocStart());
34 DI
->EmitStopPoint(CurFn
, Builder
);
38 void CodeGenFunction::EmitStmt(const Stmt
*S
) {
39 assert(S
&& "Null statement?");
41 // Check if we can handle this without bothering to generate an
42 // insert point or debug info.
43 if (EmitSimpleStmt(S
))
46 // Check if we are generating unreachable code.
47 if (!HaveInsertPoint()) {
48 // If so, and the statement doesn't contain a label, then we do not need to
49 // generate actual code. This is safe because (1) the current point is
50 // unreachable, so we don't need to execute the code, and (2) we've already
51 // handled the statements which update internal data structures (like the
52 // local variable map) which could be used by subsequent statements.
53 if (!ContainsLabel(S
)) {
54 // Verify that any decl statements were handled as simple, they may be in
55 // scope of subsequent reachable statements.
56 assert(!isa
<DeclStmt
>(*S
) && "Unexpected DeclStmt!");
60 // Otherwise, make a new block to hold the code.
64 // Generate a stoppoint if we are emitting debug info.
67 switch (S
->getStmtClass()) {
69 // Must be an expression in a stmt context. Emit the value (to get
70 // side-effects) and ignore the result.
71 if (const Expr
*E
= dyn_cast
<Expr
>(S
)) {
72 EmitAnyExpr(E
, 0, false, true);
74 ErrorUnsupported(S
, "statement");
77 case Stmt::IndirectGotoStmtClass
:
78 EmitIndirectGotoStmt(cast
<IndirectGotoStmt
>(*S
)); break;
80 case Stmt::IfStmtClass
: EmitIfStmt(cast
<IfStmt
>(*S
)); break;
81 case Stmt::WhileStmtClass
: EmitWhileStmt(cast
<WhileStmt
>(*S
)); break;
82 case Stmt::DoStmtClass
: EmitDoStmt(cast
<DoStmt
>(*S
)); break;
83 case Stmt::ForStmtClass
: EmitForStmt(cast
<ForStmt
>(*S
)); break;
85 case Stmt::ReturnStmtClass
: EmitReturnStmt(cast
<ReturnStmt
>(*S
)); break;
87 case Stmt::SwitchStmtClass
: EmitSwitchStmt(cast
<SwitchStmt
>(*S
)); break;
88 case Stmt::AsmStmtClass
: EmitAsmStmt(cast
<AsmStmt
>(*S
)); break;
90 case Stmt::ObjCAtTryStmtClass
:
91 EmitObjCAtTryStmt(cast
<ObjCAtTryStmt
>(*S
));
93 case Stmt::ObjCAtCatchStmtClass
:
94 assert(0 && "@catch statements should be handled by EmitObjCAtTryStmt");
96 case Stmt::ObjCAtFinallyStmtClass
:
97 assert(0 && "@finally statements should be handled by EmitObjCAtTryStmt");
99 case Stmt::ObjCAtThrowStmtClass
:
100 EmitObjCAtThrowStmt(cast
<ObjCAtThrowStmt
>(*S
));
102 case Stmt::ObjCAtSynchronizedStmtClass
:
103 EmitObjCAtSynchronizedStmt(cast
<ObjCAtSynchronizedStmt
>(*S
));
105 case Stmt::ObjCForCollectionStmtClass
:
106 EmitObjCForCollectionStmt(cast
<ObjCForCollectionStmt
>(*S
));
111 bool CodeGenFunction::EmitSimpleStmt(const Stmt
*S
) {
112 switch (S
->getStmtClass()) {
113 default: return false;
114 case Stmt::NullStmtClass
: break;
115 case Stmt::CompoundStmtClass
: EmitCompoundStmt(cast
<CompoundStmt
>(*S
)); break;
116 case Stmt::DeclStmtClass
: EmitDeclStmt(cast
<DeclStmt
>(*S
)); break;
117 case Stmt::LabelStmtClass
: EmitLabelStmt(cast
<LabelStmt
>(*S
)); break;
118 case Stmt::GotoStmtClass
: EmitGotoStmt(cast
<GotoStmt
>(*S
)); break;
119 case Stmt::BreakStmtClass
: EmitBreakStmt(cast
<BreakStmt
>(*S
)); break;
120 case Stmt::ContinueStmtClass
: EmitContinueStmt(cast
<ContinueStmt
>(*S
)); break;
121 case Stmt::DefaultStmtClass
: EmitDefaultStmt(cast
<DefaultStmt
>(*S
)); break;
122 case Stmt::CaseStmtClass
: EmitCaseStmt(cast
<CaseStmt
>(*S
)); break;
128 /// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
129 /// this captures the expression result of the last sub-statement and returns it
130 /// (for use by the statement expression extension).
131 RValue
CodeGenFunction::EmitCompoundStmt(const CompoundStmt
&S
, bool GetLast
,
132 llvm::Value
*AggLoc
, bool isAggVol
) {
133 PrettyStackTraceLoc
CrashInfo(getContext().getSourceManager(),S
.getLBracLoc(),
134 "LLVM IR generation of compound statement ('{}')");
136 CGDebugInfo
*DI
= getDebugInfo();
139 DI
->setLocation(S
.getLBracLoc());
140 // FIXME: The llvm backend is currently not ready to deal with region_end
141 // for block scoping. In the presence of always_inline functions it gets so
142 // confused that it doesn't emit any debug info. Just disable this for now.
143 //DI->EmitRegionStart(CurFn, Builder);
146 // Keep track of the current cleanup stack depth.
147 size_t CleanupStackDepth
= CleanupEntries
.size();
148 bool OldDidCallStackSave
= DidCallStackSave
;
149 DidCallStackSave
= false;
151 for (CompoundStmt::const_body_iterator I
= S
.body_begin(),
152 E
= S
.body_end()-GetLast
; I
!= E
; ++I
)
157 DI
->setLocation(S
.getRBracLoc());
159 // FIXME: The llvm backend is currently not ready to deal with region_end
160 // for block scoping. In the presence of always_inline functions it gets so
161 // confused that it doesn't emit any debug info. Just disable this for now.
162 //DI->EmitRegionEnd(CurFn, Builder);
169 // We have to special case labels here. They are statements, but when put
170 // at the end of a statement expression, they yield the value of their
171 // subexpression. Handle this by walking through all labels we encounter,
172 // emitting them before we evaluate the subexpr.
173 const Stmt
*LastStmt
= S
.body_back();
174 while (const LabelStmt
*LS
= dyn_cast
<LabelStmt
>(LastStmt
)) {
176 LastStmt
= LS
->getSubStmt();
181 RV
= EmitAnyExpr(cast
<Expr
>(LastStmt
), AggLoc
);
184 DidCallStackSave
= OldDidCallStackSave
;
186 EmitCleanupBlocks(CleanupStackDepth
);
191 void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock
*BB
) {
192 llvm::BranchInst
*BI
= dyn_cast
<llvm::BranchInst
>(BB
->getTerminator());
194 // If there is a cleanup stack, then we it isn't worth trying to
195 // simplify this block (we would need to remove it from the scope map
196 // and cleanup entry).
197 if (!CleanupEntries
.empty())
200 // Can only simplify direct branches.
201 if (!BI
|| !BI
->isUnconditional())
204 BB
->replaceAllUsesWith(BI
->getSuccessor(0));
205 BI
->eraseFromParent();
206 BB
->eraseFromParent();
209 void CodeGenFunction::EmitBlock(llvm::BasicBlock
*BB
, bool IsFinished
) {
210 // Fall out of the current block (if necessary).
213 if (IsFinished
&& BB
->use_empty()) {
218 // If necessary, associate the block with the cleanup stack size.
219 if (!CleanupEntries
.empty()) {
220 // Check if the basic block has already been inserted.
221 BlockScopeMap::iterator I
= BlockScopes
.find(BB
);
222 if (I
!= BlockScopes
.end()) {
223 assert(I
->second
== CleanupEntries
.size() - 1);
225 BlockScopes
[BB
] = CleanupEntries
.size() - 1;
226 CleanupEntries
.back().Blocks
.push_back(BB
);
230 CurFn
->getBasicBlockList().push_back(BB
);
231 Builder
.SetInsertPoint(BB
);
234 void CodeGenFunction::EmitBranch(llvm::BasicBlock
*Target
) {
235 // Emit a branch from the current block to the target one if this
236 // was a real block. If this was just a fall-through block after a
237 // terminator, don't emit it.
238 llvm::BasicBlock
*CurBB
= Builder
.GetInsertBlock();
240 if (!CurBB
|| CurBB
->getTerminator()) {
241 // If there is no insert point or the previous block is already
242 // terminated, don't touch it.
244 // Otherwise, create a fall-through branch.
245 Builder
.CreateBr(Target
);
248 Builder
.ClearInsertionPoint();
251 void CodeGenFunction::EmitLabel(const LabelStmt
&S
) {
252 EmitBlock(getBasicBlockForLabel(&S
));
256 void CodeGenFunction::EmitLabelStmt(const LabelStmt
&S
) {
258 EmitStmt(S
.getSubStmt());
261 void CodeGenFunction::EmitGotoStmt(const GotoStmt
&S
) {
262 // If this code is reachable then emit a stop point (if generating
263 // debug info). We have to do this ourselves because we are on the
264 // "simple" statement path.
265 if (HaveInsertPoint())
268 EmitBranchThroughCleanup(getBasicBlockForLabel(S
.getLabel()));
271 void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt
&S
) {
272 // Emit initial switch which will be patched up later by
273 // EmitIndirectSwitches(). We need a default dest, so we use the
274 // current BB, but this is overwritten.
275 llvm::Value
*V
= Builder
.CreatePtrToInt(EmitScalarExpr(S
.getTarget()),
278 llvm::SwitchInst
*I
= Builder
.CreateSwitch(V
, Builder
.GetInsertBlock());
279 IndirectSwitches
.push_back(I
);
281 // Clear the insertion point to indicate we are in unreachable code.
282 Builder
.ClearInsertionPoint();
285 void CodeGenFunction::EmitIfStmt(const IfStmt
&S
) {
286 // C99 6.8.4.1: The first substatement is executed if the expression compares
287 // unequal to 0. The condition must be a scalar type.
289 // If the condition constant folds and can be elided, try to avoid emitting
290 // the condition and the dead arm of the if/else.
291 if (int Cond
= ConstantFoldsToSimpleInteger(S
.getCond())) {
292 // Figure out which block (then or else) is executed.
293 const Stmt
*Executed
= S
.getThen(), *Skipped
= S
.getElse();
294 if (Cond
== -1) // Condition false?
295 std::swap(Executed
, Skipped
);
297 // If the skipped block has no labels in it, just emit the executed block.
298 // This avoids emitting dead code and simplifies the CFG substantially.
299 if (!ContainsLabel(Skipped
)) {
306 // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
307 // the conditional branch.
308 llvm::BasicBlock
*ThenBlock
= createBasicBlock("if.then");
309 llvm::BasicBlock
*ContBlock
= createBasicBlock("if.end");
310 llvm::BasicBlock
*ElseBlock
= ContBlock
;
312 ElseBlock
= createBasicBlock("if.else");
313 EmitBranchOnBoolExpr(S
.getCond(), ThenBlock
, ElseBlock
);
315 // Emit the 'then' code.
316 EmitBlock(ThenBlock
);
317 EmitStmt(S
.getThen());
318 EmitBranch(ContBlock
);
320 // Emit the 'else' code if present.
321 if (const Stmt
*Else
= S
.getElse()) {
322 EmitBlock(ElseBlock
);
324 EmitBranch(ContBlock
);
327 // Emit the continuation block for code after the if.
328 EmitBlock(ContBlock
, true);
331 void CodeGenFunction::EmitWhileStmt(const WhileStmt
&S
) {
332 // Emit the header for the loop, insert it, which will create an uncond br to
334 llvm::BasicBlock
*LoopHeader
= createBasicBlock("while.cond");
335 EmitBlock(LoopHeader
);
337 // Create an exit block for when the condition fails, create a block for the
339 llvm::BasicBlock
*ExitBlock
= createBasicBlock("while.end");
340 llvm::BasicBlock
*LoopBody
= createBasicBlock("while.body");
342 // Store the blocks to use for break and continue.
343 BreakContinueStack
.push_back(BreakContinue(ExitBlock
, LoopHeader
));
345 // Evaluate the conditional in the while header. C99 6.8.5.1: The
346 // evaluation of the controlling expression takes place before each
347 // execution of the loop body.
348 llvm::Value
*BoolCondVal
= EvaluateExprAsBool(S
.getCond());
350 // while(1) is common, avoid extra exit blocks. Be sure
351 // to correctly handle break/continue though.
352 bool EmitBoolCondBranch
= true;
353 if (llvm::ConstantInt
*C
= dyn_cast
<llvm::ConstantInt
>(BoolCondVal
))
355 EmitBoolCondBranch
= false;
357 // As long as the condition is true, go to the loop body.
358 if (EmitBoolCondBranch
)
359 Builder
.CreateCondBr(BoolCondVal
, LoopBody
, ExitBlock
);
361 // Emit the loop body.
363 EmitStmt(S
.getBody());
365 BreakContinueStack
.pop_back();
367 // Cycle to the condition.
368 EmitBranch(LoopHeader
);
370 // Emit the exit block.
371 EmitBlock(ExitBlock
, true);
373 // The LoopHeader typically is just a branch if we skipped emitting
374 // a branch, try to erase it.
375 if (!EmitBoolCondBranch
)
376 SimplifyForwardingBlocks(LoopHeader
);
379 void CodeGenFunction::EmitDoStmt(const DoStmt
&S
) {
380 // Emit the body for the loop, insert it, which will create an uncond br to
382 llvm::BasicBlock
*LoopBody
= createBasicBlock("do.body");
383 llvm::BasicBlock
*AfterDo
= createBasicBlock("do.end");
386 llvm::BasicBlock
*DoCond
= createBasicBlock("do.cond");
388 // Store the blocks to use for break and continue.
389 BreakContinueStack
.push_back(BreakContinue(AfterDo
, DoCond
));
391 // Emit the body of the loop into the block.
392 EmitStmt(S
.getBody());
394 BreakContinueStack
.pop_back();
398 // C99 6.8.5.2: "The evaluation of the controlling expression takes place
399 // after each execution of the loop body."
401 // Evaluate the conditional in the while header.
402 // C99 6.8.5p2/p4: The first substatement is executed if the expression
403 // compares unequal to 0. The condition must be a scalar type.
404 llvm::Value
*BoolCondVal
= EvaluateExprAsBool(S
.getCond());
406 // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
407 // to correctly handle break/continue though.
408 bool EmitBoolCondBranch
= true;
409 if (llvm::ConstantInt
*C
= dyn_cast
<llvm::ConstantInt
>(BoolCondVal
))
411 EmitBoolCondBranch
= false;
413 // As long as the condition is true, iterate the loop.
414 if (EmitBoolCondBranch
)
415 Builder
.CreateCondBr(BoolCondVal
, LoopBody
, AfterDo
);
417 // Emit the exit block.
420 // The DoCond block typically is just a branch if we skipped
421 // emitting a branch, try to erase it.
422 if (!EmitBoolCondBranch
)
423 SimplifyForwardingBlocks(DoCond
);
426 void CodeGenFunction::EmitForStmt(const ForStmt
&S
) {
427 // FIXME: What do we do if the increment (f.e.) contains a stmt expression,
428 // which contains a continue/break?
430 // Evaluate the first part before the loop.
432 EmitStmt(S
.getInit());
434 // Start the loop with a block that tests the condition.
435 llvm::BasicBlock
*CondBlock
= createBasicBlock("for.cond");
436 llvm::BasicBlock
*AfterFor
= createBasicBlock("for.end");
438 EmitBlock(CondBlock
);
440 // Evaluate the condition if present. If not, treat it as a
441 // non-zero-constant according to 6.8.5.3p2, aka, true.
443 // As long as the condition is true, iterate the loop.
444 llvm::BasicBlock
*ForBody
= createBasicBlock("for.body");
446 // C99 6.8.5p2/p4: The first substatement is executed if the expression
447 // compares unequal to 0. The condition must be a scalar type.
448 EmitBranchOnBoolExpr(S
.getCond(), ForBody
, AfterFor
);
452 // Treat it as a non-zero constant. Don't even create a new block for the
453 // body, just fall into it.
456 // If the for loop doesn't have an increment we can just use the
457 // condition as the continue block.
458 llvm::BasicBlock
*ContinueBlock
;
460 ContinueBlock
= createBasicBlock("for.inc");
462 ContinueBlock
= CondBlock
;
464 // Store the blocks to use for break and continue.
465 BreakContinueStack
.push_back(BreakContinue(AfterFor
, ContinueBlock
));
467 // If the condition is true, execute the body of the for stmt.
468 EmitStmt(S
.getBody());
470 BreakContinueStack
.pop_back();
472 // If there is an increment, emit it next.
474 EmitBlock(ContinueBlock
);
475 EmitStmt(S
.getInc());
478 // Finally, branch back up to the condition for the next iteration.
479 EmitBranch(CondBlock
);
481 // Emit the fall-through block.
482 EmitBlock(AfterFor
, true);
485 void CodeGenFunction::EmitReturnOfRValue(RValue RV
, QualType Ty
) {
487 Builder
.CreateStore(RV
.getScalarVal(), ReturnValue
);
488 } else if (RV
.isAggregate()) {
489 EmitAggregateCopy(ReturnValue
, RV
.getAggregateAddr(), Ty
);
491 StoreComplexToAddr(RV
.getComplexVal(), ReturnValue
, false);
493 EmitBranchThroughCleanup(ReturnBlock
);
496 /// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
497 /// if the function returns void, or may be missing one if the function returns
498 /// non-void. Fun stuff :).
499 void CodeGenFunction::EmitReturnStmt(const ReturnStmt
&S
) {
500 // Emit the result value, even if unused, to evalute the side effects.
501 const Expr
*RV
= S
.getRetValue();
503 // FIXME: Clean this up by using an LValue for ReturnTemp,
504 // EmitStoreThroughLValue, and EmitAnyExpr.
506 // Make sure not to return anything, but evaluate the expression
510 } else if (RV
== 0) {
511 // Do nothing (return value is left uninitialized)
512 } else if (FnRetTy
->isReferenceType()) {
513 // If this function returns a reference, take the address of the expression
514 // rather than the value.
515 Builder
.CreateStore(EmitLValue(RV
).getAddress(), ReturnValue
);
516 } else if (!hasAggregateLLVMType(RV
->getType())) {
517 Builder
.CreateStore(EmitScalarExpr(RV
), ReturnValue
);
518 } else if (RV
->getType()->isAnyComplexType()) {
519 EmitComplexExprIntoAddr(RV
, ReturnValue
, false);
521 EmitAggExpr(RV
, ReturnValue
, false);
524 EmitBranchThroughCleanup(ReturnBlock
);
527 void CodeGenFunction::EmitDeclStmt(const DeclStmt
&S
) {
528 // As long as debug info is modeled with instructions, we have to ensure we
529 // have a place to insert here and write the stop point here.
530 if (getDebugInfo()) {
535 for (DeclStmt::const_decl_iterator I
= S
.decl_begin(), E
= S
.decl_end();
540 void CodeGenFunction::EmitBreakStmt(const BreakStmt
&S
) {
541 assert(!BreakContinueStack
.empty() && "break stmt not in a loop or switch!");
543 // If this code is reachable then emit a stop point (if generating
544 // debug info). We have to do this ourselves because we are on the
545 // "simple" statement path.
546 if (HaveInsertPoint())
549 llvm::BasicBlock
*Block
= BreakContinueStack
.back().BreakBlock
;
550 EmitBranchThroughCleanup(Block
);
553 void CodeGenFunction::EmitContinueStmt(const ContinueStmt
&S
) {
554 assert(!BreakContinueStack
.empty() && "continue stmt not in a loop!");
556 // If this code is reachable then emit a stop point (if generating
557 // debug info). We have to do this ourselves because we are on the
558 // "simple" statement path.
559 if (HaveInsertPoint())
562 llvm::BasicBlock
*Block
= BreakContinueStack
.back().ContinueBlock
;
563 EmitBranchThroughCleanup(Block
);
566 /// EmitCaseStmtRange - If case statement range is not too big then
567 /// add multiple cases to switch instruction, one for each value within
568 /// the range. If range is too big then emit "if" condition check.
569 void CodeGenFunction::EmitCaseStmtRange(const CaseStmt
&S
) {
570 assert(S
.getRHS() && "Expected RHS value in CaseStmt");
572 llvm::APSInt LHS
= S
.getLHS()->EvaluateAsInt(getContext());
573 llvm::APSInt RHS
= S
.getRHS()->EvaluateAsInt(getContext());
575 // Emit the code for this case. We do this first to make sure it is
576 // properly chained from our predecessor before generating the
577 // switch machinery to enter this block.
578 EmitBlock(createBasicBlock("sw.bb"));
579 llvm::BasicBlock
*CaseDest
= Builder
.GetInsertBlock();
580 EmitStmt(S
.getSubStmt());
582 // If range is empty, do nothing.
583 if (LHS
.isSigned() ? RHS
.slt(LHS
) : RHS
.ult(LHS
))
586 llvm::APInt Range
= RHS
- LHS
;
587 // FIXME: parameters such as this should not be hardcoded.
588 if (Range
.ult(llvm::APInt(Range
.getBitWidth(), 64))) {
589 // Range is small enough to add multiple switch instruction cases.
590 for (unsigned i
= 0, e
= Range
.getZExtValue() + 1; i
!= e
; ++i
) {
591 SwitchInsn
->addCase(VMContext
.getConstantInt(LHS
), CaseDest
);
597 // The range is too big. Emit "if" condition into a new block,
598 // making sure to save and restore the current insertion point.
599 llvm::BasicBlock
*RestoreBB
= Builder
.GetInsertBlock();
601 // Push this test onto the chain of range checks (which terminates
602 // in the default basic block). The switch's default will be changed
603 // to the top of this chain after switch emission is complete.
604 llvm::BasicBlock
*FalseDest
= CaseRangeBlock
;
605 CaseRangeBlock
= createBasicBlock("sw.caserange");
607 CurFn
->getBasicBlockList().push_back(CaseRangeBlock
);
608 Builder
.SetInsertPoint(CaseRangeBlock
);
612 Builder
.CreateSub(SwitchInsn
->getCondition(), VMContext
.getConstantInt(LHS
),
615 Builder
.CreateICmpULE(Diff
, VMContext
.getConstantInt(Range
), "tmp");
616 Builder
.CreateCondBr(Cond
, CaseDest
, FalseDest
);
618 // Restore the appropriate insertion point.
620 Builder
.SetInsertPoint(RestoreBB
);
622 Builder
.ClearInsertionPoint();
625 void CodeGenFunction::EmitCaseStmt(const CaseStmt
&S
) {
627 EmitCaseStmtRange(S
);
631 EmitBlock(createBasicBlock("sw.bb"));
632 llvm::BasicBlock
*CaseDest
= Builder
.GetInsertBlock();
633 llvm::APSInt CaseVal
= S
.getLHS()->EvaluateAsInt(getContext());
634 SwitchInsn
->addCase(VMContext
.getConstantInt(CaseVal
), CaseDest
);
636 // Recursively emitting the statement is acceptable, but is not wonderful for
637 // code where we have many case statements nested together, i.e.:
641 // Handling this recursively will create a new block for each case statement
642 // that falls through to the next case which is IR intensive. It also causes
643 // deep recursion which can run into stack depth limitations. Handle
644 // sequential non-range case statements specially.
645 const CaseStmt
*CurCase
= &S
;
646 const CaseStmt
*NextCase
= dyn_cast
<CaseStmt
>(S
.getSubStmt());
648 // Otherwise, iteratively add consequtive cases to this switch stmt.
649 while (NextCase
&& NextCase
->getRHS() == 0) {
651 CaseVal
= CurCase
->getLHS()->EvaluateAsInt(getContext());
652 SwitchInsn
->addCase(VMContext
.getConstantInt(CaseVal
), CaseDest
);
654 NextCase
= dyn_cast
<CaseStmt
>(CurCase
->getSubStmt());
657 // Normal default recursion for non-cases.
658 EmitStmt(CurCase
->getSubStmt());
661 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt
&S
) {
662 llvm::BasicBlock
*DefaultBlock
= SwitchInsn
->getDefaultDest();
663 assert(DefaultBlock
->empty() &&
664 "EmitDefaultStmt: Default block already defined?");
665 EmitBlock(DefaultBlock
);
666 EmitStmt(S
.getSubStmt());
669 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt
&S
) {
670 llvm::Value
*CondV
= EmitScalarExpr(S
.getCond());
672 // Handle nested switch statements.
673 llvm::SwitchInst
*SavedSwitchInsn
= SwitchInsn
;
674 llvm::BasicBlock
*SavedCRBlock
= CaseRangeBlock
;
676 // Create basic block to hold stuff that comes after switch
677 // statement. We also need to create a default block now so that
678 // explicit case ranges tests can have a place to jump to on
680 llvm::BasicBlock
*NextBlock
= createBasicBlock("sw.epilog");
681 llvm::BasicBlock
*DefaultBlock
= createBasicBlock("sw.default");
682 SwitchInsn
= Builder
.CreateSwitch(CondV
, DefaultBlock
);
683 CaseRangeBlock
= DefaultBlock
;
685 // Clear the insertion point to indicate we are in unreachable code.
686 Builder
.ClearInsertionPoint();
688 // All break statements jump to NextBlock. If BreakContinueStack is non empty
689 // then reuse last ContinueBlock.
690 llvm::BasicBlock
*ContinueBlock
= 0;
691 if (!BreakContinueStack
.empty())
692 ContinueBlock
= BreakContinueStack
.back().ContinueBlock
;
694 // Ensure any vlas created between there and here, are undone
695 BreakContinueStack
.push_back(BreakContinue(NextBlock
, ContinueBlock
));
698 EmitStmt(S
.getBody());
700 BreakContinueStack
.pop_back();
702 // Update the default block in case explicit case range tests have
703 // been chained on top.
704 SwitchInsn
->setSuccessor(0, CaseRangeBlock
);
706 // If a default was never emitted then reroute any jumps to it and
708 if (!DefaultBlock
->getParent()) {
709 DefaultBlock
->replaceAllUsesWith(NextBlock
);
713 // Emit continuation.
714 EmitBlock(NextBlock
, true);
716 SwitchInsn
= SavedSwitchInsn
;
717 CaseRangeBlock
= SavedCRBlock
;
721 SimplifyConstraint(const char *Constraint
, TargetInfo
&Target
,
722 llvm::SmallVectorImpl
<TargetInfo::ConstraintInfo
> *OutCons
=0) {
725 while (*Constraint
) {
726 switch (*Constraint
) {
728 Result
+= Target
.convertConstraint(*Constraint
);
740 "Must pass output names to constraints with a symbolic name");
742 bool result
= Target
.resolveSymbolicName(Constraint
,
744 OutCons
->size(), Index
);
745 assert(result
&& "Could not resolve symbolic name"); result
=result
;
746 Result
+= llvm::utostr(Index
);
757 llvm::Value
* CodeGenFunction::EmitAsmInput(const AsmStmt
&S
,
758 const TargetInfo::ConstraintInfo
&Info
,
759 const Expr
*InputExpr
,
760 std::string
&ConstraintStr
) {
762 if (Info
.allowsRegister() || !Info
.allowsMemory()) {
763 const llvm::Type
*Ty
= ConvertType(InputExpr
->getType());
765 if (Ty
->isSingleValueType()) {
766 Arg
= EmitScalarExpr(InputExpr
);
768 InputExpr
= InputExpr
->IgnoreParenNoopCasts(getContext());
769 LValue Dest
= EmitLValue(InputExpr
);
771 uint64_t Size
= CGM
.getTargetData().getTypeSizeInBits(Ty
);
772 if (Size
<= 64 && llvm::isPowerOf2_64(Size
)) {
773 Ty
= llvm::IntegerType::get(Size
);
774 Ty
= llvm::PointerType::getUnqual(Ty
);
776 Arg
= Builder
.CreateLoad(Builder
.CreateBitCast(Dest
.getAddress(), Ty
));
778 Arg
= Dest
.getAddress();
779 ConstraintStr
+= '*';
783 InputExpr
= InputExpr
->IgnoreParenNoopCasts(getContext());
784 LValue Dest
= EmitLValue(InputExpr
);
785 Arg
= Dest
.getAddress();
786 ConstraintStr
+= '*';
792 void CodeGenFunction::EmitAsmStmt(const AsmStmt
&S
) {
793 // Analyze the asm string to decompose it into its pieces. We know that Sema
794 // has already done this, so it is guaranteed to be successful.
795 llvm::SmallVector
<AsmStmt::AsmStringPiece
, 4> Pieces
;
797 S
.AnalyzeAsmString(Pieces
, getContext(), DiagOffs
);
799 // Assemble the pieces into the final asm string.
800 std::string AsmString
;
801 for (unsigned i
= 0, e
= Pieces
.size(); i
!= e
; ++i
) {
802 if (Pieces
[i
].isString())
803 AsmString
+= Pieces
[i
].getString();
804 else if (Pieces
[i
].getModifier() == '\0')
805 AsmString
+= '$' + llvm::utostr(Pieces
[i
].getOperandNo());
807 AsmString
+= "${" + llvm::utostr(Pieces
[i
].getOperandNo()) + ':' +
808 Pieces
[i
].getModifier() + '}';
811 // Get all the output and input constraints together.
812 llvm::SmallVector
<TargetInfo::ConstraintInfo
, 4> OutputConstraintInfos
;
813 llvm::SmallVector
<TargetInfo::ConstraintInfo
, 4> InputConstraintInfos
;
815 for (unsigned i
= 0, e
= S
.getNumOutputs(); i
!= e
; i
++) {
816 TargetInfo::ConstraintInfo
Info(S
.getOutputConstraint(i
),
818 bool result
= Target
.validateOutputConstraint(Info
);
819 assert(result
&& "Failed to parse output constraint"); result
=result
;
820 OutputConstraintInfos
.push_back(Info
);
823 for (unsigned i
= 0, e
= S
.getNumInputs(); i
!= e
; i
++) {
824 TargetInfo::ConstraintInfo
Info(S
.getInputConstraint(i
),
826 bool result
= Target
.validateInputConstraint(OutputConstraintInfos
.data(),
828 Info
); result
=result
;
829 assert(result
&& "Failed to parse input constraint");
830 InputConstraintInfos
.push_back(Info
);
833 std::string Constraints
;
835 std::vector
<LValue
> ResultRegDests
;
836 std::vector
<QualType
> ResultRegQualTys
;
837 std::vector
<const llvm::Type
*> ResultRegTypes
;
838 std::vector
<const llvm::Type
*> ResultTruncRegTypes
;
839 std::vector
<const llvm::Type
*> ArgTypes
;
840 std::vector
<llvm::Value
*> Args
;
842 // Keep track of inout constraints.
843 std::string InOutConstraints
;
844 std::vector
<llvm::Value
*> InOutArgs
;
845 std::vector
<const llvm::Type
*> InOutArgTypes
;
847 for (unsigned i
= 0, e
= S
.getNumOutputs(); i
!= e
; i
++) {
848 TargetInfo::ConstraintInfo
&Info
= OutputConstraintInfos
[i
];
850 // Simplify the output constraint.
851 std::string
OutputConstraint(S
.getOutputConstraint(i
));
852 OutputConstraint
= SimplifyConstraint(OutputConstraint
.c_str() + 1, Target
);
854 const Expr
*OutExpr
= S
.getOutputExpr(i
);
855 OutExpr
= OutExpr
->IgnoreParenNoopCasts(getContext());
857 LValue Dest
= EmitLValue(OutExpr
);
858 if (!Constraints
.empty())
861 // If this is a register output, then make the inline asm return it
862 // by-value. If this is a memory result, return the value by-reference.
863 if (!Info
.allowsMemory() && !hasAggregateLLVMType(OutExpr
->getType())) {
864 Constraints
+= "=" + OutputConstraint
;
865 ResultRegQualTys
.push_back(OutExpr
->getType());
866 ResultRegDests
.push_back(Dest
);
867 ResultRegTypes
.push_back(ConvertTypeForMem(OutExpr
->getType()));
868 ResultTruncRegTypes
.push_back(ResultRegTypes
.back());
870 // If this output is tied to an input, and if the input is larger, then
871 // we need to set the actual result type of the inline asm node to be the
872 // same as the input type.
873 if (Info
.hasMatchingInput()) {
875 for (InputNo
= 0; InputNo
!= S
.getNumInputs(); ++InputNo
) {
876 TargetInfo::ConstraintInfo
&Input
= InputConstraintInfos
[InputNo
];
877 if (Input
.hasTiedOperand() &&
878 Input
.getTiedOperand() == i
)
881 assert(InputNo
!= S
.getNumInputs() && "Didn't find matching input!");
883 QualType InputTy
= S
.getInputExpr(InputNo
)->getType();
884 QualType OutputTy
= OutExpr
->getType();
886 uint64_t InputSize
= getContext().getTypeSize(InputTy
);
887 if (getContext().getTypeSize(OutputTy
) < InputSize
) {
888 // Form the asm to return the value as a larger integer type.
889 ResultRegTypes
.back() = llvm::IntegerType::get((unsigned)InputSize
);
894 ArgTypes
.push_back(Dest
.getAddress()->getType());
895 Args
.push_back(Dest
.getAddress());
897 Constraints
+= OutputConstraint
;
900 if (Info
.isReadWrite()) {
901 InOutConstraints
+= ',';
903 const Expr
*InputExpr
= S
.getOutputExpr(i
);
904 llvm::Value
*Arg
= EmitAsmInput(S
, Info
, InputExpr
, InOutConstraints
);
906 if (Info
.allowsRegister())
907 InOutConstraints
+= llvm::utostr(i
);
909 InOutConstraints
+= OutputConstraint
;
911 InOutArgTypes
.push_back(Arg
->getType());
912 InOutArgs
.push_back(Arg
);
916 unsigned NumConstraints
= S
.getNumOutputs() + S
.getNumInputs();
918 for (unsigned i
= 0, e
= S
.getNumInputs(); i
!= e
; i
++) {
919 const Expr
*InputExpr
= S
.getInputExpr(i
);
921 TargetInfo::ConstraintInfo
&Info
= InputConstraintInfos
[i
];
923 if (!Constraints
.empty())
926 // Simplify the input constraint.
927 std::string
InputConstraint(S
.getInputConstraint(i
));
928 InputConstraint
= SimplifyConstraint(InputConstraint
.c_str(), Target
,
929 &OutputConstraintInfos
);
931 llvm::Value
*Arg
= EmitAsmInput(S
, Info
, InputExpr
, Constraints
);
933 // If this input argument is tied to a larger output result, extend the
934 // input to be the same size as the output. The LLVM backend wants to see
935 // the input and output of a matching constraint be the same size. Note
936 // that GCC does not define what the top bits are here. We use zext because
937 // that is usually cheaper, but LLVM IR should really get an anyext someday.
938 if (Info
.hasTiedOperand()) {
939 unsigned Output
= Info
.getTiedOperand();
940 QualType OutputTy
= S
.getOutputExpr(Output
)->getType();
941 QualType InputTy
= InputExpr
->getType();
943 if (getContext().getTypeSize(OutputTy
) >
944 getContext().getTypeSize(InputTy
)) {
945 // Use ptrtoint as appropriate so that we can do our extension.
946 if (isa
<llvm::PointerType
>(Arg
->getType()))
947 Arg
= Builder
.CreatePtrToInt(Arg
,
948 llvm::IntegerType::get(LLVMPointerWidth
));
949 unsigned OutputSize
= (unsigned)getContext().getTypeSize(OutputTy
);
950 Arg
= Builder
.CreateZExt(Arg
, llvm::IntegerType::get(OutputSize
));
955 ArgTypes
.push_back(Arg
->getType());
957 Constraints
+= InputConstraint
;
960 // Append the "input" part of inout constraints last.
961 for (unsigned i
= 0, e
= InOutArgs
.size(); i
!= e
; i
++) {
962 ArgTypes
.push_back(InOutArgTypes
[i
]);
963 Args
.push_back(InOutArgs
[i
]);
965 Constraints
+= InOutConstraints
;
968 for (unsigned i
= 0, e
= S
.getNumClobbers(); i
!= e
; i
++) {
969 std::string
Clobber(S
.getClobber(i
)->getStrData(),
970 S
.getClobber(i
)->getByteLength());
972 Clobber
= Target
.getNormalizedGCCRegisterName(Clobber
.c_str());
974 if (i
!= 0 || NumConstraints
!= 0)
978 Constraints
+= Clobber
;
982 // Add machine specific clobbers
983 std::string MachineClobbers
= Target
.getClobbers();
984 if (!MachineClobbers
.empty()) {
985 if (!Constraints
.empty())
987 Constraints
+= MachineClobbers
;
990 const llvm::Type
*ResultType
;
991 if (ResultRegTypes
.empty())
992 ResultType
= llvm::Type::VoidTy
;
993 else if (ResultRegTypes
.size() == 1)
994 ResultType
= ResultRegTypes
[0];
996 ResultType
= llvm::StructType::get(ResultRegTypes
);
998 const llvm::FunctionType
*FTy
=
999 llvm::FunctionType::get(ResultType
, ArgTypes
, false);
1001 llvm::InlineAsm
*IA
=
1002 llvm::InlineAsm::get(FTy
, AsmString
, Constraints
,
1003 S
.isVolatile() || S
.getNumOutputs() == 0);
1004 llvm::CallInst
*Result
= Builder
.CreateCall(IA
, Args
.begin(), Args
.end());
1005 Result
->addAttribute(~0, llvm::Attribute::NoUnwind
);
1008 // Extract all of the register value results from the asm.
1009 std::vector
<llvm::Value
*> RegResults
;
1010 if (ResultRegTypes
.size() == 1) {
1011 RegResults
.push_back(Result
);
1013 for (unsigned i
= 0, e
= ResultRegTypes
.size(); i
!= e
; ++i
) {
1014 llvm::Value
*Tmp
= Builder
.CreateExtractValue(Result
, i
, "asmresult");
1015 RegResults
.push_back(Tmp
);
1019 for (unsigned i
= 0, e
= RegResults
.size(); i
!= e
; ++i
) {
1020 llvm::Value
*Tmp
= RegResults
[i
];
1022 // If the result type of the LLVM IR asm doesn't match the result type of
1023 // the expression, do the conversion.
1024 if (ResultRegTypes
[i
] != ResultTruncRegTypes
[i
]) {
1025 const llvm::Type
*TruncTy
= ResultTruncRegTypes
[i
];
1026 // Truncate the integer result to the right size, note that
1027 // ResultTruncRegTypes can be a pointer.
1028 uint64_t ResSize
= CGM
.getTargetData().getTypeSizeInBits(TruncTy
);
1029 Tmp
= Builder
.CreateTrunc(Tmp
, llvm::IntegerType::get((unsigned)ResSize
));
1031 if (Tmp
->getType() != TruncTy
) {
1032 assert(isa
<llvm::PointerType
>(TruncTy
));
1033 Tmp
= Builder
.CreateIntToPtr(Tmp
, TruncTy
);
1037 EmitStoreThroughLValue(RValue::get(Tmp
), ResultRegDests
[i
],
1038 ResultRegQualTys
[i
]);