//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}
void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }
  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:    EmitIfStmt(cast<IfStmt>(*S));               break;
  case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs);  break;
  case Stmt::DoStmtClass:    EmitDoStmt(cast<DoStmt>(*S), Attrs);        break;
  case Stmt::ForStmtClass:   EmitForStmt(cast<ForStmt>(*S), Attrs);      break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass:  EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    break;
  }
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPErrorDirectiveClass:
    EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    llvm_unreachable("masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable("masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    llvm_unreachable("parallel masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable(
        "parallel masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    llvm_unreachable("Dispatch directive not supported yet.");
    break;
  case Stmt::OMPScopeDirectiveClass:
    llvm_unreachable("scope not supported with FE outlining");
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    EmitOMPTargetTeamsGenericLoopDirective(
        cast<OMPTargetTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    EmitOMPParallelGenericLoopDirective(
        cast<OMPParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    EmitOMPTargetParallelGenericLoopDirective(
        cast<OMPTargetParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
    break;
  }
}
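
/// EmitSimpleStmt - Emit the "simple" statements: those that carry their own
/// debug-location handling and may contain labels, and therefore must be
/// emitted even when the current insertion point is unreachable. Returns true
/// if the statement was one of these and has been handled here.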
bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}
/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
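///
/// For example (GNU statement expression; illustrative only):
///   int y = ({ int x = f(); x + 1; });
/// Here the value of the last sub-statement 'x + 1' becomes the value of the
/// whole statement expression.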
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}
Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {
  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}
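
/// SimplifyForwardingBlocks - If the given block does nothing but branch
/// unconditionally to its successor, fold it into that successor and delete
/// it. This is skipped entirely while cleanups are pending, since the block
/// may be referenced from the scope map or a cleanup entry.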
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}
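
/// EmitBlock - Emit the given block: fall through to it from the current
/// insertion point if needed, insert it into the function, and make it the
/// current insertion point. If IsFinished is true and the block ends up with
/// no uses, it is deleted instead of being emitted.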
void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}
void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}
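
/// EmitBlockAfterUses - Insert the given block right after its first
/// instruction user, so that it lands near its uses; if it has no instruction
/// users yet, append it to the end of the function. Either way, the block
/// becomes the current insertion point.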
void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}
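
/// getJumpDestForLabel - Return the jump destination for the given label,
/// creating its block (without inserting it into the function) on first use.
/// The destination's scope depth starts out invalid and is filled in later,
/// when the label itself is emitted.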
CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}
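
/// EmitLabel - Emit the block for the given label at the current insertion
/// point, giving it a scope depth and resolving any branch fixups that were
/// recorded against forward references to it.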
void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}
/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}
void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}
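
/// EmitAttributedStmt - Record the nomerge, noinline, always_inline, and
/// musttail statement attributes for the duration of the sub-statement, then
/// emit it.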
void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::MustTail:
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
      break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}
void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}
void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}
void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO. Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  //   executed,
  // - assuming the profile is poor, preserving the attribute may still be
  //   beneficial.
  // As an approximation, preserve the attribute only if both the branch and the
  // parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());
  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    // There is no need to emit line number for an unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    EmitBranch(ContBlock);
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}
void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header. C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C != nullptr;
  bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}
void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C;
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}
void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  Expr::EvalResult Result;
  bool CondIsConstInt =
      !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block. Otherwise, if there is no condition variable, we can
  // form the continue block now. If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}
void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}
namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace
/// If we have 'return f(...);', where both caller and callee are SwiftAsync,
/// codegen it as 'tail call ...; ret void;'.
static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
                                     const CGFunctionInfo *CurFnInfo) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return;
    }
  } else {
    return;
  }
  if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync &&
      (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) {
    auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back());
    CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
    Builder.CreateRetVoid();
    Builder.ClearInsertionPoint();
  }
}
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
                        ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();
  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
      if (auto *CE = dyn_cast<CallExpr>(RV))
        makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}
void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}
void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}
void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}
/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->insert(CurFn->end(), CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, ie, the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  } else if (SwitchLikelihood)
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);

  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}
void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
                                   ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided. This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement. For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S, Attrs);
    return;
  }

  llvm::ConstantInt *CaseVal =
      Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));

  // Emit debuginfo for the case value if it is an enum value.
  const ConstantExpr *CE;
  if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
    CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
  else
    CE = dyn_cast<ConstantExpr>(S.getLHS());
  if (CE) {
    if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
      if (CGDebugInfo *Dbg = getDebugInfo())
        if (CGM.getCodeGenOpts().hasReducedDebugInfo())
          Dbg->EmitGlobalVariable(DE->getDecl(),
                                  APValue(llvm::APSInt(CaseVal->getValue())));
  }

  if (SwitchLikelihood)
    SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));

  // If the body of the case is just a 'break', try to not emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //  case 1:
  //  case 2:
  //  case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive. It also causes
  // deep recursion which can run into stack depth limitations. Handle
  // sequential non-range case statements specially.
  //
  // TODO When the next case has a likelihood attribute the code returns to the
  // recursive algorithm. Maybe improve this case if it becomes common practice
  // to use a lot of attributes.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
        Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, CurCase);
    }
    // Since this loop is only executed when the CaseStmt has no attributes
    // use a hard-coded value.
    if (SwitchLikelihood)
      SwitchLikelihood->push_back(Stmt::LH_None);

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Generate a stop point for debug info if the case statement is
  // followed by a default statement. A fallthrough case before a
  // default case gets its own branch target.
  if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
    EmitStopPoint(CurCase);

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}
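// For example (a sketch): given
//   switch (n) { case 1: case 2: case 3: f(); break; }
// the loop above attaches the values 1, 2 and 3 to the single "sw.bb"
// destination instead of recursing once per label, avoiding both a chain of
// empty fall-through blocks and deep recursion on long label runs.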
void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
                                      ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // default statement can be elided. This situation only happens when we've
  // constant-folded the switch.
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");

  if (SwitchLikelihood)
    SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);

  EmitBlockWithFallThrough(DefaultBlock, &S);

  EmitStmt(S.getSubStmt());
}
/// CollectStatementsForCase - Given the body of a 'switch' statement and a
/// constant value that is being switched on, see if we can dead code eliminate
/// the body of the switch to a simple series of statements to emit. Basically,
/// on a switch (5) we want to find these statements:
///    case 5:
///      printf(...);    <--
///      ++i;            <--
///      break;
///
/// and add them to the ResultStmts vector. If it is unsafe to do this
/// transformation (for example, one of the elided statements contains a label
/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
/// should include statements after it (e.g. the printf() line is a substmt of
/// the case) then return CSFC_FallThrough. If we handled it and found a break
/// statement, then return CSFC_Success.
///
/// If Case is non-null, then we are looking for the specified case, checking
/// that nothing we jump over contains labels. If Case is null, then we found
/// the case and are looking for the break.
///
/// If the recursive walk actually finds our Case, then we set FoundCase to
/// true.
///
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
static CSFC_Result CollectStatementsForCase(const Stmt *S,
                                            const SwitchCase *Case,
                                            bool &FoundCase,
                                            SmallVectorImpl<const Stmt*> &ResultStmts) {
  // If this is a null statement, just succeed.
  if (!S)
    return Case ? CSFC_Success : CSFC_FallThrough;

  // If this is the switchcase (case 4: or default) that we're looking for, then
  // we're in business. Just add the substatement.
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
    if (S == Case) {
      FoundCase = true;
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
                                      ResultStmts);
    }

    // Otherwise, this is some other case or default statement, just ignore it.
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
                                    ResultStmts);
  }

  // If we are in the live part of the code and we found our break statement,
  // return a success!
  if (!Case && isa<BreakStmt>(S))
    return CSFC_Success;

  // If this is a compound statement, then it might contain the SwitchCase, the
  // break, or neither.
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    // Handle this as two cases: we might be looking for the SwitchCase (if so
    // the skipped statements must be skippable) or we might already have it.
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
    bool StartedInLiveCode = FoundCase;
    unsigned StartSize = ResultStmts.size();

    // If we've not found the case yet, scan through looking for it.
    if (Case) {
      // Keep track of whether we see a skipped declaration. The code could be
      // using the declaration even if it is skipped, so we can't optimize out
      // the decl if the kept statements might refer to it.
      bool HadSkippedDecl = false;

      // If we're looking for the case, just see if we can skip each of the
      // substatements.
      for (; Case && I != E; ++I) {
        HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);

        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
        case CSFC_Failure: return CSFC_Failure;
        case CSFC_Success:
          // A successful result means that either 1) the statement doesn't
          // contain the case and is skippable, or 2) it does contain the case
          // value and also the break to exit the switch. In the latter case,
          // we just verify the rest of the statements are elidable.
          if (FoundCase) {
            // If we found the case and skipped declarations, we can't do the
            // optimization.
            if (HadSkippedDecl)
              return CSFC_Failure;

            for (++I; I != E; ++I)
              if (CodeGenFunction::ContainsLabel(*I, true))
                return CSFC_Failure;
            return CSFC_Success;
          }
          break;
        case CSFC_FallThrough:
          // If we have a fallthrough condition, then we must have found the
          // case and started to include statements. Consider the rest of the
          // statements in the compound statement as candidates for inclusion.
          assert(FoundCase && "Didn't find case but returned fallthrough?");
          // We recursively found Case, so we're not looking for it anymore.
          Case = nullptr;

          // If we found the case and skipped declarations, we can't do the
          // optimization.
          if (HadSkippedDecl)
            return CSFC_Failure;
          break;
        }
      }

      if (!FoundCase)
        return CSFC_Success;

      assert(!HadSkippedDecl && "fallthrough after skipping decl");
    }

    // If we have statements in our range, then we know that the statements are
    // live and need to be added to the set of statements we're tracking.
    bool AnyDecls = false;
    for (; I != E; ++I) {
      AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);

      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
      case CSFC_Failure: return CSFC_Failure;
      case CSFC_FallThrough:
        // A fallthrough result means that the statement was simple and just
        // included in ResultStmts; keep adding them afterwards.
        break;
      case CSFC_Success:
        // A successful result means that we found the break statement and
        // stopped statement inclusion. We just ensure that any leftover stmts
        // are skippable and return success ourselves.
        for (++I; I != E; ++I)
          if (CodeGenFunction::ContainsLabel(*I, true))
            return CSFC_Failure;
        return CSFC_Success;
      }
    }

    // If we're about to fall out of a scope without hitting a 'break;', we
    // can't perform the optimization if there were any decls in that scope
    // (we'd lose their end-of-lifetime).
    if (AnyDecls) {
      // If the entire compound statement was live, there's one more thing we
      // can try before giving up: emit the whole thing as a single statement.
      // We can do that unless the statement contains a 'break;'.
      // FIXME: Such a break must be at the end of a construct within this one.
      // We could emit this by just ignoring the BreakStmts entirely.
      if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
        ResultStmts.resize(StartSize);
        ResultStmts.push_back(S);
      } else {
        return CSFC_Failure;
      }
    }

    return CSFC_FallThrough;
  }

  // Okay, this is some other statement that we don't handle explicitly, like a
  // for statement or increment etc. If we are skipping over this statement,
  // just verify it doesn't have labels, which would make it invalid to elide.
  if (Case) {
    if (CodeGenFunction::ContainsLabel(S, true))
      return CSFC_Failure;
    return CSFC_Success;
  }

  // Otherwise, we want to include this statement. Everything is cool with that
  // so long as it doesn't contain a break out of the switch we're in.
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;

  // Otherwise, everything is great. Include the statement and tell the caller
  // that we fall through and include the next statement as well.
  ResultStmts.push_back(S);
  return CSFC_FallThrough;
}
/// FindCaseStatementsForValue - Find the case statement being jumped to and
/// then invoke CollectStatementsForCase to find the list of statements to emit
/// for a switch on constant. See the comment above CollectStatementsForCase
/// for more details.
static bool FindCaseStatementsForValue(const SwitchStmt &S,
                                       const llvm::APSInt &ConstantCondValue,
                                       SmallVectorImpl<const Stmt*> &ResultStmts,
                                       ASTContext &C,
                                       const SwitchCase *&ResultCase) {
  // First step, find the switch case that is being branched to. We can do this
  // efficiently by scanning the SwitchCase list.
  const SwitchCase *Case = S.getSwitchCaseList();
  const DefaultStmt *DefaultCase = nullptr;

  for (; Case; Case = Case->getNextSwitchCase()) {
    // It's either a default or case. Just remember the default statement in
    // case we're not jumping to any numbered cases.
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
      DefaultCase = DS;
      continue;
    }

    // Check to see if this case is the one we're looking for.
    const CaseStmt *CS = cast<CaseStmt>(Case);
    // Don't handle case ranges yet.
    if (CS->getRHS()) return false;

    // If we found our case, remember it as 'case'.
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
      break;
  }

  // If we didn't find a matching case, we use a default if it exists, or we
  // elide the whole switch body!
  if (!Case) {
    // It is safe to elide the body of the switch if it doesn't contain labels
    // etc. If it is safe, return successfully with an empty ResultStmts list.
    if (!DefaultCase)
      return !CodeGenFunction::ContainsLabel(&S);
    Case = DefaultCase;
  }

  // Ok, we know which case is being jumped to, try to collect all the
  // statements that follow it. This can fail for a variety of reasons. Also,
  // check to see that the recursive walk actually found our case statement.
  // Insane cases like this can fail to find it in the recursive walk since we
  // don't handle every stmt kind:
  // switch (4) {
  //   while (1) {
  //     case 4: ...
  bool FoundCase = false;
  ResultCase = Case;
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
                                  ResultStmts) != CSFC_Failure &&
         FoundCase;
}
static std::optional<SmallVector<uint64_t, 16>>
getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
  // Are there enough branches to weight them?
  if (Likelihoods.size() <= 1)
    return std::nullopt;

  uint64_t NumUnlikely = 0;
  uint64_t NumNone = 0;
  uint64_t NumLikely = 0;
  for (const auto LH : Likelihoods) {
    switch (LH) {
    case Stmt::LH_Unlikely:
      ++NumUnlikely;
      break;
    case Stmt::LH_None:
      ++NumNone;
      break;
    case Stmt::LH_Likely:
      ++NumLikely;
      break;
    }
  }

  // Is there a likelihood attribute used?
  if (NumUnlikely == 0 && NumLikely == 0)
    return std::nullopt;

  // When multiple cases share the same code they can be combined during
  // optimization. In that case the weights of the branch will be the sum of
  // the individual weights. Make sure the combined sum of all neutral cases
  // doesn't exceed the value of a single likely attribute.
  // The additions both avoid divisions by 0 and make sure the weights of None
  // don't exceed the weight of Likely.
  const uint64_t Likely = INT32_MAX / (NumLikely + 2);
  const uint64_t None = Likely / (NumNone + 1);
  const uint64_t Unlikely = 0;

  SmallVector<uint64_t, 16> Result;
  Result.reserve(Likelihoods.size());
  for (const auto LH : Likelihoods) {
    switch (LH) {
    case Stmt::LH_Unlikely:
      Result.push_back(Unlikely);
      break;
    case Stmt::LH_None:
      Result.push_back(None);
      break;
    case Stmt::LH_Likely:
      Result.push_back(Likely);
      break;
    }
  }

  return Result;
}
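// Worked example (a sketch): for likelihoods {LH_None, LH_Likely, LH_None}
// (the default entry plus two cases), NumLikely = 1 and NumNone = 2, so
//   Likely = INT32_MAX / 3 = 715827882
//   None   = Likely / 3    = 238609294
// giving weights {238609294, 715827882, 238609294}; even if both neutral
// entries are later merged, their sum (477218588) stays below Likely.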
void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
  // Handle nested switch statements.
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
  SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;

  // See if we can constant fold the condition of the switch and therefore only
  // emit the live case statement (if any) of the switch.
  llvm::APSInt ConstantCondValue;
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
    SmallVector<const Stmt*, 4> CaseStmts;
    const SwitchCase *Case = nullptr;
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
                                   getContext(), Case)) {
      if (Case)
        incrementProfileCounter(Case);
      RunCleanupsScope ExecutedScope(*this);

      if (S.getInit())
        EmitStmt(S.getInit());

      // Emit the condition variable if needed inside the entire cleanup scope
      // used by this special case for constant folded switches.
      if (S.getConditionVariable())
        EmitDecl(*S.getConditionVariable());

      // At this point, we are no longer "within" a switch instance, so
      // we can temporarily enforce this to ensure that any embedded case
      // statements are not emitted.
      SwitchInsn = nullptr;

      // Okay, we can dead code eliminate everything except this case. Emit the
      // specified series of statements and we're good.
      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
        EmitStmt(CaseStmts[i]);
      incrementProfileCounter(&S);

      // Now we want to restore the saved switch instance so that nested
      // switches continue to function properly.
      SwitchInsn = SavedSwitchInsn;

      return;
    }
  }

  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");

  RunCleanupsScope ConditionScope(*this);

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());
  llvm::Value *CondV = EmitScalarExpr(S.getCond());

  // Create basic block to hold stuff that comes after switch
  // statement. We also need to create a default block now so that
  // explicit case ranges tests can have a place to jump to on
  // failure.
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
  if (PGO.haveRegionCounts()) {
    // Walk the SwitchCase list to find how many there are.
    uint64_t DefaultCount = 0;
    unsigned NumCases = 0;
    for (const SwitchCase *Case = S.getSwitchCaseList();
         Case;
         Case = Case->getNextSwitchCase()) {
      if (isa<DefaultStmt>(Case))
        DefaultCount = getProfileCount(Case);
      NumCases += 1;
    }
    SwitchWeights = new SmallVector<uint64_t, 16>();
    SwitchWeights->reserve(NumCases);
    // The default needs to be first. We store the edge count, so we already
    // know the right weight.
    SwitchWeights->push_back(DefaultCount);
  } else if (CGM.getCodeGenOpts().OptimizationLevel) {
    SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
    // Initialize the default case.
    SwitchLikelihood->push_back(Stmt::LH_None);
  }

  CaseRangeBlock = DefaultBlock;

  // Clear the insertion point to indicate we are in unreachable code.
  Builder.ClearInsertionPoint();

  // All break statements jump to NextBlock. If BreakContinueStack is non-empty
  // then reuse last ContinueBlock.
  JumpDest OuterContinue;
  if (!BreakContinueStack.empty())
    OuterContinue = BreakContinueStack.back().ContinueBlock;

  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));

  // Emit switch body.
  EmitStmt(S.getBody());

  BreakContinueStack.pop_back();

  // Update the default block in case explicit case range tests have
  // been chained on top.
  SwitchInsn->setDefaultDest(CaseRangeBlock);

  // If a default was never emitted:
  if (!DefaultBlock->getParent()) {
    // If we have cleanups, emit the default block so that there's a
    // place to jump through the cleanups from.
    if (ConditionScope.requiresCleanups()) {
      EmitBlock(DefaultBlock);

    // Otherwise, just forward the default block to the switch end.
    } else {
      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
      delete DefaultBlock;
    }
  }

  ConditionScope.ForceCleanup();

  // Emit continuation.
  EmitBlock(SwitchExit.getBlock(), true);
  incrementProfileCounter(&S);

  // If the switch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the switch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
  auto *Call = dyn_cast<CallExpr>(S.getCond());
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
      llvm::MDBuilder MDHelper(getLLVMContext());
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
                              MDHelper.createUnpredictable());
    }
  }

  if (SwitchWeights) {
    assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
           "switch weights do not match switch cases");
    // If there's only one jump destination there's no sense weighting it.
    if (SwitchWeights->size() > 1)
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
                              createProfileWeights(*SwitchWeights));
    delete SwitchWeights;
  } else if (SwitchLikelihood) {
    assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
           "switch likelihoods do not match switch cases");
    std::optional<SmallVector<uint64_t, 16>> LHW =
        getLikelihoodWeights(*SwitchLikelihood);
    if (LHW) {
      llvm::MDBuilder MDHelper(CGM.getLLVMContext());
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
                              createProfileWeights(*LHW));
    }
    delete SwitchLikelihood;
  }
  SwitchInsn = SavedSwitchInsn;
  SwitchWeights = SavedSwitchWeights;
  SwitchLikelihood = SavedSwitchLikelihood;
  CaseRangeBlock = SavedCRBlock;
}
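// For example (a sketch): a switch whose condition is wrapped in
// __builtin_unpredictable, such as
//   switch (__builtin_unpredictable(x)) { case 0: ...; default: ...; }
// gets !unpredictable metadata on the switch instruction, while a switch
// whose cases carry [[likely]]/[[unlikely]] attributes gets !prof weights
// computed by getLikelihoodWeights() above.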
static std::string
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
                   SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
  std::string Result;

  while (*Constraint) {
    switch (*Constraint) {
    default:
      Result += Target.convertConstraint(Constraint);
      break;
    // Ignore these
    case '*':
    case '?':
    case '!':
    case '=': // Will see this and the following in multi-alt constraints.
    case '+':
      break;
    case '#': // Ignore the rest of the constraint alternative.
      while (Constraint[1] && Constraint[1] != ',')
        Constraint++;
      break;
    case '&':
    case '%':
      Result += *Constraint;
      while (Constraint[1] && Constraint[1] == *Constraint)
        Constraint++;
      break;
    case ',':
      Result += "|";
      break;
    case 'g':
      Result += "imr";
      break;
    case '[': {
      assert(OutCons &&
             "Must pass output names to constraints with a symbolic name");
      unsigned Index;
      bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
      assert(result && "Could not resolve symbolic name"); (void)result;
      Result += llvm::utostr(Index);
      break;
    }
    }

    Constraint++;
  }

  return Result;
}
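// For example (a sketch): for an output constraint "=&r,g" this function is
// handed the string after the leading '='; '&' is preserved, ',' becomes the
// LLVM alternative separator '|', and 'g' expands to "imr", so the result is
// "&r|imr".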
/// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
/// as using a particular register, add that register as a constraint that will
/// be used in this asm stmt.
static std::string
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
                       const TargetInfo &Target, CodeGenModule &CGM,
                       const AsmStmt &Stmt, const bool EarlyClobber,
                       std::string *GCCReg = nullptr) {
  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
  if (!AsmDeclRef)
    return Constraint;
  const ValueDecl &Value = *AsmDeclRef->getDecl();
  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
  if (!Variable)
    return Constraint;
  if (Variable->getStorageClass() != SC_Register)
    return Constraint;
  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
  if (!Attr)
    return Constraint;
  StringRef Register = Attr->getLabel();
  assert(Target.isValidGCCRegisterName(Register));
  // We're using validateOutputConstraint here because we only care if
  // this is a register constraint.
  TargetInfo::ConstraintInfo Info(Constraint, "");
  if (Target.validateOutputConstraint(Info) &&
      !Info.allowsRegister()) {
    CGM.ErrorUnsupported(&Stmt, "__asm__");
    return Constraint;
  }
  // Canonicalize the register here before returning it.
  Register = Target.getNormalizedGCCRegisterName(Register);
  if (GCCReg != nullptr)
    *GCCReg = Register.str();
  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
}
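// For example (a sketch): given
//   register int ret asm("eax");
//   asm("..." : "=r"(ret));
// the simplified "r" constraint is rewritten to "{eax}" (or "&{eax}" for an
// earlyclobber operand), pinning the operand to the named register.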
std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
    const TargetInfo::ConstraintInfo &Info, LValue InputValue,
    QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
  if (Info.allowsRegister() || !Info.allowsMemory()) {
    if (CodeGenFunction::hasScalarEvaluationKind(InputType))
      return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};

    llvm::Type *Ty = ConvertType(InputType);
    uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
    if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
        getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
      Ty = llvm::IntegerType::get(getLLVMContext(), Size);

      return {
          Builder.CreateLoad(InputValue.getAddress(*this).withElementType(Ty)),
          nullptr};
    }
  }

  Address Addr = InputValue.getAddress(*this);
  ConstraintStr += '*';
  return {Addr.getPointer(), Addr.getElementType()};
}
std::pair<llvm::Value *, llvm::Type *>
CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
                              const Expr *InputExpr,
                              std::string &ConstraintStr) {
  // If this can't be a register or memory, i.e., has to be a constant
  // (immediate or symbolic), try to emit it as such.
  if (!Info.allowsRegister() && !Info.allowsMemory()) {
    if (Info.requiresImmediateConstant()) {
      Expr::EvalResult EVResult;
      InputExpr->EvaluateAsRValue(EVResult, getContext(), true);

      llvm::APSInt IntResult;
      if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
                                          getContext()))
        return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
    }

    Expr::EvalResult Result;
    if (InputExpr->EvaluateAsInt(Result, getContext()))
      return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
              nullptr};
  }

  if (Info.allowsRegister() || !Info.allowsMemory())
    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
      return {EmitScalarExpr(InputExpr), nullptr};
  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
    return {EmitScalarExpr(InputExpr), nullptr};
  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
  LValue Dest = EmitLValue(InputExpr);
  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
                            InputExpr->getExprLoc());
}
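// For example (a sketch): in
//   asm volatile ("int %0" :: "i"(0x80));
// the "i" constraint allows neither a register nor memory, so the path above
// folds the expression to the constant 128 and passes it as an immediate.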
/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
/// asm call instruction. The !srcloc MDNode contains a list of constant
/// integers which are the source locations of the start of each line in the
/// asm.
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
                                      CodeGenFunction &CGF) {
  SmallVector<llvm::Metadata *, 8> Locs;
  // Add the location of the first line to the MDNode.
  Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
      CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
  StringRef StrVal = Str->getString();
  if (!StrVal.empty()) {
    const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
    const LangOptions &LangOpts = CGF.CGM.getLangOpts();
    unsigned StartToken = 0;
    unsigned ByteOffset = 0;

    // Add the location of the start of each subsequent line of the asm to the
    // MDNode.
    for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
      if (StrVal[i] != '\n') continue;
      SourceLocation LineLoc = Str->getLocationOfByte(
          i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
      Locs.push_back(llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
    }
  }

  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
}
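// For example (a sketch): a two-line template such as
//   "movl %1, %0\n\taddl $1, %0"
// produces a !srcloc node with two encoded locations, one per line, letting
// the backend point diagnostics at the offending line of the asm string.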
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
                              bool HasUnwindClobber, bool ReadOnly,
                              bool ReadNone, bool NoMerge, const AsmStmt &S,
                              const std::vector<llvm::Type *> &ResultRegTypes,
                              const std::vector<llvm::Type *> &ArgElemTypes,
                              CodeGenFunction &CGF,
                              std::vector<llvm::Value *> &RegResults) {
  if (!HasUnwindClobber)
    Result.addFnAttr(llvm::Attribute::NoUnwind);

  if (NoMerge)
    Result.addFnAttr(llvm::Attribute::NoMerge);
  // Attach readnone and readonly attributes.
  if (!HasSideEffect) {
    if (ReadNone)
      Result.setDoesNotAccessMemory();
    else if (ReadOnly)
      Result.setOnlyReadsMemory();
  }

  // Add elementtype attribute for indirect constraints.
  for (auto Pair : llvm::enumerate(ArgElemTypes)) {
    if (Pair.value()) {
      auto Attr = llvm::Attribute::get(
          CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
      Result.addParamAttr(Pair.index(), Attr);
    }
  }

  // Slap the source location of the inline asm into a !srcloc metadata on the
  // call.
  if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
    Result.setMetadata("srcloc",
                       getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
  else {
    // At least put the line number on MS inline asm blobs.
    llvm::Constant *Loc =
        llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
    Result.setMetadata("srcloc",
                       llvm::MDNode::get(CGF.getLLVMContext(),
                                         llvm::ConstantAsMetadata::get(Loc)));
  }

  if (CGF.getLangOpts().assumeFunctionsAreConvergent())
    // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
    // convergent (meaning, they may call an intrinsically convergent op, such
    // as bar.sync, and so can't have certain optimizations applied around
    // them).
    Result.addFnAttr(llvm::Attribute::Convergent);
  // Extract all of the register value results from the asm.
  if (ResultRegTypes.size() == 1) {
    RegResults.push_back(&Result);
  } else {
    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
      llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
      RegResults.push_back(Tmp);
    }
  }
}
static void
EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
              const llvm::ArrayRef<llvm::Value *> RegResults,
              const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
              const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
              const llvm::ArrayRef<LValue> ResultRegDests,
              const llvm::ArrayRef<QualType> ResultRegQualTys,
              const llvm::BitVector &ResultTypeRequiresCast,
              const llvm::BitVector &ResultRegIsFlagReg) {
  CGBuilderTy &Builder = CGF.Builder;
  CodeGenModule &CGM = CGF.CGM;
  llvm::LLVMContext &CTX = CGF.getLLVMContext();

  assert(RegResults.size() == ResultRegTypes.size());
  assert(RegResults.size() == ResultTruncRegTypes.size());
  assert(RegResults.size() == ResultRegDests.size());
  // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
  // in which case its size may grow.
  assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
  assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());

  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
    llvm::Value *Tmp = RegResults[i];
    llvm::Type *TruncTy = ResultTruncRegTypes[i];

    if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
      // Target must guarantee the Value `Tmp` here is lowered to a boolean
      // value.
      llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
      llvm::Value *IsBooleanValue =
          Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
      llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
      Builder.CreateCall(FnAssume, IsBooleanValue);
    }

    // If the result type of the LLVM IR asm doesn't match the result type of
    // the expression, do the conversion.
    if (ResultRegTypes[i] != TruncTy) {

      // Truncate the integer result to the right size; note that TruncTy can
      // be a pointer.
      if (TruncTy->isFloatingPointTy())
        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
        Tmp = Builder.CreateTrunc(
            Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
        uint64_t TmpSize =
            CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
        Tmp = Builder.CreatePtrToInt(
            Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
      } else if (TruncTy->isIntegerTy()) {
        Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
      } else if (TruncTy->isVectorTy()) {
        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
      }
    }

    LValue Dest = ResultRegDests[i];
    // ResultTypeRequiresCast elements correspond to the first
    // ResultTypeRequiresCast.size() elements of RegResults.
    if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
      unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
      Address A = Dest.getAddress(CGF).withElementType(ResultRegTypes[i]);
      if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
        Builder.CreateStore(Tmp, A);
        continue;
      }

      QualType Ty =
          CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
      if (Ty.isNull()) {
        const Expr *OutExpr = S.getOutputExpr(i);
        CGM.getDiags().Report(OutExpr->getExprLoc(),
                              diag::err_store_value_to_reg);
        return;
      }
      Dest = CGF.MakeAddrLValue(A, Ty);
    }
    CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
  }
}
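// For example (a sketch): with an x86 flag-output constraint such as
//   asm("cmp %2, %1" : "=@ccz"(eq) : "r"(a), "r"(b));
// the target guarantees the raw result is 0 or 1, and the icmp/assume pair
// emitted above encodes that invariant for later optimization passes.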
static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
                                        const AsmStmt &S) {
  constexpr auto Name = "__ASM__hipstdpar_unsupported";

  StringRef Asm;
  if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
    Asm = GCCAsm->getAsmString()->getString();

  auto &Ctx = CGF->CGM.getLLVMContext();

  auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
  auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
                                      {StrTy->getType()}, false);
  auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);

  CGF->Builder.CreateCall(UBF, {StrTy});
}
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
  // Pop all cleanup blocks at the end of the asm statement.
  CodeGenFunction::RunCleanupsScope Cleanups(*this);

  // Assemble the final asm string.
  std::string AsmString = S.generateAsmString(getContext());

  // Get all the output and input constraints together.
  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;

  bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
  bool IsValidTargetAsm = true;
  for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getOutputName(i);
    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
    bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
    if (IsHipStdPar && !IsValid)
      IsValidTargetAsm = false;
    else
      assert(IsValid && "Failed to parse output constraint");
    OutputConstraintInfos.push_back(Info);
  }

  for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getInputName(i);
    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
    bool IsValid =
        getTarget().validateInputConstraint(OutputConstraintInfos, Info);
    if (IsHipStdPar && !IsValid)
      IsValidTargetAsm = false;
    else
      assert(IsValid && "Failed to parse input constraint");
    InputConstraintInfos.push_back(Info);
  }

  if (!IsValidTargetAsm)
    return EmitHipStdParUnsupportedAsm(this, S);

  std::string Constraints;

  std::vector<LValue> ResultRegDests;
  std::vector<QualType> ResultRegQualTys;
  std::vector<llvm::Type *> ResultRegTypes;
  std::vector<llvm::Type *> ResultTruncRegTypes;
  std::vector<llvm::Type *> ArgTypes;
  std::vector<llvm::Type *> ArgElemTypes;
  std::vector<llvm::Value*> Args;
  llvm::BitVector ResultTypeRequiresCast;
  llvm::BitVector ResultRegIsFlagReg;

  // Keep track of inout constraints.
  std::string InOutConstraints;
  std::vector<llvm::Value*> InOutArgs;
  std::vector<llvm::Type*> InOutArgTypes;
  std::vector<llvm::Type*> InOutArgElemTypes;

  // Keep track of out constraints for tied input operand.
  std::vector<std::string> OutputConstraints;

  // Keep track of defined physregs.
  llvm::SmallSet<std::string, 8> PhysRegOutputs;

  // An inline asm can be marked readonly if it meets the following conditions:
  //  - it doesn't have any sideeffects
  //  - it doesn't clobber memory
  //  - it doesn't return a value by-reference
  // It can be marked readnone if it doesn't have any input memory constraints
  // in addition to meeting the conditions listed above.
  bool ReadOnly = true, ReadNone = true;

  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];

    // Simplify the output constraint.
    std::string OutputConstraint(S.getOutputConstraint(i));
    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
                                          getTarget(), &OutputConstraintInfos);

    const Expr *OutExpr = S.getOutputExpr(i);
    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());

    std::string GCCReg;
    OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
                                              getTarget(), CGM, S,
                                              Info.earlyClobber(),
                                              &GCCReg);
    // Give an error on multiple outputs to same physreg.
    if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
      CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);

    OutputConstraints.push_back(OutputConstraint);
    LValue Dest = EmitLValue(OutExpr);
    if (!Constraints.empty())
      Constraints += ',';

    // If this is a register output, then make the inline asm return it
    // by-value. If this is a memory result, return the value by-reference.
    QualType QTy = OutExpr->getType();
    const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
                                     hasAggregateEvaluationKind(QTy);
    if (!Info.allowsMemory() && IsScalarOrAggregate) {

      Constraints += "=" + OutputConstraint;
      ResultRegQualTys.push_back(QTy);
      ResultRegDests.push_back(Dest);

      bool IsFlagReg = llvm::StringRef(OutputConstraint).startswith("{@cc");
      ResultRegIsFlagReg.push_back(IsFlagReg);

      llvm::Type *Ty = ConvertTypeForMem(QTy);
      const bool RequiresCast = Info.allowsRegister() &&
          (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
           Ty->isAggregateType());

      ResultTruncRegTypes.push_back(Ty);
      ResultTypeRequiresCast.push_back(RequiresCast);

      if (RequiresCast) {
        unsigned Size = getContext().getTypeSize(QTy);
        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
      }
      ResultRegTypes.push_back(Ty);
      // If this output is tied to an input, and if the input is larger, then
      // we need to set the actual result type of the inline asm node to be the
      // same as the input type.
      if (Info.hasMatchingInput()) {
        unsigned InputNo;
        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
            break;
        }
        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");

        QualType InputTy = S.getInputExpr(InputNo)->getType();
        QualType OutputType = OutExpr->getType();

        uint64_t InputSize = getContext().getTypeSize(InputTy);
        if (getContext().getTypeSize(OutputType) < InputSize) {
          // Form the asm to return the value as a larger integer or fp type.
          ResultRegTypes.back() = ConvertType(InputTy);
        }
      }
      if (llvm::Type* AdjTy =
            getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                 ResultRegTypes.back()))
        ResultRegTypes.back() = AdjTy;
      else {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::err_asm_invalid_type_in_input)
            << OutExpr->getType() << OutputConstraint;
      }

      // Update largest vector width for any vector types.
      if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
        LargestVectorWidth =
            std::max((uint64_t)LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getKnownMinValue());
    } else {
      Address DestAddr = Dest.getAddress(*this);
      // Matrix types in memory are represented by arrays, but accessed through
      // vector pointers, with the alignment specified on the access operation.
      // For inline assembly, update pointer arguments to use vector pointers.
      // Otherwise there will be a mismatch if the matrix is also an
      // input-argument which is represented as vector.
      if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
        DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));

      ArgTypes.push_back(DestAddr.getType());
      ArgElemTypes.push_back(DestAddr.getElementType());
      Args.push_back(DestAddr.getPointer());
      Constraints += "=*";
      Constraints += OutputConstraint;
      ReadOnly = ReadNone = false;
    }

    if (Info.isReadWrite()) {
      InOutConstraints += ',';

      const Expr *InputExpr = S.getOutputExpr(i);
      llvm::Value *Arg;
      llvm::Type *ArgElemType;
      std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
          Info, Dest, InputExpr->getType(), InOutConstraints,
          InputExpr->getExprLoc());

      if (llvm::Type* AdjTy =
            getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                 Arg->getType()))
        Arg = Builder.CreateBitCast(Arg, AdjTy);

      // Update largest vector width for any vector types.
      if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
        LargestVectorWidth =
            std::max((uint64_t)LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getKnownMinValue());
      // Only tie earlyclobber physregs.
      if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
        InOutConstraints += llvm::utostr(i);
      else
        InOutConstraints += OutputConstraint;

      InOutArgTypes.push_back(Arg->getType());
      InOutArgElemTypes.push_back(ArgElemType);
      InOutArgs.push_back(Arg);
    }
  }

  // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
  // to the return value slot. Only do this when returning in registers.
  if (isa<MSAsmStmt>(&S)) {
    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
    if (RetAI.isDirect() || RetAI.isExtend()) {
      // Make a fake lvalue for the return value slot.
      LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
      CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
          *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
          ResultRegDests, AsmString, S.getNumOutputs());
      SawAsmBlock = true;
    }
  }
  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
    const Expr *InputExpr = S.getInputExpr(i);

    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];

    if (Info.allowsMemory())
      ReadNone = false;

    if (!Constraints.empty())
      Constraints += ',';

    // Simplify the input constraint.
    std::string InputConstraint(S.getInputConstraint(i));
    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
                                         &OutputConstraintInfos);

    InputConstraint = AddVariableConstraints(
        InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
        getTarget(), CGM, S, false /* No EarlyClobber */);

    std::string ReplaceConstraint(InputConstraint);
    llvm::Value *Arg;
    llvm::Type *ArgElemType;
    std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);

    // If this input argument is tied to a larger output result, extend the
    // input to be the same size as the output. The LLVM backend wants to see
    // the input and output of a matching constraint be the same size. Note
    // that GCC does not define what the top bits are here. We use zext because
    // that is usually cheaper, but LLVM IR should really get an anyext someday.
    if (Info.hasTiedOperand()) {
      unsigned Output = Info.getTiedOperand();
      QualType OutputType = S.getOutputExpr(Output)->getType();
      QualType InputTy = InputExpr->getType();

      if (getContext().getTypeSize(OutputType) >
          getContext().getTypeSize(InputTy)) {
        // Use ptrtoint as appropriate so that we can do our extension.
        if (isa<llvm::PointerType>(Arg->getType()))
          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
        llvm::Type *OutputTy = ConvertType(OutputType);
        if (isa<llvm::IntegerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, OutputTy);
        else if (isa<llvm::PointerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, IntPtrTy);
        else if (OutputTy->isFloatingPointTy())
          Arg = Builder.CreateFPExt(Arg, OutputTy);
      }
      // Deal with the tied operands' constraint code in adjustInlineAsmType.
      ReplaceConstraint = OutputConstraints[Output];
    }
    if (llvm::Type* AdjTy =
          getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
                                               Arg->getType()))
      Arg = Builder.CreateBitCast(Arg, AdjTy);
    else
      CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
          << InputExpr->getType() << InputConstraint;

    // Update largest vector width for any vector types.
    if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinValue());

    ArgTypes.push_back(Arg->getType());
    ArgElemTypes.push_back(ArgElemType);
    Args.push_back(Arg);
    Constraints += InputConstraint;
  }

  // Append the "input" part of inout constraints.
  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
    ArgTypes.push_back(InOutArgTypes[i]);
    ArgElemTypes.push_back(InOutArgElemTypes[i]);
    Args.push_back(InOutArgs[i]);
  }
  Constraints += InOutConstraints;
  // Labels
  SmallVector<llvm::BasicBlock *, 16> Transfer;
  llvm::BasicBlock *Fallthrough = nullptr;
  bool IsGCCAsmGoto = false;
  if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
    IsGCCAsmGoto = GS->isAsmGoto();
    if (IsGCCAsmGoto) {
      for (const auto *E : GS->labels()) {
        JumpDest Dest = getJumpDestForLabel(E->getLabel());
        Transfer.push_back(Dest.getBlock());
        if (!Constraints.empty())
          Constraints += ',';
        Constraints += "!i";
      }
      Fallthrough = createBasicBlock("asm.fallthrough");
    }
  }

  bool HasUnwindClobber = false;

  // Clobbers
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
    StringRef Clobber = S.getClobber(i);

    if (Clobber == "memory")
      ReadOnly = ReadNone = false;
    else if (Clobber == "unwind") {
      HasUnwindClobber = true;
      continue;
    } else if (Clobber != "cc") {
      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
      if (CGM.getCodeGenOpts().StackClashProtector &&
          getTarget().isSPRegName(Clobber)) {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::warn_stack_clash_protection_inline_asm);
      }
    }

    if (isa<MSAsmStmt>(&S)) {
      if (Clobber == "eax" || Clobber == "edx") {
        if (Constraints.find("=&A") != std::string::npos)
          continue;
        std::string::size_type position1 =
            Constraints.find("={" + Clobber.str() + "}");
        if (position1 != std::string::npos) {
          Constraints.insert(position1 + 1, "&");
          continue;
        }
        std::string::size_type position2 = Constraints.find("=A");
        if (position2 != std::string::npos) {
          Constraints.insert(position2 + 1, "&");
          continue;
        }
      }
    }
    if (!Constraints.empty())
      Constraints += ',';

    Constraints += "~{";
    Constraints += Clobber;
    Constraints += '}';
  }

  assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
         "unwind clobber can't be used with asm goto");

  // Add machine specific clobbers.
  std::string_view MachineClobbers = getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    if (!Constraints.empty())
      Constraints += ',';
    Constraints += MachineClobbers;
  }
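  // For example (a sketch): on x86, an asm with one register output, one
  // register input and a "memory" clobber ends up with a constraint string
  // roughly like
  //   "=r,r,~{memory},~{dirflag},~{fpsr},~{flags}"
  // where the trailing entries come from getClobbers() above.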
  llvm::Type *ResultType;
  if (ResultRegTypes.empty())
    ResultType = VoidTy;
  else if (ResultRegTypes.size() == 1)
    ResultType = ResultRegTypes[0];
  else
    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(ResultType, ArgTypes, false);

  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;

  llvm::InlineAsm::AsmDialect GnuAsmDialect =
      CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
          ? llvm::InlineAsm::AD_ATT
          : llvm::InlineAsm::AD_Intel;
  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
      llvm::InlineAsm::AD_Intel : GnuAsmDialect;

  llvm::InlineAsm *IA = llvm::InlineAsm::get(
      FTy, AsmString, Constraints, HasSideEffect,
      /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
  std::vector<llvm::Value*> RegResults;
  llvm::CallBrInst *CBR;
  llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
      CBRRegResults;
  if (IsGCCAsmGoto) {
    CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
    EmitBlock(Fallthrough);
    UpdateAsmCallInst(*CBR, HasSideEffect, false, ReadOnly, ReadNone,
                      InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
                      *this, RegResults);
    // Because we are emitting code top to bottom, we don't have enough
    // information at this point to know precisely whether we have a critical
    // edge. If we have outputs, split all indirect destinations.
    if (!RegResults.empty()) {
      unsigned i = 0;
      for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
        llvm::Twine SynthName = Dest->getName() + ".split";
        llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
        llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
        Builder.SetInsertPoint(SynthBB);

        if (ResultRegTypes.size() == 1) {
          CBRRegResults[SynthBB].push_back(CBR);
        } else {
          for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
            llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
            CBRRegResults[SynthBB].push_back(Tmp);
          }
        }

        EmitBranch(Dest);
        EmitBlock(SynthBB);
        CBR->setIndirectDest(i++, SynthBB);
      }
    }
  } else if (HasUnwindClobber) {
    llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
    UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
                      InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
                      *this, RegResults);
  } else {
    llvm::CallInst *Result =
        Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
    UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone,
                      InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
                      *this, RegResults);
  }

  EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
                ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
                ResultRegIsFlagReg);

  // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
  // different insertion point; one for each indirect destination and with
  // CBRRegResults rather than RegResults.
  if (IsGCCAsmGoto && !CBRRegResults.empty()) {
    for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
      llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
      Builder.SetInsertPoint(Succ, --(Succ->end()));
      EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
                    ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
                    ResultTypeRequiresCast, ResultRegIsFlagReg);
    }
  }
}
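// For example (a sketch): an asm goto with an output, such as
//   asm goto("..." : "=r"(v) :: : err);
// becomes a callbr whose indirect destination is split into an "err.split"
// block, so the stores of 'v' emitted above have a non-critical edge to land
// on for both the fallthrough and the indirect path.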
LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  QualType RecordTy = getContext().getRecordType(RD);

  // Initialize the captured struct.
  LValue SlotLV =
      MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);

  RecordDecl::field_iterator CurField = RD->field_begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField) {
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
    } else {
      EmitInitializerForField(*CurField, LV, *I);
    }
  }

  return SlotLV;
}
/// Generate an outlined function for the body of a CapturedStmt, store any
/// captured variables into the captured struct, and call the outlined function.
llvm::Function *
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
  LValue CapStruct = InitCapturedStruct(S);

  // Emit the CapturedDecl
  CodeGenFunction CGF(CGM, true);
  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
  delete CGF.CapturedStmtInfo;

  // Emit call to the helper function.
  EmitCallOrInvoke(F, CapStruct.getPointer(*this));

  return F;
}

Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
  LValue CapStruct = InitCapturedStruct(S);
  return CapStruct.getAddress(*this);
}
/// Creates the outlined function for a CapturedStmt.
llvm::Function *
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
  assert(CapturedStmtInfo &&
         "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  const RecordDecl *RD = S.getCapturedRecordDecl();
  SourceLocation Loc = S.getBeginLoc();
  assert(CD->hasBody() && "missing CapturedDecl body");

  // Build the argument list.
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(), CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  llvm::Function *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             CapturedStmtInfo->getHelperName(), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->addFnAttr(llvm::Attribute::NoUnwind);

  // Generate the function.
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
                CD->getBody()->getBeginLoc());
  // Set the context parameter in CapturedStmtInfo.
  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));

  // Initialize variable-length arrays.
  LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
                                           Ctx.getTagDeclType(RD));
  for (auto *FD : RD->fields()) {
    if (FD->hasCapturedVLAType()) {
      auto *ExprArg =
          EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
              .getScalarVal();
      auto VAT = FD->getCapturedVLAType();
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
    }
  }

  // If 'this' is captured, load it into CXXThisValue.
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
    LValue ThisLValue = EmitLValueForField(Base, FD);
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
  }

  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());

  return F;
}
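// For example (a sketch): for an OpenMP region such as
//   #pragma omp parallel
//   { x++; }
// the body is represented as a CapturedStmt; the helper built above receives
// a pointer to the "agg.captured" record holding the captured 'x', loads the
// context parameter, recomputes any captured VLA sizes, and then emits the
// body through CapturedStmtInfo->EmitBody().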