//===----- CGCoroutine.cpp - Emit LLVM Code for C++ coroutines ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of coroutines.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "llvm/ADT/ScopeExit.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtVisitor.h"

using namespace clang;
using namespace CodeGen;

using llvm::Value;
using llvm::BasicBlock;

namespace {
enum class AwaitKind { Init, Normal, Yield, Final };
static constexpr llvm::StringLiteral AwaitKindStr[] = {"init", "await", "yield",
                                                       "final"};
}
struct clang::CodeGen::CGCoroData {
  // The current await expression kind and how many await/yield expressions
  // have been encountered so far. These are used to generate pretty labels
  // for await expressions in LLVM IR.
  AwaitKind CurrentAwaitKind = AwaitKind::Init;
  unsigned AwaitNum = 0;
  unsigned YieldNum = 0;

  // How many co_return statements are in the coroutine. Used to decide whether
  // we need to add the equivalent of "co_return;" at the end of the
  // user-authored body.
  unsigned CoreturnCount = 0;

  // A branch to this block is emitted when the coroutine needs to suspend.
  llvm::BasicBlock *SuspendBB = nullptr;

  // The promise type's 'unhandled_exception' handler, if it defines one.
  Stmt *ExceptionHandler = nullptr;

  // A temporary i1 alloca that stores whether 'await_resume' threw an
  // exception. If it did, 'true' is stored in this variable, and the coroutine
  // body must be skipped. If the promise type does not define an exception
  // handler, this is null.
  llvm::Value *ResumeEHVar = nullptr;

  // Stores the jump destination just before the coroutine memory is freed.
  // This is the destination that every suspend point jumps to for the cleanup
  // branch.
  CodeGenFunction::JumpDest CleanupJD;

  // Stores the jump destination just before the final suspend. The co_return
  // statements jump to this point after calling the return_xxx promise member.
  CodeGenFunction::JumpDest FinalJD;

  // Stores the llvm.coro.id emitted in the function so that we can supply it
  // as the first argument to the coro.begin, coro.alloc and coro.free
  // intrinsics. Note: llvm.coro.id returns a token that cannot be directly
  // expressed in a builtin.
  llvm::CallInst *CoroId = nullptr;

  // Stores the llvm.coro.begin emitted in the function so that we can replace
  // all coro.frame intrinsics with the direct SSA value of coro.begin, which
  // returns the address of the coroutine frame of the current coroutine.
  llvm::CallInst *CoroBegin = nullptr;

  // Stores the last emitted coro.free for the deallocate expressions; we use
  // it to wrap the dealloc code with "if (auto mem = coro.free) dealloc(mem)".
  llvm::CallInst *LastCoroFree = nullptr;

  // If coro.id came from the builtin, remember the expression to give a better
  // diagnostic. If CoroIdExpr is nullptr, the coro.id was created by
  // EmitCoroutineBody.
  CallExpr const *CoroIdExpr = nullptr;
};

// Defining these here allows us to keep CGCoroData private to this file.
clang::CodeGen::CodeGenFunction::CGCoroInfo::CGCoroInfo() {}
CodeGenFunction::CGCoroInfo::~CGCoroInfo() {}
static void createCoroData(CodeGenFunction &CGF,
                           CodeGenFunction::CGCoroInfo &CurCoro,
                           llvm::CallInst *CoroId,
                           CallExpr const *CoroIdExpr = nullptr) {
  if (CurCoro.Data) {
    if (CurCoro.Data->CoroIdExpr)
      CGF.CGM.Error(CoroIdExpr->getBeginLoc(),
                    "only one __builtin_coro_id can be used in a function");
    else if (CoroIdExpr)
      CGF.CGM.Error(CoroIdExpr->getBeginLoc(),
                    "__builtin_coro_id shall not be used in a C++ coroutine");
    else
      llvm_unreachable("EmitCoroutineBodyStatement called twice?");

    return;
  }

  CurCoro.Data = std::unique_ptr<CGCoroData>(new CGCoroData);
  CurCoro.Data->CoroId = CoroId;
  CurCoro.Data->CoroIdExpr = CoroIdExpr;
}

// Synthesize a pretty name for a suspend point.
static SmallString<32> buildSuspendPrefixStr(CGCoroData &Coro, AwaitKind Kind) {
  unsigned No = 0;
  switch (Kind) {
  case AwaitKind::Init:
  case AwaitKind::Final:
    break;
  case AwaitKind::Normal:
    No = ++Coro.AwaitNum;
    break;
  case AwaitKind::Yield:
    No = ++Coro.YieldNum;
    break;
  }
  SmallString<32> Prefix(AwaitKindStr[static_cast<unsigned>(Kind)]);
  if (No > 1) {
    Twine(No).toVector(Prefix);
  }
  return Prefix;
}
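// For illustration only (not part of the original source): the first plain
// co_await in a coroutine gets the prefix "await", the second "await2", and so
// on, so emitSuspendExpression() below creates basic blocks named roughly
//
//   await.ready  / await.suspend  / await.cleanup      ; first co_await
//   await2.ready / await2.suspend / await2.cleanup     ; second co_await
//
// with "yield", "yield2", ... used for co_yield, and "init"/"final" for the
// implicit initial and final suspend points.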
static bool memberCallExpressionCanThrow(const Expr *E) {
  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
    if (const auto *Proto =
            CE->getMethodDecl()->getType()->getAs<FunctionProtoType>())
      if (isNoexceptExceptionSpec(Proto->getExceptionSpecType()) &&
          Proto->canThrow() == CT_Cannot)
        return false;
  return true;
}
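// Illustrative example (not from this file): for an awaiter whose resume
// member is declared non-throwing, e.g.
//
//   struct Awaiter {
//     // ...
//     int await_resume() noexcept;
//   };
//
// the call `x.await_resume()` is classified as CT_Cannot above, so the
// exception-tracking IR around the initial suspend can be skipped.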
/// Return true when the coroutine handle may escape from the await-suspend
/// (`awaiter.await_suspend(std::coroutine_handle)` expression).
/// Return false only when the coroutine handle is certain not to escape from
/// the await-suspend.
///
/// While it is always safe to return true, returning false can enable better
/// optimization.
///
/// See https://github.com/llvm/llvm-project/issues/56301 and
/// https://reviews.llvm.org/D157070 for the example and the full discussion.
///
/// FIXME: It would be much better to perform such analysis in the middle end.
/// See the comments in `CodeGenFunction::EmitCall` for example.
static bool MayCoroHandleEscape(CoroutineSuspendExpr const &S) {
  CXXRecordDecl *Awaiter =
      S.getCommonExpr()->getType().getNonReferenceType()->getAsCXXRecordDecl();

  // Return true conservatively if the awaiter type is not a record type.
  if (!Awaiter)
    return true;

  // In case the awaiter type is empty, the suspend wouldn't leak the coroutine
  // handle.
  //
  // TODO: We can improve this by looking into the implementation of
  // await-suspend and seeing whether the coroutine handle is passed to foreign
  // functions.
  return !Awaiter->field_empty();
}
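// Illustrative example (not from this file): an awaiter with no non-static
// data members, such as
//
//   struct suspend_always {
//     bool await_ready() const noexcept { return false; }
//     void await_suspend(std::coroutine_handle<>) const noexcept {}
//     void await_resume() const noexcept {}
//   };
//
// has nowhere to stash the handle, so MayCoroHandleEscape returns false. An
// awaiter that copies the handle into a member (e.g. to resume the coroutine
// later from another thread) is conservatively treated as escaping.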
// Emit a suspend expression which roughly looks like:
//
//   auto && x = CommonExpr();
//   if (!x.await_ready()) {
//      llvm_coro_save();
//      x.await_suspend(...);     (*)
//      llvm_coro_suspend();      (**)
//   }
//   x.await_resume();
//
// where the result of the entire expression is the result of x.await_resume()
//
//   (*) If the return type of x.await_suspend is bool, it allows vetoing a
//       suspend:
//      if (x.await_suspend(...))
//        llvm_coro_suspend();
//
//  (**) llvm_coro_suspend() encodes three possible continuations as
//       a switch instruction:
//
//  %where-to = call i8 @llvm.coro.suspend(...)
//  switch i8 %where-to, label %coro.ret [ ; jump to epilogue to suspend
//    i8 0, label %yield.ready   ; go here when resumed
//    i8 1, label %yield.cleanup ; go here when destroyed
//  ]
//
// See llvm's docs/Coroutines.rst for more details.
//
namespace {
struct LValueOrRValue {
  LValue LV;
  RValue RV;
};
} // namespace
static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Coro,
                                            CoroutineSuspendExpr const &S,
                                            AwaitKind Kind, AggValueSlot aggSlot,
                                            bool ignoreResult, bool forLValue) {
  auto *E = S.getCommonExpr();

  auto Binder =
      CodeGenFunction::OpaqueValueMappingData::bind(CGF, S.getOpaqueValue(), E);
  auto UnbindOnExit = llvm::make_scope_exit([&] { Binder.unbind(CGF); });

  auto Prefix = buildSuspendPrefixStr(Coro, Kind);
  BasicBlock *ReadyBlock = CGF.createBasicBlock(Prefix + Twine(".ready"));
  BasicBlock *SuspendBlock = CGF.createBasicBlock(Prefix + Twine(".suspend"));
  BasicBlock *CleanupBlock = CGF.createBasicBlock(Prefix + Twine(".cleanup"));

  // If the expression is ready, no need to suspend.
  CGF.EmitBranchOnBoolExpr(S.getReadyExpr(), ReadyBlock, SuspendBlock, 0);

  // Otherwise, emit suspend logic.
  CGF.EmitBlock(SuspendBlock);

  auto &Builder = CGF.Builder;
  llvm::Function *CoroSave = CGF.CGM.getIntrinsic(llvm::Intrinsic::coro_save);
  auto *NullPtr = llvm::ConstantPointerNull::get(CGF.CGM.Int8PtrTy);
  auto *SaveCall = Builder.CreateCall(CoroSave, {NullPtr});

  CGF.CurCoro.InSuspendBlock = true;
  CGF.CurCoro.MayCoroHandleEscape = MayCoroHandleEscape(S);
  auto *SuspendRet = CGF.EmitScalarExpr(S.getSuspendExpr());
  CGF.CurCoro.InSuspendBlock = false;
  CGF.CurCoro.MayCoroHandleEscape = false;

  if (SuspendRet != nullptr && SuspendRet->getType()->isIntegerTy(1)) {
    // Veto suspension if requested by a bool-returning await_suspend.
    BasicBlock *RealSuspendBlock =
        CGF.createBasicBlock(Prefix + Twine(".suspend.bool"));
    CGF.Builder.CreateCondBr(SuspendRet, RealSuspendBlock, ReadyBlock);
    CGF.EmitBlock(RealSuspendBlock);
  }

  // Emit the suspend point.
  const bool IsFinalSuspend = (Kind == AwaitKind::Final);
  llvm::Function *CoroSuspend =
      CGF.CGM.getIntrinsic(llvm::Intrinsic::coro_suspend);
  auto *SuspendResult = Builder.CreateCall(
      CoroSuspend, {SaveCall, Builder.getInt1(IsFinalSuspend)});

  // Create a switch capturing three possible continuations.
  auto *Switch = Builder.CreateSwitch(SuspendResult, Coro.SuspendBB, 2);
  Switch->addCase(Builder.getInt8(0), ReadyBlock);
  Switch->addCase(Builder.getInt8(1), CleanupBlock);

  // Emit cleanup for this suspend point.
  CGF.EmitBlock(CleanupBlock);
  CGF.EmitBranchThroughCleanup(Coro.CleanupJD);

  // Emit the await_resume expression.
  CGF.EmitBlock(ReadyBlock);

  // Exception handling requires additional IR. If the 'await_resume' function
  // is marked as 'noexcept', we avoid generating this additional IR.
  CXXTryStmt *TryStmt = nullptr;
  if (Coro.ExceptionHandler && Kind == AwaitKind::Init &&
      memberCallExpressionCanThrow(S.getResumeExpr())) {
    Coro.ResumeEHVar =
        CGF.CreateTempAlloca(Builder.getInt1Ty(), Prefix + Twine("resume.eh"));
    Builder.CreateFlagStore(true, Coro.ResumeEHVar);

    auto Loc = S.getResumeExpr()->getExprLoc();
    auto *Catch = new (CGF.getContext())
        CXXCatchStmt(Loc, /*exDecl=*/nullptr, Coro.ExceptionHandler);
    auto *TryBody = CompoundStmt::Create(CGF.getContext(), S.getResumeExpr(),
                                         FPOptionsOverride(), Loc, Loc);
    TryStmt = CXXTryStmt::Create(CGF.getContext(), Loc, TryBody, Catch);
    CGF.EnterCXXTryStmt(*TryStmt);
  }

  LValueOrRValue Res;
  if (forLValue)
    Res.LV = CGF.EmitLValue(S.getResumeExpr());
  else
    Res.RV = CGF.EmitAnyExpr(S.getResumeExpr(), aggSlot, ignoreResult);

  if (TryStmt) {
    Builder.CreateFlagStore(false, Coro.ResumeEHVar);
    CGF.ExitCXXTryStmt(*TryStmt);
  }

  return Res;
}
RValue CodeGenFunction::EmitCoawaitExpr(const CoawaitExpr &E,
                                        AggValueSlot aggSlot,
                                        bool ignoreResult) {
  return emitSuspendExpression(*this, *CurCoro.Data, E,
                               CurCoro.Data->CurrentAwaitKind, aggSlot,
                               ignoreResult, /*forLValue*/false).RV;
}

RValue CodeGenFunction::EmitCoyieldExpr(const CoyieldExpr &E,
                                        AggValueSlot aggSlot,
                                        bool ignoreResult) {
  return emitSuspendExpression(*this, *CurCoro.Data, E, AwaitKind::Yield,
                               aggSlot, ignoreResult, /*forLValue*/false).RV;
}
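// Note (illustrative, not from this file): per [expr.yield], a statement like
//
//   co_yield 42;
//
// is defined in terms of `co_await promise.yield_value(42)`, which is why
// EmitCoyieldExpr reuses the same suspend-expression emission as co_await and
// differs only in the AwaitKind used for the block-label prefix.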
void CodeGenFunction::EmitCoreturnStmt(CoreturnStmt const &S) {
  ++CurCoro.Data->CoreturnCount;
  const Expr *RV = S.getOperand();
  if (RV && RV->getType()->isVoidType() && !isa<InitListExpr>(RV)) {
    // Make sure to evaluate the non-initlist void operand of a co_return
    // for its side effects.
    RunCleanupsScope cleanupScope(*this);
    EmitIgnoredExpr(RV);
  }
  EmitStmt(S.getPromiseCall());
  EmitBranchThroughCleanup(CurCoro.Data->FinalJD);
}
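// Illustrative example (not from this file): for a statement such as
//
//   co_return compute();
//
// Sema has already built the promise call, so getPromiseCall() is roughly
// `__promise.return_value(compute())` (or `__promise.return_void()` for a
// plain `co_return;`). The code above emits that call and then branches to
// FinalJD, i.e. to the final suspend point, running pending cleanups on the
// way.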
#ifndef NDEBUG
static QualType getCoroutineSuspendExprReturnType(const ASTContext &Ctx,
                                                  const CoroutineSuspendExpr *E) {
  const auto *RE = E->getResumeExpr();
  // Is it possible for RE to be a CXXBindTemporaryExpr wrapping
  // a MemberCallExpr?
  assert(isa<CallExpr>(RE) && "unexpected suspend expression type");
  return cast<CallExpr>(RE)->getCallReturnType(Ctx);
}
#endif

LValue
CodeGenFunction::EmitCoawaitLValue(const CoawaitExpr *E) {
  assert(getCoroutineSuspendExprReturnType(getContext(), E)->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");
  return emitSuspendExpression(*this, *CurCoro.Data, *E,
                               CurCoro.Data->CurrentAwaitKind, AggValueSlot::ignored(),
                               /*ignoreResult*/false, /*forLValue*/true).LV;
}

LValue
CodeGenFunction::EmitCoyieldLValue(const CoyieldExpr *E) {
  assert(getCoroutineSuspendExprReturnType(getContext(), E)->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");
  return emitSuspendExpression(*this, *CurCoro.Data, *E,
                               AwaitKind::Yield, AggValueSlot::ignored(),
                               /*ignoreResult*/false, /*forLValue*/true).LV;
}
// Hunts for the parameter reference in the parameter copy/move declaration.
namespace {
struct GetParamRef : public StmtVisitor<GetParamRef> {
public:
  DeclRefExpr *Expr = nullptr;
  GetParamRef() {}
  void VisitDeclRefExpr(DeclRefExpr *E) {
    assert(Expr == nullptr && "multiple declrefs in param move");
    Expr = E;
  }
  void VisitStmt(Stmt *S) {
    for (auto *C : S->children()) {
      if (C)
        Visit(C);
    }
  }
};
} // namespace

// This class replaces references to parameters with references to their
// copies by changing the addresses in CGF.LocalDeclMap, and restores the
// original values in its destructor.
namespace {
struct ParamReferenceReplacerRAII {
  CodeGenFunction::DeclMapTy SavedLocals;
  CodeGenFunction::DeclMapTy& LocalDeclMap;

  ParamReferenceReplacerRAII(CodeGenFunction::DeclMapTy &LocalDeclMap)
      : LocalDeclMap(LocalDeclMap) {}

  void addCopy(DeclStmt const *PM) {
    // Figure out which parameter it refers to.

    assert(PM->isSingleDecl());
    VarDecl const *VD = static_cast<VarDecl const *>(PM->getSingleDecl());
    Expr const *InitExpr = VD->getInit();
    GetParamRef Visitor;
    Visitor.Visit(const_cast<Expr*>(InitExpr));
    assert(Visitor.Expr);
    DeclRefExpr *DREOrig = Visitor.Expr;
    auto *PD = DREOrig->getDecl();

    auto it = LocalDeclMap.find(PD);
    assert(it != LocalDeclMap.end() && "parameter is not found");
    SavedLocals.insert({ PD, it->second });

    auto copyIt = LocalDeclMap.find(VD);
    assert(copyIt != LocalDeclMap.end() && "parameter copy is not found");
    it->second = copyIt->getSecond();
  }

  ~ParamReferenceReplacerRAII() {
    for (auto&& SavedLocal : SavedLocals) {
      LocalDeclMap.insert({SavedLocal.first, SavedLocal.second});
    }
  }
};
} // namespace
// For the WinEH exception representation, the backend needs to know which
// funclet coro.end belongs to. That information is passed in a funclet bundle.
static SmallVector<llvm::OperandBundleDef, 1>
getBundlesForCoroEnd(CodeGenFunction &CGF) {
  SmallVector<llvm::OperandBundleDef, 1> BundleList;

  if (llvm::Instruction *EHPad = CGF.CurrentFuncletPad)
    BundleList.emplace_back("funclet", EHPad);

  return BundleList;
}

namespace {
// We insert coro.end to cut off any of the destructors for objects that
// do not need to be destroyed once the coroutine is resumed.
// See llvm/docs/Coroutines.rst for more details about coro.end.
struct CallCoroEnd final : public EHScopeStack::Cleanup {
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    auto &CGM = CGF.CGM;
    auto *NullPtr = llvm::ConstantPointerNull::get(CGF.Int8PtrTy);
    llvm::Function *CoroEndFn = CGM.getIntrinsic(llvm::Intrinsic::coro_end);
    // See if we have a funclet bundle to associate coro.end with. (WinEH)
    auto Bundles = getBundlesForCoroEnd(CGF);
    auto *CoroEnd = CGF.Builder.CreateCall(
        CoroEndFn, {NullPtr, CGF.Builder.getTrue()}, Bundles);
    if (Bundles.empty()) {
      // Otherwise (landingpad model), create a conditional branch that leads
      // either to a cleanup block or to a block with the EH resume instruction.
      auto *ResumeBB = CGF.getEHResumeBlock(/*isCleanup=*/true);
      auto *CleanupContBB = CGF.createBasicBlock("cleanup.cont");
      CGF.Builder.CreateCondBr(CoroEnd, ResumeBB, CleanupContBB);
      CGF.EmitBlock(CleanupContBB);
    }
  }
};
} // namespace
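// Illustrative sketch (not verbatim output): with no funclet bundle (the
// landing-pad model), the cleanup above emits roughly
//
//   %unwound = call i1 @llvm.coro.end(ptr null, i1 true)
//   br i1 %unwound, label %eh.resume, label %cleanup.cont
//
// i.e. the i1 result of coro.end decides whether exception propagation is
// resumed or the normal cleanup code continues.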
namespace {
// Make sure to call coro.delete on scope exit.
struct CallCoroDelete final : public EHScopeStack::Cleanup {
  Stmt *Deallocate;

  // Emit "if (coro.free(CoroId, CoroBegin)) Deallocate;"
  //
  // Note: The deallocation will be emitted twice: once for a normal exit and
  // once for an exceptional exit. This usage is safe because Deallocate does
  // not contain any declarations. The SubStmtBuilder::makeNewAndDeleteExpr()
  // builds a single call to a deallocation function which is safe to emit
  // multiple times.
  void Emit(CodeGenFunction &CGF, Flags) override {
    // Remember the current point, as we are going to emit the deallocation
    // code first in order to get to the coro.free instruction that is an
    // argument to the delete call.
    BasicBlock *SaveInsertBlock = CGF.Builder.GetInsertBlock();

    auto *FreeBB = CGF.createBasicBlock("coro.free");
    CGF.EmitBlock(FreeBB);
    CGF.EmitStmt(Deallocate);

    auto *AfterFreeBB = CGF.createBasicBlock("after.coro.free");
    CGF.EmitBlock(AfterFreeBB);

    // We should have captured coro.free from the emission of deallocate.
    auto *CoroFree = CGF.CurCoro.Data->LastCoroFree;
    if (!CoroFree) {
      CGF.CGM.Error(Deallocate->getBeginLoc(),
                    "Deallocation expression does not refer to coro.free");
      return;
    }

    // Get back to the block we were originally in and move coro.free there.
    auto *InsertPt = SaveInsertBlock->getTerminator();
    CoroFree->moveBefore(InsertPt);
    CGF.Builder.SetInsertPoint(InsertPt);

    // Add "if (auto *mem = coro.free) Deallocate;"
    auto *NullPtr = llvm::ConstantPointerNull::get(CGF.Int8PtrTy);
    auto *Cond = CGF.Builder.CreateICmpNE(CoroFree, NullPtr);
    CGF.Builder.CreateCondBr(Cond, FreeBB, AfterFreeBB);

    // No longer need the old terminator.
    InsertPt->eraseFromParent();
    CGF.Builder.SetInsertPoint(AfterFreeBB);
  }
  explicit CallCoroDelete(Stmt *DeallocStmt) : Deallocate(DeallocStmt) {}
};
} // namespace
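// Illustrative sketch (not verbatim output): after CallCoroDelete::Emit has
// rewired the blocks, the conditional deallocation looks roughly like
//
//   %mem = call ptr @llvm.coro.free(token %id, ptr %hdl)
//   %cond = icmp ne ptr %mem, null
//   br i1 %cond, label %coro.free, label %after.coro.free
//
// where %coro.free calls the deallocation function on %mem and falls through
// to %after.coro.free. If the frame allocation is later elided, coro.free
// lowers to null and the deallocation is skipped.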
namespace {
struct GetReturnObjectManager {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  const CoroutineBodyStmt &S;
  // When true, performs RVO for the return object.
  bool DirectEmit = false;

  Address GroActiveFlag;
  CodeGenFunction::AutoVarEmission GroEmission;

  GetReturnObjectManager(CodeGenFunction &CGF, const CoroutineBodyStmt &S)
      : CGF(CGF), Builder(CGF.Builder), S(S), GroActiveFlag(Address::invalid()),
        GroEmission(CodeGenFunction::AutoVarEmission::invalid()) {
    // The call to get_return_object is sequenced before the call to
    // initial_suspend and is invoked at most once, but there are caveats
    // regarding whether the prvalue result object may be initialized
    // directly/eagerly or in a delayed fashion, depending on the types
    // involved.
    //
    // More info at https://github.com/cplusplus/papers/issues/1414
    //
    // The general cases:
    // 1. get_return_object has the same type as the coroutine return type
    //    (direct emission):
    //    - Constructed in the return slot.
    // 2. Different types (delayed emission):
    //    - A temporary object is constructed prior to the initial suspend and
    //      initialized with a call to get_return_object().
    //    - When the coroutine needs to return to the caller and construct the
    //      return value, that value is initialized with the expiring value of
    //      the temporary obtained above.
    //
    // Direct emission for void-returning coroutines or GROs.
    DirectEmit = [&]() {
      auto *RVI = S.getReturnValueInit();
      assert(RVI && "expected RVI");
      auto GroType = RVI->getType();
      return CGF.getContext().hasSameType(GroType, CGF.FnRetTy);
    }();
  }
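  // Illustrative example (not from this file): with
  //
  //   struct task {
  //     struct promise_type {
  //       task get_return_object();
  //       // ...
  //     };
  //   };
  //   task f() { co_return; }
  //
  // get_return_object() already has type `task`, the same as f's return type,
  // so DirectEmit is true and the result is constructed directly in the return
  // slot. If get_return_object() instead returned some hypothetical
  // `task_builder` convertible to `task`, DirectEmit would be false: the
  // builder is materialized before initial_suspend and the actual return value
  // is constructed from it only when f first returns to its caller.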
  // The gro variable has to outlive the coroutine frame and coroutine promise,
  // but it can only be initialized after the coroutine promise has been
  // created; thus, we split its emission into two parts. EmitGroAlloca emits
  // an alloca and sets up the cleanups. Later, when the coroutine promise is
  // available, we initialize the gro and set the flag that the cleanup is now
  // active.
  void EmitGroAlloca() {
    if (DirectEmit)
      return;

    auto *GroDeclStmt = dyn_cast_or_null<DeclStmt>(S.getResultDecl());
    if (!GroDeclStmt) {
      // If get_return_object returns void, no need to do an alloca.
      return;
    }

    auto *GroVarDecl = cast<VarDecl>(GroDeclStmt->getSingleDecl());

    // Set the GRO flag to indicate that it is not initialized yet.
    GroActiveFlag = CGF.CreateTempAlloca(Builder.getInt1Ty(), CharUnits::One(),
                                         "gro.active");
    Builder.CreateStore(Builder.getFalse(), GroActiveFlag);

    GroEmission = CGF.EmitAutoVarAlloca(*GroVarDecl);

    // Remember the top of EHStack before emitting the cleanup.
    auto old_top = CGF.EHStack.stable_begin();
    CGF.EmitAutoVarCleanups(GroEmission);
    auto top = CGF.EHStack.stable_begin();

    // Make the cleanup conditional on gro.active.
    for (auto b = CGF.EHStack.find(top), e = CGF.EHStack.find(old_top); b != e;
         b++) {
      if (auto *Cleanup = dyn_cast<EHCleanupScope>(&*b)) {
        assert(!Cleanup->hasActiveFlag() && "cleanup already has active flag?");
        Cleanup->setActiveFlag(GroActiveFlag);
        Cleanup->setTestFlagInEHCleanup();
        Cleanup->setTestFlagInNormalCleanup();
      }
    }
  }

  void EmitGroInit() {
    if (DirectEmit) {
      // ReturnValue should be valid as long as the coroutine's return type
      // is not void. The assertion could help us to reduce the check later.
      assert(CGF.ReturnValue.isValid() == (bool)S.getReturnStmt());
      // Now we have the promise, initialize the GRO.
      // We need to emit `get_return_object` first. According to
      // [dcl.fct.def.coroutine]p7:
      //   The call to get_return_object is sequenced before the call to
      //   initial_suspend and is invoked at most once.
      //
      // So we cannot emit the return value when we emit the return statement;
      // otherwise the call to get_return_object would not come before
      // initial_suspend.
      if (CGF.ReturnValue.isValid()) {
        CGF.EmitAnyExprToMem(S.getReturnValue(), CGF.ReturnValue,
                             S.getReturnValue()->getType().getQualifiers(),
                             /*IsInit*/ true);
      }
      return;
    }

    if (!GroActiveFlag.isValid()) {
      // No Gro variable was allocated. Simply emit the call to
      // get_return_object.
      CGF.EmitStmt(S.getResultDecl());
      return;
    }

    CGF.EmitAutoVarInit(GroEmission);
    Builder.CreateStore(Builder.getTrue(), GroActiveFlag);
  }
};
} // namespace
static void emitBodyAndFallthrough(CodeGenFunction &CGF,
                                   const CoroutineBodyStmt &S, Stmt *Body) {
  CGF.EmitStmt(Body);
  const bool CanFallthrough = CGF.Builder.GetInsertBlock();
  if (CanFallthrough)
    if (Stmt *OnFallthrough = S.getFallthroughHandler())
      CGF.EmitStmt(OnFallthrough);
}
void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
  auto *NullPtr = llvm::ConstantPointerNull::get(Builder.getPtrTy());
  auto &TI = CGM.getContext().getTargetInfo();
  unsigned NewAlign = TI.getNewAlign() / TI.getCharWidth();

  auto *EntryBB = Builder.GetInsertBlock();
  auto *AllocBB = createBasicBlock("coro.alloc");
  auto *InitBB = createBasicBlock("coro.init");
  auto *FinalBB = createBasicBlock("coro.final");
  auto *RetBB = createBasicBlock("coro.ret");

  auto *CoroId = Builder.CreateCall(
      CGM.getIntrinsic(llvm::Intrinsic::coro_id),
      {Builder.getInt32(NewAlign), NullPtr, NullPtr, NullPtr});
  createCoroData(*this, CurCoro, CoroId);
  CurCoro.Data->SuspendBB = RetBB;
  assert(ShouldEmitLifetimeMarkers &&
         "Must emit lifetime intrinsics for coroutines");

  // The backend is allowed to elide memory allocations; to help it, emit
  //   auto mem = coro.alloc() ? 0 : ... allocation code ...;
  auto *CoroAlloc = Builder.CreateCall(
      CGM.getIntrinsic(llvm::Intrinsic::coro_alloc), {CoroId});

  Builder.CreateCondBr(CoroAlloc, AllocBB, InitBB);

  EmitBlock(AllocBB);
  auto *AllocateCall = EmitScalarExpr(S.getAllocate());
  auto *AllocOrInvokeContBB = Builder.GetInsertBlock();

  // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided.
  if (auto *RetOnAllocFailure = S.getReturnStmtOnAllocFailure()) {
    auto *RetOnFailureBB = createBasicBlock("coro.ret.on.failure");

    // See if the allocation was successful.
    auto *NullPtr = llvm::ConstantPointerNull::get(Int8PtrTy);
    auto *Cond = Builder.CreateICmpNE(AllocateCall, NullPtr);
    // Expect the allocation to be successful.
    emitCondLikelihoodViaExpectIntrinsic(Cond, Stmt::LH_Likely);
    Builder.CreateCondBr(Cond, InitBB, RetOnFailureBB);

    // If not, return the OnAllocFailure object.
    EmitBlock(RetOnFailureBB);
    EmitStmt(RetOnAllocFailure);
  }
  else {
    Builder.CreateBr(InitBB);
  }

  EmitBlock(InitBB);

  // Pass the result of the allocation to coro.begin.
  auto *Phi = Builder.CreatePHI(VoidPtrTy, 2);
  Phi->addIncoming(NullPtr, EntryBB);
  Phi->addIncoming(AllocateCall, AllocOrInvokeContBB);
  auto *CoroBegin = Builder.CreateCall(
      CGM.getIntrinsic(llvm::Intrinsic::coro_begin), {CoroId, Phi});
  CurCoro.Data->CoroBegin = CoroBegin;

  GetReturnObjectManager GroManager(*this, S);
  GroManager.EmitGroAlloca();

  CurCoro.Data->CleanupJD = getJumpDestInCurrentScope(RetBB);
  {
    CGDebugInfo *DI = getDebugInfo();
    ParamReferenceReplacerRAII ParamReplacer(LocalDeclMap);
    CodeGenFunction::RunCleanupsScope ResumeScope(*this);
    EHStack.pushCleanup<CallCoroDelete>(NormalAndEHCleanup, S.getDeallocate());

    // Create a mapping between parameters and copy-params for the coroutine
    // function.
    llvm::ArrayRef<const Stmt *> ParamMoves = S.getParamMoves();
    assert(
        (ParamMoves.size() == 0 || (ParamMoves.size() == FnArgs.size())) &&
        "ParamMoves and FnArgs should be the same size for coroutine function");
    if (ParamMoves.size() == FnArgs.size() && DI)
      for (const auto Pair : llvm::zip(FnArgs, ParamMoves))
        DI->getCoroutineParameterMappings().insert(
            {std::get<0>(Pair), std::get<1>(Pair)});

    // Create parameter copies. We do it before creating a promise, since an
    // evolution of the coroutine TS may allow the promise constructor to
    // observe parameter copies.
    for (auto *PM : S.getParamMoves()) {
      EmitStmt(PM);
      ParamReplacer.addCopy(cast<DeclStmt>(PM));
      // TODO: if(CoroParam(...)) need to surround ctor and dtor
      // for the copy, so that llvm can elide it if the copy is
      // not needed.
    }

    EmitStmt(S.getPromiseDeclStmt());

    Address PromiseAddr = GetAddrOfLocalVar(S.getPromiseDecl());
    auto *PromiseAddrVoidPtr =
        new llvm::BitCastInst(PromiseAddr.getPointer(), VoidPtrTy, "", CoroId);
    // Update CoroId to refer to the promise. We could not do it earlier
    // because the promise local variable had not been emitted yet.
    CoroId->setArgOperand(1, PromiseAddrVoidPtr);

    // Now we have the promise, initialize the GRO.
    GroManager.EmitGroInit();

    EHStack.pushCleanup<CallCoroEnd>(EHCleanup);

    CurCoro.Data->CurrentAwaitKind = AwaitKind::Init;
    CurCoro.Data->ExceptionHandler = S.getExceptionHandler();
    EmitStmt(S.getInitSuspendStmt());
    CurCoro.Data->FinalJD = getJumpDestInCurrentScope(FinalBB);

    CurCoro.Data->CurrentAwaitKind = AwaitKind::Normal;

    if (CurCoro.Data->ExceptionHandler) {
      // If we generated IR to record whether an exception was thrown from
      // 'await_resume', then use that IR to determine whether the coroutine
      // body should be skipped.
      // If we didn't generate the IR (perhaps because 'await_resume' was
      // marked as 'noexcept'), then we skip this check.
      BasicBlock *ContBB = nullptr;
      if (CurCoro.Data->ResumeEHVar) {
        BasicBlock *BodyBB = createBasicBlock("coro.resumed.body");
        ContBB = createBasicBlock("coro.resumed.cont");
        Value *SkipBody = Builder.CreateFlagLoad(CurCoro.Data->ResumeEHVar,
                                                 "coro.resumed.eh");
        Builder.CreateCondBr(SkipBody, ContBB, BodyBB);
        EmitBlock(BodyBB);
      }

      auto Loc = S.getBeginLoc();
      CXXCatchStmt Catch(Loc, /*exDecl=*/nullptr,
                         CurCoro.Data->ExceptionHandler);
      auto *TryStmt =
          CXXTryStmt::Create(getContext(), Loc, S.getBody(), &Catch);

      EnterCXXTryStmt(*TryStmt);
      emitBodyAndFallthrough(*this, S, TryStmt->getTryBlock());
      ExitCXXTryStmt(*TryStmt);

      if (ContBB)
        EmitBlock(ContBB);
    }
    else {
      emitBodyAndFallthrough(*this, S, S.getBody());
    }

    // See if we need to generate the final suspend.
    const bool CanFallthrough = Builder.GetInsertBlock();
    const bool HasCoreturns = CurCoro.Data->CoreturnCount > 0;
    if (CanFallthrough || HasCoreturns) {
      EmitBlock(FinalBB);
      CurCoro.Data->CurrentAwaitKind = AwaitKind::Final;
      EmitStmt(S.getFinalSuspendStmt());
    } else {
      // We don't need FinalBB. Emit it to make sure the block is deleted.
      EmitBlock(FinalBB, /*IsFinished=*/true);
    }
  }

  EmitBlock(RetBB);
  // Emit coro.end before getReturnStmt (and parameter destructors), since
  // resume and destroy parts of the coroutine should not include them.
  llvm::Function *CoroEnd = CGM.getIntrinsic(llvm::Intrinsic::coro_end);
  Builder.CreateCall(CoroEnd, {NullPtr, Builder.getFalse()});

  if (Stmt *Ret = S.getReturnStmt()) {
    // Since we already emitted the return value above, we shouldn't
    // emit it again here.
    if (GroManager.DirectEmit)
      cast<ReturnStmt>(Ret)->setRetValue(nullptr);
    EmitStmt(Ret);
  }

  // LLVM requires the frontend to mark the coroutine.
  CurFn->setPresplitCoroutine();
}
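// Illustrative sketch (not verbatim output) of the overall layout that
// EmitCoroutineBody produces:
//
//   entry:       %id = llvm.coro.id(...) ; %need = llvm.coro.alloc(%id)
//                br i1 %need, label %coro.alloc, label %coro.init
//   coro.alloc:  %mem = <call to the selected allocation function>
//   coro.init:   %hdl = llvm.coro.begin(%id, phi [null, %mem])
//                parameter copies, promise, get_return_object, initial suspend
//                ... user-authored body ...
//   coro.final:  final suspend
//   coro.ret:    llvm.coro.end(null, false) followed by the return of the
//                get-return-object value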
// Emit a coroutine intrinsic and patch up the arguments of token type.
RValue CodeGenFunction::EmitCoroutineIntrinsic(const CallExpr *E,
                                               unsigned int IID) {
  SmallVector<llvm::Value *, 8> Args;
  switch (IID) {
  default:
    break;
  // The coro.frame builtin is replaced with an SSA value of the coro.begin
  // intrinsic.
  case llvm::Intrinsic::coro_frame: {
    if (CurCoro.Data && CurCoro.Data->CoroBegin) {
      return RValue::get(CurCoro.Data->CoroBegin);
    }
    CGM.Error(E->getBeginLoc(), "this builtin expects that __builtin_coro_begin "
                                "has been used earlier in this function");
    auto *NullPtr = llvm::ConstantPointerNull::get(Builder.getPtrTy());
    return RValue::get(NullPtr);
  }
  case llvm::Intrinsic::coro_size: {
    auto &Context = getContext();
    CanQualType SizeTy = Context.getSizeType();
    llvm::IntegerType *T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
    llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::coro_size, T);
    return RValue::get(Builder.CreateCall(F));
  }
  case llvm::Intrinsic::coro_align: {
    auto &Context = getContext();
    CanQualType SizeTy = Context.getSizeType();
    llvm::IntegerType *T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
    llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::coro_align, T);
    return RValue::get(Builder.CreateCall(F));
  }
  // The following three intrinsics take a token parameter referring to a token
  // returned by an earlier call to @llvm.coro.id. Since we cannot represent it
  // in builtins, we patch it up here.
  case llvm::Intrinsic::coro_alloc:
  case llvm::Intrinsic::coro_begin:
  case llvm::Intrinsic::coro_free: {
    if (CurCoro.Data && CurCoro.Data->CoroId) {
      Args.push_back(CurCoro.Data->CoroId);
      break;
    }
    CGM.Error(E->getBeginLoc(), "this builtin expects that __builtin_coro_id has"
                                " been used earlier in this function");
    // Fall through to the next case to add TokenNone as the first argument.
    [[fallthrough]];
  }
  // @llvm.coro.suspend takes a token parameter. Add token 'none' as the first
  // argument.
  case llvm::Intrinsic::coro_suspend:
    Args.push_back(llvm::ConstantTokenNone::get(getLLVMContext()));
    break;
  }
  for (const Expr *Arg : E->arguments())
    Args.push_back(EmitScalarExpr(Arg));

  llvm::Function *F = CGM.getIntrinsic(IID);
  llvm::CallInst *Call = Builder.CreateCall(F, Args);

  // Note: The following code is here to enable emitting coro.id and coro.begin
  // by hand, in order to experiment with coroutines in C.
  // If we see @llvm.coro.id, remember it in the CoroData. We will update the
  // coro.alloc, coro.begin and coro.free intrinsics to refer to it.
  if (IID == llvm::Intrinsic::coro_id) {
    createCoroData(*this, CurCoro, Call, E);
  }
  else if (IID == llvm::Intrinsic::coro_begin) {
    if (CurCoro.Data)
      CurCoro.Data->CoroBegin = Call;
  }
  else if (IID == llvm::Intrinsic::coro_free) {
    // Remember the last coro_free as we need it to build the conditional
    // deletion of the coroutine frame.
    if (CurCoro.Data)
      CurCoro.Data->LastCoroFree = Call;
  }
  return RValue::get(Call);
}