//===- CoroSplit.cpp - Converts a coroutine into a state machine ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass builds the coroutine frame and outlines the resume and destroy
// parts of the coroutine into separate functions.
//
// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the
// coroutine as a single function for as long as possible. Shortly before the
// coroutine is eligible to be inlined into its callers, we split up the
// coroutine into parts corresponding to the initial, resume and destroy
// invocations of the coroutine, add them to the current SCC and restart the
// IPO pipeline to optimize the coroutine subfunctions we extracted before
// proceeding to the caller of the coroutine.
//
//===----------------------------------------------------------------------===//
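//
// For orientation only, a rough sketch of the switch-lowering result (the
// actual IR is produced by the code below; the frame layout and the
// ".resume"/".destroy"/".cleanup" suffixes follow the conventions used in
// this file, everything else is illustrative):
//
//   %f.Frame = type { ptr, ptr, ..., iN }     ; resume fn, destroy fn,
//                                             ; spilled values, suspend index
//   define internal void @f.resume(ptr %frame)   ; resumes at the stored index
//   define internal void @f.destroy(ptr %frame)  ; destroys a suspended coroutine
//   define internal void @f.cleanup(ptr %frame)  ; destroy variant used when the
//                                                ; frame allocation was elided
//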
#include "llvm/Transforms/Coroutines/CoroSplit.h"
#include "CoroInternal.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Coroutines/ABI.h"
#include "llvm/Transforms/Coroutines/CoroInstr.h"
#include "llvm/Transforms/Coroutines/MaterializationUtils.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CallGraphUpdater.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <initializer_list>
using namespace llvm;

#define DEBUG_TYPE "coro-split"
namespace {

/// A little helper class for building the coroutine clones.
class CoroCloner {
public:
  enum class Kind {
    /// The shared resume function for a switch lowering.
    SwitchResume,

    /// The shared unwind function for a switch lowering.
    SwitchUnwind,

    /// The shared cleanup function for a switch lowering.
    SwitchCleanup,

    /// An individual continuation function.
    Continuation,

    /// An async resume function.
    Async,
  };

private:
  Function &OrigF;
  Function *NewF;
  const Twine &Suffix;
  coro::Shape &Shape;
  Kind FKind;
  IRBuilder<> Builder;
  ValueToValueMapTy VMap;

  Value *NewFramePtr = nullptr;

  /// The active suspend instruction; meaningful only for continuation and
  /// async ABIs.
  AnyCoroSuspendInst *ActiveSuspend = nullptr;

  TargetTransformInfo &TTI;
  /// Create a cloner for a switch lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Kind FKind, TargetTransformInfo &TTI)
      : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
        FKind(FKind), Builder(OrigF.getContext()), TTI(TTI) {
    assert(Shape.ABI == coro::ABI::Switch);
  }

  /// Create a cloner for a continuation lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Function *NewF, AnyCoroSuspendInst *ActiveSuspend,
             TargetTransformInfo &TTI)
      : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
        FKind(Shape.ABI == coro::ABI::Async ? Kind::Async
                                            : Kind::Continuation),
        Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend), TTI(TTI) {
    assert(Shape.ABI == coro::ABI::Retcon ||
           Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
    assert(NewF && "need existing function for continuation");
    assert(ActiveSuspend && "need active suspend point for continuation");
  }

public:
  /// Create a clone for a switch lowering.
  static Function *createClone(Function &OrigF, const Twine &Suffix,
                               coro::Shape &Shape, Kind FKind,
                               TargetTransformInfo &TTI) {
    TimeTraceScope FunctionScope("CoroCloner");

    CoroCloner Cloner(OrigF, Suffix, Shape, FKind, TTI);
    Cloner.create();
    return Cloner.getFunction();
  }

  /// Create a clone for a continuation lowering.
  static Function *createClone(Function &OrigF, const Twine &Suffix,
                               coro::Shape &Shape, Function *NewF,
                               AnyCoroSuspendInst *ActiveSuspend,
                               TargetTransformInfo &TTI) {
    TimeTraceScope FunctionScope("CoroCloner");

    CoroCloner Cloner(OrigF, Suffix, Shape, NewF, ActiveSuspend, TTI);
    Cloner.create();
    return Cloner.getFunction();
  }

  Function *getFunction() const {
    assert(NewF != nullptr && "declaration not yet set");
    return NewF;
  }

  void create();

private:
  bool isSwitchDestroyFunction() {
    switch (FKind) {
    case Kind::Async:
    case Kind::Continuation:
    case Kind::SwitchResume:
      return false;
    case Kind::SwitchUnwind:
    case Kind::SwitchCleanup:
      return true;
    }
    llvm_unreachable("Unknown CoroCloner::Kind enum");
  }

  void replaceEntryBlock();
  Value *deriveNewFramePointer();
  void replaceRetconOrAsyncSuspendUses();
  void replaceCoroSuspends();
  void replaceCoroEnds();
  void replaceSwiftErrorOps();
  void salvageDebugInfo();
  void handleFinalSuspend();
};

} // end anonymous namespace
// Lower the intrinsic in the CoroEarly phase if the coroutine frame doesn't
// escape and it is known that other transformations (for example, sanitizers)
// won't lead to incorrect code.
static void lowerAwaitSuspend(IRBuilder<> &Builder, CoroAwaitSuspendInst *CB,
                              coro::Shape &Shape) {
  auto Wrapper = CB->getWrapperFunction();
  auto Awaiter = CB->getAwaiter();
  auto FramePtr = CB->getFrame();

  Builder.SetInsertPoint(CB);

  CallBase *NewCall = nullptr;
  // await_suspend has only 2 parameters, awaiter and handle.
  // Copy parameter attributes from the intrinsic call, but remove the last,
  // because the last parameter now becomes the function that is being called.
  AttributeList NewAttributes =
      CB->getAttributes().removeParamAttributes(CB->getContext(), 2);

  if (auto Invoke = dyn_cast<InvokeInst>(CB)) {
    auto WrapperInvoke =
        Builder.CreateInvoke(Wrapper, Invoke->getNormalDest(),
                             Invoke->getUnwindDest(), {Awaiter, FramePtr});

    WrapperInvoke->setCallingConv(Invoke->getCallingConv());
    std::copy(Invoke->bundle_op_info_begin(), Invoke->bundle_op_info_end(),
              WrapperInvoke->bundle_op_info_begin());
    WrapperInvoke->setAttributes(NewAttributes);
    WrapperInvoke->setDebugLoc(Invoke->getDebugLoc());
    NewCall = WrapperInvoke;
  } else if (auto Call = dyn_cast<CallInst>(CB)) {
    auto WrapperCall = Builder.CreateCall(Wrapper, {Awaiter, FramePtr});

    WrapperCall->setAttributes(NewAttributes);
    WrapperCall->setDebugLoc(Call->getDebugLoc());
    NewCall = WrapperCall;
  } else {
    llvm_unreachable("Unexpected coro_await_suspend invocation method");
  }

  if (CB->getCalledFunction()->getIntrinsicID() ==
      Intrinsic::coro_await_suspend_handle) {
    // Follow the lowered await_suspend call above with a lowered resume call
    // to the returned coroutine.
    if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
      // If the await_suspend call is an invoke, we continue in the next block.
      Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstInsertionPt());
    }

    coro::LowererBase LB(*Wrapper->getParent());
    auto *ResumeAddr = LB.makeSubFnCall(NewCall, CoroSubFnInst::ResumeIndex,
                                        &*Builder.GetInsertPoint());

    LLVMContext &Ctx = Builder.getContext();
    FunctionType *ResumeTy = FunctionType::get(
        Type::getVoidTy(Ctx), PointerType::getUnqual(Ctx), false);
    auto *ResumeCall = Builder.CreateCall(ResumeTy, ResumeAddr, {NewCall});
    ResumeCall->setCallingConv(CallingConv::Fast);

    // We can't insert the 'ret' instruction and adjust the cc until the
    // function has been split, so remember this for later.
    Shape.SymmetricTransfers.push_back(ResumeCall);

    NewCall = ResumeCall;
  }

  CB->replaceAllUsesWith(NewCall);
  CB->eraseFromParent();
}
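// For illustration only (types abbreviated, names hypothetical): a call such
// as
//
//   %hdl = call ptr @llvm.coro.await.suspend.handle(ptr %awaiter, ptr %frame,
//                                                   ptr @wrapper)
//
// is lowered above to roughly
//
//   %hdl = call ptr @wrapper(ptr %awaiter, ptr %frame)
//   %resumeaddr = call ptr @llvm.coro.subfn.addr(ptr %hdl, i8 0)
//   call fastcc void %resumeaddr(ptr %hdl)  ; recorded in SymmetricTransfers,
//                                           ; marked musttail after splitting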
static void lowerAwaitSuspends(Function &F, coro::Shape &Shape) {
  IRBuilder<> Builder(F.getContext());
  for (auto *AWS : Shape.CoroAwaitSuspends)
    lowerAwaitSuspend(Builder, AWS, Shape);
}
static void maybeFreeRetconStorage(IRBuilder<> &Builder,
                                   const coro::Shape &Shape, Value *FramePtr,
                                   CallGraph *CG) {
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce);
  if (Shape.RetconLowering.IsFrameInlineInStorage)
    return;

  Shape.emitDealloc(Builder, FramePtr, CG);
}
/// Replace an llvm.coro.end.async.
/// Will inline the must tail call function call if there is one.
/// \returns true if cleanup of the coro.end block is needed, false otherwise.
static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
  IRBuilder<> Builder(End);

  auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
  if (!EndAsync) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
  if (!MustTailCallFunc) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  // Move the must tail call from the predecessor block into the end block.
  auto *CoroEndBlock = End->getParent();
  auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
  assert(MustTailCallFuncBlock && "Must have a single predecessor block");
  auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
  auto *MustTailCall = cast<CallInst>(&*std::prev(It));
  CoroEndBlock->splice(End->getIterator(), MustTailCallFuncBlock,
                       MustTailCall->getIterator());

  // Insert the return instruction.
  Builder.SetInsertPoint(End);
  Builder.CreateRetVoid();
  InlineFunctionInfo FnInfo;

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();

  auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
  assert(InlineRes.isSuccess() && "Expected inlining to succeed");
  (void)InlineRes;

  // We have cleaned up the coro.end block above.
  return false;
}
/// Replace a non-unwind call to llvm.coro.end.
static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
                                      const coro::Shape &Shape,
                                      Value *FramePtr, bool InResume,
                                      CallGraph *CG) {
  // Start inserting right before the coro.end.
  IRBuilder<> Builder(End);

  // Create the return instruction.
  switch (Shape.ABI) {
  // The cloned functions in switch-lowering always return void.
  case coro::ABI::Switch:
    assert(!cast<CoroEndInst>(End)->hasResults() &&
           "switch coroutine should not return any values");
    // coro.end doesn't immediately end the coroutine in the main function
    // in this lowering, because we need to deallocate the coroutine.
    if (!InResume)
      return;
    Builder.CreateRetVoid();
    break;

  // In async lowering this returns.
  case coro::ABI::Async: {
    bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
    if (!CoroEndBlockNeedsCleanup)
      return;
    break;
  }

  // In unique continuation lowering, the continuations always return void.
  // But we may have implicitly allocated storage.
  case coro::ABI::RetconOnce: {
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    auto *CoroEnd = cast<CoroEndInst>(End);
    auto *RetTy = Shape.getResumeFunctionType()->getReturnType();

    if (!CoroEnd->hasResults()) {
      assert(RetTy->isVoidTy());
      Builder.CreateRetVoid();
      break;
    }

    auto *CoroResults = CoroEnd->getResults();
    unsigned NumReturns = CoroResults->numReturns();

    if (auto *RetStructTy = dyn_cast<StructType>(RetTy)) {
      assert(RetStructTy->getNumElements() == NumReturns &&
             "number of returns should match resume function signature");
      Value *ReturnValue = PoisonValue::get(RetStructTy);
      unsigned Idx = 0;
      for (Value *RetValEl : CoroResults->return_values())
        ReturnValue = Builder.CreateInsertValue(ReturnValue, RetValEl, Idx++);
      Builder.CreateRet(ReturnValue);
    } else if (NumReturns == 0) {
      assert(RetTy->isVoidTy());
      Builder.CreateRetVoid();
    } else {
      assert(NumReturns == 1);
      Builder.CreateRet(*CoroResults->retval_begin());
    }
    CoroResults->replaceAllUsesWith(
        ConstantTokenNone::get(CoroResults->getContext()));
    CoroResults->eraseFromParent();
    break;
  }

  // In non-unique continuation lowering, we signal completion by returning
  // a null continuation.
  case coro::ABI::Retcon: {
    assert(!cast<CoroEndInst>(End)->hasResults() &&
           "retcon coroutine should not return any values");
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    auto RetTy = Shape.getResumeFunctionType()->getReturnType();
    auto RetStructTy = dyn_cast<StructType>(RetTy);
    PointerType *ContinuationTy =
        cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);

    Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
    if (RetStructTy)
      ReturnValue = Builder.CreateInsertValue(PoisonValue::get(RetStructTy),
                                              ReturnValue, 0);
    Builder.CreateRet(ReturnValue);
    break;
  }
  }

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();
}
// Mark a coroutine as done, which implies that the coroutine is finished and
// will never be resumed.
//
// In the resume-switched ABI, the done state is represented by storing a null
// ResumeFnAddr into the frame.
//
// NOTE: We cannot omit the argument `FramePtr`. It is necessary because the
// pointer to the frame in the split function is not stored in `Shape`.
static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape,
                                Value *FramePtr) {
  assert(
      Shape.ABI == coro::ABI::Switch &&
      "markCoroutineAsDone is only supported for Switch-Resumed ABI for now.");
  auto *GepIndex = Builder.CreateStructGEP(
      Shape.FrameTy, FramePtr, coro::Shape::SwitchFieldIndex::Resume,
      "ResumeFn.addr");
  auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
      Shape.FrameTy->getTypeAtIndex(coro::Shape::SwitchFieldIndex::Resume)));
  Builder.CreateStore(NullPtr, GepIndex);

  // If the coroutine doesn't have an unwind coro.end, we can omit the store
  // to the final suspend point, since we can infer that the coroutine is
  // suspended at the final suspend point from the nullness of ResumeFnAddr.
  // However, we can't skip it if the coroutine has an unwind coro.end: a
  // coroutine that reaches an unwind coro.end is considered suspended at the
  // final suspend point (the ResumeFnAddr is null) even though it has not
  // actually completed. We need the IndexVal for the final suspend point
  // to make the states clear.
  if (Shape.SwitchLowering.HasUnwindCoroEnd &&
      Shape.SwitchLowering.HasFinalSuspend) {
    assert(cast<CoroSuspendInst>(Shape.CoroSuspends.back())->isFinal() &&
           "The final suspend should only live in the last position of "
           "CoroSuspends.");
    ConstantInt *IndexVal = Shape.getIndex(Shape.CoroSuspends.size() - 1);
    auto *FinalIndex = Builder.CreateStructGEP(
        Shape.FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");

    Builder.CreateStore(IndexVal, FinalIndex);
  }
}
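// Illustrative frame layout assumed by the store above (switch ABI; sketch
// only):
//
//   %f.Frame = type { ptr, ptr, ..., iN }  ; { resume fn, destroy fn, ..., index }
//
// Storing null into the resume-function slot is what marks the coroutine as
// "done" for llvm.coro.done and for the destroy-path checks below.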
/// Replace an unwind call to llvm.coro.end.
static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                                 Value *FramePtr, bool InResume,
                                 CallGraph *CG) {
  IRBuilder<> Builder(End);

  switch (Shape.ABI) {
  // In switch-lowering, this does nothing in the main function.
  case coro::ABI::Switch: {
    // In C++'s specification, the coroutine should be marked as done
    // if promise.unhandled_exception() throws. The frontend will
    // call coro.end(true) along this path.
    //
    // FIXME: We should refactor this once other languages besides C++
    // use the Switch-Resumed style.
    markCoroutineAsDone(Builder, Shape, FramePtr);
    if (!InResume)
      return;
    break;
  }
  // In async lowering this does nothing.
  case coro::ABI::Async:
    break;
  // In continuation-lowering, this frees the continuation storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    break;
  }

  // If coro.end has an associated bundle, add a cleanupret instruction.
  if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
    auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
    auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
    End->getParent()->splitBasicBlock(End);
    CleanupRet->getParent()->getTerminator()->eraseFromParent();
  }
}
static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                           Value *FramePtr, bool InResume, CallGraph *CG) {
  if (End->isUnwind())
    replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
  else
    replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);

  auto &Context = End->getContext();
  End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
                                   : ConstantInt::getFalse(Context));
  End->eraseFromParent();
}
// In the resume function, we remove the last case (when coro::Shape is built,
// the final suspend point (if present) is always the last element of the
// CoroSuspends array), since it is undefined behavior to resume a coroutine
// suspended at the final suspend point.
// In the destroy function, we also remove the last case if it is not possible
// for ResumeFnAddr to be null while the coroutine is not actually suspended
// at the final suspend point (which can happen, since a coroutine is
// considered suspended at the final suspend point if
// promise.unhandled_exception() exits via an exception).
void CoroCloner::handleFinalSuspend() {
  assert(Shape.ABI == coro::ABI::Switch &&
         Shape.SwitchLowering.HasFinalSuspend);

  if (isSwitchDestroyFunction() && Shape.SwitchLowering.HasUnwindCoroEnd)
    return;

  auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
  auto FinalCaseIt = std::prev(Switch->case_end());
  BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
  Switch->removeCase(FinalCaseIt);
  if (isSwitchDestroyFunction()) {
    BasicBlock *OldSwitchBB = Switch->getParent();
    auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
    Builder.SetInsertPoint(OldSwitchBB->getTerminator());

    if (NewF->isCoroOnlyDestroyWhenComplete()) {
      // When the coroutine can only be destroyed when complete, we don't need
      // to generate code for other cases.
      Builder.CreateBr(ResumeBB);
    } else {
      auto *GepIndex = Builder.CreateStructGEP(
          Shape.FrameTy, NewFramePtr, coro::Shape::SwitchFieldIndex::Resume,
          "ResumeFn.addr");
      auto *Load =
          Builder.CreateLoad(Shape.getSwitchResumePointerType(), GepIndex);
      auto *Cond = Builder.CreateIsNull(Load);
      Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
    }
    OldSwitchBB->getTerminator()->eraseFromParent();
  }
}
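// Roughly, the destroy clone's rewritten dispatch looks like this
// (illustrative sketch, block names assumed):
//
//   %resume.fn = load ptr, ptr %ResumeFn.addr
//   %is.done   = icmp eq ptr %resume.fn, null
//   br i1 %is.done, label %final.suspend.bb, label %Switch
//
// i.e. the final-suspend case removed from the switch above is reached
// instead through the null check on the resume-function slot.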
static FunctionType *
getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
  auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
  auto *StructTy = cast<StructType>(AsyncSuspend->getType());
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *VoidTy = Type::getVoidTy(Context);
  return FunctionType::get(VoidTy, StructTy->elements(), false);
}
static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
                                        const Twine &Suffix,
                                        Module::iterator InsertBefore,
                                        AnyCoroSuspendInst *ActiveSuspend) {
  Module *M = OrigF.getParent();
  auto *FnTy = (Shape.ABI != coro::ABI::Async)
                   ? Shape.getResumeFunctionType()
                   : getFunctionTypeFromAsyncSuspend(ActiveSuspend);

  Function *NewF =
      Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
                       OrigF.getName() + Suffix);

  M->getFunctionList().insert(InsertBefore, NewF);

  return NewF;
}
/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
/// arguments to the continuation function.
///
/// This assumes that the builder has a meaningful insertion point.
void CoroCloner::replaceRetconOrAsyncSuspendUses() {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);

  auto NewS = VMap[ActiveSuspend];
  if (NewS->use_empty())
    return;

  // Copy out all the continuation arguments after the buffer pointer into
  // an easily-indexed data structure for convenience.
  SmallVector<Value *, 8> Args;
  // The async ABI includes all arguments -- including the first argument.
  bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
  for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
            E = NewF->arg_end();
       I != E; ++I)
    Args.push_back(&*I);

  // If the suspend returns a single scalar value, we can just do a simple
  // replacement.
  if (!isa<StructType>(NewS->getType())) {
    assert(Args.size() == 1);
    NewS->replaceAllUsesWith(Args.front());
    return;
  }

  // Try to peephole extracts of an aggregate return.
  for (Use &U : llvm::make_early_inc_range(NewS->uses())) {
    auto *EVI = dyn_cast<ExtractValueInst>(U.getUser());
    if (!EVI || EVI->getNumIndices() != 1)
      continue;

    EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
    EVI->eraseFromParent();
  }

  // If we have no remaining uses, we're done.
  if (NewS->use_empty())
    return;

  // Otherwise, we need to create an aggregate.
  Value *Aggr = PoisonValue::get(NewS->getType());
  for (auto [Idx, Arg] : llvm::enumerate(Args))
    Aggr = Builder.CreateInsertValue(Aggr, Arg, Idx);

  NewS->replaceAllUsesWith(Aggr);
}
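// For illustration (hypothetical continuation signature): if the continuation
// clone takes (ptr %buf, ptr %a, i32 %b), then in the clone
//
//   %vals = call { ptr, i32 } @llvm.coro.suspend.retcon...(...)
//   %x    = extractvalue { ptr, i32 } %vals, 1
//
// %x is rewritten to the clone's i32 argument %b, and any remaining aggregate
// uses of %vals are rebuilt from the arguments with insertvalue.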
void CoroCloner::replaceCoroSuspends() {
  Value *SuspendResult;

  switch (Shape.ABI) {
  // In switch lowering, replace coro.suspend with the appropriate value
  // for the type of function we're extracting.
  // Replacing coro.suspend with (0) will result in control flow proceeding to
  // a resume label associated with a suspend point, replacing it with (1) will
  // result in control flow proceeding to a cleanup label associated with this
  // suspend point.
  case coro::ABI::Switch:
    SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
    break;

  // In async lowering there are no uses of the result.
  case coro::ABI::Async:
    return;

  // In returned-continuation lowering, the arguments from earlier
  // continuations are theoretically arbitrary, and they should have been
  // spilled.
  case coro::ABI::RetconOnce:
  case coro::ABI::Retcon:
    return;
  }

  for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
    // The active suspend was handled earlier.
    if (CS == ActiveSuspend)
      continue;

    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
    MappedCS->replaceAllUsesWith(SuspendResult);
    MappedCS->eraseFromParent();
  }
}
void CoroCloner::replaceCoroEnds() {
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    // We use a null call graph because there's no call graph node for
    // the cloned function yet. We'll just be rebuilding that later.
    auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
    replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
  }
}
static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
                                 ValueToValueMapTy *VMap) {
  if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
    return;
  Value *CachedSlot = nullptr;
  auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
    if (CachedSlot)
      return CachedSlot;

    // Check if the function has a swifterror argument.
    for (auto &Arg : F.args()) {
      if (Arg.isSwiftError()) {
        CachedSlot = &Arg;
        return &Arg;
      }
    }

    // Create a swifterror alloca.
    IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
    auto Alloca = Builder.CreateAlloca(ValueTy);
    Alloca->setSwiftError(true);

    CachedSlot = Alloca;
    return Alloca;
  };

  for (CallInst *Op : Shape.SwiftErrorOps) {
    auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
    IRBuilder<> Builder(MappedOp);

    // If there are no arguments, this is a 'get' operation.
    Value *MappedResult;
    if (Op->arg_empty()) {
      auto ValueTy = Op->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      MappedResult = Builder.CreateLoad(ValueTy, Slot);
    } else {
      assert(Op->arg_size() == 1);
      auto Value = MappedOp->getArgOperand(0);
      auto ValueTy = Value->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      Builder.CreateStore(Value, Slot);
      MappedResult = Slot;
    }

    MappedOp->replaceAllUsesWith(MappedResult);
    MappedOp->eraseFromParent();
  }

  // If we're updating the original function, we've invalidated SwiftErrorOps.
  if (VMap == nullptr) {
    Shape.SwiftErrorOps.clear();
  }
}
/// Returns all DbgVariableIntrinsics in F, together with all
/// DbgVariableRecords attached to its instructions.
static std::pair<SmallVector<DbgVariableIntrinsic *, 8>,
                 SmallVector<DbgVariableRecord *>>
collectDbgVariableIntrinsics(Function &F) {
  SmallVector<DbgVariableIntrinsic *, 8> Intrinsics;
  SmallVector<DbgVariableRecord *> DbgVariableRecords;
  for (auto &I : instructions(F)) {
    for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange()))
      DbgVariableRecords.push_back(&DVR);
    if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
      Intrinsics.push_back(DVI);
  }
  return {Intrinsics, DbgVariableRecords};
}
void CoroCloner::replaceSwiftErrorOps() {
  ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
}
void CoroCloner::salvageDebugInfo() {
  auto [Worklist, DbgVariableRecords] = collectDbgVariableIntrinsics(*NewF);
  SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;

  // Only 64-bit ABIs have a register we can refer to with the entry value.
  bool UseEntryValue =
      llvm::Triple(OrigF.getParent()->getTargetTriple()).isArch64Bit();
  for (DbgVariableIntrinsic *DVI : Worklist)
    coro::salvageDebugInfo(ArgToAllocaMap, *DVI, UseEntryValue);
  for (DbgVariableRecord *DVR : DbgVariableRecords)
    coro::salvageDebugInfo(ArgToAllocaMap, *DVR, UseEntryValue);

  // Remove all salvaged dbg.declare intrinsics that became
  // either unreachable or stale due to the CoroSplit transformation.
  DominatorTree DomTree(*NewF);
  auto IsUnreachableBlock = [&](BasicBlock *BB) {
    return !isPotentiallyReachable(&NewF->getEntryBlock(), BB, nullptr,
                                   &DomTree);
  };
  auto RemoveOne = [&](auto *DVI) {
    if (IsUnreachableBlock(DVI->getParent()))
      DVI->eraseFromParent();
    else if (isa_and_nonnull<AllocaInst>(DVI->getVariableLocationOp(0))) {
      // Count all non-debuginfo uses in reachable blocks.
      unsigned Uses = 0;
      for (auto *User : DVI->getVariableLocationOp(0)->users())
        if (auto *I = dyn_cast<Instruction>(User))
          if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
            ++Uses;
      if (!Uses)
        DVI->eraseFromParent();
    }
  };
  for_each(Worklist, RemoveOne);
  for_each(DbgVariableRecords, RemoveOne);
}
void CoroCloner::replaceEntryBlock() {
  // In the original function, the AllocaSpillBlock is a block immediately
  // following the allocation of the frame object which defines GEPs for
  // all the allocas that have been moved into the frame, and it ends by
  // branching to the original beginning of the coroutine. Make this
  // the entry block of the cloned function.
  auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
  auto *OldEntry = &NewF->getEntryBlock();
  Entry->setName("entry" + Suffix);
  Entry->moveBefore(OldEntry);
  Entry->getTerminator()->eraseFromParent();

  // Clear all predecessors of the new entry block. There should be
  // exactly one predecessor, which we created when splitting out
  // AllocaSpillBlock to begin with.
  assert(Entry->hasOneUse());
  auto BranchToEntry = cast<BranchInst>(Entry->user_back());
  assert(BranchToEntry->isUnconditional());
  Builder.SetInsertPoint(BranchToEntry);
  Builder.CreateUnreachable();
  BranchToEntry->eraseFromParent();

  // Branch from the entry to the appropriate place.
  Builder.SetInsertPoint(Entry);
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In switch-lowering, we built a resume-entry block in the original
    // function. Make the entry block branch to this.
    auto *SwitchBB =
        cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
    Builder.CreateBr(SwitchBB);
    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    // In continuation ABIs, we want to branch to immediately after the
    // active suspend point. Earlier phases will have put the suspend in its
    // own basic block, so just thread our jump directly to its successor.
    assert((Shape.ABI == coro::ABI::Async &&
            isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
           ((Shape.ABI == coro::ABI::Retcon ||
             Shape.ABI == coro::ABI::RetconOnce) &&
            isa<CoroSuspendRetconInst>(ActiveSuspend)));
    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
    auto Branch = cast<BranchInst>(MappedCS->getNextNode());
    assert(Branch->isUnconditional());
    Builder.CreateBr(Branch->getSuccessor(0));
    break;
  }
  }

  // Any static alloca that's still being used but not reachable from the new
  // entry needs to be moved to the new entry.
  Function *F = OldEntry->getParent();
  DominatorTree DT{*F};
  for (Instruction &I : llvm::make_early_inc_range(instructions(F))) {
    auto *Alloca = dyn_cast<AllocaInst>(&I);
    if (!Alloca || I.use_empty())
      continue;
    if (DT.isReachableFromEntry(I.getParent()) ||
        !isa<ConstantInt>(Alloca->getArraySize()))
      continue;
    I.moveBefore(*Entry, Entry->getFirstInsertionPt());
  }
}
/// Derive the value of the new frame pointer.
Value *CoroCloner::deriveNewFramePointer() {
  // Builder should be inserting to the front of the new entry block.

  switch (Shape.ABI) {
  // In switch-lowering, the argument is the frame pointer.
  case coro::ABI::Switch:
    return &*NewF->arg_begin();
  // In async-lowering, one of the arguments is an async context as determined
  // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context
  // of the resume function from the async context projection function
  // associated with the active suspend. The frame is located as a tail to the
  // async context.
  case coro::ABI::Async: {
    auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
    auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
    auto *CalleeContext = NewF->getArg(ContextIdx);
    auto *ProjectionFunc =
        ActiveAsyncSuspend->getAsyncContextProjectionFunction();
    auto DbgLoc =
        cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
    auto *CallerContext = Builder.CreateCall(ProjectionFunc->getFunctionType(),
                                             ProjectionFunc, CalleeContext);
    CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
    CallerContext->setDebugLoc(DbgLoc);
    // The frame is located after the async_context header.
    auto &Context = Builder.getContext();
    auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
        Type::getInt8Ty(Context), CallerContext,
        Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
    // Inline the projection function.
    InlineFunctionInfo InlineInfo;
    auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
    assert(InlineRes.isSuccess());
    (void)InlineRes;
    return FramePtrAddr;
  }
  // In continuation-lowering, the argument is the opaque storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    Argument *NewStorage = &*NewF->arg_begin();
    auto FramePtrTy = PointerType::getUnqual(Shape.FrameTy->getContext());

    // If the storage is inline, just bitcast the storage to the frame type.
    if (Shape.RetconLowering.IsFrameInlineInStorage)
      return NewStorage;

    // Otherwise, load the real frame from the opaque storage.
    return Builder.CreateLoad(FramePtrTy, NewStorage);
  }
  }
  llvm_unreachable("bad ABI");
}
/// Adjust the scope line of the funclet to the first line number after the
/// suspend point. This avoids a jump in the line table from the function
/// declaration (where prologue instructions are attributed to) to the suspend
/// point.
/// Only adjust the scope line when the files are the same.
/// If no candidate line number is found, fall back to the line of
/// ActiveSuspend.
static void updateScopeLine(Instruction *ActiveSuspend,
                            DISubprogram &SPToUpdate) {
  if (!ActiveSuspend)
    return;

  auto *Successor = ActiveSuspend->getNextNonDebugInstruction();
  // CoroSplit splits the BB around ActiveSuspend, so the meaningful
  // instructions are not in the same BB.
  if (auto *Branch = dyn_cast_or_null<BranchInst>(Successor);
      Branch && Branch->isUnconditional())
    Successor = Branch->getSuccessor(0)->getFirstNonPHIOrDbg();

  // Find the first successor of ActiveSuspend with a non-zero line location.
  // If that matches the file of ActiveSuspend, use it.
  for (; Successor; Successor = Successor->getNextNonDebugInstruction()) {
    auto DL = Successor->getDebugLoc();
    if (!DL || DL.getLine() == 0)
      continue;

    if (SPToUpdate.getFile() == DL->getFile()) {
      SPToUpdate.setScopeLine(DL.getLine());
      return;
    }

    break;
  }

  // If the search above failed, fall back to the location of ActiveSuspend.
  if (auto DL = ActiveSuspend->getDebugLoc())
    if (SPToUpdate.getFile() == DL->getFile())
      SPToUpdate.setScopeLine(DL->getLine());
}
static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
                                 unsigned ParamIndex, uint64_t Size,
                                 Align Alignment, bool NoAlias) {
  AttrBuilder ParamAttrs(Context);
  ParamAttrs.addAttribute(Attribute::NonNull);
  ParamAttrs.addAttribute(Attribute::NoUndef);

  if (NoAlias)
    ParamAttrs.addAttribute(Attribute::NoAlias);

  ParamAttrs.addAlignmentAttr(Alignment);
  ParamAttrs.addDereferenceableAttr(Size);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}
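// For example (illustrative values): with Size = 64, Alignment = 8 and
// NoAlias = true, the frame parameter ends up attributed as
//   ptr noalias nonnull noundef align 8 dereferenceable(64)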
static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
                                 unsigned ParamIndex) {
  AttrBuilder ParamAttrs(Context);
  ParamAttrs.addAttribute(Attribute::SwiftAsync);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}
static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
                              unsigned ParamIndex) {
  AttrBuilder ParamAttrs(Context);
  ParamAttrs.addAttribute(Attribute::SwiftSelf);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}
/// Clone the body of the original function into a resume function of
/// some sort.
void CoroCloner::create() {
  // Create the new function if we don't already have one.
  if (!NewF)
    NewF = createCloneDeclaration(OrigF, Shape, Suffix,
                                  OrigF.getParent()->end(), ActiveSuspend);

  // Replace all args with dummy instructions. If an argument is the old frame
  // pointer, the dummy will be replaced by the new frame pointer once it is
  // computed below. Uses of all other arguments should have already been
  // rewritten by buildCoroutineFrame() to use loads/stores on the coroutine
  // frame.
  SmallVector<Instruction *> DummyArgs;
  for (Argument &A : OrigF.args()) {
    DummyArgs.push_back(new FreezeInst(PoisonValue::get(A.getType())));
    VMap[&A] = DummyArgs.back();
  }

  SmallVector<ReturnInst *, 4> Returns;

  // Ignore attempts to change certain attributes of the function.
  // TODO: maybe there should be a way to suppress this during cloning?
  auto savedVisibility = NewF->getVisibility();
  auto savedUnnamedAddr = NewF->getUnnamedAddr();
  auto savedDLLStorageClass = NewF->getDLLStorageClass();

  // NewF's linkage (which CloneFunctionInto does *not* change) might not
  // be compatible with the visibility of OrigF (which it *does* change),
  // so protect against that.
  auto savedLinkage = NewF->getLinkage();
  NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);

  CloneFunctionInto(NewF, &OrigF, VMap,
                    CloneFunctionChangeType::LocalChangesOnly, Returns);

  auto &Context = NewF->getContext();

  if (DISubprogram *SP = NewF->getSubprogram()) {
    assert(SP != OrigF.getSubprogram() && SP->isDistinct());
    updateScopeLine(ActiveSuspend, *SP);

    // Update the linkage name to reflect the modified symbol name. It
    // is necessary to update the linkage name in Swift, since the
    // mangling changes for resume functions. It might also be the
    // right thing to do in C++, but due to a limitation in LLVM's
    // AsmPrinter we can only do this if the function doesn't have an
    // abstract specification, since the DWARF backend expects the
    // abstract specification to contain the linkage name and asserts
    // that they are identical.
    if (SP->getUnit() &&
        SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift) {
      SP->replaceLinkageName(MDString::get(Context, NewF->getName()));
      if (auto *Decl = SP->getDeclaration()) {
        auto *NewDecl = DISubprogram::get(
            Decl->getContext(), Decl->getScope(), Decl->getName(),
            NewF->getName(), Decl->getFile(), Decl->getLine(), Decl->getType(),
            Decl->getScopeLine(), Decl->getContainingType(),
            Decl->getVirtualIndex(), Decl->getThisAdjustment(),
            Decl->getFlags(), Decl->getSPFlags(), Decl->getUnit(),
            Decl->getTemplateParams(), nullptr, Decl->getRetainedNodes(),
            Decl->getThrownTypes(), Decl->getAnnotations(),
            Decl->getTargetFuncName());
        SP->replaceDeclaration(NewDecl);
      }
    }
  }

  NewF->setLinkage(savedLinkage);
  NewF->setVisibility(savedVisibility);
  NewF->setUnnamedAddr(savedUnnamedAddr);
  NewF->setDLLStorageClass(savedDLLStorageClass);
  // The function sanitizer metadata needs to match the signature of the
  // function it is being attached to. However this does not hold for split
  // functions here. Thus remove the metadata for split functions.
  if (Shape.ABI == coro::ABI::Switch &&
      NewF->hasMetadata(LLVMContext::MD_func_sanitize))
    NewF->eraseMetadata(LLVMContext::MD_func_sanitize);

  // Replace the attributes of the new function:
  auto OrigAttrs = NewF->getAttributes();
  auto NewAttrs = AttributeList();

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Bootstrap attributes by copying function attributes from the
    // original function. This should include optimization settings and so on.
    NewAttrs = NewAttrs.addFnAttributes(
        Context, AttrBuilder(Context, OrigAttrs.getFnAttrs()));

    addFramePointerAttrs(NewAttrs, Context, 0, Shape.FrameSize,
                         Shape.FrameAlign, /*NoAlias=*/false);
    break;
  case coro::ABI::Async: {
    auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
    if (OrigF.hasParamAttribute(Shape.AsyncLowering.ContextArgNo,
                                Attribute::SwiftAsync)) {
      uint32_t ArgAttributeIndices =
          ActiveAsyncSuspend->getStorageArgumentIndex();
      auto ContextArgIndex = ArgAttributeIndices & 0xff;
      addAsyncContextAttrs(NewAttrs, Context, ContextArgIndex);

      // `swiftasync` must precede `swiftself`, so 0 is not a valid index for
      // `swiftself`.
      auto SwiftSelfIndex = ArgAttributeIndices >> 8;
      if (SwiftSelfIndex)
        addSwiftSelfAttrs(NewAttrs, Context, SwiftSelfIndex);
    }

    // Transfer the original function's attributes.
    auto FnAttrs = OrigF.getAttributes().getFnAttrs();
    NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, FnAttrs));
    break;
  }
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // If we have a continuation prototype, just use its attributes,
    // full-stop.
    NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();

    /// FIXME: Is it really good to add the NoAlias attribute?
    addFramePointerAttrs(NewAttrs, Context, 0,
                         Shape.getRetconCoroId()->getStorageSize(),
                         Shape.getRetconCoroId()->getStorageAlignment(),
                         /*NoAlias=*/true);
    break;
  }

  switch (Shape.ABI) {
  // In these ABIs, the cloned functions always return 'void', and the
  // existing return sites are meaningless. Note that for unique
  // continuations, this includes the returns associated with suspends;
  // this is fine because we can't suspend twice.
  case coro::ABI::Switch:
  case coro::ABI::RetconOnce:
    // Remove old returns.
    for (ReturnInst *Return : Returns)
      changeToUnreachable(Return);
    break;

  // With multi-suspend continuations, we'll already have eliminated the
  // original returns and inserted returns before all the suspend points,
  // so we want to leave any returns in place.
  case coro::ABI::Retcon:
    break;
  // Async lowering will insert musttail call functions at all suspend points
  // followed by a return.
  // Don't change returns to unreachable because that will trip up the
  // verifier. These returns should be unreachable from the clone.
  case coro::ABI::Async:
    break;
  }

  NewF->setAttributes(NewAttrs);
  NewF->setCallingConv(Shape.getResumeFunctionCC());

  // Set up the new entry block.
  replaceEntryBlock();

  // Turn symmetric transfers into musttail calls.
  for (CallInst *ResumeCall : Shape.SymmetricTransfers) {
    ResumeCall = cast<CallInst>(VMap[ResumeCall]);
    if (TTI.supportsTailCallFor(ResumeCall)) {
      // FIXME: Could we support symmetric transfer effectively without
      // musttail?
      ResumeCall->setTailCallKind(CallInst::TCK_MustTail);
    }

    // Put a 'ret void' after the call, and split any remaining instructions to
    // an unreachable block.
    BasicBlock *BB = ResumeCall->getParent();
    BB->splitBasicBlock(ResumeCall->getNextNode());
    Builder.SetInsertPoint(BB->getTerminator());
    Builder.CreateRetVoid();
    BB->getTerminator()->eraseFromParent();
  }

  Builder.SetInsertPoint(&NewF->getEntryBlock().front());
  NewFramePtr = deriveNewFramePointer();

  // Remap frame pointer.
  Value *OldFramePtr = VMap[Shape.FramePtr];
  NewFramePtr->takeName(OldFramePtr);
  OldFramePtr->replaceAllUsesWith(NewFramePtr);

  // Remap vFrame pointer.
  auto *NewVFrame = Builder.CreateBitCast(
      NewFramePtr, PointerType::getUnqual(Builder.getContext()), "vFrame");
  Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
  if (OldVFrame != NewVFrame)
    OldVFrame->replaceAllUsesWith(NewVFrame);

  // All uses of the arguments should have been resolved by this point,
  // so we can safely remove the dummy values.
  for (Instruction *DummyArg : DummyArgs) {
    DummyArg->replaceAllUsesWith(PoisonValue::get(DummyArg->getType()));
    DummyArg->deleteValue();
  }

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Rewrite final suspend handling as it is not done via switch (this
    // allows us to remove the final case from the switch, since it is
    // undefined behavior to resume a coroutine suspended at the final
    // suspend point).
    if (Shape.SwitchLowering.HasFinalSuspend)
      handleFinalSuspend();
    break;
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // Replace uses of the active suspend with the corresponding
    // continuation-function arguments.
    assert(ActiveSuspend != nullptr &&
           "no active suspend when lowering a continuation-style coroutine");
    replaceRetconOrAsyncSuspendUses();
    break;
  }

  // Handle suspends.
  replaceCoroSuspends();

  // Handle swifterror.
  replaceSwiftErrorOps();

  // Remove coro.end intrinsics.
  replaceCoroEnds();

  // Salvage debug info that points into the coroutine frame.
  salvageDebugInfo();

  // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
  // to suppress deallocation code.
  if (Shape.ABI == coro::ABI::Switch)
    coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
                          /*Elide=*/FKind == CoroCloner::Kind::SwitchCleanup);
}
static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
  assert(Shape.ABI == coro::ABI::Async);

  auto *FuncPtrStruct = cast<ConstantStruct>(
      Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
  auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
  auto *OrigContextSize = FuncPtrStruct->getOperand(1);
  auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
                                          Shape.AsyncLowering.ContextSize);
  auto *NewFuncPtrStruct = ConstantStruct::get(
      FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);

  Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
}
static TypeSize getFrameSizeForShape(coro::Shape &Shape) {
  // In the same function all coro.sizes should have the same result type.
  auto *SizeIntrin = Shape.CoroSizes.back();
  Module *M = SizeIntrin->getModule();
  const DataLayout &DL = M->getDataLayout();
  return DL.getTypeAllocSize(Shape.FrameTy);
}
static void replaceFrameSizeAndAlignment(coro::Shape &Shape) {
  if (Shape.ABI == coro::ABI::Async)
    updateAsyncFuncPointerContextSize(Shape);

  for (CoroAlignInst *CA : Shape.CoroAligns) {
    CA->replaceAllUsesWith(
        ConstantInt::get(CA->getType(), Shape.FrameAlign.value()));
    CA->eraseFromParent();
  }

  if (Shape.CoroSizes.empty())
    return;

  // In the same function all coro.sizes should have the same result type.
  auto *SizeIntrin = Shape.CoroSizes.back();
  auto *SizeConstant =
      ConstantInt::get(SizeIntrin->getType(), getFrameSizeForShape(Shape));

  for (CoroSizeInst *CS : Shape.CoroSizes) {
    CS->replaceAllUsesWith(SizeConstant);
    CS->eraseFromParent();
  }
}
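// Illustrative effect (sketch only): once the frame layout is known,
//   %size  = call i64 @llvm.coro.size.i64()   ; -> i64 <alloc size of %f.Frame>
//   %align = call i32 @llvm.coro.align.i32()  ; -> i32 <alignment of %f.Frame>
// so the ramp function's allocation code sees plain constants.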
static void postSplitCleanup(Function &F) {
  removeUnreachableBlocks(F);

#ifndef NDEBUG
  // For now, we do a mandatory verification step because we don't
  // entirely trust this pass. Note that we don't want to add a verifier
  // pass to FPM below because it will also verify all the global data.
  if (verifyFunction(F, &errs()))
    report_fatal_error("Broken function");
#endif
}
// Coroutine has no suspend points. Remove heap allocation for the coroutine
// frame if possible.
static void handleNoSuspendCoroutine(coro::Shape &Shape) {
  auto *CoroBegin = Shape.CoroBegin;
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    auto SwitchId = Shape.getSwitchCoroId();
    auto *AllocInst = SwitchId->getCoroAlloc();
    coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
    if (AllocInst) {
      IRBuilder<> Builder(AllocInst);
      auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
      Frame->setAlignment(Shape.FrameAlign);
      AllocInst->replaceAllUsesWith(Builder.getFalse());
      AllocInst->eraseFromParent();
      CoroBegin->replaceAllUsesWith(Frame);
    } else {
      CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
    }
    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    CoroBegin->replaceAllUsesWith(PoisonValue::get(CoroBegin->getType()));
    break;
  }

  CoroBegin->eraseFromParent();
  Shape.CoroBegin = nullptr;
}
// SimplifySuspendPoint needs to check that there are no calls between
// coro_save and coro_suspend, since any of the calls may potentially resume
// the coroutine and if that is the case we cannot eliminate the suspend point.
static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
  for (Instruction *I = From; I != To; I = I->getNextNode()) {
    // Assume that no intrinsic can resume the coroutine.
    if (isa<IntrinsicInst>(I))
      continue;

    if (isa<CallBase>(I))
      return true;
  }
  return false;
}
static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
  SmallPtrSet<BasicBlock *, 8> Set;
  SmallVector<BasicBlock *, 8> Worklist;

  Set.insert(SaveBB);
  Worklist.push_back(ResDesBB);

  // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
  // returns a token consumed by suspend instruction, all blocks in between
  // will have to eventually hit SaveBB when going backwards from ResDesBB.
  while (!Worklist.empty()) {
    auto *BB = Worklist.pop_back_val();
    Set.insert(BB);
    for (auto *Pred : predecessors(BB))
      if (!Set.contains(Pred))
        Worklist.push_back(Pred);
  }

  // SaveBB and ResDesBB are checked separately in hasCallsBetween.
  Set.erase(SaveBB);
  Set.erase(ResDesBB);

  for (auto *BB : Set)
    if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
      return true;

  return false;
}
static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
  auto *SaveBB = Save->getParent();
  auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();

  if (SaveBB == ResumeOrDestroyBB)
    return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);

  // Any calls from Save to the end of the block?
  if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
    return true;

  // Any calls from the beginning of the block up to ResumeOrDestroy?
  if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
                             ResumeOrDestroy))
    return true;

  // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
  if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
    return true;

  return false;
}
// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
// suspend point and replace it with normal control flow.
static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
                                 CoroBeginInst *CoroBegin) {
  Instruction *Prev = Suspend->getPrevNode();
  if (!Prev) {
    auto *Pred = Suspend->getParent()->getSinglePredecessor();
    if (!Pred)
      return false;
    Prev = Pred->getTerminator();
  }

  CallBase *CB = dyn_cast<CallBase>(Prev);
  if (!CB)
    return false;

  auto *Callee = CB->getCalledOperand()->stripPointerCasts();

  // See if the callsite is for resumption or destruction of the coroutine.
  auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
  if (!SubFn)
    return false;

  // Does not refer to the current coroutine, we cannot do anything with it.
  if (SubFn->getFrame() != CoroBegin)
    return false;

  // See if the transformation is safe. Specifically, see if there are any
  // calls in between Save and CallInstr. They can potentially resume the
  // coroutine, rendering this optimization unsafe.
  auto *Save = Suspend->getCoroSave();
  if (hasCallsBetween(Save, CB))
    return false;

  // Replace llvm.coro.suspend with the value that results in resumption over
  // the resume or cleanup path.
  Suspend->replaceAllUsesWith(SubFn->getRawIndex());
  Suspend->eraseFromParent();
  Save->eraseFromParent();

  // No longer need a call to coro.resume or coro.destroy.
  if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
    BranchInst::Create(Invoke->getNormalDest(), Invoke->getIterator());
  }

  // Grab the CalledValue from CB before erasing the CallInstr.
  auto *CalledValue = CB->getCalledOperand();
  CB->eraseFromParent();

  // If no more users remove it. Usually it is a bitcast of SubFn.
  if (CalledValue != SubFn && CalledValue->user_empty())
    if (auto *I = dyn_cast<Instruction>(CalledValue))
      I->eraseFromParent();

  // Now we are good to remove SubFn.
  if (SubFn->user_empty())
    SubFn->eraseFromParent();

  return true;
}
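// Illustration of the pattern handled above (sketch only, names hypothetical):
//
//   %save = call token @llvm.coro.save(ptr %hdl)
//   %addr = call ptr @llvm.coro.subfn.addr(ptr %hdl, i8 0)  ; 0 = resume
//   call fastcc void %addr(ptr %hdl)
//   %sp   = call i8 @llvm.coro.suspend(token %save, i1 false)
//
// With no intervening real calls, the suspend collapses: %sp is replaced by
// the sub-function index (0 here), and the save, the indirect call and the
// suspend are erased, leaving straight-line control flow to the resume label.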
// Remove suspend points that are simplified.
static void simplifySuspendPoints(coro::Shape &Shape) {
  // Currently, the only simplification we do is switch-lowering-specific.
  if (Shape.ABI != coro::ABI::Switch)
    return;

  auto &S = Shape.CoroSuspends;
  size_t I = 0, N = S.size();
  if (N == 0)
    return;

  size_t ChangedFinalIndex = std::numeric_limits<size_t>::max();
  while (true) {
    auto SI = cast<CoroSuspendInst>(S[I]);
    // Leave final.suspend to handleFinalSuspend since it is undefined behavior
    // to resume a coroutine suspended at the final suspend point.
    if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
      if (--N == I)
        break;

      std::swap(S[I], S[N]);

      if (cast<CoroSuspendInst>(S[I])->isFinal()) {
        assert(Shape.SwitchLowering.HasFinalSuspend);
        ChangedFinalIndex = I;
      }

      continue;
    }
    if (++I == N)
      break;
  }
  S.resize(N);

  // Maintain final.suspend in case the final suspend was swapped, because we
  // require the final suspend to be the last element of CoroSuspends.
  if (ChangedFinalIndex < N) {
    assert(cast<CoroSuspendInst>(S[ChangedFinalIndex])->isFinal());
    std::swap(S[ChangedFinalIndex], S.back());
  }
}
struct SwitchCoroutineSplitter {
  static void split(Function &F, coro::Shape &Shape,
                    SmallVectorImpl<Function *> &Clones,
                    TargetTransformInfo &TTI) {
    assert(Shape.ABI == coro::ABI::Switch);

    // Create a resume clone by cloning the body of the original function,
    // setting a new entry block and replacing coro.suspend with an
    // appropriate value to force resume or cleanup at every suspend point.
    createResumeEntryBlock(F, Shape);
    auto *ResumeClone = CoroCloner::createClone(
        F, ".resume", Shape, CoroCloner::Kind::SwitchResume, TTI);
    auto *DestroyClone = CoroCloner::createClone(
        F, ".destroy", Shape, CoroCloner::Kind::SwitchUnwind, TTI);
    auto *CleanupClone = CoroCloner::createClone(
        F, ".cleanup", Shape, CoroCloner::Kind::SwitchCleanup, TTI);

    postSplitCleanup(*ResumeClone);
    postSplitCleanup(*DestroyClone);
    postSplitCleanup(*CleanupClone);

    // Store addresses of the resume/destroy/cleanup functions in the
    // coroutine frame.
    updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);

    assert(Clones.empty());
    Clones.push_back(ResumeClone);
    Clones.push_back(DestroyClone);
    Clones.push_back(CleanupClone);

    // Create a constant array referring to the resume/destroy/cleanup
    // functions pointed to by the last argument of @llvm.coro.info, so that
    // the CoroElide pass can determine the correct function to call.
    setCoroInfo(F, Shape, Clones);
  }
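  // Illustrative result of setCoroInfo (array name follows the original
  // function name, e.g. for a coroutine @f; sketch only):
  //
  //   @f.resumers = private constant [3 x ptr]
  //       [ptr @f.resume, ptr @f.destroy, ptr @f.cleanup]
  //
  // The llvm.coro.id call in @f then references @f.resumers as its info
  // argument, which CoroElide uses to devirtualize resume/destroy calls on
  // elided frames.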
  // Create a variant of the ramp function that does not perform heap
  // allocation for a switch ABI coroutine.
  //
  // The newly split `.noalloc` ramp function has the following differences:
  //  - Has one additional frame pointer parameter in lieu of dynamic
  //    allocation of the frame.
  //  - Suppressed allocations by replacing coro.alloc and coro.free.
  static Function *createNoAllocVariant(Function &F, coro::Shape &Shape,
                                        SmallVectorImpl<Function *> &Clones) {
    assert(Shape.ABI == coro::ABI::Switch);
    auto *OrigFnTy = F.getFunctionType();
    auto OldParams = OrigFnTy->params();

    SmallVector<Type *> NewParams;
    NewParams.reserve(OldParams.size() + 1);
    NewParams.append(OldParams.begin(), OldParams.end());
    NewParams.push_back(PointerType::getUnqual(Shape.FrameTy));

    auto *NewFnTy = FunctionType::get(OrigFnTy->getReturnType(), NewParams,
                                      OrigFnTy->isVarArg());
    Function *NoAllocF =
        Function::Create(NewFnTy, F.getLinkage(), F.getName() + ".noalloc");

    ValueToValueMapTy VMap;
    unsigned int Idx = 0;
    for (const auto &I : F.args()) {
      VMap[&I] = NoAllocF->getArg(Idx++);
    }

    // We just appended the frame pointer as the last argument of the new
    // function.
    auto FrameIdx = NoAllocF->arg_size() - 1;
    SmallVector<ReturnInst *, 4> Returns;
    CloneFunctionInto(NoAllocF, &F, VMap,
                      CloneFunctionChangeType::LocalChangesOnly, Returns);

    if (Shape.CoroBegin) {
      auto *NewCoroBegin =
          cast_if_present<CoroBeginInst>(VMap[Shape.CoroBegin]);
      auto *NewCoroId = cast<CoroIdInst>(NewCoroBegin->getId());
      coro::replaceCoroFree(NewCoroId, /*Elide=*/true);
      coro::suppressCoroAllocs(NewCoroId);
      NewCoroBegin->replaceAllUsesWith(NoAllocF->getArg(FrameIdx));
      NewCoroBegin->eraseFromParent();
    }

    Module *M = F.getParent();
    M->getFunctionList().insert(M->end(), NoAllocF);

    removeUnreachableBlocks(*NoAllocF);
    auto NewAttrs = NoAllocF->getAttributes();
    // When we elide allocation, we read these attributes to determine the
    // frame size and alignment.
    addFramePointerAttrs(NewAttrs, NoAllocF->getContext(), FrameIdx,
                         Shape.FrameSize, Shape.FrameAlign,
                         /*NoAlias=*/false);
    NoAllocF->setAttributes(NewAttrs);

    Clones.push_back(NoAllocF);
    // Reset the original function's coro info, connecting the new noalloc
    // variant to the original ramp function.
    setCoroInfo(F, Shape, Clones);
    // After copying, set the linkage to internal linkage. The original
    // function may have a different linkage, but the optimizations that
    // depend on this function generally rely on LTO.
    NoAllocF->setLinkage(llvm::GlobalValue::InternalLinkage);
    return NoAllocF;
  }
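  // Illustrative signature change (hypothetical coroutine @f; sketch only):
  //
  //   declare ptr @f(i32 %x)                        ; original ramp
  //   declare ptr @f.noalloc(i32 %x, ptr %frame)    ; caller-provided frame
  //
  // The extra parameter carries a caller-allocated frame, so inside the
  // .noalloc clone coro.alloc folds to false and coro.free folds to null.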
  // Create an entry block for a resume function with a switch that will jump
  // to suspend points.
  static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
    LLVMContext &C = F.getContext();

    // resume.entry:
    //   %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
    //                                        i32 0, i32 2
    //   %index = load i32, i32* %index.addr
    //   switch i32 %index, label %unreachable [
    //     i32 0, label %resume.0
    //     i32 1, label %resume.1
    //     ...
    //   ]
    auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
    auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);
    IRBuilder<> Builder(NewEntry);
    auto *FramePtr = Shape.FramePtr;
    auto *FrameTy = Shape.FrameTy;
    auto *GepIndex = Builder.CreateStructGEP(
        FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
    auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
    auto *Switch =
        Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
    Shape.SwitchLowering.ResumeSwitch = Switch;
    size_t SuspendIndex = 0;
    for (auto *AnyS : Shape.CoroSuspends) {
      auto *S = cast<CoroSuspendInst>(AnyS);
      ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);
      // Replace CoroSave with a store to Index:
      //    %index.addr = getelementptr %f.frame... (index field number)
      //    store i32 %IndexVal, i32* %index.addr1
      auto *Save = S->getCoroSave();
      Builder.SetInsertPoint(Save);
      if (S->isFinal()) {
        // The coroutine should be marked done if it reaches the final suspend
        // point.
        markCoroutineAsDone(Builder, Shape, FramePtr);
      } else {
        auto *GepIndex = Builder.CreateStructGEP(
            FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
        Builder.CreateStore(IndexVal, GepIndex);
      }

      Save->replaceAllUsesWith(ConstantTokenNone::get(C));
      Save->eraseFromParent();
      // Split block before and after coro.suspend and add a jump from an entry
      // switch:
      //
      //  whateverBB:
      //    whatever
      //    %0 = call i8 @llvm.coro.suspend(token none, i1 false)
      //    switch i8 %0, label %suspend[i8 0, label %resume
      //                                 i8 1, label %cleanup]
      // becomes:
      //
      //  whateverBB:
      //    whatever
      //    br label %resume.0.landing
      //
      //  resume.0: ; <--- jump from the switch in the resume.entry
      //    %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
      //    br label %resume.0.landing
      //
      //  resume.0.landing:
      //    %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
      //    switch i8 %1, label %suspend [i8 0, label %resume
      //                                  i8 1, label %cleanup]
      auto *SuspendBB = S->getParent();
      auto *ResumeBB =
          SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
      auto *LandingBB = ResumeBB->splitBasicBlock(
          S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
      Switch->addCase(IndexVal, ResumeBB);

      cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
      auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "");
      PN->insertBefore(LandingBB->begin());
      S->replaceAllUsesWith(PN);
      PN->addIncoming(Builder.getInt8(-1), SuspendBB);
      PN->addIncoming(S, ResumeBB);

      ++SuspendIndex;
    }

    Builder.SetInsertPoint(UnreachBB);
    Builder.CreateUnreachable();

    Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
  }
  // Store addresses of Resume/Destroy/Cleanup functions in the coroutine
  // frame.
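  //
  // Rough sketch (field names are illustrative; the real frame is an anonymous
  // literal struct produced by frame building):
  //   %f.Frame = type { ptr <resume.fn>, ptr <destroy.fn>, ..., i32 <index> }
  // with SwitchFieldIndex::Resume and SwitchFieldIndex::Destroy naming the
  // first two slots.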
  static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
                              Function *DestroyFn, Function *CleanupFn) {
    IRBuilder<> Builder(&*Shape.getInsertPtAfterFramePtr());

    auto *ResumeAddr = Builder.CreateStructGEP(
        Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
        "resume.addr");
    Builder.CreateStore(ResumeFn, ResumeAddr);

    Value *DestroyOrCleanupFn = DestroyFn;

    CoroIdInst *CoroId = Shape.getSwitchCoroId();
    if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
      // If there is a CoroAlloc and it returns false (meaning we elided the
      // allocation), use CleanupFn instead of DestroyFn.
      DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
    }
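    // Illustrative IR for the select above (value names are made up):
    //   %destroy.or.cleanup = select i1 %coro.alloc, ptr @f.destroy,
    //                                                ptr @f.cleanup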
    auto *DestroyAddr = Builder.CreateStructGEP(
        Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
        "destroy.addr");
    Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
  }
  // Create a global constant array containing pointers to functions provided
  // and set Info parameter of CoroBegin to point at this constant. Example:
  //
  //   @f.resumers = internal constant [2 x void(%f.frame*)*]
  //                   [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
  //
  //   define void @f() {
  //     ...
  //     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
  //                    i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
  //     ...
  //
  // Assumes that all the functions have the same signature.
  static void setCoroInfo(Function &F, coro::Shape &Shape,
                          ArrayRef<Function *> Fns) {
    // This only works under the switch-lowering ABI because coro elision
    // only works on the switch-lowering ABI.
    SmallVector<Constant *, 4> Args(Fns);
    assert(!Args.empty());
    Function *Part = *Fns.begin();
    Module *M = Part->getParent();
    auto *ArrTy = ArrayType::get(Part->getType(), Args.size());

    auto *ConstVal = ConstantArray::get(ArrTy, Args);
    auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
                                  GlobalVariable::PrivateLinkage, ConstVal,
                                  F.getName() + Twine(".resumers"));

    // Update coro.begin instruction to refer to this constant.
    LLVMContext &C = F.getContext();
    auto *BC = ConstantExpr::getPointerCast(GV, PointerType::getUnqual(C));
    Shape.getSwitchCoroId()->setInfo(BC);
  }
};
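
// Note (added for exposition, inferred from the body below): this helper
// rewrites the result of the llvm.coro.async.resume intrinsic to the given
// continuation function and then poisons the suspend's resume-function
// operand, since the intrinsic has been consumed.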
static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
                                       Value *Continuation) {
  auto *ResumeIntrinsic = Suspend->getResumeFunction();
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *Int8PtrTy = PointerType::getUnqual(Context);

  IRBuilder<> Builder(ResumeIntrinsic);
  auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
  ResumeIntrinsic->replaceAllUsesWith(Val);
  ResumeIntrinsic->eraseFromParent();
  Suspend->setOperand(CoroSuspendAsyncInst::ResumeFunctionArg,
                      PoisonValue::get(Int8PtrTy));
}

/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
                            ArrayRef<Value *> FnArgs,
                            SmallVectorImpl<Value *> &CallArgs) {
  size_t ArgIdx = 0;
  for (auto *paramTy : FnTy->params()) {
    assert(ArgIdx < FnArgs.size());
    if (paramTy != FnArgs[ArgIdx]->getType())
      CallArgs.push_back(
          Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
    else
      CallArgs.push_back(FnArgs[ArgIdx]);
    ++ArgIdx;
  }
}

CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
                                   TargetTransformInfo &TTI,
                                   ArrayRef<Value *> Arguments,
                                   IRBuilder<> &Builder) {
  auto *FnTy = MustTailCallFn->getFunctionType();
  // Coerce the arguments: LLVM optimizations seem to ignore the types in
  // vararg functions and throw away casts in optimized mode.
  SmallVector<Value *, 8> CallArgs;
  coerceArguments(Builder, FnTy, Arguments, CallArgs);

  auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
  // Skip targets which don't support tail call.
  if (TTI.supportsTailCallFor(TailCall)) {
    TailCall->setTailCallKind(CallInst::TCK_MustTail);
  }
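  // Note (added for exposition): on targets that cannot guarantee a tail call,
  // the call above is still emitted; only the musttail marker is dropped by
  // the check right before this point.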
  TailCall->setDebugLoc(Loc);
  TailCall->setCallingConv(MustTailCallFn->getCallingConv());
  return TailCall;
}

void coro::AsyncABI::splitCoroutine(Function &F, coro::Shape &Shape,
                                    SmallVectorImpl<Function *> &Clones,
                                    TargetTransformInfo &TTI) {
  assert(Shape.ABI == coro::ABI::Async);
  assert(Clones.empty());
  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeRetAttr(Attribute::NoAlias);
  F.removeRetAttr(Attribute::NonNull);

  auto &Context = F.getContext();
  auto *Int8PtrTy = PointerType::getUnqual(Context);

  auto *Id = Shape.getAsyncCoroId();
  IRBuilder<> Builder(Id);

  auto *FramePtr = Id->getStorage();
  FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
  FramePtr = Builder.CreateConstInBoundsGEP1_32(
      Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
      "async.ctx.frameptr");

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Value> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(FramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (auto [Idx, CS] : llvm::enumerate(Shape.CoroSuspends)) {
    auto *Suspend = cast<CoroSuspendAsyncInst>(CS);

    // Create the clone declaration.
    auto ResumeNameSuffix = ".resume.";
    auto ProjectionFunctionName =
        Suspend->getAsyncContextProjectionFunction()->getName();
    bool UseSwiftMangling = false;
    if (ProjectionFunctionName == "__swift_async_resume_project_context") {
      ResumeNameSuffix = "TQ";
      UseSwiftMangling = true;
    } else if (ProjectionFunctionName == "__swift_async_resume_get_context") {
      ResumeNameSuffix = "TY";
      UseSwiftMangling = true;
    }
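    // For example (illustrative names only): with Swift mangling the first
    // continuation of a function @foo is named "fooTQ0_" or "fooTY0_",
    // otherwise "foo.resume.0".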
    auto *Continuation = createCloneDeclaration(
        F, Shape,
        UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_"
                         : ResumeNameSuffix + Twine(Idx),
        NextF, Suspend);
    Clones.push_back(Continuation);

    // Insert a branch to a new return block immediately before the suspend
    // point.
    auto *SuspendBB = Suspend->getParent();
    auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Place it before the first suspend.
    auto *ReturnBB =
        BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
    Branch->setSuccessor(0, ReturnBB);

    IRBuilder<> Builder(ReturnBB);

    // Insert the call to the tail call function and inline it.
    auto *Fn = Suspend->getMustTailCallFunction();
    SmallVector<Value *, 8> Args(Suspend->args());
    auto FnArgs = ArrayRef<Value *>(Args).drop_front(
        CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
    auto *TailCall = coro::createMustTailCall(Suspend->getDebugLoc(), Fn, TTI,
                                              FnArgs, Builder);
    Builder.CreateRetVoid();
    InlineFunctionInfo FnInfo;
    (void)InlineFunction(*TailCall, FnInfo);

    // Replace the llvm.coro.async.resume intrinsic call.
    replaceAsyncResumeFunction(Suspend, Continuation);
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  for (auto [Idx, CS] : llvm::enumerate(Shape.CoroSuspends)) {
    auto *Suspend = CS;
    auto *Clone = Clones[Idx];

    CoroCloner::createClone(F, "resume." + Twine(Idx), Shape, Clone, Suspend,
                            TTI);
  }
}

void coro::AnyRetconABI::splitCoroutine(Function &F, coro::Shape &Shape,
                                        SmallVectorImpl<Function *> &Clones,
                                        TargetTransformInfo &TTI) {
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce);
  assert(Clones.empty());

  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeRetAttr(Attribute::NoAlias);
  F.removeRetAttr(Attribute::NonNull);

  // Allocate the frame.
  auto *Id = Shape.getRetconCoroId();
  Value *RawFramePtr;
  if (Shape.RetconLowering.IsFrameInlineInStorage) {
    RawFramePtr = Id->getStorage();
  } else {
    IRBuilder<> Builder(Id);

    // Determine the size of the frame.
    const DataLayout &DL = F.getDataLayout();
    auto Size = DL.getTypeAllocSize(Shape.FrameTy);

    // Allocate. We don't need to update the call graph node because we're
    // going to recompute it from scratch after splitting.
    // FIXME: pass the required alignment
    RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
    RawFramePtr =
        Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());

    // Stash the allocated frame pointer in the continuation storage.
    Builder.CreateStore(RawFramePtr, Id->getStorage());
  }

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Value> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create a unique return block.
  BasicBlock *ReturnBB = nullptr;
  PHINode *ContinuationPhi = nullptr;
  SmallVector<PHINode *, 4> ReturnPHIs;

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (auto [Idx, CS] : llvm::enumerate(Shape.CoroSuspends)) {
    auto Suspend = cast<CoroSuspendRetconInst>(CS);

    // Create the clone declaration.
    auto Continuation = createCloneDeclaration(
        F, Shape, ".resume." + Twine(Idx), NextF, nullptr);
    Clones.push_back(Continuation);

    // Insert a branch to the unified return block immediately before
    // the suspend point.
    auto SuspendBB = Suspend->getParent();
    auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Create the unified return block.
    if (!ReturnBB) {
      // Place it before the first suspend.
      ReturnBB =
          BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
      Shape.RetconLowering.ReturnBlock = ReturnBB;

      IRBuilder<> Builder(ReturnBB);

      // First, the continuation.
      ContinuationPhi =
          Builder.CreatePHI(Continuation->getType(), Shape.CoroSuspends.size());

      // Create PHIs for all other return values.
      assert(ReturnPHIs.empty());

      // Next, all the directly-yielded values.
      for (auto *ResultTy : Shape.getRetconResultTypes())
        ReturnPHIs.push_back(
            Builder.CreatePHI(ResultTy, Shape.CoroSuspends.size()));
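
      // Sketch of the aggregate assembled below for a retcon coroutine that
      // yields a single i32 (names are illustrative only):
      //   { ptr %continuation, i32 %yielded.value }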

      // Build the return value.
      auto RetTy = F.getReturnType();

      // Cast the continuation value if necessary.
      // We can't rely on the types matching up because that type would
      // have to be infinite.
      auto CastedContinuationTy =
          (ReturnPHIs.empty() ? RetTy : RetTy->getStructElementType(0));
      auto *CastedContinuation =
          Builder.CreateBitCast(ContinuationPhi, CastedContinuationTy);

      Value *RetV = CastedContinuation;
      if (!ReturnPHIs.empty()) {
        auto ValueIdx = 0;
        RetV = PoisonValue::get(RetTy);
        RetV = Builder.CreateInsertValue(RetV, CastedContinuation, ValueIdx++);

        for (auto Phi : ReturnPHIs)
          RetV = Builder.CreateInsertValue(RetV, Phi, ValueIdx++);
      }

      Builder.CreateRet(RetV);
    }

    // Branch to the return block.
    Branch->setSuccessor(0, ReturnBB);
    assert(ContinuationPhi);
    ContinuationPhi->addIncoming(Continuation, SuspendBB);
    for (auto [Phi, VUse] :
         llvm::zip_equal(ReturnPHIs, Suspend->value_operands()))
      Phi->addIncoming(VUse, SuspendBB);
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  for (auto [Idx, CS] : llvm::enumerate(Shape.CoroSuspends)) {
    auto Suspend = CS;
    auto Clone = Clones[Idx];

    CoroCloner::createClone(F, "resume." + Twine(Idx), Shape, Clone, Suspend,
                            TTI);
  }
}

namespace {
class PrettyStackTraceFunction : public PrettyStackTraceEntry {
  Function &F;

public:
  PrettyStackTraceFunction(Function &F) : F(F) {}
  void print(raw_ostream &OS) const override {
    OS << "While splitting coroutine ";
    F.printAsOperand(OS, /*print type*/ false, F.getParent());
    OS << "\n";
  }
};
} // namespace

/// Remove calls to llvm.coro.end in the original function.
static void removeCoroEndsFromRampFunction(const coro::Shape &Shape) {
  if (Shape.ABI != coro::ABI::Switch) {
    for (auto *End : Shape.CoroEnds) {
      replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, nullptr);
    }
  } else {
    for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
      auto &Context = End->getContext();
      End->replaceAllUsesWith(ConstantInt::getFalse(Context));
      End->eraseFromParent();
    }
  }
}
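
// Descriptive note (added for clarity, inferred from the body below): the next
// helper returns true if F has at least one call site inside a pre-split
// coroutine that carries the CoroElideSafe attribute, i.e. a caller for which
// eliding the heap allocation is known to be safe.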
static bool hasSafeElideCaller(Function &F) {
  for (auto *U : F.users()) {
    if (auto *CB = dyn_cast<CallBase>(U)) {
      auto *Caller = CB->getFunction();
      if (Caller && Caller->isPresplitCoroutine() &&
          CB->hasFnAttr(llvm::Attribute::CoroElideSafe))
        return true;
    }
  }
  return false;
}

void coro::SwitchABI::splitCoroutine(Function &F, coro::Shape &Shape,
                                     SmallVectorImpl<Function *> &Clones,
                                     TargetTransformInfo &TTI) {
  SwitchCoroutineSplitter::split(F, Shape, Clones, TTI);
}

static void doSplitCoroutine(Function &F, SmallVectorImpl<Function *> &Clones,
                             coro::BaseABI &ABI, TargetTransformInfo &TTI,
                             bool OptimizeFrame) {
  PrettyStackTraceFunction prettyStackTrace(F);

  auto &Shape = ABI.Shape;
  assert(Shape.CoroBegin);

  lowerAwaitSuspends(F, Shape);

  simplifySuspendPoints(Shape);

  normalizeCoroutine(F, Shape, TTI);
  ABI.buildCoroutineFrame(OptimizeFrame);
  replaceFrameSizeAndAlignment(Shape);

  bool isNoSuspendCoroutine = Shape.CoroSuspends.empty();

  bool shouldCreateNoAllocVariant =
      !isNoSuspendCoroutine && Shape.ABI == coro::ABI::Switch &&
      hasSafeElideCaller(F) && !F.hasFnAttribute(llvm::Attribute::NoInline);

  // If there are no suspend points, no split is required; just remove
  // the allocation and deallocation blocks, they are not needed.
  if (isNoSuspendCoroutine) {
    handleNoSuspendCoroutine(Shape);
  } else {
    ABI.splitCoroutine(F, Shape, Clones, TTI);
  }

  // Replace all the swifterror operations in the original function.
  // This invalidates SwiftErrorOps in the Shape.
  replaceSwiftErrorOps(F, Shape, nullptr);

  // Salvage debug intrinsics that point into the coroutine frame in the
  // original function. The Cloner has already salvaged debug info in the new
  // coroutine funclets.
  SmallDenseMap<Argument *, AllocaInst *, 4> ArgToAllocaMap;
  auto [DbgInsts, DbgVariableRecords] = collectDbgVariableIntrinsics(F);
  for (auto *DDI : DbgInsts)
    coro::salvageDebugInfo(ArgToAllocaMap, *DDI, false /*UseEntryValue*/);
  for (DbgVariableRecord *DVR : DbgVariableRecords)
    coro::salvageDebugInfo(ArgToAllocaMap, *DVR, false /*UseEntryValue*/);

  removeCoroEndsFromRampFunction(Shape);

  if (shouldCreateNoAllocVariant)
    SwitchCoroutineSplitter::createNoAllocVariant(F, Shape, Clones);
}

static LazyCallGraph::SCC &updateCallGraphAfterCoroutineSplit(
    LazyCallGraph::Node &N, const coro::Shape &Shape,
    const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
    LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
    FunctionAnalysisManager &FAM) {
  auto *CurrentSCC = &C;
  if (!Clones.empty()) {
    switch (Shape.ABI) {
    case coro::ABI::Switch:
      // Each clone in the Switch lowering is independent of the other clones.
      // Let the LazyCallGraph know about each one separately.
      for (Function *Clone : Clones)
        CG.addSplitFunction(N.getFunction(), *Clone);
      break;
    case coro::ABI::Async:
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      // Each clone in the Async/Retcon lowering references each of the other
      // clones. Let the LazyCallGraph know about all of them at once.
      if (!Clones.empty())
        CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
      break;
    }

    // Let the CGSCC infra handle the changes to the original function.
    CurrentSCC = &updateCGAndAnalysisManagerForCGSCCPass(CG, *CurrentSCC, N, AM,
                                                         UR, FAM);
  }

  // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
  // to the split functions.
  postSplitCleanup(N.getFunction());
  CurrentSCC = &updateCGAndAnalysisManagerForFunctionPass(CG, *CurrentSCC, N,
                                                          AM, UR, FAM);
  return *CurrentSCC;
}

/// Replace a call to llvm.coro.prepare.retcon.
static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
                           LazyCallGraph::SCC &C) {
  auto CastFn = Prepare->getArgOperand(0); // as an i8*
  auto Fn = CastFn->stripPointerCasts();   // as its original type

  // Attempt to peephole this pattern:
  //    %0 = bitcast [[TYPE]] @some_function to i8*
  //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //    %2 = bitcast %1 to [[TYPE]]
  // ==>
  //    %2 = @some_function
  for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
    // Look for bitcasts back to the original function type.
    auto *Cast = dyn_cast<BitCastInst>(U.getUser());
    if (!Cast || Cast->getType() != Fn->getType())
      continue;

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(Fn);
    Cast->eraseFromParent();
  }

  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts.
  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
    if (!Cast->use_empty())
      break;
    CastFn = Cast->getOperand(0);
    Cast->eraseFromParent();
  }
}

static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
                               LazyCallGraph::SCC &C) {
  bool Changed = false;
  for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
    // Intrinsics can only be used in calls.
    auto *Prepare = cast<CallInst>(P.getUser());
    replacePrepare(Prepare, CG, C);
    Changed = true;
  }

  return Changed;
}

static void addPrepareFunction(const Module &M,
                               SmallVectorImpl<Function *> &Fns,
                               StringRef Name) {
  auto *PrepareFn = M.getFunction(Name);
  if (PrepareFn && !PrepareFn->use_empty())
    Fns.push_back(PrepareFn);
}

static std::unique_ptr<coro::BaseABI>
CreateNewABI(Function &F, coro::Shape &S,
             std::function<bool(Instruction &)> IsMatCallback,
             const SmallVector<CoroSplitPass::BaseABITy> GenCustomABIs) {
  if (S.CoroBegin->hasCustomABI()) {
    unsigned CustomABI = S.CoroBegin->getCustomABI();
    if (CustomABI >= GenCustomABIs.size())
      llvm_unreachable("Custom ABI not found among those specified");
    return GenCustomABIs[CustomABI](F, S);
  }

  switch (S.ABI) {
  case coro::ABI::Switch:
    return std::make_unique<coro::SwitchABI>(F, S, IsMatCallback);
  case coro::ABI::Async:
    return std::make_unique<coro::AsyncABI>(F, S, IsMatCallback);
  case coro::ABI::Retcon:
    return std::make_unique<coro::AnyRetconABI>(F, S, IsMatCallback);
  case coro::ABI::RetconOnce:
    return std::make_unique<coro::AnyRetconABI>(F, S, IsMatCallback);
  }
  llvm_unreachable("Unknown ABI");
}

CoroSplitPass::CoroSplitPass(bool OptimizeFrame)
    : CreateAndInitABI([](Function &F, coro::Shape &S) {
        std::unique_ptr<coro::BaseABI> ABI =
            CreateNewABI(F, S, coro::isTriviallyMaterializable, {});
        ABI->init();
        return ABI;
      }),
      OptimizeFrame(OptimizeFrame) {}

CoroSplitPass::CoroSplitPass(
    SmallVector<CoroSplitPass::BaseABITy> GenCustomABIs, bool OptimizeFrame)
    : CreateAndInitABI([=](Function &F, coro::Shape &S) {
        std::unique_ptr<coro::BaseABI> ABI =
            CreateNewABI(F, S, coro::isTriviallyMaterializable, GenCustomABIs);
        ABI->init();
        return ABI;
      }),
      OptimizeFrame(OptimizeFrame) {}

// For backward compatibility, this constructor takes a materializable callback
// and creates a generator for an ABI with a modified materializable callback.
CoroSplitPass::CoroSplitPass(std::function<bool(Instruction &)> IsMatCallback,
                             bool OptimizeFrame)
    : CreateAndInitABI([=](Function &F, coro::Shape &S) {
        std::unique_ptr<coro::BaseABI> ABI =
            CreateNewABI(F, S, IsMatCallback, {});
        ABI->init();
        return ABI;
      }),
      OptimizeFrame(OptimizeFrame) {}

// For backward compatibility, this constructor takes a materializable callback
// and creates a generator for an ABI with a modified materializable callback.
CoroSplitPass::CoroSplitPass(
    std::function<bool(Instruction &)> IsMatCallback,
    SmallVector<CoroSplitPass::BaseABITy> GenCustomABIs, bool OptimizeFrame)
    : CreateAndInitABI([=](Function &F, coro::Shape &S) {
        std::unique_ptr<coro::BaseABI> ABI =
            CreateNewABI(F, S, IsMatCallback, GenCustomABIs);
        ABI->init();
        return ABI;
      }),
      OptimizeFrame(OptimizeFrame) {}

PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
                                     CGSCCAnalysisManager &AM,
                                     LazyCallGraph &CG, CGSCCUpdateResult &UR) {
  // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
  //     non-zero number of nodes, so we assume that here and grab the first
  //     node's function's module.
  Module &M = *C.begin()->getFunction().getParent();
  auto &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  // Check for uses of llvm.coro.prepare.retcon/async.
  SmallVector<Function *, 2> PrepareFns;
  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");

  // Find coroutines for processing.
  SmallVector<LazyCallGraph::Node *> Coroutines;
  for (LazyCallGraph::Node &N : C)
    if (N.getFunction().isPresplitCoroutine())
      Coroutines.push_back(&N);

  if (Coroutines.empty() && PrepareFns.empty())
    return PreservedAnalyses::all();

  auto *CurrentSCC = &C;
  // Split all the coroutines.
  for (LazyCallGraph::Node *N : Coroutines) {
    Function &F = N->getFunction();
    LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
                      << "'\n");

    // The suspend-crossing algorithm in buildCoroutineFrame gets tripped up
    // by unreachable blocks, so remove them as a first pass. Remove the
    // unreachable blocks before collecting intrinsics into Shape.
    removeUnreachableBlocks(F);

    coro::Shape Shape(F);
    if (!Shape.CoroBegin)
      continue;

    F.setSplittedCoroutine();

    std::unique_ptr<coro::BaseABI> ABI = CreateAndInitABI(F, Shape);

    SmallVector<Function *, 4> Clones;
    auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
    doSplitCoroutine(F, Clones, *ABI, TTI, OptimizeFrame);
    CurrentSCC = &updateCallGraphAfterCoroutineSplit(
        *N, Shape, Clones, *CurrentSCC, CG, AM, UR, FAM);

    auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "CoroSplit", &F)
             << "Split '" << ore::NV("function", F.getName())
             << "' (frame_size=" << ore::NV("frame_size", Shape.FrameSize)
             << ", align=" << ore::NV("align", Shape.FrameAlign.value()) << ")";
    });

    if (!Shape.CoroSuspends.empty()) {
      // Run the CGSCC pipeline on the original and newly split functions.
      UR.CWorklist.insert(CurrentSCC);
      for (Function *Clone : Clones)
        UR.CWorklist.insert(CG.lookupSCC(CG.get(*Clone)));
    }
  }

  for (auto *PrepareFn : PrepareFns) {
    replaceAllPrepares(PrepareFn, CG, *CurrentSCC);
  }

  return PreservedAnalyses::none();
}