//===- PlaceSafepoints.cpp - Place GC Safepoints --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Place garbage collection safepoints at appropriate locations in the IR. This
// does not make relocation semantics or variable liveness explicit. That's
// done by RewriteStatepointsForGC.
//
// Terminology:
// - A call is said to be "parseable" if there is a stack map generated for the
// return PC of the call. A runtime can determine where values listed in the
// deopt arguments and (after RewriteStatepointsForGC) gc arguments are located
// on the stack when the code is suspended inside such a call. Every parse
// point is represented by a call wrapped in a gc.statepoint intrinsic.
// - A "poll" is an explicit check in the generated code to determine if the
// runtime needs the generated code to cooperate by calling a helper routine
// and thus suspending its execution at a known state. The call to the helper
// routine will be parseable. The (gc & runtime specific) logic of a poll is
// assumed to be provided in a function of the name "gc.safepoint_poll".
//
// We aim to insert polls such that running code can quickly be brought to a
// well defined state for inspection by the collector. In the current
// implementation, this is done via the insertion of poll sites at method entry
// and the backedge of most loops. We try to avoid inserting more polls than
// are necessary to ensure a finite period between poll sites. This is not
// because the poll itself is expensive in the generated code; it's not. Polls
// do tend to impact the optimizer itself in negative ways; we'd like to avoid
// perturbing the optimization of the method as much as we can.
//
// We also need to make most call sites parseable. The callee might execute a
// poll (or otherwise be inspected by the GC). If so, the entire stack
// (including the suspended frame of the current method) must be parseable.
//
// This pass will insert:
// - Call parse points ("call safepoints") for any call which may need to
// reach a safepoint during the execution of the callee function.
// - Backedge safepoint polls and entry safepoint polls to ensure that
// executing code reaches a safepoint poll in a finite amount of time.
//
// We do not currently support return statepoints, but adding them would not
// be hard. They are not required for correctness - entry safepoints are an
// alternative - but some GCs may prefer them. Patches welcome.
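//
// For illustration only: the body of "gc.safepoint_poll" is supplied by the
// language runtime, not by this pass. A hypothetical sketch (the
// @__safepoint_flag global and @__runtime_poll helper below are made-up
// names) might check a flag and call into the runtime on the slow path:
//
//   define void @gc.safepoint_poll() {
//   entry:
//     %flag = load atomic i8, ptr @__safepoint_flag acquire, align 1
//     %need.poll = icmp ne i8 %flag, 0
//     br i1 %need.poll, label %slow, label %cont
//   slow:
//     ; The slow-path call is what later becomes the parseable runtime call.
//     call void @__runtime_poll()
//     br label %cont
//   cont:
//     ret void
//   }
//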
//===----------------------------------------------------------------------===//
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"

#define DEBUG_TYPE "safepoint-placement"
STATISTIC(NumEntrySafepoints, "Number of entry safepoints inserted");
STATISTIC(NumBackedgeSafepoints, "Number of backedge safepoints inserted");

STATISTIC(CallInLoop,
          "Number of loops without safepoints due to calls in loop");
STATISTIC(FiniteExecution,
          "Number of loops without safepoints due to finite execution");

using namespace llvm;
// Ignore opportunities to avoid placing safepoints on backedges, useful for
// validation
static cl::opt<bool> AllBackedges("spp-all-backedges", cl::Hidden,
                                  cl::init(false));
/// How narrow does the trip count of a loop have to be for the loop to be
/// considered "counted"? Counted loops do not get safepoints at backedges.
static cl::opt<int> CountedLoopTripWidth("spp-counted-loop-trip-width",
                                         cl::Hidden, cl::init(32));
// If true, split the backedge of a loop when placing the safepoint, otherwise
// split the latch block itself. Both are useful to support for
// experimentation, but in practice, it looks like splitting the backedge
// optimizes better.
static cl::opt<bool> SplitBackedge("spp-split-backedge", cl::Hidden,
                                   cl::init(false));

namespace {
/// An analysis pass whose purpose is to identify each of the backedges in
/// the function which require a safepoint poll to be inserted.
struct PlaceBackedgeSafepointsImpl : public FunctionPass {
  static char ID;

  /// The output of the pass - gives a list of each backedge (described by
  /// pointing at the branch) which needs a poll inserted.
  std::vector<Instruction *> PollLocations;

  /// True unless we're running spp-no-call in which case we need to disable
  /// the call-dependent placement opts.
  bool CallSafepointsEnabled;

  ScalarEvolution *SE = nullptr;
  DominatorTree *DT = nullptr;
  LoopInfo *LI = nullptr;
  TargetLibraryInfo *TLI = nullptr;

  PlaceBackedgeSafepointsImpl(bool CallSafepoints = false)
      : FunctionPass(ID), CallSafepointsEnabled(CallSafepoints) {
    initializePlaceBackedgeSafepointsImplPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *);
  void runOnLoopAndSubLoops(Loop *L) {
    // Visit all the subloops
    for (Loop *I : *L)
      runOnLoopAndSubLoops(I);
    runOnLoop(L);
  }

  bool runOnFunction(Function &F) override {
    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    for (Loop *I : *LI) {
      runOnLoopAndSubLoops(I);
    }
    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    // We no longer modify the IR at all in this pass. Thus all
    // analyses are preserved.
    AU.setPreservesAll();
  }
};
} // namespace
static cl::opt<bool> NoEntry("spp-no-entry", cl::Hidden, cl::init(false));
static cl::opt<bool> NoCall("spp-no-call", cl::Hidden, cl::init(false));
static cl::opt<bool> NoBackedge("spp-no-backedge", cl::Hidden, cl::init(false));
namespace {
struct PlaceSafepoints : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid

  PlaceSafepoints() : FunctionPass(ID) {
    initializePlaceSafepointsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // We modify the graph wholesale (inlining, block insertion, etc). We
    // preserve nothing at the moment. We could potentially preserve dom tree
    // if that was worth doing
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
};
} // namespace
// Insert a safepoint poll immediately before the given instruction. Does
// not handle the parsability of state at the runtime call; that's the
// caller's job.
static void
InsertSafepointPoll(Instruction *InsertBefore,
                    std::vector<CallBase *> &ParsePointsNeeded /*rval*/,
                    const TargetLibraryInfo &TLI);
static bool needsStatepoint(CallBase *Call, const TargetLibraryInfo &TLI) {
  if (callsGCLeafFunction(Call, TLI))
    return false;
  if (auto *CI = dyn_cast<CallInst>(Call)) {
    if (CI->isInlineAsm())
      return false;
  }

  return !(isa<GCStatepointInst>(Call) || isa<GCRelocateInst>(Call) ||
           isa<GCResultInst>(Call));
}
/// Returns true if this loop is known to contain a call safepoint which
/// must unconditionally execute on any iteration of the loop which returns
/// to the loop header via an edge from Pred. Returns a conservatively
/// correct answer; i.e. false is always valid.
static bool containsUnconditionalCallSafepoint(Loop *L, BasicBlock *Header,
                                               BasicBlock *Pred,
                                               DominatorTree &DT,
                                               const TargetLibraryInfo &TLI) {
  // In general, we're looking for any cut of the graph which ensures
  // there's a call safepoint along every edge between Header and Pred.
  // For the moment, we look only for the 'cuts' that consist of a single call
  // instruction in a block which is dominated by the Header and dominates the
  // loop latch (Pred) block. Somewhat surprisingly, walking the entire chain
  // of such dominating blocks gets substantially more occurrences than just
  // checking the Pred and Header blocks themselves. This may be due to the
  // density of loop exit conditions caused by range and null checks.
  // TODO: structure this as an analysis pass, cache the result for subloops,
  // avoid dom tree recalculations
  assert(DT.dominates(Header, Pred) && "loop latch not dominated by header?");

  BasicBlock *Current = Pred;
  while (true) {
    for (Instruction &I : *Current) {
      if (auto *Call = dyn_cast<CallBase>(&I))
        // Note: Technically, needing a safepoint isn't quite the right
        // condition here. We should instead be checking if the target method
        // has an unconditional poll. In practice, this is only a theoretical
        // concern since we don't have any methods with conditional-only
        // safepoint polls.
        if (needsStatepoint(Call, TLI))
          return true;
    }

    if (Current == Header)
      break;
    Current = DT.getNode(Current)->getIDom()->getBlock();
  }

  return false;
}
/// Returns true if this loop is known to terminate in a finite number of
/// iterations. Note that this function may return false for a loop which
/// does actually terminate in a finite constant number of iterations due to
/// conservatism in the analysis.
static bool mustBeFiniteCountedLoop(Loop *L, ScalarEvolution *SE,
                                    BasicBlock *Pred) {
  // A conservative bound on the loop as a whole.
  const SCEV *MaxTrips = SE->getConstantMaxBackedgeTakenCount(L);
  if (!isa<SCEVCouldNotCompute>(MaxTrips) &&
      SE->getUnsignedRange(MaxTrips).getUnsignedMax().isIntN(
          CountedLoopTripWidth))
    return true;

  // If this is a conditional branch to the header with the alternate path
  // being outside the loop, we can ask questions about the execution frequency
  // of the exit block.
  if (L->isLoopExiting(Pred)) {
    // This returns an exact expression only. TODO: We really only need an
    // upper bound here, but SE doesn't expose that.
    const SCEV *MaxExec = SE->getExitCount(L, Pred);
    if (!isa<SCEVCouldNotCompute>(MaxExec) &&
        SE->getUnsignedRange(MaxExec).getUnsignedMax().isIntN(
            CountedLoopTripWidth))
      return true;
  }

  return /* not finite */ false;
}
static void scanOneBB(Instruction *Start, Instruction *End,
                      std::vector<CallInst *> &Calls,
                      DenseSet<BasicBlock *> &Seen,
                      std::vector<BasicBlock *> &Worklist) {
  for (BasicBlock::iterator BBI(Start), BBE0 = Start->getParent()->end(),
                            BBE1 = BasicBlock::iterator(End);
       BBI != BBE0 && BBI != BBE1; BBI++) {
    if (CallInst *CI = dyn_cast<CallInst>(&*BBI))
      Calls.push_back(CI);

    // FIXME: This code does not handle invokes
    assert(!isa<InvokeInst>(&*BBI) &&
           "support for invokes in poll code needed");

    // Only add the successor blocks if we reach the terminator instruction
    // without encountering end first
    if (BBI->isTerminator()) {
      BasicBlock *BB = BBI->getParent();
      for (BasicBlock *Succ : successors(BB)) {
        if (Seen.insert(Succ).second) {
          Worklist.push_back(Succ);
        }
      }
    }
  }
}
static void scanInlinedCode(Instruction *Start, Instruction *End,
                            std::vector<CallInst *> &Calls,
                            DenseSet<BasicBlock *> &Seen) {
  Calls.clear();
  std::vector<BasicBlock *> Worklist;
  Seen.insert(Start->getParent());
  scanOneBB(Start, End, Calls, Seen, Worklist);
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.back();
    Worklist.pop_back();
    scanOneBB(&*BB->begin(), End, Calls, Seen, Worklist);
  }
}
bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) {
  // Loop through all loop latches (branches controlling backedges). We need
  // to place a safepoint on every backedge (potentially).
  // Note: In common usage, there will be only one edge due to LoopSimplify
  // having run sometime earlier in the pipeline, but this code must be correct
  // w.r.t. loops with multiple backedges.
  BasicBlock *Header = L->getHeader();
  SmallVector<BasicBlock *, 16> LoopLatches;
  L->getLoopLatches(LoopLatches);
  for (BasicBlock *Pred : LoopLatches) {
    assert(L->contains(Pred));

    // Make a policy decision about whether this loop needs a safepoint or
    // not. Note that this is about unburdening the optimizer in loops, not
    // avoiding the runtime cost of the actual safepoint.
    if (!AllBackedges) {
      if (mustBeFiniteCountedLoop(L, SE, Pred)) {
        LLVM_DEBUG(dbgs() << "skipping safepoint placement in finite loop\n");
        FiniteExecution++;
        continue;
      }
      if (CallSafepointsEnabled &&
          containsUnconditionalCallSafepoint(L, Header, Pred, *DT, *TLI)) {
        // Note: This is only semantically legal since we won't do any further
        // IPO or inlining before the actual call insertion. If we did, we
        // might later lose this call safepoint.
        LLVM_DEBUG(
            dbgs()
            << "skipping safepoint placement due to unconditional call\n");
        CallInLoop++;
        continue;
      }
    }

    // TODO: We can create an inner loop which runs a finite number of
    // iterations with an outer loop which contains a safepoint. This would
    // not help runtime performance that much, but it might help our ability to
    // optimize the inner loop.

    // Safepoint insertion would involve creating a new basic block (as the
    // target of the current backedge) which does the safepoint (of all live
    // variables) and branches to the true header
    Instruction *Term = Pred->getTerminator();

    LLVM_DEBUG(dbgs() << "[LSP] terminator instruction: " << *Term);

    PollLocations.push_back(Term);
  }

  return false;
}
/// Returns true if an entry safepoint is not required before this callsite in
/// the caller function.
static bool doesNotRequireEntrySafepointBefore(CallBase *Call) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::experimental_gc_statepoint:
    case Intrinsic::experimental_patchpoint_void:
    case Intrinsic::experimental_patchpoint_i64:
      // These can wrap an actual call which may grow the stack by an unbounded
      // amount or run forever.
      return false;
    default:
      // Most LLVM intrinsics are things which do not expand to actual calls, or
      // at least if they do, are leaf functions that cause only finite stack
      // growth. In particular, the optimizer likes to form things like memsets
      // out of stores in the original IR. Another important example is
      // llvm.localescape which must occur in the entry block. Inserting a
      // safepoint before it is not legal since it could push the localescape
      // out of the entry block.
      return true;
    }
  }
  return false;
}
static Instruction *findLocationForEntrySafepoint(Function &F,
                                                  DominatorTree &DT) {

  // Conceptually, this poll needs to be on method entry, but in
  // practice, we place it as late in the entry block as possible. We
  // can place it as late as we want as long as it dominates all calls
  // that can grow the stack. This, combined with backedge polls,
  // gives us all the progress guarantees we need.

  // HasNextInstruction and NextInstruction are used to iterate
  // through a "straight line" execution sequence.

  auto HasNextInstruction = [](Instruction *I) {
    if (!I->isTerminator())
      return true;

    BasicBlock *nextBB = I->getParent()->getUniqueSuccessor();
    return nextBB && (nextBB->getUniquePredecessor() != nullptr);
  };

  auto NextInstruction = [&](Instruction *I) {
    assert(HasNextInstruction(I) &&
           "first check if there is a next instruction!");

    if (I->isTerminator())
      return &I->getParent()->getUniqueSuccessor()->front();
    return &*++I->getIterator();
  };

  Instruction *Cursor = nullptr;
  for (Cursor = &F.getEntryBlock().front(); HasNextInstruction(Cursor);
       Cursor = NextInstruction(Cursor)) {

    // We need to ensure a safepoint poll occurs before any 'real' call. The
    // easiest way to ensure finite execution between safepoints in the face of
    // recursive and mutually recursive functions is to enforce that each take
    // a safepoint. Additionally, we need to ensure a poll before any call
    // which can grow the stack by an unbounded amount. This isn't required
    // for GC semantics per se, but is a common requirement for languages
    // which detect stack overflow via guard pages and then throw exceptions.
    if (auto *Call = dyn_cast<CallBase>(Cursor)) {
      if (doesNotRequireEntrySafepointBefore(Call))
        continue;
      break;
    }
  }

  assert((HasNextInstruction(Cursor) || Cursor->isTerminator()) &&
         "either we stopped because of a call, or because of terminator");

  return Cursor;
}
const char GCSafepointPollName[] = "gc.safepoint_poll";

static bool isGCSafepointPoll(Function &F) {
  return F.getName().equals(GCSafepointPollName);
}
/// Returns true if this function should be rewritten to include safepoint
/// polls and parseable call sites. The main point of this function is to be
/// an extension point for custom logic.
static bool shouldRewriteFunction(Function &F) {
  // TODO: This should check the GCStrategy
  if (F.hasGC()) {
    const auto &FunctionGCName = F.getGC();
    const StringRef StatepointExampleName("statepoint-example");
    const StringRef CoreCLRName("coreclr");
    return (StatepointExampleName == FunctionGCName) ||
           (CoreCLRName == FunctionGCName);
  } else
    return false;
}
// TODO: These should become properties of the GCStrategy, possibly with
// command line overrides.
static bool enableEntrySafepoints(Function &F) { return !NoEntry; }
static bool enableBackedgeSafepoints(Function &F) { return !NoBackedge; }
static bool enableCallSafepoints(Function &F) { return !NoCall; }
bool PlaceSafepoints::runOnFunction(Function &F) {
  if (F.isDeclaration() || F.empty()) {
    // This is a declaration, nothing to do. Must exit early to avoid crash in
    // dom tree calculation
    return false;
  }

  if (isGCSafepointPoll(F)) {
    // Given we're inlining this inside of safepoint poll insertion, this
    // doesn't make any sense. Note that we do make any contained calls
    // parseable after we inline a poll.
    return false;
  }

  if (!shouldRewriteFunction(F))
    return false;

  const TargetLibraryInfo &TLI =
      getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);

  bool Modified = false;

  // In various bits below, we rely on the fact that uses are reachable from
  // defs. When there are basic blocks unreachable from the entry, dominance
  // and reachability queries return non-sensical results. Thus, we preprocess
  // the function to ensure these properties hold.
  Modified |= removeUnreachableBlocks(F);

  // STEP 1 - Insert the safepoint polling locations. We do not need to
  // actually insert parse points yet. That will be done for all polls and
  // calls in a single pass.

  DominatorTree DT;
  DT.recalculate(F);

  SmallVector<Instruction *, 16> PollsNeeded;
  std::vector<CallBase *> ParsePointNeeded;
  if (enableBackedgeSafepoints(F)) {
    // Construct a pass manager to run the LoopPass backedge logic. We
    // need the pass manager to handle scheduling all the loop passes
    // appropriately. Doing this by hand is painful and just not worth messing
    // with for the moment.
    legacy::FunctionPassManager FPM(F.getParent());
    bool CanAssumeCallSafepoints = enableCallSafepoints(F);
    auto *PBS = new PlaceBackedgeSafepointsImpl(CanAssumeCallSafepoints);
    FPM.add(PBS);
    FPM.run(F);

    // We preserve dominance information when inserting the poll, otherwise
    // we'd have to recalculate this on every insert
    DT.recalculate(F);

    auto &PollLocations = PBS->PollLocations;

    auto OrderByBBName = [](Instruction *a, Instruction *b) {
      return a->getParent()->getName() < b->getParent()->getName();
    };
    // We need the order of the list to be stable so that naming ends up stable
    // when we split edges. This makes test cases much easier to write.
    llvm::sort(PollLocations, OrderByBBName);

    // We can sometimes end up with duplicate poll locations. This happens if
    // a single loop is visited more than once. The fact this happens seems
    // wrong, but it does happen for the split-backedge.ll test case.
    PollLocations.erase(std::unique(PollLocations.begin(), PollLocations.end()),
                        PollLocations.end());

    // Insert a poll at each point the analysis pass identified
    // The poll location must be the terminator of a loop latch block.
    for (Instruction *Term : PollLocations) {
      // We are inserting a poll, the function is modified
      Modified = true;
      if (SplitBackedge) {
        // Split the backedge of the loop and insert the poll within that new
        // basic block. This creates a loop with two latches per original
        // latch (which is non-ideal), but this appears to be easier to
        // optimize in practice than inserting the poll immediately before the
        // latch test.

        // Since this is a latch, at least one of the successors must dominate
        // it. It's possible that we have a) duplicate edges to the same header
        // and b) edges to distinct loop headers. We need to insert polls on
        // each.
        SetVector<BasicBlock *> Headers;
        for (unsigned i = 0; i < Term->getNumSuccessors(); i++) {
          BasicBlock *Succ = Term->getSuccessor(i);
          if (DT.dominates(Succ, Term->getParent())) {
            Headers.insert(Succ);
          }
        }
        assert(!Headers.empty() && "poll location is not a loop latch?");

        // The split loop structure here is so that we only need to recalculate
        // the dominator tree once. Alternatively, we could just keep it up to
        // date and use a more natural merged loop.
        SetVector<BasicBlock *> SplitBackedges;
        for (BasicBlock *Header : Headers) {
          BasicBlock *NewBB = SplitEdge(Term->getParent(), Header, &DT);
          PollsNeeded.push_back(NewBB->getTerminator());
          NumBackedgeSafepoints++;
        }
      } else {
        // Split the latch block itself, right before the terminator.
        PollsNeeded.push_back(Term);
        NumBackedgeSafepoints++;
      }
    }
  }
  if (enableEntrySafepoints(F)) {
    if (Instruction *Location = findLocationForEntrySafepoint(F, DT)) {
      PollsNeeded.push_back(Location);
      Modified = true;
      NumEntrySafepoints++;
    }
    // TODO: else we should assert that there was, in fact, a policy choice to
    // not insert an entry safepoint poll.
  }

  // Now that we've identified all the needed safepoint poll locations, insert
  // safepoint polls themselves.
  for (Instruction *PollLocation : PollsNeeded) {
    std::vector<CallBase *> RuntimeCalls;
    InsertSafepointPoll(PollLocation, RuntimeCalls, TLI);
    llvm::append_range(ParsePointNeeded, RuntimeCalls);
  }

  return Modified;
}
char PlaceBackedgeSafepointsImpl::ID = 0;
char PlaceSafepoints::ID = 0;

FunctionPass *llvm::createPlaceSafepointsPass() {
  return new PlaceSafepoints();
}
INITIALIZE_PASS_BEGIN(PlaceBackedgeSafepointsImpl,
                      "place-backedge-safepoints-impl",
                      "Place Backedge Safepoints", false, false)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(PlaceBackedgeSafepointsImpl,
                    "place-backedge-safepoints-impl",
                    "Place Backedge Safepoints", false, false)

INITIALIZE_PASS_BEGIN(PlaceSafepoints, "place-safepoints", "Place Safepoints",
                      false, false)
INITIALIZE_PASS_END(PlaceSafepoints, "place-safepoints", "Place Safepoints",
                    false, false)
static void
InsertSafepointPoll(Instruction *InsertBefore,
                    std::vector<CallBase *> &ParsePointsNeeded /*rval*/,
                    const TargetLibraryInfo &TLI) {
  BasicBlock *OrigBB = InsertBefore->getParent();
  Module *M = InsertBefore->getModule();
  assert(M && "must be part of a module");

  // Inline the safepoint poll implementation - this will get all the branch,
  // control flow, etc. Most importantly, it will introduce the actual slow
  // path call - where we need to insert a safepoint (parsepoint).

  auto *F = M->getFunction(GCSafepointPollName);
  assert(F && "gc.safepoint_poll function is missing");
  assert(F->getValueType() ==
             FunctionType::get(Type::getVoidTy(M->getContext()), false) &&
         "gc.safepoint_poll declared with wrong type");
  assert(!F->empty() && "gc.safepoint_poll must be a non-empty function");
  CallInst *PollCall = CallInst::Create(F, "", InsertBefore);

  // Record some information about the call site we're replacing
  BasicBlock::iterator Before(PollCall), After(PollCall);
  bool IsBegin = false;
  if (Before == OrigBB->begin())
    IsBegin = true;
  else
    Before--;

  After++;
  assert(After != OrigBB->end() && "must have successor");

  // Do the actual inlining
  InlineFunctionInfo IFI;
  bool InlineStatus = InlineFunction(*PollCall, IFI).isSuccess();
  assert(InlineStatus && "inline must succeed");
  (void)InlineStatus; // suppress warning in release-asserts

  // Check post-conditions
  assert(IFI.StaticAllocas.empty() && "can't have allocs");
  std::vector<CallInst *> Calls; // new calls
  DenseSet<BasicBlock *> BBs;    // new BBs + insertee

  // Include only the newly inserted instructions. Note: begin may not be valid
  // if we inserted to the beginning of the basic block
  BasicBlock::iterator Start = IsBegin ? OrigBB->begin() : std::next(Before);

  // If your poll function includes an unreachable at the end, that's not
  // valid. Bugpoint likes to create this, so check for it.
  assert(isPotentiallyReachable(&*Start, &*After) &&
         "malformed poll function");

  scanInlinedCode(&*Start, &*After, Calls, BBs);
  assert(!Calls.empty() && "slow path not found for safepoint poll");

  // Record the fact we need a parsable state at the runtime call contained in
  // the poll function. This is required so that the runtime knows how to
  // parse the last frame when we actually take the safepoint (i.e. execute
  // the slow path)
  assert(ParsePointsNeeded.empty());
  for (auto *CI : Calls) {
    // No safepoint needed or wanted
    if (!needsStatepoint(CI, TLI))
      continue;

    // These are likely runtime calls. Should we assert that via calling
    // convention or something?
    ParsePointsNeeded.push_back(CI);
  }
  assert(ParsePointsNeeded.size() <= Calls.size());
}