1 //===- JumpThreading.cpp - Thread control through conditional blocks ------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements the Jump Threading pass.
11 //===----------------------------------------------------------------------===//
13 #include "llvm/Transforms/Scalar/JumpThreading.h"
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/DenseSet.h"
16 #include "llvm/ADT/MapVector.h"
17 #include "llvm/ADT/Optional.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/Analysis/BlockFrequencyInfo.h"
24 #include "llvm/Analysis/BranchProbabilityInfo.h"
25 #include "llvm/Analysis/CFG.h"
26 #include "llvm/Analysis/ConstantFolding.h"
27 #include "llvm/Analysis/DomTreeUpdater.h"
28 #include "llvm/Analysis/GlobalsModRef.h"
29 #include "llvm/Analysis/GuardUtils.h"
30 #include "llvm/Analysis/InstructionSimplify.h"
31 #include "llvm/Analysis/LazyValueInfo.h"
32 #include "llvm/Analysis/Loads.h"
33 #include "llvm/Analysis/LoopInfo.h"
34 #include "llvm/Analysis/MemoryLocation.h"
35 #include "llvm/Analysis/TargetLibraryInfo.h"
36 #include "llvm/Analysis/TargetTransformInfo.h"
37 #include "llvm/Analysis/ValueTracking.h"
38 #include "llvm/IR/BasicBlock.h"
39 #include "llvm/IR/CFG.h"
40 #include "llvm/IR/Constant.h"
41 #include "llvm/IR/ConstantRange.h"
42 #include "llvm/IR/Constants.h"
43 #include "llvm/IR/DataLayout.h"
44 #include "llvm/IR/Dominators.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/InstrTypes.h"
47 #include "llvm/IR/Instruction.h"
48 #include "llvm/IR/Instructions.h"
49 #include "llvm/IR/IntrinsicInst.h"
50 #include "llvm/IR/Intrinsics.h"
51 #include "llvm/IR/LLVMContext.h"
52 #include "llvm/IR/MDBuilder.h"
53 #include "llvm/IR/Metadata.h"
54 #include "llvm/IR/Module.h"
55 #include "llvm/IR/PassManager.h"
56 #include "llvm/IR/PatternMatch.h"
57 #include "llvm/IR/Type.h"
58 #include "llvm/IR/Use.h"
59 #include "llvm/IR/User.h"
60 #include "llvm/IR/Value.h"
61 #include "llvm/InitializePasses.h"
62 #include "llvm/Pass.h"
63 #include "llvm/Support/BlockFrequency.h"
64 #include "llvm/Support/BranchProbability.h"
65 #include "llvm/Support/Casting.h"
66 #include "llvm/Support/CommandLine.h"
67 #include "llvm/Support/Debug.h"
68 #include "llvm/Support/raw_ostream.h"
69 #include "llvm/Transforms/Scalar.h"
70 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
71 #include "llvm/Transforms/Utils/Cloning.h"
72 #include "llvm/Transforms/Utils/Local.h"
73 #include "llvm/Transforms/Utils/SSAUpdater.h"
74 #include "llvm/Transforms/Utils/ValueMapper.h"
84 using namespace jumpthreading
;
86 #define DEBUG_TYPE "jump-threading"
88 STATISTIC(NumThreads
, "Number of jumps threaded");
89 STATISTIC(NumFolds
, "Number of terminators folded");
90 STATISTIC(NumDupes
, "Number of branch blocks duplicated to eliminate phi");
92 static cl::opt
<unsigned>
93 BBDuplicateThreshold("jump-threading-threshold",
94 cl::desc("Max block size to duplicate for jump threading"),
95 cl::init(6), cl::Hidden
);
97 static cl::opt
<unsigned>
98 ImplicationSearchThreshold(
99 "jump-threading-implication-search-threshold",
100 cl::desc("The number of predecessors to search for a stronger "
101 "condition to use to thread over a weaker condition"),
102 cl::init(3), cl::Hidden
);
104 static cl::opt
<bool> PrintLVIAfterJumpThreading(
105 "print-lvi-after-jump-threading",
106 cl::desc("Print the LazyValueInfo cache after JumpThreading"), cl::init(false),
109 static cl::opt
<bool> JumpThreadingFreezeSelectCond(
110 "jump-threading-freeze-select-cond",
111 cl::desc("Freeze the condition when unfolding select"), cl::init(false),
114 static cl::opt
<bool> ThreadAcrossLoopHeaders(
115 "jump-threading-across-loop-headers",
116 cl::desc("Allow JumpThreading to thread across loop headers, for testing"),
117 cl::init(false), cl::Hidden
);
122 /// This pass performs 'jump threading', which looks at blocks that have
123 /// multiple predecessors and multiple successors. If one or more of the
124 /// predecessors of the block can be proven to always jump to one of the
125 /// successors, we forward the edge from the predecessor to the successor by
126 /// duplicating the contents of this block.
128 /// An example of when this can occur is code like this:
135 /// In this case, the unconditional branch at the end of the first if can be
136 /// revectored to the false side of the second if.
137 class JumpThreading
: public FunctionPass
{
138 JumpThreadingPass Impl
;
141 static char ID
; // Pass identification
143 JumpThreading(bool InsertFreezeWhenUnfoldingSelect
= false, int T
= -1)
144 : FunctionPass(ID
), Impl(InsertFreezeWhenUnfoldingSelect
, T
) {
145 initializeJumpThreadingPass(*PassRegistry::getPassRegistry());
148 bool runOnFunction(Function
&F
) override
;
150 void getAnalysisUsage(AnalysisUsage
&AU
) const override
{
151 AU
.addRequired
<DominatorTreeWrapperPass
>();
152 AU
.addPreserved
<DominatorTreeWrapperPass
>();
153 AU
.addRequired
<AAResultsWrapperPass
>();
154 AU
.addRequired
<LazyValueInfoWrapperPass
>();
155 AU
.addPreserved
<LazyValueInfoWrapperPass
>();
156 AU
.addPreserved
<GlobalsAAWrapperPass
>();
157 AU
.addRequired
<TargetLibraryInfoWrapperPass
>();
158 AU
.addRequired
<TargetTransformInfoWrapperPass
>();
161 void releaseMemory() override
{ Impl
.releaseMemory(); }
164 } // end anonymous namespace
166 char JumpThreading::ID
= 0;
168 INITIALIZE_PASS_BEGIN(JumpThreading
, "jump-threading",
169 "Jump Threading", false, false)
170 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass
)
171 INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass
)
172 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass
)
173 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass
)
174 INITIALIZE_PASS_END(JumpThreading
, "jump-threading",
175 "Jump Threading", false, false)
177 // Public interface to the Jump Threading pass
178 FunctionPass
*llvm::createJumpThreadingPass(bool InsertFr
, int Threshold
) {
179 return new JumpThreading(InsertFr
, Threshold
);
182 JumpThreadingPass::JumpThreadingPass(bool InsertFr
, int T
) {
183 InsertFreezeWhenUnfoldingSelect
= JumpThreadingFreezeSelectCond
| InsertFr
;
184 DefaultBBDupThreshold
= (T
== -1) ? BBDuplicateThreshold
: unsigned(T
);
187 // Update branch probability information according to conditional
188 // branch probability. This is usually made possible for cloned branches
189 // in inline instances by the context specific profile in the caller.
201 // cond = PN([true, %A], [..., %B]); // PHI node
204 // ... // P(cond == true) = 1%
207 // Here we know that when block A is taken, cond must be true, which means
208 // P(cond == true | A) = 1
210 // Given that P(cond == true) = P(cond == true | A) * P(A) +
211 // P(cond == true | B) * P(B)
213 // P(cond == true ) = P(A) + P(cond == true | B) * P(B)
216 // P(A) is less than P(cond == true), i.e.
217 // P(t == true) <= P(cond == true)
219 // In other words, if we know P(cond == true) is unlikely, we know
220 // that P(t == true) is also unlikely.
222 static void updatePredecessorProfileMetadata(PHINode
*PN
, BasicBlock
*BB
) {
223 BranchInst
*CondBr
= dyn_cast
<BranchInst
>(BB
->getTerminator());
227 uint64_t TrueWeight
, FalseWeight
;
228 if (!CondBr
->extractProfMetadata(TrueWeight
, FalseWeight
))
231 if (TrueWeight
+ FalseWeight
== 0)
232 // Zero branch_weights do not give a hint for getting branch probabilities.
233 // Technically it would result in division by zero denominator, which is
234 // TrueWeight + FalseWeight.
237 // Returns the outgoing edge of the dominating predecessor block
238 // that leads to the PhiNode's incoming block:
239 auto GetPredOutEdge
=
240 [](BasicBlock
*IncomingBB
,
241 BasicBlock
*PhiBB
) -> std::pair
<BasicBlock
*, BasicBlock
*> {
242 auto *PredBB
= IncomingBB
;
243 auto *SuccBB
= PhiBB
;
244 SmallPtrSet
<BasicBlock
*, 16> Visited
;
246 BranchInst
*PredBr
= dyn_cast
<BranchInst
>(PredBB
->getTerminator());
247 if (PredBr
&& PredBr
->isConditional())
248 return {PredBB
, SuccBB
};
249 Visited
.insert(PredBB
);
250 auto *SinglePredBB
= PredBB
->getSinglePredecessor();
252 return {nullptr, nullptr};
254 // Stop searching when SinglePredBB has been visited. It means we see
255 // an unreachable loop.
256 if (Visited
.count(SinglePredBB
))
257 return {nullptr, nullptr};
260 PredBB
= SinglePredBB
;
264 for (unsigned i
= 0, e
= PN
->getNumIncomingValues(); i
!= e
; ++i
) {
265 Value
*PhiOpnd
= PN
->getIncomingValue(i
);
266 ConstantInt
*CI
= dyn_cast
<ConstantInt
>(PhiOpnd
);
268 if (!CI
|| !CI
->getType()->isIntegerTy(1))
271 BranchProbability BP
=
272 (CI
->isOne() ? BranchProbability::getBranchProbability(
273 TrueWeight
, TrueWeight
+ FalseWeight
)
274 : BranchProbability::getBranchProbability(
275 FalseWeight
, TrueWeight
+ FalseWeight
));
277 auto PredOutEdge
= GetPredOutEdge(PN
->getIncomingBlock(i
), BB
);
278 if (!PredOutEdge
.first
)
281 BasicBlock
*PredBB
= PredOutEdge
.first
;
282 BranchInst
*PredBr
= dyn_cast
<BranchInst
>(PredBB
->getTerminator());
286 uint64_t PredTrueWeight
, PredFalseWeight
;
287 // FIXME: We currently only set the profile data when it is missing.
288 // With PGO, this can be used to refine even existing profile data with
289 // context information. This needs to be done after more performance
291 if (PredBr
->extractProfMetadata(PredTrueWeight
, PredFalseWeight
))
294 // We can not infer anything useful when BP >= 50%, because BP is the
295 // upper bound probability value.
296 if (BP
>= BranchProbability(50, 100))
299 SmallVector
<uint32_t, 2> Weights
;
300 if (PredBr
->getSuccessor(0) == PredOutEdge
.second
) {
301 Weights
.push_back(BP
.getNumerator());
302 Weights
.push_back(BP
.getCompl().getNumerator());
304 Weights
.push_back(BP
.getCompl().getNumerator());
305 Weights
.push_back(BP
.getNumerator());
307 PredBr
->setMetadata(LLVMContext::MD_prof
,
308 MDBuilder(PredBr
->getParent()->getContext())
309 .createBranchWeights(Weights
));
313 /// runOnFunction - Toplevel algorithm.
314 bool JumpThreading::runOnFunction(Function
&F
) {
317 auto TTI
= &getAnalysis
<TargetTransformInfoWrapperPass
>().getTTI(F
);
318 // Jump Threading has no sense for the targets with divergent CF
319 if (TTI
->hasBranchDivergence())
321 auto TLI
= &getAnalysis
<TargetLibraryInfoWrapperPass
>().getTLI(F
);
322 auto DT
= &getAnalysis
<DominatorTreeWrapperPass
>().getDomTree();
323 auto LVI
= &getAnalysis
<LazyValueInfoWrapperPass
>().getLVI();
324 auto AA
= &getAnalysis
<AAResultsWrapperPass
>().getAAResults();
325 DomTreeUpdater
DTU(*DT
, DomTreeUpdater::UpdateStrategy::Lazy
);
326 std::unique_ptr
<BlockFrequencyInfo
> BFI
;
327 std::unique_ptr
<BranchProbabilityInfo
> BPI
;
328 if (F
.hasProfileData()) {
329 LoopInfo LI
{DominatorTree(F
)};
330 BPI
.reset(new BranchProbabilityInfo(F
, LI
, TLI
));
331 BFI
.reset(new BlockFrequencyInfo(F
, *BPI
, LI
));
334 bool Changed
= Impl
.runImpl(F
, TLI
, LVI
, AA
, &DTU
, F
.hasProfileData(),
335 std::move(BFI
), std::move(BPI
));
336 if (PrintLVIAfterJumpThreading
) {
337 dbgs() << "LVI for function '" << F
.getName() << "':\n";
338 LVI
->printLVI(F
, DTU
.getDomTree(), dbgs());
343 PreservedAnalyses
JumpThreadingPass::run(Function
&F
,
344 FunctionAnalysisManager
&AM
) {
345 auto &TTI
= AM
.getResult
<TargetIRAnalysis
>(F
);
346 // Jump Threading has no sense for the targets with divergent CF
347 if (TTI
.hasBranchDivergence())
348 return PreservedAnalyses::all();
349 auto &TLI
= AM
.getResult
<TargetLibraryAnalysis
>(F
);
350 auto &DT
= AM
.getResult
<DominatorTreeAnalysis
>(F
);
351 auto &LVI
= AM
.getResult
<LazyValueAnalysis
>(F
);
352 auto &AA
= AM
.getResult
<AAManager
>(F
);
353 DomTreeUpdater
DTU(DT
, DomTreeUpdater::UpdateStrategy::Lazy
);
355 std::unique_ptr
<BlockFrequencyInfo
> BFI
;
356 std::unique_ptr
<BranchProbabilityInfo
> BPI
;
357 if (F
.hasProfileData()) {
358 LoopInfo LI
{DominatorTree(F
)};
359 BPI
.reset(new BranchProbabilityInfo(F
, LI
, &TLI
));
360 BFI
.reset(new BlockFrequencyInfo(F
, *BPI
, LI
));
363 bool Changed
= runImpl(F
, &TLI
, &LVI
, &AA
, &DTU
, F
.hasProfileData(),
364 std::move(BFI
), std::move(BPI
));
366 if (PrintLVIAfterJumpThreading
) {
367 dbgs() << "LVI for function '" << F
.getName() << "':\n";
368 LVI
.printLVI(F
, DTU
.getDomTree(), dbgs());
372 return PreservedAnalyses::all();
373 PreservedAnalyses PA
;
374 PA
.preserve
<DominatorTreeAnalysis
>();
375 PA
.preserve
<LazyValueAnalysis
>();
379 bool JumpThreadingPass::runImpl(Function
&F
, TargetLibraryInfo
*TLI_
,
380 LazyValueInfo
*LVI_
, AliasAnalysis
*AA_
,
381 DomTreeUpdater
*DTU_
, bool HasProfileData_
,
382 std::unique_ptr
<BlockFrequencyInfo
> BFI_
,
383 std::unique_ptr
<BranchProbabilityInfo
> BPI_
) {
384 LLVM_DEBUG(dbgs() << "Jump threading on function '" << F
.getName() << "'\n");
391 // When profile data is available, we need to update edge weights after
392 // successful jump threading, which requires both BPI and BFI being available.
393 HasProfileData
= HasProfileData_
;
394 auto *GuardDecl
= F
.getParent()->getFunction(
395 Intrinsic::getName(Intrinsic::experimental_guard
));
396 HasGuards
= GuardDecl
&& !GuardDecl
->use_empty();
397 if (HasProfileData
) {
398 BPI
= std::move(BPI_
);
399 BFI
= std::move(BFI_
);
402 // Reduce the number of instructions duplicated when optimizing strictly for
404 if (BBDuplicateThreshold
.getNumOccurrences())
405 BBDupThreshold
= BBDuplicateThreshold
;
406 else if (F
.hasFnAttribute(Attribute::MinSize
))
409 BBDupThreshold
= DefaultBBDupThreshold
;
411 // JumpThreading must not processes blocks unreachable from entry. It's a
412 // waste of compute time and can potentially lead to hangs.
413 SmallPtrSet
<BasicBlock
*, 16> Unreachable
;
414 assert(DTU
&& "DTU isn't passed into JumpThreading before using it.");
415 assert(DTU
->hasDomTree() && "JumpThreading relies on DomTree to proceed.");
416 DominatorTree
&DT
= DTU
->getDomTree();
418 if (!DT
.isReachableFromEntry(&BB
))
419 Unreachable
.insert(&BB
);
421 if (!ThreadAcrossLoopHeaders
)
424 bool EverChanged
= false;
429 if (Unreachable
.count(&BB
))
431 while (processBlock(&BB
)) // Thread all of the branches we can over BB.
434 // Jump threading may have introduced redundant debug values into BB
435 // which should be removed.
437 RemoveRedundantDbgInstrs(&BB
);
439 // Stop processing BB if it's the entry or is now deleted. The following
440 // routines attempt to eliminate BB and locating a suitable replacement
441 // for the entry is non-trivial.
442 if (&BB
== &F
.getEntryBlock() || DTU
->isBBPendingDeletion(&BB
))
445 if (pred_empty(&BB
)) {
446 // When processBlock makes BB unreachable it doesn't bother to fix up
447 // the instructions in it. We must remove BB to prevent invalid IR.
448 LLVM_DEBUG(dbgs() << " JT: Deleting dead block '" << BB
.getName()
449 << "' with terminator: " << *BB
.getTerminator()
451 LoopHeaders
.erase(&BB
);
452 LVI
->eraseBlock(&BB
);
453 DeleteDeadBlock(&BB
, DTU
);
458 // processBlock doesn't thread BBs with unconditional TIs. However, if BB
459 // is "almost empty", we attempt to merge BB with its sole successor.
460 auto *BI
= dyn_cast
<BranchInst
>(BB
.getTerminator());
461 if (BI
&& BI
->isUnconditional()) {
462 BasicBlock
*Succ
= BI
->getSuccessor(0);
464 // The terminator must be the only non-phi instruction in BB.
465 BB
.getFirstNonPHIOrDbg(true)->isTerminator() &&
466 // Don't alter Loop headers and latches to ensure another pass can
467 // detect and transform nested loops later.
468 !LoopHeaders
.count(&BB
) && !LoopHeaders
.count(Succ
) &&
469 TryToSimplifyUncondBranchFromEmptyBlock(&BB
, DTU
)) {
470 RemoveRedundantDbgInstrs(Succ
);
471 // BB is valid for cleanup here because we passed in DTU. F remains
472 // BB's parent until a DTU->getDomTree() event.
473 LVI
->eraseBlock(&BB
);
478 EverChanged
|= Changed
;
485 // Replace uses of Cond with ToVal when safe to do so. If all uses are
486 // replaced, we can remove Cond. We cannot blindly replace all uses of Cond
487 // because we may incorrectly replace uses when guards/assumes are uses of
488 // of `Cond` and we used the guards/assume to reason about the `Cond` value
489 // at the end of block. RAUW unconditionally replaces all uses
490 // including the guards/assumes themselves and the uses before the
492 static void replaceFoldableUses(Instruction
*Cond
, Value
*ToVal
) {
493 assert(Cond
->getType() == ToVal
->getType());
494 auto *BB
= Cond
->getParent();
495 // We can unconditionally replace all uses in non-local blocks (i.e. uses
496 // strictly dominated by BB), since LVI information is true from the
498 replaceNonLocalUsesWith(Cond
, ToVal
);
499 for (Instruction
&I
: reverse(*BB
)) {
500 // Reached the Cond whose uses we are trying to replace, so there are no
504 // We only replace uses in instructions that are guaranteed to reach the end
505 // of BB, where we know Cond is ToVal.
506 if (!isGuaranteedToTransferExecutionToSuccessor(&I
))
508 I
.replaceUsesOfWith(Cond
, ToVal
);
510 if (Cond
->use_empty() && !Cond
->mayHaveSideEffects())
511 Cond
->eraseFromParent();
514 /// Return the cost of duplicating a piece of this block from first non-phi
515 /// and before StopAt instruction to thread across it. Stop scanning the block
516 /// when exceeding the threshold. If duplication is impossible, returns ~0U.
517 static unsigned getJumpThreadDuplicationCost(BasicBlock
*BB
,
519 unsigned Threshold
) {
520 assert(StopAt
->getParent() == BB
&& "Not an instruction from proper BB?");
521 /// Ignore PHI nodes, these will be flattened when duplication happens.
522 BasicBlock::const_iterator
I(BB
->getFirstNonPHI());
524 // FIXME: THREADING will delete values that are just used to compute the
525 // branch, so they shouldn't count against the duplication cost.
528 if (BB
->getTerminator() == StopAt
) {
529 // Threading through a switch statement is particularly profitable. If this
530 // block ends in a switch, decrease its cost to make it more likely to
532 if (isa
<SwitchInst
>(StopAt
))
535 // The same holds for indirect branches, but slightly more so.
536 if (isa
<IndirectBrInst
>(StopAt
))
540 // Bump the threshold up so the early exit from the loop doesn't skip the
541 // terminator-based Size adjustment at the end.
544 // Sum up the cost of each instruction until we get to the terminator. Don't
545 // include the terminator because the copy won't include it.
547 for (; &*I
!= StopAt
; ++I
) {
549 // Stop scanning the block if we've reached the threshold.
550 if (Size
> Threshold
)
553 // Debugger intrinsics don't incur code size.
554 if (isa
<DbgInfoIntrinsic
>(I
)) continue;
556 // Pseudo-probes don't incur code size.
557 if (isa
<PseudoProbeInst
>(I
))
560 // If this is a pointer->pointer bitcast, it is free.
561 if (isa
<BitCastInst
>(I
) && I
->getType()->isPointerTy())
564 // Freeze instruction is free, too.
565 if (isa
<FreezeInst
>(I
))
568 // Bail out if this instruction gives back a token type, it is not possible
569 // to duplicate it if it is used outside this BB.
570 if (I
->getType()->isTokenTy() && I
->isUsedOutsideOfBlock(BB
))
573 // All other instructions count for at least one unit.
576 // Calls are more expensive. If they are non-intrinsic calls, we model them
577 // as having cost of 4. If they are a non-vector intrinsic, we model them
578 // as having cost of 2 total, and if they are a vector intrinsic, we model
579 // them as having cost 1.
580 if (const CallInst
*CI
= dyn_cast
<CallInst
>(I
)) {
581 if (CI
->cannotDuplicate() || CI
->isConvergent())
582 // Blocks with NoDuplicate are modelled as having infinite cost, so they
583 // are never duplicated.
585 else if (!isa
<IntrinsicInst
>(CI
))
587 else if (!CI
->getType()->isVectorTy())
592 return Size
> Bonus
? Size
- Bonus
: 0;
595 /// findLoopHeaders - We do not want jump threading to turn proper loop
596 /// structures into irreducible loops. Doing this breaks up the loop nesting
597 /// hierarchy and pessimizes later transformations. To prevent this from
598 /// happening, we first have to find the loop headers. Here we approximate this
599 /// by finding targets of backedges in the CFG.
601 /// Note that there definitely are cases when we want to allow threading of
602 /// edges across a loop header. For example, threading a jump from outside the
603 /// loop (the preheader) to an exit block of the loop is definitely profitable.
604 /// It is also almost always profitable to thread backedges from within the loop
605 /// to exit blocks, and is often profitable to thread backedges to other blocks
606 /// within the loop (forming a nested loop). This simple analysis is not rich
607 /// enough to track all of these properties and keep it up-to-date as the CFG
608 /// mutates, so we don't allow any of these transformations.
609 void JumpThreadingPass::findLoopHeaders(Function
&F
) {
610 SmallVector
<std::pair
<const BasicBlock
*,const BasicBlock
*>, 32> Edges
;
611 FindFunctionBackedges(F
, Edges
);
613 for (const auto &Edge
: Edges
)
614 LoopHeaders
.insert(Edge
.second
);
617 /// getKnownConstant - Helper method to determine if we can thread over a
618 /// terminator with the given value as its condition, and if so what value to
619 /// use for that. What kind of value this is depends on whether we want an
620 /// integer or a block address, but an undef is always accepted.
621 /// Returns null if Val is null or not an appropriate constant.
622 static Constant
*getKnownConstant(Value
*Val
, ConstantPreference Preference
) {
626 // Undef is "known" enough.
627 if (UndefValue
*U
= dyn_cast
<UndefValue
>(Val
))
630 if (Preference
== WantBlockAddress
)
631 return dyn_cast
<BlockAddress
>(Val
->stripPointerCasts());
633 return dyn_cast
<ConstantInt
>(Val
);
636 /// computeValueKnownInPredecessors - Given a basic block BB and a value V, see
637 /// if we can infer that the value is a known ConstantInt/BlockAddress or undef
638 /// in any of our predecessors. If so, return the known list of value and pred
639 /// BB in the result vector.
641 /// This returns true if there were any known values.
642 bool JumpThreadingPass::computeValueKnownInPredecessorsImpl(
643 Value
*V
, BasicBlock
*BB
, PredValueInfo
&Result
,
644 ConstantPreference Preference
, DenseSet
<Value
*> &RecursionSet
,
646 // This method walks up use-def chains recursively. Because of this, we could
647 // get into an infinite loop going around loops in the use-def chain. To
648 // prevent this, keep track of what (value, block) pairs we've already visited
649 // and terminate the search if we loop back to them
650 if (!RecursionSet
.insert(V
).second
)
653 // If V is a constant, then it is known in all predecessors.
654 if (Constant
*KC
= getKnownConstant(V
, Preference
)) {
655 for (BasicBlock
*Pred
: predecessors(BB
))
656 Result
.emplace_back(KC
, Pred
);
658 return !Result
.empty();
661 // If V is a non-instruction value, or an instruction in a different block,
662 // then it can't be derived from a PHI.
663 Instruction
*I
= dyn_cast
<Instruction
>(V
);
664 if (!I
|| I
->getParent() != BB
) {
666 // Okay, if this is a live-in value, see if it has a known value at the end
667 // of any of our predecessors.
669 // FIXME: This should be an edge property, not a block end property.
670 /// TODO: Per PR2563, we could infer value range information about a
671 /// predecessor based on its terminator.
673 // FIXME: change this to use the more-rich 'getPredicateOnEdge' method if
674 // "I" is a non-local compare-with-a-constant instruction. This would be
675 // able to handle value inequalities better, for example if the compare is
676 // "X < 4" and "X < 3" is known true but "X < 4" itself is not available.
677 // Perhaps getConstantOnEdge should be smart enough to do this?
678 for (BasicBlock
*P
: predecessors(BB
)) {
679 // If the value is known by LazyValueInfo to be a constant in a
680 // predecessor, use that information to try to thread this block.
681 Constant
*PredCst
= LVI
->getConstantOnEdge(V
, P
, BB
, CxtI
);
682 if (Constant
*KC
= getKnownConstant(PredCst
, Preference
))
683 Result
.emplace_back(KC
, P
);
686 return !Result
.empty();
689 /// If I is a PHI node, then we know the incoming values for any constants.
690 if (PHINode
*PN
= dyn_cast
<PHINode
>(I
)) {
691 for (unsigned i
= 0, e
= PN
->getNumIncomingValues(); i
!= e
; ++i
) {
692 Value
*InVal
= PN
->getIncomingValue(i
);
693 if (Constant
*KC
= getKnownConstant(InVal
, Preference
)) {
694 Result
.emplace_back(KC
, PN
->getIncomingBlock(i
));
696 Constant
*CI
= LVI
->getConstantOnEdge(InVal
,
697 PN
->getIncomingBlock(i
),
699 if (Constant
*KC
= getKnownConstant(CI
, Preference
))
700 Result
.emplace_back(KC
, PN
->getIncomingBlock(i
));
704 return !Result
.empty();
707 // Handle Cast instructions.
708 if (CastInst
*CI
= dyn_cast
<CastInst
>(I
)) {
709 Value
*Source
= CI
->getOperand(0);
710 computeValueKnownInPredecessorsImpl(Source
, BB
, Result
, Preference
,
715 // Convert the known values.
716 for (auto &R
: Result
)
717 R
.first
= ConstantExpr::getCast(CI
->getOpcode(), R
.first
, CI
->getType());
722 if (FreezeInst
*FI
= dyn_cast
<FreezeInst
>(I
)) {
723 Value
*Source
= FI
->getOperand(0);
724 computeValueKnownInPredecessorsImpl(Source
, BB
, Result
, Preference
,
727 erase_if(Result
, [](auto &Pair
) {
728 return !isGuaranteedNotToBeUndefOrPoison(Pair
.first
);
731 return !Result
.empty();
734 // Handle some boolean conditions.
735 if (I
->getType()->getPrimitiveSizeInBits() == 1) {
736 using namespace PatternMatch
;
738 assert(Preference
== WantInteger
&& "One-bit non-integer type?");
740 // X & false -> false
742 if (match(I
, m_LogicalOr(m_Value(Op0
), m_Value(Op1
))) ||
743 match(I
, m_LogicalAnd(m_Value(Op0
), m_Value(Op1
)))) {
744 PredValueInfoTy LHSVals
, RHSVals
;
746 computeValueKnownInPredecessorsImpl(Op0
, BB
, LHSVals
, WantInteger
,
748 computeValueKnownInPredecessorsImpl(Op1
, BB
, RHSVals
, WantInteger
,
751 if (LHSVals
.empty() && RHSVals
.empty())
754 ConstantInt
*InterestingVal
;
755 if (match(I
, m_LogicalOr()))
756 InterestingVal
= ConstantInt::getTrue(I
->getContext());
758 InterestingVal
= ConstantInt::getFalse(I
->getContext());
760 SmallPtrSet
<BasicBlock
*, 4> LHSKnownBBs
;
762 // Scan for the sentinel. If we find an undef, force it to the
763 // interesting value: x|undef -> true and x&undef -> false.
764 for (const auto &LHSVal
: LHSVals
)
765 if (LHSVal
.first
== InterestingVal
|| isa
<UndefValue
>(LHSVal
.first
)) {
766 Result
.emplace_back(InterestingVal
, LHSVal
.second
);
767 LHSKnownBBs
.insert(LHSVal
.second
);
769 for (const auto &RHSVal
: RHSVals
)
770 if (RHSVal
.first
== InterestingVal
|| isa
<UndefValue
>(RHSVal
.first
)) {
771 // If we already inferred a value for this block on the LHS, don't
773 if (!LHSKnownBBs
.count(RHSVal
.second
))
774 Result
.emplace_back(InterestingVal
, RHSVal
.second
);
777 return !Result
.empty();
780 // Handle the NOT form of XOR.
781 if (I
->getOpcode() == Instruction::Xor
&&
782 isa
<ConstantInt
>(I
->getOperand(1)) &&
783 cast
<ConstantInt
>(I
->getOperand(1))->isOne()) {
784 computeValueKnownInPredecessorsImpl(I
->getOperand(0), BB
, Result
,
785 WantInteger
, RecursionSet
, CxtI
);
789 // Invert the known values.
790 for (auto &R
: Result
)
791 R
.first
= ConstantExpr::getNot(R
.first
);
796 // Try to simplify some other binary operator values.
797 } else if (BinaryOperator
*BO
= dyn_cast
<BinaryOperator
>(I
)) {
798 assert(Preference
!= WantBlockAddress
799 && "A binary operator creating a block address?");
800 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(BO
->getOperand(1))) {
801 PredValueInfoTy LHSVals
;
802 computeValueKnownInPredecessorsImpl(BO
->getOperand(0), BB
, LHSVals
,
803 WantInteger
, RecursionSet
, CxtI
);
805 // Try to use constant folding to simplify the binary operator.
806 for (const auto &LHSVal
: LHSVals
) {
807 Constant
*V
= LHSVal
.first
;
808 Constant
*Folded
= ConstantExpr::get(BO
->getOpcode(), V
, CI
);
810 if (Constant
*KC
= getKnownConstant(Folded
, WantInteger
))
811 Result
.emplace_back(KC
, LHSVal
.second
);
815 return !Result
.empty();
818 // Handle compare with phi operand, where the PHI is defined in this block.
819 if (CmpInst
*Cmp
= dyn_cast
<CmpInst
>(I
)) {
820 assert(Preference
== WantInteger
&& "Compares only produce integers");
821 Type
*CmpType
= Cmp
->getType();
822 Value
*CmpLHS
= Cmp
->getOperand(0);
823 Value
*CmpRHS
= Cmp
->getOperand(1);
824 CmpInst::Predicate Pred
= Cmp
->getPredicate();
826 PHINode
*PN
= dyn_cast
<PHINode
>(CmpLHS
);
828 PN
= dyn_cast
<PHINode
>(CmpRHS
);
829 if (PN
&& PN
->getParent() == BB
) {
830 const DataLayout
&DL
= PN
->getModule()->getDataLayout();
831 // We can do this simplification if any comparisons fold to true or false.
833 for (unsigned i
= 0, e
= PN
->getNumIncomingValues(); i
!= e
; ++i
) {
834 BasicBlock
*PredBB
= PN
->getIncomingBlock(i
);
837 LHS
= PN
->getIncomingValue(i
);
838 RHS
= CmpRHS
->DoPHITranslation(BB
, PredBB
);
840 LHS
= CmpLHS
->DoPHITranslation(BB
, PredBB
);
841 RHS
= PN
->getIncomingValue(i
);
843 Value
*Res
= SimplifyCmpInst(Pred
, LHS
, RHS
, {DL
});
845 if (!isa
<Constant
>(RHS
))
848 // getPredicateOnEdge call will make no sense if LHS is defined in BB.
849 auto LHSInst
= dyn_cast
<Instruction
>(LHS
);
850 if (LHSInst
&& LHSInst
->getParent() == BB
)
853 LazyValueInfo::Tristate
854 ResT
= LVI
->getPredicateOnEdge(Pred
, LHS
,
855 cast
<Constant
>(RHS
), PredBB
, BB
,
857 if (ResT
== LazyValueInfo::Unknown
)
859 Res
= ConstantInt::get(Type::getInt1Ty(LHS
->getContext()), ResT
);
862 if (Constant
*KC
= getKnownConstant(Res
, WantInteger
))
863 Result
.emplace_back(KC
, PredBB
);
866 return !Result
.empty();
869 // If comparing a live-in value against a constant, see if we know the
870 // live-in value on any predecessors.
871 if (isa
<Constant
>(CmpRHS
) && !CmpType
->isVectorTy()) {
872 Constant
*CmpConst
= cast
<Constant
>(CmpRHS
);
874 if (!isa
<Instruction
>(CmpLHS
) ||
875 cast
<Instruction
>(CmpLHS
)->getParent() != BB
) {
876 for (BasicBlock
*P
: predecessors(BB
)) {
877 // If the value is known by LazyValueInfo to be a constant in a
878 // predecessor, use that information to try to thread this block.
879 LazyValueInfo::Tristate Res
=
880 LVI
->getPredicateOnEdge(Pred
, CmpLHS
,
881 CmpConst
, P
, BB
, CxtI
? CxtI
: Cmp
);
882 if (Res
== LazyValueInfo::Unknown
)
885 Constant
*ResC
= ConstantInt::get(CmpType
, Res
);
886 Result
.emplace_back(ResC
, P
);
889 return !Result
.empty();
892 // InstCombine can fold some forms of constant range checks into
893 // (icmp (add (x, C1)), C2). See if we have we have such a thing with
896 using namespace PatternMatch
;
899 ConstantInt
*AddConst
;
900 if (isa
<ConstantInt
>(CmpConst
) &&
901 match(CmpLHS
, m_Add(m_Value(AddLHS
), m_ConstantInt(AddConst
)))) {
902 if (!isa
<Instruction
>(AddLHS
) ||
903 cast
<Instruction
>(AddLHS
)->getParent() != BB
) {
904 for (BasicBlock
*P
: predecessors(BB
)) {
905 // If the value is known by LazyValueInfo to be a ConstantRange in
906 // a predecessor, use that information to try to thread this
908 ConstantRange CR
= LVI
->getConstantRangeOnEdge(
909 AddLHS
, P
, BB
, CxtI
? CxtI
: cast
<Instruction
>(CmpLHS
));
910 // Propagate the range through the addition.
911 CR
= CR
.add(AddConst
->getValue());
913 // Get the range where the compare returns true.
914 ConstantRange CmpRange
= ConstantRange::makeExactICmpRegion(
915 Pred
, cast
<ConstantInt
>(CmpConst
)->getValue());
918 if (CmpRange
.contains(CR
))
919 ResC
= ConstantInt::getTrue(CmpType
);
920 else if (CmpRange
.inverse().contains(CR
))
921 ResC
= ConstantInt::getFalse(CmpType
);
925 Result
.emplace_back(ResC
, P
);
928 return !Result
.empty();
933 // Try to find a constant value for the LHS of a comparison,
934 // and evaluate it statically if we can.
935 PredValueInfoTy LHSVals
;
936 computeValueKnownInPredecessorsImpl(I
->getOperand(0), BB
, LHSVals
,
937 WantInteger
, RecursionSet
, CxtI
);
939 for (const auto &LHSVal
: LHSVals
) {
940 Constant
*V
= LHSVal
.first
;
941 Constant
*Folded
= ConstantExpr::getCompare(Pred
, V
, CmpConst
);
942 if (Constant
*KC
= getKnownConstant(Folded
, WantInteger
))
943 Result
.emplace_back(KC
, LHSVal
.second
);
946 return !Result
.empty();
950 if (SelectInst
*SI
= dyn_cast
<SelectInst
>(I
)) {
951 // Handle select instructions where at least one operand is a known constant
952 // and we can figure out the condition value for any predecessor block.
953 Constant
*TrueVal
= getKnownConstant(SI
->getTrueValue(), Preference
);
954 Constant
*FalseVal
= getKnownConstant(SI
->getFalseValue(), Preference
);
955 PredValueInfoTy Conds
;
956 if ((TrueVal
|| FalseVal
) &&
957 computeValueKnownInPredecessorsImpl(SI
->getCondition(), BB
, Conds
,
958 WantInteger
, RecursionSet
, CxtI
)) {
959 for (auto &C
: Conds
) {
960 Constant
*Cond
= C
.first
;
962 // Figure out what value to use for the condition.
964 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Cond
)) {
966 KnownCond
= CI
->isOne();
968 assert(isa
<UndefValue
>(Cond
) && "Unexpected condition value");
969 // Either operand will do, so be sure to pick the one that's a known
971 // FIXME: Do this more cleverly if both values are known constants?
972 KnownCond
= (TrueVal
!= nullptr);
975 // See if the select has a known constant value for this predecessor.
976 if (Constant
*Val
= KnownCond
? TrueVal
: FalseVal
)
977 Result
.emplace_back(Val
, C
.second
);
980 return !Result
.empty();
984 // If all else fails, see if LVI can figure out a constant value for us.
985 assert(CxtI
->getParent() == BB
&& "CxtI should be in BB");
986 Constant
*CI
= LVI
->getConstant(V
, CxtI
);
987 if (Constant
*KC
= getKnownConstant(CI
, Preference
)) {
988 for (BasicBlock
*Pred
: predecessors(BB
))
989 Result
.emplace_back(KC
, Pred
);
992 return !Result
.empty();
995 /// GetBestDestForBranchOnUndef - If we determine that the specified block ends
996 /// in an undefined jump, decide which block is best to revector to.
998 /// Since we can pick an arbitrary destination, we pick the successor with the
999 /// fewest predecessors. This should reduce the in-degree of the others.
1000 static unsigned getBestDestForJumpOnUndef(BasicBlock
*BB
) {
1001 Instruction
*BBTerm
= BB
->getTerminator();
1002 unsigned MinSucc
= 0;
1003 BasicBlock
*TestBB
= BBTerm
->getSuccessor(MinSucc
);
1004 // Compute the successor with the minimum number of predecessors.
1005 unsigned MinNumPreds
= pred_size(TestBB
);
1006 for (unsigned i
= 1, e
= BBTerm
->getNumSuccessors(); i
!= e
; ++i
) {
1007 TestBB
= BBTerm
->getSuccessor(i
);
1008 unsigned NumPreds
= pred_size(TestBB
);
1009 if (NumPreds
< MinNumPreds
) {
1011 MinNumPreds
= NumPreds
;
1018 static bool hasAddressTakenAndUsed(BasicBlock
*BB
) {
1019 if (!BB
->hasAddressTaken()) return false;
1021 // If the block has its address taken, it may be a tree of dead constants
1022 // hanging off of it. These shouldn't keep the block alive.
1023 BlockAddress
*BA
= BlockAddress::get(BB
);
1024 BA
->removeDeadConstantUsers();
1025 return !BA
->use_empty();
1028 /// processBlock - If there are any predecessors whose control can be threaded
1029 /// through to a successor, transform them now.
1030 bool JumpThreadingPass::processBlock(BasicBlock
*BB
) {
1031 // If the block is trivially dead, just return and let the caller nuke it.
1032 // This simplifies other transformations.
1033 if (DTU
->isBBPendingDeletion(BB
) ||
1034 (pred_empty(BB
) && BB
!= &BB
->getParent()->getEntryBlock()))
1037 // If this block has a single predecessor, and if that pred has a single
1038 // successor, merge the blocks. This encourages recursive jump threading
1039 // because now the condition in this block can be threaded through
1040 // predecessors of our predecessor block.
1041 if (maybeMergeBasicBlockIntoOnlyPred(BB
))
1044 if (tryToUnfoldSelectInCurrBB(BB
))
1047 // Look if we can propagate guards to predecessors.
1048 if (HasGuards
&& processGuards(BB
))
1051 // What kind of constant we're looking for.
1052 ConstantPreference Preference
= WantInteger
;
1054 // Look to see if the terminator is a conditional branch, switch or indirect
1055 // branch, if not we can't thread it.
1057 Instruction
*Terminator
= BB
->getTerminator();
1058 if (BranchInst
*BI
= dyn_cast
<BranchInst
>(Terminator
)) {
1059 // Can't thread an unconditional jump.
1060 if (BI
->isUnconditional()) return false;
1061 Condition
= BI
->getCondition();
1062 } else if (SwitchInst
*SI
= dyn_cast
<SwitchInst
>(Terminator
)) {
1063 Condition
= SI
->getCondition();
1064 } else if (IndirectBrInst
*IB
= dyn_cast
<IndirectBrInst
>(Terminator
)) {
1065 // Can't thread indirect branch with no successors.
1066 if (IB
->getNumSuccessors() == 0) return false;
1067 Condition
= IB
->getAddress()->stripPointerCasts();
1068 Preference
= WantBlockAddress
;
1070 return false; // Must be an invoke or callbr.
1073 // Keep track if we constant folded the condition in this invocation.
1074 bool ConstantFolded
= false;
1076 // Run constant folding to see if we can reduce the condition to a simple
1078 if (Instruction
*I
= dyn_cast
<Instruction
>(Condition
)) {
1080 ConstantFoldInstruction(I
, BB
->getModule()->getDataLayout(), TLI
);
1082 I
->replaceAllUsesWith(SimpleVal
);
1083 if (isInstructionTriviallyDead(I
, TLI
))
1084 I
->eraseFromParent();
1085 Condition
= SimpleVal
;
1086 ConstantFolded
= true;
1090 // If the terminator is branching on an undef or freeze undef, we can pick any
1091 // of the successors to branch to. Let getBestDestForJumpOnUndef decide.
1092 auto *FI
= dyn_cast
<FreezeInst
>(Condition
);
1093 if (isa
<UndefValue
>(Condition
) ||
1094 (FI
&& isa
<UndefValue
>(FI
->getOperand(0)) && FI
->hasOneUse())) {
1095 unsigned BestSucc
= getBestDestForJumpOnUndef(BB
);
1096 std::vector
<DominatorTree::UpdateType
> Updates
;
1098 // Fold the branch/switch.
1099 Instruction
*BBTerm
= BB
->getTerminator();
1100 Updates
.reserve(BBTerm
->getNumSuccessors());
1101 for (unsigned i
= 0, e
= BBTerm
->getNumSuccessors(); i
!= e
; ++i
) {
1102 if (i
== BestSucc
) continue;
1103 BasicBlock
*Succ
= BBTerm
->getSuccessor(i
);
1104 Succ
->removePredecessor(BB
, true);
1105 Updates
.push_back({DominatorTree::Delete
, BB
, Succ
});
1108 LLVM_DEBUG(dbgs() << " In block '" << BB
->getName()
1109 << "' folding undef terminator: " << *BBTerm
<< '\n');
1110 BranchInst::Create(BBTerm
->getSuccessor(BestSucc
), BBTerm
);
1112 BBTerm
->eraseFromParent();
1113 DTU
->applyUpdatesPermissive(Updates
);
1115 FI
->eraseFromParent();
1119 // If the terminator of this block is branching on a constant, simplify the
1120 // terminator to an unconditional branch. This can occur due to threading in
1122 if (getKnownConstant(Condition
, Preference
)) {
1123 LLVM_DEBUG(dbgs() << " In block '" << BB
->getName()
1124 << "' folding terminator: " << *BB
->getTerminator()
1127 ConstantFoldTerminator(BB
, true, nullptr, DTU
);
1129 BPI
->eraseBlock(BB
);
1133 Instruction
*CondInst
= dyn_cast
<Instruction
>(Condition
);
1135 // All the rest of our checks depend on the condition being an instruction.
1137 // FIXME: Unify this with code below.
1138 if (processThreadableEdges(Condition
, BB
, Preference
, Terminator
))
1140 return ConstantFolded
;
1143 if (CmpInst
*CondCmp
= dyn_cast
<CmpInst
>(CondInst
)) {
1144 // If we're branching on a conditional, LVI might be able to determine
1145 // it's value at the branch instruction. We only handle comparisons
1146 // against a constant at this time.
1147 // TODO: This should be extended to handle switches as well.
1148 BranchInst
*CondBr
= dyn_cast
<BranchInst
>(BB
->getTerminator());
1149 Constant
*CondConst
= dyn_cast
<Constant
>(CondCmp
->getOperand(1));
1150 if (CondBr
&& CondConst
) {
1151 // We should have returned as soon as we turn a conditional branch to
1152 // unconditional. Because its no longer interesting as far as jump
1153 // threading is concerned.
1154 assert(CondBr
->isConditional() && "Threading on unconditional terminator");
1156 LazyValueInfo::Tristate Ret
=
1157 LVI
->getPredicateAt(CondCmp
->getPredicate(), CondCmp
->getOperand(0),
1158 CondConst
, CondBr
, /*UseBlockValue=*/false);
1159 if (Ret
!= LazyValueInfo::Unknown
) {
1160 unsigned ToRemove
= Ret
== LazyValueInfo::True
? 1 : 0;
1161 unsigned ToKeep
= Ret
== LazyValueInfo::True
? 0 : 1;
1162 BasicBlock
*ToRemoveSucc
= CondBr
->getSuccessor(ToRemove
);
1163 ToRemoveSucc
->removePredecessor(BB
, true);
1164 BranchInst
*UncondBr
=
1165 BranchInst::Create(CondBr
->getSuccessor(ToKeep
), CondBr
);
1166 UncondBr
->setDebugLoc(CondBr
->getDebugLoc());
1168 CondBr
->eraseFromParent();
1169 if (CondCmp
->use_empty())
1170 CondCmp
->eraseFromParent();
1171 // We can safely replace *some* uses of the CondInst if it has
1172 // exactly one value as returned by LVI. RAUW is incorrect in the
1173 // presence of guards and assumes, that have the `Cond` as the use. This
1174 // is because we use the guards/assume to reason about the `Cond` value
1175 // at the end of block, but RAUW unconditionally replaces all uses
1176 // including the guards/assumes themselves and the uses before the
1178 else if (CondCmp
->getParent() == BB
) {
1179 auto *CI
= Ret
== LazyValueInfo::True
?
1180 ConstantInt::getTrue(CondCmp
->getType()) :
1181 ConstantInt::getFalse(CondCmp
->getType());
1182 replaceFoldableUses(CondCmp
, CI
);
1184 DTU
->applyUpdatesPermissive(
1185 {{DominatorTree::Delete
, BB
, ToRemoveSucc
}});
1187 BPI
->eraseBlock(BB
);
1191 // We did not manage to simplify this branch, try to see whether
1192 // CondCmp depends on a known phi-select pattern.
1193 if (tryToUnfoldSelect(CondCmp
, BB
))
1198 if (SwitchInst
*SI
= dyn_cast
<SwitchInst
>(BB
->getTerminator()))
1199 if (tryToUnfoldSelect(SI
, BB
))
1202 // Check for some cases that are worth simplifying. Right now we want to look
1203 // for loads that are used by a switch or by the condition for the branch. If
1204 // we see one, check to see if it's partially redundant. If so, insert a PHI
1205 // which can then be used to thread the values.
1206 Value
*SimplifyValue
= CondInst
;
1208 if (auto *FI
= dyn_cast
<FreezeInst
>(SimplifyValue
))
1209 // Look into freeze's operand
1210 SimplifyValue
= FI
->getOperand(0);
1212 if (CmpInst
*CondCmp
= dyn_cast
<CmpInst
>(SimplifyValue
))
1213 if (isa
<Constant
>(CondCmp
->getOperand(1)))
1214 SimplifyValue
= CondCmp
->getOperand(0);
1216 // TODO: There are other places where load PRE would be profitable, such as
1217 // more complex comparisons.
1218 if (LoadInst
*LoadI
= dyn_cast
<LoadInst
>(SimplifyValue
))
1219 if (simplifyPartiallyRedundantLoad(LoadI
))
1222 // Before threading, try to propagate profile data backwards:
1223 if (PHINode
*PN
= dyn_cast
<PHINode
>(CondInst
))
1224 if (PN
->getParent() == BB
&& isa
<BranchInst
>(BB
->getTerminator()))
1225 updatePredecessorProfileMetadata(PN
, BB
);
1227 // Handle a variety of cases where we are branching on something derived from
1228 // a PHI node in the current block. If we can prove that any predecessors
1229 // compute a predictable value based on a PHI node, thread those predecessors.
1230 if (processThreadableEdges(CondInst
, BB
, Preference
, Terminator
))
1233 // If this is an otherwise-unfoldable branch on a phi node or freeze(phi) in
1234 // the current block, see if we can simplify.
1235 PHINode
*PN
= dyn_cast
<PHINode
>(
1236 isa
<FreezeInst
>(CondInst
) ? cast
<FreezeInst
>(CondInst
)->getOperand(0)
1239 if (PN
&& PN
->getParent() == BB
&& isa
<BranchInst
>(BB
->getTerminator()))
1240 return processBranchOnPHI(PN
);
1242 // If this is an otherwise-unfoldable branch on a XOR, see if we can simplify.
1243 if (CondInst
->getOpcode() == Instruction::Xor
&&
1244 CondInst
->getParent() == BB
&& isa
<BranchInst
>(BB
->getTerminator()))
1245 return processBranchOnXOR(cast
<BinaryOperator
>(CondInst
));
1247 // Search for a stronger dominating condition that can be used to simplify a
1248 // conditional branch leaving BB.
1249 if (processImpliedCondition(BB
))
1255 bool JumpThreadingPass::processImpliedCondition(BasicBlock
*BB
) {
1256 auto *BI
= dyn_cast
<BranchInst
>(BB
->getTerminator());
1257 if (!BI
|| !BI
->isConditional())
1260 Value
*Cond
= BI
->getCondition();
1261 BasicBlock
*CurrentBB
= BB
;
1262 BasicBlock
*CurrentPred
= BB
->getSinglePredecessor();
1265 auto &DL
= BB
->getModule()->getDataLayout();
1267 while (CurrentPred
&& Iter
++ < ImplicationSearchThreshold
) {
1268 auto *PBI
= dyn_cast
<BranchInst
>(CurrentPred
->getTerminator());
1269 if (!PBI
|| !PBI
->isConditional())
1271 if (PBI
->getSuccessor(0) != CurrentBB
&& PBI
->getSuccessor(1) != CurrentBB
)
1274 bool CondIsTrue
= PBI
->getSuccessor(0) == CurrentBB
;
1275 Optional
<bool> Implication
=
1276 isImpliedCondition(PBI
->getCondition(), Cond
, DL
, CondIsTrue
);
1278 BasicBlock
*KeepSucc
= BI
->getSuccessor(*Implication
? 0 : 1);
1279 BasicBlock
*RemoveSucc
= BI
->getSuccessor(*Implication
? 1 : 0);
1280 RemoveSucc
->removePredecessor(BB
);
1281 BranchInst
*UncondBI
= BranchInst::Create(KeepSucc
, BI
);
1282 UncondBI
->setDebugLoc(BI
->getDebugLoc());
1284 BI
->eraseFromParent();
1285 DTU
->applyUpdatesPermissive({{DominatorTree::Delete
, BB
, RemoveSucc
}});
1287 BPI
->eraseBlock(BB
);
1290 CurrentBB
= CurrentPred
;
1291 CurrentPred
= CurrentBB
->getSinglePredecessor();
1297 /// Return true if Op is an instruction defined in the given block.
1298 static bool isOpDefinedInBlock(Value
*Op
, BasicBlock
*BB
) {
1299 if (Instruction
*OpInst
= dyn_cast
<Instruction
>(Op
))
1300 if (OpInst
->getParent() == BB
)
1305 /// simplifyPartiallyRedundantLoad - If LoadI is an obviously partially
1306 /// redundant load instruction, eliminate it by replacing it with a PHI node.
1307 /// This is an important optimization that encourages jump threading, and needs
1308 /// to be run interlaced with other jump threading tasks.
1309 bool JumpThreadingPass::simplifyPartiallyRedundantLoad(LoadInst
*LoadI
) {
1310 // Don't hack volatile and ordered loads.
1311 if (!LoadI
->isUnordered()) return false;
1313 // If the load is defined in a block with exactly one predecessor, it can't be
1314 // partially redundant.
1315 BasicBlock
*LoadBB
= LoadI
->getParent();
1316 if (LoadBB
->getSinglePredecessor())
1319 // If the load is defined in an EH pad, it can't be partially redundant,
1320 // because the edges between the invoke and the EH pad cannot have other
1321 // instructions between them.
1322 if (LoadBB
->isEHPad())
1325 Value
*LoadedPtr
= LoadI
->getOperand(0);
1327 // If the loaded operand is defined in the LoadBB and its not a phi,
1328 // it can't be available in predecessors.
1329 if (isOpDefinedInBlock(LoadedPtr
, LoadBB
) && !isa
<PHINode
>(LoadedPtr
))
1332 // Scan a few instructions up from the load, to see if it is obviously live at
1333 // the entry to its block.
1334 BasicBlock::iterator
BBIt(LoadI
);
1336 if (Value
*AvailableVal
= FindAvailableLoadedValue(
1337 LoadI
, LoadBB
, BBIt
, DefMaxInstsToScan
, AA
, &IsLoadCSE
)) {
1338 // If the value of the load is locally available within the block, just use
1339 // it. This frequently occurs for reg2mem'd allocas.
1342 LoadInst
*NLoadI
= cast
<LoadInst
>(AvailableVal
);
1343 combineMetadataForCSE(NLoadI
, LoadI
, false);
1346 // If the returned value is the load itself, replace with an undef. This can
1347 // only happen in dead loops.
1348 if (AvailableVal
== LoadI
)
1349 AvailableVal
= UndefValue::get(LoadI
->getType());
1350 if (AvailableVal
->getType() != LoadI
->getType())
1351 AvailableVal
= CastInst::CreateBitOrPointerCast(
1352 AvailableVal
, LoadI
->getType(), "", LoadI
);
1353 LoadI
->replaceAllUsesWith(AvailableVal
);
1354 LoadI
->eraseFromParent();
1358 // Otherwise, if we scanned the whole block and got to the top of the block,
1359 // we know the block is locally transparent to the load. If not, something
1360 // might clobber its value.
1361 if (BBIt
!= LoadBB
->begin())
1364 // If all of the loads and stores that feed the value have the same AA tags,
1365 // then we can propagate them onto any newly inserted loads.
1367 LoadI
->getAAMetadata(AATags
);
1369 SmallPtrSet
<BasicBlock
*, 8> PredsScanned
;
1371 using AvailablePredsTy
= SmallVector
<std::pair
<BasicBlock
*, Value
*>, 8>;
1373 AvailablePredsTy AvailablePreds
;
1374 BasicBlock
*OneUnavailablePred
= nullptr;
1375 SmallVector
<LoadInst
*, 8> CSELoads
;
1377 // If we got here, the loaded value is transparent through to the start of the
1378 // block. Check to see if it is available in any of the predecessor blocks.
1379 for (BasicBlock
*PredBB
: predecessors(LoadBB
)) {
1380 // If we already scanned this predecessor, skip it.
1381 if (!PredsScanned
.insert(PredBB
).second
)
1384 BBIt
= PredBB
->end();
1385 unsigned NumScanedInst
= 0;
1386 Value
*PredAvailable
= nullptr;
1387 // NOTE: We don't CSE load that is volatile or anything stronger than
1388 // unordered, that should have been checked when we entered the function.
1389 assert(LoadI
->isUnordered() &&
1390 "Attempting to CSE volatile or atomic loads");
1391 // If this is a load on a phi pointer, phi-translate it and search
1392 // for available load/store to the pointer in predecessors.
1393 Type
*AccessTy
= LoadI
->getType();
1394 const auto &DL
= LoadI
->getModule()->getDataLayout();
1395 MemoryLocation
Loc(LoadedPtr
->DoPHITranslation(LoadBB
, PredBB
),
1396 LocationSize::precise(DL
.getTypeStoreSize(AccessTy
)),
1398 PredAvailable
= findAvailablePtrLoadStore(Loc
, AccessTy
, LoadI
->isAtomic(),
1399 PredBB
, BBIt
, DefMaxInstsToScan
,
1400 AA
, &IsLoadCSE
, &NumScanedInst
);
1402 // If PredBB has a single predecessor, continue scanning through the
1403 // single predecessor.
1404 BasicBlock
*SinglePredBB
= PredBB
;
1405 while (!PredAvailable
&& SinglePredBB
&& BBIt
== SinglePredBB
->begin() &&
1406 NumScanedInst
< DefMaxInstsToScan
) {
1407 SinglePredBB
= SinglePredBB
->getSinglePredecessor();
1409 BBIt
= SinglePredBB
->end();
1410 PredAvailable
= findAvailablePtrLoadStore(
1411 Loc
, AccessTy
, LoadI
->isAtomic(), SinglePredBB
, BBIt
,
1412 (DefMaxInstsToScan
- NumScanedInst
), AA
, &IsLoadCSE
,
1417 if (!PredAvailable
) {
1418 OneUnavailablePred
= PredBB
;
1423 CSELoads
.push_back(cast
<LoadInst
>(PredAvailable
));
1425 // If so, this load is partially redundant. Remember this info so that we
1426 // can create a PHI node.
1427 AvailablePreds
.emplace_back(PredBB
, PredAvailable
);
1430 // If the loaded value isn't available in any predecessor, it isn't partially
1432 if (AvailablePreds
.empty()) return false;
1434 // Okay, the loaded value is available in at least one (and maybe all!)
1435 // predecessors. If the value is unavailable in more than one unique
1436 // predecessor, we want to insert a merge block for those common predecessors.
1437 // This ensures that we only have to insert one reload, thus not increasing
1439 BasicBlock
*UnavailablePred
= nullptr;
1441 // If the value is unavailable in one of predecessors, we will end up
1442 // inserting a new instruction into them. It is only valid if all the
1443 // instructions before LoadI are guaranteed to pass execution to its
1444 // successor, or if LoadI is safe to speculate.
1445 // TODO: If this logic becomes more complex, and we will perform PRE insertion
1446 // farther than to a predecessor, we need to reuse the code from GVN's PRE.
1447 // It requires domination tree analysis, so for this simple case it is an
1449 if (PredsScanned
.size() != AvailablePreds
.size() &&
1450 !isSafeToSpeculativelyExecute(LoadI
))
1451 for (auto I
= LoadBB
->begin(); &*I
!= LoadI
; ++I
)
1452 if (!isGuaranteedToTransferExecutionToSuccessor(&*I
))
1455 // If there is exactly one predecessor where the value is unavailable, the
1456 // already computed 'OneUnavailablePred' block is it. If it ends in an
1457 // unconditional branch, we know that it isn't a critical edge.
1458 if (PredsScanned
.size() == AvailablePreds
.size()+1 &&
1459 OneUnavailablePred
->getTerminator()->getNumSuccessors() == 1) {
1460 UnavailablePred
= OneUnavailablePred
;
1461 } else if (PredsScanned
.size() != AvailablePreds
.size()) {
1462 // Otherwise, we had multiple unavailable predecessors or we had a critical
1463 // edge from the one.
1464 SmallVector
<BasicBlock
*, 8> PredsToSplit
;
1465 SmallPtrSet
<BasicBlock
*, 8> AvailablePredSet
;
1467 for (const auto &AvailablePred
: AvailablePreds
)
1468 AvailablePredSet
.insert(AvailablePred
.first
);
1470 // Add all the unavailable predecessors to the PredsToSplit list.
1471 for (BasicBlock
*P
: predecessors(LoadBB
)) {
1472 // If the predecessor is an indirect goto, we can't split the edge.
1474 if (isa
<IndirectBrInst
>(P
->getTerminator()) ||
1475 isa
<CallBrInst
>(P
->getTerminator()))
1478 if (!AvailablePredSet
.count(P
))
1479 PredsToSplit
.push_back(P
);
1482 // Split them out to their own block.
1483 UnavailablePred
= splitBlockPreds(LoadBB
, PredsToSplit
, "thread-pre-split");
1486 // If the value isn't available in all predecessors, then there will be
1487 // exactly one where it isn't available. Insert a load on that edge and add
1488 // it to the AvailablePreds list.
1489 if (UnavailablePred
) {
1490 assert(UnavailablePred
->getTerminator()->getNumSuccessors() == 1 &&
1491 "Can't handle critical edge here!");
1492 LoadInst
*NewVal
= new LoadInst(
1493 LoadI
->getType(), LoadedPtr
->DoPHITranslation(LoadBB
, UnavailablePred
),
1494 LoadI
->getName() + ".pr", false, LoadI
->getAlign(),
1495 LoadI
->getOrdering(), LoadI
->getSyncScopeID(),
1496 UnavailablePred
->getTerminator());
1497 NewVal
->setDebugLoc(LoadI
->getDebugLoc());
1499 NewVal
->setAAMetadata(AATags
);
1501 AvailablePreds
.emplace_back(UnavailablePred
, NewVal
);
1504 // Now we know that each predecessor of this block has a value in
1505 // AvailablePreds, sort them for efficient access as we're walking the preds.
1506 array_pod_sort(AvailablePreds
.begin(), AvailablePreds
.end());
1508 // Create a PHI node at the start of the block for the PRE'd load value.
1509 pred_iterator PB
= pred_begin(LoadBB
), PE
= pred_end(LoadBB
);
1510 PHINode
*PN
= PHINode::Create(LoadI
->getType(), std::distance(PB
, PE
), "",
1512 PN
->takeName(LoadI
);
1513 PN
->setDebugLoc(LoadI
->getDebugLoc());
1515 // Insert new entries into the PHI for each predecessor. A single block may
1516 // have multiple entries here.
1517 for (pred_iterator PI
= PB
; PI
!= PE
; ++PI
) {
1518 BasicBlock
*P
= *PI
;
1519 AvailablePredsTy::iterator I
=
1520 llvm::lower_bound(AvailablePreds
, std::make_pair(P
, (Value
*)nullptr));
1522 assert(I
!= AvailablePreds
.end() && I
->first
== P
&&
1523 "Didn't find entry for predecessor!");
1525 // If we have an available predecessor but it requires casting, insert the
1526 // cast in the predecessor and use the cast. Note that we have to update the
1527 // AvailablePreds vector as we go so that all of the PHI entries for this
1528 // predecessor use the same bitcast.
1529 Value
*&PredV
= I
->second
;
1530 if (PredV
->getType() != LoadI
->getType())
1531 PredV
= CastInst::CreateBitOrPointerCast(PredV
, LoadI
->getType(), "",
1532 P
->getTerminator());
1534 PN
->addIncoming(PredV
, I
->first
);
1537 for (LoadInst
*PredLoadI
: CSELoads
) {
1538 combineMetadataForCSE(PredLoadI
, LoadI
, true);
1541 LoadI
->replaceAllUsesWith(PN
);
1542 LoadI
->eraseFromParent();

/// findMostPopularDest - The specified list contains multiple possible
/// threadable destinations.  Pick the one that occurs the most frequently in
/// the list.
static BasicBlock *
findMostPopularDest(BasicBlock *BB,
                    const SmallVectorImpl<std::pair<BasicBlock *,
                                          BasicBlock *>> &PredToDestList) {
  assert(!PredToDestList.empty());

  // Determine popularity.  If there are multiple possible destinations, we
  // explicitly choose to ignore 'undef' destinations.  We prefer to thread
  // blocks with known and real destinations to threading undef.  We'll handle
  // them later if interesting.
  MapVector<BasicBlock *, unsigned> DestPopularity;

  // Populate DestPopularity with the successors in the order they appear in the
  // successor list.  This way, we ensure determinism by iterating it in the
  // same order in std::max_element below.  We map nullptr to 0 so that we can
  // return nullptr when PredToDestList contains nullptr only.
  DestPopularity[nullptr] = 0;
  for (auto *SuccBB : successors(BB))
    DestPopularity[SuccBB] = 0;

  for (const auto &PredToDest : PredToDestList)
    if (PredToDest.second)
      DestPopularity[PredToDest.second]++;

  // Find the most popular dest.
  using VT = decltype(DestPopularity)::value_type;
  auto MostPopular = std::max_element(
      DestPopularity.begin(), DestPopularity.end(),
      [](const VT &L, const VT &R) { return L.second < R.second; });

  // Okay, we have finally picked the most popular destination.
  return MostPopular->first;
}

// Try to evaluate the value of V when the control flows from PredPredBB to
// BB->getSinglePredecessor() and then on to BB.
Constant *JumpThreadingPass::evaluateOnPredecessorEdge(BasicBlock *BB,
                                                       BasicBlock *PredPredBB,
                                                       Value *V) {
  BasicBlock *PredBB = BB->getSinglePredecessor();
  assert(PredBB && "Expected a single predecessor");

  if (Constant *Cst = dyn_cast<Constant>(V)) {
    return Cst;
  }

  // Consult LVI if V is not an instruction in BB or PredBB.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || (I->getParent() != BB && I->getParent() != PredBB)) {
    return LVI->getConstantOnEdge(V, PredPredBB, PredBB, nullptr);
  }

  // Look into a PHI argument.
  if (PHINode *PHI = dyn_cast<PHINode>(V)) {
    if (PHI->getParent() == PredBB)
      return dyn_cast<Constant>(PHI->getIncomingValueForBlock(PredPredBB));
    return nullptr;
  }

  // If we have a CmpInst, try to fold it for each incoming edge into PredBB.
  if (CmpInst *CondCmp = dyn_cast<CmpInst>(V)) {
    if (CondCmp->getParent() == BB) {
      Constant *Op0 =
          evaluateOnPredecessorEdge(BB, PredPredBB, CondCmp->getOperand(0));
      Constant *Op1 =
          evaluateOnPredecessorEdge(BB, PredPredBB, CondCmp->getOperand(1));
      if (Op0 && Op1) {
        return ConstantExpr::getCompare(CondCmp->getPredicate(), Op0, Op1);
      }
    }
    return nullptr;
  }

  return nullptr;
}
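
/// processThreadableEdges - Given a condition Cond used by the terminator of
/// BB, determine its value in each predecessor (via
/// computeValueKnownInPredecessors) and either fold BB's terminator, when
/// every threadable predecessor forces the same successor, or factor the
/// matching predecessors and thread an edge to the most popular destination.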
bool JumpThreadingPass::processThreadableEdges(Value *Cond, BasicBlock *BB,
                                               ConstantPreference Preference,
                                               Instruction *CxtI) {
  // If threading this would thread across a loop header, don't even try to
  // thread the edge.
  if (LoopHeaders.count(BB))
    return false;

  PredValueInfoTy PredValues;
  if (!computeValueKnownInPredecessors(Cond, BB, PredValues, Preference,
                                       CxtI)) {
    // We don't have known values in predecessors.  See if we can thread through
    // BB and its sole predecessor.
    return maybethreadThroughTwoBasicBlocks(BB, Cond);
  }

  assert(!PredValues.empty() &&
         "computeValueKnownInPredecessors returned true with no values");

  LLVM_DEBUG(dbgs() << "IN BB: " << *BB;
             for (const auto &PredValue : PredValues) {
               dbgs() << "  BB '" << BB->getName()
                      << "': FOUND condition = " << *PredValue.first
                      << " for pred '" << PredValue.second->getName() << "'.\n";
             });

  // Decide what we want to thread through.  Convert our list of known values to
  // a list of known destinations for each pred.  This also discards duplicate
  // predecessors and keeps track of the undefined inputs (which are represented
  // as a null dest in the PredToDestList).
  SmallPtrSet<BasicBlock *, 16> SeenPreds;
  SmallVector<std::pair<BasicBlock *, BasicBlock *>, 16> PredToDestList;
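
  // Track whether every threadable predecessor agrees on a single destination
  // (and a single condition value).  MultipleDestSentinel / MultipleVal serve
  // as "more than one" markers once a second, different entry is seen.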
  BasicBlock *OnlyDest = nullptr;
  BasicBlock *MultipleDestSentinel = (BasicBlock *)(intptr_t)~0ULL;
  Constant *OnlyVal = nullptr;
  Constant *MultipleVal = (Constant *)(intptr_t)~0ULL;

  for (const auto &PredValue : PredValues) {
    BasicBlock *Pred = PredValue.second;
    if (!SeenPreds.insert(Pred).second)
      continue; // Duplicate predecessor entry.

    Constant *Val = PredValue.first;

    BasicBlock *DestBB;
    if (isa<UndefValue>(Val))
      DestBB = nullptr;
    else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
      assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
      DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero());
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
      assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
      DestBB = SI->findCaseValue(cast<ConstantInt>(Val))->getCaseSuccessor();
    } else {
      assert(isa<IndirectBrInst>(BB->getTerminator())
              && "Unexpected terminator");
      assert(isa<BlockAddress>(Val) && "Expecting a constant blockaddress");
      DestBB = cast<BlockAddress>(Val)->getBasicBlock();
    }

    // If we have exactly one destination, remember it for efficiency below.
    if (PredToDestList.empty()) {
      OnlyDest = DestBB;
      OnlyVal = Val;
    } else {
      if (OnlyDest != DestBB)
        OnlyDest = MultipleDestSentinel;
      // It's possible we have the same destination but a different value, e.g.
      // the default case in a switchinst.
      if (Val != OnlyVal)
        OnlyVal = MultipleVal;
    }

    // If the predecessor ends with an indirect goto, we can't change its
    // destination. Same for CallBr.
    if (isa<IndirectBrInst>(Pred->getTerminator()) ||
        isa<CallBrInst>(Pred->getTerminator()))
      continue;

    PredToDestList.emplace_back(Pred, DestBB);
  }

  // If all edges were unthreadable, we fail.
  if (PredToDestList.empty())
    return false;

  // If all the predecessors go to a single known successor, we want to fold,
  // not thread. By doing so, we do not need to duplicate the current block and
  // also miss potential opportunities in case we don't/can't duplicate.
  if (OnlyDest && OnlyDest != MultipleDestSentinel) {
    if (BB->hasNPredecessors(PredToDestList.size())) {
      bool SeenFirstBranchToOnlyDest = false;
      std::vector<DominatorTree::UpdateType> Updates;
      Updates.reserve(BB->getTerminator()->getNumSuccessors() - 1);
      for (BasicBlock *SuccBB : successors(BB)) {
        if (SuccBB == OnlyDest && !SeenFirstBranchToOnlyDest) {
          SeenFirstBranchToOnlyDest = true; // Don't modify the first branch.
        } else {
          SuccBB->removePredecessor(BB, true); // This is unreachable successor.
          Updates.push_back({DominatorTree::Delete, BB, SuccBB});
        }
      }

      // Finally update the terminator.
      Instruction *Term = BB->getTerminator();
      BranchInst::Create(OnlyDest, Term);
      ++NumFolds;
      Term->eraseFromParent();
      DTU->applyUpdatesPermissive(Updates);
      if (HasProfileData)
        BPI->eraseBlock(BB);

      // If the condition is now dead due to the removal of the old terminator,
      // erase it.
      if (auto *CondInst = dyn_cast<Instruction>(Cond)) {
        if (CondInst->use_empty() && !CondInst->mayHaveSideEffects())
          CondInst->eraseFromParent();
        // We can safely replace *some* uses of the CondInst if it has
        // exactly one value as returned by LVI. RAUW is incorrect in the
        // presence of guards and assumes, that have the `Cond` as the use. This
        // is because we use the guards/assume to reason about the `Cond` value
        // at the end of block, but RAUW unconditionally replaces all uses
        // including the guards/assumes themselves and the uses before the
        // guard/assume.
        else if (OnlyVal && OnlyVal != MultipleVal &&
                 CondInst->getParent() == BB)
          replaceFoldableUses(CondInst, OnlyVal);
      }
      return true;
    }
  }

  // Determine which is the most common successor.  If we have many inputs and
  // this block is a switch, we want to start by threading the batch that goes
  // to the most popular destination first.  If we only know about one
  // threadable destination (the common case) we can avoid this.
  BasicBlock *MostPopularDest = OnlyDest;

  if (MostPopularDest == MultipleDestSentinel) {
    // Remove any loop headers from the Dest list, threadEdge conservatively
    // won't process them, but we might have other destinations that are
    // eligible and we still want to process.
    erase_if(PredToDestList,
             [&](const std::pair<BasicBlock *, BasicBlock *> &PredToDest) {
               return LoopHeaders.contains(PredToDest.second);
             });

    if (PredToDestList.empty())
      return false;

    MostPopularDest = findMostPopularDest(BB, PredToDestList);
  }

  // Now that we know what the most popular destination is, factor all
  // predecessors that will jump to it into a single predecessor.
  SmallVector<BasicBlock *, 16> PredsToFactor;
  for (const auto &PredToDest : PredToDestList)
    if (PredToDest.second == MostPopularDest) {
      BasicBlock *Pred = PredToDest.first;

      // This predecessor may be a switch or something else that has multiple
      // edges to the block.  Factor each of these edges by listing them
      // according to # occurrences in PredsToFactor.
      for (BasicBlock *Succ : successors(Pred))
        if (Succ == BB)
          PredsToFactor.push_back(Pred);
    }

  // If the threadable edges are branching on an undefined value, we get to pick
  // the destination that these predecessors should get to.
  if (!MostPopularDest)
    MostPopularDest = BB->getTerminator()->
                            getSuccessor(getBestDestForJumpOnUndef(BB));

  // Ok, try to thread it!
  return tryThreadEdge(BB, PredsToFactor, MostPopularDest);
}

/// processBranchOnPHI - We have an otherwise unthreadable conditional branch on
/// a PHI node (or freeze PHI) in the current block.  See if there are any
/// simplifications we can do based on inputs to the phi node.
bool JumpThreadingPass::processBranchOnPHI(PHINode *PN) {
  BasicBlock *BB = PN->getParent();

  // TODO: We could make use of this to do it once for blocks with common PHI
  // values.
  SmallVector<BasicBlock *, 1> PredBBs;
  PredBBs.resize(1);

  // If any of the predecessor blocks end in an unconditional branch, we can
  // *duplicate* the conditional branch into that block in order to further
  // encourage jump threading and to eliminate cases where we have branch on a
  // phi of an icmp (branch on icmp is much better).
  // This is still beneficial when a frozen phi is used as the branch condition
  // because it allows CodeGenPrepare to further canonicalize br(freeze(icmp))
  // to br(icmp(freeze ...)).
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *PredBB = PN->getIncomingBlock(i);
    if (BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator()))
      if (PredBr->isUnconditional()) {
        PredBBs[0] = PredBB;
        // Try to duplicate BB into PredBB.
        if (duplicateCondBranchOnPHIIntoPred(BB, PredBBs))
          return true;
      }
  }

  return false;
}

/// processBranchOnXOR - We have an otherwise unthreadable conditional branch on
/// a xor instruction in the current block.  See if there are any
/// simplifications we can do based on inputs to the xor.
bool JumpThreadingPass::processBranchOnXOR(BinaryOperator *BO) {
  BasicBlock *BB = BO->getParent();

  // If either the LHS or RHS of the xor is a constant, don't do this
  // optimization.
  if (isa<ConstantInt>(BO->getOperand(0)) ||
      isa<ConstantInt>(BO->getOperand(1)))
    return false;

  // If the first instruction in BB isn't a phi, we won't be able to infer
  // anything special about any particular predecessor.
  if (!isa<PHINode>(BB->front()))
    return false;

  // If this BB is a landing pad, we won't be able to split the edge into it.
  if (BB->isEHPad())
    return false;

  // If we have a xor as the branch input to this block, and we know that the
  // LHS or RHS of the xor in any predecessor is true/false, then we can clone
  // the condition into the predecessor and fix that value to true, saving some
  // logical ops on that path and encouraging other paths to simplify.
  //
  // This copies something like this:
  //
  //  BB:
  //    %X = phi i1 [1],  [%X']
  //    %Y = icmp eq i32 %A, %B
  //    %Z = xor i1 %X, %Y
  //    br i1 %Z, ...
  //
  // Into:
  //  BB':
  //    %Y = icmp ne i32 %A, %B
  //    br i1 %Y, ...

  PredValueInfoTy XorOpValues;
  bool isLHS = true;
  if (!computeValueKnownInPredecessors(BO->getOperand(0), BB, XorOpValues,
                                       WantInteger, BO)) {
    assert(XorOpValues.empty());
    if (!computeValueKnownInPredecessors(BO->getOperand(1), BB, XorOpValues,
                                         WantInteger, BO))
      return false;
    isLHS = false;
  }

  assert(!XorOpValues.empty() &&
         "computeValueKnownInPredecessors returned true with no values");

  // Scan the information to see which is most popular: true or false.  The
  // predecessors can be of the set true, false, or undef.
  unsigned NumTrue = 0, NumFalse = 0;
  for (const auto &XorOpValue : XorOpValues) {
    if (isa<UndefValue>(XorOpValue.first))
      // Ignore undefs for the count.
      continue;
    if (cast<ConstantInt>(XorOpValue.first)->isZero())
      ++NumFalse;
    else
      ++NumTrue;
  }

  // Determine which value to split on, true, false, or undef if neither.
  ConstantInt *SplitVal = nullptr;
  if (NumTrue > NumFalse)
    SplitVal = ConstantInt::getTrue(BB->getContext());
  else if (NumTrue != 0 || NumFalse != 0)
    SplitVal = ConstantInt::getFalse(BB->getContext());

  // Collect all of the blocks that this can be folded into so that we can
  // factor this once and clone it once.
  SmallVector<BasicBlock *, 8> BlocksToFoldInto;
  for (const auto &XorOpValue : XorOpValues) {
    if (XorOpValue.first != SplitVal && !isa<UndefValue>(XorOpValue.first))
      continue;

    BlocksToFoldInto.push_back(XorOpValue.second);
  }

  // If we inferred a value for all of the predecessors, then duplication won't
  // help us.  However, we can just replace the LHS or RHS with the constant.
  if (BlocksToFoldInto.size() ==
      cast<PHINode>(BB->front()).getNumIncomingValues()) {
    if (!SplitVal) {
      // If all preds provide undef, just nuke the xor, because it is undef too.
      BO->replaceAllUsesWith(UndefValue::get(BO->getType()));
      BO->eraseFromParent();
    } else if (SplitVal->isZero()) {
      // If all preds provide 0, replace the xor with the other input.
      BO->replaceAllUsesWith(BO->getOperand(isLHS));
      BO->eraseFromParent();
    } else {
      // If all preds provide 1, set the computed value to 1.
      BO->setOperand(!isLHS, SplitVal);
    }

    return true;
  }

  // If any of predecessors end with an indirect goto, we can't change its
  // destination. Same for CallBr.
  if (any_of(BlocksToFoldInto, [](BasicBlock *Pred) {
        return isa<IndirectBrInst>(Pred->getTerminator()) ||
               isa<CallBrInst>(Pred->getTerminator());
      }))
    return false;

  // Try to duplicate BB into PredBB.
  return duplicateCondBranchOnPHIIntoPred(BB, BlocksToFoldInto);
}

/// addPHINodeEntriesForMappedBlock - We're adding 'NewPred' as a new
/// predecessor to the PHIBB block.  If it has PHI nodes, add entries for
/// NewPred using the entries from OldPred (suitably mapped).
static void addPHINodeEntriesForMappedBlock(BasicBlock *PHIBB,
                                            BasicBlock *OldPred,
                                            BasicBlock *NewPred,
                                            DenseMap<Instruction *, Value *> &ValueMap) {
  for (PHINode &PN : PHIBB->phis()) {
    // Ok, we have a PHI node.  Figure out what the incoming value was for the
    // OldPred.
    Value *IV = PN.getIncomingValueForBlock(OldPred);

    // Remap the value if necessary.
    if (Instruction *Inst = dyn_cast<Instruction>(IV)) {
      DenseMap<Instruction *, Value *>::iterator I = ValueMap.find(Inst);
      if (I != ValueMap.end())
        IV = I->second;
    }

    PN.addIncoming(IV, NewPred);
  }
}

/// Merge basic block BB into its sole predecessor if possible.
bool JumpThreadingPass::maybeMergeBasicBlockIntoOnlyPred(BasicBlock *BB) {
  BasicBlock *SinglePred = BB->getSinglePredecessor();
  if (!SinglePred)
    return false;

  const Instruction *TI = SinglePred->getTerminator();
  if (TI->isExceptionalTerminator() || TI->getNumSuccessors() != 1 ||
      SinglePred == BB || hasAddressTakenAndUsed(BB))
    return false;

  // If SinglePred was a loop header, BB becomes one.
  if (LoopHeaders.erase(SinglePred))
    LoopHeaders.insert(BB);

  LVI->eraseBlock(SinglePred);
  MergeBasicBlockIntoOnlyPred(BB, DTU);

  // Now that BB is merged into SinglePred (i.e. SinglePred code followed by
  // BB code within one basic block `BB`), we need to invalidate the LVI
  // information associated with BB, because the LVI information need not be
  // true for all of BB after the merge. For example,
  // Before the merge, LVI info and code is as follows:
  // SinglePred: <LVI info1 for %p val>
  // %y = use of %p
  // call @exit() // need not transfer execution to successor.
  // assume(%p) // from this point on %p is true
  // br label %BB
  // BB: <LVI info2 for %p val, i.e. %p is true>
  // %x = use of %p
  // br label exit
  //
  // Note that this LVI info for blocks BB and SinglePred is correct for %p
  // (info2 and info1 respectively). After the merge and the deletion of the
  // LVI info1 for SinglePred, we have the following code:
  // BB: <LVI info2 for %p val>
  // %y = use of %p
  // call @exit() // need not transfer execution to successor.
  // assume(%p)
  // %x = use of %p <-- LVI info2 is correct from here onwards.
  // br label exit
  // LVI info2 for BB is incorrect at the beginning of BB.
  //
  // Invalidate LVI information for BB if the LVI is not provably true for
  // all of BB.
  if (!isGuaranteedToTransferExecutionToSuccessor(BB))
    LVI->eraseBlock(BB);
  return true;
}

/// Update the SSA form.  NewBB contains instructions that are copied from BB.
/// ValueMapping maps old values in BB to new ones in NewBB.
void JumpThreadingPass::updateSSA(
    BasicBlock *BB, BasicBlock *NewBB,
    DenseMap<Instruction *, Value *> &ValueMapping) {
  // If there were values defined in BB that are used outside the block, then we
  // now have to update all uses of the value to use either the original value,
  // the cloned value, or some PHI derived value.  This can require arbitrary
  // PHI insertion, which we are prepared to do; clean these up now.
  SSAUpdater SSAUpdate;
  SmallVector<Use *, 16> UsesToRename;

  for (Instruction &I : *BB) {
    // Scan all uses of this instruction to see if it is used outside of its
    // block, and if so, record them in UsesToRename.
    for (Use &U : I.uses()) {
      Instruction *User = cast<Instruction>(U.getUser());
      if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
        if (UserPN->getIncomingBlock(U) == BB)
          continue;
      } else if (User->getParent() == BB)
        continue;

      UsesToRename.push_back(&U);
    }

    // If there are no uses outside the block, we're done with this instruction.
    if (UsesToRename.empty())
      continue;
    LLVM_DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");

    // We found a use of I outside of BB.  Rename all uses of I that are outside
    // its block to be uses of the appropriate PHI node etc.  See ValuesInBlocks
    // with the two values we know.
    SSAUpdate.Initialize(I.getType(), I.getName());
    SSAUpdate.AddAvailableValue(BB, &I);
    SSAUpdate.AddAvailableValue(NewBB, ValueMapping[&I]);

    while (!UsesToRename.empty())
      SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
    LLVM_DEBUG(dbgs() << "\n");
  }
}

/// Clone instructions in range [BI, BE) to NewBB.  For PHI nodes, we only clone
/// arguments that come from PredBB.  Return the map from the variables in the
/// source basic block to the variables in the newly created basic block.
DenseMap<Instruction *, Value *>
JumpThreadingPass::cloneInstructions(BasicBlock::iterator BI,
                                     BasicBlock::iterator BE, BasicBlock *NewBB,
                                     BasicBlock *PredBB) {
  // We are going to have to map operands from the source basic block to the new
  // copy of the block 'NewBB'.  If there are PHI nodes in the source basic
  // block, evaluate them to account for entry from PredBB.
  DenseMap<Instruction *, Value *> ValueMapping;

  // Clone the phi nodes of the source basic block into NewBB.  The resulting
  // phi nodes are trivial since NewBB only has one predecessor, but SSAUpdater
  // might need to rewrite the operand of the cloned phi.
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) {
    PHINode *NewPN = PHINode::Create(PN->getType(), 1, PN->getName(), NewBB);
    NewPN->addIncoming(PN->getIncomingValueForBlock(PredBB), PredBB);
    ValueMapping[PN] = NewPN;
  }

  // Clone noalias scope declarations in the threaded block. When threading a
  // loop exit, we would otherwise end up with two identical scope declarations
  // visible at the same time.
  SmallVector<MDNode *> NoAliasScopes;
  DenseMap<MDNode *, MDNode *> ClonedScopes;
  LLVMContext &Context = PredBB->getContext();
  identifyNoAliasScopesToClone(BI, BE, NoAliasScopes);
  cloneNoAliasScopes(NoAliasScopes, ClonedScopes, "thread", Context);

  // Clone the non-phi instructions of the source basic block into NewBB,
  // keeping track of the mapping and using it to remap operands in the cloned
  // instructions.
  for (; BI != BE; ++BI) {
    Instruction *New = BI->clone();
    New->setName(BI->getName());
    NewBB->getInstList().push_back(New);
    ValueMapping[&*BI] = New;
    adaptNoAliasScopes(New, ClonedScopes, Context);

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        DenseMap<Instruction *, Value *>::iterator I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }
  }

  return ValueMapping;
}

/// Attempt to thread through two successive basic blocks.
bool JumpThreadingPass::maybethreadThroughTwoBasicBlocks(BasicBlock *BB,
                                                         Value *Cond) {
  // Consider:
  //
  // PredBB:
  //   %var = phi i32* [ null, %bb1 ], [ @a, %bb2 ]
  //   %tobool = icmp eq i32 %cond, 0
  //   br i1 %tobool, label %BB, label ...
  //
  // BB:
  //   %cmp = icmp eq i32* %var, null
  //   br i1 %cmp, label ..., label ...
  //
  // We don't know the value of %var at BB even if we know which incoming edge
  // we take to BB.  However, once we duplicate PredBB for each of its incoming
  // edges (say, PredBB1 and PredBB2), we know the value of %var in each copy of
  // PredBB.  Then we can thread edges PredBB1->BB and PredBB2->BB through BB.

  // Require that BB end with a Branch for simplicity.
  BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
  if (!CondBr)
    return false;

  // BB must have exactly one predecessor.
  BasicBlock *PredBB = BB->getSinglePredecessor();
  if (!PredBB)
    return false;

  // Require that PredBB end with a conditional Branch. If PredBB ends with an
  // unconditional branch, we should be merging PredBB and BB instead. For
  // simplicity, we don't deal with a switch.
  BranchInst *PredBBBranch = dyn_cast<BranchInst>(PredBB->getTerminator());
  if (!PredBBBranch || PredBBBranch->isUnconditional())
    return false;

  // If PredBB has exactly one incoming edge, we don't gain anything by copying
  // PredBB.
  if (PredBB->getSinglePredecessor())
    return false;

  // Don't thread through PredBB if it contains a successor edge to itself, in
  // which case we would infinite loop.  Suppose we are threading an edge from
  // PredPredBB through PredBB and BB to SuccBB with PredBB containing a
  // successor edge to itself.  If we allowed jump threading in this case, we
  // could duplicate PredBB and BB as, say, PredBB.thread and BB.thread.  Since
  // PredBB.thread has a successor edge to PredBB, we would immediately come up
  // with another jump threading opportunity from PredBB.thread through PredBB
  // and BB to SuccBB.  This jump threading would repeatedly occur.  That is, we
  // would keep peeling one iteration from PredBB.
  if (llvm::is_contained(successors(PredBB), PredBB))
    return false;

  // Don't thread across a loop header.
  if (LoopHeaders.count(PredBB))
    return false;

  // Avoid complication with duplicating EH pads.
  if (PredBB->isEHPad())
    return false;

  // Find a predecessor that we can thread.  For simplicity, we only consider a
  // successor edge out of BB to which we thread exactly one incoming edge into
  // PredBB.
  unsigned ZeroCount = 0;
  unsigned OneCount = 0;
  BasicBlock *ZeroPred = nullptr;
  BasicBlock *OnePred = nullptr;
  for (BasicBlock *P : predecessors(PredBB)) {
    if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(
            evaluateOnPredecessorEdge(BB, P, Cond))) {
      if (CI->isZero()) {
        ZeroCount++;
        ZeroPred = P;
      } else if (CI->isOne()) {
        OneCount++;
        OnePred = P;
      }
    }
  }

  // Disregard complicated cases where we have to thread multiple edges.
  BasicBlock *PredPredBB;
  if (ZeroCount == 1) {
    PredPredBB = ZeroPred;
  } else if (OneCount == 1) {
    PredPredBB = OnePred;
  } else {
    return false;
  }
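
  // The condition is known to be 0 on the edge from ZeroPred and 1 on the edge
  // from OnePred, so the threaded edge targets successor 1 (false) when coming
  // from ZeroPred and successor 0 (true) otherwise.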
  BasicBlock *SuccBB = CondBr->getSuccessor(PredPredBB == ZeroPred);

  // If threading to the same block as we come from, we would infinite loop.
  if (SuccBB == BB) {
    LLVM_DEBUG(dbgs() << "  Not threading across BB '" << BB->getName()
                      << "' - would thread to self!\n");
    return false;
  }

  // If threading this would thread across a loop header, don't thread the edge.
  // See the comments above findLoopHeaders for justifications and caveats.
  if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) {
    LLVM_DEBUG({
      bool BBIsHeader = LoopHeaders.count(BB);
      bool SuccIsHeader = LoopHeaders.count(SuccBB);
      dbgs() << "  Not threading across "
             << (BBIsHeader ? "loop header BB '" : "block BB '")
             << BB->getName() << "' to dest "
             << (SuccIsHeader ? "loop header BB '" : "block BB '")
             << SuccBB->getName()
             << "' - it might create an irreducible loop!\n";
    });
    return false;
  }

  // Compute the cost of duplicating BB and PredBB.
  unsigned BBCost =
      getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold);
  unsigned PredBBCost = getJumpThreadDuplicationCost(
      PredBB, PredBB->getTerminator(), BBDupThreshold);

  // Give up if costs are too high.  We need to check BBCost and PredBBCost
  // individually before checking their sum because getJumpThreadDuplicationCost
  // return (unsigned)~0 for those basic blocks that cannot be duplicated.
  if (BBCost > BBDupThreshold || PredBBCost > BBDupThreshold ||
      BBCost + PredBBCost > BBDupThreshold) {
    LLVM_DEBUG(dbgs() << "  Not threading BB '" << BB->getName()
                      << "' - Cost is too high: " << PredBBCost
                      << " for PredBB, " << BBCost << " for BB\n");
    return false;
  }

  // Now we are ready to duplicate PredBB.
  threadThroughTwoBasicBlocks(PredPredBB, PredBB, BB, SuccBB);
  return true;
}
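
/// threadThroughTwoBasicBlocks - Duplicate PredBB into a new block reachable
/// only from PredPredBB, then thread the edge from that copy across BB to
/// SuccBB.  This carries out the transformation validated by
/// maybethreadThroughTwoBasicBlocks.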
void JumpThreadingPass::threadThroughTwoBasicBlocks(BasicBlock *PredPredBB,
                                                    BasicBlock *PredBB,
                                                    BasicBlock *BB,
                                                    BasicBlock *SuccBB) {
  LLVM_DEBUG(dbgs() << "  Threading through '" << PredBB->getName() << "' and '"
                    << BB->getName() << "'\n");

  BranchInst *CondBr = cast<BranchInst>(BB->getTerminator());
  BranchInst *PredBBBranch = cast<BranchInst>(PredBB->getTerminator());

  BasicBlock *NewBB =
      BasicBlock::Create(PredBB->getContext(), PredBB->getName() + ".thread",
                         PredBB->getParent(), PredBB);
  NewBB->moveAfter(PredBB);

  // Set the block frequency of NewBB.
  if (HasProfileData) {
    auto NewBBFreq = BFI->getBlockFreq(PredPredBB) *
                     BPI->getEdgeProbability(PredPredBB, PredBB);
    BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
  }

  // We are going to have to map operands from the original BB block to the new
  // copy of the block 'NewBB'. If there are PHI nodes in PredBB, evaluate them
  // to account for entry from PredPredBB.
  DenseMap<Instruction *, Value *> ValueMapping =
      cloneInstructions(PredBB->begin(), PredBB->end(), NewBB, PredPredBB);

  // Copy the edge probabilities from PredBB to NewBB.
  if (HasProfileData)
    BPI->copyEdgeProbabilities(PredBB, NewBB);

  // Update the terminator of PredPredBB to jump to NewBB instead of PredBB.
  // This eliminates predecessors from PredPredBB, which requires us to simplify
  // any PHI nodes in PredBB.
  Instruction *PredPredTerm = PredPredBB->getTerminator();
  for (unsigned i = 0, e = PredPredTerm->getNumSuccessors(); i != e; ++i)
    if (PredPredTerm->getSuccessor(i) == PredBB) {
      PredBB->removePredecessor(PredPredBB, true);
      PredPredTerm->setSuccessor(i, NewBB);
    }
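
  // NewBB ends in a clone of PredBB's conditional branch, so it is a brand-new
  // predecessor of both of PredBB's successors; add the corresponding PHI
  // entries there, remapped through ValueMapping.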
  addPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(0), PredBB, NewBB,
                                  ValueMapping);
  addPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(1), PredBB, NewBB,
                                  ValueMapping);

  DTU->applyUpdatesPermissive(
      {{DominatorTree::Insert, NewBB, CondBr->getSuccessor(0)},
       {DominatorTree::Insert, NewBB, CondBr->getSuccessor(1)},
       {DominatorTree::Insert, PredPredBB, NewBB},
       {DominatorTree::Delete, PredPredBB, PredBB}});

  updateSSA(PredBB, NewBB, ValueMapping);

  // Clean up things like PHI nodes with single operands, dead instructions,
  // etc.
  SimplifyInstructionsInBlock(NewBB, TLI);
  SimplifyInstructionsInBlock(PredBB, TLI);

  SmallVector<BasicBlock *, 1> PredsToFactor;
  PredsToFactor.push_back(NewBB);
  threadEdge(BB, PredsToFactor, SuccBB);
}

/// tryThreadEdge - Thread an edge if it's safe and profitable to do so.
bool JumpThreadingPass::tryThreadEdge(
    BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs,
    BasicBlock *SuccBB) {
  // If threading to the same block as we come from, we would infinite loop.
  if (SuccBB == BB) {
    LLVM_DEBUG(dbgs() << "  Not threading across BB '" << BB->getName()
                      << "' - would thread to self!\n");
    return false;
  }

  // If threading this would thread across a loop header, don't thread the edge.
  // See the comments above findLoopHeaders for justifications and caveats.
  if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) {
    LLVM_DEBUG({
      bool BBIsHeader = LoopHeaders.count(BB);
      bool SuccIsHeader = LoopHeaders.count(SuccBB);
      dbgs() << "  Not threading across "
             << (BBIsHeader ? "loop header BB '" : "block BB '") << BB->getName()
             << "' to dest " << (SuccIsHeader ? "loop header BB '" : "block BB '")
             << SuccBB->getName() << "' - it might create an irreducible loop!\n";
    });
    return false;
  }

  unsigned JumpThreadCost =
      getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold);
  if (JumpThreadCost > BBDupThreshold) {
    LLVM_DEBUG(dbgs() << "  Not threading BB '" << BB->getName()
                      << "' - Cost is too high: " << JumpThreadCost << "\n");
    return false;
  }

  threadEdge(BB, PredBBs, SuccBB);
  return true;
}

/// threadEdge - We have decided that it is safe and profitable to factor the
/// blocks in PredBBs to one predecessor, then thread an edge from it to SuccBB
/// across BB.  Transform the IR to reflect this change.
void JumpThreadingPass::threadEdge(BasicBlock *BB,
                                   const SmallVectorImpl<BasicBlock *> &PredBBs,
                                   BasicBlock *SuccBB) {
  assert(SuccBB != BB && "Don't create an infinite loop");

  assert(!LoopHeaders.count(BB) && !LoopHeaders.count(SuccBB) &&
         "Don't thread across loop headers");

  // And finally, do it!  Start by factoring the predecessors if needed.
  BasicBlock *PredBB;
  if (PredBBs.size() == 1)
    PredBB = PredBBs[0];
  else {
    LLVM_DEBUG(dbgs() << "  Factoring out " << PredBBs.size()
                      << " common predecessors.\n");
    PredBB = splitBlockPreds(BB, PredBBs, ".thr_comm");
  }

  // And finally, do it!
  LLVM_DEBUG(dbgs() << "  Threading edge from '" << PredBB->getName()
                    << "' to '" << SuccBB->getName()
                    << "', across block:\n    " << *BB << "\n");

  LVI->threadEdge(PredBB, BB, SuccBB);

  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(),
                                         BB->getName()+".thread",
                                         BB->getParent(), BB);
  NewBB->moveAfter(PredBB);

  // Set the block frequency of NewBB.
  if (HasProfileData) {
    auto NewBBFreq =
        BFI->getBlockFreq(PredBB) * BPI->getEdgeProbability(PredBB, BB);
    BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
  }

  // Copy all the instructions from BB to NewBB except the terminator.
  DenseMap<Instruction *, Value *> ValueMapping =
      cloneInstructions(BB->begin(), std::prev(BB->end()), NewBB, PredBB);

  // We didn't copy the terminator from BB over to NewBB, because there is now
  // an unconditional jump to SuccBB.  Insert the unconditional jump.
  BranchInst *NewBI = BranchInst::Create(SuccBB, NewBB);
  NewBI->setDebugLoc(BB->getTerminator()->getDebugLoc());

  // Check to see if SuccBB has PHI nodes. If so, we need to add entries to the
  // PHI nodes for NewBB now.
  addPHINodeEntriesForMappedBlock(SuccBB, BB, NewBB, ValueMapping);

  // Update the terminator of PredBB to jump to NewBB instead of BB.  This
  // eliminates predecessors from BB, which requires us to simplify any PHI
  // nodes in BB.
  Instruction *PredTerm = PredBB->getTerminator();
  for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i)
    if (PredTerm->getSuccessor(i) == BB) {
      BB->removePredecessor(PredBB, true);
      PredTerm->setSuccessor(i, NewBB);
    }

  // Enqueue required DT updates.
  DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, SuccBB},
                               {DominatorTree::Insert, PredBB, NewBB},
                               {DominatorTree::Delete, PredBB, BB}});

  updateSSA(BB, NewBB, ValueMapping);

  // At this point, the IR is fully up to date and consistent.  Do a quick scan
  // over the new instructions and zap any that are constants or dead.  This
  // frequently happens because of phi translation.
  SimplifyInstructionsInBlock(NewBB, TLI);

  // Update the edge weight from BB to SuccBB, which should be less than before.
  updateBlockFreqAndEdgeWeight(PredBB, BB, NewBB, SuccBB);

  // Threaded an edge!
  ++NumThreads;
}

/// Create a new basic block that will be the predecessor of BB and successor of
/// all blocks in Preds.  When profile data is available, update the frequency of
/// this new block.
BasicBlock *JumpThreadingPass::splitBlockPreds(BasicBlock *BB,
                                               ArrayRef<BasicBlock *> Preds,
                                               const char *Suffix) {
  SmallVector<BasicBlock *, 2> NewBBs;

  // Collect the frequencies of all predecessors of BB, which will be used to
  // update the edge weight of the result of splitting predecessors.
  DenseMap<BasicBlock *, BlockFrequency> FreqMap;
  if (HasProfileData)
    for (auto Pred : Preds)
      FreqMap.insert(std::make_pair(
          Pred, BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, BB)));

  // In the case when BB is a LandingPad block we create 2 new predecessors
  // instead of just one.
  if (BB->isLandingPad()) {
    std::string NewName = std::string(Suffix) + ".split-lp";
    SplitLandingPadPredecessors(BB, Preds, Suffix, NewName.c_str(), NewBBs);
  } else {
    NewBBs.push_back(SplitBlockPredecessors(BB, Preds, Suffix));
  }

  std::vector<DominatorTree::UpdateType> Updates;
  Updates.reserve((2 * Preds.size()) + NewBBs.size());
  for (auto NewBB : NewBBs) {
    BlockFrequency NewBBFreq(0);
    Updates.push_back({DominatorTree::Insert, NewBB, BB});
    for (auto Pred : predecessors(NewBB)) {
      Updates.push_back({DominatorTree::Delete, Pred, BB});
      Updates.push_back({DominatorTree::Insert, Pred, NewBB});
      if (HasProfileData) // Update frequencies between Pred -> NewBB.
        NewBBFreq += FreqMap.lookup(Pred);
    }
    if (HasProfileData) // Apply the summed frequency to NewBB.
      BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
  }

  DTU->applyUpdatesPermissive(Updates);
  return NewBBs[0];
}
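
/// Return true if the terminator of BB carries "branch_weights" profile
/// metadata with one weight recorded per successor.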
bool JumpThreadingPass::doesBlockHaveProfileData(BasicBlock *BB) {
  const Instruction *TI = BB->getTerminator();
  assert(TI->getNumSuccessors() > 1 && "not a split");

  MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
  if (!WeightsNode)
    return false;

  MDString *MDName = cast<MDString>(WeightsNode->getOperand(0));
  if (MDName->getString() != "branch_weights")
    return false;

  // Ensure there are weights for all of the successors. Note that the first
  // operand to the metadata node is a name, not a weight.
  return WeightsNode->getNumOperands() == TI->getNumSuccessors() + 1;
}

/// Update the block frequency of BB and branch weight and the metadata on the
/// edge BB->SuccBB. This is done by scaling the weight of BB->SuccBB by 1 -
/// Freq(PredBB->BB) / Freq(BB->SuccBB).
void JumpThreadingPass::updateBlockFreqAndEdgeWeight(BasicBlock *PredBB,
                                                     BasicBlock *BB,
                                                     BasicBlock *NewBB,
                                                     BasicBlock *SuccBB) {
  if (!HasProfileData)
    return;

  assert(BFI && BPI && "BFI & BPI should have been created here");

  // As the edge from PredBB to BB is deleted, we have to update the block
  // frequency of BB.
  auto BBOrigFreq = BFI->getBlockFreq(BB);
  auto NewBBFreq = BFI->getBlockFreq(NewBB);
  auto BB2SuccBBFreq = BBOrigFreq * BPI->getEdgeProbability(BB, SuccBB);
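  // Everything that used to flow from PredBB into BB now flows through NewBB
  // instead, so BB keeps only its original frequency minus NewBB's.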
  auto BBNewFreq = BBOrigFreq - NewBBFreq;
  BFI->setBlockFreq(BB, BBNewFreq.getFrequency());

  // Collect updated outgoing edges' frequencies from BB and use them to update
  // edge probabilities.
  SmallVector<uint64_t, 4> BBSuccFreq;
  for (BasicBlock *Succ : successors(BB)) {
    auto SuccFreq = (Succ == SuccBB)
                        ? BB2SuccBBFreq - NewBBFreq
                        : BBOrigFreq * BPI->getEdgeProbability(BB, Succ);
    BBSuccFreq.push_back(SuccFreq.getFrequency());
  }

  uint64_t MaxBBSuccFreq =
      *std::max_element(BBSuccFreq.begin(), BBSuccFreq.end());

  SmallVector<BranchProbability, 4> BBSuccProbs;
  if (MaxBBSuccFreq == 0)
    BBSuccProbs.assign(BBSuccFreq.size(),
                       {1, static_cast<unsigned>(BBSuccFreq.size())});
  else {
    for (uint64_t Freq : BBSuccFreq)
      BBSuccProbs.push_back(
          BranchProbability::getBranchProbability(Freq, MaxBBSuccFreq));
    // Normalize edge probabilities so that they sum up to one.
    BranchProbability::normalizeProbabilities(BBSuccProbs.begin(),
                                              BBSuccProbs.end());
  }

  // Update edge probabilities in BPI.
  BPI->setEdgeProbability(BB, BBSuccProbs);

  // Update the profile metadata as well.
  //
  // Don't do this if the profile of the transformed blocks was statically
  // estimated.  (This could occur despite the function having an entry
  // frequency in completely cold parts of the CFG.)
  //
  // In this case we don't want to suggest to subsequent passes that the
  // calculated weights are fully consistent.  Consider this graph:
  //
  //                 check_1
  //             50% /   |
  //             eq_1    | 50%
  //                 \   |
  //                 check_2
  //             50% /   |
  //             eq_2    | 50%
  //                 \   |
  //                 check_3
  //             50% /   |
  //             eq_3    | 50%
  //                 \   |
  //
  // Assuming the blocks check_* all compare the same value against 1, 2 and 3,
  // the overall probabilities are inconsistent; the total probability that the
  // value is either 1, 2 or 3 is 150%.
  //
  // As a consequence if we thread eq_1 -> check_2 to check_3, check_2->check_3
  // becomes 0%.  This is even worse if the edge whose probability becomes 0% is
  // the loop exit edge.  Then based solely on static estimation we would assume
  // the loop was extremely hot.
  //
  // FIXME: Handle this locally as well so that BPI and BFI stay consistent.
  // We shouldn't make edges extremely likely or unlikely based solely on
  // static estimation.
  if (BBSuccProbs.size() >= 2 && doesBlockHaveProfileData(BB)) {
    SmallVector<uint32_t, 4> Weights;
    for (auto Prob : BBSuccProbs)
      Weights.push_back(Prob.getNumerator());

    auto TI = BB->getTerminator();
    TI->setMetadata(
        LLVMContext::MD_prof,
        MDBuilder(TI->getParent()->getContext()).createBranchWeights(Weights));
  }
}

/// duplicateCondBranchOnPHIIntoPred - PredBB contains an unconditional branch
/// to BB which contains an i1 PHI node and a conditional branch on that PHI.
/// If we can duplicate the contents of BB up into PredBB do so now, this
/// improves the odds that the branch will be on an analyzable instruction like
/// a compare.
bool JumpThreadingPass::duplicateCondBranchOnPHIIntoPred(
    BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs) {
  assert(!PredBBs.empty() && "Can't handle an empty set");

  // If BB is a loop header, then duplicating this block outside the loop would
  // cause us to transform this into an irreducible loop, don't do this.
  // See the comments above findLoopHeaders for justifications and caveats.
  if (LoopHeaders.count(BB)) {
    LLVM_DEBUG(dbgs() << "  Not duplicating loop header '" << BB->getName()
                      << "' into predecessor block '" << PredBBs[0]->getName()
                      << "' - it might create an irreducible loop!\n");
    return false;
  }

  unsigned DuplicationCost =
      getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold);
  if (DuplicationCost > BBDupThreshold) {
    LLVM_DEBUG(dbgs() << "  Not duplicating BB '" << BB->getName()
                      << "' - Cost is too high: " << DuplicationCost << "\n");
    return false;
  }

  // And finally, do it!  Start by factoring the predecessors if needed.
  std::vector<DominatorTree::UpdateType> Updates;
  BasicBlock *PredBB;
  if (PredBBs.size() == 1)
    PredBB = PredBBs[0];
  else {
    LLVM_DEBUG(dbgs() << "  Factoring out " << PredBBs.size()
                      << " common predecessors.\n");
    PredBB = splitBlockPreds(BB, PredBBs, ".thr_comm");
  }
  Updates.push_back({DominatorTree::Delete, PredBB, BB});

  // Okay, we decided to do this!  Clone all the instructions in BB onto the end
  // of PredBB.
  LLVM_DEBUG(dbgs() << "  Duplicating block '" << BB->getName()
                    << "' into end of '" << PredBB->getName()
                    << "' to eliminate branch on phi.  Cost: "
                    << DuplicationCost << " block is:" << *BB << "\n");

  // Unless PredBB ends with an unconditional branch, split the edge so that we
  // can just clone the bits from BB into the end of the new PredBB.
  BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator());

  if (!OldPredBranch || !OldPredBranch->isUnconditional()) {
    BasicBlock *OldPredBB = PredBB;
    PredBB = SplitEdge(OldPredBB, BB);
    Updates.push_back({DominatorTree::Insert, OldPredBB, PredBB});
    Updates.push_back({DominatorTree::Insert, PredBB, BB});
    Updates.push_back({DominatorTree::Delete, OldPredBB, BB});
    OldPredBranch = cast<BranchInst>(PredBB->getTerminator());
  }

  // We are going to have to map operands from the original BB block into the
  // PredBB block.  Evaluate PHI nodes in BB.
  DenseMap<Instruction *, Value *> ValueMapping;

  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);
  // Clone the non-phi instructions of BB into PredBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  for (; BI != BB->end(); ++BI) {
    Instruction *New = BI->clone();

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        DenseMap<Instruction *, Value *>::iterator I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }

    // If this instruction can be simplified after the operands are updated,
    // just use the simplified value instead.  This frequently happens due to
    // phi translation.
    if (Value *IV = SimplifyInstruction(
            New,
            {BB->getModule()->getDataLayout(), TLI, nullptr, nullptr, New})) {
      ValueMapping[&*BI] = IV;
      if (!New->mayHaveSideEffects()) {
        New->deleteValue();
        New = nullptr;
      }
    } else {
      ValueMapping[&*BI] = New;
    }
    if (New) {
      // Otherwise, insert the new instruction into the block.
      New->setName(BI->getName());
      PredBB->getInstList().insert(OldPredBranch->getIterator(), New);
      // Update Dominance from simplified New instruction operands.
      for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
        if (BasicBlock *SuccBB = dyn_cast<BasicBlock>(New->getOperand(i)))
          Updates.push_back({DominatorTree::Insert, PredBB, SuccBB});
    }
  }

  // Check to see if the targets of the branch had PHI nodes. If so, we need to
  // add entries to the PHI nodes for branch from PredBB now.
  BranchInst *BBBranch = cast<BranchInst>(BB->getTerminator());
  addPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(0), BB, PredBB,
                                  ValueMapping);
  addPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(1), BB, PredBB,
                                  ValueMapping);

  updateSSA(BB, PredBB, ValueMapping);

  // PredBB no longer jumps to BB, remove entries in the PHI node for the edge
  // that we nuked.
  BB->removePredecessor(PredBB, true);

  // Remove the unconditional branch at the end of the PredBB block.
  OldPredBranch->eraseFromParent();
  if (HasProfileData)
    BPI->copyEdgeProbabilities(BB, PredBB);
  DTU->applyUpdatesPermissive(Updates);

  ++NumDupes;
  return true;
}

// Pred is a predecessor of BB with an unconditional branch to BB. SI is
// a Select instruction in Pred. BB has other predecessors and SI is used in
// a PHI node in BB. SI has no other use.
// A new basic block, NewBB, is created and SI is converted to compare and
// conditional branch. SI is erased from parent.
void JumpThreadingPass::unfoldSelectInstr(BasicBlock *Pred, BasicBlock *BB,
                                          SelectInst *SI, PHINode *SIUse,
                                          unsigned Idx) {
  // Expand the select.
  //
  // Pred --
  //  |    v
  //  |  NewBB
  //  |    |
  //  |-----
  //  v
  // BB
  BranchInst *PredTerm = cast<BranchInst>(Pred->getTerminator());
  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "select.unfold",
                                         BB->getParent(), BB);
  // Move the unconditional branch to NewBB.
  PredTerm->removeFromParent();
  NewBB->getInstList().insert(NewBB->end(), PredTerm);
  // Create a conditional branch and update PHI nodes.
  auto *BI = BranchInst::Create(NewBB, BB, SI->getCondition(), Pred);
  BI->applyMergedLocation(PredTerm->getDebugLoc(), SI->getDebugLoc());
  SIUse->setIncomingValue(Idx, SI->getFalseValue());
  SIUse->addIncoming(SI->getTrueValue(), NewBB);

  // The select is now dead.
  SI->eraseFromParent();
  DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, BB},
                               {DominatorTree::Insert, Pred, NewBB}});

  // Update any other PHI nodes in BB.
  for (BasicBlock::iterator BI = BB->begin();
       PHINode *Phi = dyn_cast<PHINode>(BI); ++BI)
    if (Phi != SIUse)
      Phi->addIncoming(Phi->getIncomingValueForBlock(Pred), NewBB);
}
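
/// tryToUnfoldSelect (switch variant) - The switch condition in BB is a PHI;
/// if one of its incoming values is a single-use select defined in the
/// corresponding predecessor, unfold that select into a conditional branch so
/// the incoming edge can later be threaded.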
bool JumpThreadingPass::tryToUnfoldSelect(SwitchInst *SI, BasicBlock *BB) {
  PHINode *CondPHI = dyn_cast<PHINode>(SI->getCondition());

  if (!CondPHI || CondPHI->getParent() != BB)
    return false;

  for (unsigned I = 0, E = CondPHI->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *Pred = CondPHI->getIncomingBlock(I);
    SelectInst *PredSI = dyn_cast<SelectInst>(CondPHI->getIncomingValue(I));

    // The second and third condition can be potentially relaxed. Currently
    // the conditions help to simplify the code and allow us to reuse existing
    // code, developed for tryToUnfoldSelect(CmpInst *, BasicBlock *)
    if (!PredSI || PredSI->getParent() != Pred || !PredSI->hasOneUse())
      continue;

    BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator());
    if (!PredTerm || !PredTerm->isUnconditional())
      continue;

    unfoldSelectInstr(Pred, BB, PredSI, CondPHI, I);
    return true;
  }
  return false;
}

/// tryToUnfoldSelect - Look for blocks of the form
/// bb1:
///   %a = select
///   br bb2
///
/// bb2:
///   %p = phi [%a, %bb1] ...
///   %c = icmp %p
///   br i1 %c
///
/// And expand the select into a branch structure if one of its arms allows %c
/// to be folded. This later enables threading from bb1 over bb2.
bool JumpThreadingPass::tryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB) {
  BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
  PHINode *CondLHS = dyn_cast<PHINode>(CondCmp->getOperand(0));
  Constant *CondRHS = cast<Constant>(CondCmp->getOperand(1));

  if (!CondBr || !CondBr->isConditional() || !CondLHS ||
      CondLHS->getParent() != BB)
    return false;

  for (unsigned I = 0, E = CondLHS->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *Pred = CondLHS->getIncomingBlock(I);
    SelectInst *SI = dyn_cast<SelectInst>(CondLHS->getIncomingValue(I));

    // Look if one of the incoming values is a select in the corresponding
    // predecessor.
    if (!SI || SI->getParent() != Pred || !SI->hasOneUse())
      continue;

    BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator());
    if (!PredTerm || !PredTerm->isUnconditional())
      continue;

    // Now check if one of the select values would allow us to constant fold the
    // terminator in BB. We don't do the transform if both sides fold, those
    // cases will be threaded in any case.
    LazyValueInfo::Tristate LHSFolds =
        LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(1),
                                CondRHS, Pred, BB, CondCmp);
    LazyValueInfo::Tristate RHSFolds =
        LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(2),
                                CondRHS, Pred, BB, CondCmp);
    if ((LHSFolds != LazyValueInfo::Unknown ||
         RHSFolds != LazyValueInfo::Unknown) &&
        LHSFolds != RHSFolds) {
      unfoldSelectInstr(Pred, BB, SI, CondLHS, I);
      return true;
    }
  }
  return false;
}

/// tryToUnfoldSelectInCurrBB - Look for PHI/Select or PHI/CMP/Select in the
/// same BB in the form
/// bb:
///   %p = phi [false, %bb1], [true, %bb2], [false, %bb3], [true, %bb4], ...
///   %s = select %p, trueval, falseval
///
/// or
///
/// bb:
///   %p = phi [0, %bb1], [1, %bb2], [0, %bb3], [1, %bb4], ...
///   %c = cmp %p, 0
///   %s = select %c, trueval, falseval
///
/// And expand the select into a branch structure. This later enables
/// jump-threading over bb in this pass.
///
/// Using the similar approach of SimplifyCFG::FoldCondBranchOnPHI(), unfold
/// select if the associated PHI has at least one constant.  If the unfolded
/// select is not jump-threaded, it will be folded again in the later
/// optimizations.
bool JumpThreadingPass::tryToUnfoldSelectInCurrBB(BasicBlock *BB) {
  // This transform would reduce the quality of msan diagnostics.
  // Disable this transform under MemorySanitizer.
  if (BB->getParent()->hasFnAttribute(Attribute::SanitizeMemory))
    return false;

  // If threading this would thread across a loop header, don't thread the edge.
  // See the comments above findLoopHeaders for justifications and caveats.
  if (LoopHeaders.count(BB))
    return false;

  for (BasicBlock::iterator BI = BB->begin();
       PHINode *PN = dyn_cast<PHINode>(BI); ++BI) {
    // Look for a Phi having at least one constant incoming value.
    if (llvm::all_of(PN->incoming_values(),
                     [](Value *V) { return !isa<ConstantInt>(V); }))
      continue;

    auto isUnfoldCandidate = [BB](SelectInst *SI, Value *V) {
      using namespace PatternMatch;

      // Check if SI is in BB and use V as condition.
      if (SI->getParent() != BB)
        return false;
      Value *Cond = SI->getCondition();
      bool IsAndOr = match(SI, m_CombineOr(m_LogicalAnd(), m_LogicalOr()));
      return Cond && Cond == V && Cond->getType()->isIntegerTy(1) && !IsAndOr;
    };
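
    // Find a select we can unfold: either one that uses PN directly as its
    // condition, or one whose condition is a single-use ICmp of PN against a
    // constant.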
    SelectInst *SI = nullptr;
    for (Use &U : PN->uses()) {
      if (ICmpInst *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
        // Look for a ICmp in BB that compares PN with a constant and is the
        // condition of a Select.
        if (Cmp->getParent() == BB && Cmp->hasOneUse() &&
            isa<ConstantInt>(Cmp->getOperand(1 - U.getOperandNo())))
          if (SelectInst *SelectI = dyn_cast<SelectInst>(Cmp->user_back()))
            if (isUnfoldCandidate(SelectI, Cmp->use_begin()->get())) {
              SI = SelectI;
              break;
            }
      } else if (SelectInst *SelectI = dyn_cast<SelectInst>(U.getUser())) {
        // Look for a Select in BB that uses PN as condition.
        if (isUnfoldCandidate(SelectI, U.get())) {
          SI = SelectI;
          break;
        }
      }
    }

    if (!SI)
      continue;
    // Expand the select.
    Value *Cond = SI->getCondition();
    if (InsertFreezeWhenUnfoldingSelect &&
        !isGuaranteedNotToBeUndefOrPoison(Cond, nullptr, SI,
                                          &DTU->getDomTree()))
      Cond = new FreezeInst(Cond, "cond.fr", SI);
    Instruction *Term = SplitBlockAndInsertIfThen(Cond, SI, false);
    BasicBlock *SplitBB = SI->getParent();
    BasicBlock *NewBB = Term->getParent();
    PHINode *NewPN = PHINode::Create(SI->getType(), 2, "", SI);
    NewPN->addIncoming(SI->getTrueValue(), Term->getParent());
    NewPN->addIncoming(SI->getFalseValue(), BB);
    SI->replaceAllUsesWith(NewPN);
    SI->eraseFromParent();
    // NewBB and SplitBB are newly created blocks which require insertion.
    std::vector<DominatorTree::UpdateType> Updates;
    Updates.reserve((2 * SplitBB->getTerminator()->getNumSuccessors()) + 3);
    Updates.push_back({DominatorTree::Insert, BB, SplitBB});
    Updates.push_back({DominatorTree::Insert, BB, NewBB});
    Updates.push_back({DominatorTree::Insert, NewBB, SplitBB});
    // BB's successors were moved to SplitBB, update DTU accordingly.
    for (auto *Succ : successors(SplitBB)) {
      Updates.push_back({DominatorTree::Delete, BB, Succ});
      Updates.push_back({DominatorTree::Insert, SplitBB, Succ});
    }
    DTU->applyUpdatesPermissive(Updates);
    return true;
  }
  return false;
}

/// Try to propagate a guard from the current BB into one of its predecessors
/// in case if another branch of execution implies that the condition of this
/// guard is always true. Currently we only process the simplest case that
/// looks like:
///
/// Start:
///   %cond = ...
///   br i1 %cond, label %T1, label %F1
/// T1:
///   br label %Merge
/// F1:
///   br label %Merge
/// Merge:
///   %condGuard = ...
///   call void(i1, ...) @llvm.experimental.guard( i1 %condGuard )[ "deopt"() ]
///
/// And cond either implies condGuard or !condGuard. In this case all the
/// instructions before the guard can be duplicated in both branches, and the
/// guard is then threaded to one of them.
bool JumpThreadingPass::processGuards(BasicBlock *BB) {
  using namespace PatternMatch;

  // We only want to deal with two predecessors.
  BasicBlock *Pred1, *Pred2;
  auto PI = pred_begin(BB), PE = pred_end(BB);
  if (PI == PE)
    return false;
  Pred1 = *PI++;
  if (PI == PE)
    return false;
  Pred2 = *PI++;
  if (PI != PE)
    return false;
  if (Pred1 == Pred2)
    return false;

  // Try to thread one of the guards of the block.
  // TODO: Look up deeper than to immediate predecessor?
  auto *Parent = Pred1->getSinglePredecessor();
  if (!Parent || Parent != Pred2->getSinglePredecessor())
    return false;

  if (auto *BI = dyn_cast<BranchInst>(Parent->getTerminator()))
    for (auto &I : *BB)
      if (isGuard(&I) && threadGuard(BB, cast<IntrinsicInst>(&I), BI))
        return true;

  return false;
}

/// Try to propagate the guard from BB which is the lower block of a diamond
/// to one of its branches, in case if diamond's condition implies guard's
/// condition.
bool JumpThreadingPass::threadGuard(BasicBlock *BB, IntrinsicInst *Guard,
                                    BranchInst *BI) {
  assert(BI->getNumSuccessors() == 2 && "Wrong number of successors?");
  assert(BI->isConditional() && "Unconditional branch has 2 successors?");
  Value *GuardCond = Guard->getArgOperand(0);
  Value *BranchCond = BI->getCondition();
  BasicBlock *TrueDest = BI->getSuccessor(0);
  BasicBlock *FalseDest = BI->getSuccessor(1);

  auto &DL = BB->getModule()->getDataLayout();
  bool TrueDestIsSafe = false;
  bool FalseDestIsSafe = false;

  // True dest is safe if BranchCond => GuardCond.
  auto Impl = isImpliedCondition(BranchCond, GuardCond, DL);
  if (Impl && *Impl)
    TrueDestIsSafe = true;
  else {
    // False dest is safe if !BranchCond => GuardCond.
    Impl = isImpliedCondition(BranchCond, GuardCond, DL, /* LHSIsTrue */ false);
    if (Impl && *Impl)
      FalseDestIsSafe = true;
  }

  if (!TrueDestIsSafe && !FalseDestIsSafe)
    return false;
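
  // The successor on which the implication holds can drop the guard (the
  // unguarded copy); the other successor keeps a duplicated guard.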
  BasicBlock *PredUnguardedBlock = TrueDestIsSafe ? TrueDest : FalseDest;
  BasicBlock *PredGuardedBlock = FalseDestIsSafe ? TrueDest : FalseDest;

  ValueToValueMapTy UnguardedMapping, GuardedMapping;
  Instruction *AfterGuard = Guard->getNextNode();
  unsigned Cost = getJumpThreadDuplicationCost(BB, AfterGuard, BBDupThreshold);
  if (Cost > BBDupThreshold)
    return false;
  // Duplicate all instructions before the guard and the guard itself to the
  // branch where implication is not proved.
  BasicBlock *GuardedBlock = DuplicateInstructionsInSplitBetween(
      BB, PredGuardedBlock, AfterGuard, GuardedMapping, *DTU);
  assert(GuardedBlock && "Could not create the guarded block?");
  // Duplicate all instructions before the guard in the unguarded branch.
  // Since we have successfully duplicated the guarded block and this block
  // has fewer instructions, we expect it to succeed.
  BasicBlock *UnguardedBlock = DuplicateInstructionsInSplitBetween(
      BB, PredUnguardedBlock, Guard, UnguardedMapping, *DTU);
  assert(UnguardedBlock && "Could not create the unguarded block?");
  LLVM_DEBUG(dbgs() << "Moved guard " << *Guard << " to block "
                    << GuardedBlock->getName() << "\n");
  // Some instructions before the guard may still have uses. For them, we need
  // to create Phi nodes merging their copies in both guarded and unguarded
  // branches. Those instructions that have no uses can be just removed.
  SmallVector<Instruction *, 4> ToRemove;
  for (auto BI = BB->begin(); &*BI != AfterGuard; ++BI)
    if (!isa<PHINode>(&*BI))
      ToRemove.push_back(&*BI);

  Instruction *InsertionPoint = &*BB->getFirstInsertionPt();
  assert(InsertionPoint && "Empty block?");
  // Substitute with Phis & remove.
  for (auto *Inst : reverse(ToRemove)) {
    if (!Inst->use_empty()) {
      PHINode *NewPN = PHINode::Create(Inst->getType(), 2);
      NewPN->addIncoming(UnguardedMapping[Inst], UnguardedBlock);
      NewPN->addIncoming(GuardedMapping[Inst], GuardedBlock);
      NewPN->insertBefore(InsertionPoint);
      Inst->replaceAllUsesWith(NewPN);
    }
    Inst->eraseFromParent();