//===- LowerSwitch.cpp - Eliminate Switch instructions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The LowerSwitch transformation rewrites switch instructions with a sequence
// of branches, which allows targets to get away with not implementing the
// switch instruction until it is convenient.
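//
// For example, a switch such as
//
//   switch i32 %val, label %default [ i32 0, label %bb0
//                                     i32 1, label %bb1
//                                     i32 4, label %bb2 ]
//
// is rewritten into a sequence of 'icmp' comparisons and conditional branches
// that performs a binary search over the (clustered) case values.
//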
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "lower-switch"

namespace {

struct IntRange {
  int64_t Low, High;
};

} // end anonymous namespace

// Return true iff R is covered by Ranges.
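// For example, given Ranges = {[0, 3], [10, 20]} (sorted, non-overlapping and
// non-adjacent), R = [11, 15] is covered, while R = [4, 9] is not.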
static bool IsInRanges(const IntRange &R,
                       const std::vector<IntRange> &Ranges) {
  // Note: Ranges must be sorted, non-overlapping and non-adjacent.

  // Find the first range whose High field is >= R.High,
  // then check if the Low field is <= R.Low. If so, we
  // have a Range that covers R.
  auto I = llvm::lower_bound(
      Ranges, R, [](IntRange A, IntRange B) { return A.High < B.High; });
  return I != Ranges.end() && I->Low <= R.Low;
}

namespace {

/// Replace all SwitchInst instructions with chained branch instructions.
class LowerSwitch : public FunctionPass {
public:
  // Pass identification, replacement for typeid
  static char ID;

  LowerSwitch() : FunctionPass(ID) {
    initializeLowerSwitchPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LazyValueInfoWrapperPass>();
  }

  struct CaseRange {
    ConstantInt* Low;
    ConstantInt* High;
    BasicBlock* BB;

    CaseRange(ConstantInt *low, ConstantInt *high, BasicBlock *bb)
        : Low(low), High(high), BB(bb) {}
  };

  using CaseVector = std::vector<CaseRange>;
  using CaseItr = std::vector<CaseRange>::iterator;

private:
  void processSwitchInst(SwitchInst *SI,
                         SmallPtrSetImpl<BasicBlock *> &DeleteList,
                         AssumptionCache *AC, LazyValueInfo *LVI);

  BasicBlock *switchConvert(CaseItr Begin, CaseItr End,
                            ConstantInt *LowerBound, ConstantInt *UpperBound,
                            Value *Val, BasicBlock *Predecessor,
                            BasicBlock *OrigBlock, BasicBlock *Default,
                            const std::vector<IntRange> &UnreachableRanges);
  BasicBlock *newLeafBlock(CaseRange &Leaf, Value *Val,
                           ConstantInt *LowerBound, ConstantInt *UpperBound,
                           BasicBlock *OrigBlock, BasicBlock *Default);
  unsigned Clusterify(CaseVector &Cases, SwitchInst *SI);
};

/// The comparison function for sorting the switch case values in the vector.
/// WARNING: Case ranges should be disjoint!
struct CaseCmp {
  bool operator()(const LowerSwitch::CaseRange& C1,
                  const LowerSwitch::CaseRange& C2) {
    const ConstantInt* CI1 = cast<const ConstantInt>(C1.Low);
    const ConstantInt* CI2 = cast<const ConstantInt>(C2.High);
    return CI1->getValue().slt(CI2->getValue());
  }
};

} // end anonymous namespace

char LowerSwitch::ID = 0;

// Publicly exposed interface to pass...
char &llvm::LowerSwitchID = LowerSwitch::ID;

INITIALIZE_PASS_BEGIN(LowerSwitch, "lowerswitch",
                      "Lower SwitchInst's to branches", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
INITIALIZE_PASS_END(LowerSwitch, "lowerswitch",
                    "Lower SwitchInst's to branches", false, false)

// createLowerSwitchPass - Interface to this file...
FunctionPass *llvm::createLowerSwitchPass() {
  return new LowerSwitch();
}

bool LowerSwitch::runOnFunction(Function &F) {
  LazyValueInfo *LVI = &getAnalysis<LazyValueInfoWrapperPass>().getLVI();
  auto *ACT = getAnalysisIfAvailable<AssumptionCacheTracker>();
  AssumptionCache *AC = ACT ? &ACT->getAssumptionCache(F) : nullptr;
  // Prevent LazyValueInfo from using the DominatorTree as LowerSwitch does not
  // preserve it and it becomes stale (when available) pretty much immediately.
  // Currently the DominatorTree is only used by LowerSwitch indirectly via LVI
  // and computeKnownBits to refine isValidAssumeForContext's results. Given
  // that the latter can handle some of the simple cases w/o a DominatorTree,
  // it's easier to refrain from using the tree than to keep it up to date.
  LVI->disableDT();

  bool Changed = false;
  SmallPtrSet<BasicBlock*, 8> DeleteList;

  for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
    BasicBlock *Cur = &*I++; // Advance over block so we don't traverse new blocks

    // If the block is a dead Default block that will be deleted later, don't
    // waste time processing it.
    if (DeleteList.count(Cur))
      continue;

    if (SwitchInst *SI = dyn_cast<SwitchInst>(Cur->getTerminator())) {
      Changed = true;
      processSwitchInst(SI, DeleteList, AC, LVI);
    }
  }

  for (BasicBlock* BB: DeleteList) {
    LVI->eraseBlock(BB);
    DeleteDeadBlock(BB);
  }

  return Changed;
}

/// Used for debugging purposes.
LLVM_ATTRIBUTE_USED
static raw_ostream &operator<<(raw_ostream &O,
                               const LowerSwitch::CaseVector &C) {
  O << "[";

  for (LowerSwitch::CaseVector::const_iterator B = C.begin(), E = C.end();
       B != E;) {
    O << "[" << B->Low->getValue() << ", " << B->High->getValue() << "]";
    if (++B != E)
      O << ", ";
  }

  return O << "]";
}

/// Update the first occurrence of the "switch statement" BB in the PHI
/// node with the "new" BB. The other occurrences will:
///
/// 1) Be updated by subsequent calls to this function. Switch statements may
/// have more than one outgoing edge into the same BB if they all have the same
/// value. When the switch statement is converted these incoming edges are now
/// coming from multiple BBs.
/// 2) Be removed if subsequent incoming values now share the same case, i.e.,
/// multiple outcome edges are condensed into one. This is necessary to keep the
/// number of phi values equal to the number of branches to SuccBB.
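///
/// For example, if OrigBB originally had three edges into SuccBB that were
/// condensed into a single case, the first PHI entry for OrigBB is redirected
/// to NewBB and, with NumMergedCases == 2, the remaining two entries for
/// OrigBB are removed.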
static void
fixPhis(BasicBlock *SuccBB, BasicBlock *OrigBB, BasicBlock *NewBB,
        const unsigned NumMergedCases = std::numeric_limits<unsigned>::max()) {
  for (BasicBlock::iterator I = SuccBB->begin(),
                            IE = SuccBB->getFirstNonPHI()->getIterator();
       I != IE; ++I) {
    PHINode *PN = cast<PHINode>(I);

    // Only update the first occurrence.
    unsigned Idx = 0, E = PN->getNumIncomingValues();
    unsigned LocalNumMergedCases = NumMergedCases;
    for (; Idx != E; ++Idx) {
      if (PN->getIncomingBlock(Idx) == OrigBB) {
        PN->setIncomingBlock(Idx, NewBB);
        break;
      }
    }

    // Remove additional occurrences coming from condensed cases and keep the
    // number of incoming values equal to the number of branches to SuccBB.
    SmallVector<unsigned, 8> Indices;
    for (++Idx; LocalNumMergedCases > 0 && Idx < E; ++Idx)
      if (PN->getIncomingBlock(Idx) == OrigBB) {
        Indices.push_back(Idx);
        LocalNumMergedCases--;
      }

    // Remove incoming values in the reverse order to prevent invalidating
    // *successive* index.
    for (unsigned III : llvm::reverse(Indices))
      PN->removeIncomingValue(III);
  }
}

/// Convert the switch statement into a binary lookup of the case values.
/// The function recursively builds this tree. LowerBound and UpperBound are
/// used to keep track of the bounds for Val that have already been checked by
/// a block emitted by one of the previous calls to switchConvert in the call
/// stack.
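///
/// For example, for the clustered cases {0, 1, 5, [7, 10]} the pivot is the
/// case 5: the left subtree handles {0, 1} under the bounds [LowerBound, 4]
/// and the right subtree handles {5, [7, 10]} under the bounds
/// [5, UpperBound] (these bounds can be tightened further when the gap [2, 4]
/// is known to be unreachable).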
BasicBlock *
LowerSwitch::switchConvert(CaseItr Begin, CaseItr End, ConstantInt *LowerBound,
                           ConstantInt *UpperBound, Value *Val,
                           BasicBlock *Predecessor, BasicBlock *OrigBlock,
                           BasicBlock *Default,
                           const std::vector<IntRange> &UnreachableRanges) {
  assert(LowerBound && UpperBound && "Bounds must be initialized");
  unsigned Size = End - Begin;

  if (Size == 1) {
    // Check if the Case Range is perfectly squeezed in between
    // already checked Upper and Lower bounds. If it is then we can avoid
    // emitting the code that checks if the value actually falls in the range
    // because the bounds already tell us so.
    if (Begin->Low == LowerBound && Begin->High == UpperBound) {
      unsigned NumMergedCases = 0;
      NumMergedCases = UpperBound->getSExtValue() - LowerBound->getSExtValue();
      fixPhis(Begin->BB, OrigBlock, Predecessor, NumMergedCases);
      return Begin->BB;
    }
    return newLeafBlock(*Begin, Val, LowerBound, UpperBound, OrigBlock,
                        Default);
  }

  unsigned Mid = Size / 2;
  std::vector<CaseRange> LHS(Begin, Begin + Mid);
  LLVM_DEBUG(dbgs() << "LHS: " << LHS << "\n");
  std::vector<CaseRange> RHS(Begin + Mid, End);
  LLVM_DEBUG(dbgs() << "RHS: " << RHS << "\n");

  CaseRange &Pivot = *(Begin + Mid);
  LLVM_DEBUG(dbgs() << "Pivot ==> [" << Pivot.Low->getValue() << ", "
                    << Pivot.High->getValue() << "]\n");

  // NewLowerBound here should never be the integer minimal value.
  // This is because it is computed from a case range that is never
  // the smallest, so there is always a case range that has at least
  // a smaller value.
  ConstantInt *NewLowerBound = Pivot.Low;

  // Because NewLowerBound is never the smallest representable integer
  // it is safe here to subtract one.
  ConstantInt *NewUpperBound = ConstantInt::get(NewLowerBound->getContext(),
                                                NewLowerBound->getValue() - 1);

  if (!UnreachableRanges.empty()) {
    // Check if the gap between LHS's highest and NewLowerBound is unreachable.
    int64_t GapLow = LHS.back().High->getSExtValue() + 1;
    int64_t GapHigh = NewLowerBound->getSExtValue() - 1;
    IntRange Gap = { GapLow, GapHigh };
    if (GapHigh >= GapLow && IsInRanges(Gap, UnreachableRanges))
      NewUpperBound = LHS.back().High;
  }

  LLVM_DEBUG(dbgs() << "LHS Bounds ==> [" << LowerBound->getSExtValue() << ", "
                    << NewUpperBound->getSExtValue() << "]\n"
                    << "RHS Bounds ==> [" << NewLowerBound->getSExtValue()
                    << ", " << UpperBound->getSExtValue() << "]\n");

  // Create a new node that checks if the value is < pivot. Go to the
  // left branch if it is and right branch if not.
  Function* F = OrigBlock->getParent();
  BasicBlock* NewNode = BasicBlock::Create(Val->getContext(), "NodeBlock");

  ICmpInst* Comp = new ICmpInst(ICmpInst::ICMP_SLT,
                                Val, Pivot.Low, "Pivot");

  BasicBlock *LBranch = switchConvert(LHS.begin(), LHS.end(), LowerBound,
                                      NewUpperBound, Val, NewNode, OrigBlock,
                                      Default, UnreachableRanges);
  BasicBlock *RBranch = switchConvert(RHS.begin(), RHS.end(), NewLowerBound,
                                      UpperBound, Val, NewNode, OrigBlock,
                                      Default, UnreachableRanges);

  F->getBasicBlockList().insert(++OrigBlock->getIterator(), NewNode);
  NewNode->getInstList().push_back(Comp);

  BranchInst::Create(LBranch, RBranch, Comp, NewNode);
  return NewNode;
}

/// Create a new leaf block for the binary lookup tree. It checks if the
/// switch's value == the case's value. If not, then it jumps to the default
/// branch. At this point in the tree, the value can't be another valid case
/// value, so the jump to the "default" branch is warranted.
BasicBlock *LowerSwitch::newLeafBlock(CaseRange &Leaf, Value *Val,
                                      ConstantInt *LowerBound,
                                      ConstantInt *UpperBound,
                                      BasicBlock *OrigBlock,
                                      BasicBlock *Default) {
  Function* F = OrigBlock->getParent();
  BasicBlock* NewLeaf = BasicBlock::Create(Val->getContext(), "LeafBlock");
  F->getBasicBlockList().insert(++OrigBlock->getIterator(), NewLeaf);

  // Emit comparison
  ICmpInst* Comp = nullptr;
  if (Leaf.Low == Leaf.High) {
    // Make the seteq instruction...
    Comp = new ICmpInst(*NewLeaf, ICmpInst::ICMP_EQ, Val,
                        Leaf.Low, "SwitchLeaf");
  } else {
    // Make range comparison
    if (Leaf.Low == LowerBound) {
      // Val >= Min && Val <= Hi --> Val <= Hi
      Comp = new ICmpInst(*NewLeaf, ICmpInst::ICMP_SLE, Val, Leaf.High,
                          "SwitchLeaf");
    } else if (Leaf.High == UpperBound) {
      // Val <= Max && Val >= Lo --> Val >= Lo
      Comp = new ICmpInst(*NewLeaf, ICmpInst::ICMP_SGE, Val, Leaf.Low,
                          "SwitchLeaf");
    } else if (Leaf.Low->isZero()) {
      // Val >= 0 && Val <= Hi --> Val <=u Hi
      Comp = new ICmpInst(*NewLeaf, ICmpInst::ICMP_ULE, Val, Leaf.High,
                          "SwitchLeaf");
    } else {
      // Emit V-Lo <=u Hi-Lo
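      // For example, for the case range [10, 20] this emits
      //   %off = add i32 %val, -10      ; V - Lo
      // and then checks %off <=u 10 (Hi - Lo), which holds exactly when
      // 10 <= %val <= 20.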
      Constant* NegLo = ConstantExpr::getNeg(Leaf.Low);
      Instruction* Add = BinaryOperator::CreateAdd(Val, NegLo,
                                                   Val->getName()+".off",
                                                   NewLeaf);
      Constant *UpperBound = ConstantExpr::getAdd(NegLo, Leaf.High);
      Comp = new ICmpInst(*NewLeaf, ICmpInst::ICMP_ULE, Add, UpperBound,
                          "SwitchLeaf");
    }
  }

  // Make the conditional branch...
  BasicBlock* Succ = Leaf.BB;
  BranchInst::Create(Succ, Default, Comp, NewLeaf);

  // If there were any PHI nodes in this successor, rewrite one entry
  // from OrigBlock to come from NewLeaf.
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    PHINode* PN = cast<PHINode>(I);
    // Remove all but one incoming entries from the cluster
    uint64_t Range = Leaf.High->getSExtValue() -
                     Leaf.Low->getSExtValue();
    for (uint64_t j = 0; j < Range; ++j) {
      PN->removeIncomingValue(OrigBlock);
    }

    int BlockIdx = PN->getBasicBlockIndex(OrigBlock);
    assert(BlockIdx != -1 && "Switch didn't go to this successor??");
    PN->setIncomingBlock((unsigned)BlockIdx, NewLeaf);
  }

  return NewLeaf;
}

/// Transform simple list of \p SI's cases into list of CaseRange's \p Cases.
/// \post \p Cases will not contain references to \p SI's default BB.
/// \returns Number of \p SI's cases that do not reference \p SI's default BB.
unsigned LowerSwitch::Clusterify(CaseVector& Cases, SwitchInst *SI) {
  unsigned NumSimpleCases = 0;

  // Start with "simple" cases
  for (auto Case : SI->cases()) {
    if (Case.getCaseSuccessor() == SI->getDefaultDest())
      continue;
    Cases.push_back(CaseRange(Case.getCaseValue(), Case.getCaseValue(),
                              Case.getCaseSuccessor()));
    ++NumSimpleCases;
  }

  llvm::sort(Cases, CaseCmp());

  // Merge cases into clusters.
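  // For example, cases 1, 2 and 3 that all branch to the same block are
  // condensed into the single case range [1, 3].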
  if (Cases.size() >= 2) {
    CaseItr I = Cases.begin();
    for (CaseItr J = std::next(I), E = Cases.end(); J != E; ++J) {
      int64_t nextValue = J->Low->getSExtValue();
      int64_t currentValue = I->High->getSExtValue();
      BasicBlock* nextBB = J->BB;
      BasicBlock* currentBB = I->BB;

      // If the two neighboring cases go to the same destination, merge them
      // into a single case.
      assert(nextValue > currentValue && "Cases should be strictly ascending");
      if ((nextValue == currentValue + 1) && (currentBB == nextBB)) {
        I->High = J->High;
        // FIXME: Combine branch weights.
      } else if (++I != J) {
        *I = *J;
      }
    }
    Cases.erase(std::next(I), Cases.end());
  }

  return NumSimpleCases;
}

/// Replace the specified switch instruction with a sequence of chained if-then
/// insts in a balanced binary search.
void LowerSwitch::processSwitchInst(SwitchInst *SI,
                                    SmallPtrSetImpl<BasicBlock *> &DeleteList,
                                    AssumptionCache *AC, LazyValueInfo *LVI) {
  BasicBlock *OrigBlock = SI->getParent();
  Function *F = OrigBlock->getParent();
  Value *Val = SI->getCondition(); // The value we are switching on...
  BasicBlock* Default = SI->getDefaultDest();

  // Don't handle unreachable blocks. If there are successors with phis, this
  // would leave them behind with missing predecessors.
  if ((OrigBlock != &F->getEntryBlock() && pred_empty(OrigBlock)) ||
      OrigBlock->getSinglePredecessor() == OrigBlock) {
    DeleteList.insert(OrigBlock);
    return;
  }

  // Prepare cases vector.
  CaseVector Cases;
  const unsigned NumSimpleCases = Clusterify(Cases, SI);
  LLVM_DEBUG(dbgs() << "Clusterify finished. Total clusters: " << Cases.size()
                    << ". Total non-default cases: " << NumSimpleCases
                    << "\nCase clusters: " << Cases << "\n");

  // If there is only the default destination, just branch.
  if (Cases.empty()) {
    BranchInst::Create(Default, OrigBlock);
    // Remove all the references from Default's PHIs to OrigBlock, but one.
    fixPhis(Default, OrigBlock, OrigBlock);
    SI->eraseFromParent();
    return;
  }

  ConstantInt *LowerBound = nullptr;
  ConstantInt *UpperBound = nullptr;
  bool DefaultIsUnreachableFromSwitch = false;

  if (isa<UnreachableInst>(Default->getFirstNonPHIOrDbg())) {
    // Make the bounds tightly fitted around the case value range, because we
    // know that the value passed to the switch must be exactly one of the case
    // values.
    LowerBound = Cases.front().Low;
    UpperBound = Cases.back().High;
    DefaultIsUnreachableFromSwitch = true;
  } else {
    // Constraining the range of the value being switched over helps eliminate
    // unreachable BBs and minimize the number of `add` instructions
    // newLeafBlock ends up emitting. Running CorrelatedValuePropagation after
    // LowerSwitch isn't as good, and is also much more expensive in terms of
    // compile time, for the following reasons:
    // 1. it processes many kinds of instructions, not just switches;
    // 2. even if limited to icmp instructions only, it will have to process
    //    roughly C icmp's per switch, where C is the number of cases in the
    //    switch, while LowerSwitch only needs to call LVI once per switch.
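    //
    // For example, if the analyses show that Val always lies in [0, 7] and the
    // cases already cover every value from 0 to 7, the default destination is
    // effectively unreachable.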
    const DataLayout &DL = F->getParent()->getDataLayout();
    KnownBits Known = computeKnownBits(Val, DL, /*Depth=*/0, AC, SI);
    // TODO Shouldn't this create a signed range?
    ConstantRange KnownBitsRange =
        ConstantRange::fromKnownBits(Known, /*IsSigned=*/false);
    const ConstantRange LVIRange = LVI->getConstantRange(Val, OrigBlock, SI);
    ConstantRange ValRange = KnownBitsRange.intersectWith(LVIRange);
    // We delegate removal of unreachable non-default cases to other passes. In
    // the unlikely event that some of them survived, we just conservatively
    // maintain the invariant that all the cases lie between the bounds. This
    // may, however, still render the default case effectively unreachable.
    APInt Low = Cases.front().Low->getValue();
    APInt High = Cases.back().High->getValue();
    APInt Min = APIntOps::smin(ValRange.getSignedMin(), Low);
    APInt Max = APIntOps::smax(ValRange.getSignedMax(), High);

    LowerBound = ConstantInt::get(SI->getContext(), Min);
    UpperBound = ConstantInt::get(SI->getContext(), Max);
    DefaultIsUnreachableFromSwitch = (Min + (NumSimpleCases - 1) == Max);
  }

  std::vector<IntRange> UnreachableRanges;

  if (DefaultIsUnreachableFromSwitch) {
    DenseMap<BasicBlock *, unsigned> Popularity;
    unsigned MaxPop = 0;
    BasicBlock *PopSucc = nullptr;
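
    // Compute the ranges of values that Val cannot take, given that the
    // default destination is unreachable. For example, with the reachable
    // cases {[0, 3], [7, 9]} the unreachable ranges are [INT64_MIN, -1],
    // [4, 6] and [10, INT64_MAX].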
    IntRange R = {std::numeric_limits<int64_t>::min(),
                  std::numeric_limits<int64_t>::max()};
    UnreachableRanges.push_back(R);
    for (const auto &I : Cases) {
      int64_t Low = I.Low->getSExtValue();
      int64_t High = I.High->getSExtValue();

      IntRange &LastRange = UnreachableRanges.back();
      if (LastRange.Low == Low) {
        // There is nothing left of the previous range.
        UnreachableRanges.pop_back();
      } else {
        // Terminate the previous range.
        assert(Low > LastRange.Low);
        LastRange.High = Low - 1;
      }
      if (High != std::numeric_limits<int64_t>::max()) {
        IntRange R = { High + 1, std::numeric_limits<int64_t>::max() };
        UnreachableRanges.push_back(R);
      }

      // Count popularity.
      int64_t N = High - Low + 1;
      unsigned &Pop = Popularity[I.BB];
      if ((Pop += N) > MaxPop) {
        MaxPop = Pop;
        PopSucc = I.BB;
      }
    }
#ifndef NDEBUG
    /* UnreachableRanges should be sorted and the ranges non-adjacent. */
    for (auto I = UnreachableRanges.begin(), E = UnreachableRanges.end();
         I != E; ++I) {
      assert(I->Low <= I->High);
      auto Next = I + 1;
      if (Next != E) {
        assert(Next->Low > I->High);
      }
    }
#endif

    // As the default block in the switch is unreachable, update the PHI nodes
    // (remove all of the references to the default block) to reflect this.
    const unsigned NumDefaultEdges = SI->getNumCases() + 1 - NumSimpleCases;
    for (unsigned I = 0; I < NumDefaultEdges; ++I)
      Default->removePredecessor(OrigBlock);

    // Use the most popular block as the new default, reducing the number of
    // cases.
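    // For example, if cases 1 through 10 all branch to the same block %bb and
    // no other successor is more popular, %bb becomes the new default and
    // those ten cases are dropped from the search tree.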
    assert(MaxPop > 0 && PopSucc);
    Default = PopSucc;
    Cases.erase(
        llvm::remove_if(
            Cases, [PopSucc](const CaseRange &R) { return R.BB == PopSucc; }),
        Cases.end());

    // If there are no cases left, just branch.
    if (Cases.empty()) {
      BranchInst::Create(Default, OrigBlock);
      SI->eraseFromParent();
      // As all the cases have been replaced with a single branch, only keep
      // one entry in the PHI nodes.
      for (unsigned I = 0 ; I < (MaxPop - 1) ; ++I)
        PopSucc->removePredecessor(OrigBlock);
      return;
    }

    // If the condition was a PHI node with the switch block as a predecessor
    // removing predecessors may have caused the condition to be erased.
    // Getting the condition value again here protects against that.
    Val = SI->getCondition();
  }

  // Create a new, empty default block so that the new hierarchy of
  // if-then statements goes to this and the PHI nodes are happy.
  BasicBlock *NewDefault = BasicBlock::Create(SI->getContext(), "NewDefault");
  F->getBasicBlockList().insert(Default->getIterator(), NewDefault);
  BranchInst::Create(Default, NewDefault);

  BasicBlock *SwitchBlock =
      switchConvert(Cases.begin(), Cases.end(), LowerBound, UpperBound, Val,
                    OrigBlock, OrigBlock, NewDefault, UnreachableRanges);

  // If there are entries in any PHI nodes for the default edge, make sure
  // to update them as well.
  fixPhis(Default, OrigBlock, NewDefault);

  // Branch to our shiny new if-then stuff...
  BranchInst::Create(SwitchBlock, OrigBlock);

  // We are now done with the switch instruction, delete it.
  BasicBlock *OldDefault = SI->getDefaultDest();
  OrigBlock->getInstList().erase(SI);

  // If the Default block has no more predecessors just add it to DeleteList.
  if (pred_begin(OldDefault) == pred_end(OldDefault))
    DeleteList.insert(OldDefault);
}