1 //===-- LoopUtils.cpp - Loop Utility functions -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines common loop utility functions.
11 //===----------------------------------------------------------------------===//
13 #include "llvm/Transforms/Utils/LoopUtils.h"
14 #include "llvm/ADT/DenseSet.h"
15 #include "llvm/ADT/PriorityWorklist.h"
16 #include "llvm/ADT/ScopeExit.h"
17 #include "llvm/ADT/SetVector.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/Analysis/AliasAnalysis.h"
21 #include "llvm/Analysis/BasicAliasAnalysis.h"
22 #include "llvm/Analysis/DomTreeUpdater.h"
23 #include "llvm/Analysis/GlobalsModRef.h"
24 #include "llvm/Analysis/InstSimplifyFolder.h"
25 #include "llvm/Analysis/LoopAccessAnalysis.h"
26 #include "llvm/Analysis/LoopInfo.h"
27 #include "llvm/Analysis/LoopPass.h"
28 #include "llvm/Analysis/MemorySSA.h"
29 #include "llvm/Analysis/MemorySSAUpdater.h"
30 #include "llvm/Analysis/ScalarEvolution.h"
31 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
32 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
33 #include "llvm/IR/DIBuilder.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/IntrinsicInst.h"
37 #include "llvm/IR/MDBuilder.h"
38 #include "llvm/IR/Module.h"
39 #include "llvm/IR/PatternMatch.h"
40 #include "llvm/IR/ProfDataUtils.h"
41 #include "llvm/IR/ValueHandle.h"
42 #include "llvm/InitializePasses.h"
43 #include "llvm/Pass.h"
44 #include "llvm/Support/Debug.h"
45 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
46 #include "llvm/Transforms/Utils/Local.h"
47 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
49 using namespace llvm;
50 using namespace llvm::PatternMatch;
52 #define DEBUG_TYPE "loop-utils"
54 static const char *LLVMLoopDisableNonforced = "llvm.loop.disable_nonforced";
55 static const char *LLVMLoopDisableLICM = "llvm.licm.disable";
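// For reference, these key strings appear in loop metadata of roughly this
// shape (illustrative IR, not taken from any particular test):
//   br i1 %cond, label %header, label %exit, !llvm.loop !0
//   !0 = distinct !{!0, !1, !2}
//   !1 = !{!"llvm.loop.disable_nonforced"}
//   !2 = !{!"llvm.licm.disable"}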
57 bool llvm::formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI,
58 MemorySSAUpdater *MSSAU,
59 bool PreserveLCSSA) {
60 bool Changed = false;
62 // We re-use a vector for the in-loop predecessors.
63 SmallVector<BasicBlock *, 4> InLoopPredecessors;
65 auto RewriteExit = [&](BasicBlock *BB) {
66 assert(InLoopPredecessors.empty() &&
67 "Must start with an empty predecessors list!");
68 auto Cleanup = make_scope_exit([&] { InLoopPredecessors.clear(); });
70 // See if there are any non-loop predecessors of this exit block and
71 // keep track of the in-loop predecessors.
72 bool IsDedicatedExit = true;
73 for (auto *PredBB : predecessors(BB))
74 if (L->contains(PredBB)) {
75 if (isa<IndirectBrInst>(PredBB->getTerminator()))
76 // We cannot rewrite exiting edges from an indirectbr.
77 return false;
79 InLoopPredecessors.push_back(PredBB);
80 } else {
81 IsDedicatedExit = false;
84 assert(!InLoopPredecessors.empty() && "Must have *some* loop predecessor!");
86 // Nothing to do if this is already a dedicated exit.
87 if (IsDedicatedExit)
88 return false;
90 auto *NewExitBB = SplitBlockPredecessors(
91 BB, InLoopPredecessors, ".loopexit", DT, LI, MSSAU, PreserveLCSSA);
93 if (!NewExitBB)
94 LLVM_DEBUG(
95 dbgs() << "WARNING: Can't create a dedicated exit block for loop: "
96 << *L << "\n");
97 else
98 LLVM_DEBUG(dbgs() << "LoopSimplify: Creating dedicated exit block "
99 << NewExitBB->getName() << "\n");
100 return true;
103 // Walk the exit blocks directly rather than building up a data structure for
104 // them, but only visit each one once.
105 SmallPtrSet<BasicBlock *, 4> Visited;
106 for (auto *BB : L->blocks())
107 for (auto *SuccBB : successors(BB)) {
108 // We're looking for exit blocks so skip in-loop successors.
109 if (L->contains(SuccBB))
110 continue;
112 // Visit each exit block exactly once.
113 if (!Visited.insert(SuccBB).second)
114 continue;
116 Changed |= RewriteExit(SuccBB);
119 return Changed;
122 /// Returns the instructions that use values defined in the loop.
123 SmallVector<Instruction *, 8> llvm::findDefsUsedOutsideOfLoop(Loop *L) {
124 SmallVector<Instruction *, 8> UsedOutside;
126 for (auto *Block : L->getBlocks())
127 // FIXME: I believe that this could use copy_if if the Inst reference could
128 // be adapted into a pointer.
129 for (auto &Inst : *Block) {
130 auto Users = Inst.users();
131 if (any_of(Users, [&](User *U) {
132 auto *Use = cast<Instruction>(U);
133 return !L->contains(Use->getParent());
135 UsedOutside.push_back(&Inst);
138 return UsedOutside;
141 void llvm::getLoopAnalysisUsage(AnalysisUsage &AU) {
142 // By definition, all loop passes need the LoopInfo analysis and the
143 // Dominator tree it depends on. Because they all participate in the loop
144 // pass manager, they must also preserve these.
145 AU.addRequired<DominatorTreeWrapperPass>();
146 AU.addPreserved<DominatorTreeWrapperPass>();
147 AU.addRequired<LoopInfoWrapperPass>();
148 AU.addPreserved<LoopInfoWrapperPass>();
150 // We must also preserve LoopSimplify and LCSSA. We locally access their IDs
151 // here because users shouldn't directly get them from this header.
152 extern char &LoopSimplifyID;
153 extern char &LCSSAID;
154 AU.addRequiredID(LoopSimplifyID);
155 AU.addPreservedID(LoopSimplifyID);
156 AU.addRequiredID(LCSSAID);
157 AU.addPreservedID(LCSSAID);
158 // This is used in the LPPassManager to perform LCSSA verification on passes
159 // which preserve LCSSA form.
160 AU.addRequired<LCSSAVerificationPass>();
161 AU.addPreserved<LCSSAVerificationPass>();
163 // Loop passes are designed to run inside of a loop pass manager which means
164 // that any function analyses they require must be required by the first loop
165 // pass in the manager (so that it is computed before the loop pass manager
166 // runs) and preserved by all loop passes in the manager. To make this
167 // reasonably robust, the set needed for most loop passes is maintained here.
168 // If your loop pass requires an analysis not listed here, you will need to
169 // carefully audit the loop pass manager nesting structure that results.
170 AU.addRequired<AAResultsWrapperPass>();
171 AU.addPreserved<AAResultsWrapperPass>();
172 AU.addPreserved<BasicAAWrapperPass>();
173 AU.addPreserved<GlobalsAAWrapperPass>();
174 AU.addPreserved<SCEVAAWrapperPass>();
175 AU.addRequired<ScalarEvolutionWrapperPass>();
176 AU.addPreserved<ScalarEvolutionWrapperPass>();
177 // FIXME: When all loop passes preserve MemorySSA, it can be required and
178 // preserved here instead of the individual handling in each pass.
181 /// Manually defined generic "LoopPass" dependency initialization. This is used
182 /// to initialize the exact set of passes from above in \c
183 /// getLoopAnalysisUsage. It can be used within a loop pass's initialization
184 /// with:
186 /// INITIALIZE_PASS_DEPENDENCY(LoopPass)
188 /// As if "LoopPass" were itself a pass.
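///
/// A sketch of how this is typically consumed by a concrete loop pass's
/// initialization (the pass name and strings below are placeholders, not part
/// of this file):
///
///   INITIALIZE_PASS_BEGIN(MyLoopPass, "my-loop-pass", "My loop pass", false, false)
///   INITIALIZE_PASS_DEPENDENCY(LoopPass)
///   INITIALIZE_PASS_END(MyLoopPass, "my-loop-pass", "My loop pass", false, false)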
189 void llvm::initializeLoopPassPass(PassRegistry &Registry) {
190 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
191 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
192 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
193 INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass)
194 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
195 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
196 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
197 INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
198 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
199 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
202 /// Create MDNode for input string.
203 static MDNode *createStringMetadata(Loop *TheLoop, StringRef Name, unsigned V) {
204 LLVMContext &Context = TheLoop->getHeader()->getContext();
205 Metadata *MDs[] = {
206 MDString::get(Context, Name),
207 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(Context), V))};
208 return MDNode::get(Context, MDs);
211 /// Set input string into loop metadata, keeping other values intact.
212 /// If the string is already in the loop metadata, update its value if it
213 /// differs.
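/// For example (illustrative), addStringMetadataToLoop(L, "llvm.loop.unroll.count", 4)
/// leaves the loop with metadata of roughly this shape:
///   !0 = distinct !{!0, ..., !1}
///   !1 = !{!"llvm.loop.unroll.count", i32 4}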
214 void llvm::addStringMetadataToLoop(Loop *TheLoop, const char *StringMD,
215 unsigned V) {
216 SmallVector<Metadata *, 4> MDs(1);
217 // If the loop already has metadata, retain it.
218 MDNode *LoopID = TheLoop->getLoopID();
219 if (LoopID) {
220 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
221 MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
222 // If it is of form key = value, try to parse it.
223 if (Node->getNumOperands() == 2) {
224 MDString *S = dyn_cast<MDString>(Node->getOperand(0));
225 if (S && S->getString() == StringMD) {
226 ConstantInt *IntMD =
227 mdconst::extract_or_null<ConstantInt>(Node->getOperand(1));
228 if (IntMD && IntMD->getSExtValue() == V)
229 // It is already in place. Do nothing.
230 return;
231 // We need to update the value, so just skip it here and it will
232 // be added after copying other existing nodes.
233 continue;
236 MDs.push_back(Node);
239 // Add new metadata.
240 MDs.push_back(createStringMetadata(TheLoop, StringMD, V));
241 // Replace current metadata node with new one.
242 LLVMContext &Context = TheLoop->getHeader()->getContext();
243 MDNode *NewLoopID = MDNode::get(Context, MDs);
244 // Set operand 0 to refer to the loop id itself.
245 NewLoopID->replaceOperandWith(0, NewLoopID);
246 TheLoop->setLoopID(NewLoopID);
249 std::optional<ElementCount>
250 llvm::getOptionalElementCountLoopAttribute(const Loop *TheLoop) {
251 std::optional<int> Width =
252 getOptionalIntLoopAttribute(TheLoop, "llvm.loop.vectorize.width");
254 if (Width) {
255 std::optional<int> IsScalable = getOptionalIntLoopAttribute(
256 TheLoop, "llvm.loop.vectorize.scalable.enable");
257 return ElementCount::get(*Width, IsScalable.value_or(false));
260 return std::nullopt;
263 std::optional<MDNode *> llvm::makeFollowupLoopID(
264 MDNode *OrigLoopID, ArrayRef<StringRef> FollowupOptions,
265 const char *InheritOptionsExceptPrefix, bool AlwaysNew) {
266 if (!OrigLoopID) {
267 if (AlwaysNew)
268 return nullptr;
269 return std::nullopt;
272 assert(OrigLoopID->getOperand(0) == OrigLoopID);
274 bool InheritAllAttrs = !InheritOptionsExceptPrefix;
275 bool InheritSomeAttrs =
276 InheritOptionsExceptPrefix && InheritOptionsExceptPrefix[0] != '\0';
277 SmallVector<Metadata *, 8> MDs;
278 MDs.push_back(nullptr);
280 bool Changed = false;
281 if (InheritAllAttrs || InheritSomeAttrs) {
282 for (const MDOperand &Existing : drop_begin(OrigLoopID->operands())) {
283 MDNode *Op = cast<MDNode>(Existing.get());
285 auto InheritThisAttribute = [InheritSomeAttrs,
286 InheritOptionsExceptPrefix](MDNode *Op) {
287 if (!InheritSomeAttrs)
288 return false;
290 // Skip malformed attribute metadata nodes.
291 if (Op->getNumOperands() == 0)
292 return true;
293 Metadata *NameMD = Op->getOperand(0).get();
294 if (!isa<MDString>(NameMD))
295 return true;
296 StringRef AttrName = cast<MDString>(NameMD)->getString();
298 // Do not inherit excluded attributes.
299 return !AttrName.starts_with(InheritOptionsExceptPrefix);
302 if (InheritThisAttribute(Op))
303 MDs.push_back(Op);
304 else
305 Changed = true;
307 } else {
308 // Modified if we dropped at least one attribute.
309 Changed = OrigLoopID->getNumOperands() > 1;
312 bool HasAnyFollowup = false;
313 for (StringRef OptionName : FollowupOptions) {
314 MDNode *FollowupNode = findOptionMDForLoopID(OrigLoopID, OptionName);
315 if (!FollowupNode)
316 continue;
318 HasAnyFollowup = true;
319 for (const MDOperand &Option : drop_begin(FollowupNode->operands())) {
320 MDs.push_back(Option.get());
321 Changed = true;
325 // Attributes of the followup loop were not specified explicitly, so signal
326 // to the transformation pass to add suitable attributes.
327 if (!AlwaysNew && !HasAnyFollowup)
328 return std::nullopt;
330 // If no attributes were added or removed, the previous loop ID can be reused.
331 if (!AlwaysNew && !Changed)
332 return OrigLoopID;
334 // No attributes is equivalent to having no !llvm.loop metadata at all.
335 if (MDs.size() == 1)
336 return nullptr;
338 // Build the new loop ID.
339 MDTuple *FollowupLoopID = MDNode::get(OrigLoopID->getContext(), MDs);
340 FollowupLoopID->replaceOperandWith(0, FollowupLoopID);
341 return FollowupLoopID;
344 bool llvm::hasDisableAllTransformsHint(const Loop *L) {
345 return getBooleanLoopAttribute(L, LLVMLoopDisableNonforced);
348 bool llvm::hasDisableLICMTransformsHint(const Loop *L) {
349 return getBooleanLoopAttribute(L, LLVMLoopDisableLICM);
352 TransformationMode llvm::hasUnrollTransformation(const Loop *L) {
353 if (getBooleanLoopAttribute(L, "llvm.loop.unroll.disable"))
354 return TM_SuppressedByUser;
356 std::optional<int> Count =
357 getOptionalIntLoopAttribute(L, "llvm.loop.unroll.count");
358 if (Count)
359 return *Count == 1 ? TM_SuppressedByUser : TM_ForcedByUser;
361 if (getBooleanLoopAttribute(L, "llvm.loop.unroll.enable"))
362 return TM_ForcedByUser;
364 if (getBooleanLoopAttribute(L, "llvm.loop.unroll.full"))
365 return TM_ForcedByUser;
367 if (hasDisableAllTransformsHint(L))
368 return TM_Disable;
370 return TM_Unspecified;
373 TransformationMode llvm::hasUnrollAndJamTransformation(const Loop *L) {
374 if (getBooleanLoopAttribute(L, "llvm.loop.unroll_and_jam.disable"))
375 return TM_SuppressedByUser;
377 std::optional<int> Count =
378 getOptionalIntLoopAttribute(L, "llvm.loop.unroll_and_jam.count");
379 if (Count)
380 return *Count == 1 ? TM_SuppressedByUser : TM_ForcedByUser;
382 if (getBooleanLoopAttribute(L, "llvm.loop.unroll_and_jam.enable"))
383 return TM_ForcedByUser;
385 if (hasDisableAllTransformsHint(L))
386 return TM_Disable;
388 return TM_Unspecified;
391 TransformationMode llvm::hasVectorizeTransformation(const Loop *L) {
392 std::optional<bool> Enable =
393 getOptionalBoolLoopAttribute(L, "llvm.loop.vectorize.enable");
395 if (Enable == false)
396 return TM_SuppressedByUser;
398 std::optional<ElementCount> VectorizeWidth =
399 getOptionalElementCountLoopAttribute(L);
400 std::optional<int> InterleaveCount =
401 getOptionalIntLoopAttribute(L, "llvm.loop.interleave.count");
403 // 'Forcing' vector width and interleave count to one effectively disables
404 // this transformation.
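// For example (illustrative metadata), a loop with llvm.loop.vectorize.enable
// set and carrying both !{!"llvm.loop.vectorize.width", i32 1} and
// !{!"llvm.loop.interleave.count", i32 1} is treated as suppressed by the user.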
405 if (Enable == true && VectorizeWidth && VectorizeWidth->isScalar() &&
406 InterleaveCount == 1)
407 return TM_SuppressedByUser;
409 if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
410 return TM_Disable;
412 if (Enable == true)
413 return TM_ForcedByUser;
415 if ((VectorizeWidth && VectorizeWidth->isScalar()) && InterleaveCount == 1)
416 return TM_Disable;
418 if ((VectorizeWidth && VectorizeWidth->isVector()) || InterleaveCount > 1)
419 return TM_Enable;
421 if (hasDisableAllTransformsHint(L))
422 return TM_Disable;
424 return TM_Unspecified;
427 TransformationMode llvm::hasDistributeTransformation(const Loop *L) {
428 if (getBooleanLoopAttribute(L, "llvm.loop.distribute.enable"))
429 return TM_ForcedByUser;
431 if (hasDisableAllTransformsHint(L))
432 return TM_Disable;
434 return TM_Unspecified;
437 TransformationMode llvm::hasLICMVersioningTransformation(const Loop *L) {
438 if (getBooleanLoopAttribute(L, "llvm.loop.licm_versioning.disable"))
439 return TM_SuppressedByUser;
441 if (hasDisableAllTransformsHint(L))
442 return TM_Disable;
444 return TM_Unspecified;
447 /// Does a BFS from a given node to all of its children inside a given loop.
448 /// The returned vector of basic blocks includes the starting point.
449 SmallVector<BasicBlock *, 16> llvm::collectChildrenInLoop(DominatorTree *DT,
450 DomTreeNode *N,
451 const Loop *CurLoop) {
452 SmallVector<BasicBlock *, 16> Worklist;
453 auto AddRegionToWorklist = [&](DomTreeNode *DTN) {
454 // Only include subregions in the top level loop.
455 BasicBlock *BB = DTN->getBlock();
456 if (CurLoop->contains(BB))
457 Worklist.push_back(DTN->getBlock());
460 AddRegionToWorklist(N);
462 for (size_t I = 0; I < Worklist.size(); I++) {
463 for (DomTreeNode *Child : DT->getNode(Worklist[I])->children())
464 AddRegionToWorklist(Child);
467 return Worklist;
470 bool llvm::isAlmostDeadIV(PHINode *PN, BasicBlock *LatchBlock, Value *Cond) {
471 int LatchIdx = PN->getBasicBlockIndex(LatchBlock);
472 assert(LatchIdx != -1 && "LatchBlock is not a case in this PHINode");
473 Value *IncV = PN->getIncomingValue(LatchIdx);
475 for (User *U : PN->users())
476 if (U != Cond && U != IncV) return false;
478 for (User *U : IncV->users())
479 if (U != Cond && U != PN) return false;
480 return true;
484 void llvm::deleteDeadLoop(Loop *L, DominatorTree *DT, ScalarEvolution *SE,
485 LoopInfo *LI, MemorySSA *MSSA) {
486 assert((!DT || L->isLCSSAForm(*DT)) && "Expected LCSSA!");
487 auto *Preheader = L->getLoopPreheader();
488 assert(Preheader && "Preheader should exist!");
490 std::unique_ptr<MemorySSAUpdater> MSSAU;
491 if (MSSA)
492 MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
494 // Now that we know the removal is safe, remove the loop by changing the
495 // branch from the preheader to go to the single exit block.
497 // Because we're deleting a large chunk of code at once, the sequence in which
498 // we remove things is very important to avoid invalidation issues.
500 // Tell ScalarEvolution that the loop is deleted. Do this before
501 // deleting the loop so that ScalarEvolution can look at the loop
502 // to determine what it needs to clean up.
503 if (SE) {
504 SE->forgetLoop(L);
505 SE->forgetBlockAndLoopDispositions();
508 Instruction *OldTerm = Preheader->getTerminator();
509 assert(!OldTerm->mayHaveSideEffects() &&
510 "Preheader must end with a side-effect-free terminator");
511 assert(OldTerm->getNumSuccessors() == 1 &&
512 "Preheader must have a single successor");
513 // Connect the preheader to the exit block. Keep the old edge to the header
514 // around to perform the dominator tree update in two separate steps
515 // -- #1 insertion of the edge preheader -> exit and #2 deletion of the edge
516 // preheader -> header.
519 //  0. Preheader          1. Preheader        2. Preheader
520 //        |                     |  |                  |
521 //        V                     |  V                  |
522 //      Header <--\             | Header <--\         | Header <--\
523 //        |  |     |            | |  |     |          | |  |     |
524 //        |  V     |            | |  V     |          | |  V     |
525 //        | Body --/            | | Body --/          | | Body --/
526 //        V                     V V                   V V
527 //      Exit                    Exit                  Exit
529 // By doing this in two separate steps we can perform the dominator tree
530 // update without using the batch update API.
532 // Even when the loop is never executed, we cannot remove the edge from the
533 // source block to the exit block. Consider the case where the unexecuted loop
534 // branches back to an outer loop. If we deleted the loop and removed the edge
535 // coming to this inner loop, this will break the outer loop structure (by
536 // deleting the backedge of the outer loop). If the outer loop is indeed a
537 // non-loop, it will be deleted in a future iteration of loop deletion pass.
538 IRBuilder<> Builder(OldTerm);
540 auto *ExitBlock = L->getUniqueExitBlock();
541 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
542 if (ExitBlock) {
543 assert(ExitBlock && "Should have a unique exit block!");
544 assert(L->hasDedicatedExits() && "Loop should have dedicated exits!");
546 Builder.CreateCondBr(Builder.getFalse(), L->getHeader(), ExitBlock);
547 // Remove the old branch. The conditional branch becomes a new terminator.
548 OldTerm->eraseFromParent();
550 // Rewrite phis in the exit block to get their inputs from the Preheader
551 // instead of the exiting block.
552 for (PHINode &P : ExitBlock->phis()) {
553 // Set the zero'th element of Phi to be from the preheader and remove all
554 // other incoming values. Given the loop has dedicated exits, all other
555 // incoming values must be from the exiting blocks.
556 int PredIndex = 0;
557 P.setIncomingBlock(PredIndex, Preheader);
558 // Removes all incoming values from all other exiting blocks (including
559 // duplicate values from an exiting block).
560 // Nuke all entries except the zero'th entry which is the preheader entry.
561 P.removeIncomingValueIf([](unsigned Idx) { return Idx != 0; },
562 /* DeletePHIIfEmpty */ false);
564 assert((P.getNumIncomingValues() == 1 &&
565 P.getIncomingBlock(PredIndex) == Preheader) &&
566 "Should have exactly one value and that's from the preheader!");
569 if (DT) {
570 DTU.applyUpdates({{DominatorTree::Insert, Preheader, ExitBlock}});
571 if (MSSA) {
572 MSSAU->applyUpdates({{DominatorTree::Insert, Preheader, ExitBlock}},
573 *DT);
574 if (VerifyMemorySSA)
575 MSSA->verifyMemorySSA();
579 // Disconnect the loop body by branching directly to its exit.
580 Builder.SetInsertPoint(Preheader->getTerminator());
581 Builder.CreateBr(ExitBlock);
582 // Remove the old branch.
583 Preheader->getTerminator()->eraseFromParent();
584 } else {
585 assert(L->hasNoExitBlocks() &&
586 "Loop should have either zero or one exit blocks.");
588 Builder.SetInsertPoint(OldTerm);
589 Builder.CreateUnreachable();
590 Preheader->getTerminator()->eraseFromParent();
593 if (DT) {
594 DTU.applyUpdates({{DominatorTree::Delete, Preheader, L->getHeader()}});
595 if (MSSA) {
596 MSSAU->applyUpdates({{DominatorTree::Delete, Preheader, L->getHeader()}},
597 *DT);
598 SmallSetVector<BasicBlock *, 8> DeadBlockSet(L->block_begin(),
599 L->block_end());
600 MSSAU->removeBlocks(DeadBlockSet);
601 if (VerifyMemorySSA)
602 MSSA->verifyMemorySSA();
606 // Use a set to deduplicate and vectors to guarantee deterministic ordering.
607 llvm::SmallDenseSet<DebugVariable, 4> DeadDebugSet;
608 llvm::SmallVector<DbgVariableIntrinsic *, 4> DeadDebugInst;
609 llvm::SmallVector<DbgVariableRecord *, 4> DeadDbgVariableRecords;
611 if (ExitBlock) {
612 // Given LCSSA form is satisfied, we should not have users of instructions
613 // within the dead loop outside of the loop. However, LCSSA doesn't take
614 // unreachable uses into account. We handle them here.
615 // We could do this after dropping all references (then all users inside the
616 // loop would already be eliminated and there would be less work to do), but
617 // per the API doc of User::dropAllReferences the only valid operation after
618 // dropping references is deletion. So first substitute all uses of
619 // instructions from the loop with a poison value of the corresponding type.
620 for (auto *Block : L->blocks())
621 for (Instruction &I : *Block) {
622 auto *Poison = PoisonValue::get(I.getType());
623 for (Use &U : llvm::make_early_inc_range(I.uses())) {
624 if (auto *Usr = dyn_cast<Instruction>(U.getUser()))
625 if (L->contains(Usr->getParent()))
626 continue;
627 // If we have a DT, we can check that uses outside the loop occur only in
628 // unreachable blocks.
629 if (DT)
630 assert(!DT->isReachableFromEntry(U) &&
631 "Unexpected user in reachable block");
632 U.set(Poison);
635 // RemoveDIs: do the same as below for DbgVariableRecords.
636 if (Block->IsNewDbgInfoFormat) {
637 for (DbgVariableRecord &DVR : llvm::make_early_inc_range(
638 filterDbgVars(I.getDbgRecordRange()))) {
639 DebugVariable Key(DVR.getVariable(), DVR.getExpression(),
640 DVR.getDebugLoc().get());
641 if (!DeadDebugSet.insert(Key).second)
642 continue;
643 // Unlinks the DVR from its container, for later insertion.
644 DVR.removeFromParent();
645 DeadDbgVariableRecords.push_back(&DVR);
649 // For one of each variable encountered, preserve a debug intrinsic (set
650 // to Poison) and transfer it to the loop exit. This terminates any
651 // variable locations that were set during the loop.
652 auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I);
653 if (!DVI)
654 continue;
655 if (!DeadDebugSet.insert(DebugVariable(DVI)).second)
656 continue;
657 DeadDebugInst.push_back(DVI);
660 // After the loop has been deleted all the values defined and modified
661 // inside the loop are going to be unavailable. Values computed in the
662 // loop will have been deleted, automatically causing their debug uses
663 // to be replaced with undef. Loop invariant values will still be available.
664 // Move dbg.values out of the loop so that earlier location ranges are still
665 // terminated and loop invariant assignments are preserved.
666 DIBuilder DIB(*ExitBlock->getModule());
667 BasicBlock::iterator InsertDbgValueBefore =
668 ExitBlock->getFirstInsertionPt();
669 assert(InsertDbgValueBefore != ExitBlock->end() &&
670 "There should be a non-PHI instruction in exit block, else these "
671 "instructions will have no parent.");
673 for (auto *DVI : DeadDebugInst)
674 DVI->moveBefore(*ExitBlock, InsertDbgValueBefore);
676 // Due to the "head" bit in BasicBlock::iterator, we're going to insert
677 // each DbgVariableRecord right at the start of the block, whereas dbg.values
678 // would be repeatedly inserted before the first instruction. To replicate
679 // this behaviour, do it backwards.
680 for (DbgVariableRecord *DVR : llvm::reverse(DeadDbgVariableRecords))
681 ExitBlock->insertDbgRecordBefore(DVR, InsertDbgValueBefore);
684 // Remove the block from the reference counting scheme, so that we can
685 // delete it freely later.
686 for (auto *Block : L->blocks())
687 Block->dropAllReferences();
689 if (MSSA && VerifyMemorySSA)
690 MSSA->verifyMemorySSA();
692 if (LI) {
693 // Erase the instructions and the blocks without having to worry
694 // about ordering because we already dropped the references.
695 // NOTE: This iteration is safe because erasing the block does not remove
696 // its entry from the loop's block list. We do that in the next section.
697 for (BasicBlock *BB : L->blocks())
698 BB->eraseFromParent();
700 // Finally, the blocks from loopinfo. This has to happen late because
701 // otherwise our loop iterators won't work.
703 SmallPtrSet<BasicBlock *, 8> blocks;
704 blocks.insert(L->block_begin(), L->block_end());
705 for (BasicBlock *BB : blocks)
706 LI->removeBlock(BB);
708 // The last step is to update LoopInfo now that we've eliminated this loop.
709 // Note: LoopInfo::erase removes the given loop and relinks its subloops with
710 // its parent, while removeLoop/removeChildLoop remove the given loop but do
711 // not relink its subloops, which is what we want.
712 if (Loop *ParentLoop = L->getParentLoop()) {
713 Loop::iterator I = find(*ParentLoop, L);
714 assert(I != ParentLoop->end() && "Couldn't find loop");
715 ParentLoop->removeChildLoop(I);
716 } else {
717 Loop::iterator I = find(*LI, L);
718 assert(I != LI->end() && "Couldn't find loop");
719 LI->removeLoop(I);
721 LI->destroy(L);
725 void llvm::breakLoopBackedge(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
726 LoopInfo &LI, MemorySSA *MSSA) {
727 auto *Latch = L->getLoopLatch();
728 assert(Latch && "multiple latches not yet supported");
729 auto *Header = L->getHeader();
730 Loop *OutermostLoop = L->getOutermostLoop();
732 SE.forgetLoop(L);
733 SE.forgetBlockAndLoopDispositions();
735 std::unique_ptr<MemorySSAUpdater> MSSAU;
736 if (MSSA)
737 MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
739 // Update the CFG and domtree. We chose to special case a couple of
740 // common cases for code quality and test readability reasons.
741 [&]() -> void {
742 if (auto *BI = dyn_cast<BranchInst>(Latch->getTerminator())) {
743 if (!BI->isConditional()) {
744 DomTreeUpdater DTU(&DT, DomTreeUpdater::UpdateStrategy::Eager);
745 (void)changeToUnreachable(BI, /*PreserveLCSSA*/ true, &DTU,
746 MSSAU.get());
747 return;
750 // Conditional latch/exit - note that the latch can be shared by an inner
751 // and an outer loop, so the other target doesn't need to be an exit.
752 if (L->isLoopExiting(Latch)) {
753 // TODO: Generalize ConstantFoldTerminator so that it can be used
754 // here without invalidating LCSSA or MemorySSA. (Tricky case for
755 // LCSSA: header is an exit block of a preceding sibling loop w/o
756 // dedicated exits.)
757 const unsigned ExitIdx = L->contains(BI->getSuccessor(0)) ? 1 : 0;
758 BasicBlock *ExitBB = BI->getSuccessor(ExitIdx);
760 DomTreeUpdater DTU(&DT, DomTreeUpdater::UpdateStrategy::Eager);
761 Header->removePredecessor(Latch, true);
763 IRBuilder<> Builder(BI);
764 auto *NewBI = Builder.CreateBr(ExitBB);
765 // Transfer the metadata to the new branch instruction (minus the
766 // loop info since this is no longer a loop)
767 NewBI->copyMetadata(*BI, {LLVMContext::MD_dbg,
768 LLVMContext::MD_annotation});
770 BI->eraseFromParent();
771 DTU.applyUpdates({{DominatorTree::Delete, Latch, Header}});
772 if (MSSA)
773 MSSAU->applyUpdates({{DominatorTree::Delete, Latch, Header}}, DT);
774 return;
778 // General case. By splitting the backedge and then explicitly making it
779 // unreachable, we gracefully handle corner cases such as switch and invoke
780 // terminators.
781 auto *BackedgeBB = SplitEdge(Latch, Header, &DT, &LI, MSSAU.get());
783 DomTreeUpdater DTU(&DT, DomTreeUpdater::UpdateStrategy::Eager);
784 (void)changeToUnreachable(BackedgeBB->getTerminator(),
785 /*PreserveLCSSA*/ true, &DTU, MSSAU.get());
786 }();
788 // Erase (and destroy) this loop instance. Handles relinking sub-loops
789 // and blocks within the loop as needed.
790 LI.erase(L);
792 // If the loop we broke had a parent, then changeToUnreachable might have
793 // caused a block to be removed from the parent loop (see loop_nest_lcssa
794 // test case in zero-btc.ll for an example), thus changing the parent's
795 // exit blocks. If that happened, we need to rebuild LCSSA on the outermost
796 // loop, which might have had a block removed.
797 if (OutermostLoop != L)
798 formLCSSARecursively(*OutermostLoop, DT, &LI, &SE);
802 /// Checks if \p L has an exiting latch branch. There may also be other
803 /// exiting blocks. Returns the branch instruction terminating the loop
804 /// latch if the above check is successful, nullptr otherwise.
805 static BranchInst *getExpectedExitLoopLatchBranch(Loop *L) {
806 BasicBlock *Latch = L->getLoopLatch();
807 if (!Latch)
808 return nullptr;
810 BranchInst *LatchBR = dyn_cast<BranchInst>(Latch->getTerminator());
811 if (!LatchBR || LatchBR->getNumSuccessors() != 2 || !L->isLoopExiting(Latch))
812 return nullptr;
814 assert((LatchBR->getSuccessor(0) == L->getHeader() ||
815 LatchBR->getSuccessor(1) == L->getHeader()) &&
816 "At least one edge out of the latch must go to the header");
818 return LatchBR;
821 /// Return the estimated trip count for any exiting branch which dominates
822 /// the loop latch.
823 static std::optional<uint64_t> getEstimatedTripCount(BranchInst *ExitingBranch,
824 Loop *L,
825 uint64_t &OrigExitWeight) {
826 // To estimate the number of times the loop body was executed, we want to
827 // know the number of times the backedge was taken, vs. the number of times
828 // we exited the loop.
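// For example (illustrative weights), a latch branch carrying
// !{!"branch_weights", i32 300, i32 100}, with the backedge taken 300 times
// for every 100 exits, gives an estimated exit count of 300 / 100 = 3 and an
// estimated trip count of 4.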
829 uint64_t LoopWeight, ExitWeight;
830 if (!extractBranchWeights(*ExitingBranch, LoopWeight, ExitWeight))
831 return std::nullopt;
833 if (L->contains(ExitingBranch->getSuccessor(1)))
834 std::swap(LoopWeight, ExitWeight);
836 if (!ExitWeight)
837 // Don't have a way to return predicated infinite
838 return std::nullopt;
840 OrigExitWeight = ExitWeight;
842 // Estimated exit count is a ratio of the loop weight by the weight of the
843 // edge exiting the loop, rounded to nearest.
844 uint64_t ExitCount = llvm::divideNearest(LoopWeight, ExitWeight);
845 // Estimated trip count is one plus estimated exit count.
846 return ExitCount + 1;
849 std::optional<unsigned>
850 llvm::getLoopEstimatedTripCount(Loop *L,
851 unsigned *EstimatedLoopInvocationWeight) {
852 // Currently we take the estimated exit count only from the loop latch,
853 // ignoring other exiting blocks. This can overestimate the trip count
854 // if we exit through another exit, but can never underestimate it.
855 // TODO: incorporate information from other exits
856 if (BranchInst *LatchBranch = getExpectedExitLoopLatchBranch(L)) {
857 uint64_t ExitWeight;
858 if (std::optional<uint64_t> EstTripCount =
859 getEstimatedTripCount(LatchBranch, L, ExitWeight)) {
860 if (EstimatedLoopInvocationWeight)
861 *EstimatedLoopInvocationWeight = ExitWeight;
862 return *EstTripCount;
865 return std::nullopt;
868 bool llvm::setLoopEstimatedTripCount(Loop *L, unsigned EstimatedTripCount,
869 unsigned EstimatedloopInvocationWeight) {
870 // At the moment, we only support changing the estimated trip count of the
871 // latch branch. We could extend this API to manipulate estimated trip
872 // counts for any exit.
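// For example (illustrative numbers), setLoopEstimatedTripCount(L, 4, 100)
// results in latch branch weights of 300 for the backedge and 100 for the
// exit, i.e. an estimated trip count of (300 + 100) / 100 = 4.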
873 BranchInst *LatchBranch = getExpectedExitLoopLatchBranch(L);
874 if (!LatchBranch)
875 return false;
877 // Calculate taken and exit weights.
878 unsigned LatchExitWeight = 0;
879 unsigned BackedgeTakenWeight = 0;
881 if (EstimatedTripCount > 0) {
882 LatchExitWeight = EstimatedloopInvocationWeight;
883 BackedgeTakenWeight = (EstimatedTripCount - 1) * LatchExitWeight;
886 // Swap if the backedge is taken when the condition is "false".
887 if (LatchBranch->getSuccessor(0) != L->getHeader())
888 std::swap(BackedgeTakenWeight, LatchExitWeight);
890 MDBuilder MDB(LatchBranch->getContext());
892 // Set/Update profile metadata.
893 LatchBranch->setMetadata(
894 LLVMContext::MD_prof,
895 MDB.createBranchWeights(BackedgeTakenWeight, LatchExitWeight));
897 return true;
900 bool llvm::hasIterationCountInvariantInParent(Loop *InnerLoop,
901 ScalarEvolution &SE) {
902 Loop *OuterL = InnerLoop->getParentLoop();
903 if (!OuterL)
904 return true;
906 // Get the backedge taken count for the inner loop
907 BasicBlock *InnerLoopLatch = InnerLoop->getLoopLatch();
908 const SCEV *InnerLoopBECountSC = SE.getExitCount(InnerLoop, InnerLoopLatch);
909 if (isa<SCEVCouldNotCompute>(InnerLoopBECountSC) ||
910 !InnerLoopBECountSC->getType()->isIntegerTy())
911 return false;
913 // Get whether count is invariant to the outer loop
914 ScalarEvolution::LoopDisposition LD =
915 SE.getLoopDisposition(InnerLoopBECountSC, OuterL);
916 if (LD != ScalarEvolution::LoopInvariant)
917 return false;
919 return true;
922 constexpr Intrinsic::ID llvm::getReductionIntrinsicID(RecurKind RK) {
923 switch (RK) {
924 default:
925 llvm_unreachable("Unexpected recurrence kind");
926 case RecurKind::Add:
927 return Intrinsic::vector_reduce_add;
928 case RecurKind::Mul:
929 return Intrinsic::vector_reduce_mul;
930 case RecurKind::And:
931 return Intrinsic::vector_reduce_and;
932 case RecurKind::Or:
933 return Intrinsic::vector_reduce_or;
934 case RecurKind::Xor:
935 return Intrinsic::vector_reduce_xor;
936 case RecurKind::FMulAdd:
937 case RecurKind::FAdd:
938 return Intrinsic::vector_reduce_fadd;
939 case RecurKind::FMul:
940 return Intrinsic::vector_reduce_fmul;
941 case RecurKind::SMax:
942 return Intrinsic::vector_reduce_smax;
943 case RecurKind::SMin:
944 return Intrinsic::vector_reduce_smin;
945 case RecurKind::UMax:
946 return Intrinsic::vector_reduce_umax;
947 case RecurKind::UMin:
948 return Intrinsic::vector_reduce_umin;
949 case RecurKind::FMax:
950 return Intrinsic::vector_reduce_fmax;
951 case RecurKind::FMin:
952 return Intrinsic::vector_reduce_fmin;
953 case RecurKind::FMaximum:
954 return Intrinsic::vector_reduce_fmaximum;
955 case RecurKind::FMinimum:
956 return Intrinsic::vector_reduce_fminimum;
960 unsigned llvm::getArithmeticReductionInstruction(Intrinsic::ID RdxID) {
961 switch (RdxID) {
962 case Intrinsic::vector_reduce_fadd:
963 return Instruction::FAdd;
964 case Intrinsic::vector_reduce_fmul:
965 return Instruction::FMul;
966 case Intrinsic::vector_reduce_add:
967 return Instruction::Add;
968 case Intrinsic::vector_reduce_mul:
969 return Instruction::Mul;
970 case Intrinsic::vector_reduce_and:
971 return Instruction::And;
972 case Intrinsic::vector_reduce_or:
973 return Instruction::Or;
974 case Intrinsic::vector_reduce_xor:
975 return Instruction::Xor;
976 case Intrinsic::vector_reduce_smax:
977 case Intrinsic::vector_reduce_smin:
978 case Intrinsic::vector_reduce_umax:
979 case Intrinsic::vector_reduce_umin:
980 return Instruction::ICmp;
981 case Intrinsic::vector_reduce_fmax:
982 case Intrinsic::vector_reduce_fmin:
983 return Instruction::FCmp;
984 default:
985 llvm_unreachable("Unexpected ID");
989 Intrinsic::ID llvm::getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID) {
990 switch (RdxID) {
991 default:
992 llvm_unreachable("Unknown min/max recurrence kind");
993 case Intrinsic::vector_reduce_umin:
994 return Intrinsic::umin;
995 case Intrinsic::vector_reduce_umax:
996 return Intrinsic::umax;
997 case Intrinsic::vector_reduce_smin:
998 return Intrinsic::smin;
999 case Intrinsic::vector_reduce_smax:
1000 return Intrinsic::smax;
1001 case Intrinsic::vector_reduce_fmin:
1002 return Intrinsic::minnum;
1003 case Intrinsic::vector_reduce_fmax:
1004 return Intrinsic::maxnum;
1005 case Intrinsic::vector_reduce_fminimum:
1006 return Intrinsic::minimum;
1007 case Intrinsic::vector_reduce_fmaximum:
1008 return Intrinsic::maximum;
1012 Intrinsic::ID llvm::getMinMaxReductionIntrinsicOp(RecurKind RK) {
1013 switch (RK) {
1014 default:
1015 llvm_unreachable("Unknown min/max recurrence kind");
1016 case RecurKind::UMin:
1017 return Intrinsic::umin;
1018 case RecurKind::UMax:
1019 return Intrinsic::umax;
1020 case RecurKind::SMin:
1021 return Intrinsic::smin;
1022 case RecurKind::SMax:
1023 return Intrinsic::smax;
1024 case RecurKind::FMin:
1025 return Intrinsic::minnum;
1026 case RecurKind::FMax:
1027 return Intrinsic::maxnum;
1028 case RecurKind::FMinimum:
1029 return Intrinsic::minimum;
1030 case RecurKind::FMaximum:
1031 return Intrinsic::maximum;
1035 RecurKind llvm::getMinMaxReductionRecurKind(Intrinsic::ID RdxID) {
1036 switch (RdxID) {
1037 case Intrinsic::vector_reduce_smax:
1038 return RecurKind::SMax;
1039 case Intrinsic::vector_reduce_smin:
1040 return RecurKind::SMin;
1041 case Intrinsic::vector_reduce_umax:
1042 return RecurKind::UMax;
1043 case Intrinsic::vector_reduce_umin:
1044 return RecurKind::UMin;
1045 case Intrinsic::vector_reduce_fmax:
1046 return RecurKind::FMax;
1047 case Intrinsic::vector_reduce_fmin:
1048 return RecurKind::FMin;
1049 default:
1050 return RecurKind::None;
1054 CmpInst::Predicate llvm::getMinMaxReductionPredicate(RecurKind RK) {
1055 switch (RK) {
1056 default:
1057 llvm_unreachable("Unknown min/max recurrence kind");
1058 case RecurKind::UMin:
1059 return CmpInst::ICMP_ULT;
1060 case RecurKind::UMax:
1061 return CmpInst::ICMP_UGT;
1062 case RecurKind::SMin:
1063 return CmpInst::ICMP_SLT;
1064 case RecurKind::SMax:
1065 return CmpInst::ICMP_SGT;
1066 case RecurKind::FMin:
1067 return CmpInst::FCMP_OLT;
1068 case RecurKind::FMax:
1069 return CmpInst::FCMP_OGT;
1070 // We do not add FMinimum/FMaximum recurrence kind here since there is no
1071 // equivalent predicate which compares signed zeroes according to the
1072 // semantics of the intrinsics (llvm.minimum/maximum).
1076 Value *llvm::createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left,
1077 Value *Right) {
1078 Type *Ty = Left->getType();
1079 if (Ty->isIntOrIntVectorTy() ||
1080 (RK == RecurKind::FMinimum || RK == RecurKind::FMaximum)) {
1081 // TODO: Add float minnum/maxnum support when FMF nnan is set.
1082 Intrinsic::ID Id = getMinMaxReductionIntrinsicOp(RK);
1083 return Builder.CreateIntrinsic(Ty, Id, {Left, Right}, nullptr,
1084 "rdx.minmax");
1086 CmpInst::Predicate Pred = getMinMaxReductionPredicate(RK);
1087 Value *Cmp = Builder.CreateCmp(Pred, Left, Right, "rdx.minmax.cmp");
1088 Value *Select = Builder.CreateSelect(Cmp, Left, Right, "rdx.minmax.select");
1089 return Select;
1092 // Helper to generate an ordered reduction.
1093 Value *llvm::getOrderedReduction(IRBuilderBase &Builder, Value *Acc, Value *Src,
1094 unsigned Op, RecurKind RdxKind) {
1095 unsigned VF = cast<FixedVectorType>(Src->getType())->getNumElements();
1097 // Extract and apply reduction ops in ascending order:
1098 // e.g. ((((Acc + Scl[0]) + Scl[1]) + Scl[2]) + ...) + Scl[VF-1]
1099 Value *Result = Acc;
1100 for (unsigned ExtractIdx = 0; ExtractIdx != VF; ++ExtractIdx) {
1101 Value *Ext =
1102 Builder.CreateExtractElement(Src, Builder.getInt32(ExtractIdx));
1104 if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
1105 Result = Builder.CreateBinOp((Instruction::BinaryOps)Op, Result, Ext,
1106 "bin.rdx");
1107 } else {
1108 assert(RecurrenceDescriptor::isMinMaxRecurrenceKind(RdxKind) &&
1109 "Invalid min/max");
1110 Result = createMinMaxOp(Builder, RdxKind, Result, Ext);
1114 return Result;
1117 // Helper to generate a log2 shuffle reduction.
1118 Value *llvm::getShuffleReduction(IRBuilderBase &Builder, Value *Src,
1119 unsigned Op,
1120 TargetTransformInfo::ReductionShuffle RS,
1121 RecurKind RdxKind) {
1122 unsigned VF = cast<FixedVectorType>(Src->getType())->getNumElements();
1123 // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles
1124 // and vector ops, reducing the set of values being computed by half each
1125 // round.
1126 assert(isPowerOf2_32(VF) &&
1127 "Reduction emission only supported for pow2 vectors!");
1128 // Note: fast-math flags are controlled by the builder configuration
1129 // and are assumed to apply to all generated arithmetic instructions. Other
1130 // poison generating flags (nsw/nuw/inbounds/inrange/exact) are not part
1131 // of the builder configuration, and since they're not passed explicitly,
1132 // will never be relevant here. Note that it would be generally unsound to
1133 // propagate these from an intrinsic call to the expansion anyway, as we
1134 // change the order of operations.
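// For example (illustrative, split-half strategy, VF = 8): the shuffle masks
// are <4,5,6,7,u,u,u,u>, then <2,3,u,u,u,u,u,u>, then <1,u,u,u,u,u,u,u>;
// after the three shuffle+op rounds lane 0 holds the reduced value.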
1135 auto BuildShuffledOp = [&Builder, &Op,
1136 &RdxKind](SmallVectorImpl<int> &ShuffleMask,
1137 Value *&TmpVec) -> void {
1138 Value *Shuf = Builder.CreateShuffleVector(TmpVec, ShuffleMask, "rdx.shuf");
1139 if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
1140 TmpVec = Builder.CreateBinOp((Instruction::BinaryOps)Op, TmpVec, Shuf,
1141 "bin.rdx");
1142 } else {
1143 assert(RecurrenceDescriptor::isMinMaxRecurrenceKind(RdxKind) &&
1144 "Invalid min/max");
1145 TmpVec = createMinMaxOp(Builder, RdxKind, TmpVec, Shuf);
1149 Value *TmpVec = Src;
1150 if (TargetTransformInfo::ReductionShuffle::Pairwise == RS) {
1151 SmallVector<int, 32> ShuffleMask(VF);
1152 for (unsigned stride = 1; stride < VF; stride <<= 1) {
1153 // Initialise the mask with undef.
1154 std::fill(ShuffleMask.begin(), ShuffleMask.end(), -1);
1155 for (unsigned j = 0; j < VF; j += stride << 1) {
1156 ShuffleMask[j] = j + stride;
1158 BuildShuffledOp(ShuffleMask, TmpVec);
1160 } else {
1161 SmallVector<int, 32> ShuffleMask(VF);
1162 for (unsigned i = VF; i != 1; i >>= 1) {
1163 // Move the upper half of the vector to the lower half.
1164 for (unsigned j = 0; j != i / 2; ++j)
1165 ShuffleMask[j] = i / 2 + j;
1167 // Fill the rest of the mask with undef.
1168 std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), -1);
1169 BuildShuffledOp(ShuffleMask, TmpVec);
1172 // The result is in the first element of the vector.
1173 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
1176 Value *llvm::createAnyOfReduction(IRBuilderBase &Builder, Value *Src,
1177 const RecurrenceDescriptor &Desc,
1178 PHINode *OrigPhi) {
1179 assert(
1180 RecurrenceDescriptor::isAnyOfRecurrenceKind(Desc.getRecurrenceKind()) &&
1181 "Unexpected reduction kind");
1182 Value *InitVal = Desc.getRecurrenceStartValue();
1183 Value *NewVal = nullptr;
1185 // First use the original phi to determine the new value we're trying to
1186 // select from in the loop.
1187 SelectInst *SI = nullptr;
1188 for (auto *U : OrigPhi->users()) {
1189 if ((SI = dyn_cast<SelectInst>(U)))
1190 break;
1192 assert(SI && "One user of the original phi should be a select");
1194 if (SI->getTrueValue() == OrigPhi)
1195 NewVal = SI->getFalseValue();
1196 else {
1197 assert(SI->getFalseValue() == OrigPhi &&
1198 "At least one input to the select should be the original Phi");
1199 NewVal = SI->getTrueValue();
1202 // If any predicate is true it means that we want to select the new value.
1203 Value *AnyOf =
1204 Src->getType()->isVectorTy() ? Builder.CreateOrReduce(Src) : Src;
1205 // The compares in the loop may yield poison, which propagates through the
1206 // bitwise ORs. Freeze it here before the condition is used.
1207 AnyOf = Builder.CreateFreeze(AnyOf);
1208 return Builder.CreateSelect(AnyOf, NewVal, InitVal, "rdx.select");
1211 Value *llvm::createFindLastIVReduction(IRBuilderBase &Builder, Value *Src,
1212 const RecurrenceDescriptor &Desc) {
1213 assert(RecurrenceDescriptor::isFindLastIVRecurrenceKind(
1214 Desc.getRecurrenceKind()) &&
1215 "Unexpected reduction kind");
1216 Value *StartVal = Desc.getRecurrenceStartValue();
1217 Value *Sentinel = Desc.getSentinelValue();
1218 Value *MaxRdx = Src->getType()->isVectorTy()
1219 ? Builder.CreateIntMaxReduce(Src, true)
1220 : Src;
1221 // Correct the final reduction result back to the start value if the maximum
1222 // reduction is the sentinel value.
1223 Value *Cmp =
1224 Builder.CreateCmp(CmpInst::ICMP_NE, MaxRdx, Sentinel, "rdx.select.cmp");
1225 return Builder.CreateSelect(Cmp, MaxRdx, StartVal, "rdx.select");
1228 Value *llvm::getReductionIdentity(Intrinsic::ID RdxID, Type *Ty,
1229 FastMathFlags Flags) {
1230 bool Negative = false;
1231 switch (RdxID) {
1232 default:
1233 llvm_unreachable("Expecting a reduction intrinsic");
1234 case Intrinsic::vector_reduce_add:
1235 case Intrinsic::vector_reduce_mul:
1236 case Intrinsic::vector_reduce_or:
1237 case Intrinsic::vector_reduce_xor:
1238 case Intrinsic::vector_reduce_and:
1239 case Intrinsic::vector_reduce_fadd:
1240 case Intrinsic::vector_reduce_fmul: {
1241 unsigned Opc = getArithmeticReductionInstruction(RdxID);
1242 return ConstantExpr::getBinOpIdentity(Opc, Ty, false,
1243 Flags.noSignedZeros());
1245 case Intrinsic::vector_reduce_umax:
1246 case Intrinsic::vector_reduce_umin:
1247 case Intrinsic::vector_reduce_smin:
1248 case Intrinsic::vector_reduce_smax: {
1249 Intrinsic::ID ScalarID = getMinMaxReductionIntrinsicOp(RdxID);
1250 return ConstantExpr::getIntrinsicIdentity(ScalarID, Ty);
1252 case Intrinsic::vector_reduce_fmax:
1253 case Intrinsic::vector_reduce_fmaximum:
1254 Negative = true;
1255 [[fallthrough]];
1256 case Intrinsic::vector_reduce_fmin:
1257 case Intrinsic::vector_reduce_fminimum: {
1258 bool PropagatesNaN = RdxID == Intrinsic::vector_reduce_fminimum ||
1259 RdxID == Intrinsic::vector_reduce_fmaximum;
1260 const fltSemantics &Semantics = Ty->getFltSemantics();
1261 return (!Flags.noNaNs() && !PropagatesNaN)
1262 ? ConstantFP::getQNaN(Ty, Negative)
1263 : !Flags.noInfs()
1264 ? ConstantFP::getInfinity(Ty, Negative)
1265 : ConstantFP::get(Ty, APFloat::getLargest(Semantics, Negative));
1270 Value *llvm::getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF) {
1271 assert((!(K == RecurKind::FMin || K == RecurKind::FMax) ||
1272 (FMF.noNaNs() && FMF.noSignedZeros())) &&
1273 "nnan, nsz is expected to be set for FP min/max reduction.");
1274 Intrinsic::ID RdxID = getReductionIntrinsicID(K);
1275 return getReductionIdentity(RdxID, Tp, FMF);
1278 Value *llvm::createSimpleReduction(IRBuilderBase &Builder, Value *Src,
1279 RecurKind RdxKind) {
1280 auto *SrcVecEltTy = cast<VectorType>(Src->getType())->getElementType();
1281 auto getIdentity = [&]() {
1282 return getRecurrenceIdentity(RdxKind, SrcVecEltTy,
1283 Builder.getFastMathFlags());
1285 switch (RdxKind) {
1286 case RecurKind::Add:
1287 case RecurKind::Mul:
1288 case RecurKind::And:
1289 case RecurKind::Or:
1290 case RecurKind::Xor:
1291 case RecurKind::SMax:
1292 case RecurKind::SMin:
1293 case RecurKind::UMax:
1294 case RecurKind::UMin:
1295 case RecurKind::FMax:
1296 case RecurKind::FMin:
1297 case RecurKind::FMinimum:
1298 case RecurKind::FMaximum:
1299 return Builder.CreateUnaryIntrinsic(getReductionIntrinsicID(RdxKind), Src);
1300 case RecurKind::FMulAdd:
1301 case RecurKind::FAdd:
1302 return Builder.CreateFAddReduce(getIdentity(), Src);
1303 case RecurKind::FMul:
1304 return Builder.CreateFMulReduce(getIdentity(), Src);
1305 default:
1306 llvm_unreachable("Unhandled opcode");
1310 Value *llvm::createSimpleReduction(VectorBuilder &VBuilder, Value *Src,
1311 const RecurrenceDescriptor &Desc) {
1312 RecurKind Kind = Desc.getRecurrenceKind();
1313 assert(!RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind) &&
1314 "AnyOf reduction is not supported.");
1315 Intrinsic::ID Id = getReductionIntrinsicID(Kind);
1316 auto *SrcTy = cast<VectorType>(Src->getType());
1317 Type *SrcEltTy = SrcTy->getElementType();
1318 Value *Iden = getRecurrenceIdentity(Kind, SrcEltTy, Desc.getFastMathFlags());
1319 Value *Ops[] = {Iden, Src};
1320 return VBuilder.createSimpleReduction(Id, SrcTy, Ops);
1323 Value *llvm::createReduction(IRBuilderBase &B,
1324 const RecurrenceDescriptor &Desc, Value *Src,
1325 PHINode *OrigPhi) {
1326 // TODO: Support in-order reductions based on the recurrence descriptor.
1327 // All ops in the reduction inherit fast-math-flags from the recurrence
1328 // descriptor.
1329 IRBuilderBase::FastMathFlagGuard FMFGuard(B);
1330 B.setFastMathFlags(Desc.getFastMathFlags());
1332 RecurKind RK = Desc.getRecurrenceKind();
1333 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RK))
1334 return createAnyOfReduction(B, Src, Desc, OrigPhi);
1335 if (RecurrenceDescriptor::isFindLastIVRecurrenceKind(RK))
1336 return createFindLastIVReduction(B, Src, Desc);
1338 return createSimpleReduction(B, Src, RK);
1341 Value *llvm::createOrderedReduction(IRBuilderBase &B,
1342 const RecurrenceDescriptor &Desc,
1343 Value *Src, Value *Start) {
1344 assert((Desc.getRecurrenceKind() == RecurKind::FAdd ||
1345 Desc.getRecurrenceKind() == RecurKind::FMulAdd) &&
1346 "Unexpected reduction kind");
1347 assert(Src->getType()->isVectorTy() && "Expected a vector type");
1348 assert(!Start->getType()->isVectorTy() && "Expected a scalar type");
1350 return B.CreateFAddReduce(Start, Src);
1353 Value *llvm::createOrderedReduction(VectorBuilder &VBuilder,
1354 const RecurrenceDescriptor &Desc,
1355 Value *Src, Value *Start) {
1356 assert((Desc.getRecurrenceKind() == RecurKind::FAdd ||
1357 Desc.getRecurrenceKind() == RecurKind::FMulAdd) &&
1358 "Unexpected reduction kind");
1359 assert(Src->getType()->isVectorTy() && "Expected a vector type");
1360 assert(!Start->getType()->isVectorTy() && "Expected a scalar type");
1362 Intrinsic::ID Id = getReductionIntrinsicID(RecurKind::FAdd);
1363 auto *SrcTy = cast<VectorType>(Src->getType());
1364 Value *Ops[] = {Start, Src};
1365 return VBuilder.createSimpleReduction(Id, SrcTy, Ops);
1368 void llvm::propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue,
1369 bool IncludeWrapFlags) {
1370 auto *VecOp = dyn_cast<Instruction>(I);
1371 if (!VecOp)
1372 return;
1373 auto *Intersection = (OpValue == nullptr) ? dyn_cast<Instruction>(VL[0])
1374 : dyn_cast<Instruction>(OpValue);
1375 if (!Intersection)
1376 return;
1377 const unsigned Opcode = Intersection->getOpcode();
1378 VecOp->copyIRFlags(Intersection, IncludeWrapFlags);
1379 for (auto *V : VL) {
1380 auto *Instr = dyn_cast<Instruction>(V);
1381 if (!Instr)
1382 continue;
1383 if (OpValue == nullptr || Opcode == Instr->getOpcode())
1384 VecOp->andIRFlags(V);
1388 bool llvm::isKnownNegativeInLoop(const SCEV *S, const Loop *L,
1389 ScalarEvolution &SE) {
1390 const SCEV *Zero = SE.getZero(S->getType());
1391 return SE.isAvailableAtLoopEntry(S, L) &&
1392 SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, S, Zero);
1395 bool llvm::isKnownNonNegativeInLoop(const SCEV *S, const Loop *L,
1396 ScalarEvolution &SE) {
1397 const SCEV *Zero = SE.getZero(S->getType());
1398 return SE.isAvailableAtLoopEntry(S, L) &&
1399 SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGE, S, Zero);
1402 bool llvm::isKnownPositiveInLoop(const SCEV *S, const Loop *L,
1403 ScalarEvolution &SE) {
1404 const SCEV *Zero = SE.getZero(S->getType());
1405 return SE.isAvailableAtLoopEntry(S, L) &&
1406 SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGT, S, Zero);
1409 bool llvm::isKnownNonPositiveInLoop(const SCEV *S, const Loop *L,
1410 ScalarEvolution &SE) {
1411 const SCEV *Zero = SE.getZero(S->getType());
1412 return SE.isAvailableAtLoopEntry(S, L) &&
1413 SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLE, S, Zero);
1416 bool llvm::cannotBeMinInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
1417 bool Signed) {
1418 unsigned BitWidth = cast<IntegerType>(S->getType())->getBitWidth();
1419 APInt Min = Signed ? APInt::getSignedMinValue(BitWidth) :
1420 APInt::getMinValue(BitWidth);
1421 auto Predicate = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1422 return SE.isAvailableAtLoopEntry(S, L) &&
1423 SE.isLoopEntryGuardedByCond(L, Predicate, S,
1424 SE.getConstant(Min));
1427 bool llvm::cannotBeMaxInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
1428 bool Signed) {
1429 unsigned BitWidth = cast<IntegerType>(S->getType())->getBitWidth();
1430 APInt Max = Signed ? APInt::getSignedMaxValue(BitWidth) :
1431 APInt::getMaxValue(BitWidth);
1432 auto Predicate = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1433 return SE.isAvailableAtLoopEntry(S, L) &&
1434 SE.isLoopEntryGuardedByCond(L, Predicate, S,
1435 SE.getConstant(Max));
1438 //===----------------------------------------------------------------------===//
1439 // rewriteLoopExitValues - Optimize IV users outside the loop.
1440 // As a side effect, reduces the amount of IV processing within the loop.
1441 //===----------------------------------------------------------------------===//
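// For example (illustrative IR, not from a test): given an LCSSA phi in the
// exit block such as
//   %lcssa = phi i32 [ %iv.next, %loop.latch ]
// the incoming value can be replaced by the loop's final value expanded from
// its SCEV (e.g. a constant like 'i32 100' for a counted loop), so the user
// outside the loop no longer depends on the IV computed inside the loop.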
1443 static bool hasHardUserWithinLoop(const Loop *L, const Instruction *I) {
1444 SmallPtrSet<const Instruction *, 8> Visited;
1445 SmallVector<const Instruction *, 8> WorkList;
1446 Visited.insert(I);
1447 WorkList.push_back(I);
1448 while (!WorkList.empty()) {
1449 const Instruction *Curr = WorkList.pop_back_val();
1450 // This use is outside the loop, nothing to do.
1451 if (!L->contains(Curr))
1452 continue;
1453 // Do we assume it is a "hard" use which will not be eliminated easily?
1454 if (Curr->mayHaveSideEffects())
1455 return true;
1456 // Otherwise, add all its users to worklist.
1457 for (const auto *U : Curr->users()) {
1458 auto *UI = cast<Instruction>(U);
1459 if (Visited.insert(UI).second)
1460 WorkList.push_back(UI);
1463 return false;
1466 // Collect information about PHI nodes which can be transformed in
1467 // rewriteLoopExitValues.
1468 struct RewritePhi {
1469 PHINode *PN; // For which PHI node is this replacement?
1470 unsigned Ith; // For which incoming value?
1471 const SCEV *ExpansionSCEV; // The SCEV of the incoming value we are rewriting.
1472 Instruction *ExpansionPoint; // Where we'd like to expand that SCEV?
1473 bool HighCost; // Is this expansion a high-cost?
1475 RewritePhi(PHINode *P, unsigned I, const SCEV *Val, Instruction *ExpansionPt,
1476 bool H)
1477 : PN(P), Ith(I), ExpansionSCEV(Val), ExpansionPoint(ExpansionPt),
1478 HighCost(H) {}
1481 // Check whether it is possible to delete the loop after rewriting exit
1482 // value. If it is possible, ignore ReplaceExitValue and do rewriting
1483 // aggressively.
1484 static bool canLoopBeDeleted(Loop *L, SmallVector<RewritePhi, 8> &RewritePhiSet) {
1485 BasicBlock *Preheader = L->getLoopPreheader();
1486 // If there is no preheader, the loop will not be deleted.
1487 if (!Preheader)
1488 return false;
1490 // In the LoopDeletion pass, a loop can be deleted even when ExitingBlocks.size() > 1.
1491 // We skip the multiple-ExitingBlocks case here for simplicity.
1492 // TODO: If we see a testcase where a loop with multiple ExitingBlocks can be
1493 // deleted after exit value rewriting, we can enhance the logic here.
1494 SmallVector<BasicBlock *, 4> ExitingBlocks;
1495 L->getExitingBlocks(ExitingBlocks);
1496 SmallVector<BasicBlock *, 8> ExitBlocks;
1497 L->getUniqueExitBlocks(ExitBlocks);
1498 if (ExitBlocks.size() != 1 || ExitingBlocks.size() != 1)
1499 return false;
1501 BasicBlock *ExitBlock = ExitBlocks[0];
1502 BasicBlock::iterator BI = ExitBlock->begin();
1503 while (PHINode *P = dyn_cast<PHINode>(BI)) {
1504 Value *Incoming = P->getIncomingValueForBlock(ExitingBlocks[0]);
1506 // If the Incoming value of P is found in RewritePhiSet, we know it
1507 // could be rewritten to use a loop invariant value in transformation
1508 // phase later. Skip it in the loop invariant check below.
1509 bool found = false;
1510 for (const RewritePhi &Phi : RewritePhiSet) {
1511 unsigned i = Phi.Ith;
1512 if (Phi.PN == P && (Phi.PN)->getIncomingValue(i) == Incoming) {
1513 found = true;
1514 break;
1518 Instruction *I;
1519 if (!found && (I = dyn_cast<Instruction>(Incoming)))
1520 if (!L->hasLoopInvariantOperands(I))
1521 return false;
1523 ++BI;
1526 for (auto *BB : L->blocks())
1527 if (llvm::any_of(*BB, [](Instruction &I) {
1528 return I.mayHaveSideEffects();
1530 return false;
1532 return true;
1535 /// Checks if it is safe to call InductionDescriptor::isInductionPHI for \p Phi,
1536 /// and returns true if this Phi is an induction phi in the loop. When
1537 /// isInductionPHI returns true, \p ID will also be set by isInductionPHI.
1538 static bool checkIsIndPhi(PHINode *Phi, Loop *L, ScalarEvolution *SE,
1539 InductionDescriptor &ID) {
1540 if (!Phi)
1541 return false;
1542 if (!L->getLoopPreheader())
1543 return false;
1544 if (Phi->getParent() != L->getHeader())
1545 return false;
1546 return InductionDescriptor::isInductionPHI(Phi, L, SE, ID);
int llvm::rewriteLoopExitValues(Loop *L, LoopInfo *LI, TargetLibraryInfo *TLI,
                                ScalarEvolution *SE,
                                const TargetTransformInfo *TTI,
                                SCEVExpander &Rewriter, DominatorTree *DT,
                                ReplaceExitVal ReplaceExitValue,
                                SmallVector<WeakTrackingVH, 16> &DeadInsts) {
  // Check a pre-condition.
  assert(L->isRecursivelyLCSSAForm(*DT, *LI) &&
         "Indvars did not preserve LCSSA!");

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);

  SmallVector<RewritePhi, 8> RewritePhiSet;
  // Find all values that are computed inside the loop, but used outside of it.
  // Because of LCSSA, these values will only occur in LCSSA PHI nodes. Scan
  // the exit blocks of the loop to find them.
  for (BasicBlock *ExitBB : ExitBlocks) {
    // If there are no PHI nodes in this exit block, then no values defined
    // inside the loop are used on this path; skip it.
    PHINode *PN = dyn_cast<PHINode>(ExitBB->begin());
    if (!PN) continue;

    unsigned NumPreds = PN->getNumIncomingValues();

    // Iterate over all of the PHI nodes.
    BasicBlock::iterator BBI = ExitBB->begin();
    while ((PN = dyn_cast<PHINode>(BBI++))) {
      if (PN->use_empty())
        continue; // dead use, don't replace it

      if (!SE->isSCEVable(PN->getType()))
        continue;

      // Iterate over all of the values in all the PHI nodes.
      for (unsigned i = 0; i != NumPreds; ++i) {
        // If the value being merged in is not an instruction (and hence
        // cannot be defined in the loop), skip it.
        Value *InVal = PN->getIncomingValue(i);
        if (!isa<Instruction>(InVal))
          continue;

        // If this pred is for a subloop, not L itself, skip it.
        if (LI->getLoopFor(PN->getIncomingBlock(i)) != L)
          continue; // The Block is in a subloop, skip it.

        // Check that InVal is defined in the loop.
        Instruction *Inst = cast<Instruction>(InVal);
        if (!L->contains(Inst))
          continue;

        // Find exit values which are induction variables in the loop, and are
        // unused in the loop, with the only uses being the exit block PHI
        // node and the induction variable update binary operator.
        // The exit value can be replaced with the final value when it is
        // cheap to do so.
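        // Illustrative shapes (editor's sketch): the incoming value is
        // either the induction PHI itself or its update operation, e.g.
        //   %iv      = phi i32 [ 0, %preheader ], [ %iv.next, %loop ]
        //   %iv.next = add i32 %iv, 1   ; ID.getInductionBinOp()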
        if (ReplaceExitValue == UnusedIndVarInLoop) {
          InductionDescriptor ID;
          PHINode *IndPhi = dyn_cast<PHINode>(Inst);
          if (IndPhi) {
            if (!checkIsIndPhi(IndPhi, L, SE, ID))
              continue;
            // This is an induction PHI. Check that the only users are PHI
            // nodes, and induction variable update binary operators.
            if (llvm::any_of(Inst->users(), [&](User *U) {
                  if (!isa<PHINode>(U) && !isa<BinaryOperator>(U))
                    return true;
                  BinaryOperator *B = dyn_cast<BinaryOperator>(U);
                  if (B && B != ID.getInductionBinOp())
                    return true;
                  return false;
                }))
              continue;
          } else {
            // If it is not an induction phi, it must be an induction update
            // binary operator with an induction phi user.
            BinaryOperator *B = dyn_cast<BinaryOperator>(Inst);
            if (!B)
              continue;
            if (llvm::any_of(Inst->users(), [&](User *U) {
                  PHINode *Phi = dyn_cast<PHINode>(U);
                  if (Phi != PN && !checkIsIndPhi(Phi, L, SE, ID))
                    return true;
                  return false;
                }))
              continue;
            if (B != ID.getInductionBinOp())
              continue;
          }
        }
        // Okay, this instruction has a user outside of the current loop
        // and varies predictably *inside* the loop. Evaluate the value it
        // contains when the loop exits, if possible. We prefer to start with
        // expressions which are true for all exits (so as to maximize
        // expression reuse by the SCEVExpander), but resort to per-exit
        // evaluation if that fails.
        const SCEV *ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop());
        if (isa<SCEVCouldNotCompute>(ExitValue) ||
            !SE->isLoopInvariant(ExitValue, L) ||
            !Rewriter.isSafeToExpand(ExitValue)) {
          // TODO: This should probably be sunk into SCEV in some way; maybe a
          // getSCEVForExit(SCEV*, L, ExitingBB)? It can be generalized for
          // most SCEV expressions and other recurrence types (e.g. shift
          // recurrences). Is there existing code we can reuse?
          const SCEV *ExitCount = SE->getExitCount(L, PN->getIncomingBlock(i));
          if (isa<SCEVCouldNotCompute>(ExitCount))
            continue;
          if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Inst)))
            if (AddRec->getLoop() == L)
              ExitValue = AddRec->evaluateAtIteration(ExitCount, *SE);
          if (isa<SCEVCouldNotCompute>(ExitValue) ||
              !SE->isLoopInvariant(ExitValue, L) ||
              !Rewriter.isSafeToExpand(ExitValue))
            continue;
        }
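        // Worked example (editor's note): for an affine AddRec {S,+,C} on
        // this loop and an exit count of N for this exiting block,
        // evaluateAtIteration yields S + C * N as the loop-invariant exit
        // value.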
        // Computing the value outside of the loop brings no benefit if it is
        // definitely used inside the loop in a way which can not be optimized
        // away. Avoid doing so unless we know we have a value which computes
        // the ExitValue already. TODO: This should be merged into the SCEV
        // expander to leverage its knowledge of existing expressions.
        if (ReplaceExitValue != AlwaysRepl && !isa<SCEVConstant>(ExitValue) &&
            !isa<SCEVUnknown>(ExitValue) && hasHardUserWithinLoop(L, Inst))
          continue;

        // Check if expansions of this SCEV would count as being high cost.
        bool HighCost = Rewriter.isHighCostExpansion(
            ExitValue, L, SCEVCheapExpansionBudget, TTI, Inst);

        // Note that we must not perform expansions until after
        // we query *all* the costs, because if we perform a temporary
        // expansion in between, one that we might not intend to keep, said
        // expansion *may* affect the cost calculation of the next SCEVs we
        // query, and the next SCEV may erroneously get a smaller cost.

        // Collect all the candidate PHINodes to be rewritten.
        Instruction *InsertPt =
            (isa<PHINode>(Inst) || isa<LandingPadInst>(Inst)) ?
                &*Inst->getParent()->getFirstInsertionPt() : Inst;
        RewritePhiSet.emplace_back(PN, i, ExitValue, InsertPt, HighCost);
      }
    }
  }
  // TODO: evaluate whether it is beneficial to change how we calculate
  // high-cost: if we have SCEV 'A' which we know we will expand, should we
  // calculate the cost of other SCEVs after expanding SCEV 'A', thus
  // potentially giving a cost bonus to those other SCEVs?

  bool LoopCanBeDel = canLoopBeDeleted(L, RewritePhiSet);
  int NumReplaced = 0;

  // Transformation.
  for (const RewritePhi &Phi : RewritePhiSet) {
    PHINode *PN = Phi.PN;

    // Only do the rewrite when the ExitValue can be expanded cheaply.
    // If LoopCanBeDel is true, rewrite the exit value aggressively.
    if ((ReplaceExitValue == OnlyCheapRepl ||
         ReplaceExitValue == UnusedIndVarInLoop) &&
        !LoopCanBeDel && Phi.HighCost)
      continue;

    Value *ExitVal = Rewriter.expandCodeFor(
        Phi.ExpansionSCEV, Phi.PN->getType(), Phi.ExpansionPoint);

    LLVM_DEBUG(dbgs() << "rewriteLoopExitValues: AfterLoopVal = " << *ExitVal
                      << '\n'
                      << "  LoopVal = " << *(Phi.ExpansionPoint) << "\n");

#ifndef NDEBUG
    // If we reuse an instruction from a loop which is neither L nor one of
    // its containing loops, we end up breaking LCSSA form for this loop by
    // creating a new use of its instruction.
    if (auto *ExitInsn = dyn_cast<Instruction>(ExitVal))
      if (auto *EVL = LI->getLoopFor(ExitInsn->getParent()))
        if (EVL != L)
          assert(EVL->contains(L) && "LCSSA breach detected!");
#endif

    NumReplaced++;
    Instruction *Inst = cast<Instruction>(PN->getIncomingValue(Phi.Ith));
    PN->setIncomingValue(Phi.Ith, ExitVal);
    // It's necessary to tell ScalarEvolution about this explicitly so that
    // it can walk the def-use list and forget all SCEVs, as it may not be
    // watching the PHI itself. Once the new exit value is in place, there
    // may not be a def-use connection between the loop and every instruction
    // which got a SCEVAddRecExpr for that loop.
    SE->forgetValue(PN);

    // If this instruction is now dead, record it for deletion later;
    // deleting it immediately would risk invalidating iterators.
    if (isInstructionTriviallyDead(Inst, TLI))
      DeadInsts.push_back(Inst);

    // Replace PN with ExitVal if that is legal and does not break LCSSA.
    if (PN->getNumIncomingValues() == 1 &&
        LI->replacementPreservesLCSSAForm(PN, ExitVal)) {
      PN->replaceAllUsesWith(ExitVal);
      PN->eraseFromParent();
    }
  }

  // The insertion point instruction may have been deleted; clear it out
  // so that the rewriter doesn't trip over it later.
  Rewriter.clearInsertPoint();
  return NumReplaced;
}
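// Illustrative effect (editor's sketch, assumed IR shapes): given a counted
// loop with the LCSSA exit phi
//   exit:
//     %lcssa = phi i32 [ %iv.next, %loop ]
// the incoming value is replaced with code expanded from the SCEV final
// value of %iv.next, so the remaining in-loop user chain can become
// trivially dead and be queued in DeadInsts.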
/// Set weights for \p UnrolledLoop and \p RemainderLoop based on weights for
/// \p OrigLoop.
void llvm::setProfileInfoAfterUnrolling(Loop *OrigLoop, Loop *UnrolledLoop,
                                        Loop *RemainderLoop, uint64_t UF) {
  assert(UF > 0 && "Zero unrolled factor is not supported");
  assert(UnrolledLoop != RemainderLoop &&
         "Unrolled and Remainder loops are expected to be distinct");

  // Get the number of iterations in the original scalar loop.
  unsigned OrigLoopInvocationWeight = 0;
  std::optional<unsigned> OrigAverageTripCount =
      getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight);
  if (!OrigAverageTripCount)
    return;

  // Calculate the number of iterations in the unrolled loop.
  unsigned UnrolledAverageTripCount = *OrigAverageTripCount / UF;
  // Calculate the number of iterations for the remainder loop.
  unsigned RemainderAverageTripCount = *OrigAverageTripCount % UF;

  setLoopEstimatedTripCount(UnrolledLoop, UnrolledAverageTripCount,
                            OrigLoopInvocationWeight);
  setLoopEstimatedTripCount(RemainderLoop, RemainderAverageTripCount,
                            OrigLoopInvocationWeight);
}
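// Worked example (editor's note): with an estimated trip count of 10 and
// UF == 4, the unrolled loop is estimated at 10 / 4 == 2 iterations and the
// remainder loop at 10 % 4 == 2 iterations, while the invocation weight is
// carried over unchanged.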
/// Utility that implements appending of loops onto a worklist.
/// Loops are added in preorder (analogous to reverse postorder for trees),
/// and the worklist is processed LIFO.
template <typename RangeT>
void llvm::appendReversedLoopsToWorklist(
    RangeT &&Loops, SmallPriorityWorklist<Loop *, 4> &Worklist) {
  // We use an internal worklist to build up the preorder traversal without
  // recursion.
  SmallVector<Loop *, 4> PreOrderLoops, PreOrderWorklist;

  // We walk the initial sequence of loops in reverse because we generally want
  // to visit defs before uses and the worklist is LIFO.
  for (Loop *RootL : Loops) {
    assert(PreOrderLoops.empty() && "Must start with an empty preorder walk.");
    assert(PreOrderWorklist.empty() &&
           "Must start with an empty preorder walk worklist.");
    PreOrderWorklist.push_back(RootL);
    do {
      Loop *L = PreOrderWorklist.pop_back_val();
      PreOrderWorklist.append(L->begin(), L->end());
      PreOrderLoops.push_back(L);
    } while (!PreOrderWorklist.empty());

    Worklist.insert(std::move(PreOrderLoops));
    PreOrderLoops.clear();
  }
}
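// For example (editor's note): for a nest { A, children B, C } the preorder
// insertion order is A, B, C; since the worklist is LIFO, C comes off first
// and A last, so subloops are processed before their parent.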
template <typename RangeT>
void llvm::appendLoopsToWorklist(RangeT &&Loops,
                                 SmallPriorityWorklist<Loop *, 4> &Worklist) {
  appendReversedLoopsToWorklist(reverse(Loops), Worklist);
}

template void llvm::appendLoopsToWorklist<ArrayRef<Loop *> &>(
    ArrayRef<Loop *> &Loops, SmallPriorityWorklist<Loop *, 4> &Worklist);

template void
llvm::appendLoopsToWorklist<Loop &>(Loop &L,
                                    SmallPriorityWorklist<Loop *, 4> &Worklist);

void llvm::appendLoopsToWorklist(LoopInfo &LI,
                                 SmallPriorityWorklist<Loop *, 4> &Worklist) {
  appendReversedLoopsToWorklist(LI, Worklist);
}
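/// Clone the loop \p L and its subloops under parent \p PL, registering the
/// clones with \p LI (and \p LPM, if provided). Note that the basic blocks
/// themselves are not cloned here: \p VM is expected to already map each
/// block of \p L to its previously cloned copy.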
Loop *llvm::cloneLoop(Loop *L, Loop *PL, ValueToValueMapTy &VM,
                      LoopInfo *LI, LPPassManager *LPM) {
  Loop &New = *LI->AllocateLoop();
  if (PL)
    PL->addChildLoop(&New);
  else
    LI->addTopLevelLoop(&New);

  if (LPM)
    LPM->addLoop(New);

  // Add all of the blocks in L to the new loop.
  for (BasicBlock *BB : L->blocks())
    if (LI->getLoopFor(BB) == L)
      New.addBasicBlockToLoop(cast<BasicBlock>(VM[BB]), *LI);

  // Add all of the subloops to the new loop.
  for (Loop *I : *L)
    cloneLoop(I, &New, VM, LI, LPM);

  return &New;
}
/// IR Values for the lower and upper bounds of a pointer evolution. We
/// need to use value-handles because SCEV expansion can invalidate previously
/// expanded values. Thus expansion of a pointer can invalidate the bounds for
/// a previous one.
struct PointerBounds {
  TrackingVH<Value> Start;
  TrackingVH<Value> End;
  Value *StrideToCheck;
};
/// Expand code for the lower and upper bound of the pointer group \p CG
/// in \p TheLoop. \return the values for the bounds.
static PointerBounds expandBounds(const RuntimeCheckingPtrGroup *CG,
                                  Loop *TheLoop, Instruction *Loc,
                                  SCEVExpander &Exp, bool HoistRuntimeChecks) {
  LLVMContext &Ctx = Loc->getContext();
  Type *PtrArithTy = PointerType::get(Ctx, CG->AddressSpace);

  Value *Start = nullptr, *End = nullptr;
  LLVM_DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
  const SCEV *Low = CG->Low, *High = CG->High, *Stride = nullptr;

  // If the Low and High values are themselves loop-variant, then we may want
  // to expand the range to include those covered by the outer loop as well.
  // There is a trade-off here with the advantage being that creating checks
  // using the expanded range permits the runtime memory checks to be hoisted
  // out of the outer loop. This reduces the cost of entering the inner loop,
  // which can be significant for low trip counts. The disadvantage is that
  // there is a chance we may now never enter the vectorized inner loop,
  // whereas using a restricted range check could have allowed us to enter at
  // least once. This is why the behaviour is not currently the default and is
  // controlled by the parameter 'HoistRuntimeChecks'.
  if (HoistRuntimeChecks && TheLoop->getParentLoop() &&
      isa<SCEVAddRecExpr>(High) && isa<SCEVAddRecExpr>(Low)) {
    auto *HighAR = cast<SCEVAddRecExpr>(High);
    auto *LowAR = cast<SCEVAddRecExpr>(Low);
    const Loop *OuterLoop = TheLoop->getParentLoop();
    ScalarEvolution &SE = *Exp.getSE();
    const SCEV *Recur = LowAR->getStepRecurrence(SE);
    if (Recur == HighAR->getStepRecurrence(SE) &&
        HighAR->getLoop() == OuterLoop && LowAR->getLoop() == OuterLoop) {
      BasicBlock *OuterLoopLatch = OuterLoop->getLoopLatch();
      const SCEV *OuterExitCount = SE.getExitCount(OuterLoop, OuterLoopLatch);
      if (!isa<SCEVCouldNotCompute>(OuterExitCount) &&
          OuterExitCount->getType()->isIntegerTy()) {
        const SCEV *NewHigh =
            cast<SCEVAddRecExpr>(High)->evaluateAtIteration(OuterExitCount, SE);
        if (!isa<SCEVCouldNotCompute>(NewHigh)) {
          LLVM_DEBUG(dbgs() << "LAA: Expanded RT check for range to include "
                               "outer loop in order to permit hoisting\n");
          High = NewHigh;
          Low = cast<SCEVAddRecExpr>(Low)->getStart();
          // If there is a possibility that the stride is negative then we have
          // to generate extra checks to ensure the stride is positive.
          if (!SE.isKnownNonNegative(
                  SE.applyLoopGuards(Recur, HighAR->getLoop()))) {
            Stride = Recur;
            LLVM_DEBUG(dbgs() << "LAA: ... but need to check stride is "
                                 "positive: "
                              << *Stride << '\n');
          }
        }
      }
    }
  }
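  // Worked example (editor's note): for High == {%base,+,%step}<outer> and an
  // outer-loop exit count of N, evaluateAtIteration gives %base + %step * N,
  // so the expanded range covers the accesses of every outer-loop iteration
  // and the resulting check can be hoisted out of the outer loop.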
  Start = Exp.expandCodeFor(Low, PtrArithTy, Loc);
  End = Exp.expandCodeFor(High, PtrArithTy, Loc);
  if (CG->NeedsFreeze) {
    IRBuilder<> Builder(Loc);
    Start = Builder.CreateFreeze(Start, Start->getName() + ".fr");
    End = Builder.CreateFreeze(End, End->getName() + ".fr");
  }
  Value *StrideVal =
      Stride ? Exp.expandCodeFor(Stride, Stride->getType(), Loc) : nullptr;
  LLVM_DEBUG(dbgs() << "Start: " << *Low << " End: " << *High << "\n");
  return {Start, End, StrideVal};
}
/// Turns a collection of checks into a collection of expanded upper and
/// lower bounds for both pointers in the check.
static SmallVector<std::pair<PointerBounds, PointerBounds>, 4>
expandBounds(const SmallVectorImpl<RuntimePointerCheck> &PointerChecks, Loop *L,
             Instruction *Loc, SCEVExpander &Exp, bool HoistRuntimeChecks) {
  SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds;

  // Here we're relying on the SCEV Expander's cache to only emit code for the
  // same bounds once.
  transform(PointerChecks, std::back_inserter(ChecksWithBounds),
            [&](const RuntimePointerCheck &Check) {
              PointerBounds First = expandBounds(Check.first, L, Loc, Exp,
                                                 HoistRuntimeChecks),
                            Second = expandBounds(Check.second, L, Loc, Exp,
                                                  HoistRuntimeChecks);
              return std::make_pair(First, Second);
            });

  return ChecksWithBounds;
}
Value *llvm::addRuntimeChecks(
    Instruction *Loc, Loop *TheLoop,
    const SmallVectorImpl<RuntimePointerCheck> &PointerChecks,
    SCEVExpander &Exp, bool HoistRuntimeChecks) {
  // TODO: Move noalias annotation code from LoopVersioning here and share
  // with LV if possible.
  // TODO: Pass RtPtrChecking instead of PointerChecks and SE separately, if
  // possible.
  auto ExpandedChecks =
      expandBounds(PointerChecks, TheLoop, Loc, Exp, HoistRuntimeChecks);

  LLVMContext &Ctx = Loc->getContext();
  IRBuilder ChkBuilder(Ctx, InstSimplifyFolder(Loc->getDataLayout()));
  ChkBuilder.SetInsertPoint(Loc);
  // Our instructions might fold to a constant.
  Value *MemoryRuntimeCheck = nullptr;

  for (const auto &[A, B] : ExpandedChecks) {
    // Check if two pointers (A and B) conflict, where a conflict is computed
    // as: start(A) < end(B) && start(B) < end(A)

    assert((A.Start->getType()->getPointerAddressSpace() ==
            B.End->getType()->getPointerAddressSpace()) &&
           (B.Start->getType()->getPointerAddressSpace() ==
            A.End->getType()->getPointerAddressSpace()) &&
           "Trying to bounds check pointers with different address spaces");

    // [A|B].Start points to the first accessed byte under base [A|B].
    // [A|B].End points to the last accessed byte, plus one.
    // There is no conflict when the intervals are disjoint:
    // NoConflict = (B.Start >= A.End) || (A.Start >= B.End)
    //
    // bound0 = (A.Start < B.End)
    // bound1 = (B.Start < A.End)
    // IsConflict = bound0 & bound1
    Value *Cmp0 = ChkBuilder.CreateICmpULT(A.Start, B.End, "bound0");
    Value *Cmp1 = ChkBuilder.CreateICmpULT(B.Start, A.End, "bound1");
    Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
    if (A.StrideToCheck) {
      Value *IsNegativeStride = ChkBuilder.CreateICmpSLT(
          A.StrideToCheck, ConstantInt::get(A.StrideToCheck->getType(), 0),
          "stride.check");
      IsConflict = ChkBuilder.CreateOr(IsConflict, IsNegativeStride);
    }
    if (B.StrideToCheck) {
      Value *IsNegativeStride = ChkBuilder.CreateICmpSLT(
          B.StrideToCheck, ConstantInt::get(B.StrideToCheck->getType(), 0),
          "stride.check");
      IsConflict = ChkBuilder.CreateOr(IsConflict, IsNegativeStride);
    }
    if (MemoryRuntimeCheck) {
      IsConflict =
          ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx");
    }
    MemoryRuntimeCheck = IsConflict;
  }

  return MemoryRuntimeCheck;
}
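// Illustrative output (editor's sketch of the emitted IR shape):
//   %bound0         = icmp ult ptr %A.start, %B.end
//   %bound1         = icmp ult ptr %B.start, %A.end
//   %found.conflict = and i1 %bound0, %bound1
// with per-group "stride.check" terms OR'd in when a stride may be negative,
// and per-pair results combined into "conflict.rdx".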
Value *llvm::addDiffRuntimeChecks(
    Instruction *Loc, ArrayRef<PointerDiffInfo> Checks, SCEVExpander &Expander,
    function_ref<Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC) {

  LLVMContext &Ctx = Loc->getContext();
  IRBuilder ChkBuilder(Ctx, InstSimplifyFolder(Loc->getDataLayout()));
  ChkBuilder.SetInsertPoint(Loc);
  // Our instructions might fold to a constant.
  Value *MemoryRuntimeCheck = nullptr;

  auto &SE = *Expander.getSE();
  // Map to keep track of created compares. The key is the pair of operands
  // for the compare, to allow detecting and re-using redundant compares.
  DenseMap<std::pair<Value *, Value *>, Value *> SeenCompares;
  for (const auto &[SrcStart, SinkStart, AccessSize, NeedsFreeze] : Checks) {
    Type *Ty = SinkStart->getType();
    // Compute VF * IC * AccessSize.
    auto *VFTimesUFTimesSize =
        ChkBuilder.CreateMul(GetVF(ChkBuilder, Ty->getScalarSizeInBits()),
                             ConstantInt::get(Ty, IC * AccessSize));
    Value *Diff =
        Expander.expandCodeFor(SE.getMinusSCEV(SinkStart, SrcStart), Ty, Loc);

    // Check if the same compare has already been created earlier. In that
    // case, there is no need to check it again.
    Value *IsConflict = SeenCompares.lookup({Diff, VFTimesUFTimesSize});
    if (IsConflict)
      continue;

    IsConflict =
        ChkBuilder.CreateICmpULT(Diff, VFTimesUFTimesSize, "diff.check");
    SeenCompares.insert({{Diff, VFTimesUFTimesSize}, IsConflict});
    if (NeedsFreeze)
      IsConflict =
          ChkBuilder.CreateFreeze(IsConflict, IsConflict->getName() + ".fr");
    if (MemoryRuntimeCheck) {
      IsConflict =
          ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx");
    }
    MemoryRuntimeCheck = IsConflict;
  }

  return MemoryRuntimeCheck;
}
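// In other words (editor's note): a source/sink pair conflicts when
//   (SinkStart - SrcStart) <u VF * IC * AccessSize,
// i.e. the sink starts fewer than one vectorized-and-interleaved iteration's
// worth of bytes after the source, so a dependence could be violated.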
std::optional<IVConditionInfo>
llvm::hasPartialIVCondition(const Loop &L, unsigned MSSAThreshold,
                            const MemorySSA &MSSA, AAResults &AA) {
  auto *TI = dyn_cast<BranchInst>(L.getHeader()->getTerminator());
  if (!TI || !TI->isConditional())
    return {};

  auto *CondI = dyn_cast<Instruction>(TI->getCondition());
  // The case with the condition outside the loop should already be handled
  // earlier.
  // Allow CmpInsts and TruncInsts as they may be users of load instructions
  // and have potential for partial unswitching.
  if (!CondI || !isa<CmpInst, TruncInst>(CondI) || !L.contains(CondI))
    return {};
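  // Illustrative target pattern (editor's sketch): a header condition that
  // depends only on memory left unmodified on some path through the loop,
  // e.g.
  //   header:
  //     %v = load i32, ptr %p
  //     %c = icmp eq i32 %v, 0
  //     br i1 %c, label %then, label %else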
  SmallVector<Instruction *> InstToDuplicate;
  InstToDuplicate.push_back(CondI);

  SmallVector<Value *, 4> WorkList;
  WorkList.append(CondI->op_begin(), CondI->op_end());

  SmallVector<MemoryAccess *, 4> AccessesToCheck;
  SmallVector<MemoryLocation, 4> AccessedLocs;
  while (!WorkList.empty()) {
    Instruction *I = dyn_cast<Instruction>(WorkList.pop_back_val());
    if (!I || !L.contains(I))
      continue;

    // TODO: support additional instructions.
    if (!isa<LoadInst>(I) && !isa<GetElementPtrInst>(I))
      return {};

    // Do not duplicate volatile and atomic loads.
    if (auto *LI = dyn_cast<LoadInst>(I))
      if (LI->isVolatile() || LI->isAtomic())
        return {};

    InstToDuplicate.push_back(I);
    if (MemoryAccess *MA = MSSA.getMemoryAccess(I)) {
      if (auto *MemUse = dyn_cast_or_null<MemoryUse>(MA)) {
        // Queue the defining access for alias checking.
        AccessesToCheck.push_back(MemUse->getDefiningAccess());
        AccessedLocs.push_back(MemoryLocation::get(I));
      } else {
        // MemoryDefs may clobber the location or may be atomic memory
        // operations. Bail out.
        return {};
      }
    }
    WorkList.append(I->op_begin(), I->op_end());
  }

  if (InstToDuplicate.empty())
    return {};
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L.getExitingBlocks(ExitingBlocks);
  auto HasNoClobbersOnPath =
      [&L, &AA, &AccessedLocs, &ExitingBlocks, &InstToDuplicate,
       MSSAThreshold](BasicBlock *Succ, BasicBlock *Header,
                      SmallVector<MemoryAccess *, 4> AccessesToCheck)
      -> std::optional<IVConditionInfo> {
    IVConditionInfo Info;
    // First, collect all blocks in the loop that are on a path from Succ
    // to the header.
    SmallVector<BasicBlock *, 4> WorkList;
    WorkList.push_back(Succ);
    WorkList.push_back(Header);
    SmallPtrSet<BasicBlock *, 4> Seen;
    Seen.insert(Header);
    Info.PathIsNoop &=
        all_of(*Header, [](Instruction &I) { return !I.mayHaveSideEffects(); });

    while (!WorkList.empty()) {
      BasicBlock *Current = WorkList.pop_back_val();
      if (!L.contains(Current))
        continue;
      const auto &SeenIns = Seen.insert(Current);
      if (!SeenIns.second)
        continue;

      Info.PathIsNoop &= all_of(
          *Current, [](Instruction &I) { return !I.mayHaveSideEffects(); });
      WorkList.append(succ_begin(Current), succ_end(Current));
    }

    // Require at least 2 blocks on a path through the loop. This skips
    // paths that directly exit the loop.
    if (Seen.size() < 2)
      return {};

    // Next, check if there are any MemoryDefs that are on the path through
    // the loop (in the Seen set) and may-alias any of the locations in
    // AccessedLocs. If that is the case, they may modify the condition and
    // partial unswitching is not possible.
    SmallPtrSet<MemoryAccess *, 4> SeenAccesses;
    while (!AccessesToCheck.empty()) {
      MemoryAccess *Current = AccessesToCheck.pop_back_val();
      auto SeenI = SeenAccesses.insert(Current);
      if (!SeenI.second || !Seen.contains(Current->getBlock()))
        continue;

      // Bail out if we exceeded the threshold.
      if (SeenAccesses.size() >= MSSAThreshold)
        return {};

      // MemoryUses are read-only accesses.
      if (isa<MemoryUse>(Current))
        continue;

      // For a MemoryDef, check if it aliases any of the locations feeding
      // the original condition.
      if (auto *CurrentDef = dyn_cast<MemoryDef>(Current)) {
        if (any_of(AccessedLocs, [&AA, CurrentDef](MemoryLocation &Loc) {
              return isModSet(
                  AA.getModRefInfo(CurrentDef->getMemoryInst(), Loc));
            }))
          return {};
      }

      for (Use &U : Current->uses())
        AccessesToCheck.push_back(cast<MemoryAccess>(U.getUser()));
    }

    // We could also allow loops with known trip counts without mustprogress,
    // but ScalarEvolution may not be available.
    Info.PathIsNoop &= isMustProgress(&L);

    // If the path is considered a no-op so far, check if it reaches a
    // single exit block without any phis. This ensures no values from the
    // loop are used outside of the loop.
    if (Info.PathIsNoop) {
      for (auto *Exiting : ExitingBlocks) {
        if (!Seen.contains(Exiting))
          continue;
        for (auto *Succ : successors(Exiting)) {
          if (L.contains(Succ))
            continue;

          Info.PathIsNoop &= Succ->phis().empty() &&
                             (!Info.ExitForPath || Info.ExitForPath == Succ);
          if (!Info.PathIsNoop)
            break;
          assert((!Info.ExitForPath || Info.ExitForPath == Succ) &&
                 "cannot have multiple exit blocks");
          Info.ExitForPath = Succ;
        }
      }
      if (!Info.ExitForPath)
        Info.PathIsNoop = false;
    }

    Info.InstToDuplicate = InstToDuplicate;
    return Info;
  };
  // If we branch to the same successor, partial unswitching will not be
  // beneficial.
  if (TI->getSuccessor(0) == TI->getSuccessor(1))
    return {};

  if (auto Info = HasNoClobbersOnPath(TI->getSuccessor(0), L.getHeader(),
                                      AccessesToCheck)) {
    Info->KnownValue = ConstantInt::getTrue(TI->getContext());
    return Info;
  }
  if (auto Info = HasNoClobbersOnPath(TI->getSuccessor(1), L.getHeader(),
                                      AccessesToCheck)) {
    Info->KnownValue = ConstantInt::getFalse(TI->getContext());
    return Info;
  }

  return {};
}