//===- LoopFlatten.cpp - Loop flattening pass -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass flattens pairs of nested loops into a single loop.
//
// The intention is to optimise loop nests like this, which together access an
// array linearly:
//
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j)
//       f(A[i*M+j]);
//
// into one loop:
//
//   for (int i = 0; i < (N*M); ++i)
//     f(A[i]);
//
// It can also flatten loops where the induction variables are not used in the
// loop. This is only worth doing if the induction variables are only used in an
// expression like i*M+j. If they had any other uses, we would have to insert a
// div/mod to reconstruct the original values, so this wouldn't be profitable.
//
// We also need to prove that N*M will not overflow. The preferred solution is
// to widen the IV, which avoids overflow checks, so that is tried first. If
// the IV cannot be widened, then we try to determine that this new tripcount
// expression won't overflow.
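//
// As a concrete illustration (worked numbers only, not from the source): with
// i32 IVs and
//
//   N = 1 << 17, M = 1 << 17  =>  N * M = 1 << 34 > UINT32_MAX
//
// the flattened tripcount does not fit in the original type, so the IVs must
// either be widened to i64 or the product must be proven not to overflow.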
// Q: Does LoopFlatten use SCEV?
// Short answer: Yes and no.
//
// Long answer:
// For this transformation to be valid, we require all uses of the induction
// variables to be linear expressions of the form i*M+j. The different Loop
// APIs are used to get some loop components like the induction variable,
// compare statement, etc. In addition, we do some pattern matching to find the
// linear expressions and other loop components like the loop increment. The
// latter are examples of expressions that do use the induction variable, but
// are safe to ignore when we check all uses to be of the form i*M+j. We keep
// track of all of this in the bookkeeping struct FlattenInfo.
// We assume the loops to be canonical, i.e. starting at 0 and incrementing by
// 1. This makes the RHS of the compare the loop tripcount (with the right
// predicate). We then use SCEV to sanity check that this tripcount matches
// the tripcount as computed by SCEV.
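//
// A minimal sketch of that canonical form (illustrative IR only, names
// invented for the example, not taken from a particular test):
//
//   loop:
//     %iv = phi i32 [ 0, %preheader ], [ %iv.next, %loop ]
//     ...
//     %iv.next = add nuw nsw i32 %iv, 1
//     %cmp = icmp ult i32 %iv.next, %tripcount   ; RHS is the tripcount
//     br i1 %cmp, label %loop, label %exit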
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/LoopFlatten.h"

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopNestAnalysis.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"

using namespace llvm;
using namespace llvm::PatternMatch;
#define DEBUG_TYPE "loop-flatten"

STATISTIC(NumFlattened, "Number of loops flattened");
static cl::opt<unsigned> RepeatedInstructionThreshold(
    "loop-flatten-cost-threshold", cl::Hidden, cl::init(2),
    cl::desc("Limit on the cost of instructions that can be repeated due to "
             "loop flattening"));

static cl::opt<bool>
    AssumeNoOverflow("loop-flatten-assume-no-overflow", cl::Hidden,
                     cl::init(false),
                     cl::desc("Assume that the product of the two iteration "
                              "trip counts will never overflow"));

static cl::opt<bool>
    WidenIV("loop-flatten-widen-iv", cl::Hidden, cl::init(true),
            cl::desc("Widen the loop induction variables, if possible, so "
                     "overflow checks won't reject flattening"));
// We require all uses of both induction variables to match this pattern:
//
//   (OuterPHI * InnerTripCount) + InnerPHI
//
// I.e., it needs to be a linear expression of the induction variables and the
// inner loop trip count. We keep track of all different expressions on which
// checks will be performed in this bookkeeping struct.
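//
// In IR, an A[i*M+j] access matching that pattern looks roughly like this
// (illustrative only, names invented for the example):
//
//   %mul = mul i32 %outer.iv, %inner.tripcount
//   %add = add i32 %mul, %inner.iv
//   %gep = getelementptr inbounds i32, ptr %A, i32 %add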
struct FlattenInfo {
  Loop *OuterLoop = nullptr; // The loop pair to be flattened.
  Loop *InnerLoop = nullptr;

  PHINode *InnerInductionPHI = nullptr; // These PHINodes correspond to loop
  PHINode *OuterInductionPHI = nullptr; // induction variables, which are
                                        // expected to start at zero and
                                        // increment by one on each loop.

  Value *InnerTripCount = nullptr; // The product of these two tripcounts
  Value *OuterTripCount = nullptr; // will be the new flattened loop
                                   // tripcount. Also used to recognise a
                                   // linear expression that will be replaced.

  SmallPtrSet<Value *, 4> LinearIVUses; // Contains the linear expressions
                                        // of the form i*M+j that will be
                                        // replaced.

  BinaryOperator *InnerIncrement = nullptr; // Uses of induction variables in
  BinaryOperator *OuterIncrement = nullptr; // loop control statements that
  BranchInst *InnerBranch = nullptr;        // are safe to ignore.

  BranchInst *OuterBranch = nullptr; // The instruction that needs to be
                                     // updated with new tripcount.

  SmallPtrSet<PHINode *, 4> InnerPHIsToTransform;

  bool Widened = false; // Whether this holds the flatten info before or after
                        // widening.

  PHINode *NarrowInnerInductionPHI = nullptr; // Holds the old/narrow induction
  PHINode *NarrowOuterInductionPHI = nullptr; // phis, i.e. the Phis before IV
                                              // widening has been applied. Used
                                              // to skip checks on phi nodes.

  FlattenInfo(Loop *OL, Loop *IL) : OuterLoop(OL), InnerLoop(IL) {}
  bool isNarrowInductionPhi(PHINode *Phi) {
    // This can't be the narrow phi if we haven't widened the IV first.
    if (!Widened)
      return false;
    return NarrowInnerInductionPHI == Phi || NarrowOuterInductionPHI == Phi;
  }
  bool isInnerLoopIncrement(User *U) {
    return InnerIncrement == U;
  }
  bool isOuterLoopIncrement(User *U) {
    return OuterIncrement == U;
  }
  bool isInnerLoopTest(User *U) {
    return InnerBranch->getCondition() == U;
  }
  bool checkOuterInductionPhiUsers(SmallPtrSet<Value *, 4> &ValidOuterPHIUses) {
    for (User *U : OuterInductionPHI->users()) {
      if (isOuterLoopIncrement(U))
        continue;

      auto IsValidOuterPHIUses = [&](User *U) -> bool {
        LLVM_DEBUG(dbgs() << "Found use of outer induction variable: "; U->dump());
        if (!ValidOuterPHIUses.count(U)) {
          LLVM_DEBUG(dbgs() << "Did not match expected pattern, bailing\n");
          return false;
        }
        LLVM_DEBUG(dbgs() << "Use is optimisable\n");
        return true;
      };

      if (auto *V = dyn_cast<TruncInst>(U)) {
        for (auto *K : V->users()) {
          if (!IsValidOuterPHIUses(K))
            return false;
        }
        continue;
      }

      if (!IsValidOuterPHIUses(U))
        return false;
    }
    return true;
  }
  bool matchLinearIVUser(User *U, Value *InnerTripCount,
                         SmallPtrSet<Value *, 4> &ValidOuterPHIUses) {
    LLVM_DEBUG(dbgs() << "Checking linear i*M+j expression for: "; U->dump());
    Value *MatchedMul = nullptr;
    Value *MatchedItCount = nullptr;

    bool IsAdd = match(U, m_c_Add(m_Specific(InnerInductionPHI),
                                  m_Value(MatchedMul))) &&
                 match(MatchedMul, m_c_Mul(m_Specific(OuterInductionPHI),
                                           m_Value(MatchedItCount)));
    // Matches the same pattern as above, except it also looks for truncs
    // on the phi, which can be the result of widening the induction variables.
    bool IsAddTrunc =
        match(U, m_c_Add(m_Trunc(m_Specific(InnerInductionPHI)),
                         m_Value(MatchedMul))) &&
        match(MatchedMul, m_c_Mul(m_Trunc(m_Specific(OuterInductionPHI)),
                                  m_Value(MatchedItCount)));
    // Matches the pattern ptr+i*M+j, with the two additions being done via GEP.
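    // For reference, that double-GEP form corresponds to IR roughly like the
    // following (illustrative only, names invented for this example):
    //
    //   %row = getelementptr i32, ptr %base, i32 %mul   ; %mul == i*M
    //   %elt = getelementptr i32, ptr %row, i32 %j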
    bool IsGEP = match(U, m_GEP(m_GEP(m_Value(), m_Value(MatchedMul)),
                                m_Specific(InnerInductionPHI))) &&
                 match(MatchedMul, m_c_Mul(m_Specific(OuterInductionPHI),
                                           m_Value(MatchedItCount)));
    if (!MatchedMul || !MatchedItCount)
      return false;

    LLVM_DEBUG(dbgs() << "Matched multiplication: "; MatchedMul->dump());
    LLVM_DEBUG(dbgs() << "Matched iteration count: "; MatchedItCount->dump());

    // The mul should not have any other uses. Widening may leave trivially dead
    // uses, which can be ignored.
    if (count_if(MatchedMul->users(), [](User *U) {
          return !isInstructionTriviallyDead(cast<Instruction>(U));
        }) > 1) {
      LLVM_DEBUG(dbgs() << "Multiply has more than one use\n");
      return false;
    }
    // Look through extends if the IV has been widened. Don't look through
    // extends if we already looked through a trunc.
    if (Widened && (IsAdd || IsGEP) &&
        (isa<SExtInst>(MatchedItCount) || isa<ZExtInst>(MatchedItCount))) {
      assert(MatchedItCount->getType() == InnerInductionPHI->getType() &&
             "Unexpected type mismatch in types after widening");
      MatchedItCount = isa<SExtInst>(MatchedItCount)
                           ? dyn_cast<SExtInst>(MatchedItCount)->getOperand(0)
                           : dyn_cast<ZExtInst>(MatchedItCount)->getOperand(0);
    }
    LLVM_DEBUG(dbgs() << "Looking for inner trip count: ";
               InnerTripCount->dump());

    if ((IsAdd || IsAddTrunc || IsGEP) && MatchedItCount == InnerTripCount) {
      LLVM_DEBUG(dbgs() << "Found. This use is optimisable\n");
      ValidOuterPHIUses.insert(MatchedMul);
      LinearIVUses.insert(U);
      return true;
    }

    LLVM_DEBUG(dbgs() << "Did not match expected pattern, bailing\n");
    return false;
  }
  bool checkInnerInductionPhiUsers(SmallPtrSet<Value *, 4> &ValidOuterPHIUses) {
    Value *SExtInnerTripCount = InnerTripCount;
    if (Widened &&
        (isa<SExtInst>(InnerTripCount) || isa<ZExtInst>(InnerTripCount)))
      SExtInnerTripCount = cast<Instruction>(InnerTripCount)->getOperand(0);

    for (User *U : InnerInductionPHI->users()) {
      LLVM_DEBUG(dbgs() << "Checking User: "; U->dump());
      if (isInnerLoopIncrement(U)) {
        LLVM_DEBUG(dbgs() << "Use is inner loop increment, continuing\n");
        continue;
      }

      // After widening the IVs, a trunc instruction might have been introduced,
      // so look through truncs.
      if (isa<TruncInst>(U)) {
        if (!U->hasOneUse())
          return false;
        U = *U->user_begin();
      }

      // If the use is in the compare (which is also the condition of the inner
      // branch) then the compare has been altered by another transformation,
      // e.g. icmp ult %inc, tripcount -> icmp ult %j, tripcount-1, where
      // tripcount is a constant. Ignore this use as the compare gets removed
      // later anyway.
      if (isInnerLoopTest(U)) {
        LLVM_DEBUG(dbgs() << "Use is the inner loop test, continuing\n");
        continue;
      }

      if (!matchLinearIVUser(U, SExtInnerTripCount, ValidOuterPHIUses)) {
        LLVM_DEBUG(dbgs() << "Not a linear IV user\n");
        return false;
      }
      LLVM_DEBUG(dbgs() << "Linear IV users found!\n");
    }
    return true;
  }
};

static bool
setLoopComponents(Value *&TC, Value *&TripCount, BinaryOperator *&Increment,
                  SmallPtrSetImpl<Instruction *> &IterationInstructions) {
  TripCount = TC;
  IterationInstructions.insert(Increment);
  LLVM_DEBUG(dbgs() << "Found Increment: "; Increment->dump());
  LLVM_DEBUG(dbgs() << "Found trip count: "; TripCount->dump());
  LLVM_DEBUG(dbgs() << "Successfully found all loop components\n");
  return true;
}
// Given the RHS of the loop latch compare instruction, verify with SCEV
// that this is indeed the loop tripcount.
// TODO: This used to be a straightforward check but has grown to be quite
// complicated now. It is therefore worth revisiting what the additional
// benefits are of this (compared to relying on canonical loops and pattern
// matching).
static bool verifyTripCount(Value *RHS, Loop *L,
                            SmallPtrSetImpl<Instruction *> &IterationInstructions,
                            PHINode *&InductionPHI, Value *&TripCount,
                            BinaryOperator *&Increment, BranchInst *&BackBranch,
                            ScalarEvolution *SE, bool IsWidened) {
  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
    LLVM_DEBUG(dbgs() << "Backedge-taken count is not predictable\n");
    return false;
  }
  // Evaluating in the trip count's type cannot overflow here as the overflow
  // checks are performed in checkOverflow, but are first tried to avoid by
  // widening the IV.
  const SCEV *SCEVTripCount =
      SE->getTripCountFromExitCount(BackedgeTakenCount,
                                    BackedgeTakenCount->getType(), L);

  const SCEV *SCEVRHS = SE->getSCEV(RHS);
  if (SCEVRHS == SCEVTripCount)
    return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
  ConstantInt *ConstantRHS = dyn_cast<ConstantInt>(RHS);
  if (ConstantRHS) {
    const SCEV *BackedgeTCExt = nullptr;
    if (IsWidened) {
      const SCEV *SCEVTripCountExt;
      // Find the extended backedge taken count and extended trip count using
      // SCEV. One of these should now match the RHS of the compare.
      BackedgeTCExt = SE->getZeroExtendExpr(BackedgeTakenCount, RHS->getType());
      SCEVTripCountExt = SE->getTripCountFromExitCount(BackedgeTCExt,
                                                       RHS->getType(), L);
      if (SCEVRHS != BackedgeTCExt && SCEVRHS != SCEVTripCountExt) {
        LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
        return false;
      }
    }
    // If the RHS of the compare is equal to the backedge taken count we need
    // to add one to get the trip count.
    if (SCEVRHS == BackedgeTCExt || SCEVRHS == BackedgeTakenCount) {
      Value *NewRHS = ConstantInt::get(ConstantRHS->getContext(),
                                       ConstantRHS->getValue() + 1);
      return setLoopComponents(NewRHS, TripCount, Increment,
                               IterationInstructions);
    }
    return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
  }
  // If the RHS isn't a constant then check that the reason it doesn't match
  // the SCEV trip count is because the RHS is a ZExt or SExt instruction
  // (and take the trip count to be the RHS).
  if (!IsWidened) {
    LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
    return false;
  }
  auto *TripCountInst = dyn_cast<Instruction>(RHS);
  if (!TripCountInst) {
    LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
    return false;
  }
  if ((!isa<ZExtInst>(TripCountInst) && !isa<SExtInst>(TripCountInst)) ||
      SE->getSCEV(TripCountInst->getOperand(0)) != SCEVTripCount) {
    LLVM_DEBUG(dbgs() << "Could not find valid extended trip count\n");
    return false;
  }
  return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
}
// Finds the induction variable, increment and trip count for a simple loop that
// we can flatten.
static bool findLoopComponents(
    Loop *L, SmallPtrSetImpl<Instruction *> &IterationInstructions,
    PHINode *&InductionPHI, Value *&TripCount, BinaryOperator *&Increment,
    BranchInst *&BackBranch, ScalarEvolution *SE, bool IsWidened) {
  LLVM_DEBUG(dbgs() << "Finding components of loop: " << L->getName() << "\n");
  if (!L->isLoopSimplifyForm()) {
    LLVM_DEBUG(dbgs() << "Loop is not in normal form\n");
    return false;
  }

  // Currently, to simplify the implementation, the Loop induction variable must
  // start at zero and increment with a step size of one.
  if (!L->isCanonical(*SE)) {
    LLVM_DEBUG(dbgs() << "Loop is not canonical\n");
    return false;
  }
  // There must be exactly one exiting block, and it must be the same as the
  // latch.
  BasicBlock *Latch = L->getLoopLatch();
  if (L->getExitingBlock() != Latch) {
    LLVM_DEBUG(dbgs() << "Exiting and latch block are different\n");
    return false;
  }
  // Find the induction PHI. If there is no induction PHI, we can't do the
  // transformation. TODO: could other variables trigger this? Do we have to
  // search for the best one?
  InductionPHI = L->getInductionVariable(*SE);
  if (!InductionPHI) {
    LLVM_DEBUG(dbgs() << "Could not find induction PHI\n");
    return false;
  }
  LLVM_DEBUG(dbgs() << "Found induction PHI: "; InductionPHI->dump());
  bool ContinueOnTrue = L->contains(Latch->getTerminator()->getSuccessor(0));
  auto IsValidPredicate = [&](ICmpInst::Predicate Pred) {
    if (ContinueOnTrue)
      return Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT;
    else
      return Pred == CmpInst::ICMP_EQ;
  };
  // Find Compare and make sure it is valid. getLatchCmpInst checks that the
  // back branch of the latch is conditional.
  ICmpInst *Compare = L->getLatchCmpInst();
  if (!Compare || !IsValidPredicate(Compare->getUnsignedPredicate()) ||
      Compare->hasNUsesOrMore(2)) {
    LLVM_DEBUG(dbgs() << "Could not find valid comparison\n");
    return false;
  }
  BackBranch = cast<BranchInst>(Latch->getTerminator());
  IterationInstructions.insert(BackBranch);
  LLVM_DEBUG(dbgs() << "Found back branch: "; BackBranch->dump());
  IterationInstructions.insert(Compare);
  LLVM_DEBUG(dbgs() << "Found comparison: "; Compare->dump());
  // Find increment and trip count.
  // There are exactly 2 incoming values to the induction phi; one from the
  // pre-header and one from the latch. The incoming latch value is the
  // increment variable.
  Increment =
      cast<BinaryOperator>(InductionPHI->getIncomingValueForBlock(Latch));
  if ((Compare->getOperand(0) != Increment || !Increment->hasNUses(2)) &&
      !Increment->hasNUses(1)) {
    LLVM_DEBUG(dbgs() << "Could not find valid increment\n");
    return false;
  }
  // The trip count is the RHS of the compare. If this doesn't match the trip
  // count computed by SCEV then this is because the trip count variable
  // has been widened so the types don't match, or because it is a constant and
  // another transformation has changed the compare (e.g. icmp ult %inc,
  // tripcount -> icmp ult %j, tripcount-1), or both.
  Value *RHS = Compare->getOperand(1);

  return verifyTripCount(RHS, L, IterationInstructions, InductionPHI, TripCount,
                         Increment, BackBranch, SE, IsWidened);
}
static bool checkPHIs(FlattenInfo &FI, const TargetTransformInfo *TTI) {
  // All PHIs in the inner and outer headers must either be:
  //  - The induction PHI, which we are going to rewrite as one induction in
  //    the new loop. This is already checked by findLoopComponents.
  //  - An outer header PHI with all incoming values from outside the loop.
  //    LoopSimplify guarantees we have a pre-header, so we don't need to
  //    worry about that here.
  //  - Pairs of PHIs in the inner and outer headers, which implement a
  //    loop-carried dependency that will still be valid in the new loop. To
  //    be valid, this variable must be modified only in the inner loop.
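  //
  // As an illustration of the last case (example source only, not taken from
  // a test), a reduction such as
  //
  //   int sum = 0;
  //   for (int i = 0; i < N; ++i)
  //     for (int j = 0; j < M; ++j)
  //       sum += A[i*M+j];
  //
  // typically shows up as an inner-header PHI for sum whose pre-header value
  // is an outer-header PHI, with the updated value fed back through an LCSSA
  // PHI in the inner loop's exit block.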
  // The set of PHI nodes in the outer loop header that we know will still be
  // valid after the transformation. These will not need to be modified (with
  // the exception of the induction variable), but we do need to check that
  // there are no unsafe PHI nodes.
  SmallPtrSet<PHINode *, 4> SafeOuterPHIs;
  SafeOuterPHIs.insert(FI.OuterInductionPHI);
  // Check that all PHI nodes in the inner loop header match one of the valid
  // patterns.
  for (PHINode &InnerPHI : FI.InnerLoop->getHeader()->phis()) {
    // The induction PHIs break these rules, and that's OK because we treat
    // them specially when doing the transformation.
    if (&InnerPHI == FI.InnerInductionPHI)
      continue;
    if (FI.isNarrowInductionPhi(&InnerPHI))
      continue;

    // Each inner loop PHI node must have two incoming values/blocks - one
    // from the pre-header, and one from the latch.
    assert(InnerPHI.getNumIncomingValues() == 2);
    Value *PreHeaderValue =
        InnerPHI.getIncomingValueForBlock(FI.InnerLoop->getLoopPreheader());
    Value *LatchValue =
        InnerPHI.getIncomingValueForBlock(FI.InnerLoop->getLoopLatch());
    // The incoming value from the outer loop must be the PHI node in the
    // outer loop header, with no modifications made in the top of the outer
    // loop.
    PHINode *OuterPHI = dyn_cast<PHINode>(PreHeaderValue);
    if (!OuterPHI || OuterPHI->getParent() != FI.OuterLoop->getHeader()) {
      LLVM_DEBUG(dbgs() << "value modified in top of outer loop\n");
      return false;
    }

    // The other incoming value must come from the inner loop, without any
    // modifications in the tail end of the outer loop. We are in LCSSA form,
    // so this will actually be a PHI in the inner loop's exit block, which
    // only uses values from inside the inner loop.
    PHINode *LCSSAPHI = dyn_cast<PHINode>(
        OuterPHI->getIncomingValueForBlock(FI.OuterLoop->getLoopLatch()));
    if (!LCSSAPHI) {
      LLVM_DEBUG(dbgs() << "could not find LCSSA PHI\n");
      return false;
    }

    // The value used by the LCSSA PHI must be the same one that the inner
    // loop latch gives to the inner PHI, i.e. the LatchValue found above.
    if (LCSSAPHI->hasConstantValue() != LatchValue) {
      LLVM_DEBUG(
          dbgs() << "LCSSA PHI incoming value does not match latch value\n");
      return false;
    }
    LLVM_DEBUG(dbgs() << "PHI pair is safe:\n");
    LLVM_DEBUG(dbgs() << "  Inner: "; InnerPHI.dump());
    LLVM_DEBUG(dbgs() << "  Outer: "; OuterPHI->dump());
    SafeOuterPHIs.insert(OuterPHI);
    FI.InnerPHIsToTransform.insert(&InnerPHI);
  }
  for (PHINode &OuterPHI : FI.OuterLoop->getHeader()->phis()) {
    if (FI.isNarrowInductionPhi(&OuterPHI))
      continue;
    if (!SafeOuterPHIs.count(&OuterPHI)) {
      LLVM_DEBUG(dbgs() << "found unsafe PHI in outer loop: "; OuterPHI.dump());
      return false;
    }
  }
  LLVM_DEBUG(dbgs() << "checkPHIs: OK\n");
  return true;
}
static bool
checkOuterLoopInsts(FlattenInfo &FI,
                    SmallPtrSetImpl<Instruction *> &IterationInstructions,
                    const TargetTransformInfo *TTI) {
  // Check for instructions in the outer but not inner loop. If any of these
  // have side-effects then this transformation is not legal, and if there is
  // a significant amount of code here which can't be optimised out then it's
  // not profitable (as these instructions would get executed for each
  // iteration of the inner loop).
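  //
  // For example (illustrative only): an outer-loop-body instruction that is
  // not part of the inner loop, such as
  //
  //   int t = x * y;   // outer-only arithmetic with no side-effects
  //
  // executes N times today but would execute N*M times in the flattened loop,
  // so its cost is added to RepeatedInstrCost below and checked against the
  // threshold.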
  InstructionCost RepeatedInstrCost = 0;
  for (auto *B : FI.OuterLoop->getBlocks()) {
    if (FI.InnerLoop->contains(B))
      continue;

    for (auto &I : *B) {
      if (!isa<PHINode>(&I) && !I.isTerminator() &&
          !isSafeToSpeculativelyExecute(&I)) {
        LLVM_DEBUG(dbgs() << "Cannot flatten because instruction may have "
                             "side effects: ";
                   I.dump());
        return false;
      }
      // The execution count of the outer loop's iteration instructions
      // (increment, compare and branch) will be increased, but the
      // equivalent instructions will be removed from the inner loop, so
      // they make a net difference of zero.
      if (IterationInstructions.count(&I))
        continue;
      // The unconditional branch to the inner loop's header will turn into
      // a fall-through, so adds no cost.
      BranchInst *Br = dyn_cast<BranchInst>(&I);
      if (Br && Br->isUnconditional() &&
          Br->getSuccessor(0) == FI.InnerLoop->getHeader())
        continue;
      // Multiplies of the outer iteration variable and inner iteration
      // count will be optimised out.
      if (match(&I, m_c_Mul(m_Specific(FI.OuterInductionPHI),
                            m_Specific(FI.InnerTripCount))))
        continue;
      InstructionCost Cost =
          TTI->getInstructionCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
      LLVM_DEBUG(dbgs() << "Cost " << Cost << ": "; I.dump());
      RepeatedInstrCost += Cost;
    }
  }
  LLVM_DEBUG(dbgs() << "Cost of instructions that will be repeated: "
                    << RepeatedInstrCost << "\n");
  // Bail out if flattening the loops would cause instructions in the outer
  // loop but not in the inner loop to be executed extra times.
  if (RepeatedInstrCost > RepeatedInstructionThreshold) {
    LLVM_DEBUG(dbgs() << "checkOuterLoopInsts: not profitable, bailing.\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "checkOuterLoopInsts: OK\n");
  return true;
}
// We require all uses of both induction variables to match this pattern:
//
//   (OuterPHI * InnerTripCount) + InnerPHI
//
// Any uses of the induction variables not matching that pattern would
// require a div/mod to reconstruct in the flattened loop, so the
// transformation wouldn't be profitable.
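//
// For instance (illustrative source only, not from a test), a body like
//
//   B[j] += A[i*M+j];
//
// uses j on its own, so in the flattened loop with a single induction
// variable iv it would have to be reconstructed as j = iv % M (and
// i = iv / M), which defeats the point of the transformation.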
static bool checkIVUsers(FlattenInfo &FI) {
  // Check that all uses of the inner loop's induction variable match the
  // expected pattern, recording the uses of the outer IV.
  SmallPtrSet<Value *, 4> ValidOuterPHIUses;
  if (!FI.checkInnerInductionPhiUsers(ValidOuterPHIUses))
    return false;

  // Check that there are no uses of the outer IV other than the ones found
  // as part of the pattern above.
  if (!FI.checkOuterInductionPhiUsers(ValidOuterPHIUses))
    return false;

  LLVM_DEBUG(dbgs() << "checkIVUsers: OK\n";
             dbgs() << "Found " << FI.LinearIVUses.size()
                    << " value(s) that can be replaced:\n";
             for (Value *V : FI.LinearIVUses) {
               dbgs() << "  ";
               V->dump();
             });
  return true;
}
// Return an OverflowResult dependent on whether overflow of the multiplication
// of InnerTripCount and OuterTripCount can be assumed not to happen.
static OverflowResult checkOverflow(FlattenInfo &FI, DominatorTree *DT,
                                    AssumptionCache *AC) {
  Function *F = FI.OuterLoop->getHeader()->getParent();
  const DataLayout &DL = F->getParent()->getDataLayout();

  // For debugging/testing.
  if (AssumeNoOverflow)
    return OverflowResult::NeverOverflows;
  // Check if the multiply could not overflow due to known ranges of the
  // input values.
  OverflowResult OR = computeOverflowForUnsignedMul(
      FI.InnerTripCount, FI.OuterTripCount,
      SimplifyQuery(DL, DT, AC,
                    FI.OuterLoop->getLoopPreheader()->getTerminator()));
  if (OR != OverflowResult::MayOverflow)
    return OR;
  auto CheckGEP = [&](GetElementPtrInst *GEP, Value *GEPOperand) {
    for (Value *GEPUser : GEP->users()) {
      auto *GEPUserInst = cast<Instruction>(GEPUser);
      if (!isa<LoadInst>(GEPUserInst) &&
          !(isa<StoreInst>(GEPUserInst) && GEP == GEPUserInst->getOperand(1)))
        continue;
      if (!isGuaranteedToExecuteForEveryIteration(GEPUserInst, FI.InnerLoop))
        continue;
      // The IV is used as the operand of a GEP which dominates the loop
      // latch, and the IV is at least as wide as the address space of the
      // GEP. In this case, the GEP would wrap around the address space
      // before the IV increment wraps, which would be UB.
      if (GEP->isInBounds() &&
          GEPOperand->getType()->getIntegerBitWidth() >=
              DL.getPointerTypeSizeInBits(GEP->getType())) {
        LLVM_DEBUG(
            dbgs() << "use of linear IV would be UB if overflow occurred: ";
            GEP->dump());
        return true;
      }
    }
    return false;
  };
  // Check if any IV user is, or is used by, a GEP that would cause UB if the
  // multiply overflows.
  for (Value *V : FI.LinearIVUses) {
    if (auto *GEP = dyn_cast<GetElementPtrInst>(V))
      if (GEP->getNumIndices() == 1 && CheckGEP(GEP, GEP->getOperand(1)))
        return OverflowResult::NeverOverflows;
    for (Value *U : V->users())
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
        if (CheckGEP(GEP, V))
          return OverflowResult::NeverOverflows;
  }

  return OverflowResult::MayOverflow;
}
static bool CanFlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
                               ScalarEvolution *SE, AssumptionCache *AC,
                               const TargetTransformInfo *TTI) {
  SmallPtrSet<Instruction *, 8> IterationInstructions;
  if (!findLoopComponents(FI.InnerLoop, IterationInstructions,
                          FI.InnerInductionPHI, FI.InnerTripCount,
                          FI.InnerIncrement, FI.InnerBranch, SE, FI.Widened))
    return false;
  if (!findLoopComponents(FI.OuterLoop, IterationInstructions,
                          FI.OuterInductionPHI, FI.OuterTripCount,
                          FI.OuterIncrement, FI.OuterBranch, SE, FI.Widened))
    return false;
  // Both of the loop trip count values must be invariant in the outer loop
  // (non-instructions are all inherently invariant).
  if (!FI.OuterLoop->isLoopInvariant(FI.InnerTripCount)) {
    LLVM_DEBUG(dbgs() << "inner loop trip count not invariant\n");
    return false;
  }
  if (!FI.OuterLoop->isLoopInvariant(FI.OuterTripCount)) {
    LLVM_DEBUG(dbgs() << "outer loop trip count not invariant\n");
    return false;
  }
  if (!checkPHIs(FI, TTI))
    return false;

  // FIXME: it should be possible to handle different types correctly.
  if (FI.InnerInductionPHI->getType() != FI.OuterInductionPHI->getType())
    return false;

  if (!checkOuterLoopInsts(FI, IterationInstructions, TTI))
    return false;

  // Find the values in the loop that can be replaced with the linearized
  // induction variable, and check that there are no other uses of the inner
  // or outer induction variable. If there were, we could still do this
  // transformation, but we'd have to insert a div/mod to calculate the
  // original IVs, so it wouldn't be profitable.
  if (!checkIVUsers(FI))
    return false;

  LLVM_DEBUG(dbgs() << "CanFlattenLoopPair: OK\n");
  return true;
}
static bool DoFlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
                              ScalarEvolution *SE, AssumptionCache *AC,
                              const TargetTransformInfo *TTI, LPMUpdater *U,
                              MemorySSAUpdater *MSSAU) {
  Function *F = FI.OuterLoop->getHeader()->getParent();
  LLVM_DEBUG(dbgs() << "Checks all passed, doing the transformation\n");
  {
    using namespace ore;
    OptimizationRemark Remark(DEBUG_TYPE, "Flattened", FI.InnerLoop->getStartLoc(),
                              FI.InnerLoop->getHeader());
    OptimizationRemarkEmitter ORE(F);
    Remark << "Flattened into outer loop";
    ORE.emit(Remark);
  }
  Value *NewTripCount = BinaryOperator::CreateMul(
      FI.InnerTripCount, FI.OuterTripCount, "flatten.tripcount",
      FI.OuterLoop->getLoopPreheader()->getTerminator());
  LLVM_DEBUG(dbgs() << "Created new trip count in preheader: ";
             NewTripCount->dump());
  // Fix up PHI nodes that take values from the inner loop back-edge, which
  // we are about to remove.
  FI.InnerInductionPHI->removeIncomingValue(FI.InnerLoop->getLoopLatch());

  // The old PHIs will be optimised away later, but for now we can't leave
  // them in an invalid state, so we are updating them too.
  for (PHINode *PHI : FI.InnerPHIsToTransform)
    PHI->removeIncomingValue(FI.InnerLoop->getLoopLatch());
  // Modify the trip count of the outer loop to be the product of the two
  // trip counts.
  cast<User>(FI.OuterBranch->getCondition())->setOperand(1, NewTripCount);

  // Replace the inner loop backedge with an unconditional branch to the exit.
  BasicBlock *InnerExitBlock = FI.InnerLoop->getExitBlock();
  BasicBlock *InnerExitingBlock = FI.InnerLoop->getExitingBlock();
  InnerExitingBlock->getTerminator()->eraseFromParent();
  BranchInst::Create(InnerExitBlock, InnerExitingBlock);
  // Update the DomTree and MemorySSA.
  DT->deleteEdge(InnerExitingBlock, FI.InnerLoop->getHeader());
  if (MSSAU)
    MSSAU->removeEdge(InnerExitingBlock, FI.InnerLoop->getHeader());
  // Replace all uses of the polynomial calculated from the two induction
  // variables with the one new one.
  IRBuilder<> Builder(FI.OuterInductionPHI->getParent()->getTerminator());
  for (Value *V : FI.LinearIVUses) {
    Value *OuterValue = FI.OuterInductionPHI;
    if (FI.Widened)
      OuterValue = Builder.CreateTrunc(FI.OuterInductionPHI, V->getType(),
                                       "flatten.trunciv");

    if (auto *GEP = dyn_cast<GetElementPtrInst>(V)) {
      // Replace the GEP with one that uses OuterValue as the offset.
      auto *InnerGEP = cast<GetElementPtrInst>(GEP->getOperand(0));
      Value *Base = InnerGEP->getOperand(0);
      // When the base of the GEP doesn't dominate the outer induction phi then
      // we need to insert the new GEP where the old GEP was.
      if (!DT->dominates(Base, &*Builder.GetInsertPoint()))
        Builder.SetInsertPoint(cast<Instruction>(V));
      OuterValue = Builder.CreateGEP(GEP->getSourceElementType(), Base,
                                     OuterValue, "flatten." + V->getName());
    }

    LLVM_DEBUG(dbgs() << "Replacing: "; V->dump(); dbgs() << "with: ";
               OuterValue->dump());
    V->replaceAllUsesWith(OuterValue);
  }
  // Tell LoopInfo, SCEV and the pass manager that the inner loop has been
  // deleted, and invalidate any outer loop information.
  SE->forgetLoop(FI.OuterLoop);
  SE->forgetBlockAndLoopDispositions();
  if (U)
    U->markLoopAsDeleted(*FI.InnerLoop, FI.InnerLoop->getName());
  LI->erase(FI.InnerLoop);
825 static bool CanWidenIV(FlattenInfo
&FI
, DominatorTree
*DT
, LoopInfo
*LI
,
826 ScalarEvolution
*SE
, AssumptionCache
*AC
,
827 const TargetTransformInfo
*TTI
) {
829 LLVM_DEBUG(dbgs() << "Widening the IVs is disabled\n");
833 LLVM_DEBUG(dbgs() << "Try widening the IVs\n");
834 Module
*M
= FI
.InnerLoop
->getHeader()->getParent()->getParent();
835 auto &DL
= M
->getDataLayout();
836 auto *InnerType
= FI
.InnerInductionPHI
->getType();
837 auto *OuterType
= FI
.OuterInductionPHI
->getType();
838 unsigned MaxLegalSize
= DL
.getLargestLegalIntTypeSizeInBits();
839 auto *MaxLegalType
= DL
.getLargestLegalIntType(M
->getContext());
  // If both induction types are less than the maximum legal integer width,
  // promote both to the widest type available so we know calculating
  // (OuterTripCount * InnerTripCount) as the new trip count is safe.
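  //
  // For example (illustrative numbers only): two i32 IVs on a target whose
  // largest legal integer type is i64 are both promoted to i64, and since
  // 64 >= 2 * 32 the product of two 32-bit trip counts can never overflow
  // the widened type.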
  if (InnerType != OuterType ||
      InnerType->getScalarSizeInBits() >= MaxLegalSize ||
      MaxLegalType->getScalarSizeInBits() <
          InnerType->getScalarSizeInBits() * 2) {
    LLVM_DEBUG(dbgs() << "Can't widen the IV\n");
    return false;
  }
  SCEVExpander Rewriter(*SE, DL, "loopflatten");
  SmallVector<WeakTrackingVH, 4> DeadInsts;
  unsigned ElimExt = 0;
  unsigned Widened = 0;
  auto CreateWideIV = [&](WideIVInfo WideIV, bool &Deleted) -> bool {
    PHINode *WidePhi =
        createWideIV(WideIV, LI, SE, Rewriter, DT, DeadInsts, ElimExt, Widened,
                     true /* HasGuards */, true /* UsePostIncrementRanges */);
    if (!WidePhi)
      return false;
    LLVM_DEBUG(dbgs() << "Created wide phi: "; WidePhi->dump());
    LLVM_DEBUG(dbgs() << "Deleting old phi: "; WideIV.NarrowIV->dump());
    Deleted = RecursivelyDeleteDeadPHINode(WideIV.NarrowIV);
    return true;
  };
  bool Deleted;
  if (!CreateWideIV({FI.InnerInductionPHI, MaxLegalType, false}, Deleted))
    return false;
  // Add the narrow phi to the list, so that it will be adjusted later when
  // the transformation is performed.
  if (!Deleted)
    FI.InnerPHIsToTransform.insert(FI.InnerInductionPHI);

  if (!CreateWideIV({FI.OuterInductionPHI, MaxLegalType, false}, Deleted))
    return false;

  assert(Widened && "Widened IV expected");
  FI.Widened = true;
  // Save the old/narrow induction phis, which we need to ignore in CheckPHIs.
  FI.NarrowInnerInductionPHI = FI.InnerInductionPHI;
  FI.NarrowOuterInductionPHI = FI.OuterInductionPHI;
  // After widening, rediscover all the loop components.
  return CanFlattenLoopPair(FI, DT, LI, SE, AC, TTI);
}
static bool FlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
                            ScalarEvolution *SE, AssumptionCache *AC,
                            const TargetTransformInfo *TTI, LPMUpdater *U,
                            MemorySSAUpdater *MSSAU) {
  LLVM_DEBUG(
      dbgs() << "Loop flattening running on outer loop "
             << FI.OuterLoop->getHeader()->getName() << " and inner loop "
             << FI.InnerLoop->getHeader()->getName() << " in "
             << FI.OuterLoop->getHeader()->getParent()->getName() << "\n");
  if (!CanFlattenLoopPair(FI, DT, LI, SE, AC, TTI))
    return false;

  // Check if we can widen the induction variables to avoid overflow checks.
  bool CanFlatten = CanWidenIV(FI, DT, LI, SE, AC, TTI);
  // It can happen that after widening of the IV, flattening may not be
  // possible/happening, e.g. when it is deemed unprofitable. So bail here if
  // this case happens.
  // TODO: IV widening without performing the actual flattening transformation
  // is not ideal. While this codegen change should not matter much, it is an
  // unnecessary change which is better to avoid. It's unlikely this happens
  // often, because if it's unprofitable after widening, it should be
  // unprofitable before widening as checked in the first round of checks. But
  // 'RepeatedInstructionThreshold' is set to only 2, which can probably be
  // relaxed. Because this is making a code change (the IV widening, but not
  // the flattening), we return true here.
  if (FI.Widened && !CanFlatten)
    return true;
  // If we have widened and can perform the transformation, do that here.
  if (CanFlatten)
    return DoFlattenLoopPair(FI, DT, LI, SE, AC, TTI, U, MSSAU);
  // Otherwise, if we haven't widened the IV, check if the new iteration
  // variable might overflow. In this case, we need to version the loop, and
  // select the original version at runtime if the iteration space is too
  // large.
  // TODO: We currently don't version the loop.
  OverflowResult OR = checkOverflow(FI, DT, AC);
  if (OR == OverflowResult::AlwaysOverflowsHigh ||
      OR == OverflowResult::AlwaysOverflowsLow) {
    LLVM_DEBUG(dbgs() << "Multiply would always overflow, so not profitable\n");
    return false;
  } else if (OR == OverflowResult::MayOverflow) {
    LLVM_DEBUG(dbgs() << "Multiply might overflow, not flattening\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Multiply cannot overflow, modifying loop in-place\n");
  return DoFlattenLoopPair(FI, DT, LI, SE, AC, TTI, U, MSSAU);
}
PreservedAnalyses LoopFlattenPass::run(LoopNest &LN, LoopAnalysisManager &LAM,
                                       LoopStandardAnalysisResults &AR,
                                       LPMUpdater &U) {
  bool Changed = false;

  std::optional<MemorySSAUpdater> MSSAU;
  if (AR.MSSA) {
    MSSAU = MemorySSAUpdater(AR.MSSA);
    if (VerifyMemorySSA)
      AR.MSSA->verifyMemorySSA();
  }
  // The loop flattening pass requires loops to be
  // in simplified form, and also needs LCSSA. Running
  // this pass will simplify all loops that contain inner loops,
  // regardless of whether anything ends up being flattened.
  for (Loop *InnerLoop : LN.getLoops()) {
    auto *OuterLoop = InnerLoop->getParentLoop();
    if (!OuterLoop)
      continue;
    FlattenInfo FI(OuterLoop, InnerLoop);
    Changed |= FlattenLoopPair(FI, &AR.DT, &AR.LI, &AR.SE, &AR.AC, &AR.TTI, &U,
                               MSSAU ? &*MSSAU : nullptr);
  }
  if (!Changed)
    return PreservedAnalyses::all();

  if (AR.MSSA && VerifyMemorySSA)
    AR.MSSA->verifyMemorySSA();

  auto PA = getLoopPassPreservedAnalyses();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}