//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace PatternMatch;
/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users())
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", &*IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), &*IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT.dominates(Ret, &*BIP));

  rememberInstruction(Ret);
  return Ret;
}
static BasicBlock::iterator findInsertPointAfter(Instruction *I,
                                                 BasicBlock *MustDominate) {
  BasicBlock::iterator IP = ++I->getIterator();
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
    ++IP;
  } else if (isa<CatchSwitchInst>(IP)) {
    IP = MustDominate->getFirstInsertionPt();
  } else {
    assert(!IP->isEHPad() && "unexpected eh pad!");
  }

  return IP;
}
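// Illustrative note (not part of the original comments): for an ordinary
// instruction the insert point is simply the next iterator in the same
// block, but an invoke terminates its block, so the only usable point is
// the front of its normal destination; PHIs and EH pads are then skipped,
// and a catchswitch forces a retreat to MustDominate's first insertion
// point, since nothing can be inserted after a catchswitch itself.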
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = findInsertPointAfter(I, Builder.GetInsertBlock());
  return ReuseOrCreateCast(I, Ty, Op, IP);
}
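// For example, expanding an i64 expression over a value V of type i8*
// typically yields "ptrtoint i8* %v to i64"; if V itself came from
// "inttoptr i64 %x to i8*", the short-circuits above simply return %x
// instead of stacking a second, redundant cast.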
/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;

      // Conservatively, do not use any instruction which has any of wrap/exact
      // flags installed.
      // TODO: Instead of simply disable poison instructions we can be clever
      //       here and match SCEV to this instruction.
      auto canGeneratePoison = [](Instruction *I) {
        if (isa<OverflowingBinaryOperator>(I) &&
            (I->hasNoSignedWrap() || I->hasNoUnsignedWrap()))
          return true;
        if (isa<PossiblyExactOperator>(I) && I->isExact())
          return true;
        return false;
      };
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS && !canGeneratePoison(&*IP))
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  SCEVInsertPointGuard Guard(Builder, this);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  rememberInstruction(BO);

  return BO;
}
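// Example of the reuse scan above: expanding a+b twice at the same insertion
// point finds the existing "add %a, %b" within the six-instruction window
// and returns it rather than emitting a duplicate. Instructions carrying
// nuw/nsw/exact are skipped because reusing them in a new context could
// introduce poison the original SCEV did not imply.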
/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
///
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
          ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder = SE.getAddExpr(
            Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // Size is known, check if there is a constant operand which is a multiple
    // of the given factor. If so, we can factor it.
    const SCEVConstant *FC = cast<SCEVConstant>(Factor);
    if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
      if (!C->getAPInt().srem(FC->getAPInt())) {
        SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
        NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
        S = SE.getMulExpr(NewMulOps);
        return true;
      }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}
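// Worked example (illustrative): with S = {8,+,12} and Factor = 4, the
// AddRec case divides start and step separately, rewriting S to {2,+,3}
// with a zero remainder; with S = 10 and Factor = 4, the constant case
// yields quotient 2 and folds the leftover 2 into Remainder.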
/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}
/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}
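// Illustrative example: the operand list [ {a+b,+,c} ] becomes
// [ a, b, {0,+,c} ] after flattening and re-sorting, so the loop-invariant
// parts a and b can be folded into GEP indices independently of the
// recurrence itself.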
/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *OriginalElTy = PTy->getElementType();
  Type *ElTy = OriginalElTy;
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntPtrTy = DL.getIntPtrType(PTy);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (const SCEV *Op : Ops) {
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Op);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      // Field offsets are known. See if a constant offset falls within any of
      // the struct fields.
      if (Ops.empty())
        break;
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
        if (SE.getTypeSizeInBits(C->getType()) <= 64) {
          const StructLayout &SL = *DL.getStructLayout(STy);
          uint64_t FullOffset = C->getValue()->getZExtValue();
          if (FullOffset < SL.getSizeInBytes()) {
            unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
            GepIndices.push_back(
                ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
            ElTy = STy->getTypeAtIndex(ElIdx);
            Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
            AnyNonZeroIndices = true;
            FoundFieldNo = true;
          }
        }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
            Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
        Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                              CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return &*IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
    rememberInstruction(GEP);

    return GEP;
  }

  {
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V)) break;

      bool AnyIndexNotLoopInvariant = any_of(
          GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });

      if (AnyIndexNotLoopInvariant)
        break;

      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
    // because ScalarEvolution may have changed the address arithmetic to
    // compute a value which is beyond the end of the allocated object.
    Value *Casted = V;
    if (V->getType() != PTy)
      Casted = InsertNoopCastOfTo(Casted, PTy);
    Value *GEP = Builder.CreateGEP(OriginalElTy, Casted, GepIndices, "scevgep");
    Ops.push_back(SE.getUnknown(GEP));
    rememberInstruction(GEP);
  }

  return expand(SE.getAddExpr(Ops));
}
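// Illustrative contrast between the two paths above: when an operand
// factors evenly by the element size we emit a typed GEP such as
//   %scevgep = getelementptr i32, i32* %base, i64 %i
// and otherwise we fall back to a raw byte offset,
//   %uglygep = getelementptr i8, i8* %cast, i64 %offset
// which is still far friendlier to alias analysis than a
// ptrtoint/add/inttoptr sequence would be.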
Value *SCEVExpander::expandAddToGEP(const SCEV *Op, PointerType *PTy, Type *Ty,
                                    Value *V) {
  const SCEV *const Ops[1] = {Op};
  return expandAddToGEP(Ops, Ops + 1, PTy, Ty, V);
}
/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}
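// Example: for an expression mixing values from an inner loop and its
// parent, the inner (most nested) loop wins; for two sibling loops, the
// one whose header is dominated by the other's header is "later" and wins.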
/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : N->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result = PickMostRelevantLoop(
        getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
    return RelevantLoops[D] = Result;
  }

  llvm_unreachable("Unexpected SCEV type!");
}
namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

} // end anonymous namespace
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not instructions, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}
Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  auto I = OpsAndLoops.begin();

  // Expand the calculation of X pow N in the following manner:
  // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
  // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
  const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops, &Ty]() {
    auto E = I;
    // Calculate how many times the same operand from the same loop is included
    // into this power.
    uint64_t Exponent = 0;
    const uint64_t MaxExponent = UINT64_MAX >> 1;
    // No one sane will ever try to calculate such huge exponents, but if we
    // need this, we stop on UINT64_MAX / 2 because we need to exit the loop
    // below when the power of 2 exceeds our Exponent, and we want it to be
    // 1u << 31 at most to not deal with unsigned overflow.
    while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
      ++Exponent;
      ++E;
    }
    assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");

    // Calculate powers with exponents 1, 2, 4, 8 etc. and include those of them
    // that are needed into the result.
    Value *P = expandCodeFor(I->second, Ty);
    Value *Result = nullptr;
    if (Exponent & 1)
      Result = P;
    for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
      P = InsertBinop(Instruction::Mul, P, P);
      if (Exponent & BinExp)
        Result = Result ? InsertBinop(Instruction::Mul, Result, P) : P;
    }

    I = E;
    assert(Result && "Nothing was expanded?");
    return Result;
  };

  while (I != OpsAndLoops.end()) {
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = ExpandOpBinPowN();
    } else if (I->second->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = ExpandOpBinPowN();
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()));
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W);
      }
    }
  }

  return Prod;
}
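// The binary-powering lambda above expands repeated factors cheaply. For
// instance X*X*X*X*X*X (Exponent = 6 = 2 + 4) costs three multiplies:
//   %x2 = mul %x, %x
//   %x4 = mul %x2, %x2
//   %r  = mul %x2, %x4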
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getAPInt();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}
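// Example: a udiv by the constant 8 is emitted as "lshr %lhs, 3" via the
// power-of-two fast path above; only non-power-of-two divisors produce a
// real udiv instruction.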
/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}
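// Illustrative example: given Base = {a+p,+,s}, the while loop peels the
// recurrence, leaving Base = a+p with Rest accumulating {0,+,s}; the add
// case then takes the last operand p as the new Base and folds a into
// Rest, exposing a plain pointer suitable as a GEP base.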
/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
           OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT.dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}
/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (auto I = IncV->op_begin() + 1, E = IncV->op_end(); I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}
/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'. This is intended to be used when the instruction
/// 'I' is being moved. If this fixup is not done and 'I' is moved to a
/// different block, the inconsistent insert point (with a mismatched
/// Instruction and Block) can lead to an instruction being inserted in a block
/// other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {
  BasicBlock::iterator It(*I);
  BasicBlock::iterator NewInsertPt = std::next(It);
  if (Builder.GetInsertPoint() == It)
    Builder.SetInsertPoint(&*NewInsertPt);
  for (auto *InsertPtGuard : InsertPointGuards)
    if (InsertPtGuard->GetInsertPoint() == It)
      InsertPtGuard->SetInsertPoint(NewInsertPt);
}
/// hoistStep - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT.dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for (;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I) {
    fixupInsertPoints(*I);
    (*I)->moveBefore(InsertPos);
  }
  return true;
}
/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for(Instruction *IVOper = IncV;
      (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}
/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    IncV = expandAddToGEP(SE.getSCEV(StepV), GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}
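// Example (illustrative): a pointer IV over i32 with constant step 4 emits
//   %iv.next = getelementptr i32, i32* %iv, i64 1
// while a non-constant step is expanded over i1*, which the expander uses
// to represent address-sized elements, so no implicit multiply is smuggled
// into the loop; integer IVs get a plain add, or a sub for a negative
// non-constant step.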
/// Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
void SCEVExpander::hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                                  Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    fixupInsertPoints(InstToHoist);
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}
/// Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncate it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}
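// The inversion check relies on the identity {R,+,-1} == R - {0,+,1}: an
// available phi computing {0,+,1} can serve a request for {R,+,-1}, since
// the caller only needs a single subtract from R rather than a new IV.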
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}
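// Both checks above use the same widening trick: the increment of AR by
// Step cannot wrap (signed resp. unsigned) exactly when performing the add
// in a type twice as wide, before and after extension, yields the same
// SCEV, i.e. ext(AR + Step) == ext(AR) + ext(Step).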
/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV =
        IVIncInsertLoop &&
        SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (PHINode &PN : L->getHeader()->phis()) {
      if (!SE.isSCEVable(PN.getType()))
        continue;

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      // TODO: this possibly can be reworked to avoid this cast at all.
      Instruction *TempIncV =
          dyn_cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
      if (!TempIncV)
        continue;

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = &PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop we might find an exact match
        // SCEV later.
        AddRecPhiMatch = &PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value into the loop preheader.
  assert(L->getLoopPreheader() &&
         "Can't expand add recurrences without a loop preheader!");
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getLoopPreheader()->getTerminator());

  // StartV must have been inserted into L's preheader to dominate the new
  // phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
                                 L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition. It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}
Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized = cast<SCEVAddRecExpr>(normalizeForPostIncUse(S, Loops, SE));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
        SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                         Normalized->getLoop(),
                         Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    if (!Start->isZero()) {
      // The normalization below assumes that Start is constant zero, so if
      // it isn't re-associate Start to PostLoopOffset.
      assert(!PostLoopOffset && "Start not-null but PostLoopOffset set?");
      PostLoopOffset = Start;
      Start = SE.getConstant(Normalized->getType(), 0);
    }
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(
                             Start, Step, Normalized->getLoop(),
                             Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // We can't use a pointer type for the addrec if the pointer type is
  // non-integral.
  Type *AddRecPHIExpandTy =
      DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;

  // In some cases, we decide to reuse an existing phi node but need to truncate
  // it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
                                          IntTy, TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result) &&
        !SE.DT.dominates(cast<Instruction>(Result),
                         &*Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside whose operand is replaced during
      // expansion with the value of the postinc user. Without fundamentally
      // changing the way postinc users are tracked, the only remedy is
      // inserting an extra IV increment. StepV might fold into PostLoopOffset,
      // but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        SCEVInsertPointGuard Guard(Builder, this);
        StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop. Apply
  // truncation and/or inversion of the step.
  if (TruncTy) {
    Type *ResTy = Result->getType();
    // Normalize the result type.
    if (ResTy != SE.getEffectiveSCEVType(ResTy))
      Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
    // Truncate the result.
    if (TruncTy != Result->getType()) {
      Result = Builder.CreateTrunc(Result, TruncTy);
      rememberInstruction(Result);
    }
    // Invert the result.
    if (InvertStep) {
      Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
                                 Result);
      rememberInstruction(Result);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      if (Result->getType()->isIntegerTy()) {
        Value *Base = expandCodeFor(PostLoopOffset, ExpandTy);
        Result = expandAddToGEP(SE.getUnknown(Result), PTy, IntTy, Base);
      } else {
        Result = expandAddToGEP(PostLoopOffset, PTy, IntTy, Result);
      }
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}
1466 Value
*SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr
*S
) {
1467 if (!CanonicalMode
) return expandAddRecExprLiterally(S
);
1469 Type
*Ty
= SE
.getEffectiveSCEVType(S
->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = nullptr;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is narrower.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       S->getNoWrapFlags(SCEV::FlagNW)));
    BasicBlock::iterator NewInsertPt =
        findInsertPointAfter(cast<Instruction>(V), Builder.GetInsertBlock());
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
                      &*NewInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
                                        S->getNoWrapFlags(SCEV::FlagNW));

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    // Dig into the expression to find the pointer base for a GEP.
    const SCEV *ExposedRest = Rest;
    ExposePointerBase(Base, ExposedRest, SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(ExposedRest, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    //
    // The LHS and RHS values are factored out of the expand call to make the
    // output independent of the argument evaluation order.
    const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
    const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
    return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  &Header->front());
    rememberInstruction(CanonicalIV);

    SmallSet<BasicBlock *, 4> PredSeen;
    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (!PredSeen.insert(HP).second) {
        // There must be an incoming value for each predecessor, even the
        // duplicates!
        CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
        continue;
      }

      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}
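// Illustrative sketch (not part of the original source): with a canonical
// IV %indvar = {0,+,1} in place, an affine recurrence {0,+,4} expands to
// roughly "mul i64 %indvar, 4", while {%x,+,4} is first rewritten as
// %x + {0,+,4} by the start-splitting case above.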
Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}
Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}
Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}
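// For illustration only: each cast visitor expands its operand at the
// operand's effective SCEV type and then emits a single IR cast, e.g. a
// sign-extension SCEV over an i32 operand becomes roughly
// "sext i32 %op to i64".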
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}
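// Illustrative lowering: smax(%a, %b) becomes a compare/select pair,
// roughly:
//   %cmp  = icmp sgt i64 %a, %b
//   %smax = select i1 %cmp, i64 %a, i64 %b
// Additional operands fold in as a chain of such selects.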
Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *IP) {
  setInsertPoint(IP);
  return expandCodeFor(SH, Ty);
}
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}
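// Typical client usage (illustrative; identifiers are made up):
//   SCEVExpander Expander(SE, DL, "scev");
//   Value *V = Expander.expandCodeFor(S, S->getType(), InsertPt);
// The three-argument overload simply positions the builder at InsertPt
// before delegating to the two-argument form above.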
ScalarEvolution::ValueOffsetPair
SCEVExpander::FindValueInExprValueMap(const SCEV *S,
                                      const Instruction *InsertPt) {
  SetVector<ScalarEvolution::ValueOffsetPair> *Set = SE.getSCEVValues(S);
  // If the expansion is not in CanonicalMode and the SCEV contains any
  // scAddRecExpr sub-expression, the SCEV must be expanded literally.
  if (CanonicalMode || !SE.containsAddRecurrence(S)) {
    // If S is scConstant, it may be worse to reuse an existing Value.
    if (S->getSCEVType() != scConstant && Set) {
      // Choose a Value from the set which dominates the insertPt.
      // insertPt should be inside the Value's parent loop so as not to break
      // LCSSA form.
      for (auto const &VOPair : *Set) {
        Value *V = VOPair.first;
        ConstantInt *Offset = VOPair.second;
        Instruction *EntInst = nullptr;
        if (V && isa<Instruction>(V) && (EntInst = cast<Instruction>(V)) &&
            S->getType() == V->getType() &&
            EntInst->getFunction() == InsertPt->getFunction() &&
            SE.DT.dominates(EntInst, InsertPt) &&
            (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
             SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
          return {V, Offset};
      }
    }
  }
  return {nullptr, nullptr};
}
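// Illustrative reading of the result: a pair {V, Offset} means
// SCEV(V) == S + Offset, so the caller (expand below) materializes S by
// subtracting Offset from V (or GEPing by -Offset for pointers).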
// The expansion of SCEV will either reuse a previous Value in ExprValueMap,
// or expand the SCEV literally. Specifically, if the expansion is in LSRMode,
// and the SCEV contains any scAddRecExpr sub-expression, it will be expanded
// literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
// the expansion will try to reuse a Value from ExprValueMap, and only when
// that fails expand the SCEV literally.
Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = &*Builder.GetInsertPoint();
  for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = &*L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // We can only move the insertion point if there are no div or rem
      // operations; otherwise we risk moving it past a check for a zero
      // denominator.
      auto SafeToHoist = [](const SCEV *S) {
        return !SCEVExprContains(S, [](const SCEV *S) {
          if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
            if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
              // Division by non-zero constants can be hoisted.
              return SC->getValue()->isZero();
            // All other divisions should not be moved as they may be
            // divisions by zero and should be kept within the
            // conditions of the surrounding loops that guard their
            // execution (see PR35406).
            return true;
          }
          return false;
        });
      };
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L) &&
          SafeToHoist(S))
        InsertPt = &*L->getHeader()->getFirstInsertionPt();
      while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
             (isInsertedInstruction(InsertPt) ||
              isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = &*std::next(InsertPt->getIterator());
      }
      break;
    }

  // Check to see if we already expanded this here.
  auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  SCEVInsertPointGuard Guard(Builder, this);
  Builder.SetInsertPoint(InsertPt);

  // Expand the expression into instructions.
  ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
  Value *V = VO.first;
  if (!V)
    V = visit(S);
  else if (VO.second) {
    if (PointerType *Vty = dyn_cast<PointerType>(V->getType())) {
      Type *Ety = Vty->getPointerElementType();
      int64_t Offset = VO.second->getSExtValue();
      int64_t ESize = SE.getTypeSizeInBits(Ety);
      if ((Offset * 8) % ESize == 0) {
        ConstantInt *Idx =
            ConstantInt::getSigned(VO.second->getType(), -(Offset * 8) / ESize);
        V = Builder.CreateGEP(Ety, V, Idx, "scevgep");
      } else {
        ConstantInt *Idx =
            ConstantInt::getSigned(VO.second->getType(), -Offset);
        unsigned AS = Vty->getAddressSpace();
        V = Builder.CreateBitCast(V, Type::getInt8PtrTy(SE.getContext(), AS));
        V = Builder.CreateGEP(Type::getInt8Ty(SE.getContext()), V, Idx,
                              "uglygep");
        V = Builder.CreateBitCast(V, Vty);
      }
    } else {
      V = Builder.CreateSub(V, VO.second);
    }
  }
  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non-postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
  return V;
}
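// Illustrative effect of the hoisting logic above: an expression that is
// invariant in the innermost loop is materialized as far out as possible,
// e.g. in the preheader:
//   preheader:
//     %scev = add i64 %a, %b     ; hoisted expansion
//   loop:
//     ...                        ; uses of %scev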
void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}
/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  SCEVInsertPointGuard Guard(Builder, this);
  PHINode *V =
      cast<PHINode>(expandCodeFor(H, nullptr, &L->getHeader()->front()));

  return V;
}
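// The resulting IR is roughly (illustrative block names):
//   header:
//     %indvar = phi i64 [ 0, %preheader ], [ %indvar.next, %latch ]
//   latch:
//     %indvar.next = add i64 %indvar, 1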
/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned
SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                  SmallVectorImpl<WeakTrackingVH> &DeadInsts,
                                  const TargetTransformInfo *TTI) {
  // Find integer phis in order of increasing width.
  SmallVector<PHINode *, 8> Phis;
  for (PHINode &PN : L->getHeader()->phis())
    Phis.push_back(&PN);

  if (TTI)
    llvm::sort(Phis, [](Value *LHS, Value *RHS) {
      // Put pointers at the back and make sure pointer < pointer = false.
      if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
        return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
      return RHS->getType()->getPrimitiveSizeInBits() <
             LHS->getType()->getPrimitiveSizeInBits();
    });

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow. Map wide phis to their truncation
  // so narrow phis can reuse them.
  for (PHINode *Phi : Phis) {
    auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
      if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
        return V;
      if (!SE.isSCEVable(PN->getType()))
        return nullptr;
      auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
      if (!Const)
        return nullptr;
      return Const->getValue();
    };

    // Fold constant phis. They may be congruent to other constant phis and
    // would confuse the logic below that expects proper IVs.
    if (Value *V = SimplifyPHINode(Phi)) {
      if (V->getType() != Phi->getType())
        continue;
      Phi->replaceAllUsesWith(V);
      DeadInsts.emplace_back(Phi);
      ++NumElim;
      DEBUG_WITH_TYPE(DebugType, dbgs()
                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
      continue;
    }

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TTI &&
          TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
            SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc = dyn_cast<Instruction>(
          OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
          dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      if (OrigInc && IsomorphicInc) {
        // If this phi has the same width but is more canonical, replace the
        // original with it. As part of the "more canonical" determination,
        // respect a prior decision to use an IV chain.
        if (OrigPhiRef->getType() == Phi->getType() &&
            !(ChainedPhis.count(Phi) ||
              isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
            (ChainedPhis.count(Phi) ||
             isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
          std::swap(OrigPhiRef, Phi);
          std::swap(OrigInc, IsomorphicInc);
        }
        // Replacing the congruent phi is sufficient because acyclic
        // redundancy elimination, CSE/GVN, should handle the
        // rest. However, once SCEV proves that a phi is congruent,
        // it's often the head of an IV user cycle that is isomorphic
        // with the original phi. It's worth eagerly cleaning up the
        // common case of a single IV increment so that DeleteDeadPHIs
        // can remove cycles that had postinc uses.
        const SCEV *TruncExpr =
            SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
        if (OrigInc != IsomorphicInc &&
            TruncExpr == SE.getSCEV(IsomorphicInc) &&
            SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
            hoistIVInc(OrigInc, IsomorphicInc)) {
          DEBUG_WITH_TYPE(DebugType,
                          dbgs() << "INDVARS: Eliminated congruent iv.inc: "
                                 << *IsomorphicInc << '\n');
          Value *NewInc = OrigInc;
          if (OrigInc->getType() != IsomorphicInc->getType()) {
            Instruction *IP = nullptr;
            if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
              IP = &*PN->getParent()->getFirstInsertionPt();
            else
              IP = OrigInc->getNextNode();

            IRBuilder<> Builder(IP);
            Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
            NewInc = Builder.CreateTruncOrBitCast(
                OrigInc, IsomorphicInc->getType(), IVName);
          }
          IsomorphicInc->replaceAllUsesWith(NewInc);
          DeadInsts.emplace_back(IsomorphicInc);
        }
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs() << "INDVARS: Eliminated congruent iv: "
                                      << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.emplace_back(Phi);
  }
  return NumElim;
}
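// Illustrative example: if two header phis %iv and %iv.copy both compute
// {0,+,1}<%loop>, one of them is RAUW'd with the other (through a
// trunc/bitcast when the widths differ) and queued in DeadInsts for cleanup.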
Value *SCEVExpander::getExactExistingExpansion(const SCEV *S,
                                               const Instruction *At, Loop *L) {
  Optional<ScalarEvolution::ValueOffsetPair> VO =
      getRelatedExistingExpansion(S, At, L);
  if (VO && VO.getValue().second == nullptr)
    return VO.getValue().first;
  return nullptr;
}
Optional<ScalarEvolution::ValueOffsetPair>
SCEVExpander::getRelatedExistingExpansion(const SCEV *S, const Instruction *At,
                                          Loop *L) {
  using namespace llvm::PatternMatch;

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Look for suitable value in simple conditions at the loop exits.
  for (BasicBlock *BB : ExitingBlocks) {
    ICmpInst::Predicate Pred;
    Instruction *LHS, *RHS;
    BasicBlock *TrueBB, *FalseBB;

    if (!match(BB->getTerminator(),
               m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
                    TrueBB, FalseBB)))
      continue;

    if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
      return ScalarEvolution::ValueOffsetPair(LHS, nullptr);

    if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
      return ScalarEvolution::ValueOffsetPair(RHS, nullptr);
  }

  // Use expand's logic which is used for reusing a previous Value in
  // ExprValueMap.
  ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, At);
  if (VO.first)
    return VO;

  // There is potential to make this significantly smarter, but this simple
  // heuristic already gets some interesting cases.

  // Can not find suitable value.
  return None;
}
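// Illustrative shape matched by the exit-condition search above:
//   %cmp = icmp slt i64 %iv.next, %limit
//   br i1 %cmp, label %loop, label %exit
// where both compare operands are instructions; whichever operand's SCEV
// equals S and dominates At is returned with a null offset.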
bool SCEVExpander::isHighCostExpansionHelper(
    const SCEV *S, Loop *L, const Instruction *At,
    SmallPtrSetImpl<const SCEV *> &Processed) {

  // If we can find an existing value for this scev available at the point "At"
  // then consider the expression cheap.
  if (At && getRelatedExistingExpansion(S, At, L))
    return false;

  // Zero/One operand expressions
  switch (S->getSCEVType()) {
  case scUnknown:
  case scConstant:
    return false;
  case scTruncate:
    return isHighCostExpansionHelper(cast<SCEVTruncateExpr>(S)->getOperand(),
                                     L, At, Processed);
  case scZeroExtend:
    return isHighCostExpansionHelper(cast<SCEVZeroExtendExpr>(S)->getOperand(),
                                     L, At, Processed);
  case scSignExtend:
    return isHighCostExpansionHelper(cast<SCEVSignExtendExpr>(S)->getOperand(),
                                     L, At, Processed);
  }

  if (!Processed.insert(S).second)
    return false;

  if (auto *UDivExpr = dyn_cast<SCEVUDivExpr>(S)) {
    // If the divisor is a power of two and the SCEV type fits in a native
    // integer, consider the division cheap irrespective of whether it occurs
    // in the user code since it can be lowered into a right shift.
    if (auto *SC = dyn_cast<SCEVConstant>(UDivExpr->getRHS()))
      if (SC->getAPInt().isPowerOf2()) {
        const DataLayout &DL =
            L->getHeader()->getParent()->getParent()->getDataLayout();
        unsigned Width = cast<IntegerType>(UDivExpr->getType())->getBitWidth();
        return DL.isIllegalInteger(Width);
      }

    // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
    // HowManyLessThans produced to compute a precise expression, rather than a
    // UDiv from the user's code. If we can't find a UDiv in the code with some
    // simple searching, assume the former and consider UDivExpr expensive to
    // compute.
    BasicBlock *ExitingBB = L->getExitingBlock();
    if (!ExitingBB)
      return true;

    // At the beginning of this function we already tried to find an existing
    // value for plain 'S'. Now try to look up 'S + 1' since it is a common
    // pattern involving division. This is just a simple search heuristic.
    if (!At)
      At = &ExitingBB->back();
    if (!getRelatedExistingExpansion(
            SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), At, L))
      return true;
  }

  // HowManyLessThans uses a Max expression whenever the loop is not guarded by
  // the exit condition.
  if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
    return true;

  // Recurse past nary expressions, which commonly occur in the
  // BackedgeTakenCount. They may already exist in program code, and if not,
  // they are not too expensive to rematerialize.
  if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(S)) {
    for (auto *Op : NAry->operands())
      if (isHighCostExpansionHelper(Op, L, At, Processed))
        return true;
  }

  // If we haven't recognized an expensive SCEV pattern, assume it's an
  // expression produced by program code.
  return false;
}
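// Example of the power-of-two case above (illustrative): a UDiv by 8 on a
// target-legal integer type lowers to a single shift, roughly
// "lshr i64 %n, 3", so it is not treated as a high-cost expansion.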
Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
                                            Instruction *IP) {
  switch (Pred->getKind()) {
  case SCEVPredicate::P_Union:
    return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
  case SCEVPredicate::P_Equal:
    return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
  case SCEVPredicate::P_Wrap: {
    auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
    return expandWrapPredicate(AddRecPred, IP);
  }
  }
  llvm_unreachable("Unknown SCEV predicate type");
}
Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
                                          Instruction *IP) {
  Value *Expr0 = expandCodeFor(Pred->getLHS(), Pred->getLHS()->getType(), IP);
  Value *Expr1 = expandCodeFor(Pred->getRHS(), Pred->getRHS()->getType(), IP);

  Builder.SetInsertPoint(IP);
  auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
  return I;
}
Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
                                           Instruction *Loc, bool Signed) {
  assert(AR->isAffine() && "Cannot generate RT check for "
                           "non-affine expression");

  SCEVUnionPredicate Pred;
  const SCEV *ExitCount =
      SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);

  assert(ExitCount != SE.getCouldNotCompute() && "Invalid loop count");

  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *Start = AR->getStart();

  Type *ARTy = AR->getType();
  unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
  unsigned DstBits = SE.getTypeSizeInBits(ARTy);

  // The expression {Start,+,Step} has nusw/nssw if
  //   Step < 0, Start - |Step| * Backedge <= Start
  //   Step >= 0, Start + |Step| * Backedge > Start
  // and |Step| * Backedge doesn't unsigned overflow.

  IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
  Builder.SetInsertPoint(Loc);
  Value *TripCountVal = expandCodeFor(ExitCount, CountTy, Loc);

  IntegerType *Ty =
      IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));
  Type *ARExpandTy = DL.isNonIntegralPointerType(ARTy) ? ARTy : Ty;

  Value *StepValue = expandCodeFor(Step, Ty, Loc);
  Value *NegStepValue = expandCodeFor(SE.getNegativeSCEV(Step), Ty, Loc);
  Value *StartValue = expandCodeFor(Start, ARExpandTy, Loc);

  ConstantInt *Zero =
      ConstantInt::get(Loc->getContext(), APInt::getNullValue(DstBits));

  Builder.SetInsertPoint(Loc);
  // Compute |Step|
  Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
  Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);

  // Get the backedge taken count and truncate or extend it to the AR type.
  Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
  auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
                                         Intrinsic::umul_with_overflow, Ty);

  // Compute |Step| * Backedge
  CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
  Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
  Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");

  // Compute:
  //   Start + |Step| * Backedge < Start
  //   Start - |Step| * Backedge > Start
  Value *Add = nullptr, *Sub = nullptr;
  if (PointerType *ARPtrTy = dyn_cast<PointerType>(ARExpandTy)) {
    const SCEV *MulS = SE.getSCEV(MulV);
    const SCEV *NegMulS = SE.getNegativeSCEV(MulS);
    Add = Builder.CreateBitCast(expandAddToGEP(MulS, ARPtrTy, Ty, StartValue),
                                ARPtrTy);
    Sub = Builder.CreateBitCast(
        expandAddToGEP(NegMulS, ARPtrTy, Ty, StartValue), ARPtrTy);
  } else {
    Add = Builder.CreateAdd(StartValue, MulV);
    Sub = Builder.CreateSub(StartValue, MulV);
  }

  Value *EndCompareGT = Builder.CreateICmp(
      Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);

  Value *EndCompareLT = Builder.CreateICmp(
      Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);

  // Select the answer based on the sign of Step.
  Value *EndCheck =
      Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);

  // If the backedge taken count type is larger than the AR type,
  // check that we don't drop any bits by truncating it. If we are
  // dropping bits, then we have overflow (unless the step is zero).
  if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
    auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
    auto *BackedgeCheck =
        Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
                           ConstantInt::get(Loc->getContext(), MaxVal));
    BackedgeCheck = Builder.CreateAnd(
        BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));

    EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
  }

  EndCheck = Builder.CreateOr(EndCheck, OfMul);
  return EndCheck;
}
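// In summary (illustrative pseudo-code of the emitted check):
//   AbsStep   = Step < 0 ? -Step : Step
//   {Mul, Of} = umul.with.overflow(AbsStep, BackedgeTakenCount)
//   EndCheck  = (Step < 0 ? Start - Mul > Start : Start + Mul < Start) | Of
// plus the extra trip-count truncation check when CountTy is wider than Ty.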
Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
                                         Instruction *IP) {
  const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
  Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;

  // Add a check for NUSW
  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
    NUSWCheck = generateOverflowCheck(A, IP, false);

  // Add a check for NSSW
  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
    NSSWCheck = generateOverflowCheck(A, IP, true);

  if (NUSWCheck && NSSWCheck)
    return Builder.CreateOr(NUSWCheck, NSSWCheck);

  if (NUSWCheck)
    return NUSWCheck;

  if (NSSWCheck)
    return NSSWCheck;

  return ConstantInt::getFalse(IP->getContext());
}
Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
                                          Instruction *IP) {
  auto *BoolType = IntegerType::get(IP->getContext(), 1);
  Value *Check = ConstantInt::getNullValue(BoolType);

  // Loop over all checks in this set.
  for (auto Pred : Union->getPredicates()) {
    auto *NextCheck = expandCodeForPredicate(Pred, IP);
    Builder.SetInsertPoint(IP);
    Check = Builder.CreateOr(Check, NextCheck);
  }

  return Check;
}
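// The union check is the OR of its members' checks, so it evaluates to true
// (i.e. the runtime guard fails) as soon as any member predicate check fails.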
namespace {
// Search for a SCEV subexpression that is not safe to expand. Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check
// is only needed when the expression includes some subexpression that is not
// IV derived.
//
// Currently, we only allow division by a nonzero constant here. If this is
// inadequate, we could easily allow division by SCEVUnknown by using
// ValueTracking to check isKnownNonZero().
//
// We cannot generally expand recurrences unless the step dominates the loop
// header. The expander handles the special case of affine recurrences by
// scaling the recurrence outside the loop, but this technique isn't generally
// applicable. Expanding a nested recurrence outside a loop requires computing
// binomial coefficients. This could be done, but the recurrence has to be in a
// perfectly reduced form, which can't be guaranteed.
struct SCEVFindUnsafe {
  ScalarEvolution &SE;
  bool IsUnsafe;

  SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}

  bool follow(const SCEV *S) {
    if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
      if (!SC || SC->getValue()->isZero()) {
        IsUnsafe = true;
        return false;
      }
    }
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
      const SCEV *Step = AR->getStepRecurrence(SE);
      if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
        IsUnsafe = true;
        return false;
      }
    }
    return true;
  }

  bool isDone() const { return IsUnsafe; }
};
} // end anonymous namespace
namespace llvm {
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
  SCEVFindUnsafe Search(SE);
  visitAll(S, Search);
  return !Search.IsUnsafe;
}

bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint,
                      ScalarEvolution &SE) {
  return isSafeToExpand(S, SE) &&
         SE.dominates(S, InsertionPoint->getParent());
}
} // end namespace llvm