//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace PatternMatch;
/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users())
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", &*IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), &*IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT.dominates(Ret, &*BIP));

  rememberInstruction(Ret);
  return Ret;
}
static BasicBlock::iterator findInsertPointAfter(Instruction *I,
                                                 BasicBlock *MustDominate) {
  BasicBlock::iterator IP = ++I->getIterator();
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
    ++IP;
  } else if (isa<CatchSwitchInst>(IP)) {
    IP = MustDominate->getFirstInsertionPt();
  } else {
    assert(!IP->isEHPad() && "unexpected eh pad!");
  }

  return IP;
}
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = findInsertPointAfter(I, Builder.GetInsertBlock());
  return ReuseOrCreateCast(I, Ty, Op, IP);
}
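
// Illustrative example (hypothetical IR, not from the original source): when
// an i64 view of "%q = inttoptr i64 %x to i8*" is requested, the
// inttoptr<->ptrtoint short-circuit above returns %x directly, since the
// same-width round-trip preserves all bits and no new cast is needed.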
/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;

      // Conservatively, do not use any instruction which has any of wrap/exact
      // flags installed.
      // TODO: Instead of simply disable poison instructions we can be clever
      //       here and match SCEV to this instruction.
      auto canGeneratePoison = [](Instruction *I) {
        if (isa<OverflowingBinaryOperator>(I) &&
            (I->hasNoSignedWrap() || I->hasNoUnsignedWrap()))
          return true;
        if (isa<PossiblyExactOperator>(I) && I->isExact())
          return true;
        return false;
      };
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS && !canGeneratePoison(&*IP))
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  SCEVInsertPointGuard Guard(Builder, this);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  rememberInstruction(BO);

  return BO;
}
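
// Illustrative example (hypothetical IR, not from the original source): if
// the block already contains "%t = add i64 %a, %b" within the six-instruction
// scan window, a request for InsertBinop(Instruction::Add, %a, %b) reuses %t.
// An existing "add nsw" is deliberately skipped, because reusing it could
// introduce poison where the SCEV-level addition carries no such guarantee.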
/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
///
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
          ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder = SE.getAddExpr(
            Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // Size is known, check if there is a constant operand which is a multiple
    // of the given factor. If so, we can factor it.
    const SCEVConstant *FC = cast<SCEVConstant>(Factor);
    if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
      if (!C->getAPInt().srem(FC->getAPInt())) {
        SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
        NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
        S = SE.getMulExpr(NewMulOps);
        return true;
      }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}
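
// Worked example (illustrative, not from the original source): with
// S = {0,+,8}<%loop> and Factor = 8, the addrec case divides both start and
// step, leaving S = {0,+,1}<%loop> and a zero Remainder. With S = 10 and
// Factor = 4, the constant case yields quotient 2 and stores the leftover
// 10 srem 4 = 2 into Remainder.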
/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}
/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}
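
// Worked example (illustrative, not from the original source):
// Ops = [{a + b,+,c}<%L>] is rewritten to Ops = [a, b, {0,+,c}<%L>]: the
// start value's operands are flattened into the top level and the zero-start
// addrec is re-appended, after which SimplifyAddOperands re-sorts the list.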
/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *OriginalElTy = PTy->getElementType();
  Type *ElTy = OriginalElTy;
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntPtrTy = DL.getIntPtrType(PTy);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (const SCEV *Op : Ops) {
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Op);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      // Field offsets are known. See if a constant offset falls within any of
      // the struct fields.
      if (Ops.empty())
        break;
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
        if (SE.getTypeSizeInBits(C->getType()) <= 64) {
          const StructLayout &SL = *DL.getStructLayout(STy);
          uint64_t FullOffset = C->getValue()->getZExtValue();
          if (FullOffset < SL.getSizeInBytes()) {
            unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
            GepIndices.push_back(
                ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
            ElTy = STy->getTypeAtIndex(ElIdx);
            Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
            AnyNonZeroIndices = true;
            FoundFieldNo = true;
          }
        }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                              CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return &*IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
    rememberInstruction(GEP);

    return GEP;
  }

  {
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V)) break;

      bool AnyIndexNotLoopInvariant = any_of(
          GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });

      if (AnyIndexNotLoopInvariant)
        break;

      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
    // because ScalarEvolution may have changed the address arithmetic to
    // compute a value which is beyond the end of the allocated object.
    Value *Casted = V;
    if (V->getType() != PTy)
      Casted = InsertNoopCastOfTo(Casted, PTy);
    Value *GEP = Builder.CreateGEP(OriginalElTy, Casted, GepIndices, "scevgep");
    Ops.push_back(SE.getUnknown(GEP));
    rememberInstruction(GEP);
  }

  return expand(SE.getAddExpr(Ops));
}
Value *SCEVExpander::expandAddToGEP(const SCEV *Op, PointerType *PTy, Type *Ty,
                                    Value *V) {
  const SCEV *const Ops[1] = {Op};
  return expandAddToGEP(Ops, Ops + 1, PTy, Ty, V);
}
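
// Illustrative IR (hypothetical values, not from the original source):
// expanding (%p + 4 * %i) with %p of type i32* factors the element size out
// of the offset and emits "getelementptr i32, i32* %p, i64 %i" (a "scevgep");
// if no operand were divisible by 4, the fallback path above would instead
// cast %p to i8* and emit a byte-addressed "uglygep".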
/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}
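
// Illustrative example (not from the original source): for a loop %inner
// nested inside %outer, PickMostRelevantLoop(%outer, %inner, DT) returns
// %inner. For two sibling loops, the one whose header is dominated by the
// other's header (the later of the two) is chosen.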
/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : N->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result = PickMostRelevantLoop(
        getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}
namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not instructions, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}
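
// Illustrative example (hypothetical operands, not from the original source):
// for %p + %n + (-1 * %m) with pointer %p, the stable sort places %p first,
// so the running sum starts as a GEP based on %p, and the non-constant
// negative operand is emitted as a "sub" rather than a negate plus an "add".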
Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  auto I = OpsAndLoops.begin();

  // Expand the calculation of X pow N in the following manner:
  // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
  // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
  const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops, &Ty]() {
    auto E = I;
    // Calculate how many times the same operand from the same loop is included
    // in this power.
    uint64_t Exponent = 0;
    const uint64_t MaxExponent = UINT64_MAX >> 1;
    // No one sane will ever try to calculate such huge exponents, but if we
    // need this, we stop on UINT64_MAX / 2 because we need to exit the loop
    // below when the power of 2 exceeds our Exponent, and we want it to be
    // 1u << 31 at most to not deal with unsigned overflow.
    while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
      ++Exponent;
      ++E;
    }
    assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");

    // Calculate powers with exponents 1, 2, 4, 8 etc. and include those of them
    // that are needed into the result.
    Value *P = expandCodeFor(I->second, Ty);
    Value *Result = nullptr;
    if (Exponent & 1)
      Result = P;
    for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
      P = InsertBinop(Instruction::Mul, P, P);
      if (Exponent & BinExp)
        Result = Result ? InsertBinop(Instruction::Mul, Result, P) : P;
    }

    I = E;
    assert(Result && "Nothing was expanded?");
    return Result;
  };

  while (I != OpsAndLoops.end()) {
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = ExpandOpBinPowN();
    } else if (I->second->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = ExpandOpBinPowN();
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()));
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W);
      }
    }
  }

  return Prod;
}
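
// Worked example of ExpandOpBinPowN (illustrative, not from the original
// source): X*X*X*X*X has Exponent = 5 = 0b101, so the lambda emits X^2 and
// X^4 by repeated squaring and combines Result = X * X^4, using three
// multiplies instead of four. Separately, a multiply by a power-of-two
// constant such as 8 becomes "shl ..., 3" via the m_Power2 match above.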
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getAPInt();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}
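
// Illustrative example (not from the original source): (%n /u 8) expands to
// "lshr i64 %n, 3" through the power-of-two fast path; a divisor such as 6
// is not a power of two and falls through to a plain "udiv".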
/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}
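
// Worked example (illustrative, not from the original source): for
// Base = {%p,+,4}<%L> and Rest = 0, the loop above peels the start value,
// leaving Base = %p and Rest = {0,+,4}<%L>, so a plain pointer remains
// available as a GEP base while the recurrence becomes part of the offset.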
/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
           OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT.dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}
/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (auto I = IncV->op_begin() + 1, E = IncV->op_end(); I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}
/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'. This is intended to be used when the instruction
/// 'I' is being moved. If this fixup is not done and 'I' is moved to a
/// different block, the inconsistent insert point (with a mismatched
/// Instruction and Block) can lead to an instruction being inserted in a block
/// other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {
  BasicBlock::iterator It(*I);
  BasicBlock::iterator NewInsertPt = std::next(It);
  if (Builder.GetInsertPoint() == It)
    Builder.SetInsertPoint(&*NewInsertPt);
  for (auto *InsertPtGuard : InsertPointGuards)
    if (InsertPtGuard->GetInsertPoint() == It)
      InsertPtGuard->SetInsertPoint(NewInsertPt);
}
/// hoistStep - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT.dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for(;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I) {
    fixupInsertPoints(*I);
    (*I)->moveBefore(InsertPos);
  }
  return true;
}
/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for(Instruction *IVOper = IncV;
      (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}
/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    IncV = expandAddToGEP(SE.getSCEV(StepV), GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}
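
// Illustrative IR (hypothetical values, not from the original source): an i64
// IV with constant step 4 yields "%x.iv.next = add i64 %x.iv, 4". A pointer
// IV with a non-constant step is incremented through a GEP over i1*, so the
// step counts address units (bytes) rather than being implicitly multiplied
// by the pointee size.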
/// Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
void SCEVExpander::hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                                  Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    fixupInsertPoints(InstToHoist);
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}
/// Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncate it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}
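
// Worked example (illustrative, not from the original source): an available
// phi for {0,+,1} can satisfy a request for {R,+,-1}, because
// R + (-{R,+,-1}) = {0,+,1}; the function sets InvertStep and the caller then
// materializes the value as R minus the phi.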
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}
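
// Illustrative check (not from the original source): for a recurrence
// {0,+,1} over i32 that provably never wraps, zext(AR + 1) to i64 equals
// zext(AR) + zext(1), so IsIncrementNUW returns true and the emitted "add"
// may carry the nuw flag; if the increment could wrap, the two widened forms
// differ and no flag is attached.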
/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV =
        IVIncInsertLoop &&
        SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (PHINode &PN : L->getHeader()->phis()) {
      if (!SE.isSCEVable(PN.getType()))
        continue;

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      // TODO: this possibly can be reworked to avoid this cast at all.
      Instruction *TempIncV =
          dyn_cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
      if (!TempIncV)
        continue;

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = &PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop we might find an exact match
        // SCEV later.
        AddRecPhiMatch = &PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value into the loop preheader.
  assert(L->getLoopPreheader() &&
         "Can't expand add recurrences without a loop preheader!");
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getLoopPreheader()->getTerminator());

  // StartV must have been be inserted into L's preheader to dominate the new
  // phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
                                 L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition. It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}
Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized = cast<SCEVAddRecExpr>(normalizeForPostIncUse(S, Loops, SE));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    if (!Start->isZero()) {
      // The normalization below assumes that Start is constant zero, so if
      // it isn't re-associate Start to PostLoopOffset.
      assert(!PostLoopOffset && "Start not-null but PostLoopOffset set?");
      PostLoopOffset = Start;
      Start = SE.getConstant(Normalized->getType(), 0);
    }
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(
                             Start, Step, Normalized->getLoop(),
                             Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // We can't use a pointer type for the addrec if the pointer type is
  // non-integral.
  Type *AddRecPHIExpandTy =
      DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;

  // In some cases, we decide to reuse an existing phi node but need to truncate
  // it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
                                          IntTy, TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result) &&
        !SE.DT.dominates(cast<Instruction>(Result),
                         &*Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside whose operand is replaced during
      // expansion with the value of the postinc user. Without fundamentally
      // changing the way postinc users are tracked, the only remedy is
      // inserting an extra IV increment. StepV might fold into PostLoopOffset,
      // but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        SCEVInsertPointGuard Guard(Builder, this);
        StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop. Apply
  // truncation and/or inversion of the step.
  if (TruncTy) {
    Type *ResTy = Result->getType();
    // Normalize the result type.
    if (ResTy != SE.getEffectiveSCEVType(ResTy))
      Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
    // Truncate the result.
    if (TruncTy != Result->getType()) {
      Result = Builder.CreateTrunc(Result, TruncTy);
      rememberInstruction(Result);
    }
    // Invert the result.
    if (InvertStep) {
      Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
                                 Result);
      rememberInstruction(Result);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      if (Result->getType()->isIntegerTy()) {
        Value *Base = expandCodeFor(PostLoopOffset, ExpandTy);
        Result = expandAddToGEP(SE.getUnknown(Result), PTy, IntTy, Base);
      } else {
        Result = expandAddToGEP(PostLoopOffset, PTy, IntTy, Result);
      }
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}
1465 Value
*SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr
*S
) {
1466 if (!CanonicalMode
) return expandAddRecExprLiterally(S
);
1468 Type
*Ty
= SE
.getEffectiveSCEVType(S
->getType());
1469 const Loop
*L
= S
->getLoop();
1471 // First check for an existing canonical IV in a suitable type.
1472 PHINode
*CanonicalIV
= nullptr;
1473 if (PHINode
*PN
= L
->getCanonicalInductionVariable())
1474 if (SE
.getTypeSizeInBits(PN
->getType()) >= SE
.getTypeSizeInBits(Ty
))
1477 // Rewrite an AddRec in terms of the canonical induction variable, if
1478 // its type is more narrow.
1480 SE
.getTypeSizeInBits(CanonicalIV
->getType()) >
1481 SE
.getTypeSizeInBits(Ty
)) {
1482 SmallVector
<const SCEV
*, 4> NewOps(S
->getNumOperands());
1483 for (unsigned i
= 0, e
= S
->getNumOperands(); i
!= e
; ++i
)
1484 NewOps
[i
] = SE
.getAnyExtendExpr(S
->op_begin()[i
], CanonicalIV
->getType());
1485 Value
*V
= expand(SE
.getAddRecExpr(NewOps
, S
->getLoop(),
1486 S
->getNoWrapFlags(SCEV::FlagNW
)));
1487 BasicBlock::iterator NewInsertPt
=
1488 findInsertPointAfter(cast
<Instruction
>(V
), Builder
.GetInsertBlock());
1489 V
= expandCodeFor(SE
.getTruncateExpr(SE
.getUnknown(V
), Ty
), nullptr,
1494 // {X,+,F} --> X + {0,+,F}
1495 if (!S
->getStart()->isZero()) {
1496 SmallVector
<const SCEV
*, 4> NewOps(S
->op_begin(), S
->op_end());
1497 NewOps
[0] = SE
.getConstant(Ty
, 0);
1498 const SCEV
*Rest
= SE
.getAddRecExpr(NewOps
, L
,
1499 S
->getNoWrapFlags(SCEV::FlagNW
));
1501 // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
1502 // comments on expandAddToGEP for details.
1503 const SCEV
*Base
= S
->getStart();
1504 // Dig into the expression to find the pointer base for a GEP.
1505 const SCEV
*ExposedRest
= Rest
;
1506 ExposePointerBase(Base
, ExposedRest
, SE
);
1507 // If we found a pointer, expand the AddRec with a GEP.
1508 if (PointerType
*PTy
= dyn_cast
<PointerType
>(Base
->getType())) {
1509 // Make sure the Base isn't something exotic, such as a multiplied
1510 // or divided pointer value. In those cases, the result type isn't
1511 // actually a pointer type.
1512 if (!isa
<SCEVMulExpr
>(Base
) && !isa
<SCEVUDivExpr
>(Base
)) {
1513 Value
*StartV
= expand(Base
);
1514 assert(StartV
->getType() == PTy
&& "Pointer type mismatch for GEP!");
1515 return expandAddToGEP(ExposedRest
, PTy
, Ty
, StartV
);
1519 // Just do a normal add. Pre-expand the operands to suppress folding.
1521 // The LHS and RHS values are factored out of the expand call to make the
1522 // output independent of the argument evaluation order.
1523 const SCEV
*AddExprLHS
= SE
.getUnknown(expand(S
->getStart()));
1524 const SCEV
*AddExprRHS
= SE
.getUnknown(expand(Rest
));
1525 return expand(SE
.getAddExpr(AddExprLHS
, AddExprRHS
));
1528 // If we don't yet have a canonical IV, create one.
1530 // Create and insert the PHI node for the induction variable in the
1532 BasicBlock
*Header
= L
->getHeader();
1533 pred_iterator HPB
= pred_begin(Header
), HPE
= pred_end(Header
);
1534 CanonicalIV
= PHINode::Create(Ty
, std::distance(HPB
, HPE
), "indvar",
1536 rememberInstruction(CanonicalIV
);
    SmallSet<BasicBlock *, 4> PredSeen;
    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (!PredSeen.insert(HP).second) {
        // There must be an incoming value for each predecessor, even the
        // duplicates!
        CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
        continue;
      }

      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }
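  // The canonical IV built above has roughly this shape in IR (illustrative):
  //   loop:
  //     %indvar = phi i64 [ 0, %preheader ], [ %indvar.next, %loop ]
  //     ...
  //     %indvar.next = add i64 %indvar, 1
  // Out-of-loop predecessors feed in zero; back-edge predecessors feed in the
  // unit increment.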
  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }
  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));
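  // E.g. (illustrative): {0,+,4}<%loop> with an i64 canonical IV expands to
  // roughly "mul i64 %indvar, 4", followed by a trunc when the requested
  // type is narrower than the canonical IV.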
  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}
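// For instance, a quadratic recurrence {A,+,B,+,C} evaluated at the symbolic
// iteration IH folds to A + B*BC(IH,1) + C*BC(IH,2), where BC is the binomial
// coefficient; the expander then only has to emit ordinary IR arithmetic for
// that closed form. (Illustrative description; see
// SCEVAddRecExpr::evaluateAtIteration for the authoritative computation.)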
Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}
Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}
Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}
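// Illustrative lowering: smax(%a, %b, %c) becomes a chain of compares and
// selects, roughly:
//   %cmp1 = icmp sgt i64 %c, %b
//   %smax1 = select i1 %cmp1, i64 %c, i64 %b
//   %cmp2 = icmp sgt i64 %smax1, %a
//   %smax = select i1 %cmp2, i64 %smax1, i64 %a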
Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *IP) {
  setInsertPoint(IP);
  return expandCodeFor(SH, Ty);
}
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}
ScalarEvolution::ValueOffsetPair
SCEVExpander::FindValueInExprValueMap(const SCEV *S,
                                      const Instruction *InsertPt) {
  SetVector<ScalarEvolution::ValueOffsetPair> *Set = SE.getSCEVValues(S);
  // If the expansion is not in CanonicalMode, and the SCEV contains any
  // sub scAddRecExpr type SCEV, it is required to expand the SCEV literally.
  if (CanonicalMode || !SE.containsAddRecurrence(S)) {
    // If S is scConstant, it may be worse to reuse an existing Value.
    if (S->getSCEVType() != scConstant && Set) {
      // Choose a Value from the set which dominates the insertPt.
      // insertPt should be inside the Value's parent loop so as not to break
      // the LCSSA form.
      for (auto const &VOPair : *Set) {
        Value *V = VOPair.first;
        ConstantInt *Offset = VOPair.second;
        Instruction *EntInst = nullptr;
        if (V && isa<Instruction>(V) && (EntInst = cast<Instruction>(V)) &&
            S->getType() == V->getType() &&
            EntInst->getFunction() == InsertPt->getFunction() &&
            SE.DT.dominates(EntInst, InsertPt) &&
            (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
             SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
          return {V, Offset};
      }
    }
  }
  return {nullptr, nullptr};
}
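// Usage sketch (hypothetical caller): before materializing S at InsertPt,
//   ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
// yields in VO.first an existing Value that already computes S (displaced by
// the constant VO.second, if any), so no new instructions are needed for it.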
// The expansion of SCEV will either reuse a previous Value in ExprValueMap,
// or expand the SCEV literally. Specifically, if the expansion is in LSRMode,
// and the SCEV contains any sub scAddRecExpr type SCEV, it will be expanded
// literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
// the expansion will try to reuse Value from ExprValueMap, and only when it
// fails, expand the SCEV literally.
Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = &*Builder.GetInsertPoint();
  for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = &*L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // We can move the insertion point only if there are no div or rem
      // operations; otherwise we risk moving it across the check for a zero
      // denominator.
      auto SafeToHoist = [](const SCEV *S) {
        return !SCEVExprContains(S, [](const SCEV *S) {
                  if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
                    if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
                      // Division by non-zero constants can be hoisted.
                      return SC->getValue()->isZero();
                    // All other divisions should not be moved as they may be
                    // divisions by zero and should be kept within the
                    // conditions of the surrounding loops that guard their
                    // execution (see PR35406).
                    return true;
                  }
                  return false;
                });
      };
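      // E.g. a SCEV like (%n /u %m) must not be hoisted above the dominating
      // "%m != 0" guard, whereas (%n /u 8) may be hoisted freely because its
      // divisor is a non-zero constant. (Illustrative examples.)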
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L) &&
          SafeToHoist(S))
        InsertPt = &*L->getHeader()->getFirstInsertionPt();
      while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
             (isInsertedInstruction(InsertPt) ||
              isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = &*std::next(InsertPt->getIterator());
      }
      break;
    }
  // Check to see if we already expanded this here.
  auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  SCEVInsertPointGuard Guard(Builder, this);
  Builder.SetInsertPoint(InsertPt);
  // Expand the expression into instructions.
  ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
  Value *V = VO.first;
  if (!V)
    V = visit(S);
  else if (VO.second) {
    if (PointerType *Vty = dyn_cast<PointerType>(V->getType())) {
      Type *Ety = Vty->getPointerElementType();
      int64_t Offset = VO.second->getSExtValue();
      int64_t ESize = SE.getTypeSizeInBits(Ety);
      if ((Offset * 8) % ESize == 0) {
        ConstantInt *Idx =
            ConstantInt::getSigned(VO.second->getType(), -(Offset * 8) / ESize);
        V = Builder.CreateGEP(Ety, V, Idx, "scevgep");
      } else {
        ConstantInt *Idx =
            ConstantInt::getSigned(VO.second->getType(), -Offset);
        unsigned AS = Vty->getAddressSpace();
        V = Builder.CreateBitCast(V, Type::getInt8PtrTy(SE.getContext(), AS));
        V = Builder.CreateGEP(Type::getInt8Ty(SE.getContext()), V, Idx,
                              "uglygep");
        V = Builder.CreateBitCast(V, Vty);
      }
    } else {
      V = Builder.CreateSub(V, VO.second);
    }
  }
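  // Offset reuse, illustratively: if the map records that "i32* %gep"
  // computes S plus an offset of 4 bytes, the code above re-derives S itself
  // as "getelementptr i32, i32* %gep, i64 -1", i.e. it backs out the byte
  // offset via a GEP for pointers and via a sub for integers.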
  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non-postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
  return V;
}
void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}
/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  SCEVInsertPointGuard Guard(Builder, this);
  PHINode *V =
      cast<PHINode>(expandCodeFor(H, nullptr, &L->getHeader()->front()));

  return V;
}
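// Usage sketch (hypothetical caller): a transform that needs a fresh
// zero-based counter for loop L can do
//   PHINode *IV = Expander.getOrInsertCanonicalInductionVariable(L, Int64Ty);
// and receives either the loop's existing {0,+,1} phi or a newly created one.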
/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned
SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                  SmallVectorImpl<WeakTrackingVH> &DeadInsts,
                                  const TargetTransformInfo *TTI) {
  // Find integer phis in order of increasing width.
  SmallVector<PHINode *, 8> Phis;
  for (PHINode &PN : L->getHeader()->phis())
    Phis.push_back(&PN);

  if (TTI)
    llvm::sort(Phis, [](Value *LHS, Value *RHS) {
      // Put pointers at the back and make sure pointer < pointer = false.
      if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
        return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
      return RHS->getType()->getPrimitiveSizeInBits() <
             LHS->getType()->getPrimitiveSizeInBits();
    });
  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow. Map wide phis to their truncation
  // so narrow phis can reuse them.
  for (PHINode *Phi : Phis) {
    auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
      if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
        return V;
      if (!SE.isSCEVable(PN->getType()))
        return nullptr;
      auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
      if (!Const)
        return nullptr;
      return Const->getValue();
    };
    // Fold constant phis. They may be congruent to other constant phis and
    // would confuse the logic below that expects proper IVs.
    if (Value *V = SimplifyPHINode(Phi)) {
      if (V->getType() != Phi->getType())
        continue;
      Phi->replaceAllUsesWith(V);
      DeadInsts.emplace_back(Phi);
      ++NumElim;
      DEBUG_WITH_TYPE(DebugType, dbgs()
                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
      continue;
    }

    if (!SE.isSCEVable(Phi->getType()))
      continue;
    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TTI &&
          TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
            SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }
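    // Illustratively: if an i64 phi is processed first and truncation to i32
    // is free on the target, both its SCEV and the i32 truncation of that
    // SCEV now map to the wide phi, so a congruent i32 phi seen later is
    // rewritten as a trunc of the wide phi rather than kept as a separate IV.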
    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc = dyn_cast<Instruction>(
          OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
          dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
      if (OrigInc && IsomorphicInc) {
        // If this phi has the same width but is more canonical, replace the
        // original with it. As part of the "more canonical" determination,
        // respect a prior decision to use an IV chain.
        if (OrigPhiRef->getType() == Phi->getType() &&
            !(ChainedPhis.count(Phi) ||
              isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
            (ChainedPhis.count(Phi) ||
             isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
          std::swap(OrigPhiRef, Phi);
          std::swap(OrigInc, IsomorphicInc);
        }
        // Replacing the congruent phi is sufficient because acyclic
        // redundancy elimination, CSE/GVN, should handle the
        // rest. However, once SCEV proves that a phi is congruent,
        // it's often the head of an IV user cycle that is isomorphic
        // with the original phi. It's worth eagerly cleaning up the
        // common case of a single IV increment so that DeleteDeadPHIs
        // can remove cycles that had postinc uses.
        const SCEV *TruncExpr =
            SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
        if (OrigInc != IsomorphicInc &&
            TruncExpr == SE.getSCEV(IsomorphicInc) &&
            SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
            hoistIVInc(OrigInc, IsomorphicInc)) {
          DEBUG_WITH_TYPE(DebugType,
                          dbgs() << "INDVARS: Eliminated congruent iv.inc: "
                                 << *IsomorphicInc << '\n');
          Value *NewInc = OrigInc;
          if (OrigInc->getType() != IsomorphicInc->getType()) {
            Instruction *IP = nullptr;
            if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
              IP = &*PN->getParent()->getFirstInsertionPt();
            else
              IP = OrigInc->getNextNode();

            IRBuilder<> Builder(IP);
            Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
            NewInc = Builder.CreateTruncOrBitCast(
                OrigInc, IsomorphicInc->getType(), IVName);
          }
          IsomorphicInc->replaceAllUsesWith(NewInc);
          DeadInsts.emplace_back(IsomorphicInc);
        }
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs() << "INDVARS: Eliminated congruent iv: "
                                      << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.emplace_back(Phi);
  }
  return NumElim;
}
Value *SCEVExpander::getExactExistingExpansion(const SCEV *S,
                                               const Instruction *At,
                                               Loop *L) {
  Optional<ScalarEvolution::ValueOffsetPair> VO =
      getRelatedExistingExpansion(S, At, L);
  if (VO && VO.getValue().second == nullptr)
    return VO.getValue().first;
  return nullptr;
}
Optional<ScalarEvolution::ValueOffsetPair>
SCEVExpander::getRelatedExistingExpansion(const SCEV *S, const Instruction *At,
                                          Loop *L) {
  using namespace llvm::PatternMatch;

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Look for a suitable value in simple conditions at the loop exits.
  for (BasicBlock *BB : ExitingBlocks) {
    ICmpInst::Predicate Pred;
    Instruction *LHS, *RHS;
    BasicBlock *TrueBB, *FalseBB;

    if (!match(BB->getTerminator(),
               m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
                    TrueBB, FalseBB)))
      continue;

    if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
      return ScalarEvolution::ValueOffsetPair(LHS, nullptr);

    if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
      return ScalarEvolution::ValueOffsetPair(RHS, nullptr);
  }

  // Use expand's logic which is used for reusing a previous Value in
  // ExprValueMap.
  ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, At);
  if (VO.first)
    return VO;

  // There is potential to make this significantly smarter, but this simple
  // heuristic already gets some interesting cases.

  // Cannot find a suitable value.
  return None;
}
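// E.g. (illustrative): for an exiting block ending in
//   %exitcond = icmp eq i64 %iv.next, %n
//   br i1 %exitcond, label %exit, label %loop
// a query for the SCEV of %iv.next can be answered with %iv.next itself (and
// a null offset) instead of expanding fresh instructions.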
bool SCEVExpander::isHighCostExpansionHelper(
    const SCEV *S, Loop *L, const Instruction *At,
    SmallPtrSetImpl<const SCEV *> &Processed) {

  // If we can find an existing value for this scev available at the point "At"
  // then consider the expression cheap.
  if (At && getRelatedExistingExpansion(S, At, L))
    return false;

  // Zero/One operand expressions
  switch (S->getSCEVType()) {
  case scUnknown:
  case scConstant:
    return false;
  case scTruncate:
    return isHighCostExpansionHelper(cast<SCEVTruncateExpr>(S)->getOperand(),
                                     L, At, Processed);
  case scZeroExtend:
    return isHighCostExpansionHelper(cast<SCEVZeroExtendExpr>(S)->getOperand(),
                                     L, At, Processed);
  case scSignExtend:
    return isHighCostExpansionHelper(cast<SCEVSignExtendExpr>(S)->getOperand(),
                                     L, At, Processed);
  }

  if (!Processed.insert(S).second)
    return false;
  if (auto *UDivExpr = dyn_cast<SCEVUDivExpr>(S)) {
    // If the divisor is a power of two and the SCEV type fits in a native
    // integer, consider the division cheap irrespective of whether it occurs
    // in the user code since it can be lowered into a right shift.
    if (auto *SC = dyn_cast<SCEVConstant>(UDivExpr->getRHS()))
      if (SC->getAPInt().isPowerOf2()) {
        const DataLayout &DL =
            L->getHeader()->getParent()->getParent()->getDataLayout();
        unsigned Width = cast<IntegerType>(UDivExpr->getType())->getBitWidth();
        return DL.isIllegalInteger(Width);
      }
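    // E.g. (illustrative): (%n /u 16) on a target with legal i64 lowers to
    // "lshr i64 %n, 4", so such divisions are only considered expensive when
    // the type is an illegal integer for the target.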
    // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
    // HowManyLessThans produced to compute a precise expression, rather than a
    // UDiv from the user's code. If we can't find a UDiv in the code with some
    // simple searching, assume the former and consider UDivExpr expensive to
    // compute.
    BasicBlock *ExitingBB = L->getExitingBlock();
    if (!ExitingBB)
      return true;

    // At the beginning of this function we already tried to find an existing
    // value for plain 'S'. Now try to look up 'S + 1' since it is a common
    // pattern involving division. This is just a simple search heuristic.
    if (!At)
      At = &ExitingBB->back();
    if (!getRelatedExistingExpansion(
            SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), At, L))
      return true;
  }
  // HowManyLessThans uses a Max expression whenever the loop is not guarded by
  // the exit condition.
  if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
    return true;

  // Recurse past nary expressions, which commonly occur in the
  // BackedgeTakenCount. They may already exist in program code, and if not,
  // they are not too expensive to rematerialize.
  if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(S)) {
    for (auto *Op : NAry->operands())
      if (isHighCostExpansionHelper(Op, L, At, Processed))
        return true;
  }

  // If we haven't recognized an expensive SCEV pattern, assume it's an
  // expression produced by program code.
  return false;
}
Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
                                            Instruction *IP) {
  switch (Pred->getKind()) {
  case SCEVPredicate::P_Union:
    return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
  case SCEVPredicate::P_Equal:
    return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
  case SCEVPredicate::P_Wrap: {
    auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
    return expandWrapPredicate(AddRecPred, IP);
  }
  }
  llvm_unreachable("Unknown SCEV predicate type");
}
Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
                                          Instruction *IP) {
  Value *Expr0 = expandCodeFor(Pred->getLHS(), Pred->getLHS()->getType(), IP);
  Value *Expr1 = expandCodeFor(Pred->getRHS(), Pred->getRHS()->getType(), IP);

  Builder.SetInsertPoint(IP);
  auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
  return I;
}
Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
                                           Instruction *Loc, bool Signed) {
  assert(AR->isAffine() && "Cannot generate RT check for "
                           "non-affine expression");

  SCEVUnionPredicate Pred;
  const SCEV *ExitCount =
      SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);

  assert(ExitCount != SE.getCouldNotCompute() && "Invalid loop count");

  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *Start = AR->getStart();

  Type *ARTy = AR->getType();
  unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
  unsigned DstBits = SE.getTypeSizeInBits(ARTy);

  // The expression {Start,+,Step} has nusw/nssw if
  //   Step < 0, Start - |Step| * Backedge <= Start
  //   Step >= 0, Start + |Step| * Backedge > Start
  // and |Step| * Backedge doesn't unsigned overflow.
  IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
  Builder.SetInsertPoint(Loc);
  Value *TripCountVal = expandCodeFor(ExitCount, CountTy, Loc);

  IntegerType *Ty =
      IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));
  Type *ARExpandTy = DL.isNonIntegralPointerType(ARTy) ? ARTy : Ty;

  Value *StepValue = expandCodeFor(Step, Ty, Loc);
  Value *NegStepValue = expandCodeFor(SE.getNegativeSCEV(Step), Ty, Loc);
  Value *StartValue = expandCodeFor(Start, ARExpandTy, Loc);

  ConstantInt *Zero =
      ConstantInt::get(Loc->getContext(), APInt::getNullValue(DstBits));
  Builder.SetInsertPoint(Loc);
  // Compute |Step|
  Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
  Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);

  // Get the backedge taken count and truncate or extend it to the AR type.
  Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
  auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
                                         Intrinsic::umul_with_overflow, Ty);

  // Compute |Step| * Backedge
  CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
  Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
  Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
  // Compute:
  //   Start + |Step| * Backedge < Start
  //   Start - |Step| * Backedge > Start
  Value *Add = nullptr, *Sub = nullptr;
  if (PointerType *ARPtrTy = dyn_cast<PointerType>(ARExpandTy)) {
    const SCEV *MulS = SE.getSCEV(MulV);
    const SCEV *NegMulS = SE.getNegativeSCEV(MulS);
    Add = Builder.CreateBitCast(expandAddToGEP(MulS, ARPtrTy, Ty, StartValue),
                                ARPtrTy);
    Sub = Builder.CreateBitCast(
        expandAddToGEP(NegMulS, ARPtrTy, Ty, StartValue), ARPtrTy);
  } else {
    Add = Builder.CreateAdd(StartValue, MulV);
    Sub = Builder.CreateSub(StartValue, MulV);
  }

  Value *EndCompareGT = Builder.CreateICmp(
      Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);

  Value *EndCompareLT = Builder.CreateICmp(
      Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);

  // Select the answer based on the sign of Step.
  Value *EndCheck =
      Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
  // If the backedge taken count type is larger than the AR type,
  // check that we don't drop any bits by truncating it. If we are
  // dropping bits, then we have overflow (unless the step is zero).
  if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
    auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
    auto *BackedgeCheck =
        Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
                           ConstantInt::get(Loc->getContext(), MaxVal));
    BackedgeCheck = Builder.CreateAnd(
        BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));

    EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
  }

  EndCheck = Builder.CreateOr(EndCheck, OfMul);
  return EndCheck;
}
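// Sketch of the emitted check for a positive step (illustrative):
//   %mul = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %step, i64 %btc)
//   %mul.result = extractvalue { i64, i1 } %mul, 0
//   %end = add i64 %start, %mul.result
//   %cmp = icmp ult i64 %end, %start        ; wrapped around %start?
//   %check = or i1 %cmp, %mul.overflow
// A true %check means the assumed no-wrap property fails at runtime.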
Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
                                         Instruction *IP) {
  const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
  Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;

  // Add a check for NUSW
  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
    NUSWCheck = generateOverflowCheck(A, IP, false);

  // Add a check for NSSW
  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
    NSSWCheck = generateOverflowCheck(A, IP, true);

  if (NUSWCheck && NSSWCheck)
    return Builder.CreateOr(NUSWCheck, NSSWCheck);

  if (NUSWCheck)
    return NUSWCheck;

  if (NSSWCheck)
    return NSSWCheck;

  return ConstantInt::getFalse(IP->getContext());
}
Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
                                          Instruction *IP) {
  auto *BoolType = IntegerType::get(IP->getContext(), 1);
  Value *Check = ConstantInt::getNullValue(BoolType);

  // Loop over all checks in this set.
  for (auto Pred : Union->getPredicates()) {
    auto *NextCheck = expandCodeForPredicate(Pred, IP);
    Builder.SetInsertPoint(IP);
    Check = Builder.CreateOr(Check, NextCheck);
  }

  return Check;
}
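// The union check is just the OR of its member checks, e.g. (illustrative):
//   %c0 = <check for predicate 0>
//   %c1 = <check for predicate 1>
//   %union.check = or i1 %c0, %c1
// where a true result means at least one predicate did not hold.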
namespace {
// Search for a SCEV subexpression that is not safe to expand. Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check
// is only needed when the expression includes some subexpression that is not
// IV derived.
//
// Currently, we only allow division by a nonzero constant here. If this is
// inadequate, we could easily allow division by SCEVUnknown by using
// ValueTracking to check isKnownNonZero().
//
// We cannot generally expand recurrences unless the step dominates the loop
// header. The expander handles the special case of affine recurrences by
// scaling the recurrence outside the loop, but this technique isn't generally
// applicable. Expanding a nested recurrence outside a loop requires computing
// binomial coefficients. This could be done, but the recurrence has to be in a
// perfectly reduced form, which can't be guaranteed.
struct SCEVFindUnsafe {
  ScalarEvolution &SE;
  bool IsUnsafe;

  SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}

  bool follow(const SCEV *S) {
    if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
      if (!SC || SC->getValue()->isZero()) {
        IsUnsafe = true;
        return false;
      }
    }
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
      const SCEV *Step = AR->getStepRecurrence(SE);
      if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
        IsUnsafe = true;
        return false;
      }
    }
    return true;
  }
  bool isDone() const { return IsUnsafe; }
};
}

namespace llvm {
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
  SCEVFindUnsafe Search(SE);
  visitAll(S, Search);
  return !Search.IsUnsafe;
}

bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint,
                      ScalarEvolution &SE) {
  return isSafeToExpand(S, SE) &&
         SE.dominates(S, InsertionPoint->getParent());
}
}