[clang][modules] Don't prevent translation of FW_Private includes when explicitly...
[llvm-project.git] / llvm / lib / Analysis / IVDescriptors.cpp
blob46629e381bc366540a11ecfaf99dcb1f9482e84d
1 //===- llvm/Analysis/IVDescriptors.cpp - IndVar Descriptors -----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file "describes" induction and recurrence variables.
11 //===----------------------------------------------------------------------===//
13 #include "llvm/Analysis/IVDescriptors.h"
14 #include "llvm/Analysis/DemandedBits.h"
15 #include "llvm/Analysis/LoopInfo.h"
16 #include "llvm/Analysis/ScalarEvolution.h"
17 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
18 #include "llvm/Analysis/ValueTracking.h"
19 #include "llvm/IR/Dominators.h"
20 #include "llvm/IR/Instructions.h"
21 #include "llvm/IR/Module.h"
22 #include "llvm/IR/PatternMatch.h"
23 #include "llvm/IR/ValueHandle.h"
24 #include "llvm/Support/Debug.h"
25 #include "llvm/Support/KnownBits.h"
27 #include <set>
29 using namespace llvm;
30 using namespace llvm::PatternMatch;
32 #define DEBUG_TYPE "iv-descriptors"
34 bool RecurrenceDescriptor::areAllUsesIn(Instruction *I,
35 SmallPtrSetImpl<Instruction *> &Set) {
36 for (const Use &Use : I->operands())
37 if (!Set.count(dyn_cast<Instruction>(Use)))
38 return false;
39 return true;
42 bool RecurrenceDescriptor::isIntegerRecurrenceKind(RecurKind Kind) {
43 switch (Kind) {
44 default:
45 break;
46 case RecurKind::Add:
47 case RecurKind::Mul:
48 case RecurKind::Or:
49 case RecurKind::And:
50 case RecurKind::Xor:
51 case RecurKind::SMax:
52 case RecurKind::SMin:
53 case RecurKind::UMax:
54 case RecurKind::UMin:
55 case RecurKind::IAnyOf:
56 case RecurKind::FAnyOf:
57 return true;
59 return false;
62 bool RecurrenceDescriptor::isFloatingPointRecurrenceKind(RecurKind Kind) {
63 return (Kind != RecurKind::None) && !isIntegerRecurrenceKind(Kind);
66 /// Determines if Phi may have been type-promoted. If Phi has a single user
67 /// that ANDs the Phi with a type mask, return the user. RT is updated to
68 /// account for the narrower bit width represented by the mask, and the AND
69 /// instruction is added to CI.
70 static Instruction *lookThroughAnd(PHINode *Phi, Type *&RT,
71 SmallPtrSetImpl<Instruction *> &Visited,
72 SmallPtrSetImpl<Instruction *> &CI) {
73 if (!Phi->hasOneUse())
74 return Phi;
76 const APInt *M = nullptr;
77 Instruction *I, *J = cast<Instruction>(Phi->use_begin()->getUser());
79 // Matches either I & 2^x-1 or 2^x-1 & I. If we find a match, we update RT
80 // with a new integer type of the corresponding bit width.
81 if (match(J, m_c_And(m_Instruction(I), m_APInt(M)))) {
82 int32_t Bits = (*M + 1).exactLogBase2();
83 if (Bits > 0) {
84 RT = IntegerType::get(Phi->getContext(), Bits);
85 Visited.insert(Phi);
86 CI.insert(J);
87 return J;
90 return Phi;
/// Compute the minimal bit width needed to represent a reduction whose exit
/// instruction is given by Exit.
///
/// Returns the narrowest integer type that can hold the reduction result and
/// a flag indicating whether sign extension (true) or zero extension (false)
/// is needed to restore the original type. DB, AC and DT are optional; with
/// none of them available the result is simply Exit's full type width.
static std::pair<Type *, bool> computeRecurrenceType(Instruction *Exit,
                                                     DemandedBits *DB,
                                                     AssumptionCache *AC,
                                                     DominatorTree *DT) {
  bool IsSigned = false;
  const DataLayout &DL = Exit->getModule()->getDataLayout();
  uint64_t MaxBitWidth = DL.getTypeSizeInBits(Exit->getType());

  if (DB) {
    // Use the demanded bits analysis to determine the bits that are live out
    // of the exit instruction, rounding up to the nearest power of two. If the
    // use of demanded bits results in a smaller bit width, we know the value
    // must be positive (i.e., IsSigned = false), because if this were not the
    // case, the sign bit would have been demanded.
    auto Mask = DB->getDemandedBits(Exit);
    MaxBitWidth = Mask.getBitWidth() - Mask.countl_zero();
  }

  if (MaxBitWidth == DL.getTypeSizeInBits(Exit->getType()) && AC && DT) {
    // If demanded bits wasn't able to limit the bit width, we can try to use
    // value tracking instead. This can be the case, for example, if the value
    // may be negative.
    auto NumSignBits = ComputeNumSignBits(Exit, DL, 0, AC, nullptr, DT);
    auto NumTypeBits = DL.getTypeSizeInBits(Exit->getType());
    MaxBitWidth = NumTypeBits - NumSignBits;
    KnownBits Bits = computeKnownBits(Exit, DL);
    if (!Bits.isNonNegative()) {
      // If the value is not known to be non-negative, we set IsSigned to true,
      // meaning that we will use sext instructions instead of zext
      // instructions to restore the original type.
      IsSigned = true;
      // Make sure at least one sign bit is included in the result, so it
      // will get properly sign-extended.
      ++MaxBitWidth;
    }
  }
  // Round up to the next power-of-two width, which is what the vectorizer's
  // type-shrinking machinery expects.
  MaxBitWidth = llvm::bit_ceil(MaxBitWidth);

  return std::make_pair(Type::getIntNTy(Exit->getContext(), MaxBitWidth),
                        IsSigned);
}
137 /// Collect cast instructions that can be ignored in the vectorizer's cost
138 /// model, given a reduction exit value and the minimal type in which the
139 // reduction can be represented. Also search casts to the recurrence type
140 // to find the minimum width used by the recurrence.
141 static void collectCastInstrs(Loop *TheLoop, Instruction *Exit,
142 Type *RecurrenceType,
143 SmallPtrSetImpl<Instruction *> &Casts,
144 unsigned &MinWidthCastToRecurTy) {
146 SmallVector<Instruction *, 8> Worklist;
147 SmallPtrSet<Instruction *, 8> Visited;
148 Worklist.push_back(Exit);
149 MinWidthCastToRecurTy = -1U;
151 while (!Worklist.empty()) {
152 Instruction *Val = Worklist.pop_back_val();
153 Visited.insert(Val);
154 if (auto *Cast = dyn_cast<CastInst>(Val)) {
155 if (Cast->getSrcTy() == RecurrenceType) {
156 // If the source type of a cast instruction is equal to the recurrence
157 // type, it will be eliminated, and should be ignored in the vectorizer
158 // cost model.
159 Casts.insert(Cast);
160 continue;
162 if (Cast->getDestTy() == RecurrenceType) {
163 // The minimum width used by the recurrence is found by checking for
164 // casts on its operands. The minimum width is used by the vectorizer
165 // when finding the widest type for in-loop reductions without any
166 // loads/stores.
167 MinWidthCastToRecurTy = std::min<unsigned>(
168 MinWidthCastToRecurTy, Cast->getSrcTy()->getScalarSizeInBits());
169 continue;
172 // Add all operands to the work list if they are loop-varying values that
173 // we haven't yet visited.
174 for (Value *O : cast<User>(Val)->operands())
175 if (auto *I = dyn_cast<Instruction>(O))
176 if (TheLoop->contains(I) && !Visited.count(I))
177 Worklist.push_back(I);
181 // Check if a given Phi node can be recognized as an ordered reduction for
182 // vectorizing floating point operations without unsafe math.
183 static bool checkOrderedReduction(RecurKind Kind, Instruction *ExactFPMathInst,
184 Instruction *Exit, PHINode *Phi) {
185 // Currently only FAdd and FMulAdd are supported.
186 if (Kind != RecurKind::FAdd && Kind != RecurKind::FMulAdd)
187 return false;
189 if (Kind == RecurKind::FAdd && Exit->getOpcode() != Instruction::FAdd)
190 return false;
192 if (Kind == RecurKind::FMulAdd &&
193 !RecurrenceDescriptor::isFMulAddIntrinsic(Exit))
194 return false;
196 // Ensure the exit instruction has only one user other than the reduction PHI
197 if (Exit != ExactFPMathInst || Exit->hasNUsesOrMore(3))
198 return false;
200 // The only pattern accepted is the one in which the reduction PHI
201 // is used as one of the operands of the exit instruction
202 auto *Op0 = Exit->getOperand(0);
203 auto *Op1 = Exit->getOperand(1);
204 if (Kind == RecurKind::FAdd && Op0 != Phi && Op1 != Phi)
205 return false;
206 if (Kind == RecurKind::FMulAdd && Exit->getOperand(2) != Phi)
207 return false;
209 LLVM_DEBUG(dbgs() << "LV: Found an ordered reduction: Phi: " << *Phi
210 << ", ExitInst: " << *Exit << "\n");
212 return true;
/// Attempt to prove that the value cycle rooted at \p Phi is a reduction of
/// kind \p Kind over \p TheLoop. On success, fills in \p RedDes with the
/// description of the reduction and returns true. DB/AC/DT are optional and
/// only used for type-shrinking analysis; SE is required to admit
/// intermediate stores of the reduction value.
bool RecurrenceDescriptor::AddReductionVar(
    PHINode *Phi, RecurKind Kind, Loop *TheLoop, FastMathFlags FuncFMF,
    RecurrenceDescriptor &RedDes, DemandedBits *DB, AssumptionCache *AC,
    DominatorTree *DT, ScalarEvolution *SE) {
  if (Phi->getNumIncomingValues() != 2)
    return false;

  // Reduction variables are only found in the loop header block.
  if (Phi->getParent() != TheLoop->getHeader())
    return false;

  // Obtain the reduction start value from the value that comes from the loop
  // preheader.
  Value *RdxStart = Phi->getIncomingValueForBlock(TheLoop->getLoopPreheader());

  // ExitInstruction is the single value which is used outside the loop.
  // We only allow for a single reduction value to be used outside the loop.
  // This includes users of the reduction, variables (which form a cycle
  // which ends in the phi node).
  Instruction *ExitInstruction = nullptr;

  // Variable to keep last visited store instruction. By the end of the
  // algorithm this variable will be either empty or having intermediate
  // reduction value stored in invariant address.
  StoreInst *IntermediateStore = nullptr;

  // Indicates that we found a reduction operation in our scan.
  bool FoundReduxOp = false;

  // We start with the PHI node and scan for all of the users of this
  // instruction. All users must be instructions that can be used as reduction
  // variables (such as ADD). We must have a single out-of-block user. The cycle
  // must include the original PHI.
  bool FoundStartPHI = false;

  // To recognize min/max patterns formed by a icmp select sequence, we store
  // the number of instruction we saw from the recognized min/max pattern,
  // to make sure we only see exactly the two instructions.
  unsigned NumCmpSelectPatternInst = 0;
  InstDesc ReduxDesc(false, nullptr);

  // Data used for determining if the recurrence has been type-promoted.
  Type *RecurrenceType = Phi->getType();
  SmallPtrSet<Instruction *, 4> CastInsts;
  unsigned MinWidthCastToRecurrenceType;
  Instruction *Start = Phi;
  bool IsSigned = false;

  SmallPtrSet<Instruction *, 8> VisitedInsts;
  SmallVector<Instruction *, 8> Worklist;

  // Return early if the recurrence kind does not match the type of Phi. If the
  // recurrence kind is arithmetic, we attempt to look through AND operations
  // resulting from the type promotion performed by InstCombine. Vector
  // operations are not limited to the legal integer widths, so we may be able
  // to evaluate the reduction in the narrower width.
  if (RecurrenceType->isFloatingPointTy()) {
    if (!isFloatingPointRecurrenceKind(Kind))
      return false;
  } else if (RecurrenceType->isIntegerTy()) {
    if (!isIntegerRecurrenceKind(Kind))
      return false;
    if (!isMinMaxRecurrenceKind(Kind))
      Start = lookThroughAnd(Phi, RecurrenceType, VisitedInsts, CastInsts);
  } else {
    // Pointer min/max may exist, but it is not supported as a reduction op.
    return false;
  }

  Worklist.push_back(Start);
  VisitedInsts.insert(Start);

  // Start with all flags set because we will intersect this with the reduction
  // flags from all the reduction operations.
  FastMathFlags FMF = FastMathFlags::getFast();

  // The first instruction in the use-def chain of the Phi node that requires
  // exact floating point operations.
  Instruction *ExactFPMathInst = nullptr;

  // A value in the reduction can be used:
  //  - By the reduction:
  //      - Reduction operation:
  //        - One use of reduction value (safe).
  //        - Multiple use of reduction value (not safe).
  //      - PHI:
  //        - All uses of the PHI must be the reduction (safe).
  //        - Otherwise, not safe.
  //  - By instructions outside of the loop (safe).
  //      * One value may have several outside users, but all outside
  //        uses must be of the same value.
  //  - By store instructions with a loop invariant address (safe with
  //    the following restrictions):
  //      * If there are several stores, all must have the same address.
  //      * Final value should be stored in that loop invariant address.
  //  - By an instruction that is not part of the reduction (not safe).
  //    This is either:
  //      * An instruction type other than PHI or the reduction operation.
  //      * A PHI in the header other than the initial PHI.
  while (!Worklist.empty()) {
    Instruction *Cur = Worklist.pop_back_val();

    // Store instructions are allowed iff it is the store of the reduction
    // value to the same loop invariant memory location.
    if (auto *SI = dyn_cast<StoreInst>(Cur)) {
      if (!SE) {
        LLVM_DEBUG(dbgs() << "Store instructions are not processed without "
                          << "Scalar Evolution Analysis\n");
        return false;
      }

      const SCEV *PtrScev = SE->getSCEV(SI->getPointerOperand());
      // Check it is the same address as previous stores
      if (IntermediateStore) {
        const SCEV *OtherScev =
            SE->getSCEV(IntermediateStore->getPointerOperand());

        if (OtherScev != PtrScev) {
          LLVM_DEBUG(dbgs() << "Storing reduction value to different addresses "
                            << "inside the loop: " << *SI->getPointerOperand()
                            << " and "
                            << *IntermediateStore->getPointerOperand() << '\n');
          return false;
        }
      }

      // Check the pointer is loop invariant
      if (!SE->isLoopInvariant(PtrScev, TheLoop)) {
        LLVM_DEBUG(dbgs() << "Storing reduction value to non-uniform address "
                          << "inside the loop: " << *SI->getPointerOperand()
                          << '\n');
        return false;
      }

      // IntermediateStore is always the last store in the loop.
      IntermediateStore = SI;
      continue;
    }

    // No Users.
    // If the instruction has no users then this is a broken chain and can't be
    // a reduction variable.
    if (Cur->use_empty())
      return false;

    bool IsAPhi = isa<PHINode>(Cur);

    // A header PHI use other than the original PHI.
    if (Cur != Phi && IsAPhi && Cur->getParent() == Phi->getParent())
      return false;

    // Reductions of instructions such as Div, and Sub is only possible if the
    // LHS is the reduction variable.
    if (!Cur->isCommutative() && !IsAPhi && !isa<SelectInst>(Cur) &&
        !isa<ICmpInst>(Cur) && !isa<FCmpInst>(Cur) &&
        !VisitedInsts.count(dyn_cast<Instruction>(Cur->getOperand(0))))
      return false;

    // Any reduction instruction must be of one of the allowed kinds. We ignore
    // the starting value (the Phi or an AND instruction if the Phi has been
    // type-promoted).
    if (Cur != Start) {
      ReduxDesc =
          isRecurrenceInstr(TheLoop, Phi, Cur, Kind, ReduxDesc, FuncFMF);
      // Remember only the FIRST instruction requiring exact FP math.
      ExactFPMathInst = ExactFPMathInst == nullptr
                            ? ReduxDesc.getExactFPMathInst()
                            : ExactFPMathInst;
      if (!ReduxDesc.isRecurrence())
        return false;
      // FIXME: FMF is allowed on phi, but propagation is not handled correctly.
      if (isa<FPMathOperator>(ReduxDesc.getPatternInst()) && !IsAPhi) {
        FastMathFlags CurFMF = ReduxDesc.getPatternInst()->getFastMathFlags();
        if (auto *Sel = dyn_cast<SelectInst>(ReduxDesc.getPatternInst())) {
          // Accept FMF on either fcmp or select of a min/max idiom.
          // TODO: This is a hack to work-around the fact that FMF may not be
          // assigned/propagated correctly. If that problem is fixed or we
          // standardize on fmin/fmax via intrinsics, this can be removed.
          if (auto *FCmp = dyn_cast<FCmpInst>(Sel->getCondition()))
            CurFMF |= FCmp->getFastMathFlags();
        }
        FMF &= CurFMF;
      }
      // Update this reduction kind if we matched a new instruction.
      // TODO: Can we eliminate the need for a 2nd InstDesc by keeping 'Kind'
      // state accurate while processing the worklist?
      if (ReduxDesc.getRecKind() != RecurKind::None)
        Kind = ReduxDesc.getRecKind();
    }

    bool IsASelect = isa<SelectInst>(Cur);

    // A conditional reduction operation must only have 2 or less uses in
    // VisitedInsts.
    if (IsASelect && (Kind == RecurKind::FAdd || Kind == RecurKind::FMul) &&
        hasMultipleUsesOf(Cur, VisitedInsts, 2))
      return false;

    // A reduction operation must only have one use of the reduction value.
    if (!IsAPhi && !IsASelect && !isMinMaxRecurrenceKind(Kind) &&
        !isAnyOfRecurrenceKind(Kind) && hasMultipleUsesOf(Cur, VisitedInsts, 1))
      return false;

    // All inputs to a PHI node must be a reduction value.
    if (IsAPhi && Cur != Phi && !areAllUsesIn(Cur, VisitedInsts))
      return false;

    if ((isIntMinMaxRecurrenceKind(Kind) || Kind == RecurKind::IAnyOf) &&
        (isa<ICmpInst>(Cur) || isa<SelectInst>(Cur)))
      ++NumCmpSelectPatternInst;
    if ((isFPMinMaxRecurrenceKind(Kind) || Kind == RecurKind::FAnyOf) &&
        (isa<FCmpInst>(Cur) || isa<SelectInst>(Cur)))
      ++NumCmpSelectPatternInst;

    // Check whether we found a reduction operator.
    FoundReduxOp |= !IsAPhi && Cur != Start;

    // Process users of current instruction. Push non-PHI nodes after PHI nodes
    // onto the stack. This way we are going to have seen all inputs to PHI
    // nodes once we get to them.
    SmallVector<Instruction *, 8> NonPHIs;
    SmallVector<Instruction *, 8> PHIs;
    for (User *U : Cur->users()) {
      Instruction *UI = cast<Instruction>(U);

      // If the user is a call to llvm.fmuladd then the instruction can only be
      // the final operand.
      if (isFMulAddIntrinsic(UI))
        if (Cur == UI->getOperand(0) || Cur == UI->getOperand(1))
          return false;

      // Check if we found the exit user.
      BasicBlock *Parent = UI->getParent();
      if (!TheLoop->contains(Parent)) {
        // If we already know this instruction is used externally, move on to
        // the next user.
        if (ExitInstruction == Cur)
          continue;

        // Exit if you find multiple values used outside or if the header phi
        // node is being used. In this case the user uses the value of the
        // previous iteration, in which case we would loose "VF-1" iterations of
        // the reduction operation if we vectorize.
        if (ExitInstruction != nullptr || Cur == Phi)
          return false;

        // The instruction used by an outside user must be the last instruction
        // before we feed back to the reduction phi. Otherwise, we loose VF-1
        // operations on the value.
        if (!is_contained(Phi->operands(), Cur))
          return false;

        ExitInstruction = Cur;
        continue;
      }

      // Process instructions only once (termination). Each reduction cycle
      // value must only be used once, except by phi nodes and min/max
      // reductions which are represented as a cmp followed by a select.
      InstDesc IgnoredVal(false, nullptr);
      if (VisitedInsts.insert(UI).second) {
        if (isa<PHINode>(UI)) {
          PHIs.push_back(UI);
        } else {
          StoreInst *SI = dyn_cast<StoreInst>(UI);
          if (SI && SI->getPointerOperand() == Cur) {
            // Reduction variable chain can only be stored somewhere but it
            // can't be used as an address.
            return false;
          }
          NonPHIs.push_back(UI);
        }
      } else if (!isa<PHINode>(UI) &&
                 ((!isa<FCmpInst>(UI) && !isa<ICmpInst>(UI) &&
                   !isa<SelectInst>(UI)) ||
                  (!isConditionalRdxPattern(Kind, UI).isRecurrence() &&
                   !isAnyOfPattern(TheLoop, Phi, UI, IgnoredVal)
                        .isRecurrence() &&
                   !isMinMaxPattern(UI, Kind, IgnoredVal).isRecurrence())))
        return false;

      // Remember that we completed the cycle.
      if (UI == Phi)
        FoundStartPHI = true;
    }
    Worklist.append(PHIs.begin(), PHIs.end());
    Worklist.append(NonPHIs.begin(), NonPHIs.end());
  }

  // This means we have seen one but not the other instruction of the
  // pattern or more than just a select and cmp. Zero implies that we saw a
  // llvm.min/max intrinsic, which is always OK.
  if (isMinMaxRecurrenceKind(Kind) && NumCmpSelectPatternInst != 2 &&
      NumCmpSelectPatternInst != 0)
    return false;

  if (isAnyOfRecurrenceKind(Kind) && NumCmpSelectPatternInst != 1)
    return false;

  if (IntermediateStore) {
    // Check that stored value goes to the phi node again. This way we make sure
    // that the value stored in IntermediateStore is indeed the final reduction
    // value.
    if (!is_contained(Phi->operands(), IntermediateStore->getValueOperand())) {
      LLVM_DEBUG(dbgs() << "Not a final reduction value stored: "
                        << *IntermediateStore << '\n');
      return false;
    }

    // If there is an exit instruction it's value should be stored in
    // IntermediateStore
    if (ExitInstruction &&
        IntermediateStore->getValueOperand() != ExitInstruction) {
      LLVM_DEBUG(dbgs() << "Last store Instruction of reduction value does not "
                           "store last calculated value of the reduction: "
                        << *IntermediateStore << '\n');
      return false;
    }

    // If all uses are inside the loop (intermediate stores), then the
    // reduction value after the loop will be the one used in the last store.
    if (!ExitInstruction)
      ExitInstruction = cast<Instruction>(IntermediateStore->getValueOperand());
  }

  if (!FoundStartPHI || !FoundReduxOp || !ExitInstruction)
    return false;

  const bool IsOrdered =
      checkOrderedReduction(Kind, ExactFPMathInst, ExitInstruction, Phi);

  if (Start != Phi) {
    // If the starting value is not the same as the phi node, we speculatively
    // looked through an 'and' instruction when evaluating a potential
    // arithmetic reduction to determine if it may have been type-promoted.
    //
    // We now compute the minimal bit width that is required to represent the
    // reduction. If this is the same width that was indicated by the 'and', we
    // can represent the reduction in the smaller type. The 'and' instruction
    // will be eliminated since it will essentially be a cast instruction that
    // can be ignore in the cost model. If we compute a different type than we
    // did when evaluating the 'and', the 'and' will not be eliminated, and we
    // will end up with different kinds of operations in the recurrence
    // expression (e.g., IntegerAND, IntegerADD). We give up if this is
    // the case.
    //
    // The vectorizer relies on InstCombine to perform the actual
    // type-shrinking. It does this by inserting instructions to truncate the
    // exit value of the reduction to the width indicated by RecurrenceType and
    // then extend this value back to the original width. If IsSigned is false,
    // a 'zext' instruction will be generated; otherwise, a 'sext' will be
    // used.
    //
    // TODO: We should not rely on InstCombine to rewrite the reduction in the
    // smaller type. We should just generate a correctly typed expression
    // to begin with.
    Type *ComputedType;
    std::tie(ComputedType, IsSigned) =
        computeRecurrenceType(ExitInstruction, DB, AC, DT);
    if (ComputedType != RecurrenceType)
      return false;
  }

  // Collect cast instructions and the minimum width used by the recurrence.
  // If the starting value is not the same as the phi node and the computed
  // recurrence type is equal to the recurrence type, the recurrence expression
  // will be represented in a narrower or wider type. If there are any cast
  // instructions that will be unnecessary, collect them in CastsFromRecurTy.
  // Note that the 'and' instruction was already included in this list.
  //
  // TODO: A better way to represent this may be to tag in some way all the
  // instructions that are a part of the reduction. The vectorizer cost
  // model could then apply the recurrence type to these instructions,
  // without needing a white list of instructions to ignore.
  // This may also be useful for the inloop reductions, if it can be
  // kept simple enough.
  collectCastInstrs(TheLoop, ExitInstruction, RecurrenceType, CastInsts,
                    MinWidthCastToRecurrenceType);

  // We found a reduction var if we have reached the original phi node and we
  // only have a single instruction with out-of-loop users.

  // The ExitInstruction(Instruction which is allowed to have out-of-loop users)
  // is saved as part of the RecurrenceDescriptor.

  // Save the description of this reduction variable.
  RecurrenceDescriptor RD(RdxStart, ExitInstruction, IntermediateStore, Kind,
                          FMF, ExactFPMathInst, RecurrenceType, IsSigned,
                          IsOrdered, CastInsts, MinWidthCastToRecurrenceType);
  RedDes = RD;

  return true;
}
608 // We are looking for loops that do something like this:
609 // int r = 0;
610 // for (int i = 0; i < n; i++) {
611 // if (src[i] > 3)
612 // r = 3;
613 // }
614 // where the reduction value (r) only has two states, in this example 0 or 3.
615 // The generated LLVM IR for this type of loop will be like this:
616 // for.body:
617 // %r = phi i32 [ %spec.select, %for.body ], [ 0, %entry ]
618 // ...
619 // %cmp = icmp sgt i32 %5, 3
620 // %spec.select = select i1 %cmp, i32 3, i32 %r
621 // ...
622 // In general we can support vectorization of loops where 'r' flips between
623 // any two non-constants, provided they are loop invariant. The only thing
624 // we actually care about at the end of the loop is whether or not any lane
625 // in the selected vector is different from the start value. The final
626 // across-vector reduction after the loop simply involves choosing the start
627 // value if nothing changed (0 in the example above) or the other selected
628 // value (3 in the example above).
629 RecurrenceDescriptor::InstDesc
630 RecurrenceDescriptor::isAnyOfPattern(Loop *Loop, PHINode *OrigPhi,
631 Instruction *I, InstDesc &Prev) {
632 // We must handle the select(cmp(),x,y) as a single instruction. Advance to
633 // the select.
634 CmpInst::Predicate Pred;
635 if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
636 if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
637 return InstDesc(Select, Prev.getRecKind());
640 // Only match select with single use cmp condition.
641 if (!match(I, m_Select(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), m_Value(),
642 m_Value())))
643 return InstDesc(false, I);
645 SelectInst *SI = cast<SelectInst>(I);
646 Value *NonPhi = nullptr;
648 if (OrigPhi == dyn_cast<PHINode>(SI->getTrueValue()))
649 NonPhi = SI->getFalseValue();
650 else if (OrigPhi == dyn_cast<PHINode>(SI->getFalseValue()))
651 NonPhi = SI->getTrueValue();
652 else
653 return InstDesc(false, I);
655 // We are looking for selects of the form:
656 // select(cmp(), phi, loop_invariant) or
657 // select(cmp(), loop_invariant, phi)
658 if (!Loop->isLoopInvariant(NonPhi))
659 return InstDesc(false, I);
661 return InstDesc(I, isa<ICmpInst>(I->getOperand(0)) ? RecurKind::IAnyOf
662 : RecurKind::FAnyOf);
/// Recognize I as one step of a min/max recurrence of kind \p Kind. Accepts
/// either the icmp/fcmp+select idiom or a min/max intrinsic; the returned
/// InstDesc is a recurrence only when the matched operation agrees with Kind.
RecurrenceDescriptor::InstDesc
RecurrenceDescriptor::isMinMaxPattern(Instruction *I, RecurKind Kind,
                                      const InstDesc &Prev) {
  assert((isa<CmpInst>(I) || isa<SelectInst>(I) || isa<CallInst>(I)) &&
         "Expected a cmp or select or call instruction");
  if (!isMinMaxRecurrenceKind(Kind))
    return InstDesc(false, I);

  // We must handle the select(cmp()) as a single instruction. Advance to the
  // select.
  CmpInst::Predicate Pred;
  if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
    if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
      return InstDesc(Select, Prev.getRecKind());
  }

  // Only match select with single use cmp condition, or a min/max intrinsic.
  if (!isa<IntrinsicInst>(I) &&
      !match(I, m_Select(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), m_Value(),
                         m_Value())))
    return InstDesc(false, I);

  // Look for a min/max pattern. Both the cmp+select idioms (ordered and
  // unordered FP variants) and the llvm.minnum/maxnum/minimum/maximum
  // intrinsics are recognized here; the InstDesc is a recurrence only when
  // the matched pattern agrees with the requested Kind.
  if (match(I, m_UMin(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::UMin, I);
  if (match(I, m_UMax(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::UMax, I);
  if (match(I, m_SMax(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::SMax, I);
  if (match(I, m_SMin(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::SMin, I);
  if (match(I, m_OrdFMin(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMin, I);
  if (match(I, m_OrdFMax(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMax, I);
  if (match(I, m_UnordFMin(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMin, I);
  if (match(I, m_UnordFMax(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMax, I);
  if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMin, I);
  if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMax, I);
  if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMinimum, I);
  if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMaximum, I);

  return InstDesc(false, I);
}
/// Returns true if the select instruction has users in the compare-and-add
/// reduction pattern below. The select instruction argument is the last one
/// in the sequence.
///
/// %sum.1 = phi ...
/// ...
/// %cmp = fcmp pred %0, %CFP
/// %add = fadd %0, %sum.1
/// %sum.2 = select %cmp, %add, %sum.1
RecurrenceDescriptor::InstDesc
RecurrenceDescriptor::isConditionalRdxPattern(RecurKind Kind, Instruction *I) {
  SelectInst *SI = dyn_cast<SelectInst>(I);
  if (!SI)
    return InstDesc(false, I);

  CmpInst *CI = dyn_cast<CmpInst>(SI->getCondition());
  // Only handle single use cases for now.
  if (!CI || !CI->hasOneUse())
    return InstDesc(false, I);

  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();
  // Handle only when either of operands of select instruction is a PHI
  // node for now.
  if ((isa<PHINode>(*TrueVal) && isa<PHINode>(*FalseVal)) ||
      (!isa<PHINode>(*TrueVal) && !isa<PHINode>(*FalseVal)))
    return InstDesc(false, I);

  // I1 is the non-phi arm: the binary op that computes the updated value.
  Instruction *I1 =
      isa<PHINode>(*TrueVal) ? dyn_cast<Instruction>(FalseVal)
                             : dyn_cast<Instruction>(TrueVal);
  if (!I1 || !I1->isBinaryOp())
    return InstDesc(false, I);

  // The update must be an add/sub/mul. FP variants are only accepted when
  // the operation carries the full 'fast' flag set; integer variants are
  // accepted unconditionally.
  Value *Op1, *Op2;
  if (!(((m_FAdd(m_Value(Op1), m_Value(Op2)).match(I1) ||
          m_FSub(m_Value(Op1), m_Value(Op2)).match(I1)) &&
         I1->isFast()) ||
        (m_FMul(m_Value(Op1), m_Value(Op2)).match(I1) && (I1->isFast())) ||
        ((m_Add(m_Value(Op1), m_Value(Op2)).match(I1) ||
          m_Sub(m_Value(Op1), m_Value(Op2)).match(I1))) ||
        (m_Mul(m_Value(Op1), m_Value(Op2)).match(I1))))
    return InstDesc(false, I);

  // The phi operand of the update must also be the select's false arm, i.e.
  // the "unchanged" path must carry the running reduction value.
  Instruction *IPhi = isa<PHINode>(*Op1) ? dyn_cast<Instruction>(Op1)
                                         : dyn_cast<Instruction>(Op2);
  if (!IPhi || IPhi != FalseVal)
    return InstDesc(false, I);

  return InstDesc(true, SI);
}
/// Classify instruction \p I as a step of a recurrence of kind \p Kind,
/// returning an InstDesc that says whether it is a valid recurrence
/// instruction and, for FP operations, whether it requires exact FP math
/// (i.e. lacks the reassoc fast-math flag).
RecurrenceDescriptor::InstDesc
RecurrenceDescriptor::isRecurrenceInstr(Loop *L, PHINode *OrigPhi,
                                        Instruction *I, RecurKind Kind,
                                        InstDesc &Prev, FastMathFlags FuncFMF) {
  assert(Prev.getRecKind() == RecurKind::None || Prev.getRecKind() == Kind);
  switch (I->getOpcode()) {
  default:
    return InstDesc(false, I);
  case Instruction::PHI:
    // Phis are transparent: propagate the previously matched kind/FP info.
    return InstDesc(I, Prev.getRecKind(), Prev.getExactFPMathInst());
  case Instruction::Sub:
  case Instruction::Add:
    return InstDesc(Kind == RecurKind::Add, I);
  case Instruction::Mul:
    return InstDesc(Kind == RecurKind::Mul, I);
  case Instruction::And:
    return InstDesc(Kind == RecurKind::And, I);
  case Instruction::Or:
    return InstDesc(Kind == RecurKind::Or, I);
  case Instruction::Xor:
    return InstDesc(Kind == RecurKind::Xor, I);
  case Instruction::FDiv:
  case Instruction::FMul:
    // Without 'reassoc' the operation pins the reduction to exact FP math.
    return InstDesc(Kind == RecurKind::FMul, I,
                    I->hasAllowReassoc() ? nullptr : I);
  case Instruction::FSub:
  case Instruction::FAdd:
    return InstDesc(Kind == RecurKind::FAdd, I,
                    I->hasAllowReassoc() ? nullptr : I);
  case Instruction::Select:
    if (Kind == RecurKind::FAdd || Kind == RecurKind::FMul ||
        Kind == RecurKind::Add || Kind == RecurKind::Mul)
      return isConditionalRdxPattern(Kind, I);
    [[fallthrough]];
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Call:
    if (isAnyOfRecurrenceKind(Kind))
      return isAnyOfPattern(L, OrigPhi, I, Prev);
    auto HasRequiredFMF = [&]() {
      if (FuncFMF.noNaNs() && FuncFMF.noSignedZeros())
        return true;
      if (isa<FPMathOperator>(I) && I->hasNoNaNs() && I->hasNoSignedZeros())
        return true;
      // minimum and maximum intrinsics do not require nsz and nnan flags since
      // NaN and signed zeroes are propagated in the intrinsic implementation.
      return match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value())) ||
             match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value()));
    };
    if (isIntMinMaxRecurrenceKind(Kind) ||
        (HasRequiredFMF() && isFPMinMaxRecurrenceKind(Kind)))
      return isMinMaxPattern(I, Kind, Prev);
    else if (isFMulAddIntrinsic(I))
      return InstDesc(Kind == RecurKind::FMulAdd, I,
                      I->hasAllowReassoc() ? nullptr : I);
    return InstDesc(false, I);
  }
}
/// Returns true if more than \p MaxNumUses of \p I's operands are
/// instructions contained in \p Insts.
827 bool RecurrenceDescriptor::hasMultipleUsesOf(
828 Instruction *I, SmallPtrSetImpl<Instruction *> &Insts,
829 unsigned MaxNumUses) {
830 unsigned NumUses = 0;
831 for (const Use &U : I->operands()) {
// Non-instruction operands dyn_cast to null and are never in the set.
832 if (Insts.count(dyn_cast<Instruction>(U)))
833 ++NumUses;
// Bail out early once the threshold is exceeded.
834 if (NumUses > MaxNumUses)
835 return true;
838 return false;
/// Identifies \p Phi as a reduction phi of one of the supported kinds by
/// trying AddReductionVar with each RecurKind in turn; the first kind that
/// matches populates \p RedDes and returns true.
841 bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
842 RecurrenceDescriptor &RedDes,
843 DemandedBits *DB, AssumptionCache *AC,
844 DominatorTree *DT,
845 ScalarEvolution *SE) {
846 BasicBlock *Header = TheLoop->getHeader();
847 Function &F = *Header->getParent();
// Seed function-wide fast-math assumptions from the fp-math attributes;
// these feed the FMF checks for the FP min/max kinds below.
848 FastMathFlags FMF;
849 FMF.setNoNaNs(
850 F.getFnAttribute("no-nans-fp-math").getValueAsBool());
851 FMF.setNoSignedZeros(
852 F.getFnAttribute("no-signed-zeros-fp-math").getValueAsBool());
854 if (AddReductionVar(Phi, RecurKind::Add, TheLoop, FMF, RedDes, DB, AC, DT,
855 SE)) {
856 LLVM_DEBUG(dbgs() << "Found an ADD reduction PHI." << *Phi << "\n");
857 return true;
859 if (AddReductionVar(Phi, RecurKind::Mul, TheLoop, FMF, RedDes, DB, AC, DT,
860 SE)) {
861 LLVM_DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n");
862 return true;
864 if (AddReductionVar(Phi, RecurKind::Or, TheLoop, FMF, RedDes, DB, AC, DT,
865 SE)) {
866 LLVM_DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n");
867 return true;
869 if (AddReductionVar(Phi, RecurKind::And, TheLoop, FMF, RedDes, DB, AC, DT,
870 SE)) {
871 LLVM_DEBUG(dbgs() << "Found an AND reduction PHI." << *Phi << "\n");
872 return true;
874 if (AddReductionVar(Phi, RecurKind::Xor, TheLoop, FMF, RedDes, DB, AC, DT,
875 SE)) {
876 LLVM_DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n");
877 return true;
879 if (AddReductionVar(Phi, RecurKind::SMax, TheLoop, FMF, RedDes, DB, AC, DT,
880 SE)) {
881 LLVM_DEBUG(dbgs() << "Found a SMAX reduction PHI." << *Phi << "\n");
882 return true;
884 if (AddReductionVar(Phi, RecurKind::SMin, TheLoop, FMF, RedDes, DB, AC, DT,
885 SE)) {
886 LLVM_DEBUG(dbgs() << "Found a SMIN reduction PHI." << *Phi << "\n");
887 return true;
889 if (AddReductionVar(Phi, RecurKind::UMax, TheLoop, FMF, RedDes, DB, AC, DT,
890 SE)) {
891 LLVM_DEBUG(dbgs() << "Found a UMAX reduction PHI." << *Phi << "\n");
892 return true;
894 if (AddReductionVar(Phi, RecurKind::UMin, TheLoop, FMF, RedDes, DB, AC, DT,
895 SE)) {
896 LLVM_DEBUG(dbgs() << "Found a UMIN reduction PHI." << *Phi << "\n");
897 return true;
899 if (AddReductionVar(Phi, RecurKind::IAnyOf, TheLoop, FMF, RedDes, DB, AC, DT,
900 SE)) {
901 LLVM_DEBUG(dbgs() << "Found an integer conditional select reduction PHI."
902 << *Phi << "\n");
903 return true;
905 if (AddReductionVar(Phi, RecurKind::FMul, TheLoop, FMF, RedDes, DB, AC, DT,
906 SE)) {
907 LLVM_DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n");
908 return true;
910 if (AddReductionVar(Phi, RecurKind::FAdd, TheLoop, FMF, RedDes, DB, AC, DT,
911 SE)) {
912 LLVM_DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n");
913 return true;
915 if (AddReductionVar(Phi, RecurKind::FMax, TheLoop, FMF, RedDes, DB, AC, DT,
916 SE)) {
917 LLVM_DEBUG(dbgs() << "Found a float MAX reduction PHI." << *Phi << "\n");
918 return true;
920 if (AddReductionVar(Phi, RecurKind::FMin, TheLoop, FMF, RedDes, DB, AC, DT,
921 SE)) {
922 LLVM_DEBUG(dbgs() << "Found a float MIN reduction PHI." << *Phi << "\n");
923 return true;
925 if (AddReductionVar(Phi, RecurKind::FAnyOf, TheLoop, FMF, RedDes, DB, AC, DT,
926 SE)) {
// NOTE(review): this debug message prints "PHI." twice; harmless,
// debug-output-only duplication.
927 LLVM_DEBUG(dbgs() << "Found a float conditional select reduction PHI."
928 << " PHI." << *Phi << "\n");
929 return true;
931 if (AddReductionVar(Phi, RecurKind::FMulAdd, TheLoop, FMF, RedDes, DB, AC, DT,
932 SE)) {
933 LLVM_DEBUG(dbgs() << "Found an FMulAdd reduction PHI." << *Phi << "\n");
934 return true;
936 if (AddReductionVar(Phi, RecurKind::FMaximum, TheLoop, FMF, RedDes, DB, AC, DT,
937 SE)) {
938 LLVM_DEBUG(dbgs() << "Found a float MAXIMUM reduction PHI." << *Phi << "\n");
939 return true;
941 if (AddReductionVar(Phi, RecurKind::FMinimum, TheLoop, FMF, RedDes, DB, AC, DT,
942 SE)) {
943 LLVM_DEBUG(dbgs() << "Found a float MINIMUM reduction PHI." << *Phi << "\n");
944 return true;
946 // Not a reduction of known type.
947 return false;
/// Returns true if \p Phi is a fixed-order recurrence: the value it carries
/// from the previous iteration (its latch incoming value, "Previous") is a
/// non-phi loop instruction, and every transitive user of the phi either is
/// already dominated by Previous or can be sunk after it.
950 bool RecurrenceDescriptor::isFixedOrderRecurrence(PHINode *Phi, Loop *TheLoop,
951 DominatorTree *DT) {
953 // Ensure the phi node is in the loop header and has two incoming values.
954 if (Phi->getParent() != TheLoop->getHeader() ||
955 Phi->getNumIncomingValues() != 2)
956 return false;
958 // Ensure the loop has a preheader and a single latch block. The loop
959 // vectorizer will need the latch to set up the next iteration of the loop.
960 auto *Preheader = TheLoop->getLoopPreheader();
961 auto *Latch = TheLoop->getLoopLatch();
962 if (!Preheader || !Latch)
963 return false;
965 // Ensure the phi node's incoming blocks are the loop preheader and latch.
966 if (Phi->getBasicBlockIndex(Preheader) < 0 ||
967 Phi->getBasicBlockIndex(Latch) < 0)
968 return false;
970 // Get the previous value. The previous value comes from the latch edge while
971 // the initial value comes from the preheader edge.
972 auto *Previous = dyn_cast<Instruction>(Phi->getIncomingValueForBlock(Latch));
974 // If Previous is a phi in the header, go through incoming values from the
975 // latch until we find a non-phi value. Use this as the new Previous, all uses
976 // in the header will be dominated by the original phi, but need to be moved
977 // after the non-phi previous value.
// SeenPhis guards against cycles of header phis feeding each other.
978 SmallPtrSet<PHINode *, 4> SeenPhis;
979 while (auto *PrevPhi = dyn_cast_or_null<PHINode>(Previous)) {
980 if (PrevPhi->getParent() != Phi->getParent())
981 return false;
982 if (!SeenPhis.insert(PrevPhi).second)
983 return false;
984 Previous = dyn_cast<Instruction>(PrevPhi->getIncomingValueForBlock(Latch));
987 if (!Previous || !TheLoop->contains(Previous) || isa<PHINode>(Previous))
988 return false;
990 // Ensure every user of the phi node (recursively) is dominated by the
991 // previous value. The dominance requirement ensures the loop vectorizer will
992 // not need to vectorize the initial value prior to the first iteration of the
993 // loop.
994 // TODO: Consider extending this sinking to handle memory instructions.
996 SmallPtrSet<Value *, 8> Seen;
997 BasicBlock *PhiBB = Phi->getParent();
998 SmallVector<Instruction *, 8> WorkList;
// Returns false if SinkCandidate cannot be placed after Previous; when it
// both needs sinking and is safe to sink, it is queued so its own users
// are visited too.
999 auto TryToPushSinkCandidate = [&](Instruction *SinkCandidate) {
1000 // Cyclic dependence.
1001 if (Previous == SinkCandidate)
1002 return false;
// Already visited.
1004 if (!Seen.insert(SinkCandidate).second)
1005 return true;
1006 if (DT->dominates(Previous,
1007 SinkCandidate)) // We already are good w/o sinking.
1008 return true;
// Only simple, side-effect-free instructions in the header block are
// candidates for sinking.
1010 if (SinkCandidate->getParent() != PhiBB ||
1011 SinkCandidate->mayHaveSideEffects() ||
1012 SinkCandidate->mayReadFromMemory() || SinkCandidate->isTerminator())
1013 return false;
1015 // If we reach a PHI node that is not dominated by Previous, we reached a
1016 // header PHI. No need for sinking.
1017 if (isa<PHINode>(SinkCandidate))
1018 return true;
1020 // Sink User tentatively and check its users
1021 WorkList.push_back(SinkCandidate);
1022 return true;
1025 WorkList.push_back(Phi);
1026 // Try to recursively sink instructions and their users after Previous.
1027 while (!WorkList.empty()) {
1028 Instruction *Current = WorkList.pop_back_val();
1029 for (User *User : Current->users()) {
1030 if (!TryToPushSinkCandidate(cast<Instruction>(User)))
1031 return false;
1035 return true;
1038 /// This function returns the identity element (or neutral element) for
1039 /// the operation K.
1040 Value *RecurrenceDescriptor::getRecurrenceIdentity(RecurKind K, Type *Tp,
1041 FastMathFlags FMF) const {
1042 switch (K) {
1043 case RecurKind::Xor:
1044 case RecurKind::Add:
1045 case RecurKind::Or:
1046 // Adding, Xoring, Oring zero to a number does not change it.
1047 return ConstantInt::get(Tp, 0);
1048 case RecurKind::Mul:
1049 // Multiplying a number by 1 does not change it.
1050 return ConstantInt::get(Tp, 1);
1051 case RecurKind::And:
1052 // AND-ing a number with an all-1 value does not change it.
1053 return ConstantInt::get(Tp, -1, true)
1054 case RecurKind::FMul:
1055 // Multiplying a number by 1 does not change it.
1056 return ConstantFP::get(Tp, 1.0L);
1057 case RecurKind::FMulAdd:
1058 case RecurKind::FAdd:
1059 // Adding zero to a number does not change it.
1060 // FIXME: Ideally we should not need to check FMF for FAdd and should always
1061 // use -0.0. However, this will currently result in mixed vectors of 0.0/-0.0.
1062 // Instead, we should ensure that 1) the FMF from FAdd are propagated to the PHI
1063 // nodes where possible, and 2) PHIs with the nsz flag + -0.0 use 0.0. This would
1064 // mean we can then remove the check for noSignedZeros() below (see D98963).
1065 if (FMF.noSignedZeros())
1066 return ConstantFP::get(Tp, 0.0L);
1067 return ConstantFP::get(Tp, -0.0L);
1068 case RecurKind::UMin:
// All-ones (UINT_MAX): taking the unsigned min with it is a no-op.
1069 return ConstantInt::get(Tp, -1, true);
1070 case RecurKind::UMax:
// Zero: taking the unsigned max with it is a no-op.
1071 return ConstantInt::get(Tp, 0);
1072 case RecurKind::SMin:
// Signed-max value: taking the signed min with it is a no-op.
1073 return ConstantInt::get(Tp,
1074 APInt::getSignedMaxValue(Tp->getIntegerBitWidth()));
1075 case RecurKind::SMax:
// Signed-min value: taking the signed max with it is a no-op.
1076 return ConstantInt::get(Tp,
1077 APInt::getSignedMinValue(Tp->getIntegerBitWidth()));
1078 case RecurKind::FMin:
1079 assert((FMF.noNaNs() && FMF.noSignedZeros()) &&
1080 "nnan, nsz is expected to be set for FP min reduction.");
1081 return ConstantFP::getInfinity(Tp, false /*Negative*/);
1082 case RecurKind::FMax:
1083 assert((FMF.noNaNs() && FMF.noSignedZeros()) &&
1084 "nnan, nsz is expected to be set for FP max reduction.");
1085 return ConstantFP::getInfinity(Tp, true /*Negative*/);
1086 case RecurKind::FMinimum:
// llvm.minimum propagates NaN/-0.0 itself, so no FMF assert is needed.
1087 return ConstantFP::getInfinity(Tp, false /*Negative*/);
1088 case RecurKind::FMaximum:
1089 return ConstantFP::getInfinity(Tp, true /*Negative*/);
1090 case RecurKind::IAnyOf:
1091 case RecurKind::FAnyOf:
// Any-of reductions have no algebraic identity; the recurrence start
// value plays that role.
1092 return getRecurrenceStartValue();
1093 break;
1094 default:
1095 llvm_unreachable("Unknown recurrence kind");
/// Returns the IR opcode corresponding to the recurrence kind \p Kind.
/// Min/max and any-of kinds map to the compare opcode of their icmp/fcmp +
/// select pair; FMulAdd maps to FAdd (the reduction-combining operation).
1099 unsigned RecurrenceDescriptor::getOpcode(RecurKind Kind) {
1100 switch (Kind) {
1101 case RecurKind::Add:
1102 return Instruction::Add;
1103 case RecurKind::Mul:
1104 return Instruction::Mul;
1105 case RecurKind::Or:
1106 return Instruction::Or;
1107 case RecurKind::And:
1108 return Instruction::And;
1109 case RecurKind::Xor:
1110 return Instruction::Xor;
1111 case RecurKind::FMul:
1112 return Instruction::FMul;
1113 case RecurKind::FMulAdd:
1114 case RecurKind::FAdd:
1115 return Instruction::FAdd;
1116 case RecurKind::SMax:
1117 case RecurKind::SMin:
1118 case RecurKind::UMax:
1119 case RecurKind::UMin:
1120 case RecurKind::IAnyOf:
1121 return Instruction::ICmp;
1122 case RecurKind::FMax:
1123 case RecurKind::FMin:
1124 case RecurKind::FMaximum:
1125 case RecurKind::FMinimum:
1126 case RecurKind::FAnyOf:
1127 return Instruction::FCmp;
1128 default:
1129 llvm_unreachable("Unknown recurrence operation");
/// Collects the single-use chain of reduction operations from \p Phi down to
/// the loop exit instruction, returning an empty vector when the chain does
/// not match the expected shape.
1133 SmallVector<Instruction *, 4>
1134 RecurrenceDescriptor::getReductionOpChain(PHINode *Phi, Loop *L) const {
1135 SmallVector<Instruction *, 4> ReductionOperations;
1136 unsigned RedOp = getOpcode(Kind);
1138 // Search down from the Phi to the LoopExitInstr, looking for instructions
1139 // with a single user of the correct type for the reduction.
1141 // Note that we check that the type of the operand is correct for each item in
1142 // the chain, including the last (the loop exit value). This can come up from
1143 // sub, which would otherwise be treated as an add reduction. MinMax also need
1144 // to check for a pair of icmp/select, for which we use getNextInstruction and
1145 // isCorrectOpcode functions to step the right number of instruction, and
1146 // check the icmp/select pair.
1147 // FIXME: We also do not attempt to look through Select's yet, which might
1148 // be part of the reduction chain, or attempt to looks through And's to find a
1149 // smaller bitwidth. Subs are also currently not allowed (which are usually
1150 // treated as part of a add reduction) as they are expected to generally be
1151 // more expensive than out-of-loop reductions, and need to be costed more
1152 // carefully.
// Min/max links appear as an icmp/select pair, so each value in the chain
// is expected to have two uses instead of one.
1153 unsigned ExpectedUses = 1;
1154 if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp)
1155 ExpectedUses = 2;
// Steps from Cur to the next link of the chain (skipping phis), or returns
// nullptr if none is found.
1157 auto getNextInstruction = [&](Instruction *Cur) -> Instruction * {
1158 for (auto *User : Cur->users()) {
1159 Instruction *UI = cast<Instruction>(User);
1160 if (isa<PHINode>(UI))
1161 continue;
1162 if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
1163 // We are expecting a icmp/select pair, which we go to the next select
1164 // instruction if we can. We already know that Cur has 2 uses.
1165 if (isa<SelectInst>(UI))
1166 return UI;
1167 continue;
1169 return UI;
1171 return nullptr;
// Checks Cur has the opcode expected for this reduction kind; min/max
// kinds are validated as a recognized min/max select pattern instead.
1173 auto isCorrectOpcode = [&](Instruction *Cur) {
1174 if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
1175 Value *LHS, *RHS;
1176 return SelectPatternResult::isMinOrMax(
1177 matchSelectPattern(Cur, LHS, RHS).Flavor);
1179 // Recognize a call to the llvm.fmuladd intrinsic.
1180 if (isFMulAddIntrinsic(Cur))
1181 return true;
1183 return Cur->getOpcode() == RedOp;
1186 // Attempt to look through Phis which are part of the reduction chain
1187 unsigned ExtraPhiUses = 0;
1188 Instruction *RdxInstr = LoopExitInstr;
1189 if (auto ExitPhi = dyn_cast<PHINode>(LoopExitInstr)) {
1190 if (ExitPhi->getNumIncomingValues() != 2)
1191 return {};
1193 Instruction *Inc0 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(0));
1194 Instruction *Inc1 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(1));
// Whichever incoming value is not the reduction phi continues the chain.
1196 Instruction *Chain = nullptr;
1197 if (Inc0 == Phi)
1198 Chain = Inc1;
1199 else if (Inc1 == Phi)
1200 Chain = Inc0;
1201 else
1202 return {};
1204 RdxInstr = Chain;
1205 ExtraPhiUses = 1;
1208 // The loop exit instruction we check first (as a quick test) but add last. We
1209 // check the opcode is correct (and dont allow them to be Subs) and that they
1210 // have expected to have the expected number of uses. They will have one use
1211 // from the phi and one from a LCSSA value, no matter the type.
1212 if (!isCorrectOpcode(RdxInstr) || !LoopExitInstr->hasNUses(2))
1213 return {};
1215 // Check that the Phi has one (or two for min/max) uses, plus an extra use
1216 // for conditional reductions.
1217 if (!Phi->hasNUses(ExpectedUses + ExtraPhiUses))
1218 return {};
1220 Instruction *Cur = getNextInstruction(Phi);
1222 // Each other instruction in the chain should have the expected number of uses
1223 // and be the correct opcode.
1224 while (Cur != RdxInstr) {
1225 if (!Cur || !isCorrectOpcode(Cur) || !Cur->hasNUses(ExpectedUses))
1226 return {};
1228 ReductionOperations.push_back(Cur);
1229 Cur = getNextInstruction(Cur);
// Finally append the loop exit instruction itself (checked above).
1232 ReductionOperations.push_back(Cur);
1233 return ReductionOperations;
/// Constructs an induction descriptor from a start value, induction kind,
/// SCEV step, optional update binary operator and optional redundant-cast
/// list, asserting (in debug builds) that the pieces are mutually consistent
/// with the kind \p K.
1236 InductionDescriptor::InductionDescriptor(Value *Start, InductionKind K,
1237 const SCEV *Step, BinaryOperator *BOp,
1238 SmallVectorImpl<Instruction *> *Casts)
1239 : StartValue(Start), IK(K), Step(Step), InductionBinOp(BOp) {
1240 assert(IK != IK_NoInduction && "Not an induction");
1242 // Start value type should match the induction kind and the value
1243 // itself should not be null.
1244 assert(StartValue && "StartValue is null");
1245 assert((IK != IK_PtrInduction || StartValue->getType()->isPointerTy()) &&
1246 "StartValue is not a pointer for pointer induction");
1247 assert((IK != IK_IntInduction || StartValue->getType()->isIntegerTy()) &&
1248 "StartValue is not an integer for integer induction");
1250 // Check the Step Value. It should be non-zero integer value.
1251 assert((!getConstIntStepValue() || !getConstIntStepValue()->isZero()) &&
1252 "Step value is zero");
1254 assert((IK == IK_FpInduction || Step->getType()->isIntegerTy()) &&
1255 "StepValue is not an integer");
1257 assert((IK != IK_FpInduction || Step->getType()->isFloatingPointTy()) &&
1258 "StepValue is not FP for FpInduction");
// FP inductions must carry the fadd/fsub that updates them, since the
// step has no meaningful SCEV (see isFPInductionPHI).
1259 assert((IK != IK_FpInduction ||
1260 (InductionBinOp &&
1261 (InductionBinOp->getOpcode() == Instruction::FAdd ||
1262 InductionBinOp->getOpcode() == Instruction::FSub))) &&
1263 "Binary opcode should be specified for FP induction");
// Record any casts the caller proved redundant for this induction.
1265 if (Casts) {
1266 for (auto &Inst : *Casts) {
1267 RedundantCasts.push_back(Inst);
/// Returns the step as a ConstantInt when the SCEV step is a constant,
/// otherwise nullptr.
1272 ConstantInt *InductionDescriptor::getConstIntStepValue() const {
1273 if (isa<SCEVConstant>(Step))
1274 return dyn_cast<ConstantInt>(cast<SCEVConstant>(Step)->getValue());
1275 return nullptr;
/// Returns true (and fills in \p D) if \p Phi is a floating-point induction:
/// a header phi updated by a loop-invariant fadd/fsub addend each iteration.
1278 bool InductionDescriptor::isFPInductionPHI(PHINode *Phi, const Loop *TheLoop,
1279 ScalarEvolution *SE,
1280 InductionDescriptor &D) {
1282 // Here we only handle FP induction variables.
1283 assert(Phi->getType()->isFloatingPointTy() && "Unexpected Phi type");
1285 if (TheLoop->getHeader() != Phi->getParent())
1286 return false;
1288 // The loop may have multiple entrances or multiple exits; we can analyze
1289 // this phi if it has a unique entry value and a unique backedge value.
1290 if (Phi->getNumIncomingValues() != 2)
1291 return false;
1292 Value *BEValue = nullptr, *StartValue = nullptr;
1293 if (TheLoop->contains(Phi->getIncomingBlock(0))) {
1294 BEValue = Phi->getIncomingValue(0);
1295 StartValue = Phi->getIncomingValue(1);
1296 } else {
1297 assert(TheLoop->contains(Phi->getIncomingBlock(1)) &&
1298 "Unexpected Phi node in the loop");
1299 BEValue = Phi->getIncomingValue(1);
1300 StartValue = Phi->getIncomingValue(0);
// The backedge value must be an fadd/fsub of the phi itself.
1303 BinaryOperator *BOp = dyn_cast<BinaryOperator>(BEValue);
1304 if (!BOp)
1305 return false;
1307 Value *Addend = nullptr;
1308 if (BOp->getOpcode() == Instruction::FAdd) {
// fadd is commutative, so the phi may be either operand.
1309 if (BOp->getOperand(0) == Phi)
1310 Addend = BOp->getOperand(1);
1311 else if (BOp->getOperand(1) == Phi)
1312 Addend = BOp->getOperand(0);
1313 } else if (BOp->getOpcode() == Instruction::FSub)
// fsub is not commutative: only phi - addend is an induction update.
1314 if (BOp->getOperand(0) == Phi)
1315 Addend = BOp->getOperand(1);
1317 if (!Addend)
1318 return false;
1320 // The addend should be loop invariant
1321 if (auto *I = dyn_cast<Instruction>(Addend))
1322 if (TheLoop->contains(I))
1323 return false;
1325 // FP Step has unknown SCEV
1326 const SCEV *Step = SE->getUnknown(Addend);
1327 D = InductionDescriptor(StartValue, IK_FpInduction, Step, BOp);
1328 return true;
1331 /// This function is called when we suspect that the update-chain of a phi node
1332 /// (whose symbolic SCEV expression is in \p PhiScev) contains redundant casts,
1333 /// that can be ignored. (This can happen when the PSCEV rewriter adds a runtime
1334 /// predicate P under which the SCEV expression for the phi can be the
1335 /// AddRecurrence \p AR; See createAddRecFromPHIWithCast). We want to find the
1336 /// cast instructions that are involved in the update-chain of this induction.
1337 /// A caller that adds the required runtime predicate can be free to drop these
1338 /// cast instructions, and compute the phi using \p AR (instead of some scev
1339 /// expression with casts).
1341 /// For example, without a predicate the scev expression can take the following
1342 /// form:
1343 /// (Ext ix (Trunc iy ( Start + i*Step ) to ix) to iy)
1345 /// It corresponds to the following IR sequence:
1346 /// %for.body:
1347 /// %x = phi i64 [ 0, %ph ], [ %add, %for.body ]
1348 /// %casted_phi = "ExtTrunc i64 %x"
1349 /// %add = add i64 %casted_phi, %step
1351 /// where %x is given in \p PN,
1352 /// PSE.getSCEV(%x) is equal to PSE.getSCEV(%casted_phi) under a predicate,
1353 /// and the IR sequence that "ExtTrunc i64 %x" represents can take one of
1354 /// several forms, for example, such as:
1355 /// ExtTrunc1: %casted_phi = and %x, 2^n-1
1356 /// or:
1357 /// ExtTrunc2: %t = shl %x, m
1358 /// %casted_phi = ashr %t, m
1360 /// If we are able to find such sequence, we return the instructions
1361 /// we found, namely %casted_phi and the instructions on its use-def chain up
1362 /// to the phi (not including the phi).
1363 static bool getCastsForInductionPHI(PredicatedScalarEvolution &PSE,
1364 const SCEVUnknown *PhiScev,
1365 const SCEVAddRecExpr *AR,
1366 SmallVectorImpl<Instruction *> &CastInsts) {
1368 assert(CastInsts.empty() && "CastInsts is expected to be empty.");
1369 auto *PN = cast<PHINode>(PhiScev->getValue());
1370 assert(PSE.getSCEV(PN) == AR && "Unexpected phi node SCEV expression");
1371 const Loop *L = AR->getLoop();
1373 // Find any cast instructions that participate in the def-use chain of
1374 // PhiScev in the loop.
1375 // FORNOW/TODO: We currently expect the def-use chain to include only
1376 // two-operand instructions, where one of the operands is an invariant.
1377 // createAddRecFromPHIWithCasts() currently does not support anything more
1378 // involved than that, so we keep the search simple. This can be
1379 // extended/generalized as needed.
// Given a binary operator with one loop-invariant operand, returns the
// other (variant) operand; returns nullptr for anything else.
1381 auto getDef = [&](const Value *Val) -> Value * {
1382 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Val);
1383 if (!BinOp)
1384 return nullptr;
1385 Value *Op0 = BinOp->getOperand(0);
1386 Value *Op1 = BinOp->getOperand(1);
1387 Value *Def = nullptr;
1388 if (L->isLoopInvariant(Op0))
1389 Def = Op1;
1390 else if (L->isLoopInvariant(Op1))
1391 Def = Op0;
1392 return Def;
1395 // Look for the instruction that defines the induction via the
1396 // loop backedge.
1397 BasicBlock *Latch = L->getLoopLatch();
1398 if (!Latch)
1399 return false;
1400 Value *Val = PN->getIncomingValueForBlock(Latch);
1401 if (!Val)
1402 return false;
1404 // Follow the def-use chain until the induction phi is reached.
1405 // If on the way we encounter a Value that has the same SCEV Expr as the
1406 // phi node, we can consider the instructions we visit from that point
1407 // as part of the cast-sequence that can be ignored.
1408 bool InCastSequence = false;
1409 auto *Inst = dyn_cast<Instruction>(Val);
1410 while (Val != PN) {
1411 // If we encountered a phi node other than PN, or if we left the loop,
1412 // we bail out.
1413 if (!Inst || !L->contains(Inst)) {
1414 return false;
1416 auto *AddRec = dyn_cast<SCEVAddRecExpr>(PSE.getSCEV(Val));
1417 if (AddRec && PSE.areAddRecsEqualWithPreds(AddRec, AR))
1418 InCastSequence = true;
1419 if (InCastSequence) {
1420 // Only the last instruction in the cast sequence is expected to have
1421 // uses outside the induction def-use chain.
1422 if (!CastInsts.empty())
1423 if (!Inst->hasOneUse())
1424 return false;
1425 CastInsts.push_back(Inst);
1427 Val = getDef(Val);
1428 if (!Val)
1429 return false;
1430 Inst = dyn_cast<Instruction>(Val);
// True only if a cast sequence was actually found.
1433 return InCastSequence;
/// Predicated-SCEV entry point: classifies \p Phi as an integer, pointer or
/// FP induction, optionally adding runtime predicates (\p Assume) to build an
/// add-recurrence, then delegates to the ScalarEvolution overload.
1436 bool InductionDescriptor::isInductionPHI(PHINode *Phi, const Loop *TheLoop,
1437 PredicatedScalarEvolution &PSE,
1438 InductionDescriptor &D, bool Assume) {
1439 Type *PhiTy = Phi->getType();
1441 // Handle integer and pointer inductions variables.
1442 // Now we handle also FP induction but not trying to make a
1443 // recurrent expression from the PHI node in-place.
1445 if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy() && !PhiTy->isFloatTy() &&
1446 !PhiTy->isDoubleTy() && !PhiTy->isHalfTy())
1447 return false;
1449 if (PhiTy->isFloatingPointTy())
1450 return isFPInductionPHI(Phi, TheLoop, PSE.getSE(), D);
1452 const SCEV *PhiScev = PSE.getSCEV(Phi);
1453 const auto *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
1455 // We need this expression to be an AddRecExpr.
1456 if (Assume && !AR)
1457 AR = PSE.getAsAddRec(Phi);
1459 if (!AR) {
1460 LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
1461 return false;
1464 // Record any Cast instructions that participate in the induction update
1465 const auto *SymbolicPhi = dyn_cast<SCEVUnknown>(PhiScev);
1466 // If we started from an UnknownSCEV, and managed to build an addRecurrence
1467 // only after enabling Assume with PSCEV, this means we may have encountered
1468 // cast instructions that required adding a runtime check in order to
1469 // guarantee the correctness of the AddRecurrence representation of the
1470 // induction.
1471 if (PhiScev != AR && SymbolicPhi) {
1472 SmallVector<Instruction *, 2> Casts;
1473 if (getCastsForInductionPHI(PSE, SymbolicPhi, AR, Casts))
1474 return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR, &Casts);
1477 return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR);
/// ScalarEvolution-based overload: checks that \p Phi (integer or pointer
/// typed) is described by a loop-invariant-step add-recurrence of \p TheLoop
/// (taken from \p Expr if given, otherwise computed), and fills in \p D.
/// \p CastsToIgnore is forwarded to the descriptor as redundant casts.
1480 bool InductionDescriptor::isInductionPHI(
1481 PHINode *Phi, const Loop *TheLoop, ScalarEvolution *SE,
1482 InductionDescriptor &D, const SCEV *Expr,
1483 SmallVectorImpl<Instruction *> *CastsToIgnore) {
1484 Type *PhiTy = Phi->getType();
1485 // We only handle integer and pointer inductions variables.
1486 if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy())
1487 return false;
1489 // Check that the PHI is consecutive.
1490 const SCEV *PhiScev = Expr ? Expr : SE->getSCEV(Phi);
1491 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
1493 if (!AR) {
1494 LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
1495 return false;
1498 if (AR->getLoop() != TheLoop) {
1499 // FIXME: We should treat this as a uniform. Unfortunately, we
1500 // don't currently know how to handled uniform PHIs.
1501 LLVM_DEBUG(
1502 dbgs() << "LV: PHI is a recurrence with respect to an outer loop.\n");
1503 return false;
1506 // This function assumes that InductionPhi is called only on Phi nodes
1507 // present inside loop headers. Check for the same, and throw an assert if
1508 // the current Phi is not present inside the loop header.
1509 assert(Phi->getParent() == AR->getLoop()->getHeader()
1510 && "Invalid Phi node, not present in loop header");
1512 Value *StartValue =
1513 Phi->getIncomingValueForBlock(AR->getLoop()->getLoopPreheader());
1515 BasicBlock *Latch = AR->getLoop()->getLoopLatch();
1516 if (!Latch)
1517 return false;
1519 const SCEV *Step = AR->getStepRecurrence(*SE);
1520 // Calculate the pointer stride and check if it is consecutive.
1521 // The stride may be a constant or a loop invariant integer value.
1522 const SCEVConstant *ConstStep = dyn_cast<SCEVConstant>(Step);
1523 if (!ConstStep && !SE->isLoopInvariant(Step, TheLoop))
1524 return false;
1526 if (PhiTy->isIntegerTy()) {
// For integer inductions, also record the update instruction coming in
// via the latch (may be null if it is not a BinaryOperator).
1527 BinaryOperator *BOp =
1528 dyn_cast<BinaryOperator>(Phi->getIncomingValueForBlock(Latch));
1529 D = InductionDescriptor(StartValue, IK_IntInduction, Step, BOp,
1530 CastsToIgnore);
1531 return true;
1534 assert(PhiTy->isPointerTy() && "The PHI must be a pointer");
1536 // This allows induction variables w/non-constant steps.
1537 D = InductionDescriptor(StartValue, IK_PtrInduction, Step);
1538 return true;