//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
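//
// Illustrative sketch (not part of the original source): given straight-line
// scalar code such as
//
//   a[0] = b[0] + c[0];
//   a[1] = b[1] + c[1];
//   a[2] = b[2] + c[2];
//   a[3] = b[3] + c[3];
//
// the pass detects the four consecutive stores to a[], builds a use-def tree
// rooted at them, and, if the cost model approves, replaces the group with a
// single <4 x ...> load/add/store sequence.
//
//===----------------------------------------------------------------------===//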
#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;
#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

cl::opt<bool>
    llvm::RunSLPVectorization("vectorize-slp", cl::init(false), cl::Hidden,
                              cl::desc("Run the SLP vectorization passes"));

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
    ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                       cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
    MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
                           cl::desc("Attempt to vectorize for this register size in bits"));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
    ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
                             cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));
// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;
/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}
/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return false;

    if (BB != I->getParent())
      return false;
  }
  return true;
}
/// \returns True if all of the values in \p VL are constants (but not
/// globals/constant expressions).
static bool allConstant(ArrayRef<Value *> VL) {
  // Constant expressions and globals can't be vectorized like normal
  // integer/FP constants.
  for (Value *i : VL)
    if (!isa<Constant>(i) || isa<ConstantExpr>(i) || isa<GlobalValue>(i))
      return false;
  return true;
}
/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}
/// \returns True if \p I is commutative, handles CmpInst as well as Instruction.
static bool isCommutative(Instruction *I) {
  if (auto *IC = dyn_cast<CmpInst>(I))
    return IC->isCommutative();
  return I->isCommutative();
}
/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> undef, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> undef, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> undef, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// TargetTransformInfo::getInstructionThroughput?
static Optional<TargetTransformInfo::ShuffleKind>
isShuffle(ArrayRef<Value *> VL) {
  auto *EI0 = cast<ExtractElementInst>(VL[0]);
  unsigned Size = EI0->getVectorOperandType()->getVectorNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    auto *EI = cast<ExtractElementInst>(VL[I]);
    auto *Vec = EI->getVectorOperand();
    // All vector operands must have the same number of vector elements.
    if (Vec->getType()->getVectorNumElements() != Size)
      return None;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size))
      return None;
    unsigned IntIdx = Idx->getValue().getZExtValue();
    // We can extractelement from undef vector.
    if (isa<UndefValue>(Vec))
      continue;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec)
      Vec1 = Vec;
    else if (!Vec2 || Vec2 == Vec)
      Vec2 = Vec;
    else
      return None;
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}
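
// Illustrative note (not from the original source): extracts {x[0], x[1],
// y[2], y[3]} hit index == lane in every element, so the loop above settles
// on Select and, with two sources involved, isShuffle() returns SK_Select.
// Extracts {x[1], x[0]} leave index != lane at lane 0, so the mode becomes
// Permute and the result is SK_PermuteSingleSrc (only %x is used).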
namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return getOpcode() != getAltOpcode(); }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace
/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}
/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState, i.e. the opcode under which we suppose the whole list
/// could be vectorized even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
      Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
      Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}
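
// Illustrative sketch (not part of the original source): for a bundle such as
//   VL = { %a = fadd ..., %b = fsub ..., %c = fadd ..., %d = fsub ... }
// getSameOpcode() keeps FAdd as the main opcode and records FSub as the
// alternate one; the resulting InstructionsState reports isAltShuffle() ==
// true, so the bundle can later be emitted as one vector fadd and one vector
// fsub blended by a shufflevector.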
/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}
/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}
/// \returns True if in-tree use also needs extract. This refers to
/// possible scalar operand in vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      if (hasVectorInstrinsicScalarOpd(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}
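
// Illustrative note (not from the original source): if a vectorized scalar %s
// is also used by an in-tree store as its *pointer* operand, that user still
// needs the scalar form of %s, so an extractelement must be emitted for it
// even though %s itself was vectorized.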
/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}
/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}
namespace llvm {

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
        DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize = TTI->getRegisterBitWidth(true);

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }
  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();
  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst taking
  /// into account (and updating it, if required) the list of externally used
  /// values stored in \p ExternallyUsedValues.
  void buildTree(ArrayRef<Value *> Roots,
                 ExtraValueToDebugLocsMap &ExternallyUsedValues,
                 ArrayRef<Value *> UserIgnoreLst = None);
  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumOpsWantToKeepOrder.clear();
    NumOpsWantToKeepOriginalOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();
  /// \returns The best order of instructions for vectorization.
  Optional<ArrayRef<unsigned>> bestOrder() const {
    auto I = std::max_element(
        NumOpsWantToKeepOrder.begin(), NumOpsWantToKeepOrder.end(),
        [](const decltype(NumOpsWantToKeepOrder)::value_type &D1,
           const decltype(NumOpsWantToKeepOrder)::value_type &D2) {
          return D1.second < D2.second;
        });
    if (I == NumOpsWantToKeepOrder.end() ||
        I->getSecond() <= NumOpsWantToKeepOriginalOrder)
      return None;

    return makeArrayRef(I->getFirst());
  }
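
  // Illustrative note (not from the original source): each vectorized entry
  // "votes" for the operand order it preferred; if, say, the reversed order
  // {3,2,1,0} gathered more votes than the original order, bestOrder()
  // returns {3,2,1,0} and the caller can rebuild the tree with the roots
  // permuted accordingly.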
  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V) const;

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();
  // \returns maximum vector register size as set by TTI or overridden by cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }
  /// Check if ArrayType or StructType is isomorphic to some VectorType.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }
  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
  };
  /// A helper data structure to hold the operands of a vector of instructions.
  /// This supports a fixed vector length for all operand vectors.
  class VLOperands {
    /// For each operand we need (i) the value, and (ii) the opcode that it
    /// would be attached to if the expression was in a left-linearized form.
    /// This is required to avoid illegal operand reordering.
    /// For example:
    /// \verbatim
    ///                         0 Op1
    ///                         |/
    /// Op1 Op2   Linearized    + Op2
    ///   \ /     ---------->   |/
    ///    -                    -
    ///
    /// Op1 - Op2            (0 + Op1) - Op2
    /// \endverbatim
    ///
    /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
    ///
    /// Another way to think of this is to track all the operations across the
    /// path from the operand all the way to the root of the tree and to
    /// calculate the operation that corresponds to this path. For example, the
    /// path from Op2 to the root crosses the RHS of the '-', therefore the
    /// corresponding operation is a '-' (which matches the one in the
    /// linearized tree, as shown above).
    ///
    /// For lack of a better term, we refer to this operation as Accumulated
    /// Path Operation (APO).
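
    // Illustrative example (not part of the original source): for the lane
    //   %r = sub i32 %a, %b
    // the LHS operand %a gets APO == false (it sits under the implicit '+'
    // of the linearized form 0 + %a - %b), while the RHS operand %b gets
    // APO == true. Reordering may therefore swap %a with an operand of
    // another lane that also has APO == false, but never with a %b-like
    // operand, as that would change the computed value.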
    struct OperandData {
      OperandData() = default;
      OperandData(Value *V, bool APO, bool IsUsed)
          : V(V), APO(APO), IsUsed(IsUsed) {}
      /// The operand value.
      Value *V = nullptr;
      /// TreeEntries only allow a single opcode, or an alternate sequence of
      /// them (e.g, +, -). Therefore, we can safely use a boolean value for the
      /// APO. It is set to 'true' if 'V' is attached to an inverse operation
      /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise
      /// (e.g., Add/Mul).
      bool APO = false;
      /// Helper data for the reordering function.
      bool IsUsed = false;
    };
    /// During operand reordering, we are trying to select the operand at each
    /// lane that matches best with the operand at the neighboring lane. Our
    /// selection is based on the type of value we are looking for. For example,
    /// if the neighboring lane has a load, we need to look for a load that is
    /// accessing a consecutive address. These strategies are summarized in the
    /// 'ReorderingMode' enumerator.
    enum class ReorderingMode {
      Load,     ///< Matching loads to consecutive memory addresses
      Opcode,   ///< Matching instructions based on opcode (same or alternate)
      Constant, ///< Matching constants
      Splat,    ///< Matching the same instruction multiple times (broadcast)
      Failed,   ///< We failed to create a vectorizable group
    };
    using OperandDataVec = SmallVector<OperandData, 2>;

    /// A vector of operand vectors.
    SmallVector<OperandDataVec, 4> OpsVec;

    const DataLayout &DL;
    ScalarEvolution &SE;
    /// \returns the operand data at \p OpIdx and \p Lane.
    OperandData &getData(unsigned OpIdx, unsigned Lane) {
      return OpsVec[OpIdx][Lane];
    }

    /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
      return OpsVec[OpIdx][Lane];
    }
728 for (unsigned OpIdx
= 0, NumOperands
= getNumOperands();
729 OpIdx
!= NumOperands
; ++OpIdx
)
730 for (unsigned Lane
= 0, NumLanes
= getNumLanes(); Lane
!= NumLanes
;
732 OpsVec
[OpIdx
][Lane
].IsUsed
= false;
    /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
    void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
      std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
    }
    // Search all operands in Ops[*][Lane] for the one that matches best
    // Ops[OpIdx][LastLane] and return its operand index.
    // If no good match can be found, return None.
    Optional<unsigned>
    getBestOperand(unsigned OpIdx, int Lane, int LastLane,
                   ArrayRef<ReorderingMode> ReorderingModes) {
      unsigned NumOperands = getNumOperands();

      // The operand of the previous lane at OpIdx.
      Value *OpLastLane = getData(OpIdx, LastLane).V;

      // Our strategy mode for OpIdx.
      ReorderingMode RMode = ReorderingModes[OpIdx];

      // The linearized opcode of the operand at OpIdx, Lane.
      bool OpIdxAPO = getData(OpIdx, Lane).APO;

      const unsigned BestScore = 2;
      const unsigned GoodScore = 1;

      // The best operand index and its score.
      // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
      // are using the score to differentiate between the two.
      struct BestOpData {
        Optional<unsigned> Idx = None;
        unsigned Score = 0;
      } BestOp;

      // Iterate through all unused operands and look for the best.
      for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
        // Get the operand at Idx and Lane.
        OperandData &OpData = getData(Idx, Lane);
        Value *Op = OpData.V;
        bool OpAPO = OpData.APO;

        // Skip already selected operands.
        if (OpData.IsUsed)
          continue;

        // Skip if we are trying to move the operand to a position with a
        // different opcode in the linearized tree form. This would break the
        // semantics.
        if (OpAPO != OpIdxAPO)
          continue;

        // Look for an operand that matches the current mode.
        switch (RMode) {
        case ReorderingMode::Load:
          if (isa<LoadInst>(Op)) {
            // Figure out which is left and right, so that we can check for
            // consecutive loads.
            bool LeftToRight = Lane > LastLane;
            Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
            Value *OpRight = (LeftToRight) ? Op : OpLastLane;
            if (isConsecutiveAccess(cast<LoadInst>(OpLeft),
                                    cast<LoadInst>(OpRight), DL, SE))
              BestOp.Idx = Idx;
          }
          break;
        case ReorderingMode::Opcode:
          // We accept both Instructions and Undefs, but with different scores.
          if ((isa<Instruction>(Op) && isa<Instruction>(OpLastLane) &&
               cast<Instruction>(Op)->getOpcode() ==
                   cast<Instruction>(OpLastLane)->getOpcode()) ||
              (isa<UndefValue>(OpLastLane) && isa<Instruction>(Op)) ||
              isa<UndefValue>(Op)) {
            // An instruction has a higher score than an undef.
            unsigned Score = (isa<UndefValue>(Op)) ? GoodScore : BestScore;
            if (Score > BestOp.Score) {
              BestOp.Idx = Idx;
              BestOp.Score = Score;
            }
          }
          break;
        case ReorderingMode::Constant:
          if (isa<Constant>(Op)) {
            unsigned Score = (isa<UndefValue>(Op)) ? GoodScore : BestScore;
            if (Score > BestOp.Score) {
              BestOp.Idx = Idx;
              BestOp.Score = Score;
            }
          }
          break;
        case ReorderingMode::Splat:
          if (Op == OpLastLane)
            BestOp.Idx = Idx;
          break;
        case ReorderingMode::Failed:
          return None;
        }
      }

      if (BestOp.Idx) {
        getData(BestOp.Idx.getValue(), Lane).IsUsed = true;
        return BestOp.Idx;
      }
      // If we could not find a good match return None.
      return None;
    }
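
    // Illustrative note (not from the original source): in Opcode mode an
    // operand with the same opcode as the neighboring lane scores BestScore
    // (2), while an undef scores only GoodScore (1); so given the candidates
    // {undef, add}, the add wins even if the undef was examined first.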
    /// Helper for reorderOperandVecs. \Returns the lane that we should start
    /// reordering from. This is the one which has the least number of operands
    /// that can freely move about.
    unsigned getBestLaneToStartReordering() const {
      unsigned BestLane = 0;
      unsigned Min = UINT_MAX;
      for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
           ++Lane) {
        unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane);
        if (NumFreeOps < Min) {
          Min = NumFreeOps;
          BestLane = Lane;
        }
      }
      return BestLane;
    }
    /// \Returns the maximum number of operands that are allowed to be reordered
    /// for \p Lane. This is used as a heuristic for selecting the first lane to
    /// start operand reordering.
    unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
      unsigned CntTrue = 0;
      unsigned NumOperands = getNumOperands();
      // Operands with the same APO can be reordered. We therefore need to count
      // how many of them we have for each APO, like this: Cnt[APO] = x.
      // Since we only have two APOs, namely true and false, we can avoid using
      // a map. Instead we can simply count the number of operands that
      // correspond to one of them (in this case the 'true' APO), and calculate
      // the other by subtracting it from the total number of operands.
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx)
        if (getData(OpIdx, Lane).APO)
          ++CntTrue;
      unsigned CntFalse = NumOperands - CntTrue;
      return std::max(CntTrue, CntFalse);
    }
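
    // Illustrative note (not from the original source): for the lane
    //   %r = sub i32 %a, %b
    // the APOs are {false, true}, so CntTrue == CntFalse == 1 and the lane
    // reports 1 freely movable operand; a commutative lane like
    //   %r = add i32 %a, %b
    // has APOs {false, false} and reports 2. The lane with the smallest
    // result is picked as the starting point for reordering.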
    /// Go through the instructions in VL and append their operands.
    void appendOperandsOfVL(ArrayRef<Value *> VL) {
      assert(!VL.empty() && "Bad VL");
      assert((empty() || VL.size() == getNumLanes()) &&
             "Expected same number of lanes");
      assert(isa<Instruction>(VL[0]) && "Expected instruction");
      unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
      OpsVec.resize(NumOperands);
      unsigned NumLanes = VL.size();
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
        OpsVec[OpIdx].resize(NumLanes);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
          // Our tree has just 3 nodes: the root and two operands.
          // It is therefore trivial to get the APO. We only need to check the
          // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
          // RHS operand. The LHS operand of both add and sub is never attached
          // to an inverse operation in the linearized form, therefore its APO
          // is false. The RHS is true only if VL[Lane] is an inverse operation.

          // Since operand reordering is performed on groups of commutative
          // operations or alternating sequences (e.g., +, -), we can safely
          // tell the inverse operations by checking commutativity.
          bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
          bool APO = (OpIdx == 0) ? false : IsInverseOperation;
          OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
                                 APO, false};
        }
      }
    }
908 unsigned getNumOperands() const { return OpsVec
.size(); }
910 /// \returns the number of lanes.
911 unsigned getNumLanes() const { return OpsVec
[0].size(); }
913 /// \returns the operand value at \p OpIdx and \p Lane.
914 Value
*getValue(unsigned OpIdx
, unsigned Lane
) const {
915 return getData(OpIdx
, Lane
).V
;
918 /// \returns true if the data structure is empty.
919 bool empty() const { return OpsVec
.empty(); }
922 void clear() { OpsVec
.clear(); }
    /// \Returns true if there are enough operands identical to \p Op to fill
    /// the whole vector.
    /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
    bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
      bool OpAPO = getData(OpIdx, Lane).APO;
      for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
        if (Ln == Lane)
          continue;
        // This is set to true if we found a candidate for broadcast at Lane.
        bool FoundCandidate = false;
        for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
          OperandData &Data = getData(OpI, Ln);
          if (Data.APO != OpAPO || Data.IsUsed)
            continue;
          if (Data.V == Op) {
            FoundCandidate = true;
            Data.IsUsed = true;
            break;
          }
        }
        if (!FoundCandidate)
          return false;
      }
      return true;
    }
  public:
    /// Initialize with all the operands of the instruction vector \p RootVL.
    VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
               ScalarEvolution &SE)
        : DL(DL), SE(SE) {
      // Append all the operands of RootVL.
      appendOperandsOfVL(RootVL);
    }
    /// \Returns a value vector with the operands across all lanes for the
    /// operand at \p OpIdx.
    ValueList getVL(unsigned OpIdx) const {
      ValueList OpVL(OpsVec[OpIdx].size());
      assert(OpsVec[OpIdx].size() == getNumLanes() &&
             "Expected same num of lanes across all operands");
      for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
        OpVL[Lane] = OpsVec[OpIdx][Lane].V;
      return OpVL;
    }
    // Performs operand reordering for 2 or more operands.
    // The original operands are in OrigOps[OpIdx][Lane].
    // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
    void reorder() {
      unsigned NumOperands = getNumOperands();
      unsigned NumLanes = getNumLanes();
      // Each operand has its own mode. We are using this mode to help us select
      // the instructions for each lane, so that they match best with the ones
      // we have selected so far.
      SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);

      // This is a greedy single-pass algorithm. We are going over each lane
      // once and deciding on the best order right away with no back-tracking.
      // However, in order to increase its effectiveness, we start with the lane
      // that has operands that can move the least. For example, given the
      // following lanes:
      //  Lane 0 : A[0] = B[0] + C[0]   // Visited 3rd
      //  Lane 1 : A[1] = C[1] - B[1]   // Visited 1st
      //  Lane 2 : A[2] = B[2] + C[2]   // Visited 2nd
      //  Lane 3 : A[3] = C[3] - B[3]   // Visited 4th
      // we will start at Lane 1, since the operands of the subtraction cannot
      // be reordered. Then we will visit the rest of the lanes in a circular
      // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3.

      // Find the first lane that we will start our search from.
      unsigned FirstLane = getBestLaneToStartReordering();

      // Initialize the modes.
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
        Value *OpLane0 = getValue(OpIdx, FirstLane);
        // Keep track if we have instructions with all the same opcode on one
        // side.
        if (isa<LoadInst>(OpLane0))
          ReorderingModes[OpIdx] = ReorderingMode::Load;
        else if (isa<Instruction>(OpLane0)) {
          // Check if OpLane0 should be broadcast.
          if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
            ReorderingModes[OpIdx] = ReorderingMode::Splat;
          else
            ReorderingModes[OpIdx] = ReorderingMode::Opcode;
        } else if (isa<Constant>(OpLane0))
          ReorderingModes[OpIdx] = ReorderingMode::Constant;
        else if (isa<Argument>(OpLane0))
          // Our best hope is a Splat. It may save some cost in some cases.
          ReorderingModes[OpIdx] = ReorderingMode::Splat;
        else
          // NOTE: This should be unreachable.
          ReorderingModes[OpIdx] = ReorderingMode::Failed;
      }

      // If the initial strategy fails for any of the operand indexes, then we
      // perform reordering again in a second pass. This helps avoid assigning
      // high priority to the failed strategy, and should improve reordering for
      // the non-failed operand indexes.
      for (int Pass = 0; Pass != 2; ++Pass) {
        // Skip the second pass if the first pass did not fail.
        bool StrategyFailed = false;
        // Mark all operand data as free to use.
        clearUsed();
        // We keep the original operand order for the FirstLane, so reorder the
        // rest of the lanes. We are visiting the nodes in a circular fashion,
        // using FirstLane as the center point and increasing the radius
        // accordingly.
        for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
          // Visit the lane on the right and then the lane on the left.
          for (int Direction : {+1, -1}) {
            int Lane = FirstLane + Direction * Distance;
            if (Lane < 0 || Lane >= (int)NumLanes)
              continue;
            int LastLane = Lane - Direction;
            assert(LastLane >= 0 && LastLane < (int)NumLanes &&
                   "Out of bounds");
            // Look for a good match for each operand.
            for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
              // Search for the operand that matches SortedOps[OpIdx][Lane-1].
              Optional<unsigned> BestIdx =
                  getBestOperand(OpIdx, Lane, LastLane, ReorderingModes);
              // By not selecting a value, we allow the operands that follow to
              // select a better matching value. We will get a non-null value in
              // the next run of getBestOperand().
              if (BestIdx) {
                // Swap the current operand with the one returned by
                // getBestOperand().
                swap(OpIdx, BestIdx.getValue(), Lane);
              } else {
                // We failed to find a best operand, set mode to 'Failed'.
                ReorderingModes[OpIdx] = ReorderingMode::Failed;
                // Enable the second pass.
                StrategyFailed = true;
              }
            }
          }
        }
        // Skip second pass if the strategy did not fail.
        if (!StrategyFailed)
          break;
      }
    }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    /// Debug print.
    LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
      switch (RMode) {
      case ReorderingMode::Load:
        return "Load";
      case ReorderingMode::Opcode:
        return "Opcode";
      case ReorderingMode::Constant:
        return "Constant";
      case ReorderingMode::Splat:
        return "Splat";
      case ReorderingMode::Failed:
        return "Failed";
      }
      llvm_unreachable("Unimplemented Reordering Type");
    }

    LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
                                                   raw_ostream &OS) {
      return OS << getModeStr(RMode);
    }

    /// Debug print.
    LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
      printMode(RMode, dbgs());
    }

    friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
      return printMode(RMode, OS);
    }

    LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
      const unsigned Indent = 2;
      unsigned Cnt = 0;
      for (const OperandDataVec &OpDataVec : OpsVec) {
        OS << "Operand " << Cnt++ << "\n";
        for (const OperandData &OpData : OpDataVec) {
          OS.indent(Indent) << "{";
          if (Value *V = OpData.V)
            OS << *V;
          else
            OS << "null";
          OS << ", APO:" << OpData.APO << "}\n";
        }
        OS << "\n";
      }
      return OS;
    }

    /// Debug print.
    LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
#endif
  };
  /// Checks if the instruction is marked for deletion.
  bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); }

  /// Marks values' operands for later deletion by replacing them with Undefs.
  void eraseInstructions(ArrayRef<Value *> AV);

  ~BoUpSLP();

private:
  /// Checks if all users of \p I are the part of the vectorization tree.
  bool areAllUsersVectorized(Instruction *I) const;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);
  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
                     const EdgeInfo &EI);

  /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
  /// be vectorized to use the original vector (or aggregate "bitcast" to a
  /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
  /// returns false, setting \p CurrentOrder to either an empty vector or a
  /// non-identity permutation that allows to reuse extract instructions.
  bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
                       SmallVectorImpl<unsigned> &CurrentOrder) const;

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);
  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty, const DenseSet<unsigned> &ShuffledIndices) const;

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL) const;

  /// Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(TreeEntry *E);
  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
  bool isFullyVectorizableTinyTree() const;

  /// Reorder commutative or alt operands to get better probability of
  /// generating vectorized code.
  static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                             SmallVectorImpl<Value *> &Left,
                                             SmallVectorImpl<Value *> &Right,
                                             const DataLayout &DL,
                                             ScalarEvolution &SE);
  struct TreeEntry {
    using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
    TreeEntry(VecTreeTy &Container) : Container(Container) {}
    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      if (VL.size() == Scalars.size())
        return std::equal(VL.begin(), VL.end(), Scalars.begin());
      return VL.size() == ReuseShuffleIndices.size() &&
             std::equal(
                 VL.begin(), VL.end(), ReuseShuffleIndices.begin(),
                 [this](Value *V, unsigned Idx) { return V == Scalars[Idx]; });
    }
1201 /// The Scalars are vectorized into this value. It is initialized to Null.
1202 Value
*VectorizedValue
= nullptr;
1204 /// Do we need to gather this sequence ?
1205 bool NeedToGather
= false;
1207 /// Does this sequence require some shuffling?
1208 SmallVector
<unsigned, 4> ReuseShuffleIndices
;
1210 /// Does this entry require reordering?
1211 ArrayRef
<unsigned> ReorderIndices
;
    /// Points back to the VectorizableTree.
    ///
    /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
    /// to be a pointer and needs to be able to initialize the child iterator.
    /// Thus we need a reference back to the container to translate the indices
    /// to entries.
    VecTreeTy &Container;

    /// The TreeEntry index containing the user of this entry. We can actually
    /// have multiple users so the data structure is not truly a tree.
    SmallVector<EdgeInfo, 1> UserTreeIndices;
    /// The index of this treeEntry in VectorizableTree.
    int Idx = -1;

  private:
    /// The operands of each instruction in each lane Operands[op_index][lane].
    /// Note: This helps avoid the replication of the code that performs the
    /// reordering of operands during buildTree_rec() and vectorizeTree().
    SmallVector<ValueList, 2> Operands;

    /// The main/alternate instruction.
    Instruction *MainOp = nullptr;
    Instruction *AltOp = nullptr;

  public:
    /// Set this bundle's \p OpIdx'th operand to \p OpVL.
    void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
      if (Operands.size() < OpIdx + 1)
        Operands.resize(OpIdx + 1);
      assert(Operands[OpIdx].size() == 0 && "Already resized?");
      Operands[OpIdx].resize(Scalars.size());
      for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane)
        Operands[OpIdx][Lane] = OpVL[Lane];
    }
1250 void setOperandsInOrder() {
1251 assert(Operands
.empty() && "Already initialized?");
1252 auto *I0
= cast
<Instruction
>(Scalars
[0]);
1253 Operands
.resize(I0
->getNumOperands());
1254 unsigned NumLanes
= Scalars
.size();
1255 for (unsigned OpIdx
= 0, NumOperands
= I0
->getNumOperands();
1256 OpIdx
!= NumOperands
; ++OpIdx
) {
1257 Operands
[OpIdx
].resize(NumLanes
);
1258 for (unsigned Lane
= 0; Lane
!= NumLanes
; ++Lane
) {
1259 auto *I
= cast
<Instruction
>(Scalars
[Lane
]);
1260 assert(I
->getNumOperands() == NumOperands
&&
1261 "Expected same number of operands");
1262 Operands
[OpIdx
][Lane
] = I
->getOperand(OpIdx
);
1267 /// \returns the \p OpIdx operand of this TreeEntry.
1268 ValueList
&getOperand(unsigned OpIdx
) {
1269 assert(OpIdx
< Operands
.size() && "Off bounds");
1270 return Operands
[OpIdx
];
1273 /// \returns the number of operands.
1274 unsigned getNumOperands() const { return Operands
.size(); }
1276 /// \return the single \p OpIdx operand.
1277 Value
*getSingleOperand(unsigned OpIdx
) const {
1278 assert(OpIdx
< Operands
.size() && "Off bounds");
1279 assert(!Operands
[OpIdx
].empty() && "No operand available");
1280 return Operands
[OpIdx
][0];
1283 /// Some of the instructions in the list have alternate opcodes.
1284 bool isAltShuffle() const {
1285 return getOpcode() != getAltOpcode();
1288 bool isOpcodeOrAlt(Instruction
*I
) const {
1289 unsigned CheckedOpcode
= I
->getOpcode();
1290 return (getOpcode() == CheckedOpcode
||
1291 getAltOpcode() == CheckedOpcode
);
1294 /// Chooses the correct key for scheduling data. If \p Op has the same (or
1295 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is
1297 Value
*isOneOf(Value
*Op
) const {
1298 auto *I
= dyn_cast
<Instruction
>(Op
);
1299 if (I
&& isOpcodeOrAlt(I
))
1304 void setOperations(const InstructionsState
&S
) {
1309 Instruction
*getMainOp() const {
1313 Instruction
*getAltOp() const {
1317 /// The main/alternate opcodes for the list of instructions.
1318 unsigned getOpcode() const {
1319 return MainOp
? MainOp
->getOpcode() : 0;
1322 unsigned getAltOpcode() const {
1323 return AltOp
? AltOp
->getOpcode() : 0;
1326 /// Update operations state of this entry if reorder occurred.
1327 bool updateStateIfReorder() {
1328 if (ReorderIndices
.empty())
1330 InstructionsState S
= getSameOpcode(Scalars
, ReorderIndices
.front());
#ifndef NDEBUG
    /// Debug printer.
    LLVM_DUMP_METHOD void dump() const {
      dbgs() << Idx << ".\n";
      for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
        dbgs() << "Operand " << OpI << ":\n";
        for (const Value *V : Operands[OpI])
          dbgs().indent(2) << *V << "\n";
      }
      dbgs() << "Scalars: \n";
      for (Value *V : Scalars)
        dbgs().indent(2) << *V << "\n";
      dbgs() << "NeedToGather: " << NeedToGather << "\n";
      dbgs() << "MainOp: " << *MainOp << "\n";
      dbgs() << "AltOp: " << *AltOp << "\n";
      dbgs() << "VectorizedValue: ";
      if (VectorizedValue)
        dbgs() << *VectorizedValue;
      else
        dbgs() << "NULL";
      dbgs() << "\n";
      dbgs() << "ReuseShuffleIndices: ";
      if (ReuseShuffleIndices.empty())
        dbgs() << "Empty";
      else
        for (unsigned ReuseIdx : ReuseShuffleIndices)
          dbgs() << ReuseIdx << ", ";
      dbgs() << "\n";
      dbgs() << "ReorderIndices: ";
      for (unsigned ReorderIdx : ReorderIndices)
        dbgs() << ReorderIdx << ", ";
      dbgs() << "\n";
      dbgs() << "UserTreeIndices: ";
      for (const auto &EInfo : UserTreeIndices)
        dbgs() << EInfo << ", ";
      dbgs() << "\n";
    }
#endif
  };
  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle,
                          const InstructionsState &S,
                          const EdgeInfo &UserTreeIdx,
                          ArrayRef<unsigned> ReuseShuffleIndices = None,
                          ArrayRef<unsigned> ReorderIndices = None) {
    bool Vectorized = (bool)Bundle;
    VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree));
    TreeEntry *Last = VectorizableTree.back().get();
    Last->Idx = VectorizableTree.size() - 1;
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(),
                                     ReuseShuffleIndices.end());
    Last->ReorderIndices = ReorderIndices;
    Last->setOperations(S);
    if (Vectorized) {
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!getTreeEntry(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = Last;
      }
      // Update the scheduler bundle to point to this TreeEntry.
      unsigned Lane = 0;
      for (ScheduleData *BundleMember = Bundle.getValue(); BundleMember;
           BundleMember = BundleMember->NextInBundle) {
        BundleMember->TE = Last;
        BundleMember->Lane = Lane;
        ++Lane;
      }
      assert((!Bundle.getValue() || Lane == VL.size()) &&
             "Bundle and VL out of sync");
    } else {
      MustGather.insert(VL.begin(), VL.end());
    }

    if (UserTreeIdx.UserTE)
      Last->UserTreeIndices.push_back(UserTreeIdx);

    return Last;
  }
  /// -- Vectorization State --
  /// Holds all of the tree entries.
  TreeEntry::VecTreeTy VectorizableTree;
#ifndef NDEBUG
  /// Debug printer.
  LLVM_DUMP_METHOD void dumpVectorizableTree() const {
    for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) {
      VectorizableTree[Id]->dump();
      dbgs() << "\n";
    }
  }
#endif
*getTreeEntry(Value
*V
) {
1431 auto I
= ScalarToTreeEntry
.find(V
);
1432 if (I
!= ScalarToTreeEntry
.end())
1437 const TreeEntry
*getTreeEntry(Value
*V
) const {
1438 auto I
= ScalarToTreeEntry
.find(V
);
1439 if (I
!= ScalarToTreeEntry
.end())
1444 /// Maps a specific scalar to its tree entry.
1445 SmallDenseMap
<Value
*, TreeEntry
*> ScalarToTreeEntry
;
1447 /// A list of scalars that we found that we need to keep as scalars.
1448 ValueSet MustGather
;
  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L)
        : Scalar(S), User(U), Lane(L) {}

    // Which scalar in our function.
    Value *Scalar;

    // Which user that uses the scalar.
    llvm::User *User;

    // Which lane does the scalar belong to.
    int Lane;
  };
  using UserList = SmallVector<ExternalUser, 16>;
  /// Checks if two instructions may access the same memory.
  ///
  /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
  /// is invariant in the calling loop.
  bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
                 Instruction *Inst2) {
    // First check if the result is already in the cache.
    AliasCacheKey key = std::make_pair(Inst1, Inst2);
    Optional<bool> &result = AliasCache[key];
    if (result.hasValue()) {
      return result.getValue();
    }
    MemoryLocation Loc2 = getLocation(Inst2, AA);
    bool aliased = true;
    if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
      // Do the alias check.
      aliased = AA->alias(Loc1, Loc2);
    }
    // Store the result in the cache.
    result = aliased;
    return aliased;
  }
= std::pair
<Instruction
*, Instruction
*>;
1491 /// Cache for alias results.
1492 /// TODO: consider moving this to the AliasAnalysis itself.
1493 DenseMap
<AliasCacheKey
, Optional
<bool>> AliasCache
;
  /// Removes an instruction from its block and eventually deletes it.
  /// It's like Instruction::eraseFromParent() except that the actual deletion
  /// is delayed until BoUpSLP is destructed.
  /// This is required to ensure that there are no incorrect collisions in the
  /// AliasCache, which can happen if a new instruction is allocated at the
  /// same address as a previously deleted instruction.
  void eraseInstruction(Instruction *I, bool ReplaceOpsWithUndef = false) {
    auto It = DeletedInstructions.try_emplace(I, ReplaceOpsWithUndef).first;
    It->getSecond() = It->getSecond() && ReplaceOpsWithUndef;
  }

  /// Temporary store for deleted instructions. Instructions will be deleted
  /// eventually when the BoUpSLP is destructed.
  DenseMap<Instruction *, bool> DeletedInstructions;
  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User). External User
  /// can be nullptr, it means that this Internal Scalar will be used later,
  /// after vectorization.
  UserList ExternalUses;

  /// Values used only by @llvm.assume calls.
  SmallPtrSet<const Value *, 32> EphValues;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;

  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;
  /// Contains all scheduling relevant data for an instruction.
  /// A ScheduleData either represents a single instruction or a member of an
  /// instruction bundle (= a group of instructions which is combined into a
  /// vector instruction).
  struct ScheduleData {
    // The initial value for the dependency counters. It means that the
    // dependencies are not calculated yet.
    enum { InvalidDeps = -1 };

    ScheduleData() = default;

    void init(int BlockSchedulingRegionID, Value *OpVal) {
      FirstInBundle = this;
      NextInBundle = nullptr;
      NextLoadStore = nullptr;
      IsScheduled = false;
      SchedulingRegionID = BlockSchedulingRegionID;
      UnscheduledDepsInBundle = UnscheduledDeps;
      clearDependencies();
      OpValue = OpVal;
      TE = nullptr;
      Lane = -1;
    }
    /// Returns true if the dependency information has been calculated.
    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }

    /// Returns true for single instructions and for bundle representatives
    /// (= the head of a bundle).
    bool isSchedulingEntity() const { return FirstInBundle == this; }

    /// Returns true if it represents an instruction bundle and not only a
    /// single instruction.
    bool isPartOfBundle() const {
      return NextInBundle != nullptr || FirstInBundle != this;
    }

    /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled depending instructions/bundles.
    bool isReady() const {
      assert(isSchedulingEntity() &&
             "can't consider non-scheduling entity for ready list");
      return UnscheduledDepsInBundle == 0 && !IsScheduled;
    }
    /// Modifies the number of unscheduled dependencies, also updating it for
    /// the whole bundle.
    int incrementUnscheduledDeps(int Incr) {
      UnscheduledDeps += Incr;
      return FirstInBundle->UnscheduledDepsInBundle += Incr;
    }

    /// Sets the number of unscheduled dependencies to the number of
    /// dependencies.
    void resetUnscheduledDeps() {
      incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
    }

    /// Clears all dependency information.
    void clearDependencies() {
      Dependencies = InvalidDeps;
      resetUnscheduledDeps();
      MemoryDependencies.clear();
    }
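
    // Illustrative note (not from the original source): a bundle whose two
    // members have 3 and 2 unscheduled dependencies starts with
    // UnscheduledDepsInBundle == 5; each time a dependency is scheduled,
    // incrementUnscheduledDeps(-1) is called on the member, and once the
    // bundle-wide counter reaches 0 the bundle becomes ready (isReady()).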
    void dump(raw_ostream &os) const {
      if (!isSchedulingEntity()) {
        os << "/ " << *Inst;
      } else if (NextInBundle) {
        os << '[' << *Inst;
        ScheduleData *SD = NextInBundle;
        while (SD) {
          os << ';' << *SD->Inst;
          SD = SD->NextInBundle;
        }
        os << ']';
      } else {
        os << *Inst;
      }
    }

    Instruction *Inst = nullptr;
    /// Points to the head in an instruction bundle (and always to this for
    /// single instructions).
    ScheduleData *FirstInBundle = nullptr;

    /// Single linked list of all instructions in a bundle. Null if it is a
    /// single instruction.
    ScheduleData *NextInBundle = nullptr;

    /// Single linked list of all memory instructions (e.g. load, store, call)
    /// in the block - until the end of the scheduling region.
    ScheduleData *NextLoadStore = nullptr;

    /// The dependent memory instructions.
    /// This list is derived on demand in calculateDependencies().
    SmallVector<ScheduleData *, 4> MemoryDependencies;

    /// This ScheduleData is in the current scheduling region if this matches
    /// the current SchedulingRegionID of BlockScheduling.
    int SchedulingRegionID = 0;

    /// Used for getting a "good" final ordering of instructions.
    int SchedulingPriority = 0;

    /// The number of dependencies. Constitutes of the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
    /// This value is calculated on demand.
    /// If InvalidDeps, the number of dependencies is not calculated yet.
    int Dependencies = InvalidDeps;

    /// The number of dependencies minus the number of dependencies of scheduled
    /// instructions. As soon as this is zero, the instruction/bundle gets ready
    /// for scheduling.
    /// Note that this is negative as long as Dependencies is not calculated.
    int UnscheduledDeps = InvalidDeps;

    /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
    /// single instructions.
    int UnscheduledDepsInBundle = InvalidDeps;

    /// True if this instruction is scheduled (or considered as scheduled in the
    /// dry-run).
    bool IsScheduled = false;

    /// Opcode of the current instruction in the schedule data.
    Value *OpValue = nullptr;

    /// The TreeEntry that this instruction corresponds to.
    TreeEntry *TE = nullptr;

    /// The lane of this node in the TreeEntry.
    int Lane = -1;
  };
  friend inline raw_ostream &operator<<(raw_ostream &os,
                                        const BoUpSLP::ScheduleData &SD) {
    SD.dump(os);
    return os;
  }

  friend struct GraphTraits<BoUpSLP *>;
  friend struct DOTGraphTraits<BoUpSLP *>;
  /// Contains all scheduling data for a basic block.
  struct BlockScheduling {
    BlockScheduling(BasicBlock *BB)
        : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}

    void clear() {
      ReadyInsts.clear();
      ScheduleStart = nullptr;
      ScheduleEnd = nullptr;
      FirstLoadStoreInRegion = nullptr;
      LastLoadStoreInRegion = nullptr;

      // Reduce the maximum schedule region size by the size of the
      // previous scheduling run.
      ScheduleRegionSizeLimit -= ScheduleRegionSize;
      if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
        ScheduleRegionSizeLimit = MinScheduleRegionSize;
      ScheduleRegionSize = 0;

      // Make a new scheduling region, i.e. all existing ScheduleData is not
      // in the new region yet.
      ++SchedulingRegionID;
    }
    ScheduleData *getScheduleData(Value *V) {
      ScheduleData *SD = ScheduleDataMap[V];
      if (SD && SD->SchedulingRegionID == SchedulingRegionID)
        return SD;
      return nullptr;
    }

    ScheduleData *getScheduleData(Value *V, Value *Key) {
      if (V == Key)
        return getScheduleData(V);
      auto I = ExtraScheduleDataMap.find(V);
      if (I != ExtraScheduleDataMap.end()) {
        ScheduleData *SD = I->second[Key];
        if (SD && SD->SchedulingRegionID == SchedulingRegionID)
          return SD;
      }
      return nullptr;
    }
    bool isInSchedulingRegion(ScheduleData *SD) {
      return SD->SchedulingRegionID == SchedulingRegionID;
    }
    /// Marks an instruction as scheduled and puts all dependent ready
    /// instructions into the ready-list.
    template <typename ReadyListType>
    void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
      SD->IsScheduled = true;
      LLVM_DEBUG(dbgs() << "SLP:   schedule " << *SD << "\n");

      ScheduleData *BundleMember = SD;
      while (BundleMember) {
        if (BundleMember->Inst != BundleMember->OpValue) {
          BundleMember = BundleMember->NextInBundle;
          continue;
        }
        // Handle the def-use chain dependencies.

        // Decrement the unscheduled counter and insert to ready list if ready.
        auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
          doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
            if (OpDef && OpDef->hasValidDependencies() &&
                OpDef->incrementUnscheduledDeps(-1) == 0) {
              // There are no more unscheduled dependencies after
              // decrementing, so we can put the dependent instruction
              // into the ready list.
              ScheduleData *DepBundle = OpDef->FirstInBundle;
              assert(!DepBundle->IsScheduled &&
                     "already scheduled bundle gets ready");
              ReadyList.insert(DepBundle);
              LLVM_DEBUG(dbgs()
                         << "SLP:    gets ready (def): " << *DepBundle << "\n");
            }
          });
        };

        // If BundleMember is a vector bundle, its operands may have been
        // reordered during buildTree(). We therefore need to get its operands
        // through the TreeEntry.
        if (TreeEntry *TE = BundleMember->TE) {
          int Lane = BundleMember->Lane;
          assert(Lane >= 0 && "Lane not set");
          for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
               OpIdx != NumOperands; ++OpIdx)
            if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
              DecrUnsched(I);
        } else {
          // If BundleMember is a stand-alone instruction, no operand reordering
          // has taken place, so we directly access its operands.
          for (Use &U : BundleMember->Inst->operands())
            if (auto *I = dyn_cast<Instruction>(U.get()))
              DecrUnsched(I);
        }
        // Handle the memory dependencies.
        for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
          if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            LLVM_DEBUG(dbgs()
                       << "SLP:    gets ready (mem): " << *DepBundle << "\n");
          }
        }
        BundleMember = BundleMember->NextInBundle;
      }
    }
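
    // Note on schedule(): a dependent bundle becomes ready only once both its
    // def-use dependencies (walked above, either through the TreeEntry
    // operands or the plain instruction operands) and its memory dependencies
    // have been scheduled; whichever decrement reaches zero inserts the
    // dependent bundle head into the ready list exactly once.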
    void doForAllOpcodes(Value *V,
                         function_ref<void(ScheduleData *SD)> Action) {
      if (ScheduleData *SD = getScheduleData(V))
        Action(SD);
      auto I = ExtraScheduleDataMap.find(V);
      if (I != ExtraScheduleDataMap.end())
        for (auto &P : I->second)
          if (P.second->SchedulingRegionID == SchedulingRegionID)
            Action(P.second);
    }
    /// Put all instructions into the ReadyList which are ready for scheduling.
    template <typename ReadyListType>
    void initialFillReadyList(ReadyListType &ReadyList) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
        doForAllOpcodes(I, [&](ScheduleData *SD) {
          if (SD->isSchedulingEntity() && SD->isReady()) {
            ReadyList.insert(SD);
            LLVM_DEBUG(dbgs()
                       << "SLP:    initially in ready list: " << *I << "\n");
          }
        });
      }
    }
    /// Checks if a bundle of instructions can be scheduled, i.e. has no
    /// cyclic dependencies. This is only a dry-run, no instructions are
    /// actually moved at this stage.
    /// \returns the scheduling bundle. The returned Optional value is non-None
    /// if \p VL is allowed to be scheduled.
    Optional<ScheduleData *>
    tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
                      const InstructionsState &S);

    /// Un-bundles a group of instructions.
    void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);

    /// Allocates schedule data chunk.
    ScheduleData *allocateScheduleDataChunks();

    /// Extends the scheduling region so that V is inside the region.
    /// \returns true if the region size is within the limit.
    bool extendSchedulingRegion(Value *V, const InstructionsState &S);

    /// Initialize the ScheduleData structures for new instructions in the
    /// scheduling region.
    void initScheduleData(Instruction *FromI, Instruction *ToI,
                          ScheduleData *PrevLoadStore,
                          ScheduleData *NextLoadStore);

    /// Updates the dependency information of a bundle and of all instructions/
    /// bundles which depend on the original bundle.
    void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
                               BoUpSLP *SLP);

    /// Sets all instructions in the scheduling region to un-scheduled.
    void resetSchedule();
    BasicBlock *BB;

    /// Simple memory allocation for ScheduleData.
    std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;

    /// The size of a ScheduleData array in ScheduleDataChunks.
    int ChunkSize;

    /// The allocator position in the current chunk, which is the last entry
    /// of ScheduleDataChunks.
    int ChunkPos;
    /// Attaches ScheduleData to Instruction.
    /// Note that the mapping survives during all vectorization iterations, i.e.
    /// ScheduleData structures are recycled.
    DenseMap<Value *, ScheduleData *> ScheduleDataMap;

    /// Attaches ScheduleData to Instruction with the leading key.
    DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
        ExtraScheduleDataMap;

    struct ReadyList : SmallVector<ScheduleData *, 8> {
      void insert(ScheduleData *SD) { push_back(SD); }
    };

    /// The ready-list for scheduling (only used for the dry-run).
    ReadyList ReadyInsts;
    /// The first instruction of the scheduling region.
    Instruction *ScheduleStart = nullptr;

    /// The first instruction _after_ the scheduling region.
    Instruction *ScheduleEnd = nullptr;

    /// The first memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *FirstLoadStoreInRegion = nullptr;

    /// The last memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *LastLoadStoreInRegion = nullptr;

    /// The current size of the scheduling region.
    int ScheduleRegionSize = 0;

    /// The maximum size allowed for the scheduling region.
    int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;

    /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented which "removes" all ScheduleData from the region.
    // Make sure that the initial SchedulingRegionID is greater than the
    // initial SchedulingRegionID in ScheduleData (which is 0).
    int SchedulingRegionID = 1;
  };
  /// Attaches the BlockScheduling structures to basic blocks.
  MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;

  /// Performs the "real" scheduling. Done before vectorization is actually
  /// performed in a basic block.
  void scheduleBlock(BlockScheduling *BS);

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  using OrdersType = SmallVector<unsigned, 4>;
  /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of
  /// sorted SmallVectors of unsigned.
  struct OrdersTypeDenseMapInfo {
    static OrdersType getEmptyKey() {
      OrdersType V;
      V.push_back(~1U);
      return V;
    }

    static OrdersType getTombstoneKey() {
      OrdersType V;
      V.push_back(~2U);
      return V;
    }

    static unsigned getHashValue(const OrdersType &V) {
      return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
    }

    static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) {
      return LHS == RHS;
    }
  };

  /// Contains orders of operations along with the number of bundles that have
  /// operations in this order. It stores only those orders that require
  /// reordering, if reordering is not required it is counted using \a
  /// NumOpsWantToKeepOriginalOrder.
  DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> NumOpsWantToKeepOrder;

  /// Number of bundles that do not require reordering.
  unsigned NumOpsWantToKeepOriginalOrder = 0;
  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  AssumptionCache *AC;
  DemandedBits *DB;
  const DataLayout *DL;
  OptimizationRemarkEmitter *ORE;

  unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
  unsigned MinVecRegSize; // Set by cl::opt (default: 128).

  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;
  /// A map of scalar integer values to the smallest bit width with which they
  /// can legally be represented. The values map to (width, signed) pairs,
  /// where "width" indicates the minimum bit width and "signed" is True if the
  /// value must be signed-extended, rather than zero-extended, back to its
  /// original width.
  MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
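
  // For example, an i32 value whose uses only ever need its low 8 bits maps
  // to (8, false) when zero-extension suffices, or to (8, true) when the
  // sign bit must be preserved on the way back to i32.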
};

} // end namespace slpvectorizer
template <> struct GraphTraits<BoUpSLP *> {
  using TreeEntry = BoUpSLP::TreeEntry;

  /// NodeRef has to be a pointer per the GraphWriter.
  using NodeRef = TreeEntry *;

  using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy;
  /// Add the VectorizableTree to the index iterator to be able to return
  /// TreeEntry pointers.
  struct ChildIteratorType
      : public iterator_adaptor_base<
            ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> {
    ContainerTy &VectorizableTree;

    ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W,
                      ContainerTy &VT)
        : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}

    NodeRef operator*() { return I->UserTE; }
  };
  static NodeRef getEntryNode(BoUpSLP &R) {
    return R.VectorizableTree[0].get();
  }

  static ChildIteratorType child_begin(NodeRef N) {
    return {N->UserTreeIndices.begin(), N->Container};
  }

  static ChildIteratorType child_end(NodeRef N) {
    return {N->UserTreeIndices.end(), N->Container};
  }
  /// For the node iterator we just need to turn the TreeEntry iterator into a
  /// TreeEntry* iterator so that it dereferences to NodeRef.
  class nodes_iterator {
    using ItTy = ContainerTy::iterator;
    ItTy It;

  public:
    nodes_iterator(const ItTy &It2) : It(It2) {}
    NodeRef operator*() { return It->get(); }
    nodes_iterator operator++() {
      ++It;
      return *this;
    }
    bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }
  };
  static nodes_iterator nodes_begin(BoUpSLP *R) {
    return nodes_iterator(R->VectorizableTree.begin());
  }

  static nodes_iterator nodes_end(BoUpSLP *R) {
    return nodes_iterator(R->VectorizableTree.end());
  }

  static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
};
template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
  using TreeEntry = BoUpSLP::TreeEntry;

  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
  std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
    std::string Str;
    raw_string_ostream OS(Str);
    if (isSplat(Entry->Scalars)) {
      OS << "<splat> " << *Entry->Scalars[0];
      return Str;
    }
    for (auto V : Entry->Scalars) {
      OS << *V;
      if (std::any_of(
              R->ExternalUses.begin(), R->ExternalUses.end(),
              [&](const BoUpSLP::ExternalUser &EU) { return EU.Scalar == V; }))
        OS << " <extract>";
      OS << "\n";
    }
    return Str;
  }
  static std::string getNodeAttributes(const TreeEntry *Entry,
                                       const BoUpSLP *) {
    if (Entry->NeedToGather)
      return "color=red";
    return "";
  }
};
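
// These GraphTraits/DOTGraphTraits specializations let the generic
// GraphWriter machinery render the vectorizable tree for debugging (gather
// nodes are drawn in red), e.g. via the view-slp-tree option.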
} // end namespace llvm
BoUpSLP::~BoUpSLP() {
  for (const auto &Pair : DeletedInstructions) {
    // Replace operands of ignored instructions with Undefs in case they were
    // marked for deletion.
    if (Pair.getSecond()) {
      Value *Undef = UndefValue::get(Pair.getFirst()->getType());
      Pair.getFirst()->replaceAllUsesWith(Undef);
    }
    Pair.getFirst()->dropAllReferences();
  }
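  // All references were dropped above, so the instructions can now be erased
  // in a second pass without tripping use-list assertions, even when the
  // deleted instructions referenced one another.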
  for (const auto &Pair : DeletedInstructions) {
    assert(Pair.getFirst()->use_empty() &&
           "trying to erase instruction with users.");
    Pair.getFirst()->eraseFromParent();
  }
}
void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) {
  for (auto *V : AV) {
    if (auto *I = dyn_cast<Instruction>(V))
      eraseInstruction(I, /*ReplaceWithUndef=*/true);
  }
}
void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  ExtraValueToDebugLocsMap ExternallyUsedValues;
  buildTree(Roots, ExternallyUsedValues, UserIgnoreLst);
}
void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ExtraValueToDebugLocsMap &ExternallyUsedValues,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!allSameType(Roots))
    return;
  buildTree_rec(Roots, 0, EdgeInfo());
  // Collect the values that we need to extract from the tree.
  for (auto &TEPtr : VectorizableTree) {
    TreeEntry *Entry = TEPtr.get();

    // No need to handle users of gathered values.
    if (Entry->NeedToGather)
      continue;

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];
      int FoundLane = Lane;
      if (!Entry->ReuseShuffleIndices.empty()) {
        FoundLane =
            std::distance(Entry->ReuseShuffleIndices.begin(),
                          llvm::find(Entry->ReuseShuffleIndices, FoundLane));
      }
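
      // E.g. with ReuseShuffleIndices = {0, 1, 0, 2}, the scalar held in
      // entry lane 2 first appears in lane 3 of the shuffled vector, so
      // FoundLane becomes 3.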
      // Check if the scalar is externally used as an extra arg.
      auto ExtI = ExternallyUsedValues.find(Scalar);
      if (ExtI != ExternallyUsedValues.end()) {
        LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
                          << Lane << " from " << *Scalar << ".\n");
        ExternalUses.emplace_back(Scalar, nullptr, FoundLane);
      }
      for (User *U : Scalar->users()) {
        LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Skip in-tree scalars that become vectors
        if (TreeEntry *UseEntry = getTreeEntry(U)) {
          Value *UseScalar = UseEntry->Scalars[0];
          // Some in-tree scalars will remain as scalar in vectorized
          // instructions. If that is the case, the one in Lane 0 will
          // be used.
          if (UseScalar != U ||
              !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
            LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
                              << ".\n");
            assert(!UseEntry->NeedToGather && "Bad state");
            continue;
          }
        }

        // Ignore users in the user ignore list.
        if (is_contained(UserIgnoreList, UserInst))
          continue;

        LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
                          << Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane));
      }
    }
  }
}
void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
                            const EdgeInfo &UserTreeIdx) {
  assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");

  InstructionsState S = getSameOpcode(VL);
  if (Depth == RecursionMaxDepth) {
    LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
    return;
  }
  // Don't handle vectors.
  if (S.OpValue->getType()->isVectorTy()) {
    LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
      return;
    }
  // If all of the operands are identical or constant we have a simple solution.
  if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode()) {
    LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
    return;
  }
  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Don't vectorize ephemeral values.
  for (Value *V : VL) {
    if (EphValues.count(V)) {
      LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
                        << ") is ephemeral.\n");
      newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
      return;
    }
  }
  // Check if this is a duplicate of another entry.
  if (TreeEntry *E = getTreeEntry(S.OpValue)) {
    LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n");
    if (!E->isSame(VL)) {
      LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
      newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
      return;
    }
    // Record the reuse of the tree node. FIXME, currently this is only used to
    // properly draw the graph rather than for the actual vectorization.
    E->UserTreeIndices.push_back(UserTreeIdx);
    LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue
                      << ".\n");
    return;
  }
  // Check that none of the instructions in the bundle are already in the tree.
  for (Value *V : VL) {
    auto *I = dyn_cast<Instruction>(V);
    if (!I)
      continue;
    if (getTreeEntry(I)) {
      LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
                        << ") is already in tree.\n");
      newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
      return;
    }
  }
  // If any of the scalars is marked as a value that needs to stay scalar, then
  // we need to gather the scalars.
  // The reduction nodes (stored in UserIgnoreList) also should stay scalar.
  for (Value *V : VL) {
    if (MustGather.count(V) || is_contained(UserIgnoreList, V)) {
      LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
      newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
      return;
    }
  }
  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  auto *VL0 = cast<Instruction>(S.OpValue);
  BasicBlock *BB = VL0->getParent();

  if (!DT->isReachableFromEntry(BB)) {
    // Don't go into unreachable blocks. They may contain instructions with
    // dependency cycles which confuse the final scheduling.
    LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
    newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
    return;
  }
  // Check that every instruction appears once in this bundle.
  SmallVector<unsigned, 4> ReuseShuffleIndicies;
  SmallVector<Value *, 4> UniqueValues;
  DenseMap<Value *, unsigned> UniquePositions;
  for (Value *V : VL) {
    auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
    ReuseShuffleIndicies.emplace_back(Res.first->second);
    if (Res.second)
      UniqueValues.emplace_back(V);
  }
  size_t NumUniqueScalarValues = UniqueValues.size();
  if (NumUniqueScalarValues == VL.size()) {
    ReuseShuffleIndicies.clear();
  } else {
    LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
    if (NumUniqueScalarValues <= 1 ||
        !llvm::isPowerOf2_32(NumUniqueScalarValues)) {
      LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
      newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
      return;
    }
    VL = UniqueValues;
  }
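
  // E.g. VL = {A, B, A, B} yields UniqueValues = {A, B} and
  // ReuseShuffleIndicies = {0, 1, 0, 1}: the bundle is vectorized on the two
  // unique scalars and the original shape is restored with a shuffle.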
  auto &BSRef = BlocksSchedules[BB];
  if (!BSRef)
    BSRef = std::make_unique<BlockScheduling>(BB);

  BlockScheduling &BS = *BSRef.get();

  Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S);
  if (!Bundle) {
    LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
    assert((!BS.getScheduleData(VL0) ||
            !BS.getScheduleData(VL0)->isPartOfBundle()) &&
           "tryScheduleBundle should cancelScheduling on failure");
    newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                 ReuseShuffleIndicies);
    return;
  }
  LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
  unsigned ShuffleOrOp = S.isAltShuffle() ?
                         (unsigned) Instruction::ShuffleVector : S.getOpcode();
  switch (ShuffleOrOp) {
    case Instruction::PHI: {
      auto *PH = cast<PHINode>(VL0);

      // Check for terminator values (e.g. invoke).
      for (unsigned j = 0; j < VL.size(); ++j)
        for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
          Instruction *Term = dyn_cast<Instruction>(
              cast<PHINode>(VL[j])->getIncomingValueForBlock(
                  PH->getIncomingBlock(i)));
          if (Term && Term->isTerminator()) {
            LLVM_DEBUG(dbgs()
                       << "SLP: Need to swizzle PHINodes (terminator use).\n");
            BS.cancelScheduling(VL, VL0);
            newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                         ReuseShuffleIndicies);
            return;
          }
        }

      TreeEntry *TE =
          newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies);
      LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

      // Keeps the reordered operands to avoid code duplication.
      SmallVector<ValueList, 2> OperandsVec;
      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock(
              PH->getIncomingBlock(i)));
        TE->setOperand(i, Operands);
        OperandsVec.push_back(Operands);
      }
      for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx)
        buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx});
      return;
    }
    case Instruction::ExtractValue:
    case Instruction::ExtractElement: {
      OrdersType CurrentOrder;
      bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
      if (Reuse) {
        LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
        ++NumOpsWantToKeepOriginalOrder;
        newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                     ReuseShuffleIndicies);
        // This is a special case, as it does not gather, but at the same time
        // we are not extending buildTree_rec() towards the operands.
        ValueList Op0;
        Op0.assign(VL.size(), VL0->getOperand(0));
        VectorizableTree.back()->setOperand(0, Op0);
        return;
      }
      if (!CurrentOrder.empty()) {
        LLVM_DEBUG({
          dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
                    "with order";
          for (unsigned Idx : CurrentOrder)
            dbgs() << " " << Idx;
          dbgs() << "\n";
        });
        // Insert new order with initial value 0, if it does not exist,
        // otherwise return the iterator to the existing one.
        auto StoredCurrentOrderAndNum =
            NumOpsWantToKeepOrder.try_emplace(CurrentOrder).first;
        ++StoredCurrentOrderAndNum->getSecond();
        newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                     ReuseShuffleIndicies,
                     StoredCurrentOrderAndNum->getFirst());
        // This is a special case, as it does not gather, but at the same time
        // we are not extending buildTree_rec() towards the operands.
        ValueList Op0;
        Op0.assign(VL.size(), VL0->getOperand(0));
        VectorizableTree.back()->setOperand(0, Op0);
        return;
      }
      LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
      newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                   ReuseShuffleIndicies);
      BS.cancelScheduling(VL, VL0);
      return;
    }
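
    // Each vectorizable extract sequence above "votes" either for keeping the
    // original order (NumOpsWantToKeepOriginalOrder) or for a specific
    // permutation (NumOpsWantToKeepOrder); the pass later compares these
    // counts to pick the most popular ordering for the whole tree.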
    case Instruction::Load: {
      // Check that a vectorized load would load the same memory as a scalar
      // load. For example, we don't want to vectorize loads that are smaller
      // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
      // treats loading/storing it as an i8 struct. If we vectorize loads/stores
      // from such a struct, we read/write packed bits disagreeing with the
      // unvectorized version.
      Type *ScalarTy = VL0->getType();

      if (DL->getTypeSizeInBits(ScalarTy) !=
          DL->getTypeAllocSizeInBits(ScalarTy)) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                     ReuseShuffleIndicies);
        LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
        return;
      }

      // Make sure all loads in the bundle are simple - we can't vectorize
      // atomic or volatile loads.
      SmallVector<Value *, 4> PointerOps(VL.size());
      auto POIter = PointerOps.begin();
      for (Value *V : VL) {
        auto *L = cast<LoadInst>(V);
        if (!L->isSimple()) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                       ReuseShuffleIndicies);
          LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
          return;
        }
        *POIter = L->getPointerOperand();
        ++POIter;
      }

      OrdersType CurrentOrder;
      // Check the order of pointer operands.
      if (llvm::sortPtrAccesses(PointerOps, *DL, *SE, CurrentOrder)) {
        Value *Ptr0;
        Value *PtrN;
        if (CurrentOrder.empty()) {
          Ptr0 = PointerOps.front();
          PtrN = PointerOps.back();
        } else {
          Ptr0 = PointerOps[CurrentOrder.front()];
          PtrN = PointerOps[CurrentOrder.back()];
        }
        const SCEV *Scev0 = SE->getSCEV(Ptr0);
        const SCEV *ScevN = SE->getSCEV(PtrN);
        const auto *Diff =
            dyn_cast<SCEVConstant>(SE->getMinusSCEV(ScevN, Scev0));
        uint64_t Size = DL->getTypeAllocSize(ScalarTy);
        // Check that the sorted loads are consecutive.
        if (Diff && Diff->getAPInt().getZExtValue() == (VL.size() - 1) * Size) {
          if (CurrentOrder.empty()) {
            // Original loads are consecutive and do not require reordering.
            ++NumOpsWantToKeepOriginalOrder;
            TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
                                         UserTreeIdx, ReuseShuffleIndicies);
            TE->setOperandsInOrder();
            LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
          } else {
            // Need to reorder.
            auto I = NumOpsWantToKeepOrder.try_emplace(CurrentOrder).first;
            ++I->getSecond();
            TreeEntry *TE =
                newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                             ReuseShuffleIndicies, I->getFirst());
            TE->setOperandsInOrder();
            LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
          }
          return;
        }
      }

      LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
      BS.cancelScheduling(VL, VL0);
      newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                   ReuseShuffleIndicies);
      return;
    }
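
    // For the load bundle above: e.g. four consecutive i32 loads give
    // Size = 4 and require ScevN - Scev0 == (4 - 1) * 4 == 12 bytes between
    // the first and last pointer; any other distance is treated as
    // non-consecutive and the bundle is gathered.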
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      Type *SrcTy = VL0->getOperand(0)->getType();
      for (Value *V : VL) {
        Type *Ty = cast<Instruction>(V)->getOperand(0)->getType();
        if (Ty != SrcTy || !isValidElementType(Ty)) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                       ReuseShuffleIndicies);
          LLVM_DEBUG(dbgs()
                     << "SLP: Gathering casts with different src types.\n");
          return;
        }
      }
      TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                                   ReuseShuffleIndicies);
      LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");

      TE->setOperandsInOrder();
      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *V : VL)
          Operands.push_back(cast<Instruction>(V)->getOperand(i));

        buildTree_rec(Operands, Depth + 1, {TE, i});
      }
      return;
    }
    case Instruction::ICmp:
    case Instruction::FCmp: {
      // Check that all of the compares have the same predicate.
      CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
      CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
      Type *ComparedTy = VL0->getOperand(0)->getType();
      for (Value *V : VL) {
        CmpInst *Cmp = cast<CmpInst>(V);
        if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
            Cmp->getOperand(0)->getType() != ComparedTy) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                       ReuseShuffleIndicies);
          LLVM_DEBUG(dbgs()
                     << "SLP: Gathering cmp with different predicate.\n");
          return;
        }
      }

      TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                                   ReuseShuffleIndicies);
      LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");

      ValueList Left, Right;
      if (cast<CmpInst>(VL0)->isCommutative()) {
        // Commutative predicate - collect + sort operands of the instructions
        // so that each side is more likely to have the same opcode.
        assert(P0 == SwapP0 && "Commutative Predicate mismatch");
        reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE);
      } else {
        // Collect operands - commute if it uses the swapped predicate.
        for (Value *V : VL) {
          auto *Cmp = cast<CmpInst>(V);
          Value *LHS = Cmp->getOperand(0);
          Value *RHS = Cmp->getOperand(1);
          if (Cmp->getPredicate() != P0)
            std::swap(LHS, RHS);
          Left.push_back(LHS);
          Right.push_back(RHS);
        }
      }

      TE->setOperand(0, Left);
      TE->setOperand(1, Right);
      buildTree_rec(Left, Depth + 1, {TE, 0});
      buildTree_rec(Right, Depth + 1, {TE, 1});
      return;
    }
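
    // E.g. a bundle of {icmp slt %a, %b; icmp sgt %d, %c} is accepted above:
    // the second compare uses the swapped predicate, so its operands are
    // commuted to {%c, %d} and the whole bundle is emitted as a single
    // icmp slt.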
    case Instruction::Select:
    case Instruction::FNeg:
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                                   ReuseShuffleIndicies);
      LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");

      // Sort operands of the instructions so that each side is more likely to
      // have the same opcode.
      if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
        ValueList Left, Right;
        reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE);
        TE->setOperand(0, Left);
        TE->setOperand(1, Right);
        buildTree_rec(Left, Depth + 1, {TE, 0});
        buildTree_rec(Right, Depth + 1, {TE, 1});
        return;
      }

      TE->setOperandsInOrder();
      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));

        buildTree_rec(Operands, Depth + 1, {TE, i});
      }
      return;
    }
    case Instruction::GetElementPtr: {
      // We don't combine GEPs with complicated (nested) indexing.
      for (Value *V : VL) {
        if (cast<Instruction>(V)->getNumOperands() != 2) {
          LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                       ReuseShuffleIndicies);
          return;
        }
      }

      // We can't combine several GEPs into one vector if they operate on
      // different types.
      Type *Ty0 = VL0->getOperand(0)->getType();
      for (Value *V : VL) {
        Type *CurTy = cast<Instruction>(V)->getOperand(0)->getType();
        if (Ty0 != CurTy) {
          LLVM_DEBUG(dbgs()
                     << "SLP: not-vectorizable GEP (different types).\n");
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                       ReuseShuffleIndicies);
          return;
        }
      }

      // We don't combine GEPs with non-constant indexes.
      for (Value *V : VL) {
        auto Op = cast<Instruction>(V)->getOperand(1);
        if (!isa<ConstantInt>(Op)) {
          LLVM_DEBUG(dbgs()
                     << "SLP: not-vectorizable GEP (non-constant indexes).\n");
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                       ReuseShuffleIndicies);
          return;
        }
      }

      TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                                   ReuseShuffleIndicies);
      LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
      TE->setOperandsInOrder();
      for (unsigned i = 0, e = 2; i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *V : VL)
          Operands.push_back(cast<Instruction>(V)->getOperand(i));

        buildTree_rec(Operands, Depth + 1, {TE, i});
      }
      return;
    }
    case Instruction::Store: {
      // Check if the stores are consecutive or if we need to swizzle them.
      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
        if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                       ReuseShuffleIndicies);
          LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
          return;
        }

      TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                                   ReuseShuffleIndicies);
      LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");

      ValueList Operands;
      for (Value *V : VL)
        Operands.push_back(cast<Instruction>(V)->getOperand(0));
      TE->setOperandsInOrder();
      buildTree_rec(Operands, Depth + 1, {TE, 0});
      return;
    }
    case Instruction::Call: {
      // Check if the calls are all to the same vectorizable intrinsic.
      CallInst *CI = cast<CallInst>(VL0);
      // Check if this is an Intrinsic call or something that can be
      // represented by an intrinsic call
      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      if (!isTriviallyVectorizable(ID)) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                     ReuseShuffleIndicies);
        LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
        return;
      }
      Function *Int = CI->getCalledFunction();
      unsigned NumArgs = CI->getNumArgOperands();
      SmallVector<Value *, 4> ScalarArgs(NumArgs, nullptr);
      for (unsigned j = 0; j != NumArgs; ++j)
        if (hasVectorInstrinsicScalarOpd(ID, j))
          ScalarArgs[j] = CI->getArgOperand(j);
      for (Value *V : VL) {
        CallInst *CI2 = dyn_cast<CallInst>(V);
        if (!CI2 || CI2->getCalledFunction() != Int ||
            getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
            !CI->hasIdenticalOperandBundleSchema(*CI2)) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                       ReuseShuffleIndicies);
          LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V
                            << "\n");
          return;
        }
        // Some intrinsics have scalar arguments and should be same in order
        // for them to be vectorized.
        for (unsigned j = 0; j != NumArgs; ++j) {
          if (hasVectorInstrinsicScalarOpd(ID, j)) {
            Value *A1J = CI2->getArgOperand(j);
            if (ScalarArgs[j] != A1J) {
              BS.cancelScheduling(VL, VL0);
              newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                           ReuseShuffleIndicies);
              LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                                << " argument " << ScalarArgs[j] << "!=" << A1J
                                << "\n");
              return;
            }
          }
        }
        // Verify that the bundle operands are identical between the two calls.
        if (CI->hasOperandBundles() &&
            !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
                        CI->op_begin() + CI->getBundleOperandsEndIndex(),
                        CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                       ReuseShuffleIndicies);
          LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
                            << *CI << "!=" << *V << '\n');
          return;
        }
      }

      TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                                   ReuseShuffleIndicies);
      TE->setOperandsInOrder();
      for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *V : VL) {
          auto *CI2 = cast<CallInst>(V);
          Operands.push_back(CI2->getArgOperand(i));
        }
        buildTree_rec(Operands, Depth + 1, {TE, i});
      }
      return;
    }
: {
2755 // If this is not an alternate sequence of opcode like add-sub
2756 // then do not vectorize this instruction.
2757 if (!S
.isAltShuffle()) {
2758 BS
.cancelScheduling(VL
, VL0
);
2759 newTreeEntry(VL
, None
/*not vectorized*/, S
, UserTreeIdx
,
2760 ReuseShuffleIndicies
);
2761 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
2764 TreeEntry
*TE
= newTreeEntry(VL
, Bundle
/*vectorized*/, S
, UserTreeIdx
,
2765 ReuseShuffleIndicies
);
2766 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
2768 // Reorder operands if reordering would enable vectorization.
2769 if (isa
<BinaryOperator
>(VL0
)) {
2770 ValueList Left
, Right
;
2771 reorderInputsAccordingToOpcode(VL
, Left
, Right
, *DL
, *SE
);
2772 TE
->setOperand(0, Left
);
2773 TE
->setOperand(1, Right
);
2774 buildTree_rec(Left
, Depth
+ 1, {TE
, 0});
2775 buildTree_rec(Right
, Depth
+ 1, {TE
, 1});
2779 TE
->setOperandsInOrder();
2780 for (unsigned i
= 0, e
= VL0
->getNumOperands(); i
< e
; ++i
) {
2782 // Prepare the operand vector.
2784 Operands
.push_back(cast
<Instruction
>(V
)->getOperand(i
));
2786 buildTree_rec(Operands
, Depth
+ 1, {TE
, i
});
2791 BS
.cancelScheduling(VL
, VL0
);
2792 newTreeEntry(VL
, None
/*not vectorized*/, S
, UserTreeIdx
,
2793 ReuseShuffleIndicies
);
2794 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
  unsigned N;
  Type *EltTy;
  auto *ST = dyn_cast<StructType>(T);
  if (ST) {
    N = ST->getNumElements();
    EltTy = *ST->element_begin();
  } else {
    N = cast<ArrayType>(T)->getNumElements();
    EltTy = cast<ArrayType>(T)->getElementType();
  }
  if (!isValidElementType(EltTy))
    return 0;
  uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N));
  if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize ||
      VTSize != DL.getTypeStoreSizeInBits(T))
    return 0;
  if (ST) {
    // Check that struct is homogeneous.
    for (const auto *Ty : ST->elements())
      if (Ty != EltTy)
        return 0;
  }
  return N;
}
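
// E.g. with a 128-bit minimum register size, the homogeneous struct
// {i32, i32, i32, i32} maps to a <4 x i32> (VTSize == 128), while
// {i32, i64} is rejected because its elements differ.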
bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
                              SmallVectorImpl<unsigned> &CurrentOrder) const {
  Instruction *E0 = cast<Instruction>(OpValue);
  assert(E0->getOpcode() == Instruction::ExtractElement ||
         E0->getOpcode() == Instruction::ExtractValue);
  assert(E0->getOpcode() == getSameOpcode(VL).getOpcode() && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *Vec = E0->getOperand(0);

  CurrentOrder.clear();

  // We have to extract from a vector/aggregate with the same number of elements.
  unsigned NElts;
  if (E0->getOpcode() == Instruction::ExtractValue) {
    const DataLayout &DL = E0->getModule()->getDataLayout();
    NElts = canMapToVector(Vec->getType(), DL);
    if (!NElts)
      return false;
    // Check if load can be rewritten as load of vector.
    LoadInst *LI = dyn_cast<LoadInst>(Vec);
    if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
      return false;
  } else {
    NElts = Vec->getType()->getVectorNumElements();
  }

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
  bool ShouldKeepOrder = true;
  unsigned E = VL.size();
  // Assign to all items the initial value E + 1 so we can check if the extract
  // instruction index was used already.
  // Also, later we can check that all the indices are used and we have a
  // consecutive access in the extract instructions, by checking that no
  // element of CurrentOrder still has value E + 1.
  CurrentOrder.assign(E, E + 1);
  unsigned I = 0;
  for (; I < E; ++I) {
    auto *Inst = cast<Instruction>(VL[I]);
    if (Inst->getOperand(0) != Vec)
      break;
    Optional<unsigned> Idx = getExtractIndex(Inst);
    if (!Idx)
      break;
    const unsigned ExtIdx = *Idx;
    if (ExtIdx != I) {
      if (ExtIdx >= E || CurrentOrder[ExtIdx] != E + 1)
        break;
      ShouldKeepOrder = false;
      CurrentOrder[ExtIdx] = I;
    } else {
      if (CurrentOrder[I] != E + 1)
        break;
      CurrentOrder[I] = I;
    }
  }
  if (I < E) {
    CurrentOrder.clear();
    return false;
  }

  return ShouldKeepOrder;
}
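
// E.g. extracts with indices {1, 0, 3, 2} fill CurrentOrder = {1, 0, 3, 2}
// and return false: the sequence is still vectorizable, but only after the
// reordering described by CurrentOrder.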
bool BoUpSLP::areAllUsersVectorized(Instruction *I) const {
  return I->hasOneUse() ||
         std::all_of(I->user_begin(), I->user_end(), [this](User *U) {
           return ScalarToTreeEntry.count(U) > 0;
         });
}
int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value *> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
    ScalarTy = CI->getOperand(0)->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  // If we have computed a smaller type for the expression, update VecTy so
  // that the costs will be accurate.
  if (MinBWs.count(VL[0]))
    VecTy = VectorType::get(
        IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());

  unsigned ReuseShuffleNumbers = E->ReuseShuffleIndices.size();
  bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
  int ReuseShuffleCost = 0;
  if (NeedToShuffleReuses) {
    ReuseShuffleCost =
        TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
  }
  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return ReuseShuffleCost +
             TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    if (E->getOpcode() == Instruction::ExtractElement &&
        allSameType(VL) && allSameBlock(VL)) {
      Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = isShuffle(VL);
      if (ShuffleKind.hasValue()) {
        int Cost = TTI->getShuffleCost(ShuffleKind.getValue(), VecTy);
        for (auto *V : VL) {
          // If all users of instruction are going to be vectorized and this
          // instruction itself is not going to be vectorized, consider this
          // instruction as dead and remove its cost from the final cost of
          // the vectorized tree.
          if (areAllUsersVectorized(cast<Instruction>(V)) &&
              !ScalarToTreeEntry.count(V)) {
            auto *IO = cast<ConstantInt>(
                cast<ExtractElementInst>(V)->getIndexOperand());
            Cost -= TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                            IO->getZExtValue());
          }
        }
        return ReuseShuffleCost + Cost;
      }
    }
    return ReuseShuffleCost + getGatherCost(VL);
  }
  assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
  Instruction *VL0 = E->getMainOp();
  unsigned ShuffleOrOp =
      E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
  switch (ShuffleOrOp) {
    case Instruction::PHI:
      return 0;
    case Instruction::ExtractValue:
    case Instruction::ExtractElement:
      if (NeedToShuffleReuses) {
        unsigned Idx = 0;
        for (unsigned I : E->ReuseShuffleIndices) {
          if (ShuffleOrOp == Instruction::ExtractElement) {
            auto *IO = cast<ConstantInt>(
                cast<ExtractElementInst>(VL[I])->getIndexOperand());
            Idx = IO->getZExtValue();
            ReuseShuffleCost -= TTI->getVectorInstrCost(
                Instruction::ExtractElement, VecTy, Idx);
          } else {
            ReuseShuffleCost -= TTI->getVectorInstrCost(
                Instruction::ExtractElement, VecTy, Idx);
            ++Idx;
          }
        }
        Idx = ReuseShuffleNumbers;
        for (Value *V : VL) {
          if (ShuffleOrOp == Instruction::ExtractElement) {
            auto *IO = cast<ConstantInt>(
                cast<ExtractElementInst>(V)->getIndexOperand());
            Idx = IO->getZExtValue();
          } else {
            --Idx;
          }
          ReuseShuffleCost +=
              TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, Idx);
        }
      }
      if (!E->NeedToGather) {
        int DeadCost = ReuseShuffleCost;
        if (!E->ReorderIndices.empty()) {
          // TODO: Merge this shuffle with the ReuseShuffleCost.
          DeadCost += TTI->getShuffleCost(
              TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
        }
        for (unsigned i = 0, e = VL.size(); i < e; ++i) {
          Instruction *E = cast<Instruction>(VL[i]);
          // If all users are going to be vectorized, instruction can be
          // considered as dead.
          // Likewise, if it has only one user, it will be vectorized for sure.
          if (areAllUsersVectorized(E)) {
            // Take credit for instruction that will become dead.
            if (E->hasOneUse()) {
              Instruction *Ext = E->user_back();
              if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
                  all_of(Ext->users(),
                         [](User *U) { return isa<GetElementPtrInst>(U); })) {
                // Use getExtractWithExtendCost() to calculate the cost of
                // extractelement/ext pair.
                DeadCost -= TTI->getExtractWithExtendCost(
                    Ext->getOpcode(), Ext->getType(), VecTy, i);
                // Add back the cost of s|zext which is subtracted separately.
                DeadCost += TTI->getCastInstrCost(
                    Ext->getOpcode(), Ext->getType(), E->getType(), Ext);
                continue;
              }
            }
            DeadCost -=
                TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
          }
        }
        return DeadCost;
      }
      return ReuseShuffleCost + getGatherCost(VL);
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      Type *SrcTy = VL0->getOperand(0)->getType();
      int ScalarEltCost =
          TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy, VL0);
      if (NeedToShuffleReuses) {
        ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
      }

      // Calculate the cost of this instruction.
      int ScalarCost = VL.size() * ScalarEltCost;

      VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
      int VecCost = 0;
      // Check if the values are candidates to demote.
      if (!MinBWs.count(VL0) || VecTy != SrcVecTy) {
        VecCost = ReuseShuffleCost +
                  TTI->getCastInstrCost(E->getOpcode(), VecTy, SrcVecTy, VL0);
      }
      return VecCost - ScalarCost;
    }
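
    // Note: when the whole expression has been demoted (MinBWs) and the
    // demoted vector type already matches the source type, the cast becomes
    // a no-op, so VecCost intentionally stays 0 above.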
    case Instruction::FCmp:
    case Instruction::ICmp:
    case Instruction::Select: {
      // Calculate the cost of this instruction.
      int ScalarEltCost = TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy,
                                                  Builder.getInt1Ty(), VL0);
      if (NeedToShuffleReuses) {
        ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
      }
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      int ScalarCost = VecTy->getNumElements() * ScalarEltCost;
      int VecCost = TTI->getCmpSelInstrCost(E->getOpcode(), VecTy, MaskTy, VL0);
      return ReuseShuffleCost + VecCost - ScalarCost;
    }
    case Instruction::FNeg:
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand.
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;
      TargetTransformInfo::OperandValueProperties Op1VP =
          TargetTransformInfo::OP_None;
      TargetTransformInfo::OperandValueProperties Op2VP =
          TargetTransformInfo::OP_PowerOf2;

      // If all operands are exactly the same ConstantInt then set the
      // operand kind to OK_UniformConstantValue.
      // If instead not all operands are constants, then set the operand kind
      // to OK_AnyValue. If all operands are constants but not the same,
      // then set the operand kind to OK_NonUniformConstantValue.
      ConstantInt *CInt0 = nullptr;
      for (unsigned i = 0, e = VL.size(); i < e; ++i) {
        const Instruction *I = cast<Instruction>(VL[i]);
        unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0;
        ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx));
        if (!CInt) {
          Op2VK = TargetTransformInfo::OK_AnyValue;
          Op2VP = TargetTransformInfo::OP_None;
          break;
        }
        if (Op2VP == TargetTransformInfo::OP_PowerOf2 &&
            !CInt->getValue().isPowerOf2())
          Op2VP = TargetTransformInfo::OP_None;
        if (i == 0) {
          CInt0 = CInt;
          continue;
        }
        if (CInt0 != CInt)
          Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      }

      SmallVector<const Value *, 4> Operands(VL0->operand_values());
      int ScalarEltCost = TTI->getArithmeticInstrCost(
          E->getOpcode(), ScalarTy, Op1VK, Op2VK, Op1VP, Op2VP, Operands);
      if (NeedToShuffleReuses) {
        ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
      }
      int ScalarCost = VecTy->getNumElements() * ScalarEltCost;
      int VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, Op1VK,
                                                Op2VK, Op1VP, Op2VP, Operands);
      return ReuseShuffleCost + VecCost - ScalarCost;
    }
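
    // E.g. a bundle of shifts whose second operands are all the constant 8
    // keeps Op2VK = OK_UniformConstantValue and Op2VP = OP_PowerOf2, which
    // many targets cost considerably cheaper than a shift by a variable
    // amount.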
    case Instruction::GetElementPtr: {
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;

      int ScalarEltCost =
          TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
      if (NeedToShuffleReuses) {
        ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
      }
      int ScalarCost = VecTy->getNumElements() * ScalarEltCost;
      int VecCost =
          TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);
      return ReuseShuffleCost + VecCost - ScalarCost;
    }
    case Instruction::Load: {
      // Cost of wide load - cost of scalar loads.
      unsigned alignment = cast<LoadInst>(VL0)->getAlignment();
      int ScalarEltCost =
          TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0, VL0);
      if (NeedToShuffleReuses) {
        ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
      }
      int ScalarLdCost = VecTy->getNumElements() * ScalarEltCost;
      int VecLdCost =
          TTI->getMemoryOpCost(Instruction::Load, VecTy, alignment, 0, VL0);
      if (!E->ReorderIndices.empty()) {
        // TODO: Merge this shuffle with the ReuseShuffleCost.
        VecLdCost += TTI->getShuffleCost(
            TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
      }
      return ReuseShuffleCost + VecLdCost - ScalarLdCost;
    }
    case Instruction::Store: {
      // We know that we can merge the stores. Calculate the cost.
      unsigned alignment = cast<StoreInst>(VL0)->getAlignment();
      int ScalarEltCost =
          TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0, VL0);
      if (NeedToShuffleReuses) {
        ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
      }
      int ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
      int VecStCost =
          TTI->getMemoryOpCost(Instruction::Store, VecTy, alignment, 0, VL0);
      return ReuseShuffleCost + VecStCost - ScalarStCost;
    }
    case Instruction::Call: {
      CallInst *CI = cast<CallInst>(VL0);
      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

      // Calculate the cost of the scalar and vector calls.
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op)
        ScalarTys.push_back(CI->getArgOperand(op)->getType());

      FastMathFlags FMF;
      if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
        FMF = FPMO->getFastMathFlags();

      int ScalarEltCost =
          TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF);
      if (NeedToShuffleReuses) {
        ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
      }
      int ScalarCallCost = VecTy->getNumElements() * ScalarEltCost;

      SmallVector<Value *, 4> Args(CI->arg_operands());
      int VecCallCost = TTI->getIntrinsicInstrCost(ID, CI->getType(), Args, FMF,
                                                   VecTy->getNumElements());

      LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
                        << " (" << VecCallCost << "-" << ScalarCallCost << ")"
                        << " for " << *CI << "\n");

      return ReuseShuffleCost + VecCallCost - ScalarCallCost;
    }
    case Instruction::ShuffleVector: {
      assert(E->isAltShuffle() &&
             ((Instruction::isBinaryOp(E->getOpcode()) &&
               Instruction::isBinaryOp(E->getAltOpcode())) ||
              (Instruction::isCast(E->getOpcode()) &&
               Instruction::isCast(E->getAltOpcode()))) &&
             "Invalid Shuffle Vector Operand");
      int ScalarCost = 0;
      if (NeedToShuffleReuses) {
        for (unsigned Idx : E->ReuseShuffleIndices) {
          Instruction *I = cast<Instruction>(VL[Idx]);
          ReuseShuffleCost -= TTI->getInstructionCost(
              I, TargetTransformInfo::TCK_RecipThroughput);
        }
        for (Value *V : VL) {
          Instruction *I = cast<Instruction>(V);
          ReuseShuffleCost += TTI->getInstructionCost(
              I, TargetTransformInfo::TCK_RecipThroughput);
        }
      }
      for (Value *V : VL) {
        Instruction *I = cast<Instruction>(V);
        assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
        ScalarCost += TTI->getInstructionCost(
            I, TargetTransformInfo::TCK_RecipThroughput);
      }
      // VecCost is equal to sum of the cost of creating 2 vectors
      // and the cost of creating shuffle.
      int VecCost = 0;
      if (Instruction::isBinaryOp(E->getOpcode())) {
        VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy);
        VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy);
      } else {
        Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType();
        Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType();
        VectorType *Src0Ty = VectorType::get(Src0SclTy, VL.size());
        VectorType *Src1Ty = VectorType::get(Src1SclTy, VL.size());
        VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty);
        VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty);
      }
      VecCost += TTI->getShuffleCost(TargetTransformInfo::SK_Select, VecTy, 0);
      return ReuseShuffleCost + VecCost - ScalarCost;
    }
    default:
      llvm_unreachable("Unknown instruction");
  }
}
bool BoUpSLP::isFullyVectorizableTinyTree() const {
  LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
                    << VectorizableTree.size() << " is fully vectorizable.\n");

  // We only handle trees of heights 1 and 2.
  if (VectorizableTree.size() == 1 && !VectorizableTree[0]->NeedToGather)
    return true;

  if (VectorizableTree.size() != 2)
    return false;

  // Handle splat and all-constants stores.
  if (!VectorizableTree[0]->NeedToGather &&
      (allConstant(VectorizableTree[1]->Scalars) ||
       isSplat(VectorizableTree[1]->Scalars)))
    return true;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0]->NeedToGather || VectorizableTree[1]->NeedToGather)
    return false;

  return true;
}
bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() const {
  // We can vectorize the tree if its size is greater than or equal to the
  // minimum size specified by the MinTreeSize command line option.
  if (VectorizableTree.size() >= MinTreeSize)
    return false;

  // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
  // can vectorize it if we can prove it fully vectorizable.
  if (isFullyVectorizableTinyTree())
    return false;

  assert(VectorizableTree.empty()
             ? ExternalUses.empty()
             : true && "We shouldn't have any external users");

  // Otherwise, we can't vectorize the tree. It is both tiny and not fully
  // vectorizable.
  return true;
}
3308 // Walk from the bottom of the tree to the top, tracking which values are
3309 // live. When we see a call instruction that is not part of our tree,
3310 // query TTI to see if there is a cost to keeping values live over it
3311 // (for example, if spills and fills are required).
3312 unsigned BundleWidth
= VectorizableTree
.front()->Scalars
.size();
3315 SmallPtrSet
<Instruction
*, 4> LiveValues
;
3316 Instruction
*PrevInst
= nullptr;
3318 for (const auto &TEPtr
: VectorizableTree
) {
3319 Instruction
*Inst
= dyn_cast
<Instruction
>(TEPtr
->Scalars
[0]);
3328 // Update LiveValues.
3329 LiveValues
.erase(PrevInst
);
3330 for (auto &J
: PrevInst
->operands()) {
3331 if (isa
<Instruction
>(&*J
) && getTreeEntry(&*J
))
3332 LiveValues
.insert(cast
<Instruction
>(&*J
));
3336 dbgs() << "SLP: #LV: " << LiveValues
.size();
3337 for (auto *X
: LiveValues
)
3338 dbgs() << " " << X
->getName();
3339 dbgs() << ", Looking at ";
3343 // Now find the sequence of instructions between PrevInst and Inst.
3344 unsigned NumCalls
= 0;
3345 BasicBlock::reverse_iterator InstIt
= ++Inst
->getIterator().getReverse(),
3347 PrevInst
->getIterator().getReverse();
3348 while (InstIt
!= PrevInstIt
) {
3349 if (PrevInstIt
== PrevInst
->getParent()->rend()) {
3350 PrevInstIt
= Inst
->getParent()->rbegin();
3354 // Debug informations don't impact spill cost.
3355 if ((isa
<CallInst
>(&*PrevInstIt
) &&
3356 !isa
<DbgInfoIntrinsic
>(&*PrevInstIt
)) &&
3357 &*PrevInstIt
!= PrevInst
)
3364 SmallVector
<Type
*, 4> V
;
3365 for (auto *II
: LiveValues
)
3366 V
.push_back(VectorType::get(II
->getType(), BundleWidth
));
3367 Cost
+= NumCalls
* TTI
->getCostOfKeepingLiveOverCall(V
);
int BoUpSLP::getTreeCost() {
  int Cost = 0;
  LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
                    << VectorizableTree.size() << ".\n");

  unsigned BundleWidth = VectorizableTree[0]->Scalars.size();

  for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
    TreeEntry &TE = *VectorizableTree[I].get();

    // We create duplicate tree entries for gather sequences that have multiple
    // uses. However, we should not compute the cost of duplicate sequences.
    // For example, if we have a build vector (i.e., insertelement sequence)
    // that is used by more than one vector instruction, we only need to
    // compute the cost of the insertelement instructions once. The redundant
    // instructions will be eliminated by CSE.
    //
    // We should consider not creating duplicate tree entries for gather
    // sequences, and instead add additional edges to the tree representing
    // their uses. Since such an approach results in fewer total entries,
    // existing heuristics based on tree size may yield different results.
    //
    if (TE.NeedToGather &&
        std::any_of(
            std::next(VectorizableTree.begin(), I + 1), VectorizableTree.end(),
            [TE](const std::unique_ptr<TreeEntry> &EntryPtr) {
              return EntryPtr->NeedToGather && EntryPtr->isSame(TE.Scalars);
            }))
      continue;

    int C = getEntryCost(&TE);
    LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
                      << " for bundle that starts with " << *TE.Scalars[0]
                      << ".\n");
    Cost += C;
  }

  SmallPtrSet<Value *, 16> ExtractCostCalculated;
  int ExtractCost = 0;
  for (ExternalUser &EU : ExternalUses) {
    // We only add extract cost once for the same scalar.
    if (!ExtractCostCalculated.insert(EU.Scalar).second)
      continue;

    // Uses by ephemeral values are free (because the ephemeral value will be
    // removed prior to code generation, and so the extraction will be
    // removed as well).
    if (EphValues.count(EU.User))
      continue;

    // If we plan to rewrite the tree in a smaller type, we will need to sign
    // extend the extracted value back to the original type. Here, we account
    // for the extract and the added cost of the sign extend if needed.
    auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth);
    auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
    if (MinBWs.count(ScalarRoot)) {
      auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
      auto Extend =
          MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
      VecTy = VectorType::get(MinTy, BundleWidth);
      ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
                                                   VecTy, EU.Lane);
    } else {
      ExtractCost +=
          TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
    }
  }

  int SpillCost = getSpillCost();
  Cost += SpillCost + ExtractCost;

  std::string Str;
  {
    raw_string_ostream OS(Str);
    OS << "SLP: Spill Cost = " << SpillCost << ".\n"
       << "SLP: Extract Cost = " << ExtractCost << ".\n"
       << "SLP: Total Cost = " << Cost << ".\n";
  }
  LLVM_DEBUG(dbgs() << Str);

  if (ViewSLPTree)
    ViewGraph(this, "SLP" + F->getName(), false, Str);

  return Cost;
}

int BoUpSLP::getGatherCost(Type *Ty,
                           const DenseSet<unsigned> &ShuffledIndices) const {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    if (!ShuffledIndices.count(i))
      Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  if (!ShuffledIndices.empty())
    Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty);
  return Cost;
}

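// For example, for VL = {a, b, a, c} the reverse scan below keeps the last
// occurrence of 'a' (index 2) as unique and marks index 0 as a shuffle
// candidate, so the gather is costed as three inserts plus one single-source
// permute instead of four inserts.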
int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  // Check if the same elements are inserted several times and count them as
  // shuffle candidates.
  DenseSet<unsigned> ShuffledElements;
  DenseSet<Value *> UniqueElements;
  // Iterate in reverse order to consider insert elements with the high cost.
  for (unsigned I = VL.size(); I > 0; --I) {
    unsigned Idx = I - 1;
    if (!UniqueElements.insert(VL[Idx]).second)
      ShuffledElements.insert(Idx);
  }
  return getGatherCost(VecTy, ShuffledElements);
}

// Perform operand reordering on the instructions in VL and return the
// reordered operands in Left and Right.
void BoUpSLP::reorderInputsAccordingToOpcode(
    ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left,
    SmallVectorImpl<Value *> &Right, const DataLayout &DL,
    ScalarEvolution &SE) {
  VLOperands Ops(VL, DL, SE);
  // Reorder the operands in place.
  Ops.reorder();
  Left = Ops.getVL(0);
  Right = Ops.getVL(1);
}

void BoUpSLP::setInsertPointAfterBundle(TreeEntry *E) {
  // Get the basic block this bundle is in. All instructions in the bundle
  // should be in this block.
  auto *Front = E->getMainOp();
  auto *BB = Front->getParent();
  assert(llvm::all_of(make_range(E->Scalars.begin(), E->Scalars.end()),
                      [=](Value *V) -> bool {
                        auto *I = cast<Instruction>(V);
                        return !E->isOpcodeOrAlt(I) || I->getParent() == BB;
                      }));

  // The last instruction in the bundle in program order.
  Instruction *LastInst = nullptr;

  // Find the last instruction. The common case should be that BB has been
  // scheduled, and the last instruction is VL.back(). So we start with
  // VL.back() and iterate over schedule data until we reach the end of the
  // bundle. The end of the bundle is marked by null ScheduleData.
  if (BlocksSchedules.count(BB)) {
    auto *Bundle =
        BlocksSchedules[BB]->getScheduleData(E->isOneOf(E->Scalars.back()));
    if (Bundle && Bundle->isPartOfBundle())
      for (; Bundle; Bundle = Bundle->NextInBundle)
        if (Bundle->OpValue == Bundle->Inst)
          LastInst = Bundle->Inst;
  }

  // LastInst can still be null at this point if there's either not an entry
  // for BB in BlocksSchedules or there's no ScheduleData available for
  // VL.back(). This can be the case if buildTree_rec aborts for various
  // reasons (e.g., the maximum recursion depth is reached, the maximum region
  // size is reached, etc.). ScheduleData is initialized in the scheduling
  // "dry-run".
  //
  // If this happens, we can still find the last instruction by brute force. We
  // iterate forwards from Front (inclusive) until we either see all
  // instructions in the bundle or reach the end of the block. If Front is the
  // last instruction in program order, LastInst will be set to Front, and we
  // will visit all the remaining instructions in the block.
  //
  // One of the reasons we exit early from buildTree_rec is to place an upper
  // bound on compile-time. Thus, taking an additional compile-time hit here is
  // not ideal. However, this should be exceedingly rare since it requires that
  // we both exit early from buildTree_rec and that the bundle be out-of-order
  // (causing us to iterate all the way to the end of the block).
  if (!LastInst) {
    SmallPtrSet<Value *, 16> Bundle(E->Scalars.begin(), E->Scalars.end());
    for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
      if (Bundle.erase(&I) && E->isOpcodeOrAlt(&I))
        LastInst = &I;
      if (Bundle.empty())
        break;
    }
  }
  assert(LastInst && "Failed to find last instruction in bundle");

  // Set the insertion point after the last instruction in the bundle. Set the
  // debug location to Front.
  Builder.SetInsertPoint(BB, ++LastInst->getIterator());
  Builder.SetCurrentDebugLocation(Front->getDebugLoc());
}

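// For example, gathering a bundle {a, b, c, d} of type T emits the chain
//   %v0 = insertelement <4 x T> undef, T a, i32 0
//   %v1 = insertelement <4 x T> %v0, T b, i32 1
//   ...
// and records each insertelement for later hoisting/CSE; scalars that are
// also part of the tree get an ExternalUses entry so an extract is emitted.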
Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instruction.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (auto *Insrt = dyn_cast<InsertElementInst>(Vec)) {
      GatherSeq.insert(Insrt);
      CSEBlocks.insert(Insrt->getParent());

      // Add to our 'need-to-extract' list.
      if (TreeEntry *E = getTreeEntry(VL[i])) {
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = E->Scalars.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        if (!E->ReuseShuffleIndices.empty()) {
          FoundLane =
              std::distance(E->ReuseShuffleIndices.begin(),
                            llvm::find(E->ReuseShuffleIndices, FoundLane));
        }
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  InstructionsState S = getSameOpcode(VL);
  if (S.getOpcode()) {
    if (TreeEntry *E = getTreeEntry(S.OpValue)) {
      if (E->isSame(VL)) {
        Value *V = vectorizeTree(E);
        if (VL.size() == E->Scalars.size() && !E->ReuseShuffleIndices.empty()) {
          // We need to get the vectorized value but without shuffle.
          if (auto *SV = dyn_cast<ShuffleVectorInst>(V)) {
            V = SV->getOperand(0);
          } else {
            // Reshuffle to get only unique values.
            SmallVector<unsigned, 4> UniqueIdxs;
            SmallSet<unsigned, 4> UsedIdxs;
            for (unsigned Idx : E->ReuseShuffleIndices)
              if (UsedIdxs.insert(Idx).second)
                UniqueIdxs.emplace_back(Idx);
            V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()),
                                            UniqueIdxs);
          }
        }
        return V;
      }
    }
  }

  Type *ScalarTy = S.OpValue->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
    ScalarTy = SI->getValueOperand()->getType();

  // Check that every instruction appears once in this bundle.
  SmallVector<unsigned, 4> ReuseShuffleIndicies;
  SmallVector<Value *, 4> UniqueValues;
  if (VL.size() > 2) {
    DenseMap<Value *, unsigned> UniquePositions;
    for (Value *V : VL) {
      auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
      ReuseShuffleIndicies.emplace_back(Res.first->second);
      if (Res.second || isa<Constant>(V))
        UniqueValues.emplace_back(V);
    }
    // Do not shuffle single element or if number of unique values is not power
    // of 2.
    if (UniqueValues.size() == VL.size() || UniqueValues.size() <= 1 ||
        !llvm::isPowerOf2_32(UniqueValues.size()))
      ReuseShuffleIndicies.clear();
    else
      VL = UniqueValues;
  }
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  Value *V = Gather(VL, VecTy);
  if (!ReuseShuffleIndicies.empty()) {
    V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                    ReuseShuffleIndicies, "shuffle");
    if (auto *I = dyn_cast<Instruction>(V)) {
      GatherSeq.insert(I);
      CSEBlocks.insert(I->getParent());
    }
  }
  return V;
}

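// Computes the inverse of the permutation Indices: Mask[Indices[I]] = I. For
// example, Indices = {2, 0, 1} yields Mask = {1, 2, 0}, so applying Mask as a
// shuffle undoes the reordering described by Indices.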
static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<unsigned> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  Instruction *VL0 = E->getMainOp();
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E);
    auto *V = Gather(E->Scalars, VecTy);
    if (NeedToShuffleReuses) {
      V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                      E->ReuseShuffleIndices, "shuffle");
      if (auto *I = dyn_cast<Instruction>(V)) {
        GatherSeq.insert(I);
        CSEBlocks.insert(I->getParent());
      }
    }
    E->VectorizedValue = V;
    return V;
  }

  unsigned ShuffleOrOp =
      E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
  switch (ShuffleOrOp) {
    case Instruction::PHI: {
      auto *PH = cast<PHINode>(VL0);
      Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
      Value *V = NewPhi;
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;

      // PHINodes may have multiple entries from the same block. We want to
      // visit every block once.
      SmallPtrSet<BasicBlock *, 4> VisitedBBs;

      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        BasicBlock *IBB = PH->getIncomingBlock(i);

        if (!VisitedBBs.insert(IBB).second) {
          NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
          continue;
        }

        Builder.SetInsertPoint(IBB->getTerminator());
        Builder.SetCurrentDebugLocation(PH->getDebugLoc());
        Value *Vec = vectorizeTree(E->getOperand(i));
        NewPhi->addIncoming(Vec, IBB);
      }

      assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
             "Invalid number of incoming values");
      return V;
    }
    case Instruction::ExtractElement: {
      if (!E->NeedToGather) {
        Value *V = E->getSingleOperand(0);
        if (!E->ReorderIndices.empty()) {
          SmallVector<unsigned, 4> Mask;
          inversePermutation(E->ReorderIndices, Mask);
          Builder.SetInsertPoint(VL0);
          V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), Mask,
                                          "reorder_shuffle");
        }
        if (NeedToShuffleReuses) {
          // TODO: Merge this shuffle with the ReorderShuffleMask.
          if (E->ReorderIndices.empty())
            Builder.SetInsertPoint(VL0);
          V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                          E->ReuseShuffleIndices, "shuffle");
        }
        E->VectorizedValue = V;
        return V;
      }
      setInsertPointAfterBundle(E);
      auto *V = Gather(E->Scalars, VecTy);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
        if (auto *I = dyn_cast<Instruction>(V)) {
          GatherSeq.insert(I);
          CSEBlocks.insert(I->getParent());
        }
      }
      E->VectorizedValue = V;
      return V;
    }
    case Instruction::ExtractValue: {
      if (!E->NeedToGather) {
        LoadInst *LI = cast<LoadInst>(E->getSingleOperand(0));
        Builder.SetInsertPoint(LI);
        PointerType *PtrTy =
            PointerType::get(VecTy, LI->getPointerAddressSpace());
        Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
        LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlignment());
        Value *NewV = propagateMetadata(V, E->Scalars);
        if (!E->ReorderIndices.empty()) {
          SmallVector<unsigned, 4> Mask;
          inversePermutation(E->ReorderIndices, Mask);
          NewV = Builder.CreateShuffleVector(NewV, UndefValue::get(VecTy), Mask,
                                             "reorder_shuffle");
        }
        if (NeedToShuffleReuses) {
          // TODO: Merge this shuffle with the ReorderShuffleMask.
          NewV = Builder.CreateShuffleVector(
              NewV, UndefValue::get(VecTy), E->ReuseShuffleIndices, "shuffle");
        }
        E->VectorizedValue = NewV;
        return NewV;
      }
      setInsertPointAfterBundle(E);
      auto *V = Gather(E->Scalars, VecTy);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
        if (auto *I = dyn_cast<Instruction>(V)) {
          GatherSeq.insert(I);
          CSEBlocks.insert(I->getParent());
        }
      }
      E->VectorizedValue = V;
      return V;
    }
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      setInsertPointAfterBundle(E);

      Value *InVec = vectorizeTree(E->getOperand(0));

      if (E->VectorizedValue) {
        LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
        return E->VectorizedValue;
      }

      auto *CI = cast<CastInst>(VL0);
      Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;
      return V;
    }
    case Instruction::FCmp:
    case Instruction::ICmp: {
      setInsertPointAfterBundle(E);

      Value *L = vectorizeTree(E->getOperand(0));
      Value *R = vectorizeTree(E->getOperand(1));

      if (E->VectorizedValue) {
        LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
        return E->VectorizedValue;
      }

      CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
      Value *V;
      if (E->getOpcode() == Instruction::FCmp)
        V = Builder.CreateFCmp(P0, L, R);
      else
        V = Builder.CreateICmp(P0, L, R);

      propagateIRFlags(V, E->Scalars, VL0);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;
      return V;
    }
    case Instruction::Select: {
      setInsertPointAfterBundle(E);

      Value *Cond = vectorizeTree(E->getOperand(0));
      Value *True = vectorizeTree(E->getOperand(1));
      Value *False = vectorizeTree(E->getOperand(2));

      if (E->VectorizedValue) {
        LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
        return E->VectorizedValue;
      }

      Value *V = Builder.CreateSelect(Cond, True, False);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;
      return V;
    }
    case Instruction::FNeg: {
      setInsertPointAfterBundle(E);

      Value *Op = vectorizeTree(E->getOperand(0));

      if (E->VectorizedValue) {
        LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
        return E->VectorizedValue;
      }

      Value *V = Builder.CreateUnOp(
          static_cast<Instruction::UnaryOps>(E->getOpcode()), Op);
      propagateIRFlags(V, E->Scalars, VL0);
      if (auto *I = dyn_cast<Instruction>(V))
        V = propagateMetadata(I, E->Scalars);

      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;

      return V;
    }
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      setInsertPointAfterBundle(E);

      Value *LHS = vectorizeTree(E->getOperand(0));
      Value *RHS = vectorizeTree(E->getOperand(1));

      if (E->VectorizedValue) {
        LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
        return E->VectorizedValue;
      }

      Value *V = Builder.CreateBinOp(
          static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
      propagateIRFlags(V, E->Scalars, VL0);
      if (auto *I = dyn_cast<Instruction>(V))
        V = propagateMetadata(I, E->Scalars);

      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;

      return V;
    }
    case Instruction::Load: {
      // Loads are inserted at the head of the tree because we don't want to
      // sink them all the way down past store instructions.
      bool IsReorder = E->updateStateIfReorder();
      if (IsReorder)
        VL0 = E->getMainOp();
      setInsertPointAfterBundle(E);

      LoadInst *LI = cast<LoadInst>(VL0);
      Type *ScalarLoadTy = LI->getType();
      unsigned AS = LI->getPointerAddressSpace();

      Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
                                            VecTy->getPointerTo(AS));

      // The pointer operand uses an in-tree scalar so we add the new BitCast to
      // ExternalUses list to make sure that an extract will be generated in the
      // future.
      Value *PO = LI->getPointerOperand();
      if (getTreeEntry(PO))
        ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0));

      MaybeAlign Alignment = MaybeAlign(LI->getAlignment());
      LI = Builder.CreateLoad(VecTy, VecPtr);
      if (!Alignment)
        Alignment = MaybeAlign(DL->getABITypeAlignment(ScalarLoadTy));
      LI->setAlignment(Alignment);
      Value *V = propagateMetadata(LI, E->Scalars);
      if (IsReorder) {
        SmallVector<unsigned, 4> Mask;
        inversePermutation(E->ReorderIndices, Mask);
        V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()),
                                        Mask, "reorder_shuffle");
      }
      if (NeedToShuffleReuses) {
        // TODO: Merge this shuffle with the ReorderShuffleMask.
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;
      return V;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(VL0);
      unsigned Alignment = SI->getAlignment();
      unsigned AS = SI->getPointerAddressSpace();

      setInsertPointAfterBundle(E);

      Value *VecValue = vectorizeTree(E->getOperand(0));
      Value *ScalarPtr = SI->getPointerOperand();
      Value *VecPtr = Builder.CreateBitCast(ScalarPtr, VecTy->getPointerTo(AS));
      StoreInst *ST = Builder.CreateStore(VecValue, VecPtr);

      // The pointer operand uses an in-tree scalar, so add the new BitCast to
      // ExternalUses to make sure that an extract will be generated in the
      // future.
      if (getTreeEntry(ScalarPtr))
        ExternalUses.push_back(ExternalUser(ScalarPtr, cast<User>(VecPtr), 0));

      if (!Alignment)
        Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType());

      ST->setAlignment(Align(Alignment));
      Value *V = propagateMetadata(ST, E->Scalars);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;
      return V;
    }
    case Instruction::GetElementPtr: {
      setInsertPointAfterBundle(E);

      Value *Op0 = vectorizeTree(E->getOperand(0));

      std::vector<Value *> OpVecs;
      for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
           ++j) {
        Value *OpVec = vectorizeTree(E->getOperand(j));
        OpVecs.push_back(OpVec);
      }

      Value *V = Builder.CreateGEP(
          cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
      if (Instruction *I = dyn_cast<Instruction>(V))
        V = propagateMetadata(I, E->Scalars);

      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;

      return V;
    }
    case Instruction::Call: {
      CallInst *CI = cast<CallInst>(VL0);
      setInsertPointAfterBundle(E);

      Intrinsic::ID IID = Intrinsic::not_intrinsic;
      if (Function *FI = CI->getCalledFunction())
        IID = FI->getIntrinsicID();

      Value *ScalarArg = nullptr;
      std::vector<Value *> OpVecs;
      for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
        // Some intrinsics have scalar arguments. This argument should not be
        // vectorized.
        if (hasVectorInstrinsicScalarOpd(IID, j)) {
          CallInst *CEI = cast<CallInst>(VL0);
          ScalarArg = CEI->getArgOperand(j);
          OpVecs.push_back(CEI->getArgOperand(j));
          continue;
        }

        Value *OpVec = vectorizeTree(E->getOperand(j));
        LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
        OpVecs.push_back(OpVec);
      }

      Module *M = F->getParent();
      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      Type *Tys[] = {VectorType::get(CI->getType(), E->Scalars.size())};
      Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
      SmallVector<OperandBundleDef, 1> OpBundles;
      CI->getOperandBundlesAsDefs(OpBundles);
      Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);

      // The scalar argument uses an in-tree scalar so we add the new vectorized
      // call to ExternalUses list to make sure that an extract will be
      // generated in the future.
      if (ScalarArg && getTreeEntry(ScalarArg))
        ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));

      propagateIRFlags(V, E->Scalars, VL0);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;
      return V;
    }
    case Instruction::ShuffleVector: {
      assert(E->isAltShuffle() &&
             ((Instruction::isBinaryOp(E->getOpcode()) &&
               Instruction::isBinaryOp(E->getAltOpcode())) ||
              (Instruction::isCast(E->getOpcode()) &&
               Instruction::isCast(E->getAltOpcode()))) &&
             "Invalid Shuffle Vector Operand");

      Value *LHS = nullptr, *RHS = nullptr;
      if (Instruction::isBinaryOp(E->getOpcode())) {
        setInsertPointAfterBundle(E);
        LHS = vectorizeTree(E->getOperand(0));
        RHS = vectorizeTree(E->getOperand(1));
      } else {
        setInsertPointAfterBundle(E);
        LHS = vectorizeTree(E->getOperand(0));
      }

      if (E->VectorizedValue) {
        LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
        return E->VectorizedValue;
      }

      Value *V0, *V1;
      if (Instruction::isBinaryOp(E->getOpcode())) {
        V0 = Builder.CreateBinOp(
            static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
        V1 = Builder.CreateBinOp(
            static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS);
      } else {
        V0 = Builder.CreateCast(
            static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy);
        V1 = Builder.CreateCast(
            static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy);
      }

      // Create shuffle to take alternate operations from the vector.
      // Also, gather up main and alt scalar ops to propagate IR flags to
      // each vector operation.
      ValueList OpScalars, AltScalars;
      unsigned e = E->Scalars.size();
      SmallVector<Constant *, 8> Mask(e);
      for (unsigned i = 0; i < e; ++i) {
        auto *OpInst = cast<Instruction>(E->Scalars[i]);
        assert(E->isOpcodeOrAlt(OpInst) && "Unexpected main/alternate opcode");
        if (OpInst->getOpcode() == E->getAltOpcode()) {
          Mask[i] = Builder.getInt32(e + i);
          AltScalars.push_back(E->Scalars[i]);
        } else {
          Mask[i] = Builder.getInt32(i);
          OpScalars.push_back(E->Scalars[i]);
        }
      }
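      // For example, for a 4-wide <add, sub, add, sub> bundle the loop above
      // produces Mask = <0, 5, 2, 7>: main-opcode lanes select from V0 and
      // alternate-opcode lanes select from V1 (offset by the bundle width e).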
      Value *ShuffleMask = ConstantVector::get(Mask);
      propagateIRFlags(V0, OpScalars);
      propagateIRFlags(V1, AltScalars);

      Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
      if (Instruction *I = dyn_cast<Instruction>(V))
        V = propagateMetadata(I, E->Scalars);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;

      return V;
    }
    default:
      llvm_unreachable("unknown inst");
  }
  return nullptr;
}

Value *BoUpSLP::vectorizeTree() {
  ExtraValueToDebugLocsMap ExternallyUsedValues;
  return vectorizeTree(ExternallyUsedValues);
}

Value *
BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
  // All blocks must be scheduled before any instructions are inserted.
  for (auto &BSIter : BlocksSchedules) {
    scheduleBlock(BSIter.second.get());
  }

  Builder.SetInsertPoint(&F->getEntryBlock().front());
  auto *VectorRoot = vectorizeTree(VectorizableTree[0].get());

  // If the vectorized tree can be rewritten in a smaller type, we truncate the
  // vectorized root. InstCombine will then rewrite the entire expression. We
  // sign extend the extracted values below.
  auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
  if (MinBWs.count(ScalarRoot)) {
    if (auto *I = dyn_cast<Instruction>(VectorRoot))
      Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
    auto BundleWidth = VectorizableTree[0]->Scalars.size();
    auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
    auto *VecTy = VectorType::get(MinTy, BundleWidth);
    auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
    VectorizableTree[0]->VectorizedValue = Trunc;
  }

  LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
                    << " values.\n");

  // If necessary, sign-extend or zero-extend ScalarRoot to the larger type
  // specified by ScalarType.
  auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) {
    if (!MinBWs.count(ScalarRoot))
      return Ex;
    if (MinBWs[ScalarRoot].second)
      return Builder.CreateSExt(Ex, ScalarType);
    return Builder.CreateZExt(Ex, ScalarType);
  };

  // Extract all of the elements with the external uses.
  for (const auto &ExternalUse : ExternalUses) {
    Value *Scalar = ExternalUse.Scalar;
    llvm::User *User = ExternalUse.User;

    // Skip users that we already RAUW. This happens when one instruction
    // has multiple uses of the same value.
    if (User && !is_contained(Scalar->users(), User))
      continue;
    TreeEntry *E = getTreeEntry(Scalar);
    assert(E && "Invalid scalar");
    assert(!E->NeedToGather && "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(ExternalUse.Lane);
    // If User == nullptr, the Scalar is used as extra arg. Generate
    // ExtractElement instruction and update the record for this scalar in
    // ExternallyUsedValues.
    if (!User) {
      assert(ExternallyUsedValues.count(Scalar) &&
             "Scalar with nullptr as an external user must be registered in "
             "ExternallyUsedValues map");
      if (auto *VecI = dyn_cast<Instruction>(Vec)) {
        Builder.SetInsertPoint(VecI->getParent(),
                               std::next(VecI->getIterator()));
      } else {
        Builder.SetInsertPoint(&F->getEntryBlock().front());
      }
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      Ex = extend(ScalarRoot, Ex, Scalar->getType());
      CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
      auto &Locs = ExternallyUsedValues[Scalar];
      ExternallyUsedValues.insert({Ex, Locs});
      ExternallyUsedValues.erase(Scalar);
      // Required to update internally referenced instructions.
      Scalar->replaceAllUsesWith(Ex);
      continue;
    }

    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
    if (auto *VecI = dyn_cast<Instruction>(Vec)) {
      if (PHINode *PH = dyn_cast<PHINode>(User)) {
        for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
          if (PH->getIncomingValue(i) == Scalar) {
            Instruction *IncomingTerminator =
                PH->getIncomingBlock(i)->getTerminator();
            if (isa<CatchSwitchInst>(IncomingTerminator)) {
              Builder.SetInsertPoint(VecI->getParent(),
                                     std::next(VecI->getIterator()));
            } else {
              Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
            }
            Value *Ex = Builder.CreateExtractElement(Vec, Lane);
            Ex = extend(ScalarRoot, Ex, Scalar->getType());
            CSEBlocks.insert(PH->getIncomingBlock(i));
            PH->setOperand(i, Ex);
          }
        }
      } else {
        Builder.SetInsertPoint(cast<Instruction>(User));
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
        Ex = extend(ScalarRoot, Ex, Scalar->getType());
        CSEBlocks.insert(cast<Instruction>(User)->getParent());
        User->replaceUsesOfWith(Scalar, Ex);
      }
    } else {
      Builder.SetInsertPoint(&F->getEntryBlock().front());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      Ex = extend(ScalarRoot, Ex, Scalar->getType());
      CSEBlocks.insert(&F->getEntryBlock());
      User->replaceUsesOfWith(Scalar, Ex);
    }

    LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
  }

  // For each vectorized value:
  for (auto &TEPtr : VectorizableTree) {
    TreeEntry *Entry = TEPtr.get();

    // No need to handle users of gathered values.
    if (Entry->NeedToGather)
      continue;

    assert(Entry->VectorizedValue && "Can't find vectorizable value");

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

#ifndef NDEBUG
      Type *Ty = Scalar->getType();
      if (!Ty->isVoidTy()) {
        for (User *U : Scalar->users()) {
          LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");

          // It is legal to delete users in the ignorelist.
          assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) &&
                 "Deleting out-of-tree value");
        }
      }
#endif
      LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
      eraseInstruction(cast<Instruction>(Scalar));
    }
  }

  Builder.ClearInsertionPoint();

  return VectorizableTree[0]->VectorizedValue;
}

void BoUpSLP::optimizeGatherSequence() {
  LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
                    << " gather sequence instructions.\n");
  // LICM InsertElementInst sequences.
  for (Instruction *I : GatherSeq) {
    if (!isa<InsertElementInst>(I) && !isa<ShuffleVectorInst>(I))
      continue;

    // Check if this block is inside a loop.
    Loop *L = LI->getLoopFor(I->getParent());
    if (!L)
      continue;

    // Check if it has a preheader.
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it are
    // instructions that are defined in this basic block then we can't
    // hoist this instruction.
    auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
    auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
    if (Op0 && L->contains(Op0))
      continue;
    if (Op1 && L->contains(Op1))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    I->moveBefore(PreHeader->getTerminator());
  }

  // Make a list of all reachable blocks in our CSE queue.
  SmallVector<const DomTreeNode *, 8> CSEWorkList;
  CSEWorkList.reserve(CSEBlocks.size());
  for (BasicBlock *BB : CSEBlocks)
    if (DomTreeNode *N = DT->getNode(BB)) {
      assert(DT->isReachableFromEntry(N));
      CSEWorkList.push_back(N);
    }

  // Sort blocks by domination. This ensures we visit a block after all blocks
  // dominating it are visited.
  llvm::stable_sort(CSEWorkList,
                    [this](const DomTreeNode *A, const DomTreeNode *B) {
                      return DT->properlyDominates(A, B);
                    });
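  // Visiting dominating blocks first means that by the time a block is
  // processed, any identical gather instruction in one of its dominators is
  // already in the Visited list and can safely replace the later copy.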
  // Perform O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallVector<Instruction *, 16> Visited;
  for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
    assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
           "Worklist not sorted properly!");
    BasicBlock *BB = (*I)->getBlock();
    // For all instructions in blocks containing gather sequences:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
      Instruction *In = &*it++;
      if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
      bool Replaced = false;
      for (Instruction *v : Visited) {
        if (In->isIdenticalTo(v) &&
            DT->dominates(v->getParent(), In->getParent())) {
          In->replaceAllUsesWith(v);
          eraseInstruction(In);
          Replaced = true;
          break;
        }
      }
      if (!Replaced) {
        assert(!is_contained(Visited, In));
        Visited.push_back(In);
      }
    }
  }
  CSEBlocks.clear();
  GatherSeq.clear();
}

// Groups the instructions to a bundle (which is then a single scheduling
// entity) and schedules instructions until the bundle gets ready.
Optional<BoUpSLP::ScheduleData *>
BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
                                            const InstructionsState &S) {
  if (isa<PHINode>(S.OpValue))
    return nullptr;

  // Initialize the instruction bundle.
  Instruction *OldScheduleEnd = ScheduleEnd;
  ScheduleData *PrevInBundle = nullptr;
  ScheduleData *Bundle = nullptr;
  bool ReSchedule = false;
  LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n");

  // Make sure that the scheduling region contains all
  // instructions of the bundle.
  for (Value *V : VL) {
    if (!extendSchedulingRegion(V, S))
      return None;
  }

  for (Value *V : VL) {
    ScheduleData *BundleMember = getScheduleData(V);
    assert(BundleMember &&
           "no ScheduleData for bundle member (maybe not in same basic block)");
    if (BundleMember->IsScheduled) {
      // A bundle member was scheduled as single instruction before and now
      // needs to be scheduled as part of the bundle. We just get rid of the
      // existing schedule.
      LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
                        << " was already scheduled\n");
      ReSchedule = true;
    }
    assert(BundleMember->isSchedulingEntity() &&
           "bundle member already part of other bundle");
    if (PrevInBundle) {
      PrevInBundle->NextInBundle = BundleMember;
    } else {
      Bundle = BundleMember;
    }
    BundleMember->UnscheduledDepsInBundle = 0;
    Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;

    // Group the instructions to a bundle.
    BundleMember->FirstInBundle = Bundle;
    PrevInBundle = BundleMember;
  }
  if (ScheduleEnd != OldScheduleEnd) {
    // The scheduling region got new instructions at the lower end (or it is a
    // new region for the first bundle). This makes it necessary to
    // recalculate all dependencies.
    // It is seldom that this needs to be done a second time after adding the
    // initial bundle to the region.
    for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
      doForAllOpcodes(I, [](ScheduleData *SD) {
        SD->clearDependencies();
      });
    }
    ReSchedule = true;
  }
  if (ReSchedule) {
    resetSchedule();
    initialFillReadyList(ReadyInsts);
  }
  assert(Bundle && "Failed to find schedule bundle");

  LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
                    << BB->getName() << "\n");

  calculateDependencies(Bundle, true, SLP);

  // Now try to schedule the new bundle. As soon as the bundle is "ready" it
  // means that there are no cyclic dependencies and we can schedule it.
  // Note that's important that we don't "schedule" the bundle yet (see
  // cancelScheduling).
  while (!Bundle->isReady() && !ReadyInsts.empty()) {
    ScheduleData *pickedSD = ReadyInsts.back();
    ReadyInsts.pop_back();

    if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
      schedule(pickedSD, ReadyInsts);
    }
  }
  if (!Bundle->isReady()) {
    cancelScheduling(VL, S.OpValue);
    return None;
  }
  return Bundle;
}

void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
                                                Value *OpValue) {
  if (isa<PHINode>(OpValue))
    return;

  ScheduleData *Bundle = getScheduleData(OpValue);
  LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
  assert(!Bundle->IsScheduled &&
         "Can't cancel bundle which is already scheduled");
  assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
         "tried to unbundle something which is not a bundle");

  // Un-bundle: make single instructions out of the bundle.
  ScheduleData *BundleMember = Bundle;
  while (BundleMember) {
    assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
    BundleMember->FirstInBundle = BundleMember;
    ScheduleData *Next = BundleMember->NextInBundle;
    BundleMember->NextInBundle = nullptr;
    BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
    if (BundleMember->UnscheduledDepsInBundle == 0) {
      ReadyInsts.insert(BundleMember);
    }
    BundleMember = Next;
  }
}

BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
  // Allocate a new ScheduleData for the instruction.
  if (ChunkPos >= ChunkSize) {
    ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize));
    ChunkPos = 0;
  }
  return &(ScheduleDataChunks.back()[ChunkPos++]);
}

bool BoUpSLP::BlockScheduling::extendSchedulingRegion(
    Value *V, const InstructionsState &S) {
  if (getScheduleData(V, isOneOf(S, V)))
    return true;
  Instruction *I = dyn_cast<Instruction>(V);
  assert(I && "bundle member must be an instruction");
  assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
  auto &&CheckSheduleForI = [this, &S](Instruction *I) -> bool {
    ScheduleData *ISD = getScheduleData(I);
    if (!ISD)
      return false;
    assert(isInSchedulingRegion(ISD) &&
           "ScheduleData not in scheduling region");
    ScheduleData *SD = allocateScheduleDataChunks();
    SD->Inst = I;
    SD->init(SchedulingRegionID, S.OpValue);
    ExtraScheduleDataMap[I][S.OpValue] = SD;
    return true;
  };
  if (CheckSheduleForI(I))
    return true;
  if (!ScheduleStart) {
    // It's the first instruction in the new region.
    initScheduleData(I, I->getNextNode(), nullptr, nullptr);
    ScheduleStart = I;
    ScheduleEnd = I->getNextNode();
    if (isOneOf(S, I) != I)
      CheckSheduleForI(I);
    assert(ScheduleEnd && "tried to vectorize a terminator?");
    LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
    return true;
  }
  // Search up and down at the same time, because we don't know if the new
  // instruction is above or below the existing scheduling region.
  BasicBlock::reverse_iterator UpIter =
      ++ScheduleStart->getIterator().getReverse();
  BasicBlock::reverse_iterator UpperEnd = BB->rend();
  BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
  BasicBlock::iterator LowerEnd = BB->end();
  while (true) {
    if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
      LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
      return false;
    }

    if (UpIter != UpperEnd) {
      if (&*UpIter == I) {
        initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
        ScheduleStart = I;
        if (isOneOf(S, I) != I)
          CheckSheduleForI(I);
        LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I
                          << "\n");
        return true;
      }
      ++UpIter;
    }
    if (DownIter != LowerEnd) {
      if (&*DownIter == I) {
        initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
                         nullptr);
        ScheduleEnd = I->getNextNode();
        if (isOneOf(S, I) != I)
          CheckSheduleForI(I);
        assert(ScheduleEnd && "tried to vectorize a terminator?");
        LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I
                          << "\n");
        return true;
      }
      ++DownIter;
    }
    assert((UpIter != UpperEnd || DownIter != LowerEnd) &&
           "instruction not found in block");
  }
  return true;
}

void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
                                                Instruction *ToI,
                                                ScheduleData *PrevLoadStore,
                                                ScheduleData *NextLoadStore) {
  ScheduleData *CurrentLoadStore = PrevLoadStore;
  for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
    ScheduleData *SD = ScheduleDataMap[I];
    if (!SD) {
      SD = allocateScheduleDataChunks();
      ScheduleDataMap[I] = SD;
      SD->Inst = I;
    }
    assert(!isInSchedulingRegion(SD) &&
           "new ScheduleData already in scheduling region");
    SD->init(SchedulingRegionID, I);

    if (I->mayReadOrWriteMemory() &&
        (!isa<IntrinsicInst>(I) ||
         cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect)) {
      // Update the linked list of memory accessing instructions.
      if (CurrentLoadStore) {
        CurrentLoadStore->NextLoadStore = SD;
      } else {
        FirstLoadStoreInRegion = SD;
      }
      CurrentLoadStore = SD;
    }
  }
  if (NextLoadStore) {
    if (CurrentLoadStore)
      CurrentLoadStore->NextLoadStore = NextLoadStore;
  } else {
    LastLoadStoreInRegion = CurrentLoadStore;
  }
}

void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
                                                     bool InsertInReadyList,
                                                     BoUpSLP *SLP) {
  assert(SD->isSchedulingEntity());

  SmallVector<ScheduleData *, 10> WorkList;
  WorkList.push_back(SD);

  while (!WorkList.empty()) {
    ScheduleData *SD = WorkList.back();
    WorkList.pop_back();

    ScheduleData *BundleMember = SD;
    while (BundleMember) {
      assert(isInSchedulingRegion(BundleMember));
      if (!BundleMember->hasValidDependencies()) {

        LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
                          << "\n");
        BundleMember->Dependencies = 0;
        BundleMember->resetUnscheduledDeps();

        // Handle def-use chain dependencies.
        if (BundleMember->OpValue != BundleMember->Inst) {
          ScheduleData *UseSD = getScheduleData(BundleMember->Inst);
          if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
            BundleMember->Dependencies++;
            ScheduleData *DestBundle = UseSD->FirstInBundle;
            if (!DestBundle->IsScheduled)
              BundleMember->incrementUnscheduledDeps(1);
            if (!DestBundle->hasValidDependencies())
              WorkList.push_back(DestBundle);
          }
        } else {
          for (User *U : BundleMember->Inst->users()) {
            if (isa<Instruction>(U)) {
              ScheduleData *UseSD = getScheduleData(U);
              if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
                BundleMember->Dependencies++;
                ScheduleData *DestBundle = UseSD->FirstInBundle;
                if (!DestBundle->IsScheduled)
                  BundleMember->incrementUnscheduledDeps(1);
                if (!DestBundle->hasValidDependencies())
                  WorkList.push_back(DestBundle);
              }
            } else {
              // I'm not sure if this can ever happen. But we need to be safe.
              // This lets the instruction/bundle never be scheduled and
              // eventually disable vectorization.
              BundleMember->Dependencies++;
              BundleMember->incrementUnscheduledDeps(1);
            }
          }
        }

        // Handle the memory dependencies.
        ScheduleData *DepDest = BundleMember->NextLoadStore;
        if (DepDest) {
          Instruction *SrcInst = BundleMember->Inst;
          MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
          bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
          unsigned numAliased = 0;
          unsigned DistToSrc = 1;

          while (DepDest) {
            assert(isInSchedulingRegion(DepDest));

            // We have two limits to reduce the complexity:
            // 1) AliasedCheckLimit: It's a small limit to reduce calls to
            //    SLP->isAliased (which is the expensive part in this loop).
            // 2) MaxMemDepDistance: It's for very large blocks and it aborts
            //    the whole loop (even if the loop is fast, it's quadratic).
            //    It's important for the loop break condition (see below) to
            //    check this limit even between two read-only instructions.
            if (DistToSrc >= MaxMemDepDistance ||
                ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
                 (numAliased >= AliasedCheckLimit ||
                  SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {

              // We increment the counter only if the locations are aliased
              // (instead of counting all alias checks). This gives a better
              // balance between reduced runtime and accurate dependencies.
              numAliased++;

              DepDest->MemoryDependencies.push_back(BundleMember);
              BundleMember->Dependencies++;
              ScheduleData *DestBundle = DepDest->FirstInBundle;
              if (!DestBundle->IsScheduled) {
                BundleMember->incrementUnscheduledDeps(1);
              }
              if (!DestBundle->hasValidDependencies()) {
                WorkList.push_back(DestBundle);
              }
            }
            DepDest = DepDest->NextLoadStore;

            // Example, explaining the loop break condition: Let's assume our
            // starting instruction is i0 and MaxMemDepDistance = 3.
            //
            //                      +--------v--v--v
            //             i0,i1,i2,i3,i4,i5,i6,i7,i8
            //             +--------^--^--^
            //
            // MaxMemDepDistance let us stop alias-checking at i3 and we add
            // dependencies from i0 to i3,i4,.. (even if they are not aliased).
            // Previously we already added dependencies from i3 to i6,i7,i8
            // (because of MaxMemDepDistance). As we added a dependency from
            // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
            // and we can abort this loop at i6.
            if (DistToSrc >= 2 * MaxMemDepDistance)
              break;
            DistToSrc++;
          }
        }
      }
      BundleMember = BundleMember->NextInBundle;
    }
    if (InsertInReadyList && SD->isReady()) {
      ReadyInsts.push_back(SD);
      LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst
                        << "\n");
    }
  }
}

void BoUpSLP::BlockScheduling::resetSchedule() {
  assert(ScheduleStart &&
         "tried to reset schedule on block which has not been scheduled");
  for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
    doForAllOpcodes(I, [&](ScheduleData *SD) {
      assert(isInSchedulingRegion(SD) &&
             "ScheduleData not in scheduling region");
      SD->IsScheduled = false;
      SD->resetUnscheduledDeps();
    });
  }
  ReadyInsts.clear();
}

void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
  if (!BS->ScheduleStart)
    return;

  LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");

  BS->resetSchedule();

  // For the real scheduling we use a more sophisticated ready-list: it is
  // sorted by the original instruction location. This lets the final schedule
  // be as close as possible to the original instruction order.
  struct ScheduleDataCompare {
    bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
      return SD2->SchedulingPriority < SD1->SchedulingPriority;
    }
  };
  std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;

  // Ensure that all dependency data is updated and fill the ready-list with
  // initial instructions.
  int Idx = 0;
  int NumToSchedule = 0;
  for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
       I = I->getNextNode()) {
    BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) {
      assert(SD->isPartOfBundle() ==
                 (getTreeEntry(SD->Inst) != nullptr) &&
             "scheduler and vectorizer bundle mismatch");
      SD->FirstInBundle->SchedulingPriority = Idx++;
      if (SD->isSchedulingEntity()) {
        BS->calculateDependencies(SD, false, this);
        NumToSchedule++;
      }
    });
  }
  BS->initialFillReadyList(ReadyInsts);

  Instruction *LastScheduledInst = BS->ScheduleEnd;

  // Do the "real" scheduling.
  while (!ReadyInsts.empty()) {
    ScheduleData *picked = *ReadyInsts.begin();
    ReadyInsts.erase(ReadyInsts.begin());

    // Move the scheduled instruction(s) to their dedicated places, if not
    // already done.
    ScheduleData *BundleMember = picked;
    while (BundleMember) {
      Instruction *pickedInst = BundleMember->Inst;
      if (LastScheduledInst->getNextNode() != pickedInst) {
        BS->BB->getInstList().remove(pickedInst);
        BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
                                     pickedInst);
      }
      LastScheduledInst = pickedInst;
      BundleMember = BundleMember->NextInBundle;
    }

    BS->schedule(picked, ReadyInsts);
    NumToSchedule--;
  }
  assert(NumToSchedule == 0 && "could not schedule all instructions");

  // Avoid duplicate scheduling of the block.
  BS->ScheduleStart = nullptr;
}

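// For example, for an i32 add whose operands are zexts of i8 loads, the walk
// below reaches the loads and returns 8 rather than the 32 bits of the add's
// own type, since the memory width is the better basis for the vector factor.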
unsigned BoUpSLP::getVectorElementSize(Value *V) const {
  // If V is a store, just return the width of the stored value without
  // traversing the expression tree. This is the common case.
  if (auto *Store = dyn_cast<StoreInst>(V))
    return DL->getTypeSizeInBits(Store->getValueOperand()->getType());

  // If V is not a store, we can traverse the expression tree to find loads
  // that feed it. The type of the loaded value may indicate a more suitable
  // width than V's type. We want to base the vector element size on the width
  // of memory operations where possible.
  SmallVector<Instruction *, 16> Worklist;
  SmallPtrSet<Instruction *, 16> Visited;
  if (auto *I = dyn_cast<Instruction>(V))
    Worklist.push_back(I);

  // Traverse the expression tree in bottom-up order looking for loads. If we
  // encounter an instruction we don't yet handle, we give up.
  auto MaxWidth = 0u;
  auto FoundUnknownInst = false;
  while (!Worklist.empty() && !FoundUnknownInst) {
    auto *I = Worklist.pop_back_val();
    Visited.insert(I);

    // We should only be looking at scalar instructions here. If the current
    // instruction has a vector type, give up.
    auto *Ty = I->getType();
    if (isa<VectorType>(Ty))
      FoundUnknownInst = true;

    // If the current instruction is a load, update MaxWidth to reflect the
    // width of the loaded value.
    else if (isa<LoadInst>(I))
      MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty));

    // Otherwise, we need to visit the operands of the instruction. We only
    // handle the interesting cases from buildTree here. If an operand is an
    // instruction we haven't yet visited, we add it to the worklist.
    else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
             isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) {
      for (Use &U : I->operands())
        if (auto *J = dyn_cast<Instruction>(U.get()))
          if (!Visited.count(J))
            Worklist.push_back(J);
    }

    // If we don't yet handle the instruction, give up.
    else
      FoundUnknownInst = true;
  }

  // If we didn't encounter a memory access in the expression tree, or if we
  // gave up for some reason, just return the width of V.
  if (!MaxWidth || FoundUnknownInst)
    return DL->getTypeSizeInBits(V->getType());

  // Otherwise, return the maximum width we found.
  return MaxWidth;
}

// Determine if a value V in a vectorizable expression Expr can be demoted to a
// smaller type with a truncation. We collect the values that will be demoted
// in ToDemote and additional roots that require investigating in Roots.
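// For example, if V is an add whose two operands are single-use zexts that
// are also in Expr, the zexts and the add are all appended to ToDemote; a
// trunc operand would additionally push its scalar operand onto Roots for
// later investigation.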
static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
                                  SmallVectorImpl<Value *> &ToDemote,
                                  SmallVectorImpl<Value *> &Roots) {
  // We can always demote constants.
  if (isa<Constant>(V)) {
    ToDemote.push_back(V);
    return true;
  }

  // If the value is not an instruction in the expression with only one use, it
  // cannot be demoted.
  auto *I = dyn_cast<Instruction>(V);
  if (!I || !I->hasOneUse() || !Expr.count(I))
    return false;

  switch (I->getOpcode()) {

  // We can always demote truncations and extensions. Since truncations can
  // seed additional demotion, we save the truncated value.
  case Instruction::Trunc:
    Roots.push_back(I->getOperand(0));
    break;
  case Instruction::ZExt:
  case Instruction::SExt:
    break;

  // We can demote certain binary operations if we can demote both of their
  // operands.
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
        !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
      return false;
    break;

  // We can demote selects if we can demote their true and false values.
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
        !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
      return false;
    break;
  }

  // We can demote phis if we can demote all their incoming operands. Note that
  // we don't need to worry about cycles since we ensure single use above.
  case Instruction::PHI: {
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
        return false;
    break;
  }

  // Otherwise, conservatively give up.
  default:
    return false;
  }

  // Record the value that we can demote.
  ToDemote.push_back(V);
  return true;
}

void BoUpSLP::computeMinimumValueSizes() {
  // If there are no external uses, the expression tree must be rooted by a
  // store. We can't demote in-memory values, so there is nothing to do here.
  if (ExternalUses.empty())
    return;

  // We only attempt to truncate integer expressions.
  auto &TreeRoot = VectorizableTree[0]->Scalars;
  auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
  if (!TreeRootIT)
    return;

  // If the expression is not rooted by a store, these roots should have
  // external uses. We will rely on InstCombine to rewrite the expression in
  // the narrower type. However, InstCombine only rewrites single-use values.
  // This means that if a tree entry other than a root is used externally, it
  // must have multiple uses and InstCombine will not rewrite it. The code
  // below ensures that only the roots are used externally.
  SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
  for (auto &EU : ExternalUses)
    if (!Expr.erase(EU.Scalar))
      return;

  // Collect the scalar values of the vectorizable expression. We will use this
  // context to determine which values can be demoted. If we see a truncation,
  // we mark it as seeding another demotion.
  for (auto &EntryPtr : VectorizableTree)
    Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end());

  // Ensure the roots of the vectorizable tree don't form a cycle. They must
  // have a single external user that is not in the vectorizable tree.
  for (auto *Root : TreeRoot)
    if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
      return;

  // Conservatively determine if we can actually truncate the roots of the
  // expression. Collect the values that can be demoted in ToDemote and
  // additional roots that require investigating in Roots.
  SmallVector<Value *, 32> ToDemote;
  SmallVector<Value *, 4> Roots;
  for (auto *Root : TreeRoot)
    if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
      return;

  // The maximum bit width required to represent all the values that can be
  // demoted without loss of precision. It would be safe to truncate the roots
  // of the expression to this width.
  auto MaxBitWidth = 8u;

  // We first check if all the bits of the roots are demanded. If they're not,
  // we can truncate the roots to this narrower type.
  for (auto *Root : TreeRoot) {
    auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
    MaxBitWidth = std::max<unsigned>(
        Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
  }

  // True if the roots can be zero-extended back to their original type, rather
  // than sign-extended. We know that if the leading bits are not demanded, we
  // can safely zero-extend. So we initialize IsKnownPositive to True.
  bool IsKnownPositive = true;

  // If all the bits of the roots are demanded, we can try a little harder to
  // compute a narrower type. This can happen, for example, if the roots are
  // getelementptr indices. InstCombine promotes these indices to the pointer
  // width. Thus, all their bits are technically demanded even though the
  // address computation might be vectorized in a smaller type.
  //
  // We start by looking at each entry that can be demoted. We compute the
  // maximum bit width required to store the scalar by using ValueTracking to
  // compute the number of high-order bits we can truncate.
  if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
      llvm::all_of(TreeRoot, [](Value *R) {
        assert(R->hasOneUse() && "Root should have only one use!");
        return isa<GetElementPtrInst>(R->user_back());
      })) {

    // Determine if the sign bit of all the roots is known to be zero. If not,
    // IsKnownPositive is set to False.
    IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
      KnownBits Known = computeKnownBits(R, *DL);
      return Known.isNonNegative();
    });

    // Determine the maximum number of bits required to store the scalar
    // values.
    for (auto *Scalar : ToDemote) {
      auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
      auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
      MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
    }

    // If we can't prove that the sign bit is zero, we must add one to the
    // maximum bit width to account for the unknown sign bit. This preserves
    // the existing sign bit so we can safely sign-extend the root back to the
    // original type. Otherwise, if we know the sign bit is zero, we will
    // zero-extend the root instead.
    //
    // FIXME: This is somewhat suboptimal, as there will be cases where adding
    //        one to the maximum bit width will yield a larger-than-necessary
    //        type. In general, we need to add an extra bit only if we can't
    //        prove that the upper bit of the original type is equal to the
    //        upper bit of the proposed smaller type. If these two bits are the
    //        same (either zero or one) we know that sign-extending from the
    //        smaller type will result in the same value. Here, since we can't
    //        yet prove this, we are just making the proposed smaller type
    //        larger to ensure correctness.
    if (!IsKnownPositive)
      ++MaxBitWidth;
  }

  // Round MaxBitWidth up to the next power-of-two.
  if (!isPowerOf2_64(MaxBitWidth))
    MaxBitWidth = NextPowerOf2(MaxBitWidth);

  // If the maximum bit width we compute is less than the width of the roots'
  // type, we can proceed with the narrowing. Otherwise, do nothing.
  if (MaxBitWidth >= TreeRootIT->getBitWidth())
    return;

  // If we can truncate the root, we must collect additional values that might
  // be demoted as a result. That is, those seeded by truncations we will
  // modify.
  while (!Roots.empty())
    collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);

  // Finally, map the values we can demote to the maximum bit width we
  // computed.
  for (auto *Scalar : ToDemote)
    MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
}
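// Worked example for the computation above (illustrative): if the tree roots
// are i32 values of which DemandedBits reports only the low 8 bits demanded,
// MaxBitWidth becomes 32 - 24 = 8, which is already a power of two and smaller
// than 32, so every demotable scalar is mapped to an 8-bit minimum width and
// the roots can later be computed in i8 and zero- or sign-extended back.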
namespace {

/// The SLPVectorizer Pass.
struct SLPVectorizer : public FunctionPass {
  SLPVectorizerPass Impl;

  /// Pass identification, replacement for typeid
  static char ID;

  explicit SLPVectorizer() : FunctionPass(ID) {
    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override {
    return false;
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

    return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    FunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace
PreservedAnalyses SLPVectorizerPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
  auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
  auto *AA = &AM.getResult<AAManager>(F);
  auto *LI = &AM.getResult<LoopAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
  auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<AAManager>();
  PA.preserve<GlobalsAA>();
  return PA;
}
bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
                                TargetTransformInfo *TTI_,
                                TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
                                LoopInfo *LI_, DominatorTree *DT_,
                                AssumptionCache *AC_, DemandedBits *DB_,
                                OptimizationRemarkEmitter *ORE_) {
  SE = SE_;
  TTI = TTI_;
  TLI = TLI_;
  AA = AA_;
  LI = LI_;
  DT = DT_;
  AC = AC_;
  DB = DB_;
  DL = &F.getParent()->getDataLayout();

  Stores.clear();
  GEPs.clear();
  bool Changed = false;

  // If the target claims to have no vector registers don't attempt
  // vectorization.
  if (!TTI->getNumberOfRegisters(true))
    return false;

  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (F.hasFnAttribute(Attribute::NoImplicitFloat))
    return false;

  LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");

  // Use the bottom up slp vectorizer to construct chains that start with
  // store instructions.
  BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);

  // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
  // delete instructions.

  // Scan the blocks in the function in post order.
  for (auto BB : post_order(&F.getEntryBlock())) {
    collectSeedInstructions(BB);

    // Vectorize trees that end at stores.
    if (!Stores.empty()) {
      LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
                        << " underlying objects.\n");
      Changed |= vectorizeStoreChains(R);
    }

    // Vectorize trees that end at reductions.
    Changed |= vectorizeChainsInBlock(BB, R);

    // Vectorize the index computations of getelementptr instructions. This
    // is primarily intended to catch gather-like idioms ending at
    // non-consecutive loads.
    if (!GEPs.empty()) {
      LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
                        << " underlying objects.\n");
      Changed |= vectorizeGEPIndices(BB, R);
    }
  }

  if (Changed) {
    R.optimizeGatherSequence();
    LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
    LLVM_DEBUG(verifyFunction(F));
  }
  return Changed;
}
bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
                                            unsigned VecRegSize) {
  const unsigned ChainLen = Chain.size();
  LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
                    << "\n");
  const unsigned Sz = R.getVectorElementSize(Chain[0]);
  const unsigned VF = VecRegSize / Sz;

  if (!isPowerOf2_32(Sz) || VF < 2)
    return false;

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
  for (unsigned i = 0, e = ChainLen; i + VF <= e; ++i) {
    ArrayRef<Value *> Operands = Chain.slice(i, VF);
    // Check that a previous iteration of this loop did not delete the Value.
    if (llvm::any_of(Operands, [&R](Value *V) {
          auto *I = dyn_cast<Instruction>(V);
          return I && R.isDeleted(I);
        }))
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
                      << "\n");

    R.buildTree(Operands);
    if (R.isTreeTinyAndNotFullyVectorizable())
      continue;

    R.computeMinimumValueSizes();

    int Cost = R.getTreeCost();

    LLVM_DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF
                      << "\n");
    if (Cost < -SLPCostThreshold) {
      LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");

      using namespace ore;

      R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
                                          cast<StoreInst>(Chain[i]))
                       << "Stores SLP vectorized with cost " << NV("Cost", Cost)
                       << " and with tree size "
                       << NV("TreeSize", R.getTreeSize()));

      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}
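// A short worked example for vectorizeStoreChain above (illustrative): with a
// 128-bit vector register and 32-bit elements, VF = 128 / 32 = 4, so a chain
// of six stores is analyzed as the slices [0,4), [1,5), and [2,6) until one
// of them is profitable to vectorize.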
bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                        BoUpSLP &R) {
  SetVector<StoreInst *> Heads;
  SmallDenseSet<StoreInst *> Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // We may run into multiple chains that merge into a single chain. We mark the
  // stores that we vectorized so that we don't visit the same store twice.
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  auto &&FindConsecutiveAccess =
      [this, &Stores, &Heads, &Tails, &ConsecutiveChain](int K, int Idx) {
        if (!isConsecutiveAccess(Stores[K], Stores[Idx], *DL, *SE))
          return false;

        Tails.insert(Stores[Idx]);
        Heads.insert(Stores[K]);
        ConsecutiveChain[Stores[K]] = Stores[Idx];
        return true;
      };

  // Do a quadratic search on all of the given stores in reverse order and find
  // all of the pairs of stores that follow each other.
  int E = Stores.size();
  for (int Idx = E - 1; Idx >= 0; --Idx) {
    // If a store has multiple consecutive store candidates, search according
    // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
    // This is because pairing with an immediately succeeding or preceding
    // candidate usually creates the best chance of finding an SLP
    // vectorization opportunity.
    for (int Offset = 1, F = std::max(E - Idx, Idx + 1); Offset < F; ++Offset)
      if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) ||
          (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx)))
        break;
  }

  // For stores that start but don't end a link in the chain:
  for (auto *SI : llvm::reverse(Heads)) {
    if (Tails.count(SI))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and try
    // to vectorize it.
    BoUpSLP::ValueList Operands;
    StoreInst *I = SI;
    // Collect the chain into a list.
    while ((Tails.count(I) || Heads.count(I)) && !VectorizedStores.count(I)) {
      Operands.push_back(I);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    // FIXME: Is division-by-2 the correct step? Should we assert that the
    // register size is a power-of-2?
    for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize();
         Size /= 2) {
      if (vectorizeStoreChain(Operands, R, Size)) {
        // Mark the vectorized stores so that we don't vectorize them again.
        VectorizedStores.insert(Operands.begin(), Operands.end());
        Changed = true;
        break;
      }
    }
  }

  return Changed;
}
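// Illustration of the search order used above: for E = 5 and Idx = 2, the
// candidate partners are probed as 1, 3, 0, 4, i.e. nearest neighbors first,
// which tends to discover consecutive chains with the fewest probes.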
void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
  // Initialize the collections. We will make a single pass over the block.
  Stores.clear();
  GEPs.clear();

  // Visit the store and getelementptr instructions in BB and organize them in
  // Stores and GEPs according to the underlying objects of their pointer
  // operands.
  for (Instruction &I : *BB) {
    // Ignore store instructions that are volatile or have a pointer operand
    // that doesn't point to a scalar type.
    if (auto *SI = dyn_cast<StoreInst>(&I)) {
      if (!SI->isSimple())
        continue;
      if (!isValidElementType(SI->getValueOperand()->getType()))
        continue;
      Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
    }

    // Ignore getelementptr instructions that have more than one index, a
    // constant index, or a pointer operand that doesn't point to a scalar
    // type.
    else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
      auto Idx = GEP->idx_begin()->get();
      if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
        continue;
      if (!isValidElementType(Idx->getType()))
        continue;
      if (GEP->getType()->isVectorTy())
        continue;
      GEPs[GEP->getPointerOperand()].push_back(GEP);
    }
  }
}
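// For example (illustrative): stores to a[i] and a[i + 1] share the
// underlying object a, so they end up in the same Stores bucket and can later
// form a single chain, while GEPs are bucketed by their raw pointer operand.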
bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
  if (!A || !B)
    return false;
  Value *VL[] = {A, B};
  return tryToVectorizeList(VL, R, /*UserCost=*/0, true);
}
bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
                                           int UserCost, bool AllowReorder) {
  if (VL.size() < 2)
    return false;

  LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
                    << VL.size() << ".\n");

  // Check that all of the parts are scalar instructions of the same type,
  // we permit an alternate opcode via InstructionsState.
  InstructionsState S = getSameOpcode(VL);
  if (!S.getOpcode())
    return false;

  Instruction *I0 = cast<Instruction>(S.OpValue);
  unsigned Sz = R.getVectorElementSize(I0);
  unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
  unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
  if (MaxVF < 2) {
    R.getORE()->emit([&]() {
      return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
             << "Cannot SLP vectorize list: vectorization factor "
             << "less than 2 is not supported";
    });
    return false;
  }

  for (Value *V : VL) {
    Type *Ty = V->getType();
    if (!isValidElementType(Ty)) {
      // NOTE: the following will give user internal llvm type name, which may
      // not be useful.
      R.getORE()->emit([&]() {
        std::string type_str;
        llvm::raw_string_ostream rso(type_str);
        Ty->print(rso);
        return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
               << "Cannot SLP vectorize list: type "
               << rso.str() + " is unsupported by vectorizer";
      });
      return false;
    }
  }

  bool Changed = false;
  bool CandidateFound = false;
  int MinCost = SLPCostThreshold;

  unsigned NextInst = 0, MaxInst = VL.size();
  for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) {
    // No actual vectorization should happen, if number of parts is the same as
    // provided vectorization factor (i.e. the scalar type is used for vector
    // code during codegen).
    auto *VecTy = VectorType::get(VL[0]->getType(), VF);
    if (TTI->getNumberOfParts(VecTy) == VF)
      continue;
    for (unsigned I = NextInst; I < MaxInst; ++I) {
      unsigned OpsWidth = 0;

      if (I + VF > MaxInst)
        OpsWidth = MaxInst - I;
      else
        OpsWidth = VF;

      if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
        break;

      ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
      // Check that a previous iteration of this loop did not delete the Value.
      if (llvm::any_of(Ops, [&R](Value *V) {
            auto *I = dyn_cast<Instruction>(V);
            return I && R.isDeleted(I);
          }))
        continue;

      LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
                        << "\n");

      R.buildTree(Ops);
      Optional<ArrayRef<unsigned>> Order = R.bestOrder();
      // TODO: check if we can allow reordering for more cases.
      if (AllowReorder && Order) {
        // TODO: reorder tree nodes without tree rebuilding.
        // Conceptually, there is nothing actually preventing us from trying to
        // reorder a larger list. In fact, we do exactly this when vectorizing
        // reductions. However, at this point, we only expect to get here when
        // there are exactly two operations.
        assert(Ops.size() == 2);
        Value *ReorderedOps[] = {Ops[1], Ops[0]};
        R.buildTree(ReorderedOps, None);
      }
      if (R.isTreeTinyAndNotFullyVectorizable())
        continue;

      R.computeMinimumValueSizes();
      int Cost = R.getTreeCost() - UserCost;
      CandidateFound = true;
      MinCost = std::min(MinCost, Cost);

      if (Cost < -SLPCostThreshold) {
        LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
        R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
                                            cast<Instruction>(Ops[0]))
                         << "SLP vectorized with cost " << ore::NV("Cost", Cost)
                         << " and with tree size "
                         << ore::NV("TreeSize", R.getTreeSize()));

        R.vectorizeTree();
        // Move to the next bundle.
        I += VF - 1;
        NextInst = I + 1;
        Changed = true;
      }
    }
  }

  if (!Changed && CandidateFound) {
    R.getORE()->emit([&]() {
      return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
             << "List vectorization was possible but not beneficial with cost "
             << ore::NV("Cost", MinCost) << " >= "
             << ore::NV("Threshold", -SLPCostThreshold);
    });
  } else if (!Changed) {
    R.getORE()->emit([&]() {
      return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
             << "Cannot SLP vectorize list: vectorization was impossible"
             << " with available vectorization factors";
    });
  }
  return Changed;
}
bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I))
    return false;

  Value *P = I->getParent();

  // Vectorize in current basic block only.
  auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
  auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
  if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
    return false;

  // Try to vectorize V.
  if (tryToVectorizePair(Op0, Op1, R))
    return true;

  auto *A = dyn_cast<BinaryOperator>(Op0);
  auto *B = dyn_cast<BinaryOperator>(Op1);
  // Try to skip B.
  if (B && B->hasOneUse()) {
    auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
      return true;
    if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
      return true;
  }

  // Try to skip A.
  if (A && A->hasOneUse()) {
    auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R))
      return true;
    if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R))
      return true;
  }
  return false;
}
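// Illustration for tryToVectorize above (hypothetical expression): for
// I = (a * b) + ((c * d) + e), pairing the two operands fails, so the code
// retries one operand against the sub-operands of the other, which lets it
// pair (a * b) with (c * d).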
/// Generate a shuffle mask to be used in a reduction tree.
///
/// \param VecLen The length of the vector to be reduced.
/// \param NumEltsToRdx The number of elements that should be reduced in the
///        vector.
/// \param IsPairwise Whether the reduction is a pairwise or splitting
///        reduction. A pairwise reduction will generate a mask of
///        <0,2,...> or <1,3,..> while a splitting reduction will generate
///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
/// \param IsLeft True will generate a mask of even elements, odd otherwise.
static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
                                   bool IsPairwise, bool IsLeft,
                                   IRBuilder<> &Builder) {
  assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");

  SmallVector<Constant *, 32> ShuffleMask(
      VecLen, UndefValue::get(Builder.getInt32Ty()));

  if (IsPairwise)
    // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
  else
    // Move the upper half of the vector to the lower half.
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);

  return ConstantVector::get(ShuffleMask);
}
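// Example masks produced above for VecLen = 4 and NumEltsToRdx = 2: the
// pairwise form yields <0, 2, undef, undef> (left) or <1, 3, undef, undef>
// (right), while the splitting form yields <2, 3, undef, undef>.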
namespace {

/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction operations (currently add and
/// fadd) that has operations that can be put into a vector as its leaf.
/// For example, this tree:
///
/// mul mul mul mul
///  \  /    \  /
///   +       +
///    \     /
///       +
/// This tree has "mul" as its reduced values and "+" as its reduction
/// operations. A reduction might be feeding into a store or a binary operation
/// feeding a phi.
class HorizontalReduction {
  using ReductionOpsType = SmallVector<Value *, 16>;
  using ReductionOpsListType = SmallVector<ReductionOpsType, 2>;
  ReductionOpsListType ReductionOps;
  SmallVector<Value *, 32> ReducedVals;
  // Use map vector to make stable output.
  MapVector<Instruction *, Value *> ExtraArgs;

  /// Kind of the reduction data.
  enum ReductionKind {
    RK_None,       /// Not a reduction.
    RK_Arithmetic, /// Binary reduction data.
    RK_Min,        /// Minimum reduction data.
    RK_UMin,       /// Unsigned minimum reduction data.
    RK_Max,        /// Maximum reduction data.
    RK_UMax,       /// Unsigned maximum reduction data.
  };
  /// Contains info about operation, like its opcode, left and right operands.
  class OperationData {
    /// Opcode of the instruction.
    unsigned Opcode = 0;

    /// Left operand of the reduction operation.
    Value *LHS = nullptr;

    /// Right operand of the reduction operation.
    Value *RHS = nullptr;

    /// Kind of the reduction operation.
    ReductionKind Kind = RK_None;

    /// True if a floating-point min/max reduction has no NaNs.
    bool NoNaN = false;

    /// Checks if the reduction operation can be vectorized.
    bool isVectorizable() const {
      return LHS && RHS &&
             // We currently only support add/mul/logical && min/max reductions.
             ((Kind == RK_Arithmetic &&
               (Opcode == Instruction::Add || Opcode == Instruction::FAdd ||
                Opcode == Instruction::Mul || Opcode == Instruction::FMul ||
                Opcode == Instruction::And || Opcode == Instruction::Or ||
                Opcode == Instruction::Xor)) ||
              ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
               (Kind == RK_Min || Kind == RK_Max)) ||
              (Opcode == Instruction::ICmp &&
               (Kind == RK_UMin || Kind == RK_UMax)));
    }

    /// Creates reduction operation with the current opcode.
    Value *createOp(IRBuilder<> &Builder, const Twine &Name) const {
      assert(isVectorizable() &&
             "Expected add|fadd or min/max reduction operation.");
      Value *Cmp = nullptr;
      switch (Kind) {
      case RK_Arithmetic:
        return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, LHS, RHS,
                                   Name);
      case RK_Min:
        Cmp = Opcode == Instruction::ICmp ? Builder.CreateICmpSLT(LHS, RHS)
                                          : Builder.CreateFCmpOLT(LHS, RHS);
        return Builder.CreateSelect(Cmp, LHS, RHS, Name);
      case RK_Max:
        Cmp = Opcode == Instruction::ICmp ? Builder.CreateICmpSGT(LHS, RHS)
                                          : Builder.CreateFCmpOGT(LHS, RHS);
        return Builder.CreateSelect(Cmp, LHS, RHS, Name);
      case RK_UMin:
        assert(Opcode == Instruction::ICmp && "Expected integer types.");
        Cmp = Builder.CreateICmpULT(LHS, RHS);
        return Builder.CreateSelect(Cmp, LHS, RHS, Name);
      case RK_UMax:
        assert(Opcode == Instruction::ICmp && "Expected integer types.");
        Cmp = Builder.CreateICmpUGT(LHS, RHS);
        return Builder.CreateSelect(Cmp, LHS, RHS, Name);
      case RK_None:
        break;
      }
      llvm_unreachable("Unknown reduction operation.");
    }
  public:
    explicit OperationData() = default;

    /// Construction for reduced values. They are identified by opcode only and
    /// don't have associated LHS/RHS values.
    explicit OperationData(Value *V) {
      if (auto *I = dyn_cast<Instruction>(V))
        Opcode = I->getOpcode();
    }

    /// Constructor for reduction operations with opcode and its left and
    /// right operands.
    OperationData(unsigned Opcode, Value *LHS, Value *RHS, ReductionKind Kind,
                  bool NoNaN = false)
        : Opcode(Opcode), LHS(LHS), RHS(RHS), Kind(Kind), NoNaN(NoNaN) {
      assert(Kind != RK_None && "One of the reduction operations is expected.");
    }

    explicit operator bool() const { return Opcode; }

    /// Get the index of the first operand.
    unsigned getFirstOperandIndex() const {
      assert(!!*this && "The opcode is not set.");
      switch (Kind) {
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        return 1; // Skip the select's condition operand.
      case RK_Arithmetic:
      case RK_None:
        break;
      }
      return 0;
    }

    /// Total number of operands in the reduction operation.
    unsigned getNumberOfOperands() const {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        return 2;
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        return 3;
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }
    /// Checks if the operation has the same parent as \p P.
    bool hasSameParent(Instruction *I, Value *P, bool IsRedOp) const {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      if (!IsRedOp)
        return I->getParent() == P;
      switch (Kind) {
      case RK_Arithmetic:
        // Arithmetic reduction operation must be used once only.
        return I->getParent() == P;
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax: {
        // SelectInst must be used twice while the condition op must have single
        // use only.
        auto *Cmp = cast<Instruction>(cast<SelectInst>(I)->getCondition());
        return I->getParent() == P && Cmp && Cmp->getParent() == P;
      }
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }

    /// Expected number of uses for reduction operations/reduced values.
    bool hasRequiredNumberOfUses(Instruction *I, bool IsReductionOp) const {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        return I->hasOneUse();
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        return I->hasNUses(2) &&
               (!IsReductionOp ||
                cast<SelectInst>(I)->getCondition()->hasOneUse());
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }
    /// Initializes the list of reduction operations.
    void initReductionOps(ReductionOpsListType &ReductionOps) {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        ReductionOps.assign(1, ReductionOpsType());
        break;
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        ReductionOps.assign(2, ReductionOpsType());
        break;
      case RK_None:
        llvm_unreachable("Reduction kind is not set");
      }
    }

    /// Add all reduction operations for the reduction instruction \p I.
    void addReductionOps(Instruction *I, ReductionOpsListType &ReductionOps) {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        ReductionOps[0].emplace_back(I);
        break;
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition());
        ReductionOps[1].emplace_back(I);
        break;
      case RK_None:
        llvm_unreachable("Reduction kind is not set");
      }
    }
    /// Checks if instruction is associative and can be vectorized.
    bool isAssociative(Instruction *I) const {
      assert(Kind != RK_None && *this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        return I->isAssociative();
      case RK_Min:
      case RK_Max:
        return Opcode == Instruction::ICmp ||
               cast<Instruction>(I->getOperand(0))->isFast();
      case RK_UMin:
      case RK_UMax:
        assert(Opcode == Instruction::ICmp &&
               "Only integer compare operation is expected.");
        return true;
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }

    /// Checks if the reduction operation can be vectorized.
    bool isVectorizable(Instruction *I) const {
      return isVectorizable() && isAssociative(I);
    }

    /// Checks if two operation data are both a reduction op or both a reduced
    /// value.
    bool operator==(const OperationData &OD) {
      assert(((Kind != OD.Kind) || ((!LHS == !OD.LHS) && (!RHS == !OD.RHS))) &&
             "One of the comparing operations is incorrect.");
      return this == &OD || (Kind == OD.Kind && Opcode == OD.Opcode);
    }
    bool operator!=(const OperationData &OD) { return !(*this == OD); }

    void clear() {
      Opcode = 0;
      LHS = nullptr;
      RHS = nullptr;
      Kind = RK_None;
      NoNaN = false;
    }
    /// Get the opcode of the reduction operation.
    unsigned getOpcode() const {
      assert(isVectorizable() && "Expected vectorizable operation.");
      return Opcode;
    }

    /// Get kind of reduction data.
    ReductionKind getKind() const { return Kind; }
    Value *getLHS() const { return LHS; }
    Value *getRHS() const { return RHS; }

    Type *getConditionType() const {
      switch (Kind) {
      case RK_Arithmetic:
        return nullptr;
      case RK_Min:
      case RK_Max:
      case RK_UMin:
      case RK_UMax:
        return CmpInst::makeCmpResultType(LHS->getType());
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }
    /// Creates reduction operation with the current opcode with the IR flags
    /// from \p ReductionOps.
    Value *createOp(IRBuilder<> &Builder, const Twine &Name,
                    const ReductionOpsListType &ReductionOps) const {
      assert(isVectorizable() &&
             "Expected add|fadd or min/max reduction operation.");
      auto *Op = createOp(Builder, Name);
      switch (Kind) {
      case RK_Arithmetic:
        propagateIRFlags(Op, ReductionOps[0]);
        return Op;
      case RK_Min:
      case RK_Max:
      case RK_UMin:
      case RK_UMax:
        if (auto *SI = dyn_cast<SelectInst>(Op))
          propagateIRFlags(SI->getCondition(), ReductionOps[0]);
        propagateIRFlags(Op, ReductionOps[1]);
        return Op;
      case RK_None:
        break;
      }
      llvm_unreachable("Unknown reduction operation.");
    }

    /// Creates reduction operation with the current opcode with the IR flags
    /// from \p I.
    Value *createOp(IRBuilder<> &Builder, const Twine &Name,
                    Instruction *I) const {
      assert(isVectorizable() &&
             "Expected add|fadd or min/max reduction operation.");
      auto *Op = createOp(Builder, Name);
      switch (Kind) {
      case RK_Arithmetic:
        propagateIRFlags(Op, I);
        return Op;
      case RK_Min:
      case RK_Max:
      case RK_UMin:
      case RK_UMax:
        if (auto *SI = dyn_cast<SelectInst>(Op)) {
          propagateIRFlags(SI->getCondition(),
                           cast<SelectInst>(I)->getCondition());
        }
        propagateIRFlags(Op, I);
        return Op;
      case RK_None:
        break;
      }
      llvm_unreachable("Unknown reduction operation.");
    }
    TargetTransformInfo::ReductionFlags getFlags() const {
      TargetTransformInfo::ReductionFlags Flags;
      Flags.NoNaN = NoNaN;
      switch (Kind) {
      case RK_Arithmetic:
        break;
      case RK_Min:
        Flags.IsSigned = Opcode == Instruction::ICmp;
        Flags.IsMaxOp = false;
        break;
      case RK_Max:
        Flags.IsSigned = Opcode == Instruction::ICmp;
        Flags.IsMaxOp = true;
        break;
      case RK_UMin:
        Flags.IsSigned = false;
        Flags.IsMaxOp = false;
        break;
      case RK_UMax:
        Flags.IsSigned = false;
        Flags.IsMaxOp = true;
        break;
      case RK_None:
        llvm_unreachable("Reduction kind is not set");
      }
      return Flags;
    }
  };
  WeakTrackingVH ReductionRoot;

  /// The operation data of the reduction operation.
  OperationData ReductionData;

  /// The operation data of the values we perform a reduction on.
  OperationData ReducedValueData;

  /// Should we model this reduction as a pairwise reduction tree or a tree that
  /// splits the vector in halves and adds those halves.
  bool IsPairwiseReduction = false;

  /// Checks if the ParentStackElem.first should be marked as a reduction
  /// operation with an extra argument or as extra argument itself.
  void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem,
                    Value *ExtraArg) {
    if (ExtraArgs.count(ParentStackElem.first)) {
      ExtraArgs[ParentStackElem.first] = nullptr;
      // We ran into something like:
      // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg.
      // The whole ParentStackElem.first should be considered as an extra value
      // in this case.
      // Do not perform analysis of remaining operands of ParentStackElem.first
      // instruction, this whole instruction is an extra argument.
      ParentStackElem.second = ParentStackElem.first->getNumOperands();
    } else {
      // We ran into something like:
      // ParentStackElem.first += ... + ExtraArg + ...
      ExtraArgs[ParentStackElem.first] = ExtraArg;
    }
  }
  static OperationData getOperationData(Value *V) {
    if (!V)
      return OperationData();

    Value *LHS;
    Value *RHS;
    if (m_BinOp(m_Value(LHS), m_Value(RHS)).match(V)) {
      return OperationData(cast<BinaryOperator>(V)->getOpcode(), LHS, RHS,
                           RK_Arithmetic);
    }
    if (auto *Select = dyn_cast<SelectInst>(V)) {
      // Look for a min/max pattern.
      if (m_UMin(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_UMin);
      } else if (m_SMin(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_Min);
      } else if (m_OrdFMin(m_Value(LHS), m_Value(RHS)).match(Select) ||
                 m_UnordFMin(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(
            Instruction::FCmp, LHS, RHS, RK_Min,
            cast<Instruction>(Select->getCondition())->hasNoNaNs());
      } else if (m_UMax(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_UMax);
      } else if (m_SMax(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_Max);
      } else if (m_OrdFMax(m_Value(LHS), m_Value(RHS)).match(Select) ||
                 m_UnordFMax(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(
            Instruction::FCmp, LHS, RHS, RK_Max,
            cast<Instruction>(Select->getCondition())->hasNoNaNs());
      } else {
        // Try harder: look for min/max pattern based on instructions producing
        // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2).
        // During the intermediate stages of SLP, it's very common to have
        // pattern like this (since optimizeGatherSequence is run only once
        // at the end):
        // %1 = extractelement <2 x i32> %a, i32 0
        // %2 = extractelement <2 x i32> %a, i32 1
        // %cond = icmp sgt i32 %1, %2
        // %3 = extractelement <2 x i32> %a, i32 0
        // %4 = extractelement <2 x i32> %a, i32 1
        // %select = select i1 %cond, i32 %3, i32 %4
        CmpInst::Predicate Pred;
        Instruction *L1;
        Instruction *L2;

        LHS = Select->getTrueValue();
        RHS = Select->getFalseValue();
        Value *Cond = Select->getCondition();

        // TODO: Support inverse predicates.
        if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) {
          if (!isa<ExtractElementInst>(RHS) ||
              !L2->isIdenticalTo(cast<Instruction>(RHS)))
            return OperationData(V);
        } else if (match(Cond,
                         m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) {
          if (!isa<ExtractElementInst>(LHS) ||
              !L1->isIdenticalTo(cast<Instruction>(LHS)))
            return OperationData(V);
        } else {
          if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS))
            return OperationData(V);
          if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) ||
              !L1->isIdenticalTo(cast<Instruction>(LHS)) ||
              !L2->isIdenticalTo(cast<Instruction>(RHS)))
            return OperationData(V);
        }

        switch (Pred) {
        default:
          return OperationData(V);

        case CmpInst::ICMP_ULT:
        case CmpInst::ICMP_ULE:
          return OperationData(Instruction::ICmp, LHS, RHS, RK_UMin);

        case CmpInst::ICMP_SLT:
        case CmpInst::ICMP_SLE:
          return OperationData(Instruction::ICmp, LHS, RHS, RK_Min);

        case CmpInst::FCMP_OLT:
        case CmpInst::FCMP_OLE:
        case CmpInst::FCMP_ULT:
        case CmpInst::FCMP_ULE:
          return OperationData(Instruction::FCmp, LHS, RHS, RK_Min,
                               cast<Instruction>(Cond)->hasNoNaNs());

        case CmpInst::ICMP_UGT:
        case CmpInst::ICMP_UGE:
          return OperationData(Instruction::ICmp, LHS, RHS, RK_UMax);

        case CmpInst::ICMP_SGT:
        case CmpInst::ICMP_SGE:
          return OperationData(Instruction::ICmp, LHS, RHS, RK_Max);

        case CmpInst::FCMP_OGT:
        case CmpInst::FCMP_OGE:
        case CmpInst::FCMP_UGT:
        case CmpInst::FCMP_UGE:
          return OperationData(Instruction::FCmp, LHS, RHS, RK_Max,
                               cast<Instruction>(Cond)->hasNoNaNs());
        }
      }
    }
    return OperationData(V);
  }

public:
  HorizontalReduction() = default;

  /// Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, Instruction *B) {
    assert((!Phi || is_contained(Phi->operands(), B)) &&
           "The phi needs to use the binary operator");

    ReductionData = getOperationData(B);

    // We could have an initial reduction that is not an add.
    //  r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (ReductionData.getLHS() == Phi) {
        Phi = nullptr;
        B = dyn_cast<Instruction>(ReductionData.getRHS());
        ReductionData = getOperationData(B);
      } else if (ReductionData.getRHS() == Phi) {
        Phi = nullptr;
        B = dyn_cast<Instruction>(ReductionData.getLHS());
        ReductionData = getOperationData(B);
      }
    }

    if (!ReductionData.isVectorizable(B))
      return false;

    Type *Ty = B->getType();
    if (!isValidElementType(Ty))
      return false;
    if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy())
      return false;

    ReducedValueData.clear();
    ReductionRoot = B;

    // Post order traverse the reduction tree starting at B. We only handle true
    // trees containing only binary operators.
    SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, ReductionData.getFirstOperandIndex()));
    ReductionData.initReductionOps(ReductionOps);
    while (!Stack.empty()) {
      Instruction *TreeN = Stack.back().first;
      unsigned EdgeToVist = Stack.back().second++;
      OperationData OpData = getOperationData(TreeN);
      bool IsReducedValue = OpData != ReductionData;

      // Postorder visit.
      if (IsReducedValue || EdgeToVist == OpData.getNumberOfOperands()) {
        if (IsReducedValue)
          ReducedVals.push_back(TreeN);
        else {
          auto I = ExtraArgs.find(TreeN);
          if (I != ExtraArgs.end() && !I->second) {
            // Check if TreeN is an extra argument of its parent operation.
            if (Stack.size() <= 1) {
              // TreeN can't be an extra argument as it is a root reduction
              // operation.
              return false;
            }
            // Yes, TreeN is an extra argument, do not add it to a list of
            // reduction operations.
            // Stack[Stack.size() - 2] always points to the parent operation.
            markExtraArg(Stack[Stack.size() - 2], TreeN);
            ExtraArgs.erase(TreeN);
          } else
            ReductionData.addReductionOps(TreeN, ReductionOps);
        }
        // Retract.
        Stack.pop_back();
        continue;
      }

      // Visit left or right.
      Value *NextV = TreeN->getOperand(EdgeToVist);
      if (NextV != Phi) {
        auto *I = dyn_cast<Instruction>(NextV);
        OpData = getOperationData(I);
        // Continue analysis if the next operand is a reduction operation or
        // (possibly) a reduced value. If the reduced value opcode is not set,
        // the first met operation != reduction operation is considered as the
        // reduced value class.
        if (I && (!ReducedValueData || OpData == ReducedValueData ||
                  OpData == ReductionData)) {
          const bool IsReductionOperation = OpData == ReductionData;
          // Only handle trees in the current basic block.
          if (!ReductionData.hasSameParent(I, B->getParent(),
                                           IsReductionOperation)) {
            // I is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), I);
            continue;
          }

          // Each tree node needs to have minimal number of users except for the
          // ultimate reduction.
          if (!ReductionData.hasRequiredNumberOfUses(I,
                                                     OpData == ReductionData) &&
              I != B) {
            // I is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), I);
            continue;
          }

          if (IsReductionOperation) {
            // We need to be able to reassociate the reduction operations.
            if (!OpData.isAssociative(I)) {
              // I is an extra argument for TreeN (its parent operation).
              markExtraArg(Stack.back(), I);
              continue;
            }
          } else if (ReducedValueData &&
                     ReducedValueData != OpData) {
            // Make sure that the opcodes of the operations that we are going to
            // reduce match.
            // I is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), I);
            continue;
          } else if (!ReducedValueData)
            ReducedValueData = OpData;

          Stack.push_back(std::make_pair(I, OpData.getFirstOperandIndex()));
          continue;
        }
      }
      // NextV is an extra argument for TreeN (its parent operation).
      markExtraArg(Stack.back(), NextV);
    }
    return true;
  }
  /// Attempt to vectorize the tree found by
  /// matchAssociativeReduction.
  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
    if (ReducedVals.empty())
      return false;

    // If there is a sufficient number of reduction values, reduce
    // to a nearby power-of-2. Can safely generate oversized
    // vectors and rely on the backend to split them to legal sizes.
    unsigned NumReducedVals = ReducedVals.size();
    if (NumReducedVals < 4)
      return false;

    unsigned ReduxWidth = PowerOf2Floor(NumReducedVals);

    Value *VectorizedTree = nullptr;

    // FIXME: Fast-math-flags should be set based on the instructions in the
    //        reduction (not all of 'fast' are required).
    IRBuilder<> Builder(cast<Instruction>(ReductionRoot));
    FastMathFlags Unsafe;
    Unsafe.setFast();
    Builder.setFastMathFlags(Unsafe);
    unsigned i = 0;

    BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
    // The same extra argument may be used several times, so log each attempt
    // to use it.
    for (auto &Pair : ExtraArgs) {
      assert(Pair.first && "DebugLoc must be set.");
      ExternallyUsedValues[Pair.second].push_back(Pair.first);
    }
    // The reduction root is used as the insertion point for new instructions,
    // so set it as externally used to prevent it from being deleted.
    ExternallyUsedValues[ReductionRoot];
    SmallVector<Value *, 16> IgnoreList;
    for (auto &V : ReductionOps)
      IgnoreList.append(V.begin(), V.end());
    while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) {
      auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth);
      V.buildTree(VL, ExternallyUsedValues, IgnoreList);
      Optional<ArrayRef<unsigned>> Order = V.bestOrder();
      // TODO: Handle orders of size less than number of elements in the vector.
      if (Order && Order->size() == VL.size()) {
        // TODO: reorder tree nodes without tree rebuilding.
        SmallVector<Value *, 4> ReorderedOps(VL.size());
        llvm::transform(*Order, ReorderedOps.begin(),
                        [VL](const unsigned Idx) { return VL[Idx]; });
        V.buildTree(ReorderedOps, ExternallyUsedValues, IgnoreList);
      }
      if (V.isTreeTinyAndNotFullyVectorizable())
        break;

      V.computeMinimumValueSizes();

      // Estimate cost.
      int TreeCost = V.getTreeCost();
      int ReductionCost = getReductionCost(TTI, ReducedVals[i], ReduxWidth);
      int Cost = TreeCost + ReductionCost;
      if (Cost >= -SLPCostThreshold) {
        V.getORE()->emit([&]() {
          return OptimizationRemarkMissed(
                     SV_NAME, "HorSLPNotBeneficial", cast<Instruction>(VL[0]))
                 << "Vectorizing horizontal reduction is possible "
                 << "but not beneficial with cost "
                 << ore::NV("Cost", Cost) << " and threshold "
                 << ore::NV("Threshold", -SLPCostThreshold);
        });
        break;
      }

      LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
                        << Cost << ". (HorRdx)\n");
      V.getORE()->emit([&]() {
        return OptimizationRemark(
                   SV_NAME, "VectorizedHorizontalReduction",
                   cast<Instruction>(VL[0]))
               << "Vectorized horizontal reduction with cost "
               << ore::NV("Cost", Cost) << " and with tree size "
               << ore::NV("TreeSize", V.getTreeSize());
      });

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);

      // Emit a reduction.
      Builder.SetInsertPoint(cast<Instruction>(ReductionRoot));
      Value *ReducedSubTree =
          emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
      if (VectorizedTree) {
        Builder.SetCurrentDebugLocation(Loc);
        OperationData VectReductionData(ReductionData.getOpcode(),
                                        VectorizedTree, ReducedSubTree,
                                        ReductionData.getKind());
        VectorizedTree =
            VectReductionData.createOp(Builder, "op.rdx", ReductionOps);
      } else
        VectorizedTree = ReducedSubTree;
      i += ReduxWidth;
      ReduxWidth = PowerOf2Floor(NumReducedVals - i);
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        auto *I = cast<Instruction>(ReducedVals[i]);
        Builder.SetCurrentDebugLocation(I->getDebugLoc());
        OperationData VectReductionData(ReductionData.getOpcode(),
                                        VectorizedTree, I,
                                        ReductionData.getKind());
        VectorizedTree = VectReductionData.createOp(Builder, "", ReductionOps);
      }
      for (auto &Pair : ExternallyUsedValues) {
        // Add each externally used value to the final reduction.
        for (auto *I : Pair.second) {
          Builder.SetCurrentDebugLocation(I->getDebugLoc());
          OperationData VectReductionData(ReductionData.getOpcode(),
                                          VectorizedTree, Pair.first,
                                          ReductionData.getKind());
          VectorizedTree = VectReductionData.createOp(Builder, "op.extra", I);
        }
      }
      // Update users.
      ReductionRoot->replaceAllUsesWith(VectorizedTree);
      // Mark all scalar reduction ops for deletion, they are replaced by the
      // vector reductions.
      V.eraseInstructions(IgnoreList);
    }
    return VectorizedTree != nullptr;
  }
  unsigned numReductionValues() const {
    return ReducedVals.size();
  }
private:
  /// Calculate the cost of a reduction.
  int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal,
                       unsigned ReduxWidth) {
    Type *ScalarTy = FirstReducedVal->getType();
    Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);

    int PairwiseRdxCost;
    int SplittingRdxCost;
    switch (ReductionData.getKind()) {
    case RK_Arithmetic:
      PairwiseRdxCost =
          TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy,
                                          /*IsPairwiseForm=*/true);
      SplittingRdxCost =
          TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy,
                                          /*IsPairwiseForm=*/false);
      break;
    case RK_Min:
    case RK_Max:
    case RK_UMin:
    case RK_UMax: {
      Type *VecCondTy = CmpInst::makeCmpResultType(VecTy);
      bool IsUnsigned = ReductionData.getKind() == RK_UMin ||
                        ReductionData.getKind() == RK_UMax;
      PairwiseRdxCost =
          TTI->getMinMaxReductionCost(VecTy, VecCondTy,
                                      /*IsPairwiseForm=*/true, IsUnsigned);
      SplittingRdxCost =
          TTI->getMinMaxReductionCost(VecTy, VecCondTy,
                                      /*IsPairwiseForm=*/false, IsUnsigned);
      break;
    }
    case RK_None:
      llvm_unreachable("Expected arithmetic or min/max reduction operation");
    }

    IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
    int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;

    int ScalarReduxCost = 0;
    switch (ReductionData.getKind()) {
    case RK_Arithmetic:
      ScalarReduxCost =
          TTI->getArithmeticInstrCost(ReductionData.getOpcode(), ScalarTy);
      break;
    case RK_Min:
    case RK_Max:
    case RK_UMin:
    case RK_UMax:
      ScalarReduxCost =
          TTI->getCmpSelInstrCost(ReductionData.getOpcode(), ScalarTy) +
          TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
                                  CmpInst::makeCmpResultType(ScalarTy));
      break;
    case RK_None:
      llvm_unreachable("Expected arithmetic or min/max reduction operation");
    }
    ScalarReduxCost *= (ReduxWidth - 1);

    LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
                      << " for reduction that starts with " << *FirstReducedVal
                      << " (It is a "
                      << (IsPairwiseReduction ? "pairwise" : "splitting")
                      << " reduction)\n");

    return VecReduxCost - ScalarReduxCost;
  }
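  // For intuition (illustrative IR): the pairwise form costed above reduces
  // <4 x i32> %v as
  //   %l = shufflevector %v, undef, <0, 2, undef, undef>
  //   %r = shufflevector %v, undef, <1, 3, undef, undef>
  //   %s = add <4 x i32> %l, %r
  // and then repeats with half the width until lane 0 holds the final value,
  // which is what emitReduction below materializes.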
  /// Emit a horizontal reduction of the vectorized value.
  Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder,
                       unsigned ReduxWidth, const TargetTransformInfo *TTI) {
    assert(VectorizedValue && "Need to have a vectorized tree node");
    assert(isPowerOf2_32(ReduxWidth) &&
           "We only handle power-of-two reductions for now");

    if (!IsPairwiseReduction) {
      // FIXME: The builder should use an FMF guard. It should not be hard-coded
      //        to 'fast'.
      assert(Builder.getFastMathFlags().isFast() && "Expected 'fast' FMF");
      return createSimpleTargetReduction(
          Builder, TTI, ReductionData.getOpcode(), VectorizedValue,
          ReductionData.getFlags(), ReductionOps.back());
    }

    Value *TmpVec = VectorizedValue;
    for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
      Value *LeftMask =
          createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
      Value *RightMask =
          createRdxShuffleMask(ReduxWidth, i, true, false, Builder);

      Value *LeftShuf = Builder.CreateShuffleVector(
          TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
      Value *RightShuf = Builder.CreateShuffleVector(
          TmpVec, UndefValue::get(TmpVec->getType()), RightMask,
          "rdx.shuf.r");
      OperationData VectReductionData(ReductionData.getOpcode(), LeftShuf,
                                      RightShuf, ReductionData.getKind());
      TmpVec = VectReductionData.createOp(Builder, "op.rdx", ReductionOps);
    }

    // The result is in the first element of the vector.
    return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
  }
};
} // end anonymous namespace
/// Recognize construction of vectors like
///  %ra = insertelement <4 x float> undef, float %s0, i32 0
///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
/// starting from the last insertelement instruction.
///
/// Returns true if it matches.
static bool findBuildVector(InsertElementInst *LastInsertElem,
                            TargetTransformInfo *TTI,
                            SmallVectorImpl<Value *> &BuildVectorOpds,
                            int &UserCost) {
  UserCost = 0;
  Value *V = nullptr;
  do {
    if (auto *CI = dyn_cast<ConstantInt>(LastInsertElem->getOperand(2))) {
      UserCost += TTI->getVectorInstrCost(Instruction::InsertElement,
                                          LastInsertElem->getType(),
                                          CI->getZExtValue());
    }
    BuildVectorOpds.push_back(LastInsertElem->getOperand(1));
    V = LastInsertElem->getOperand(0);
    if (isa<UndefValue>(V))
      break;
    LastInsertElem = dyn_cast<InsertElementInst>(V);
    if (!LastInsertElem || !LastInsertElem->hasOneUse())
      return false;
  } while (true);
  std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end());
  return true;
}
/// Like findBuildVector, but looks for construction of aggregate.
///
/// \return true if it matches.
static bool findBuildAggregate(InsertValueInst *IV,
                               SmallVectorImpl<Value *> &BuildVectorOpds) {
  Value *V;
  do {
    BuildVectorOpds.push_back(IV->getInsertedValueOperand());
    V = IV->getAggregateOperand();
    if (isa<UndefValue>(V))
      break;
    IV = dyn_cast<InsertValueInst>(V);
    if (!IV || !IV->hasOneUse())
      return false;
  } while (true);
  std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end());
  return true;
}
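// For example (illustrative): walking the chain
//   %ra = insertvalue { float, float } undef, float %s0, 0
//   %rb = insertvalue { float, float } %ra, float %s1, 1
// from %rb yields BuildVectorOpds = { %s0, %s1 } after the final reversal.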
static bool PhiTypeSorterFunc(Value *V, Value *V2) {
  return V->getType() < V2->getType();
}
/// Try and get a reduction value from a phi node.
///
/// Given a phi node \p P in a block \p ParentBB, consider possible reductions
/// if they come from either \p ParentBB or a containing loop latch.
///
/// \returns A candidate reduction value if possible, or \code nullptr \endcode
/// if not possible.
static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
                                BasicBlock *ParentBB, LoopInfo *LI) {
  // There are situations where the reduction value is not dominated by the
  // reduction phi. Vectorizing such cases has been reported to cause
  // miscompiles. See PR25787.
  auto DominatedReduxValue = [&](Value *R) {
    return isa<Instruction>(R) &&
           DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
  };

  Value *Rdx = nullptr;

  // Return the incoming value if it comes from the same BB as the phi node.
  if (P->getIncomingBlock(0) == ParentBB) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == ParentBB) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  // Otherwise, check whether we have a loop latch to look at.
  Loop *BBL = LI->getLoopFor(ParentBB);
  if (!BBL)
    return nullptr;
  BasicBlock *BBLatch = BBL->getLoopLatch();
  if (!BBLatch)
    return nullptr;

  // There is a loop latch, return the incoming value if it comes from
  // that. This reduction pattern occasionally turns up.
  if (P->getIncomingBlock(0) == BBLatch) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == BBLatch) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  return nullptr;
}
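// For example (illustrative): in a loop whose body %for.body contains
//   %sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
//   %add = add i32 %sum, %x
// the incoming value from the latch, %add, is the candidate returned.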
/// Attempt to reduce a horizontal reduction.
/// If it is legal to match a horizontal reduction feeding the phi node \a P
/// with reduction operators \a Root (or one of its operands) in a basic block
/// \a BB, then check if it can be done. If a horizontal reduction is not found
/// and the root instruction is a binary operation, vectorization of the
/// operands is attempted.
/// \returns true if a horizontal reduction was matched and reduced or operands
/// of one of the binary instructions were vectorized.
/// \returns false if a horizontal reduction was not matched (or not possible)
/// or no vectorization of any binary operation feeding \a Root instruction was
/// performed.
static bool tryToVectorizeHorReductionOrInstOperands(
    PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
    TargetTransformInfo *TTI,
    const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
  if (!ShouldVectorizeHor)
    return false;

  if (!Root)
    return false;

  if (Root->getParent() != BB || isa<PHINode>(Root))
    return false;
  // Start analysis starting from Root instruction. If horizontal reduction is
  // found, try to vectorize it. If it is not a horizontal reduction or
  // vectorization is not possible or not effective, and currently analyzed
  // instruction is a binary operation, try to vectorize the operands, using
  // pre-order DFS traversal order. If the operands were not vectorized, repeat
  // the same procedure considering each operand as a possible root of the
  // horizontal reduction.
  // Interrupt the process if the Root instruction itself was vectorized or all
  // sub-trees not higher than RecursionMaxDepth were analyzed/vectorized.
  SmallVector<std::pair<Instruction *, unsigned>, 8> Stack(1, {Root, 0});
  SmallPtrSet<Value *, 8> VisitedInstrs;
  bool Res = false;
  while (!Stack.empty()) {
    Instruction *Inst;
    unsigned Level;
    std::tie(Inst, Level) = Stack.pop_back_val();
    auto *BI = dyn_cast<BinaryOperator>(Inst);
    auto *SI = dyn_cast<SelectInst>(Inst);
    if (BI || SI) {
      HorizontalReduction HorRdx;
      if (HorRdx.matchAssociativeReduction(P, Inst)) {
        if (HorRdx.tryToReduce(R, TTI)) {
          Res = true;
          // Set P to nullptr to avoid re-analysis of phi node in
          // matchAssociativeReduction function unless this is the root node.
          P = nullptr;
          continue;
        }
      }
      if (P && BI) {
        Inst = dyn_cast<Instruction>(BI->getOperand(0));
        if (Inst == P)
          Inst = dyn_cast<Instruction>(BI->getOperand(1));
        if (!Inst) {
          // Set P to nullptr to avoid re-analysis of phi node in
          // matchAssociativeReduction function unless this is the root node.
          P = nullptr;
          continue;
        }
      }
    }
    // Set P to nullptr to avoid re-analysis of phi node in
    // matchAssociativeReduction function unless this is the root node.
    P = nullptr;
    if (Vectorize(Inst, R)) {
      Res = true;
      continue;
    }

    // Try to vectorize operands.
    // Continue analysis for the instruction from the same basic block only to
    // save compile time.
    if (++Level < RecursionMaxDepth)
      for (auto *Op : Inst->operand_values())
        if (VisitedInstrs.insert(Op).second)
          if (auto *I = dyn_cast<Instruction>(Op))
            if (!isa<PHINode>(I) && !R.isDeleted(I) && I->getParent() == BB)
              Stack.emplace_back(I, Level);
  }
  return Res;
}
bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
                                                 BasicBlock *BB, BoUpSLP &R,
                                                 TargetTransformInfo *TTI) {
  if (!V)
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I))
    P = nullptr;
  // Try to match and vectorize a horizontal reduction.
  auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
    return tryToVectorize(I, R);
  };
  return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI,
                                                  ExtraVectorization);
}
bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
                                                 BasicBlock *BB, BoUpSLP &R) {
  const DataLayout &DL = BB->getModule()->getDataLayout();
  if (!R.canMapToVector(IVI->getType(), DL))
    return false;

  SmallVector<Value *, 16> BuildVectorOpds;
  if (!findBuildAggregate(IVI, BuildVectorOpds))
    return false;

  LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
  // An aggregate value is unlikely to be processed in a vector register; the
  // scalars need to be extracted into scalar registers anyway.
  return tryToVectorizeList(BuildVectorOpds, R);
}
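
// Illustrative example: a "build vector" is a chain of insertelement
// instructions, e.g.
//
//   %v0 = insertelement <2 x float> undef, float %a, i32 0
//   %v1 = insertelement <2 x float> %v0, float %b, i32 1
//
// If every operand is an extractelement and together they form a plain
// shuffle, revectorizing them would be redundant, hence the isShuffle check
// below.
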
bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
                                                   BasicBlock *BB, BoUpSLP &R) {
  int UserCost;
  SmallVector<Value *, 16> BuildVectorOpds;
  if (!findBuildVector(IEI, TTI, BuildVectorOpds, UserCost) ||
      (llvm::all_of(BuildVectorOpds,
                    [](Value *V) { return isa<ExtractElementInst>(V); }) &&
       isShuffle(BuildVectorOpds)))
    return false;

  // Vectorize starting with the build vector operands, ignoring the
  // BuildVector instructions for the purpose of scheduling and user
  // extraction.
  return tryToVectorizeList(BuildVectorOpds, R, UserCost);
}

bool SLPVectorizerPass::vectorizeCmpInst(CmpInst *CI, BasicBlock *BB,
                                         BoUpSLP &R) {
  if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R))
    return true;

  bool OpsChanged = false;
  for (int Idx = 0; Idx < 2; ++Idx) {
    OpsChanged |=
        vectorizeRootInstruction(nullptr, CI->getOperand(Idx), BB, R, TTI);
  }
  return OpsChanged;
}
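
// Note: the post-process list below is walked in reverse so that the
// instructions appearing latest in the block, which are the most likely roots
// of bottom-up SLP trees, are tried first.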
bool SLPVectorizerPass::vectorizeSimpleInstructions(
    SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R) {
  bool OpsChanged = false;
  for (auto *I : reverse(Instructions)) {
    if (R.isDeleted(I))
      continue;
    if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I))
      OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
    else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I))
      OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
    else if (auto *CI = dyn_cast<CmpInst>(I))
      OpsChanged |= vectorizeCmpInst(CI, BB, R);
  }
  Instructions.clear();
  return OpsChanged;
}

bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallPtrSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (Instruction &I : *BB) {
      PHINode *P = dyn_cast<PHINode>(&I);
      if (!P)
        break;

      if (!VisitedInstrs.count(P) && !R.isDeleted(P))
        Incoming.push_back(P);
    }

    llvm::stable_sort(Incoming, PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
                                           E = Incoming.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs ("
                        << NumElts << ")\n");
      // The order in which the phi nodes appear in the program does not
      // matter, so allow tryToVectorizeList to reorder them if it is
      // beneficial. This is done only when there are exactly two elements,
      // since tryToVectorizeList asserts that there are only two values when
      // AllowReorder is true.
      bool AllowReorder = NumElts == 2;
      if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R,
                                            /*UserCost=*/0, AllowReorder)) {
        // Success: start over, because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();
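
  // Second phase: scan all instructions in the block once more, matching
  // horizontal reductions that feed unused instructions (terminators, stores,
  // discarded calls) and collecting inserts and compares for post-processing.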
  SmallVector<Instruction *, 8> PostProcessInstructions;
  SmallDenseSet<Instruction *, 4> KeyNodes;
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // Skip instructions marked for deletion.
    if (R.isDeleted(&*it))
      continue;
    // We may go through BB multiple times so skip the ones we have already
    // checked.
    if (!VisitedInstrs.insert(&*it).second) {
      if (it->use_empty() && KeyNodes.count(&*it) > 0 &&
          vectorizeSimpleInstructions(PostProcessInstructions, BB, R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;

      // Try to match and vectorize a horizontal reduction.
      if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
                                   TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    // Ran into an instruction without users, such as a terminator, a function
    // call with an ignored return value, or a store. Ignore unused
    // instructions (based on instruction type, except for CallInst and
    // InvokeInst).
    if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
                            isa<InvokeInst>(it))) {
      KeyNodes.insert(&*it);
      bool OpsChanged = false;
      if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
        for (auto *V : it->operand_values()) {
          // Try to match and vectorize a horizontal reduction.
          OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
        }
      }
      // Start vectorization of the post-process list of instructions from the
      // top-tree instructions to try to vectorize as many instructions as
      // possible.
      OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R);
      if (OpsChanged) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
    }

    if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
        isa<InsertValueInst>(it))
      PostProcessInstructions.push_back(&*it);
  }

  return Changed;
}
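
// Note: the GEPs map traversed below is assumed to be seeded elsewhere in
// this pass (see collectSeedInstructions) with getelementptrs that have a
// single non-constant index, grouped so that members of a group may share
// vectorizable index computations.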
bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {
    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                      << Entry.second.size() << ".\n");

    // We process the getelementptr list in chunks of 16 (like we do for
    // stores) to minimize compile-time.
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) {
      auto Len = std::min<unsigned>(BE - BI, 16);
      auto GEPList = makeArrayRef(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, they are marked as deleted, so remove
      // them from the set of candidates.
      Candidates.remove_if(
          [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });

      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences. Such getelementptrs are likely not good
      // candidates for vectorization in a bottom-up phase since one can be
      // computed from the other. We also ensure all candidate getelementptr
      // indices are unique.
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
        auto *GEPI = GEPList[I];
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = GEPList[J];
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPI);
            Candidates.remove(GEPJ);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPJ);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try and vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}
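
// Illustrative example: a store chain is a group of stores to consecutive
// addresses,
//
//   a[i] = x0; a[i+1] = x1; a[i+2] = x2; a[i+3] = x3;
//
// which can be merged into a single vector store.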
bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
       ++it) {
    if (it->second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                      << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    // TODO: The limit of 16 inhibits greater vectorization factors.
    //       For example, AVX2 supports v32i8. Increasing this limit, however,
    //       may cause a significant compile-time increase.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len), R);
    }
  }
  return Changed;
}
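
// Legacy pass-manager registration: the dependency declarations below ensure
// that the analyses this pass requires are scheduled before it runs.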
char SLPVectorizer::ID = 0;

static const char lv_name[] = "SLP Vectorizer";

INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }